@@ -125,6 +125,20 @@ static const char *obj_states[ODEBUG_STATE_MAX] = {
125125 [ODEBUG_STATE_NOTAVAILABLE ] = "not available" ,
126126};
127127
/*
 * free_object_list() - unlink and free every debug_obj queued on @head.
 *
 * Each object is removed from the list and returned to obj_cache; the
 * number of objects freed is then added to the debug_objects_freed
 * statistics counter in a single step.
 *
 * NOTE(review): no locking is taken here — callers are expected to own
 * @head exclusively (e.g. a local list detached under pool_lock, as in
 * free_obj_work()). TODO confirm against all call sites.
 */
128+ static void free_object_list (struct hlist_head * head )
129+ {
130+ struct hlist_node * tmp ;
131+ struct debug_obj * obj ;
132+ int cnt = 0 ;
133+
/* _safe iteration variant: the current entry is deleted inside the loop */
134+ hlist_for_each_entry_safe (obj , tmp , head , node ) {
135+ hlist_del (& obj -> node );
136+ kmem_cache_free (obj_cache , obj );
137+ cnt ++ ;
138+ }
139+ debug_objects_freed += cnt ;
140+ }
141+
128142static void fill_pool (void )
129143{
130144 gfp_t gfp = __GFP_HIGH | __GFP_NOWARN ;
@@ -286,7 +300,6 @@ alloc_object(void *addr, struct debug_bucket *b, const struct debug_obj_descr *d
286300 */
287301static void free_obj_work (struct work_struct * work )
288302{
289- struct hlist_node * tmp ;
290303 struct debug_obj * obj ;
291304 unsigned long flags ;
292305 HLIST_HEAD (tofree );
@@ -323,15 +336,11 @@ static void free_obj_work(struct work_struct *work)
323336 */
324337 if (obj_nr_tofree ) {
325338 hlist_move_list (& obj_to_free , & tofree );
326- debug_objects_freed += obj_nr_tofree ;
327339 WRITE_ONCE (obj_nr_tofree , 0 );
328340 }
329341 raw_spin_unlock_irqrestore (& pool_lock , flags );
330342
331- hlist_for_each_entry_safe (obj , tmp , & tofree , node ) {
332- hlist_del (& obj -> node );
333- kmem_cache_free (obj_cache , obj );
334- }
343+ free_object_list (& tofree );
335344}
336345
337346static void __free_object (struct debug_obj * obj )
@@ -1334,6 +1343,7 @@ static bool __init debug_objects_replace_static_objects(struct kmem_cache *cache
13341343 }
13351344 return true;
13361345free :
1346+ /* Can't use free_object_list() as the cache is not populated yet */
13371347 hlist_for_each_entry_safe (obj , tmp , & objects , node ) {
13381348 hlist_del (& obj -> node );
13391349 kmem_cache_free (cache , obj );
0 commit comments