@@ -68,6 +68,8 @@ static DEFINE_RAW_SPINLOCK(pool_lock);
 static struct obj_pool pool_global;
 static struct obj_pool pool_to_free;
 
+static HLIST_HEAD(pool_boot);
+
 /*
  * Because of the presence of percpu free pools, obj_pool_free will
  * under-count those in the percpu free pools. Similarly, obj_pool_used
@@ -278,6 +280,9 @@ alloc_object(void *addr, struct debug_bucket *b, const struct debug_obj_descr *d
 			percpu_pool->obj_free--;
 			goto init_obj;
 		}
+	} else {
+		obj = __alloc_object(&pool_boot);
+		goto init_obj;
 	}
 
 	raw_spin_lock(&pool_lock);
@@ -381,12 +386,14 @@ static void __free_object(struct debug_obj *obj)
 	struct debug_obj *objs[ODEBUG_BATCH_SIZE];
 	struct debug_percpu_free *percpu_pool;
 	int lookahead_count = 0;
-	unsigned long flags;
 	bool work;
 
-	local_irq_save(flags);
-	if (!obj_cache)
-		goto free_to_obj_pool;
+	guard(irqsave)();
+
+	if (unlikely(!obj_cache)) {
+		hlist_add_head(&obj->node, &pool_boot);
+		return;
+	}
 
 	/*
 	 * Try to free it into the percpu pool first.
@@ -395,7 +402,6 @@ static void __free_object(struct debug_obj *obj)
 	if (percpu_pool->obj_free < ODEBUG_POOL_PERCPU_SIZE) {
 		hlist_add_head(&obj->node, &percpu_pool->free_objs);
 		percpu_pool->obj_free++;
-		local_irq_restore(flags);
 		return;
 	}
 
@@ -410,7 +416,6 @@ static void __free_object(struct debug_obj *obj)
 		percpu_pool->obj_free--;
 	}
 
-free_to_obj_pool:
 	raw_spin_lock(&pool_lock);
 	work = (pool_global.cnt > debug_objects_pool_size) && obj_cache &&
 		(pool_to_free.cnt < ODEBUG_FREE_WORK_MAX);
@@ -455,7 +460,6 @@ static void __free_object(struct debug_obj *obj)
 		}
 	}
 	raw_spin_unlock(&pool_lock);
-	local_irq_restore(flags);
 }
 
 /*
@@ -1341,10 +1345,9 @@ void __init debug_objects_early_init(void)
 	for (i = 0; i < ODEBUG_HASH_SIZE; i++)
 		raw_spin_lock_init(&obj_hash[i].lock);
 
+	/* Keep early boot simple and add everything to the boot list */
 	for (i = 0; i < ODEBUG_POOL_SIZE; i++)
-		hlist_add_head(&obj_static_pool[i].node, &pool_global.objects);
-
-	pool_global.cnt = ODEBUG_POOL_SIZE;
+		hlist_add_head(&obj_static_pool[i].node, &pool_boot);
 }
 
 /*
@@ -1372,10 +1375,11 @@ static bool __init debug_objects_replace_static_objects(struct kmem_cache *cache
 	pool_global.cnt = ODEBUG_POOL_SIZE;
 
 	/*
-	 * Replace the statically allocated objects list with the allocated
-	 * objects list.
+	 * Move the allocated objects to the global pool and disconnect the
+	 * boot pool.
 	 */
 	hlist_move_list(&objects, &pool_global.objects);
+	pool_boot.first = NULL;
 
 	/* Replace the active object references */
 	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
0 commit comments