@@ -204,9 +204,9 @@ DEFINE_STATIC_KEY_FALSE(slub_debug_enabled);
 
 /* Structure holding parameters for get_partial() call chain */
 struct partial_context {
-	struct slab **slab;
 	gfp_t flags;
 	unsigned int orig_size;
+	void *object;
 };
 
 static inline bool kmem_cache_debug(struct kmem_cache *s)
@@ -2269,10 +2269,11 @@ static inline bool pfmemalloc_match(struct slab *slab, gfp_t gfpflags);
 /*
  * Try to allocate a partial slab from a specific node.
  */
-static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
-			      struct partial_context *pc)
+static struct slab *get_partial_node(struct kmem_cache *s,
+				     struct kmem_cache_node *n,
+				     struct partial_context *pc)
 {
-	struct slab *slab, *slab2;
+	struct slab *slab, *slab2, *partial = NULL;
 	void *object = NULL;
 	unsigned long flags;
 	unsigned int partial_slabs = 0;
@@ -2288,27 +2289,28 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
 
 	spin_lock_irqsave(&n->list_lock, flags);
 	list_for_each_entry_safe(slab, slab2, &n->partial, slab_list) {
-		void *t;
-
 		if (!pfmemalloc_match(slab, pc->flags))
 			continue;
 
 		if (IS_ENABLED(CONFIG_SLUB_TINY) || kmem_cache_debug(s)) {
 			object = alloc_single_from_partial(s, n, slab,
 							pc->orig_size);
-			if (object)
+			if (object) {
+				partial = slab;
+				pc->object = object;
 				break;
+			}
 			continue;
 		}
 
-		t = acquire_slab(s, n, slab, object == NULL);
-		if (!t)
+		object = acquire_slab(s, n, slab, object == NULL);
+		if (!object)
 			break;
 
-		if (!object) {
-			*pc->slab = slab;
+		if (!partial) {
+			partial = slab;
+			pc->object = object;
 			stat(s, ALLOC_FROM_PARTIAL);
-			object = t;
 		} else {
 			put_cpu_partial(s, slab, 0);
 			stat(s, CPU_PARTIAL_NODE);
@@ -2324,20 +2326,21 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
 
 	}
 	spin_unlock_irqrestore(&n->list_lock, flags);
-	return object;
+	return partial;
 }
 
 /*
  * Get a slab from somewhere. Search in increasing NUMA distances.
  */
-static void *get_any_partial(struct kmem_cache *s, struct partial_context *pc)
+static struct slab *get_any_partial(struct kmem_cache *s,
+				    struct partial_context *pc)
 {
 #ifdef CONFIG_NUMA
 	struct zonelist *zonelist;
 	struct zoneref *z;
 	struct zone *zone;
 	enum zone_type highest_zoneidx = gfp_zone(pc->flags);
-	void *object;
+	struct slab *slab;
 	unsigned int cpuset_mems_cookie;
 
 	/*
@@ -2372,16 +2375,16 @@ static void *get_any_partial(struct kmem_cache *s, struct partial_context *pc)
 
 		if (n && cpuset_zone_allowed(zone, pc->flags) &&
 				n->nr_partial > s->min_partial) {
-			object = get_partial_node(s, n, pc);
-			if (object) {
+			slab = get_partial_node(s, n, pc);
+			if (slab) {
 				/*
 				 * Don't check read_mems_allowed_retry()
 				 * here - if mems_allowed was updated in
 				 * parallel, that was a harmless race
 				 * between allocation and the cpuset
 				 * update
 				 */
-				return object;
+				return slab;
 			}
 		}
 	}
@@ -2393,17 +2396,18 @@ static void *get_any_partial(struct kmem_cache *s, struct partial_context *pc)
 /*
  * Get a partial slab, lock it and return it.
  */
-static void *get_partial(struct kmem_cache *s, int node, struct partial_context *pc)
+static struct slab *get_partial(struct kmem_cache *s, int node,
+				struct partial_context *pc)
 {
-	void *object;
+	struct slab *slab;
 	int searchnode = node;
 
 	if (node == NUMA_NO_NODE)
 		searchnode = numa_mem_id();
 
-	object = get_partial_node(s, get_node(s, searchnode), pc);
-	if (object || node != NUMA_NO_NODE)
-		return object;
+	slab = get_partial_node(s, get_node(s, searchnode), pc);
+	if (slab || node != NUMA_NO_NODE)
+		return slab;
 
 	return get_any_partial(s, pc);
 }
@@ -3213,10 +3217,10 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 new_objects:
 
 	pc.flags = gfpflags;
-	pc.slab = &slab;
 	pc.orig_size = orig_size;
-	freelist = get_partial(s, node, &pc);
-	if (freelist) {
+	slab = get_partial(s, node, &pc);
+	if (slab) {
+		freelist = pc.object;
 		if (kmem_cache_debug(s)) {
 			/*
 			 * For debug caches here we had to go through
@@ -3408,12 +3412,11 @@ static void *__slab_alloc_node(struct kmem_cache *s,
 	void *object;
 
 	pc.flags = gfpflags;
-	pc.slab = &slab;
 	pc.orig_size = orig_size;
-	object = get_partial(s, node, &pc);
+	slab = get_partial(s, node, &pc);
 
-	if (object)
-		return object;
+	if (slab)
+		return pc.object;
 
 	slab = new_slab(s, gfpflags, node);
 	if (unlikely(!slab)) {
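
Note: taken together, the hunks above invert the get_partial() calling
convention: get_partial_node(), get_any_partial() and get_partial() now
return the struct slab * they claimed, and the freshly acquired object
travels back to the caller through pc->object instead of the slab being
written out through the old pc->slab double pointer. Below is a minimal
userspace sketch of this "return the container, report the payload via
the context struct" pattern; every name in it (slab_demo,
partial_context_demo, get_partial_demo) is an illustrative stand-in,
not kernel code.

#include <stdio.h>

/* Stand-in for a slab with a single free object on its freelist. */
struct slab_demo {
	void *first_free;
};

/* Mirrors the reworked partial_context: object is now an out-field. */
struct partial_context_demo {
	unsigned int orig_size;
	void *object;	/* out: object taken from the returned slab */
};

/* New-style helper: returns the slab, hands the object back via pc. */
static struct slab_demo *get_partial_demo(struct slab_demo *pool,
					  struct partial_context_demo *pc)
{
	if (!pool->first_free)
		return NULL;	/* no partial slab available */

	pc->object = pool->first_free;
	pool->first_free = NULL;
	return pool;
}

int main(void)
{
	int payload = 42;
	struct slab_demo pool = { .first_free = &payload };
	struct partial_context_demo pc = { .orig_size = sizeof(int) };

	/*
	 * Caller mirrors ___slab_alloc(): test the returned slab first,
	 * then read the object out of the context struct.
	 */
	struct slab_demo *slab = get_partial_demo(&pool, &pc);
	if (slab)
		printf("allocated object: %d\n", *(int *)pc.object);
	return 0;
}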