
Commit 8cd3fa4

Chengming Zhou authored and tehcaster committed
slub: Delay freezing of partial slabs
Now we will freeze slabs when moving them out of the node partial list to the cpu partial list, and this method needs two cmpxchg_double operations:

1. freeze the slab (acquire_slab()) under the node list_lock
2. get_freelist() when the picked slab is used in ___slab_alloc()

Actually we don't need to freeze when moving slabs out of the node partial list; we can delay freezing until the slab's freelist is used in ___slab_alloc(), so we save one cmpxchg_double(). And there are other good points:

 - Moving slabs between the node partial list and the cpu partial list becomes simpler, since we don't need to freeze or unfreeze at all.

 - Contention on the node list_lock is lower, since we don't need to freeze any slab under the node list_lock.

We can achieve this because no concurrent path manipulates the partial slab list except the __slab_free() path, which is now serialized by slab_test_node_partial() under the list_lock.

Since the slab returned by the get_partial() interfaces is not frozen anymore and no freelist is returned in the partial_context, we need to use the newly introduced freeze_slab() to freeze it and get its freelist.

Similarly, slabs on the cpu partial list are not frozen anymore, so we need to freeze_slab() them before use.

We can now delete acquire_slab() as it has become unused.

Signed-off-by: Chengming Zhou <zhouchengming@bytedance.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Tested-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
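The freeze_slab() helper referenced above is introduced by an earlier patch in this series, so its body does not appear in the diff below. The following is a minimal sketch of the shape it is expected to have, inferred from its call sites in this commit (freelist = freeze_slab(s, slab)) and from the existing slab_update_freelist() cmpxchg_double wrapper; treat it as an illustration rather than the exact upstream implementation.

```c
/*
 * Sketch only (not part of this diff): freeze a partial slab and return
 * its freelist in a single cmpxchg_double. The assumption is that one
 * slab_update_freelist() call both sets the frozen bit and takes the
 * whole freelist, replacing the old acquire_slab() + get_freelist()
 * pair.
 */
static inline void *freeze_slab(struct kmem_cache *s, struct slab *slab)
{
	struct slab new;
	unsigned long counters;
	void *freelist;

	do {
		freelist = slab->freelist;
		counters = slab->counters;

		new.counters = counters;
		VM_BUG_ON(new.frozen);

		/* Take all objects and mark the slab as cpu-owned. */
		new.inuse = slab->objects;
		new.frozen = 1;

	} while (!slab_update_freelist(s, slab,
		freelist, counters,
		NULL, new.counters,
		"freeze_slab"));

	return freelist;
}
```

With this single step performed only when the freelist is actually needed in ___slab_alloc(), taking a slab off the node partial list reduces to plain remove_partial() under the list_lock, as the diff below shows.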
1 parent 213094b commit 8cd3fa4

1 file changed

Lines changed: 24 additions & 91 deletions

mm/slub.c

@@ -2166,7 +2166,7 @@ static inline void remove_partial(struct kmem_cache_node *n,
 }
 
 /*
- * Called only for kmem_cache_debug() caches instead of acquire_slab(), with a
+ * Called only for kmem_cache_debug() caches instead of remove_partial(), with a
  * slab from the n->partial list. Remove only a single object from the slab, do
  * the alloc_debug_processing() checks and leave the slab on the list, or move
  * it to full list if it was the last free object.
@@ -2234,51 +2234,6 @@ static void *alloc_single_from_new_slab(struct kmem_cache *s,
 	return object;
 }
 
-/*
- * Remove slab from the partial list, freeze it and
- * return the pointer to the freelist.
- *
- * Returns a list of objects or NULL if it fails.
- */
-static inline void *acquire_slab(struct kmem_cache *s,
-		struct kmem_cache_node *n, struct slab *slab,
-		int mode)
-{
-	void *freelist;
-	unsigned long counters;
-	struct slab new;
-
-	lockdep_assert_held(&n->list_lock);
-
-	/*
-	 * Zap the freelist and set the frozen bit.
-	 * The old freelist is the list of objects for the
-	 * per cpu allocation list.
-	 */
-	freelist = slab->freelist;
-	counters = slab->counters;
-	new.counters = counters;
-	if (mode) {
-		new.inuse = slab->objects;
-		new.freelist = NULL;
-	} else {
-		new.freelist = freelist;
-	}
-
-	VM_BUG_ON(new.frozen);
-	new.frozen = 1;
-
-	if (!__slab_update_freelist(s, slab,
-			freelist, counters,
-			new.freelist, new.counters,
-			"acquire_slab"))
-		return NULL;
-
-	remove_partial(n, slab);
-	WARN_ON(!freelist);
-	return freelist;
-}
-
 #ifdef CONFIG_SLUB_CPU_PARTIAL
 static void put_cpu_partial(struct kmem_cache *s, struct slab *slab, int drain);
 #else
@@ -2295,7 +2250,6 @@ static struct slab *get_partial_node(struct kmem_cache *s,
 		struct partial_context *pc)
 {
 	struct slab *slab, *slab2, *partial = NULL;
-	void *object = NULL;
 	unsigned long flags;
 	unsigned int partial_slabs = 0;
 
@@ -2314,7 +2268,7 @@ static struct slab *get_partial_node(struct kmem_cache *s,
 			continue;
 
 		if (IS_ENABLED(CONFIG_SLUB_TINY) || kmem_cache_debug(s)) {
-			object = alloc_single_from_partial(s, n, slab,
+			void *object = alloc_single_from_partial(s, n, slab,
 							pc->orig_size);
 			if (object) {
 				partial = slab;
@@ -2324,13 +2278,10 @@ static struct slab *get_partial_node(struct kmem_cache *s,
 			continue;
 		}
 
-		object = acquire_slab(s, n, slab, object == NULL);
-		if (!object)
-			break;
+		remove_partial(n, slab);
 
 		if (!partial) {
 			partial = slab;
-			pc->object = object;
 			stat(s, ALLOC_FROM_PARTIAL);
 		} else {
 			put_cpu_partial(s, slab, 0);
@@ -2629,9 +2580,6 @@ static void __unfreeze_partials(struct kmem_cache *s, struct slab *partial_slab)
 	unsigned long flags = 0;
 
 	while (partial_slab) {
-		struct slab new;
-		struct slab old;
-
 		slab = partial_slab;
 		partial_slab = slab->next;
 
@@ -2644,23 +2592,7 @@ static void __unfreeze_partials(struct kmem_cache *s, struct slab *partial_slab)
 			spin_lock_irqsave(&n->list_lock, flags);
 		}
 
-		do {
-
-			old.freelist = slab->freelist;
-			old.counters = slab->counters;
-			VM_BUG_ON(!old.frozen);
-
-			new.counters = old.counters;
-			new.freelist = old.freelist;
-
-			new.frozen = 0;
-
-		} while (!__slab_update_freelist(s, slab,
-				old.freelist, old.counters,
-				new.freelist, new.counters,
-				"unfreezing slab"));
-
-		if (unlikely(!new.inuse && n->nr_partial >= s->min_partial)) {
+		if (unlikely(!slab->inuse && n->nr_partial >= s->min_partial)) {
 			slab->next = slab_to_discard;
 			slab_to_discard = slab;
 		} else {
@@ -3167,7 +3099,6 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 		node = NUMA_NO_NODE;
 		goto new_slab;
 	}
-redo:
 
 	if (unlikely(!node_match(slab, node))) {
 		/*
@@ -3243,7 +3174,8 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 
 new_slab:
 
-	if (slub_percpu_partial(c)) {
+#ifdef CONFIG_SLUB_CPU_PARTIAL
+	while (slub_percpu_partial(c)) {
 		local_lock_irqsave(&s->cpu_slab->lock, flags);
 		if (unlikely(c->slab)) {
 			local_unlock_irqrestore(&s->cpu_slab->lock, flags);
@@ -3255,21 +3187,31 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 			goto new_objects;
 		}
 
-		slab = c->slab = slub_percpu_partial(c);
+		slab = slub_percpu_partial(c);
 		slub_set_percpu_partial(c, slab);
 		local_unlock_irqrestore(&s->cpu_slab->lock, flags);
 		stat(s, CPU_PARTIAL_ALLOC);
-		goto redo;
+
+		if (unlikely(!node_match(slab, node) ||
+			     !pfmemalloc_match(slab, gfpflags))) {
+			slab->next = NULL;
+			__unfreeze_partials(s, slab);
+			continue;
+		}
+
+		freelist = freeze_slab(s, slab);
+		goto retry_load_slab;
 	}
+#endif
 
 new_objects:
 
 	pc.flags = gfpflags;
 	pc.orig_size = orig_size;
 	slab = get_partial(s, node, &pc);
 	if (slab) {
-		freelist = pc.object;
 		if (kmem_cache_debug(s)) {
+			freelist = pc.object;
 			/*
 			 * For debug caches here we had to go through
 			 * alloc_single_from_partial() so just store the
@@ -3281,6 +3223,7 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 			return freelist;
 		}
 
+		freelist = freeze_slab(s, slab);
 		goto retry_load_slab;
 	}
 
@@ -3682,18 +3625,8 @@ static void __slab_free(struct kmem_cache *s, struct slab *slab,
 		was_frozen = new.frozen;
 		new.inuse -= cnt;
 		if ((!new.inuse || !prior) && !was_frozen) {
-
-			if (kmem_cache_has_cpu_partial(s) && !prior) {
-
-				/*
-				 * Slab was on no list before and will be
-				 * partially empty
-				 * We can defer the list move and instead
-				 * freeze it.
-				 */
-				new.frozen = 1;
-
-			} else { /* Needs to be taken off a list */
+			/* Needs to be taken off a list */
+			if (!kmem_cache_has_cpu_partial(s) || prior) {
 
 				n = get_node(s, slab_nid(slab));
 				/*
@@ -3723,9 +3656,9 @@ static void __slab_free(struct kmem_cache *s, struct slab *slab,
 			 * activity can be necessary.
 			 */
 			stat(s, FREE_FROZEN);
-		} else if (new.frozen) {
+		} else if (kmem_cache_has_cpu_partial(s) && !prior) {
 			/*
-			 * If we just froze the slab then put it onto the
+			 * If we started with a full slab then put it onto the
 			 * per cpu partial list.
 			 */
 			put_cpu_partial(s, slab, 1);
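The __slab_free() hunks above rely on the serialization described in the commit message: the slab is no longer frozen when it leaves the node partial list, so __slab_free() checks slab_test_node_partial() under n->list_lock before touching the list. Those helpers were added by an earlier patch in the series and are not part of this diff; the sketch below shows one plausible shape for them. The use of the otherwise idle PG_workingset folio flag as the "on the node partial list" marker is an assumption here, not something stated in this commit.

```c
/*
 * Sketch only (not part of this diff): track whether a slab currently
 * sits on the per-node partial list, so __slab_free() can skip any list
 * manipulation when another path has already removed it.
 */
static inline bool slab_test_node_partial(const struct slab *slab)
{
	return folio_test_workingset((struct folio *)slab_folio(slab));
}

static inline void slab_set_node_partial(struct slab *slab)
{
	/* Set when add_partial() links the slab into n->partial. */
	set_bit(PG_workingset, folio_flags(slab_folio(slab), 0));
}

static inline void slab_clear_node_partial(struct slab *slab)
{
	/* Cleared when remove_partial() takes the slab off n->partial. */
	clear_bit(PG_workingset, folio_flags(slab_folio(slab), 0));
}
```

With such a flag, __slab_free() can take n->list_lock, test the flag, and return without list manipulation if get_partial_node() or the cpu partial path already owns the slab.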
