Skip to content

Commit 21316fd

Browse files
Chengming Zhou authored and tehcaster committed
slub: Rename all *unfreeze_partials* functions to *put_partials*
Since all partial slabs on the CPU partial list are not frozen anymore, we don't unfreeze when moving cpu partial slabs to node partial list, it's better to rename these functions. Signed-off-by: Chengming Zhou <zhouchengming@bytedance.com> Reviewed-by: Vlastimil Babka <vbabka@suse.cz> Tested-by: Hyeonggon Yoo <42.hyeyoo@gmail.com> Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com> Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
1 parent 00eb60c commit 21316fd

1 file changed

Lines changed: 17 additions & 17 deletions

File tree

mm/slub.c

Lines changed: 17 additions & 17 deletions
Original file line number | Diff line number | Diff line change
@@ -2546,7 +2546,7 @@ static void deactivate_slab(struct kmem_cache *s, struct slab *slab,
25462546
}
25472547

25482548
#ifdef CONFIG_SLUB_CPU_PARTIAL
2549-
static void __unfreeze_partials(struct kmem_cache *s, struct slab *partial_slab)
2549+
static void __put_partials(struct kmem_cache *s, struct slab *partial_slab)
25502550
{
25512551
struct kmem_cache_node *n = NULL, *n2 = NULL;
25522552
struct slab *slab, *slab_to_discard = NULL;
@@ -2588,9 +2588,9 @@ static void __unfreeze_partials(struct kmem_cache *s, struct slab *partial_slab)
25882588
}
25892589

25902590
/*
2591-
* Unfreeze all the cpu partial slabs.
2591+
* Put all the cpu partial slabs to the node partial list.
25922592
*/
2593-
static void unfreeze_partials(struct kmem_cache *s)
2593+
static void put_partials(struct kmem_cache *s)
25942594
{
25952595
struct slab *partial_slab;
25962596
unsigned long flags;
@@ -2601,19 +2601,19 @@ static void unfreeze_partials(struct kmem_cache *s)
26012601
local_unlock_irqrestore(&s->cpu_slab->lock, flags);
26022602

26032603
if (partial_slab)
2604-
__unfreeze_partials(s, partial_slab);
2604+
__put_partials(s, partial_slab);
26052605
}
26062606

2607-
static void unfreeze_partials_cpu(struct kmem_cache *s,
2608-
struct kmem_cache_cpu *c)
2607+
static void put_partials_cpu(struct kmem_cache *s,
2608+
struct kmem_cache_cpu *c)
26092609
{
26102610
struct slab *partial_slab;
26112611

26122612
partial_slab = slub_percpu_partial(c);
26132613
c->partial = NULL;
26142614

26152615
if (partial_slab)
2616-
__unfreeze_partials(s, partial_slab);
2616+
__put_partials(s, partial_slab);
26172617
}
26182618

26192619
/*
@@ -2626,7 +2626,7 @@ static void unfreeze_partials_cpu(struct kmem_cache *s,
26262626
static void put_cpu_partial(struct kmem_cache *s, struct slab *slab, int drain)
26272627
{
26282628
struct slab *oldslab;
2629-
struct slab *slab_to_unfreeze = NULL;
2629+
struct slab *slab_to_put = NULL;
26302630
unsigned long flags;
26312631
int slabs = 0;
26322632

@@ -2641,7 +2641,7 @@ static void put_cpu_partial(struct kmem_cache *s, struct slab *slab, int drain)
26412641
* per node partial list. Postpone the actual unfreezing
26422642
* outside of the critical section.
26432643
*/
2644-
slab_to_unfreeze = oldslab;
2644+
slab_to_put = oldslab;
26452645
oldslab = NULL;
26462646
} else {
26472647
slabs = oldslab->slabs;
@@ -2657,17 +2657,17 @@ static void put_cpu_partial(struct kmem_cache *s, struct slab *slab, int drain)
26572657

26582658
local_unlock_irqrestore(&s->cpu_slab->lock, flags);
26592659

2660-
if (slab_to_unfreeze) {
2661-
__unfreeze_partials(s, slab_to_unfreeze);
2660+
if (slab_to_put) {
2661+
__put_partials(s, slab_to_put);
26622662
stat(s, CPU_PARTIAL_DRAIN);
26632663
}
26642664
}
26652665

26662666
#else /* CONFIG_SLUB_CPU_PARTIAL */
26672667

2668-
static inline void unfreeze_partials(struct kmem_cache *s) { }
2669-
static inline void unfreeze_partials_cpu(struct kmem_cache *s,
2670-
struct kmem_cache_cpu *c) { }
2668+
static inline void put_partials(struct kmem_cache *s) { }
2669+
static inline void put_partials_cpu(struct kmem_cache *s,
2670+
struct kmem_cache_cpu *c) { }
26712671

26722672
#endif /* CONFIG_SLUB_CPU_PARTIAL */
26732673

@@ -2709,7 +2709,7 @@ static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu)
27092709
stat(s, CPUSLAB_FLUSH);
27102710
}
27112711

2712-
unfreeze_partials_cpu(s, c);
2712+
put_partials_cpu(s, c);
27132713
}
27142714

27152715
struct slub_flush_work {
@@ -2737,7 +2737,7 @@ static void flush_cpu_slab(struct work_struct *w)
27372737
if (c->slab)
27382738
flush_slab(s, c);
27392739

2740-
unfreeze_partials(s);
2740+
put_partials(s);
27412741
}
27422742

27432743
static bool has_cpu_slab(int cpu, struct kmem_cache *s)
@@ -3168,7 +3168,7 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
31683168
if (unlikely(!node_match(slab, node) ||
31693169
!pfmemalloc_match(slab, gfpflags))) {
31703170
slab->next = NULL;
3171-
__unfreeze_partials(s, slab);
3171+
__put_partials(s, slab);
31723172
continue;
31733173
}
31743174

0 commit comments

Comments (0)