Skip to content

Commit ab2f752

Browse files
committed
slab: remove defer_deactivate_slab()
There are no more cpu slabs so we don't need their deferred deactivation. The function is now only used from places where we allocate a new slab but then can't spin on node list_lock to put it on the partial list. Instead of the deferred action we can free it directly via __free_slab(), we just need to tell it to use _nolock() freeing of the underlying pages and take care of the accounting. Since free_frozen_pages_nolock() variant does not yet exist for code outside of the page allocator, create it as a trivial wrapper for __free_frozen_pages(..., FPI_TRYLOCK). Reviewed-by: Harry Yoo <harry.yoo@oracle.com> Reviewed-by: Hao Li <hao.li@linux.dev> Reviewed-by: Suren Baghdasaryan <surenb@google.com> Acked-by: Alexei Starovoitov <ast@kernel.org> Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
1 parent bdc9282 commit ab2f752

4 files changed

Lines changed: 28 additions & 44 deletions

File tree

mm/internal.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -846,6 +846,7 @@ static inline struct page *alloc_frozen_pages_noprof(gfp_t gfp, unsigned int ord
846846
struct page *alloc_frozen_pages_nolock_noprof(gfp_t gfp_flags, int nid, unsigned int order);
847847
#define alloc_frozen_pages_nolock(...) \
848848
alloc_hooks(alloc_frozen_pages_nolock_noprof(__VA_ARGS__))
849+
void free_frozen_pages_nolock(struct page *page, unsigned int order);
849850

850851
extern void zone_pcp_reset(struct zone *zone);
851852
extern void zone_pcp_disable(struct zone *zone);

mm/page_alloc.c

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2981,6 +2981,11 @@ void free_frozen_pages(struct page *page, unsigned int order)
29812981
__free_frozen_pages(page, order, FPI_NONE);
29822982
}
29832983

2984+
void free_frozen_pages_nolock(struct page *page, unsigned int order)
2985+
{
2986+
__free_frozen_pages(page, order, FPI_TRYLOCK);
2987+
}
2988+
29842989
/*
29852990
* Free a batch of folios
29862991
*/

mm/slab.h

Lines changed: 1 addition & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -71,13 +71,7 @@ struct slab {
7171
struct kmem_cache *slab_cache;
7272
union {
7373
struct {
74-
union {
75-
struct list_head slab_list;
76-
struct { /* For deferred deactivate_slab() */
77-
struct llist_node llnode;
78-
void *flush_freelist;
79-
};
80-
};
74+
struct list_head slab_list;
8175
/* Double-word boundary */
8276
struct freelist_counters;
8377
};

mm/slub.c

Lines changed: 21 additions & 37 deletions
Original file line numberDiff line numberDiff line change
@@ -3262,7 +3262,7 @@ static struct slab *new_slab(struct kmem_cache *s, gfp_t flags, int node)
32623262
flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
32633263
}
32643264

3265-
static void __free_slab(struct kmem_cache *s, struct slab *slab)
3265+
static void __free_slab(struct kmem_cache *s, struct slab *slab, bool allow_spin)
32663266
{
32673267
struct page *page = slab_page(slab);
32683268
int order = compound_order(page);
@@ -3273,14 +3273,26 @@ static void __free_slab(struct kmem_cache *s, struct slab *slab)
32733273
__ClearPageSlab(page);
32743274
mm_account_reclaimed_pages(pages);
32753275
unaccount_slab(slab, order, s);
3276-
free_frozen_pages(page, order);
3276+
if (allow_spin)
3277+
free_frozen_pages(page, order);
3278+
else
3279+
free_frozen_pages_nolock(page, order);
3280+
}
3281+
3282+
static void free_new_slab_nolock(struct kmem_cache *s, struct slab *slab)
3283+
{
3284+
/*
3285+
* Since it was just allocated, we can skip the actions in
3286+
* discard_slab() and free_slab().
3287+
*/
3288+
__free_slab(s, slab, false);
32773289
}
32783290

32793291
static void rcu_free_slab(struct rcu_head *h)
32803292
{
32813293
struct slab *slab = container_of(h, struct slab, rcu_head);
32823294

3283-
__free_slab(slab->slab_cache, slab);
3295+
__free_slab(slab->slab_cache, slab, true);
32843296
}
32853297

32863298
static void free_slab(struct kmem_cache *s, struct slab *slab)
@@ -3296,7 +3308,7 @@ static void free_slab(struct kmem_cache *s, struct slab *slab)
32963308
if (unlikely(s->flags & SLAB_TYPESAFE_BY_RCU))
32973309
call_rcu(&slab->rcu_head, rcu_free_slab);
32983310
else
3299-
__free_slab(s, slab);
3311+
__free_slab(s, slab, true);
33003312
}
33013313

33023314
static void discard_slab(struct kmem_cache *s, struct slab *slab)
@@ -3389,8 +3401,6 @@ static void *alloc_single_from_partial(struct kmem_cache *s,
33893401
return object;
33903402
}
33913403

3392-
static void defer_deactivate_slab(struct slab *slab, void *flush_freelist);
3393-
33943404
/*
33953405
* Called only for kmem_cache_debug() caches to allocate from a freshly
33963406
* allocated slab. Allocate a single object instead of whole freelist
@@ -3406,8 +3416,8 @@ static void *alloc_single_from_new_slab(struct kmem_cache *s, struct slab *slab,
34063416
void *object;
34073417

34083418
if (!allow_spin && !spin_trylock_irqsave(&n->list_lock, flags)) {
3409-
/* Unlucky, discard newly allocated slab */
3410-
defer_deactivate_slab(slab, NULL);
3419+
/* Unlucky, discard newly allocated slab. */
3420+
free_new_slab_nolock(s, slab);
34113421
return NULL;
34123422
}
34133423

@@ -4279,7 +4289,7 @@ static unsigned int alloc_from_new_slab(struct kmem_cache *s, struct slab *slab,
42794289

42804290
if (!spin_trylock_irqsave(&n->list_lock, flags)) {
42814291
/* Unlucky, discard newly allocated slab */
4282-
defer_deactivate_slab(slab, NULL);
4292+
free_new_slab_nolock(s, slab);
42834293
return 0;
42844294
}
42854295
}
@@ -6059,31 +6069,28 @@ static void free_to_pcs_bulk(struct kmem_cache *s, size_t size, void **p)
60596069

60606070
struct defer_free {
60616071
struct llist_head objects;
6062-
struct llist_head slabs;
60636072
struct irq_work work;
60646073
};
60656074

60666075
static void free_deferred_objects(struct irq_work *work);
60676076

60686077
static DEFINE_PER_CPU(struct defer_free, defer_free_objects) = {
60696078
.objects = LLIST_HEAD_INIT(objects),
6070-
.slabs = LLIST_HEAD_INIT(slabs),
60716079
.work = IRQ_WORK_INIT(free_deferred_objects),
60726080
};
60736081

60746082
/*
60756083
* In PREEMPT_RT irq_work runs in per-cpu kthread, so it's safe
6076-
* to take sleeping spin_locks from __slab_free() and deactivate_slab().
6084+
* to take sleeping spin_locks from __slab_free().
60776085
* In !PREEMPT_RT irq_work will run after local_unlock_irqrestore().
60786086
*/
60796087
static void free_deferred_objects(struct irq_work *work)
60806088
{
60816089
struct defer_free *df = container_of(work, struct defer_free, work);
60826090
struct llist_head *objs = &df->objects;
6083-
struct llist_head *slabs = &df->slabs;
60846091
struct llist_node *llnode, *pos, *t;
60856092

6086-
if (llist_empty(objs) && llist_empty(slabs))
6093+
if (llist_empty(objs))
60876094
return;
60886095

60896096
llnode = llist_del_all(objs);
@@ -6107,16 +6114,6 @@ static void free_deferred_objects(struct irq_work *work)
61076114

61086115
__slab_free(s, slab, x, x, 1, _THIS_IP_);
61096116
}
6110-
6111-
llnode = llist_del_all(slabs);
6112-
llist_for_each_safe(pos, t, llnode) {
6113-
struct slab *slab = container_of(pos, struct slab, llnode);
6114-
6115-
if (slab->frozen)
6116-
deactivate_slab(slab->slab_cache, slab, slab->flush_freelist);
6117-
else
6118-
free_slab(slab->slab_cache, slab);
6119-
}
61206117
}
61216118

61226119
static void defer_free(struct kmem_cache *s, void *head)
@@ -6132,19 +6129,6 @@ static void defer_free(struct kmem_cache *s, void *head)
61326129
irq_work_queue(&df->work);
61336130
}
61346131

6135-
static void defer_deactivate_slab(struct slab *slab, void *flush_freelist)
6136-
{
6137-
struct defer_free *df;
6138-
6139-
slab->flush_freelist = flush_freelist;
6140-
6141-
guard(preempt)();
6142-
6143-
df = this_cpu_ptr(&defer_free_objects);
6144-
if (llist_add(&slab->llnode, &df->slabs))
6145-
irq_work_queue(&df->work);
6146-
}
6147-
61486132
void defer_free_barrier(void)
61496133
{
61506134
int cpu;

0 commit comments

Comments (0)