Commit 31e0886

slub: remove CONFIG_SLUB_TINY specific code paths
CONFIG_SLUB_TINY minimizes SLUB's memory overhead in multiple ways, mainly by avoiding percpu caching of slabs and objects. It also reduces code size by replacing some code paths with simplified ones through ifdefs, but the benefits of that are smaller and keeping those paths would complicate the upcoming changes. Thus remove these code paths and the associated ifdefs and simplify the code base.

Link: https://patch.msgid.link/20251105-sheaves-cleanups-v1-4-b8218e1ac7ef@suse.cz
Reviewed-by: Harry Yoo <harry.yoo@oracle.com>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
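The recurring pattern in the diff below is replacing a compile-time `#ifdef` fork with a runtime-constant check such as `IS_ENABLED(CONFIG_SLUB_TINY)`, so both branches stay visible to the compiler while the dead one is still optimized away. Here is a minimal standalone sketch of that idiom; `CONFIG_TOY_TINY`, `toy_debug()` and the two path functions are hypothetical names for illustration, not symbols from this patch.

```c
#include <stdbool.h>
#include <stdio.h>

/*
 * Hypothetical stand-in for a Kconfig symbol. In the kernel the test would
 * be written as IS_ENABLED(CONFIG_SLUB_TINY), which also copes with the
 * symbol being undefined; a plain 0/1 macro keeps this sketch self-contained.
 */
#ifndef CONFIG_TOY_TINY
#define CONFIG_TOY_TINY 0
#endif

static bool toy_debug(void)
{
	return false;		/* pretend debugging is disabled */
}

static void toy_slow_path(void)
{
	puts("slow path: allocate straight from a partial or new slab");
}

static void toy_fast_path(void)
{
	puts("fast path: take the object from the percpu freelist");
}

/*
 * Instead of bracketing the slow path in #ifdef CONFIG_TOY_TINY / #else /
 * #endif, fold the compile-time choice into an ordinary condition. When
 * CONFIG_TOY_TINY is a constant 0 the compiler drops the branch entirely,
 * so there is no runtime cost, yet both paths keep being compiled and read
 * as normal code.
 */
static void toy_alloc(void)
{
	if (CONFIG_TOY_TINY || toy_debug())
		toy_slow_path();
	else
		toy_fast_path();
}

int main(void)
{
	toy_alloc();	/* prints the fast-path message with the defaults above */
	return 0;
}
```

Built as-is (`cc toy.c && ./a.out`) this prints the fast-path message; building with `-DCONFIG_TOY_TINY=1` flips it to the slow path without any source change.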
1 parent 1ce20c2 commit 31e0886

2 files changed: 4 additions & 105 deletions

mm/slab.h

Lines changed: 0 additions & 2 deletions
@@ -236,10 +236,8 @@ struct kmem_cache_order_objects {
  * Slab cache management.
  */
 struct kmem_cache {
-#ifndef CONFIG_SLUB_TINY
 	struct kmem_cache_cpu __percpu *cpu_slab;
 	struct lock_class_key lock_key;
-#endif
 	struct slub_percpu_sheaves __percpu *cpu_sheaves;
 	/* Used for retrieving partial slabs, etc. */
 	slab_flags_t flags;

mm/slub.c

Lines changed: 4 additions & 103 deletions
@@ -410,7 +410,6 @@ enum stat_item {
 	NR_SLUB_STAT_ITEMS
 };
 
-#ifndef CONFIG_SLUB_TINY
 /*
  * When changing the layout, make sure freelist and tid are still compatible
  * with this_cpu_cmpxchg_double() alignment requirements.
@@ -432,7 +431,6 @@ struct kmem_cache_cpu {
 	unsigned int stat[NR_SLUB_STAT_ITEMS];
 #endif
 };
-#endif /* CONFIG_SLUB_TINY */
 
 static inline void stat(const struct kmem_cache *s, enum stat_item si)
 {
@@ -597,12 +595,10 @@ static inline void *get_freepointer(struct kmem_cache *s, void *object)
 	return freelist_ptr_decode(s, p, ptr_addr);
 }
 
-#ifndef CONFIG_SLUB_TINY
 static void prefetch_freepointer(const struct kmem_cache *s, void *object)
 {
 	prefetchw(object + s->offset);
 }
-#endif
 
 /*
  * When running under KMSAN, get_freepointer_safe() may return an uninitialized
@@ -714,10 +710,12 @@ static inline unsigned int slub_get_cpu_partial(struct kmem_cache *s)
 	return s->cpu_partial_slabs;
 }
 #else
+#ifdef SLAB_SUPPORTS_SYSFS
 static inline void
 slub_set_cpu_partial(struct kmem_cache *s, unsigned int nr_objects)
 {
 }
+#endif
 
 static inline unsigned int slub_get_cpu_partial(struct kmem_cache *s)
 {
@@ -2026,13 +2024,11 @@ static inline void inc_slabs_node(struct kmem_cache *s, int node,
 							int objects) {}
 static inline void dec_slabs_node(struct kmem_cache *s, int node,
 							int objects) {}
-#ifndef CONFIG_SLUB_TINY
 static bool freelist_corrupted(struct kmem_cache *s, struct slab *slab,
 			       void **freelist, void *nextfree)
 {
 	return false;
 }
-#endif
 #endif /* CONFIG_SLUB_DEBUG */
 
 #ifdef CONFIG_SLAB_OBJ_EXT
@@ -3623,8 +3619,6 @@ static struct slab *get_partial(struct kmem_cache *s, int node,
 	return get_any_partial(s, pc);
 }
 
-#ifndef CONFIG_SLUB_TINY
-
 #ifdef CONFIG_PREEMPTION
 /*
  * Calculate the next globally unique transaction for disambiguation
@@ -4024,12 +4018,6 @@ static bool has_cpu_slab(int cpu, struct kmem_cache *s)
 	return c->slab || slub_percpu_partial(c);
 }
 
-#else /* CONFIG_SLUB_TINY */
-static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu) { }
-static inline bool has_cpu_slab(int cpu, struct kmem_cache *s) { return false; }
-static inline void flush_this_cpu_slab(struct kmem_cache *s) { }
-#endif /* CONFIG_SLUB_TINY */
-
 static bool has_pcs_used(int cpu, struct kmem_cache *s)
 {
 	struct slub_percpu_sheaves *pcs;
@@ -4370,7 +4358,6 @@ static inline bool pfmemalloc_match(struct slab *slab, gfp_t gfpflags)
 	return true;
 }
 
-#ifndef CONFIG_SLUB_TINY
 static inline bool
 __update_cpu_freelist_fast(struct kmem_cache *s,
 			   void *freelist_old, void *freelist_new,
@@ -4634,7 +4621,7 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 	pc.orig_size = orig_size;
 	slab = get_partial(s, node, &pc);
 	if (slab) {
-		if (kmem_cache_debug(s)) {
+		if (IS_ENABLED(CONFIG_SLUB_TINY) || kmem_cache_debug(s)) {
 			freelist = pc.object;
 			/*
 			 * For debug caches here we had to go through
@@ -4672,7 +4659,7 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 
 	stat(s, ALLOC_SLAB);
 
-	if (kmem_cache_debug(s)) {
+	if (IS_ENABLED(CONFIG_SLUB_TINY) || kmem_cache_debug(s)) {
 		freelist = alloc_single_from_new_slab(s, slab, orig_size, gfpflags);
 
 		if (unlikely(!freelist)) {
@@ -4884,32 +4871,6 @@ static __always_inline void *__slab_alloc_node(struct kmem_cache *s,
 
 	return object;
 }
-#else /* CONFIG_SLUB_TINY */
-static void *__slab_alloc_node(struct kmem_cache *s,
-		gfp_t gfpflags, int node, unsigned long addr, size_t orig_size)
-{
-	struct partial_context pc;
-	struct slab *slab;
-	void *object;
-
-	pc.flags = gfpflags;
-	pc.orig_size = orig_size;
-	slab = get_partial(s, node, &pc);
-
-	if (slab)
-		return pc.object;
-
-	slab = new_slab(s, gfpflags, node);
-	if (unlikely(!slab)) {
-		slab_out_of_memory(s, gfpflags, node);
-		return NULL;
-	}
-
-	object = alloc_single_from_new_slab(s, slab, orig_size, gfpflags);
-
-	return object;
-}
-#endif /* CONFIG_SLUB_TINY */
 
 /*
  * If the object has been wiped upon free, make sure it's fully initialized by
@@ -5760,9 +5721,7 @@ void *kmalloc_nolock_noprof(size_t size, gfp_t gfp_flags, int node)
 	 * it did local_lock_irqsave(&s->cpu_slab->lock, flags).
 	 * In this case fast path with __update_cpu_freelist_fast() is not safe.
 	 */
-#ifndef CONFIG_SLUB_TINY
 	if (!in_nmi() || !local_lock_is_locked(&s->cpu_slab->lock))
-#endif
 		ret = __slab_alloc_node(s, alloc_gfp, node, _RET_IP_, size);
 
 	if (PTR_ERR(ret) == -EBUSY) {
@@ -6553,14 +6512,10 @@ static void free_deferred_objects(struct irq_work *work)
 	llist_for_each_safe(pos, t, llnode) {
 		struct slab *slab = container_of(pos, struct slab, llnode);
 
-#ifdef CONFIG_SLUB_TINY
-		free_slab(slab->slab_cache, slab);
-#else
 		if (slab->frozen)
 			deactivate_slab(slab->slab_cache, slab, slab->flush_freelist);
 		else
 			free_slab(slab->slab_cache, slab);
-#endif
 	}
 }
 
@@ -6596,7 +6551,6 @@ void defer_free_barrier(void)
 		irq_work_sync(&per_cpu_ptr(&defer_free_objects, cpu)->work);
 }
 
-#ifndef CONFIG_SLUB_TINY
 /*
  * Fastpath with forced inlining to produce a kfree and kmem_cache_free that
  * can perform fastpath freeing without additional function calls.
@@ -6689,14 +6643,6 @@ static __always_inline void do_slab_free(struct kmem_cache *s,
 	}
 	stat_add(s, FREE_FASTPATH, cnt);
 }
-#else /* CONFIG_SLUB_TINY */
-static void do_slab_free(struct kmem_cache *s,
-				struct slab *slab, void *head, void *tail,
-				int cnt, unsigned long addr)
-{
-	__slab_free(s, slab, head, tail, cnt, addr);
-}
-#endif /* CONFIG_SLUB_TINY */
 
 static __fastpath_inline
 void slab_free(struct kmem_cache *s, struct slab *slab, void *object,
@@ -6974,11 +6920,7 @@ void kfree_nolock(const void *object)
 	 * since kasan quarantine takes locks and not supported from NMI.
 	 */
 	kasan_slab_free(s, x, false, false, /* skip quarantine */true);
-#ifndef CONFIG_SLUB_TINY
 	do_slab_free(s, slab, x, x, 0, _RET_IP_);
-#else
-	defer_free(s, x);
-#endif
 }
 EXPORT_SYMBOL_GPL(kfree_nolock);
 
@@ -7428,7 +7370,6 @@ void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
 }
 EXPORT_SYMBOL(kmem_cache_free_bulk);
 
-#ifndef CONFIG_SLUB_TINY
 static inline
 int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 			    void **p)
@@ -7493,35 +7434,6 @@ int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 	return 0;
 
 }
-#else /* CONFIG_SLUB_TINY */
-static int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags,
-				   size_t size, void **p)
-{
-	int i;
-
-	for (i = 0; i < size; i++) {
-		void *object = kfence_alloc(s, s->object_size, flags);
-
-		if (unlikely(object)) {
-			p[i] = object;
-			continue;
-		}
-
-		p[i] = __slab_alloc_node(s, flags, NUMA_NO_NODE,
-					 _RET_IP_, s->object_size);
-		if (unlikely(!p[i]))
-			goto error;
-
-		maybe_wipe_obj_freeptr(s, p[i]);
-	}
-
-	return i;
-
-error:
-	__kmem_cache_free_bulk(s, i, p);
-	return 0;
-}
-#endif /* CONFIG_SLUB_TINY */
 
 /* Note that interrupts must be enabled when calling this function. */
 int kmem_cache_alloc_bulk_noprof(struct kmem_cache *s, gfp_t flags, size_t size,
@@ -7740,7 +7652,6 @@ init_kmem_cache_node(struct kmem_cache_node *n, struct node_barn *barn)
 	barn_init(barn);
 }
 
-#ifndef CONFIG_SLUB_TINY
 static inline int alloc_kmem_cache_cpus(struct kmem_cache *s)
 {
 	BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE <
@@ -7761,12 +7672,6 @@ static inline int alloc_kmem_cache_cpus(struct kmem_cache *s)
 
 	return 1;
 }
-#else
-static inline int alloc_kmem_cache_cpus(struct kmem_cache *s)
-{
-	return 1;
-}
-#endif /* CONFIG_SLUB_TINY */
 
 static int init_percpu_sheaves(struct kmem_cache *s)
 {
@@ -7856,13 +7761,11 @@ void __kmem_cache_release(struct kmem_cache *s)
 	cache_random_seq_destroy(s);
 	if (s->cpu_sheaves)
 		pcs_destroy(s);
-#ifndef CONFIG_SLUB_TINY
 #ifdef CONFIG_PREEMPT_RT
 	if (s->cpu_slab)
 		lockdep_unregister_key(&s->lock_key);
 #endif
 	free_percpu(s->cpu_slab);
-#endif
 	free_kmem_cache_nodes(s);
 }
 
@@ -8605,10 +8508,8 @@ void __init kmem_cache_init(void)
 
 void __init kmem_cache_init_late(void)
 {
-#ifndef CONFIG_SLUB_TINY
 	flushwq = alloc_workqueue("slub_flushwq", WQ_MEM_RECLAIM, 0);
 	WARN_ON(!flushwq);
-#endif
 }
 
 struct kmem_cache *
