Skip to content

Commit e323b52

Browse files
committed
slab: remove SLUB_CPU_PARTIAL
We have removed the partial slab usage from allocation paths. Now remove the whole config option and associated code.

Reviewed-by: Harry Yoo <harry.yoo@oracle.com>
Reviewed-by: Hao Li <hao.li@linux.dev>
Reviewed-by: Suren Baghdasaryan <surenb@google.com>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
1 parent 17c38c8 commit e323b52

3 files changed

Lines changed: 19 additions & 342 deletions

File tree

mm/Kconfig

Lines changed: 0 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -247,17 +247,6 @@ config SLUB_STATS
247247
out which slabs are relevant to a particular load.
248248
Try running: slabinfo -DA
249249

250-
config SLUB_CPU_PARTIAL
251-
default y
252-
depends on SMP && !SLUB_TINY
253-
bool "Enable per cpu partial caches"
254-
help
255-
Per cpu partial caches accelerate objects allocation and freeing
256-
that is local to a processor at the price of more indeterminism
257-
in the latency of the free. On overflow these caches will be cleared
258-
which requires the taking of locks that may cause latency spikes.
259-
Typically one would choose no for a realtime system.
260-
261250
config RANDOM_KMALLOC_CACHES
262251
default n
263252
depends on !SLUB_TINY

mm/slab.h

Lines changed: 0 additions & 29 deletions
Original file line numberDiff line numberDiff line change
@@ -77,12 +77,6 @@ struct slab {
7777
struct llist_node llnode;
7878
void *flush_freelist;
7979
};
80-
#ifdef CONFIG_SLUB_CPU_PARTIAL
81-
struct {
82-
struct slab *next;
83-
int slabs; /* Nr of slabs left */
84-
};
85-
#endif
8680
};
8781
/* Double-word boundary */
8882
struct freelist_counters;
@@ -188,23 +182,6 @@ static inline size_t slab_size(const struct slab *slab)
188182
return PAGE_SIZE << slab_order(slab);
189183
}
190184

191-
#ifdef CONFIG_SLUB_CPU_PARTIAL
192-
#define slub_percpu_partial(c) ((c)->partial)
193-
194-
#define slub_set_percpu_partial(c, p) \
195-
({ \
196-
slub_percpu_partial(c) = (p)->next; \
197-
})
198-
199-
#define slub_percpu_partial_read_once(c) READ_ONCE(slub_percpu_partial(c))
200-
#else
201-
#define slub_percpu_partial(c) NULL
202-
203-
#define slub_set_percpu_partial(c, p)
204-
205-
#define slub_percpu_partial_read_once(c) NULL
206-
#endif // CONFIG_SLUB_CPU_PARTIAL
207-
208185
/*
209186
* Word size structure that can be atomically updated or read and that
210187
* contains both the order and the number of objects that a slab of the
@@ -228,12 +205,6 @@ struct kmem_cache {
228205
unsigned int object_size; /* Object size without metadata */
229206
struct reciprocal_value reciprocal_size;
230207
unsigned int offset; /* Free pointer offset */
231-
#ifdef CONFIG_SLUB_CPU_PARTIAL
232-
/* Number of per cpu partial objects to keep around */
233-
unsigned int cpu_partial;
234-
/* Number of per cpu partial slabs to keep around */
235-
unsigned int cpu_partial_slabs;
236-
#endif
237208
unsigned int sheaf_capacity;
238209
struct kmem_cache_order_objects oo;
239210

0 commit comments

Comments (0)