Skip to content

Commit de4d608

Browse files
committed
mm/slab: remove CONFIG_SLOB code from slab common code
CONFIG_SLOB has been removed from Kconfig. Remove code and #ifdef's specific to SLOB in the slab headers and common code. Signed-off-by: Vlastimil Babka <vbabka@suse.cz> Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com> Acked-by: Lorenzo Stoakes <lstoakes@gmail.com> Acked-by: Mike Rapoport (IBM) <rppt@kernel.org>
1 parent d88e2a2 commit de4d608

3 files changed

Lines changed: 0 additions & 102 deletions

File tree

include/linux/slab.h

Lines changed: 0 additions & 39 deletions
Original file line number | Diff line number | Diff line change
@@ -298,19 +298,6 @@ static inline unsigned int arch_slab_minalign(void)
298298
#endif
299299
#endif
300300

301-
#ifdef CONFIG_SLOB
302-
/*
303-
* SLOB passes all requests larger than one page to the page allocator.
304-
* No kmalloc array is necessary since objects of different sizes can
305-
* be allocated from the same page.
306-
*/
307-
#define KMALLOC_SHIFT_HIGH PAGE_SHIFT
308-
#define KMALLOC_SHIFT_MAX (MAX_ORDER + PAGE_SHIFT - 1)
309-
#ifndef KMALLOC_SHIFT_LOW
310-
#define KMALLOC_SHIFT_LOW 3
311-
#endif
312-
#endif
313-
314301
/* Maximum allocatable size */
315302
#define KMALLOC_MAX_SIZE (1UL << KMALLOC_SHIFT_MAX)
316303
/* Maximum size for which we actually use a slab cache */
@@ -366,7 +353,6 @@ enum kmalloc_cache_type {
366353
NR_KMALLOC_TYPES
367354
};
368355

369-
#ifndef CONFIG_SLOB
370356
extern struct kmem_cache *
371357
kmalloc_caches[NR_KMALLOC_TYPES][KMALLOC_SHIFT_HIGH + 1];
372358

@@ -458,7 +444,6 @@ static __always_inline unsigned int __kmalloc_index(size_t size,
458444
}
459445
static_assert(PAGE_SHIFT <= 20);
460446
#define kmalloc_index(s) __kmalloc_index(s, true)
461-
#endif /* !CONFIG_SLOB */
462447

463448
void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __alloc_size(1);
464449

@@ -487,10 +472,6 @@ void kmem_cache_free(struct kmem_cache *s, void *objp);
487472
void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p);
488473
int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, void **p);
489474

490-
/*
491-
* Caller must not use kfree_bulk() on memory not originally allocated
492-
* by kmalloc(), because the SLOB allocator cannot handle this.
493-
*/
494475
static __always_inline void kfree_bulk(size_t size, void **p)
495476
{
496477
kmem_cache_free_bulk(NULL, size, p);
@@ -567,7 +548,6 @@ void *kmalloc_large_node(size_t size, gfp_t flags, int node) __assume_page_align
567548
* Try really hard to succeed the allocation but fail
568549
* eventually.
569550
*/
570-
#ifndef CONFIG_SLOB
571551
static __always_inline __alloc_size(1) void *kmalloc(size_t size, gfp_t flags)
572552
{
573553
if (__builtin_constant_p(size) && size) {
@@ -583,17 +563,7 @@ static __always_inline __alloc_size(1) void *kmalloc(size_t size, gfp_t flags)
583563
}
584564
return __kmalloc(size, flags);
585565
}
586-
#else
587-
static __always_inline __alloc_size(1) void *kmalloc(size_t size, gfp_t flags)
588-
{
589-
if (__builtin_constant_p(size) && size > KMALLOC_MAX_CACHE_SIZE)
590-
return kmalloc_large(size, flags);
591-
592-
return __kmalloc(size, flags);
593-
}
594-
#endif
595566

596-
#ifndef CONFIG_SLOB
597567
static __always_inline __alloc_size(1) void *kmalloc_node(size_t size, gfp_t flags, int node)
598568
{
599569
if (__builtin_constant_p(size) && size) {
@@ -609,15 +579,6 @@ static __always_inline __alloc_size(1) void *kmalloc_node(size_t size, gfp_t fla
609579
}
610580
return __kmalloc_node(size, flags, node);
611581
}
612-
#else
613-
static __always_inline __alloc_size(1) void *kmalloc_node(size_t size, gfp_t flags, int node)
614-
{
615-
if (__builtin_constant_p(size) && size > KMALLOC_MAX_CACHE_SIZE)
616-
return kmalloc_large_node(size, flags, node);
617-
618-
return __kmalloc_node(size, flags, node);
619-
}
620-
#endif
621582

622583
/**
623584
* kmalloc_array - allocate memory for an array.

mm/slab.h

Lines changed: 0 additions & 61 deletions
Original file line number | Diff line number | Diff line change
@@ -51,14 +51,6 @@ struct slab {
5151
};
5252
unsigned int __unused;
5353

54-
#elif defined(CONFIG_SLOB)
55-
56-
struct list_head slab_list;
57-
void *__unused_1;
58-
void *freelist; /* first free block */
59-
long units;
60-
unsigned int __unused_2;
61-
6254
#else
6355
#error "Unexpected slab allocator configured"
6456
#endif
@@ -72,11 +64,7 @@ struct slab {
7264
#define SLAB_MATCH(pg, sl) \
7365
static_assert(offsetof(struct page, pg) == offsetof(struct slab, sl))
7466
SLAB_MATCH(flags, __page_flags);
75-
#ifndef CONFIG_SLOB
7667
SLAB_MATCH(compound_head, slab_cache); /* Ensure bit 0 is clear */
77-
#else
78-
SLAB_MATCH(compound_head, slab_list); /* Ensure bit 0 is clear */
79-
#endif
8068
SLAB_MATCH(_refcount, __page_refcount);
8169
#ifdef CONFIG_MEMCG
8270
SLAB_MATCH(memcg_data, memcg_data);
@@ -200,31 +188,6 @@ static inline size_t slab_size(const struct slab *slab)
200188
return PAGE_SIZE << slab_order(slab);
201189
}
202190

203-
#ifdef CONFIG_SLOB
204-
/*
205-
* Common fields provided in kmem_cache by all slab allocators
206-
* This struct is either used directly by the allocator (SLOB)
207-
* or the allocator must include definitions for all fields
208-
* provided in kmem_cache_common in their definition of kmem_cache.
209-
*
210-
* Once we can do anonymous structs (C11 standard) we could put a
211-
* anonymous struct definition in these allocators so that the
212-
* separate allocations in the kmem_cache structure of SLAB and
213-
* SLUB is no longer needed.
214-
*/
215-
struct kmem_cache {
216-
unsigned int object_size;/* The original size of the object */
217-
unsigned int size; /* The aligned/padded/added on size */
218-
unsigned int align; /* Alignment as calculated */
219-
slab_flags_t flags; /* Active flags on the slab */
220-
const char *name; /* Slab name for sysfs */
221-
int refcount; /* Use counter */
222-
void (*ctor)(void *); /* Called on object slot creation */
223-
struct list_head list; /* List of all slab caches on the system */
224-
};
225-
226-
#endif /* CONFIG_SLOB */
227-
228191
#ifdef CONFIG_SLAB
229192
#include <linux/slab_def.h>
230193
#endif
@@ -274,7 +237,6 @@ extern const struct kmalloc_info_struct {
274237
unsigned int size;
275238
} kmalloc_info[];
276239

277-
#ifndef CONFIG_SLOB
278240
/* Kmalloc array related functions */
279241
void setup_kmalloc_cache_index_table(void);
280242
void create_kmalloc_caches(slab_flags_t);
@@ -286,7 +248,6 @@ void *__kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags,
286248
int node, size_t orig_size,
287249
unsigned long caller);
288250
void __kmem_cache_free(struct kmem_cache *s, void *x, unsigned long caller);
289-
#endif
290251

291252
gfp_t kmalloc_fix_flags(gfp_t flags);
292253

@@ -303,33 +264,16 @@ extern void create_boot_cache(struct kmem_cache *, const char *name,
303264
int slab_unmergeable(struct kmem_cache *s);
304265
struct kmem_cache *find_mergeable(unsigned size, unsigned align,
305266
slab_flags_t flags, const char *name, void (*ctor)(void *));
306-
#ifndef CONFIG_SLOB
307267
struct kmem_cache *
308268
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
309269
slab_flags_t flags, void (*ctor)(void *));
310270

311271
slab_flags_t kmem_cache_flags(unsigned int object_size,
312272
slab_flags_t flags, const char *name);
313-
#else
314-
static inline struct kmem_cache *
315-
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
316-
slab_flags_t flags, void (*ctor)(void *))
317-
{ return NULL; }
318-
319-
static inline slab_flags_t kmem_cache_flags(unsigned int object_size,
320-
slab_flags_t flags, const char *name)
321-
{
322-
return flags;
323-
}
324-
#endif
325273

326274
static inline bool is_kmalloc_cache(struct kmem_cache *s)
327275
{
328-
#ifndef CONFIG_SLOB
329276
return (s->flags & SLAB_KMALLOC);
330-
#else
331-
return false;
332-
#endif
333277
}
334278

335279
/* Legal flag mask for kmem_cache_create(), for various configurations */
@@ -634,7 +578,6 @@ static inline void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
634578
}
635579
#endif /* CONFIG_MEMCG_KMEM */
636580

637-
#ifndef CONFIG_SLOB
638581
static inline struct kmem_cache *virt_to_cache(const void *obj)
639582
{
640583
struct slab *slab;
@@ -684,8 +627,6 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
684627

685628
void free_large_kmalloc(struct folio *folio, void *object);
686629

687-
#endif /* CONFIG_SLOB */
688-
689630
size_t __ksize(const void *objp);
690631

691632
static inline size_t slab_ksize(const struct kmem_cache *s)
@@ -777,7 +718,6 @@ static inline void slab_post_alloc_hook(struct kmem_cache *s,
777718
memcg_slab_post_alloc_hook(s, objcg, flags, size, p);
778719
}
779720

780-
#ifndef CONFIG_SLOB
781721
/*
782722
* The slab lists for all objects.
783723
*/
@@ -824,7 +764,6 @@ static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
824764
for (__node = 0; __node < nr_node_ids; __node++) \
825765
if ((__n = get_node(__s, __node)))
826766

827-
#endif
828767

829768
#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
830769
void dump_unreclaimable_slab(void);

mm/slab_common.c

Lines changed: 0 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -625,7 +625,6 @@ void kmem_dump_obj(void *object)
625625
EXPORT_SYMBOL_GPL(kmem_dump_obj);
626626
#endif
627627

628-
#ifndef CONFIG_SLOB
629628
/* Create a cache during boot when no slab services are available yet */
630629
void __init create_boot_cache(struct kmem_cache *s, const char *name,
631630
unsigned int size, slab_flags_t flags,
@@ -1079,7 +1078,6 @@ void *kmalloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
10791078
return ret;
10801079
}
10811080
EXPORT_SYMBOL(kmalloc_node_trace);
1082-
#endif /* !CONFIG_SLOB */
10831081

10841082
gfp_t kmalloc_fix_flags(gfp_t flags)
10851083
{

0 commit comments

Comments (0)