@@ -51,14 +51,6 @@ struct slab {
5151 };
5252 unsigned int __unused ;
5353
54- #elif defined(CONFIG_SLOB )
55-
56- struct list_head slab_list ;
57- void * __unused_1 ;
58- void * freelist ; /* first free block */
59- long units ;
60- unsigned int __unused_2 ;
61-
6254#else
6355#error "Unexpected slab allocator configured"
6456#endif
@@ -72,11 +64,7 @@ struct slab {
7264#define SLAB_MATCH (pg , sl ) \
7365 static_assert(offsetof(struct page, pg) == offsetof(struct slab, sl))
7466SLAB_MATCH (flags , __page_flags );
75- #ifndef CONFIG_SLOB
7667SLAB_MATCH (compound_head , slab_cache ); /* Ensure bit 0 is clear */
77- #else
78- SLAB_MATCH (compound_head , slab_list ); /* Ensure bit 0 is clear */
79- #endif
8068SLAB_MATCH (_refcount , __page_refcount );
8169#ifdef CONFIG_MEMCG
8270SLAB_MATCH (memcg_data , memcg_data );
@@ -200,31 +188,6 @@ static inline size_t slab_size(const struct slab *slab)
200188 return PAGE_SIZE << slab_order (slab );
201189}
202190
203- #ifdef CONFIG_SLOB
204- /*
205- * Common fields provided in kmem_cache by all slab allocators
206- * This struct is either used directly by the allocator (SLOB)
207- * or the allocator must include definitions for all fields
208- * provided in kmem_cache_common in their definition of kmem_cache.
209- *
210- * Once we can do anonymous structs (C11 standard) we could put a
211- * anonymous struct definition in these allocators so that the
212- * separate allocations in the kmem_cache structure of SLAB and
213- * SLUB is no longer needed.
214- */
215- struct kmem_cache {
216- unsigned int object_size ;/* The original size of the object */
217- unsigned int size ; /* The aligned/padded/added on size */
218- unsigned int align ; /* Alignment as calculated */
219- slab_flags_t flags ; /* Active flags on the slab */
220- const char * name ; /* Slab name for sysfs */
221- int refcount ; /* Use counter */
222- void (* ctor )(void * ); /* Called on object slot creation */
223- struct list_head list ; /* List of all slab caches on the system */
224- };
225-
226- #endif /* CONFIG_SLOB */
227-
228191#ifdef CONFIG_SLAB
229192#include <linux/slab_def.h>
230193#endif
@@ -274,7 +237,6 @@ extern const struct kmalloc_info_struct {
274237 unsigned int size ;
275238} kmalloc_info [];
276239
277- #ifndef CONFIG_SLOB
278240/* Kmalloc array related functions */
279241void setup_kmalloc_cache_index_table (void );
280242void create_kmalloc_caches (slab_flags_t );
@@ -286,7 +248,6 @@ void *__kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags,
286248 int node , size_t orig_size ,
287249 unsigned long caller );
288250void __kmem_cache_free (struct kmem_cache * s , void * x , unsigned long caller );
289- #endif
290251
291252gfp_t kmalloc_fix_flags (gfp_t flags );
292253
@@ -303,33 +264,16 @@ extern void create_boot_cache(struct kmem_cache *, const char *name,
303264int slab_unmergeable (struct kmem_cache * s );
304265struct kmem_cache * find_mergeable (unsigned size , unsigned align ,
305266 slab_flags_t flags , const char * name , void (* ctor )(void * ));
306- #ifndef CONFIG_SLOB
307267struct kmem_cache *
308268__kmem_cache_alias (const char * name , unsigned int size , unsigned int align ,
309269 slab_flags_t flags , void (* ctor )(void * ));
310270
311271slab_flags_t kmem_cache_flags (unsigned int object_size ,
312272 slab_flags_t flags , const char * name );
313- #else
314- static inline struct kmem_cache *
315- __kmem_cache_alias (const char * name , unsigned int size , unsigned int align ,
316- slab_flags_t flags , void (* ctor )(void * ))
317- { return NULL ; }
318-
319- static inline slab_flags_t kmem_cache_flags (unsigned int object_size ,
320- slab_flags_t flags , const char * name )
321- {
322- return flags ;
323- }
324- #endif
325273
326274static inline bool is_kmalloc_cache (struct kmem_cache * s )
327275{
328- #ifndef CONFIG_SLOB
329276 return (s -> flags & SLAB_KMALLOC );
330- #else
331- return false;
332- #endif
333277}
334278
335279/* Legal flag mask for kmem_cache_create(), for various configurations */
@@ -634,7 +578,6 @@ static inline void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
634578}
635579#endif /* CONFIG_MEMCG_KMEM */
636580
637- #ifndef CONFIG_SLOB
638581static inline struct kmem_cache * virt_to_cache (const void * obj )
639582{
640583 struct slab * slab ;
@@ -684,8 +627,6 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
684627
685628void free_large_kmalloc (struct folio * folio , void * object );
686629
687- #endif /* CONFIG_SLOB */
688-
689630size_t __ksize (const void * objp );
690631
691632static inline size_t slab_ksize (const struct kmem_cache * s )
@@ -777,7 +718,6 @@ static inline void slab_post_alloc_hook(struct kmem_cache *s,
777718 memcg_slab_post_alloc_hook (s , objcg , flags , size , p );
778719}
779720
780- #ifndef CONFIG_SLOB
781721/*
782722 * The slab lists for all objects.
783723 */
@@ -824,7 +764,6 @@ static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
824764 for (__node = 0; __node < nr_node_ids; __node++) \
825765 if ((__n = get_node(__s, __node)))
826766
827- #endif
828767
829768#if defined(CONFIG_SLAB ) || defined(CONFIG_SLUB_DEBUG )
830769void dump_unreclaimable_slab (void );