 
 /*
  * Flags to pass to kmem_cache_create().
- * The ones marked DEBUG are only valid if CONFIG_DEBUG_SLAB is set.
+ * The ones marked DEBUG need CONFIG_SLUB_DEBUG enabled, otherwise they are no-ops.
  */
 /* DEBUG: Perform (expensive) checks on alloc/free */
 #define SLAB_CONSISTENCY_CHECKS	((slab_flags_t __force)0x00000100U)
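
The new wording matters for callers: SLAB_CONSISTENCY_CHECKS is now a request that only takes effect when CONFIG_SLUB_DEBUG is built in. A minimal sketch of a caller opting into the expensive checks (hypothetical foo_cache module code, not part of this diff):

#include <linux/module.h>
#include <linux/slab.h>

struct foo {
	int id;
	char name[32];
};

static struct kmem_cache *foo_cache;

static int __init foo_init(void)
{
	/* With CONFIG_SLUB_DEBUG=y this enables the expensive alloc/free
	 * consistency checks; without it the flag is silently a no-op. */
	foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
				      SLAB_CONSISTENCY_CHECKS, NULL);
	return foo_cache ? 0 : -ENOMEM;
}
module_init(foo_init);
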
@@ -302,25 +302,15 @@ static inline unsigned int arch_slab_minalign(void) |
  * Kmalloc array related definitions
  */
 
-#ifdef CONFIG_SLAB
 /*
- * SLAB and SLUB directly allocates requests fitting in to an order-1 page
+ * SLUB directly allocates requests fitting into an order-1 page
  * (PAGE_SIZE*2). Larger requests are passed to the page allocator.
  */
 #define KMALLOC_SHIFT_HIGH	(PAGE_SHIFT + 1)
 #define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT)
 #ifndef KMALLOC_SHIFT_LOW
-#define KMALLOC_SHIFT_LOW	5
-#endif
-#endif
-
-#ifdef CONFIG_SLUB
-#define KMALLOC_SHIFT_HIGH	(PAGE_SHIFT + 1)
-#define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT)
-#ifndef KMALLOC_SHIFT_LOW
 #define KMALLOC_SHIFT_LOW	3
 #endif
-#endif
 
 /* Maximum allocatable size */
 #define KMALLOC_MAX_SIZE	(1UL << KMALLOC_SHIFT_MAX)
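
To make the consolidated shift math concrete, here is a small userspace sketch mirroring the macros above (assuming PAGE_SHIFT = 12 and MAX_ORDER = 10, typical x86-64 values; the concrete numbers are illustrative, not taken from this diff):

#include <stdio.h>

#define PAGE_SHIFT		12
#define MAX_ORDER		10
#define KMALLOC_SHIFT_HIGH	(PAGE_SHIFT + 1)		/* 13 */
#define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT)	/* 22 */
#define KMALLOC_SHIFT_LOW	3

int main(void)
{
	/* Largest request served from a kmalloc cache: order-1, 8 KiB. */
	printf("kmalloc cache max: %lu bytes\n", 1UL << KMALLOC_SHIFT_HIGH);
	/* Smallest kmalloc cache with SLUB's KMALLOC_SHIFT_LOW = 3: 8 bytes. */
	printf("kmalloc cache min: %lu bytes\n", 1UL << KMALLOC_SHIFT_LOW);
	/* Above the cache ceiling, requests fall through to the page
	 * allocator, up to KMALLOC_SHIFT_MAX: 4 MiB here. */
	printf("kmalloc absolute max: %lu bytes\n", 1UL << KMALLOC_SHIFT_MAX);
	return 0;
}

With the SLAB branch gone there is a single KMALLOC_SHIFT_LOW of 3, so the smallest kmalloc cache is 8 bytes rather than the 32 bytes SLAB's value of 5 gave.
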
@@ -788,12 +778,4 @@ size_t kmalloc_size_roundup(size_t size); |
 
 void __init kmem_cache_init_late(void);
 
-#if defined(CONFIG_SMP) && defined(CONFIG_SLAB)
-int slab_prepare_cpu(unsigned int cpu);
-int slab_dead_cpu(unsigned int cpu);
-#else
-#define slab_prepare_cpu	NULL
-#define slab_dead_cpu	NULL
-#endif
-
 #endif	/* _LINUX_SLAB_H */
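
For context on why the hotplug declarations can go: slab_prepare_cpu()/slab_dead_cpu() existed only for SLAB's entry in the CPU hotplug state table, while SLUB registers its own teardown callback internally and needs nothing exported from this header. A condensed sketch of the SLUB side, paraphrased from mm/slub.c (not part of this diff; setup details elided):

#include <linux/cpuhotplug.h>
#include <linux/slab.h>

/* When a CPU goes offline, flush its per-CPU slabs back to the node
 * lists so no objects are stranded on the dead CPU. */
static int slub_cpu_dead(unsigned int cpu)
{
	struct kmem_cache *s;

	mutex_lock(&slab_mutex);
	list_for_each_entry(s, &slab_caches, list)
		__flush_cpu_slab(s, cpu);
	mutex_unlock(&slab_mutex);
	return 0;
}

void __init kmem_cache_init(void)
{
	/* ... allocator bootstrap elided ... */

	/* Registered once here, so slab.h needs no prototypes and no
	 * NULL fallback defines for the hotplug core. */
	cpuhp_setup_state_nocalls(CPUHP_SLUB_DEAD, "slub:dead", NULL,
				  slub_cpu_dead);
}
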