@@ -295,7 +295,7 @@ static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
295295
296296/*
297297 * Debugging flags that require metadata to be stored in the slab. These get
298- * disabled when slub_debug=O is used and a cache's min order increases with
298+ * disabled when slab_debug=O is used and a cache's min order increases with
299299 * metadata.
300300 */
301301#define DEBUG_METADATA_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
@@ -1616,7 +1616,7 @@ static inline int free_consistency_checks(struct kmem_cache *s,
16161616}
16171617
16181618/*
1619- * Parse a block of slub_debug options. Blocks are delimited by ';'
1619+ * Parse a block of slab_debug options. Blocks are delimited by ';'
16201620 *
16211621 * @str: start of block
16221622 * @flags: returns parsed flags, or DEBUG_DEFAULT_FLAGS if none specified
@@ -1677,7 +1677,7 @@ parse_slub_debug_flags(char *str, slab_flags_t *flags, char **slabs, bool init)
16771677			break;
16781678		default:
16791679			if (init)
1680-				pr_err("slub_debug option '%c' unknown. skipped\n", *str);
1680+				pr_err("slab_debug option '%c' unknown. skipped\n", *str);
16811681 }
16821682 }
16831683check_slabs :
@@ -1736,7 +1736,7 @@ static int __init setup_slub_debug(char *str)
17361736 /*
17371737 * For backwards compatibility, a single list of flags with list of
17381738 * slabs means debugging is only changed for those slabs, so the global
1739- * slub_debug should be unchanged (0 or DEBUG_DEFAULT_FLAGS, depending
1739+ * slab_debug should be unchanged (0 or DEBUG_DEFAULT_FLAGS, depending
17401740 * on CONFIG_SLUB_DEBUG_ON). We can extended that to multiple lists as
17411741 * long as there is no option specifying flags without a slab list.
17421742 */
@@ -1760,7 +1760,8 @@ static int __init setup_slub_debug(char *str)
17601760 return 1 ;
17611761}
17621762
1763- __setup("slub_debug", setup_slub_debug);
1763+ __setup("slab_debug", setup_slub_debug);
1764+ __setup_param("slub_debug", slub_debug, setup_slub_debug, 0);
17641765
17651766/*
17661767 * kmem_cache_flags - apply debugging options to the cache
@@ -1770,7 +1771,7 @@ __setup("slub_debug", setup_slub_debug);
17701771 *
17711772 * Debug option(s) are applied to @flags. In addition to the debug
17721773 * option(s), if a slab name (or multiple) is specified i.e.
1773- * slub_debug=<Debug-Options>,<slab name1>,<slab name2> ...
1774+ * slab_debug=<Debug-Options>,<slab name1>,<slab name2> ...
17741775 * then only the select slabs will receive the debug option(s).
17751776 */
17761777slab_flags_t kmem_cache_flags (unsigned int object_size ,
@@ -3263,7 +3264,7 @@ slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
32633264 oo_order (s -> min ));
32643265
32653266	if (oo_order(s->min) > get_order(s->object_size))
3266-		pr_warn(" %s debugging increased min order, use slub_debug=O to disable.\n",
3267+		pr_warn(" %s debugging increased min order, use slab_debug=O to disable.\n",
32673268			s->name);
32683269
32693270 for_each_kmem_cache_node (s , node , n ) {
@@ -3792,11 +3793,11 @@ void slab_post_alloc_hook(struct kmem_cache *s, struct obj_cgroup *objcg,
37923793 zero_size = orig_size ;
37933794
37943795 /*
3795- * When slub_debug is enabled, avoid memory initialization integrated
3796+ * When slab_debug is enabled, avoid memory initialization integrated
37963797 * into KASAN and instead zero out the memory via the memset below with
37973798 * the proper size. Otherwise, KASAN might overwrite SLUB redzones and
37983799 * cause false-positive reports. This does not lead to a performance
3799- * penalty on production builds, as slub_debug is not intended to be
3800+ * penalty on production builds, as slab_debug is not intended to be
38003801 * enabled there.
38013802 */
38023803 if (__slub_debug_enabled ())
@@ -4702,8 +4703,8 @@ static unsigned int slub_min_objects;
47024703 * activity on the partial lists which requires taking the list_lock. This is
47034704 * less a concern for large slabs though which are rarely used.
47044705 *
4705- * slub_max_order specifies the order where we begin to stop considering the
4706- * number of objects in a slab as critical. If we reach slub_max_order then
4706+ * slab_max_order specifies the order where we begin to stop considering the
4707+ * number of objects in a slab as critical. If we reach slab_max_order then
47074708 * we try to keep the page order as low as possible. So we accept more waste
47084709 * of space in favor of a small page order.
47094710 *
@@ -4770,14 +4771,14 @@ static inline int calculate_order(unsigned int size)
47704771 * and backing off gradually.
47714772 *
47724773 * We start with accepting at most 1/16 waste and try to find the
4773- * smallest order from min_objects-derived/slub_min_order up to
4774- * slub_max_order that will satisfy the constraint. Note that increasing
4774+ * smallest order from min_objects-derived/slab_min_order up to
4775+ * slab_max_order that will satisfy the constraint. Note that increasing
47754776 * the order can only result in same or less fractional waste, not more.
47764777 *
47774778 * If that fails, we increase the acceptable fraction of waste and try
47784779 * again. The last iteration with fraction of 1/2 would effectively
47794780 * accept any waste and give us the order determined by min_objects, as
4780- * long as at least single object fits within slub_max_order .
4781+ * long as at least single object fits within slab_max_order .
47814782 */
47824783 for (unsigned int fraction = 16 ; fraction > 1 ; fraction /= 2 ) {
47834784 order = calc_slab_order (size , min_order , slub_max_order ,
@@ -4787,7 +4788,7 @@ static inline int calculate_order(unsigned int size)
47874788 }
47884789
47894790 /*
4790- * Doh this slab cannot be placed using slub_max_order .
4791+ * Doh this slab cannot be placed using slab_max_order .
47914792 */
47924793 order = get_order (size );
47934794 if (order <= MAX_PAGE_ORDER )
@@ -5313,7 +5314,9 @@ static int __init setup_slub_min_order(char *str)
53135314 return 1 ;
53145315}
53155316
5316- __setup("slub_min_order=", setup_slub_min_order);
5317+ __setup("slab_min_order=", setup_slub_min_order);
5318+ __setup_param("slub_min_order=", slub_min_order, setup_slub_min_order, 0);
5319+
53175320
53185321static int __init setup_slub_max_order(char *str)
53195322{
@@ -5326,7 +5329,8 @@ static int __init setup_slub_max_order(char *str)
53265329	return 1;
53275330}
53285331
5329- __setup("slub_max_order=", setup_slub_max_order);
5332+ __setup("slab_max_order=", setup_slub_max_order);
5333+ __setup_param("slub_max_order=", slub_max_order, setup_slub_max_order, 0);
53305334
53315335static int __init setup_slub_min_objects(char *str)
53325336{
@@ -5335,7 +5339,8 @@ static int __init setup_slub_min_objects(char *str)
53355339	return 1;
53365340}
53375341
5338- __setup("slub_min_objects=", setup_slub_min_objects);
5342+ __setup("slab_min_objects=", setup_slub_min_objects);
5343+ __setup_param("slub_min_objects=", slub_min_objects, setup_slub_min_objects, 0);
53395344
53405345#ifdef CONFIG_HARDENED_USERCOPY
53415346/*
0 commit comments