Skip to content

Commit 4c0a17e

Browse files
committed
slab: prevent recursive kmalloc() in alloc_empty_sheaf()
We want to expand usage of sheaves to all non-boot caches, including kmalloc caches. Since sheaves themselves are also allocated by kmalloc(), we need to prevent excessive or infinite recursion — depending on sheaf size, the sheaf can be allocated from a smaller, the same, or a larger kmalloc size bucket; there is no particular constraint.

This is similar to allocating the objext arrays, so let's just reuse the existing mechanisms for those. __GFP_NO_OBJ_EXT in alloc_empty_sheaf() will prevent a nested kmalloc() from allocating a sheaf itself — the nested cache will either have sheaves already, or fall back to a non-sheaf-cached allocation (so bootstrap of sheaves in a kmalloc cache that allocates sheaves from its own size bucket is possible). Additionally, reuse OBJCGS_CLEAR_MASK to clear unwanted gfp flags from the nested allocation.

Link: https://patch.msgid.link/20251105-sheaves-cleanups-v1-5-b8218e1ac7ef@suse.cz
Reviewed-by: Harry Yoo <harry.yoo@oracle.com>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
1 parent 31e0886 commit 4c0a17e

2 files changed

Lines changed: 26 additions & 16 deletions

File tree

include/linux/gfp_types.h

Lines changed: 0 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -55,9 +55,7 @@ enum {
 #ifdef CONFIG_LOCKDEP
 	___GFP_NOLOCKDEP_BIT,
 #endif
-#ifdef CONFIG_SLAB_OBJ_EXT
 	___GFP_NO_OBJ_EXT_BIT,
-#endif
 	___GFP_LAST_BIT
 };
 

@@ -98,11 +96,7 @@ enum {
 #else
 #define ___GFP_NOLOCKDEP	0
 #endif
-#ifdef CONFIG_SLAB_OBJ_EXT
 #define ___GFP_NO_OBJ_EXT	BIT(___GFP_NO_OBJ_EXT_BIT)
-#else
-#define ___GFP_NO_OBJ_EXT	0
-#endif
 
 /*
  * Physical address zone modifiers (see linux/mmzone.h - low four bits)

mm/slub.c

Lines changed: 26 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -2031,6 +2031,14 @@ static bool freelist_corrupted(struct kmem_cache *s, struct slab *slab,
 }
 #endif /* CONFIG_SLUB_DEBUG */
 
+/*
+ * The allocated objcg pointers array is not accounted directly.
+ * Moreover, it should not come from DMA buffer and is not readily
+ * reclaimable. So those GFP bits should be masked off.
+ */
+#define OBJCGS_CLEAR_MASK	(__GFP_DMA | __GFP_RECLAIMABLE | \
+				 __GFP_ACCOUNT | __GFP_NOFAIL)
+
 #ifdef CONFIG_SLAB_OBJ_EXT
 
 #ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG
@@ -2081,14 +2089,6 @@ static inline void handle_failed_objexts_alloc(unsigned long obj_exts,
 
 #endif /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */
 
-/*
- * The allocated objcg pointers array is not accounted directly.
- * Moreover, it should not come from DMA buffer and is not readily
- * reclaimable. So those GFP bits should be masked off.
- */
-#define OBJCGS_CLEAR_MASK	(__GFP_DMA | __GFP_RECLAIMABLE | \
-				 __GFP_ACCOUNT | __GFP_NOFAIL)
-
 static inline void init_slab_obj_exts(struct slab *slab)
 {
 	slab->obj_exts = 0;
@@ -2596,8 +2596,24 @@ static void *setup_object(struct kmem_cache *s, void *object)
 
 static struct slab_sheaf *alloc_empty_sheaf(struct kmem_cache *s, gfp_t gfp)
 {
-	struct slab_sheaf *sheaf = kzalloc(struct_size(sheaf, objects,
-				s->sheaf_capacity), gfp);
+	struct slab_sheaf *sheaf;
+	size_t sheaf_size;
+
+	if (gfp & __GFP_NO_OBJ_EXT)
+		return NULL;
+
+	gfp &= ~OBJCGS_CLEAR_MASK;
+
+	/*
+	 * Prevent recursion to the same cache, or a deep stack of kmallocs of
+	 * varying sizes (sheaf capacity might differ for each kmalloc size
+	 * bucket)
+	 */
+	if (s->flags & SLAB_KMALLOC)
+		gfp |= __GFP_NO_OBJ_EXT;
+
+	sheaf_size = struct_size(sheaf, objects, s->sheaf_capacity);
+	sheaf = kzalloc(sheaf_size, gfp);
 
 	if (unlikely(!sheaf))
 		return NULL;

0 commit comments

Comments (0)