Commit ea6b5e5

slab: move kfence_alloc() out of internal bulk alloc
SLUB's internal bulk allocation __kmem_cache_alloc_bulk() can currently allocate some objects from KFENCE, i.e. when refilling a sheaf. It works, but it's conceptually the wrong layer: KFENCE allocations should only happen when objects are actually handed out from slab to its users. Currently, for sheaf-enabled caches, slab_alloc_node() can return a KFENCE object via kfence_alloc(), but also via alloc_from_pcs() when a sheaf was refilled with KFENCE objects. Continuing like this would also complicate the upcoming sheaf refill changes.

Thus remove the KFENCE allocation from __kmem_cache_alloc_bulk() and move it to the places that return slab objects to users. slab_alloc_node() is already covered (see above).

Add kfence_alloc() to kmem_cache_alloc_from_sheaf() to handle KFENCE allocations from prefilled sheaves, with a comment that the caller should not expect the sheaf size to decrease after every allocation because of this possibility.

For kmem_cache_alloc_bulk(), implement a different strategy: handle KFENCE upfront and rely on internal batched operations afterwards. Assume there will be at most one KFENCE allocation per bulk allocation, and assign its index in the array of objects randomly.

Cc: Alexander Potapenko <glider@google.com>
Cc: Marco Elver <elver@google.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Link: https://patch.msgid.link/20251105-sheaves-cleanups-v1-2-b8218e1ac7ef@suse.cz
Reviewed-by: Harry Yoo <harry.yoo@oracle.com>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
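For intuition, here is a minimal userspace sketch of the bulk strategy described above: reserve one slot when KFENCE hits, batch-fill the rest, then insert the KFENCE object at a random index. fake_bulk_fill(), bulk_alloc() and KFENCE_MARKER are illustrative stand-ins, and rand() replaces the kernel's get_random_u32_below(); this is not the kernel code.

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

static int kfence_marker;
#define KFENCE_MARKER ((void *)&kfence_marker)

/* stand-in for the internal batched allocation (__kmem_cache_alloc_bulk) */
static void fake_bulk_fill(void **p, size_t n)
{
        for (size_t i = 0; i < n; i++)
                p[i] = malloc(16);
}

/* emulates the new kmem_cache_alloc_bulk() KFENCE handling */
static size_t bulk_alloc(void **p, size_t size, int kfence_hit)
{
        if (kfence_hit) {
                if (size == 1) {                /* single-object shortcut */
                        p[0] = KFENCE_MARKER;
                        return 1;
                }
                size--;                         /* reserve one slot for KFENCE */
        }

        fake_bulk_fill(p, size);                /* batched fill of the rest */

        if (kfence_hit) {
                size_t idx = (size_t)rand() % (size + 1);

                if (idx != size)
                        p[size] = p[idx];       /* move resident object to the tail */
                p[idx] = KFENCE_MARKER;         /* KFENCE object at a random index */
                size++;
        }
        return size;
}

int main(void)
{
        void *p[8];
        size_t n;

        srand((unsigned)time(NULL));
        n = bulk_alloc(p, 8, 1);                /* pretend kfence_alloc() succeeded */

        for (size_t i = 0; i < n; i++)
                printf("p[%zu]%s\n", i, p[i] == KFENCE_MARKER ? "  <- kfence" : "");
        return 0;                               /* sketch: allocations deliberately not freed */
}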
1 parent: f6087b9

1 file changed: mm/slub.c (36 additions, 8 deletions)
@@ -5540,6 +5540,9 @@ int kmem_cache_refill_sheaf(struct kmem_cache *s, gfp_t gfp,
  *
  * The gfp parameter is meant only to specify __GFP_ZERO or __GFP_ACCOUNT
  * memcg charging is forced over limit if necessary, to avoid failure.
+ *
+ * It is possible that the allocation comes from kfence and then the sheaf
+ * size is not decreased.
  */
 void *
 kmem_cache_alloc_from_sheaf_noprof(struct kmem_cache *s, gfp_t gfp,
@@ -5551,7 +5554,10 @@ kmem_cache_alloc_from_sheaf_noprof(struct kmem_cache *s, gfp_t gfp,
         if (sheaf->size == 0)
                 goto out;
 
-        ret = sheaf->objects[--sheaf->size];
+        ret = kfence_alloc(s, s->object_size, gfp);
+
+        if (likely(!ret))
+                ret = sheaf->objects[--sheaf->size];
 
         init = slab_want_init_on_alloc(gfp, s);
 
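A caller-visible consequence of the hunk above: an allocation may now be satisfied by KFENCE without popping an object off the sheaf, so callers should count returned pointers rather than watch the sheaf size. A minimal caller sketch follows, assuming the prefill/return API names, the ERR_PTR failure convention, and kmem_cache_sheaf_size() from the sheaves series; none of those are part of this patch.

/* Hedged sketch: kmem_cache_prefill_sheaf(), kmem_cache_return_sheaf() and
 * kmem_cache_sheaf_size() are assumed sheaves-series API names. */
static int alloc_eight(struct kmem_cache *cache, void **objs)
{
        struct slab_sheaf *sheaf;
        int i;

        sheaf = kmem_cache_prefill_sheaf(cache, GFP_KERNEL, 8);
        if (IS_ERR(sheaf))              /* assumed failure convention */
                return PTR_ERR(sheaf);

        for (i = 0; i < 8; i++) {
                objs[i] = kmem_cache_alloc_from_sheaf(cache, GFP_KERNEL, sheaf);
                /*
                 * Do not assert that the sheaf shrank here: if objs[i]
                 * came from KFENCE, kmem_cache_sheaf_size(sheaf) is
                 * unchanged. Count returned pointers instead.
                 */
        }

        kmem_cache_return_sheaf(cache, GFP_KERNEL, sheaf);
        return 0;
}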
@@ -7399,14 +7405,8 @@ int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
         local_lock_irqsave(&s->cpu_slab->lock, irqflags);
 
         for (i = 0; i < size; i++) {
-                void *object = kfence_alloc(s, s->object_size, flags);
-
-                if (unlikely(object)) {
-                        p[i] = object;
-                        continue;
-                }
+                void *object = c->freelist;
 
-                object = c->freelist;
                 if (unlikely(!object)) {
                         /*
                          * We may have removed an object from c->freelist using
@@ -7487,6 +7487,7 @@ int kmem_cache_alloc_bulk_noprof(struct kmem_cache *s, gfp_t flags, size_t size,
                                void **p)
 {
         unsigned int i = 0;
+        void *kfence_obj;
 
         if (!size)
                 return 0;
@@ -7495,6 +7496,20 @@ int kmem_cache_alloc_bulk_noprof(struct kmem_cache *s, gfp_t flags, size_t size,
         if (unlikely(!s))
                 return 0;
 
+        /*
+         * to make things simpler, only assume at most once kfence allocated
+         * object per bulk allocation and choose its index randomly
+         */
+        kfence_obj = kfence_alloc(s, s->object_size, flags);
+
+        if (unlikely(kfence_obj)) {
+                if (unlikely(size == 1)) {
+                        p[0] = kfence_obj;
+                        goto out;
+                }
+                size--;
+        }
+
         if (s->cpu_sheaves)
                 i = alloc_from_pcs_bulk(s, size, p);
 
@@ -7506,10 +7521,23 @@ int kmem_cache_alloc_bulk_noprof(struct kmem_cache *s, gfp_t flags, size_t size,
                 if (unlikely(__kmem_cache_alloc_bulk(s, flags, size - i, p + i) == 0)) {
                         if (i > 0)
                                 __kmem_cache_free_bulk(s, i, p);
+                        if (kfence_obj)
+                                __kfence_free(kfence_obj);
                         return 0;
                 }
         }
 
+        if (unlikely(kfence_obj)) {
+                int idx = get_random_u32_below(size + 1);
+
+                if (idx != size)
+                        p[size] = p[idx];
+                p[idx] = kfence_obj;
+
+                size++;
+        }
+
+out:
         /*
          * memcg and kmem_cache debug support and memory initialization.
          * Done outside of the IRQ disabled fastpath loop.
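The placement logic in the final hunk is a single-step random insert: idx is uniform over [0, size], and the object already resident at idx is preserved by moving it to the tail slot, so the KFENCE object is equally likely to occupy any of the size + 1 final positions and no bulk-allocated object is lost. A small userspace check of both properties, with rand() standing in for get_random_u32_below() and small integers standing in for object pointers:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define SIZE    4               /* objects delivered by the bulk fastpath */
#define TRIALS  1000000

static int kfence_marker;

int main(void)
{
        long hits[SIZE + 1] = { 0 };

        srand(42);
        for (long t = 0; t < TRIALS; t++) {
                void *p[SIZE + 1] = { (void *)1, (void *)2, (void *)3, (void *)4 };
                int idx = rand() % (SIZE + 1); /* get_random_u32_below(size + 1) */
                long sum = 0;

                if (idx != SIZE)
                        p[SIZE] = p[idx];       /* displaced object survives at the tail */
                p[idx] = &kfence_marker;
                hits[idx]++;

                /* invariant: all four original objects are still present */
                for (int i = 0; i <= SIZE; i++)
                        if (p[i] != &kfence_marker)
                                sum += (long)(uintptr_t)p[i];
                if (sum != 1 + 2 + 3 + 4)
                        return 1;
        }

        /* expect roughly 20% per slot: the KFENCE index is uniform */
        for (int i = 0; i <= SIZE; i++)
                printf("slot %d: %.2f%%\n", i, 100.0 * hits[i] / TRIALS);
        return 0;
}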
