Commit 3feb464

Merge tag 'slab-for-7.0-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vbabka/slab
Pull slab fixes from Vlastimil Babka:

 - Fix for spurious page allocation warnings on sheaf refill (Harry Yoo)

 - Fix for CONFIG_MEM_ALLOC_PROFILING_DEBUG warnings (Suren Baghdasaryan)

 - Fix for kernel-doc warning on ksize() (Sanjay Chitroda)

 - Fix to avoid setting slab->stride later than on slab allocation.
   Doesn't yet fix the reports from powerpc; debugging is making
   progress (Harry Yoo)

* tag 'slab-for-7.0-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vbabka/slab:
  mm/slab: initialize slab->stride early to avoid memory ordering issues
  mm/slub: drop duplicate kernel-doc for ksize()
  mm/slab: mark alloc tags empty for sheaves allocated with __GFP_NO_OBJ_EXT
  mm/slab: pass __GFP_NOWARN to refill_sheaf() if fallback is available
2 parents d5a8e4b + e9217ca commit 3feb464

4 files changed: 39 additions & 30 deletions


include/linux/gfp_types.h

Lines changed: 2 additions & 0 deletions
@@ -139,6 +139,8 @@ enum {
  * %__GFP_ACCOUNT causes the allocation to be accounted to kmemcg.
  *
  * %__GFP_NO_OBJ_EXT causes slab allocation to have no object extension.
+ * mark_obj_codetag_empty() should be called upon freeing for objects allocated
+ * with this flag to indicate that their NULL tags are expected and normal.
  */
 #define __GFP_RECLAIMABLE	((__force gfp_t)___GFP_RECLAIMABLE)
 #define __GFP_WRITE		((__force gfp_t)___GFP_WRITE)
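
The new comment documents a contract that the sheaf changes later in this
commit rely on. A minimal sketch of that contract, modeled on the
free_empty_sheaf() hunk in mm/slub.c below; the kmalloc() call and the "obj"
variable are illustrative and not part of the patch:

	/* Allocated without a slabobj_ext, so its alloc tag stays NULL */
	void *obj = kmalloc(64, GFP_KERNEL | __GFP_NO_OBJ_EXT);

	/* ... use obj ... */

	/* Mark the NULL tag as expected so alloc_tag_sub() does not warn */
	mark_obj_codetag_empty(obj);
	kfree(obj);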

include/linux/slab.h

Lines changed: 0 additions & 12 deletions
@@ -517,18 +517,6 @@ void kfree_sensitive(const void *objp);
 DEFINE_FREE(kfree, void *, if (!IS_ERR_OR_NULL(_T)) kfree(_T))
 DEFINE_FREE(kfree_sensitive, void *, if (_T) kfree_sensitive(_T))
 
-/**
- * ksize - Report actual allocation size of associated object
- *
- * @objp: Pointer returned from a prior kmalloc()-family allocation.
- *
- * This should not be used for writing beyond the originally requested
- * allocation size. Either use krealloc() or round up the allocation size
- * with kmalloc_size_roundup() prior to allocation. If this is used to
- * access beyond the originally requested allocation size, UBSAN_BOUNDS
- * and/or FORTIFY_SOURCE may trip, since they only know about the
- * originally allocated size via the __alloc_size attribute.
- */
 size_t ksize(const void *objp);
 
 #ifdef CONFIG_PRINTK
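
Only the duplicate copy of the ksize() kernel-doc is dropped here; the
guidance it carries still applies. A short sketch of that guidance, with
illustrative sizes and variable names of our own choosing:

	size_t want = 100;
	/* Round the request up front instead of writing into ksize() slack */
	size_t len = kmalloc_size_roundup(want);
	char *buf = kmalloc(len, GFP_KERNEL);

	if (buf) {
		/* FORTIFY/UBSAN learn 'len' via __alloc_size(), so this stays in bounds */
		memset(buf, 0, len);
		kfree(buf);
	}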

mm/slab.h

Lines changed: 2 additions & 2 deletions
@@ -290,14 +290,14 @@ static inline void *nearest_obj(struct kmem_cache *cache,
 
 /* Determine object index from a given position */
 static inline unsigned int __obj_to_index(const struct kmem_cache *cache,
-					  void *addr, void *obj)
+					  void *addr, const void *obj)
 {
 	return reciprocal_divide(kasan_reset_tag(obj) - addr,
 				 cache->reciprocal_size);
 }
 
 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
-					const struct slab *slab, void *obj)
+					const struct slab *slab, const void *obj)
 {
 	if (is_kfence_address(obj))
 		return 0;
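
The added const qualifiers let callers that only hold a const void *, such as
the new mark_obj_codetag_empty() in mm/slub.c below, pass their pointer
straight to obj_to_index() without casting. As a worked example of the index
math (illustrative numbers, not from the patch), for a cache with 64-byte
objects an object starting 192 bytes past the slab base lands in slot 3:

	/* reciprocal_size is precomputed from the 64-byte object size */
	unsigned int idx = reciprocal_divide(192, cache->reciprocal_size); /* == 3 */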

mm/slub.c

Lines changed: 35 additions & 16 deletions
@@ -2041,18 +2041,18 @@ static inline void dec_slabs_node(struct kmem_cache *s, int node,
 
 #ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG
 
-static inline void mark_objexts_empty(struct slabobj_ext *obj_exts)
+static inline void mark_obj_codetag_empty(const void *obj)
 {
-	struct slab *obj_exts_slab;
+	struct slab *obj_slab;
 	unsigned long slab_exts;
 
-	obj_exts_slab = virt_to_slab(obj_exts);
-	slab_exts = slab_obj_exts(obj_exts_slab);
+	obj_slab = virt_to_slab(obj);
+	slab_exts = slab_obj_exts(obj_slab);
 	if (slab_exts) {
 		get_slab_obj_exts(slab_exts);
-		unsigned int offs = obj_to_index(obj_exts_slab->slab_cache,
-						 obj_exts_slab, obj_exts);
-		struct slabobj_ext *ext = slab_obj_ext(obj_exts_slab,
+		unsigned int offs = obj_to_index(obj_slab->slab_cache,
+						 obj_slab, obj);
+		struct slabobj_ext *ext = slab_obj_ext(obj_slab,
 						       slab_exts, offs);
 
 		if (unlikely(is_codetag_empty(&ext->ref))) {
@@ -2090,7 +2090,7 @@ static inline void handle_failed_objexts_alloc(unsigned long obj_exts,
 
 #else /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */
 
-static inline void mark_objexts_empty(struct slabobj_ext *obj_exts) {}
+static inline void mark_obj_codetag_empty(const void *obj) {}
 static inline bool mark_failed_objexts_alloc(struct slab *slab) { return false; }
 static inline void handle_failed_objexts_alloc(unsigned long obj_exts,
 					       struct slabobj_ext *vec, unsigned int objects) {}
@@ -2196,7 +2196,6 @@ int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
 retry:
 	old_exts = READ_ONCE(slab->obj_exts);
 	handle_failed_objexts_alloc(old_exts, vec, objects);
-	slab_set_stride(slab, sizeof(struct slabobj_ext));
 
 	if (new_slab) {
 		/*
@@ -2211,7 +2210,7 @@ int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
 		 * assign slabobj_exts in parallel. In this case the existing
 		 * objcg vector should be reused.
 		 */
-		mark_objexts_empty(vec);
+		mark_obj_codetag_empty(vec);
 		if (unlikely(!allow_spin))
 			kfree_nolock(vec);
 		else
@@ -2254,7 +2253,7 @@ static inline void free_slab_obj_exts(struct slab *slab, bool allow_spin)
 	 * NULL, therefore replace NULL with CODETAG_EMPTY to indicate that
 	 * the extension for obj_exts is expected to be NULL.
 	 */
-	mark_objexts_empty(obj_exts);
+	mark_obj_codetag_empty(obj_exts);
 	if (allow_spin)
 		kfree(obj_exts);
 	else
@@ -2272,6 +2271,9 @@ static void alloc_slab_obj_exts_early(struct kmem_cache *s, struct slab *slab)
 	void *addr;
 	unsigned long obj_exts;
 
+	/* Initialize stride early to avoid memory ordering issues */
+	slab_set_stride(slab, sizeof(struct slabobj_ext));
+
 	if (!need_slab_obj_exts(s))
 		return;
 
@@ -2288,7 +2290,6 @@ static void alloc_slab_obj_exts_early(struct kmem_cache *s, struct slab *slab)
 		obj_exts |= MEMCG_DATA_OBJEXTS;
 #endif
 		slab->obj_exts = obj_exts;
-		slab_set_stride(slab, sizeof(struct slabobj_ext));
 	} else if (s->flags & SLAB_OBJ_EXT_IN_OBJ) {
 		unsigned int offset = obj_exts_offset_in_object(s);
 
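
After this hunk and the alloc_slab_obj_exts() hunk above, the stride is
written only once, by the new code at the top of alloc_slab_obj_exts_early().
A sketch of the window this closes, as we read the "memory ordering" comment;
the reader side is illustrative pseudocode, not a quote of the kernel source:

	writer (old placement)                 racing reader
	slab->obj_exts = obj_exts;             exts = READ_ONCE(slab->obj_exts);
	slab_set_stride(slab, ...);            if (exts)
	                                               ext = slab_obj_ext(slab, exts, idx);
	                                               /* may still see a stale stride */

Writing the stride before the slab is published to other CPUs removes the
window in which obj_exts can be observed as set while the stride is not.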
@@ -2312,6 +2313,10 @@ static void alloc_slab_obj_exts_early(struct kmem_cache *s, struct slab *slab)
 
 #else /* CONFIG_SLAB_OBJ_EXT */
 
+static inline void mark_obj_codetag_empty(const void *obj)
+{
+}
+
 static inline void init_slab_obj_exts(struct slab *slab)
 {
 }
@@ -2783,6 +2788,15 @@ static inline struct slab_sheaf *alloc_empty_sheaf(struct kmem_cache *s,
 
 static void free_empty_sheaf(struct kmem_cache *s, struct slab_sheaf *sheaf)
 {
+	/*
+	 * If the sheaf was created with __GFP_NO_OBJ_EXT flag then its
+	 * corresponding extension is NULL and alloc_tag_sub() will throw a
+	 * warning, therefore replace NULL with CODETAG_EMPTY to indicate
+	 * that the extension for this sheaf is expected to be NULL.
+	 */
+	if (s->flags & SLAB_KMALLOC)
+		mark_obj_codetag_empty(sheaf);
+
 	kfree(sheaf);
 
 	stat(s, SHEAF_FREE);
@@ -2822,7 +2836,7 @@ static struct slab_sheaf *alloc_full_sheaf(struct kmem_cache *s, gfp_t gfp)
 	if (!sheaf)
 		return NULL;
 
-	if (refill_sheaf(s, sheaf, gfp | __GFP_NOMEMALLOC)) {
+	if (refill_sheaf(s, sheaf, gfp | __GFP_NOMEMALLOC | __GFP_NOWARN)) {
 		free_empty_sheaf(s, sheaf);
 		return NULL;
 	}
@@ -4575,7 +4589,7 @@ __pcs_replace_empty_main(struct kmem_cache *s, struct slub_percpu_sheaves *pcs,
 		return NULL;
 
 	if (empty) {
-		if (!refill_sheaf(s, empty, gfp | __GFP_NOMEMALLOC)) {
+		if (!refill_sheaf(s, empty, gfp | __GFP_NOMEMALLOC | __GFP_NOWARN)) {
 			full = empty;
 		} else {
 			/*
@@ -4890,9 +4904,14 @@ EXPORT_SYMBOL(kmem_cache_alloc_node_noprof);
 static int __prefill_sheaf_pfmemalloc(struct kmem_cache *s,
 				      struct slab_sheaf *sheaf, gfp_t gfp)
 {
-	int ret = 0;
+	gfp_t gfp_nomemalloc;
+	int ret;
+
+	gfp_nomemalloc = gfp | __GFP_NOMEMALLOC;
+	if (gfp_pfmemalloc_allowed(gfp))
+		gfp_nomemalloc |= __GFP_NOWARN;
 
-	ret = refill_sheaf(s, sheaf, gfp | __GFP_NOMEMALLOC);
+	ret = refill_sheaf(s, sheaf, gfp_nomemalloc);
 
 	if (likely(!ret || !gfp_pfmemalloc_allowed(gfp)))
 		return ret;
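
The refill_sheaf() changes implement the rule named in the commit message:
suppress the page allocation warning only on attempts that still have a
fallback. A generic sketch of the pattern, with hypothetical helper names
(try_alloc, fallback_available) standing in for the sheaf code, since the
retry path that follows the early return above is not part of this hunk:

	gfp_t first = gfp | __GFP_NOMEMALLOC;

	if (fallback_available(gfp))		/* hypothetical predicate */
		first |= __GFP_NOWARN;		/* the later attempt can still warn */

	ret = try_alloc(first);			/* hypothetical helper */
	if (!ret || !fallback_available(gfp))
		return ret;

	/* Fallback attempt: may dip into memory reserves and may warn */
	return try_alloc(gfp);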
