@@ -2041,18 +2041,18 @@ static inline void dec_slabs_node(struct kmem_cache *s, int node,
20412041
20422042#ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG
20432043
2044      -	static inline void mark_objexts_empty(struct slabobj_ext *obj_exts)
     2044 +	static inline void mark_obj_codetag_empty(const void *obj)
2045 2045	{
2046      -		struct slab *obj_exts_slab;
     2046 +		struct slab *obj_slab;
2047 2047		unsigned long slab_exts;
2048 2048
2049      -		obj_exts_slab = virt_to_slab(obj_exts);
2050      -		slab_exts = slab_obj_exts(obj_exts_slab);
     2049 +		obj_slab = virt_to_slab(obj);
     2050 +		slab_exts = slab_obj_exts(obj_slab);
2051 2051		if (slab_exts) {
2052 2052			get_slab_obj_exts(slab_exts);
2053      -			unsigned int offs = obj_to_index(obj_exts_slab->slab_cache,
2054      -							 obj_exts_slab, obj_exts);
2055      -			struct slabobj_ext *ext = slab_obj_ext(obj_exts_slab,
     2053 +			unsigned int offs = obj_to_index(obj_slab->slab_cache,
     2054 +							 obj_slab, obj);
     2055 +			struct slabobj_ext *ext = slab_obj_ext(obj_slab,
2056 2056							       slab_exts, offs);
2057 2057
2058 2058			if (unlikely(is_codetag_empty(&ext->ref))) {
@@ -2090,7 +2090,7 @@ static inline void handle_failed_objexts_alloc(unsigned long obj_exts,
20902090
20912091#else /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */
20922092
2093      -	static inline void mark_objexts_empty(struct slabobj_ext *obj_exts) {}
     2093 +	static inline void mark_obj_codetag_empty(const void *obj) {}
2094 2094	static inline bool mark_failed_objexts_alloc(struct slab *slab) { return false; }
2095 2095	static inline void handle_failed_objexts_alloc(unsigned long obj_exts,
2096 2096						       struct slabobj_ext *vec, unsigned int objects) {}
@@ -2196,7 +2196,6 @@ int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
2196 2196	retry:
2197 2197		old_exts = READ_ONCE(slab->obj_exts);
2198 2198		handle_failed_objexts_alloc(old_exts, vec, objects);
2199      -		slab_set_stride(slab, sizeof(struct slabobj_ext));
2200 2199
2201 2200		if (new_slab) {
2202 2201			/*
@@ -2211,7 +2210,7 @@ int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
2211 2210		 * assign slabobj_exts in parallel. In this case the existing
2212 2211		 * objcg vector should be reused.
2213 2212		 */
2214      -		mark_objexts_empty(vec);
     2213 +		mark_obj_codetag_empty(vec);
2215 2214		if (unlikely(!allow_spin))
2216 2215			kfree_nolock(vec);
2217 2216		else
@@ -2254,7 +2253,7 @@ static inline void free_slab_obj_exts(struct slab *slab, bool allow_spin)
2254 2253	 * NULL, therefore replace NULL with CODETAG_EMPTY to indicate that
2255 2254	 * the extension for obj_exts is expected to be NULL.
2256 2255	 */
2257      -	mark_objexts_empty(obj_exts);
     2256 +	mark_obj_codetag_empty(obj_exts);
2258 2257	if (allow_spin)
2259 2258		kfree(obj_exts);
2260 2259	else
@@ -2272,6 +2271,9 @@ static void alloc_slab_obj_exts_early(struct kmem_cache *s, struct slab *slab)
2272 2271		void *addr;
2273 2272		unsigned long obj_exts;
2274 2273
     2274 +	/* Initialize stride early to avoid memory ordering issues */
     2275 +	slab_set_stride(slab, sizeof(struct slabobj_ext));
     2276 +
2275 2277		if (!need_slab_obj_exts(s))
2276 2278			return;
2277 2279
@@ -2288,7 +2290,6 @@ static void alloc_slab_obj_exts_early(struct kmem_cache *s, struct slab *slab)
2288 2290		obj_exts |= MEMCG_DATA_OBJEXTS;
2289 2291	#endif
2290 2292		slab->obj_exts = obj_exts;
2291      -		slab_set_stride(slab, sizeof(struct slabobj_ext));
2292 2293	} else if (s->flags & SLAB_OBJ_EXT_IN_OBJ) {
2293 2294		unsigned int offset = obj_exts_offset_in_object(s);
2294 2295
@@ -2312,6 +2313,10 @@ static void alloc_slab_obj_exts_early(struct kmem_cache *s, struct slab *slab)
23122313
23132314#else /* CONFIG_SLAB_OBJ_EXT */
23142315
     2316 +	static inline void mark_obj_codetag_empty(const void *obj)
     2317 +	{
     2318 +	}
     2319 +
2315 2320	static inline void init_slab_obj_exts(struct slab *slab)
2316 2321	{
2317 2322	}
@@ -2783,6 +2788,15 @@ static inline struct slab_sheaf *alloc_empty_sheaf(struct kmem_cache *s,
27832788
2784 2789	static void free_empty_sheaf(struct kmem_cache *s, struct slab_sheaf *sheaf)
2785 2790	{
     2791 +	/*
     2792 +	 * If the sheaf was created with __GFP_NO_OBJ_EXT flag then its
     2793 +	 * corresponding extension is NULL and alloc_tag_sub() will throw a
     2794 +	 * warning, therefore replace NULL with CODETAG_EMPTY to indicate
     2795 +	 * that the extension for this sheaf is expected to be NULL.
     2796 +	 */
     2797 +	if (s->flags & SLAB_KMALLOC)
     2798 +		mark_obj_codetag_empty(sheaf);
     2799 +
2786 2800		kfree(sheaf);
2787 2801
2788 2802		stat(s, SHEAF_FREE);
@@ -2822,7 +2836,7 @@ static struct slab_sheaf *alloc_full_sheaf(struct kmem_cache *s, gfp_t gfp)
2822 2836		if (!sheaf)
2823 2837			return NULL;
2824 2838
2825      -		if (refill_sheaf(s, sheaf, gfp | __GFP_NOMEMALLOC)) {
     2839 +		if (refill_sheaf(s, sheaf, gfp | __GFP_NOMEMALLOC | __GFP_NOWARN)) {
2826 2840			free_empty_sheaf(s, sheaf);
2827 2841			return NULL;
2828 2842		}
@@ -4575,7 +4589,7 @@ __pcs_replace_empty_main(struct kmem_cache *s, struct slub_percpu_sheaves *pcs,
4575 4589		return NULL;
4576 4590
4577 4591	if (empty) {
4578      -		if (!refill_sheaf(s, empty, gfp | __GFP_NOMEMALLOC)) {
     4592 +		if (!refill_sheaf(s, empty, gfp | __GFP_NOMEMALLOC | __GFP_NOWARN)) {
4579 4593			full = empty;
4580 4594		} else {
4581 4595			/*
@@ -4890,9 +4904,14 @@ EXPORT_SYMBOL(kmem_cache_alloc_node_noprof);
4890 4904	static int __prefill_sheaf_pfmemalloc(struct kmem_cache *s,
4891 4905					      struct slab_sheaf *sheaf, gfp_t gfp)
4892 4906	{
4893      -		int ret = 0;
     4907 +	gfp_t gfp_nomemalloc;
     4908 +	int ret;
     4909 +
     4910 +	gfp_nomemalloc = gfp | __GFP_NOMEMALLOC;
     4911 +	if (gfp_pfmemalloc_allowed(gfp))
     4912 +		gfp_nomemalloc |= __GFP_NOWARN;
4894 4913
4895      -		ret = refill_sheaf(s, sheaf, gfp | __GFP_NOMEMALLOC);
     4914 +	ret = refill_sheaf(s, sheaf, gfp_nomemalloc);
4896 4915
4897 4916		if (likely(!ret || !gfp_pfmemalloc_allowed(gfp)))
4898 4917		return ret;
0 commit comments