@@ -2033,11 +2033,54 @@ prepare_slab_obj_exts_hook(struct kmem_cache *s, gfp_t flags, void *p)
20332033 return slab_obj_exts (slab ) + obj_to_index (s , slab , p );
20342034}
20352035
2036+ #else /* CONFIG_SLAB_OBJ_EXT */
2037+
2038+ static int alloc_slab_obj_exts (struct slab * slab , struct kmem_cache * s ,
2039+ gfp_t gfp , bool new_slab )
2040+ {
2041+ return 0 ;
2042+ }
2043+
/*
 * Stub for !CONFIG_SLAB_OBJ_EXT builds: there is no per-slab extension
 * vector to release, so this is a no-op.
 */
static inline void free_slab_obj_exts(struct slab *slab)
{
}
2047+
/*
 * Stub for !CONFIG_SLAB_OBJ_EXT builds: object extensions can never be
 * required, so callers may skip all extension bookkeeping.
 */
static inline bool need_slab_obj_ext(void)
{
	return false;
}
2052+
2053+ static inline struct slabobj_ext *
2054+ prepare_slab_obj_exts_hook (struct kmem_cache * s , gfp_t flags , void * p )
2055+ {
2056+ return NULL ;
2057+ }
2058+
2059+ #endif /* CONFIG_SLAB_OBJ_EXT */
2060+
2061+ #ifdef CONFIG_MEM_ALLOC_PROFILING
2062+
2063+ static inline void
2064+ alloc_tagging_slab_alloc_hook (struct kmem_cache * s , void * object , gfp_t flags )
2065+ {
2066+ if (need_slab_obj_ext ()) {
2067+ struct slabobj_ext * obj_exts ;
2068+
2069+ obj_exts = prepare_slab_obj_exts_hook (s , flags , object );
2070+ /*
2071+ * Currently obj_exts is used only for allocation profiling.
2072+ * If other users appear then mem_alloc_profiling_enabled()
2073+ * check should be added before alloc_tag_add().
2074+ */
2075+ if (likely (obj_exts ))
2076+ alloc_tag_add (& obj_exts -> ref , current -> alloc_tag , s -> size );
2077+ }
2078+ }
2079+
20362080static inline void
20372081alloc_tagging_slab_free_hook (struct kmem_cache * s , struct slab * slab , void * * p ,
20382082 int objects )
20392083{
2040- #ifdef CONFIG_MEM_ALLOC_PROFILING
20412084 struct slabobj_ext * obj_exts ;
20422085 int i ;
20432086
@@ -2053,30 +2096,13 @@ alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p,
20532096
20542097 alloc_tag_sub (& obj_exts [off ].ref , s -> size );
20552098 }
2056- #endif
2057- }
2058-
2059- #else /* CONFIG_SLAB_OBJ_EXT */
2060-
2061- static int alloc_slab_obj_exts (struct slab * slab , struct kmem_cache * s ,
2062- gfp_t gfp , bool new_slab )
2063- {
2064- return 0 ;
2065- }
2066-
2067- static inline void free_slab_obj_exts (struct slab * slab )
2068- {
20692099}
20702100
2071- static inline bool need_slab_obj_ext (void )
2072- {
2073- return false;
2074- }
2101+ #else /* CONFIG_MEM_ALLOC_PROFILING */
20752102
2076- static inline struct slabobj_ext *
2077- prepare_slab_obj_exts_hook (struct kmem_cache * s , gfp_t flags , void * p )
2103+ static inline void
2104+ alloc_tagging_slab_alloc_hook (struct kmem_cache * s , void * object , gfp_t flags )
20782105{
2079- return NULL ;
20802106}
20812107
20822108static inline void
@@ -2085,7 +2111,8 @@ alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p,
20852111{
20862112}
20872113
2088- #endif /* CONFIG_SLAB_OBJ_EXT */
2114+ #endif /* CONFIG_MEM_ALLOC_PROFILING */
2115+
20892116
20902117#ifdef CONFIG_MEMCG_KMEM
20912118
@@ -3944,20 +3971,7 @@ bool slab_post_alloc_hook(struct kmem_cache *s, struct list_lru *lru,
39443971 kmemleak_alloc_recursive (p [i ], s -> object_size , 1 ,
39453972 s -> flags , init_flags );
39463973 kmsan_slab_alloc (s , p [i ], init_flags );
3947- #ifdef CONFIG_MEM_ALLOC_PROFILING
3948- if (need_slab_obj_ext ()) {
3949- struct slabobj_ext * obj_exts ;
3950-
3951- obj_exts = prepare_slab_obj_exts_hook (s , flags , p [i ]);
3952- /*
3953- * Currently obj_exts is used only for allocation profiling.
3954- * If other users appear then mem_alloc_profiling_enabled()
3955- * check should be added before alloc_tag_add().
3956- */
3957- if (likely (obj_exts ))
3958- alloc_tag_add (& obj_exts -> ref , current -> alloc_tag , s -> size );
3959- }
3960- #endif
3974+ alloc_tagging_slab_alloc_hook (s , p [i ], flags );
39613975 }
39623976
39633977 return memcg_slab_post_alloc_hook (s , lru , flags , size , p );
0 commit comments