Skip to content

Commit d5dc831

Browse files
committed
slab: replace cache_from_obj() with inline checks
Eric Dumazet noticed cache_from_obj() is not inlined with clang and suggested splitting it into two functions, where the smaller inlined one assumes the fastpath is !CONFIG_SLAB_FREELIST_HARDENED. However most distros enable it these days and so this would likely add a function call to the object free fastpaths. Instead take a step back and consider that cache_from_obj() is a relic from when memcgs created their separate kmem_cache copies, as the outdated comment in build_detached_freelist() reminds us. Meanwhile hardening/debugging had reused cache_from_obj() to validate that the freed object really belongs to a slab from the cache we think we are freeing from. In build_detached_freelist() simply remove this, because it did not handle the NULL result from cache_from_obj() failure properly, nor validate objects (for the NULL slab->slab_cache pointer) when called via kfree_bulk(). If anyone is motivated to implement it properly, it should be possible in a similar way to kmem_cache_free(). In kmem_cache_free(), do the hardening/debugging checks directly so they are inlined by definition and virt_to_slab(obj) is performed just once. In case they failed, call a newly introduced warn_free_bad_obj() that performs the warnings outside of the fastpath, and leak the object. As an intentional change, leak the object when slab->slab_cache differs from the cache given to kmem_cache_free(). Previously we would only leak when the object is not in a valid slab page or the slab->slab_cache pointer is NULL, and otherwise trust the slab->slab_cache over the kmem_cache_free() argument. But if those differ, it means something went wrong enough that it's best not to continue freeing. As a result the fastpath should be inlined in all configs and the warnings are moved away. 
Reported-by: Eric Dumazet <edumazet@google.com>
Closes: https://lore.kernel.org/all/20260115130642.3419324-1-edumazet@google.com/
Reviewed-by: Harry Yoo <harry.yoo@oracle.com>
Reviewed-by: Hao Li <hao.li@linux.dev>
Acked-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
1 parent 99a3e3a commit d5dc831

1 file changed

Lines changed: 33 additions & 23 deletions

File tree

mm/slub.c

Lines changed: 33 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -6742,30 +6742,26 @@ void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr)
67426742
}
67436743
#endif
67446744

6745-
static inline struct kmem_cache *virt_to_cache(const void *obj)
6745+
static noinline void warn_free_bad_obj(struct kmem_cache *s, void *obj)
67466746
{
6747+
struct kmem_cache *cachep;
67476748
struct slab *slab;
67486749

67496750
slab = virt_to_slab(obj);
6750-
if (WARN_ONCE(!slab, "%s: Object is not a Slab page!\n", __func__))
6751-
return NULL;
6752-
return slab->slab_cache;
6753-
}
6754-
6755-
static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
6756-
{
6757-
struct kmem_cache *cachep;
6751+
if (WARN_ONCE(!slab,
6752+
"kmem_cache_free(%s, %p): object is not in a slab page\n",
6753+
s->name, obj))
6754+
return;
67586755

6759-
if (!IS_ENABLED(CONFIG_SLAB_FREELIST_HARDENED) &&
6760-
!kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS))
6761-
return s;
6756+
cachep = slab->slab_cache;
67626757

6763-
cachep = virt_to_cache(x);
6764-
if (WARN(cachep && cachep != s,
6765-
"%s: Wrong slab cache. %s but object is from %s\n",
6766-
__func__, s->name, cachep->name))
6767-
print_tracking(cachep, x);
6768-
return cachep;
6758+
if (WARN_ONCE(cachep != s,
6759+
"kmem_cache_free(%s, %p): object belongs to different cache %s\n",
6760+
s->name, obj, cachep ? cachep->name : "(NULL)")) {
6761+
if (cachep)
6762+
print_tracking(cachep, obj);
6763+
return;
6764+
}
67696765
}
67706766

67716767
/**
@@ -6778,11 +6774,25 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
67786774
*/
67796775
void kmem_cache_free(struct kmem_cache *s, void *x)
67806776
{
6781-
s = cache_from_obj(s, x);
6782-
if (!s)
6783-
return;
6777+
struct slab *slab;
6778+
6779+
slab = virt_to_slab(x);
6780+
6781+
if (IS_ENABLED(CONFIG_SLAB_FREELIST_HARDENED) ||
6782+
kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS)) {
6783+
6784+
/*
6785+
* Intentionally leak the object in these cases, because it
6786+
* would be too dangerous to continue.
6787+
*/
6788+
if (unlikely(!slab || (slab->slab_cache != s))) {
6789+
warn_free_bad_obj(s, x);
6790+
return;
6791+
}
6792+
}
6793+
67846794
trace_kmem_cache_free(_RET_IP_, x, s);
6785-
slab_free(s, virt_to_slab(x), x, _RET_IP_);
6795+
slab_free(s, slab, x, _RET_IP_);
67866796
}
67876797
EXPORT_SYMBOL(kmem_cache_free);
67886798

@@ -7309,7 +7319,7 @@ int build_detached_freelist(struct kmem_cache *s, size_t size,
73097319
df->s = slab->slab_cache;
73107320
} else {
73117321
df->slab = slab;
7312-
df->s = cache_from_obj(s, object); /* Support for memcg */
7322+
df->s = s;
73137323
}
73147324

73157325
/* Start new detached freelist */

0 commit comments

Comments
 (0)