Skip to content

Commit c4d6d78

Browse files
hygonitehcaster
authored and committed
mm/slab: allow freeing kmalloc_nolock()'d objects using kfree[_rcu]()
Slab objects that are allocated with kmalloc_nolock() must be freed using kfree_nolock() because only a subset of alloc hooks are called, since kmalloc_nolock() can't spin on a lock during allocation. This imposes a limitation: such objects cannot be freed with kfree_rcu(), forcing users to work around this limitation by calling call_rcu() with a callback that frees the object using kfree_nolock(). Remove this limitation by teaching kmemleak to gracefully ignore cases when kmemleak_free() or kmemleak_ignore() is called without a prior kmemleak_alloc(). Unlike kmemleak, kfence already handles this case, because, due to its design, only a subset of allocations are served from kfence. With this change, kfree() and kfree_rcu() can be used to free objects that are allocated using kmalloc_nolock(). Suggested-by: Alexei Starovoitov <ast@kernel.org> Acked-by: Alexei Starovoitov <ast@kernel.org> Reviewed-by: Catalin Marinas <catalin.marinas@arm.com> Signed-off-by: Harry Yoo <harry.yoo@oracle.com> Link: https://patch.msgid.link/20260210044642.139482-2-harry.yoo@oracle.com Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
1 parent a1e244a commit c4d6d78

3 files changed

Lines changed: 32 additions & 15 deletions

File tree

include/linux/rcupdate.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1076,8 +1076,8 @@ static inline void rcu_read_unlock_migrate(void)
10761076
* either fall back to use of call_rcu() or rearrange the structure to
10771077
* position the rcu_head structure into the first 4096 bytes.
10781078
*
1079-
* The object to be freed can be allocated either by kmalloc() or
1080-
* kmem_cache_alloc().
1079+
* The object to be freed can be allocated either by kmalloc(),
1080+
* kmalloc_nolock(), or kmem_cache_alloc().
10811081
*
10821082
* Note that the allowable offset might decrease in the future.
10831083
*

mm/kmemleak.c

Lines changed: 10 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -837,13 +837,12 @@ static void delete_object_full(unsigned long ptr, unsigned int objflags)
837837
struct kmemleak_object *object;
838838

839839
object = find_and_remove_object(ptr, 0, objflags);
840-
if (!object) {
841-
#ifdef DEBUG
842-
kmemleak_warn("Freeing unknown object at 0x%08lx\n",
843-
ptr);
844-
#endif
840+
if (!object)
841+
/*
842+
* kmalloc_nolock() -> kfree() calls kmemleak_free()
843+
* without kmemleak_alloc().
844+
*/
845845
return;
846-
}
847846
__delete_object(object);
848847
}
849848

@@ -926,13 +925,12 @@ static void paint_ptr(unsigned long ptr, int color, unsigned int objflags)
926925
struct kmemleak_object *object;
927926

928927
object = __find_and_get_object(ptr, 0, objflags);
929-
if (!object) {
930-
kmemleak_warn("Trying to color unknown object at 0x%08lx as %s\n",
931-
ptr,
932-
(color == KMEMLEAK_GREY) ? "Grey" :
933-
(color == KMEMLEAK_BLACK) ? "Black" : "Unknown");
928+
if (!object)
929+
/*
930+
* kmalloc_nolock() -> kfree_rcu() calls kmemleak_ignore()
931+
* without kmemleak_alloc().
932+
*/
934933
return;
935-
}
936934
paint_it(object, color);
937935
put_object(object);
938936
}

mm/slub.c

Lines changed: 20 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2585,6 +2585,24 @@ struct rcu_delayed_free {
25852585
* Returns true if freeing of the object can proceed, false if its reuse
25862586
* was delayed by CONFIG_SLUB_RCU_DEBUG or KASAN quarantine, or it was returned
25872587
* to KFENCE.
2588+
*
2589+
* For objects allocated via kmalloc_nolock(), only a subset of alloc hooks
2590+
* are invoked, so some free hooks must handle asymmetric hook calls.
2591+
*
2592+
* Alloc hooks called for kmalloc_nolock():
2593+
* - kmsan_slab_alloc()
2594+
* - kasan_slab_alloc()
2595+
* - memcg_slab_post_alloc_hook()
2596+
* - alloc_tagging_slab_alloc_hook()
2597+
*
2598+
* Free hooks that must handle missing corresponding alloc hooks:
2599+
* - kmemleak_free_recursive()
2600+
* - kfence_free()
2601+
*
2602+
* Free hooks that have no alloc hook counterpart, and thus safe to call:
2603+
* - debug_check_no_locks_freed()
2604+
* - debug_check_no_obj_freed()
2605+
* - __kcsan_check_access()
25882606
*/
25892607
static __always_inline
25902608
bool slab_free_hook(struct kmem_cache *s, void *x, bool init,
@@ -6394,7 +6412,7 @@ void kvfree_rcu_cb(struct rcu_head *head)
63946412

63956413
/**
63966414
* kfree - free previously allocated memory
6397-
* @object: pointer returned by kmalloc() or kmem_cache_alloc()
6415+
* @object: pointer returned by kmalloc(), kmalloc_nolock(), or kmem_cache_alloc()
63986416
*
63996417
* If @object is NULL, no operation is performed.
64006418
*/
@@ -6413,6 +6431,7 @@ void kfree(const void *object)
64136431
page = virt_to_page(object);
64146432
slab = page_slab(page);
64156433
if (!slab) {
6434+
/* kmalloc_nolock() doesn't support large kmalloc */
64166435
free_large_kmalloc(page, (void *)object);
64176436
return;
64186437
}

0 commit comments

Comments (0)