Skip to content

Commit 33f9714

Browse files
aryabinin authored and gregkh committed
mm/kasan: fix KASAN poisoning in vrealloc()
commit 9b47d4e upstream. A KASAN warning can be triggered when vrealloc() changes the requested size to a value that is not aligned to KASAN_GRANULE_SIZE. ------------[ cut here ]------------ WARNING: CPU: 2 PID: 1 at mm/kasan/shadow.c:174 kasan_unpoison+0x40/0x48 ... pc : kasan_unpoison+0x40/0x48 lr : __kasan_unpoison_vmalloc+0x40/0x68 Call trace: kasan_unpoison+0x40/0x48 (P) vrealloc_node_align_noprof+0x200/0x320 bpf_patch_insn_data+0x90/0x2f0 convert_ctx_accesses+0x8c0/0x1158 bpf_check+0x1488/0x1900 bpf_prog_load+0xd20/0x1258 __sys_bpf+0x96c/0xdf0 __arm64_sys_bpf+0x50/0xa0 invoke_syscall+0x90/0x160 Introduce a dedicated kasan_vrealloc() helper that centralizes KASAN handling for vmalloc reallocations. The helper accounts for KASAN granule alignment when growing or shrinking an allocation and ensures that partial granules are handled correctly. Use this helper from vrealloc_node_align_noprof() to fix poisoning logic. [ryabinin.a.a@gmail.com: move kasan_enabled() check, fix build] Link: https://lkml.kernel.org/r/20260119144509.32767-1-ryabinin.a.a@gmail.com Link: https://lkml.kernel.org/r/20260113191516.31015-1-ryabinin.a.a@gmail.com Fixes: d699440 ("mm: fix vrealloc()'s KASAN poisoning logic") Signed-off-by: Andrey Ryabinin <ryabinin.a.a@gmail.com> Reported-by: Maciej Żenczykowski <maze@google.com> Reported-by: <joonki.min@samsung-slsi.corp-partner.google.com> Closes: https://lkml.kernel.org/r/CANP3RGeuRW53vukDy7WDO3FiVgu34-xVJYkfpm08oLO3odYFrA@mail.gmail.com Reviewed-by: Andrey Konovalov <andreyknvl@gmail.com> Tested-by: Maciej Wieczor-Retman <maciej.wieczor-retman@intel.com> Cc: Alexander Potapenko <glider@google.com> Cc: Dmitriy Vyukov <dvyukov@google.com> Cc: Dmitry Vyukov <dvyukov@google.com> Cc: Uladzislau Rezki <urezki@gmail.com> Cc: Vincenzo Frascino <vincenzo.frascino@arm.com> Cc: <stable@vger.kernel.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
1 parent 0ac0e2e commit 33f9714

3 files changed

Lines changed: 37 additions & 5 deletions

File tree

include/linux/kasan.h

Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -625,6 +625,17 @@ kasan_unpoison_vmap_areas(struct vm_struct **vms, int nr_vms,
625625
__kasan_unpoison_vmap_areas(vms, nr_vms, flags);
626626
}
627627

628+
void __kasan_vrealloc(const void *start, unsigned long old_size,
629+
unsigned long new_size);
630+
631+
static __always_inline void kasan_vrealloc(const void *start,
632+
unsigned long old_size,
633+
unsigned long new_size)
634+
{
635+
if (kasan_enabled())
636+
__kasan_vrealloc(start, old_size, new_size);
637+
}
638+
628639
#else /* CONFIG_KASAN_VMALLOC */
629640

630641
static inline void kasan_populate_early_vm_area_shadow(void *start,
@@ -654,6 +665,9 @@ kasan_unpoison_vmap_areas(struct vm_struct **vms, int nr_vms,
654665
kasan_vmalloc_flags_t flags)
655666
{ }
656667

668+
static inline void kasan_vrealloc(const void *start, unsigned long old_size,
669+
unsigned long new_size) { }
670+
657671
#endif /* CONFIG_KASAN_VMALLOC */
658672

659673
#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \

mm/kasan/common.c

Lines changed: 21 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -613,4 +613,25 @@ void __kasan_unpoison_vmap_areas(struct vm_struct **vms, int nr_vms,
613613
__kasan_unpoison_vmalloc(addr, size, flags | KASAN_VMALLOC_KEEP_TAG);
614614
}
615615
}
616+
617+
void __kasan_vrealloc(const void *addr, unsigned long old_size,
618+
unsigned long new_size)
619+
{
620+
if (new_size < old_size) {
621+
kasan_poison_last_granule(addr, new_size);
622+
623+
new_size = round_up(new_size, KASAN_GRANULE_SIZE);
624+
old_size = round_up(old_size, KASAN_GRANULE_SIZE);
625+
if (new_size < old_size)
626+
__kasan_poison_vmalloc(addr + new_size,
627+
old_size - new_size);
628+
} else if (new_size > old_size) {
629+
old_size = round_down(old_size, KASAN_GRANULE_SIZE);
630+
__kasan_unpoison_vmalloc(addr + old_size,
631+
new_size - old_size,
632+
KASAN_VMALLOC_PROT_NORMAL |
633+
KASAN_VMALLOC_VM_ALLOC |
634+
KASAN_VMALLOC_KEEP_TAG);
635+
}
636+
}
616637
#endif

mm/vmalloc.c

Lines changed: 2 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -4167,24 +4167,21 @@ void *vrealloc_node_align_noprof(const void *p, size_t size, unsigned long align
41674167
if (want_init_on_free() || want_init_on_alloc(flags))
41684168
memset((void *)p + size, 0, old_size - size);
41694169
vm->requested_size = size;
4170-
kasan_poison_vmalloc(p + size, old_size - size);
4170+
kasan_vrealloc(p, old_size, size);
41714171
return (void *)p;
41724172
}
41734173

41744174
/*
41754175
* We already have the bytes available in the allocation; use them.
41764176
*/
41774177
if (size <= alloced_size) {
4178-
kasan_unpoison_vmalloc(p + old_size, size - old_size,
4179-
KASAN_VMALLOC_PROT_NORMAL |
4180-
KASAN_VMALLOC_VM_ALLOC |
4181-
KASAN_VMALLOC_KEEP_TAG);
41824178
/*
41834179
* No need to zero memory here, as unused memory will have
41844180
* already been zeroed at initial allocation time or during
41854181
* realloc shrink time.
41864182
*/
41874183
vm->requested_size = size;
4184+
kasan_vrealloc(p, old_size, size);
41884185
return (void *)p;
41894186
}
41904187

0 commit comments

Comments
 (0)