Skip to content

Commit 537d196

Browse files
committed
Merge tag 'mm-hotfixes-stable-2025-11-10-19-30' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
Pull misc fixes from Andrew Morton:

 "26 hotfixes. 22(!) are cc:stable, 22 are MM.

  - address some Kexec Handover issues (Pasha Tatashin)

  - fix handling of large folios which are mapped outside i_size (Kiryl Shutsemau)

  - fix some DAMON time issues on 32-bit machines (Quanmin Yan)

  Plus the usual shower of singletons"

* tag 'mm-hotfixes-stable-2025-11-10-19-30' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm: (26 commits)
  kho: warn and exit when unpreserved page wasn't preserved
  kho: fix unpreservation of higher-order vmalloc preservations
  kho: fix out-of-bounds access of vmalloc chunk
  MAINTAINERS: add Chris and Kairui as the swap maintainer
  mm/secretmem: fix use-after-free race in fault handler
  mm/huge_memory: initialise the tags of the huge zero folio
  nilfs2: avoid having an active sc_timer before freeing sci
  scripts/decode_stacktrace.sh: fix build ID and PC source parsing
  mm/damon/sysfs: change next_update_jiffies to a global variable
  mm/damon/stat: change last_refresh_jiffies to a global variable
  maple_tree: fix tracepoint string pointers
  codetag: debug: handle existing CODETAG_EMPTY in mark_objexts_empty for slabobj_ext
  mm/mremap: honour writable bit in mremap pte batching
  gcov: add support for GCC 15
  mm/mm_init: fix hash table order logging in alloc_large_system_hash()
  mm/truncate: unmap large folio on split failure
  mm/memory: do not populate page table entries beyond i_size
  fs/proc: fix uaf in proc_readdir_de()
  mm/huge_memory: preserve PG_has_hwpoisoned if a folio is split to >0 order
  ksm: use range-walk function to jump over holes in scan_get_next_rmap_item
  ...
2 parents 4427259 + b05addf commit 537d196

30 files changed

Lines changed: 424 additions & 152 deletions

MAINTAINERS

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -16498,12 +16498,12 @@ F: mm/secretmem.c
1649816498

1649916499
MEMORY MANAGEMENT - SWAP
1650016500
M: Andrew Morton <akpm@linux-foundation.org>
16501+
M: Chris Li <chrisl@kernel.org>
16502+
M: Kairui Song <kasong@tencent.com>
1650116503
R: Kemeng Shi <shikemeng@huaweicloud.com>
16502-
R: Kairui Song <kasong@tencent.com>
1650316504
R: Nhat Pham <nphamcs@gmail.com>
1650416505
R: Baoquan He <bhe@redhat.com>
1650516506
R: Barry Song <baohua@kernel.org>
16506-
R: Chris Li <chrisl@kernel.org>
1650716507
L: linux-mm@kvack.org
1650816508
S: Maintained
1650916509
F: Documentation/mm/swap-table.rst

arch/arm64/kernel/mte.c

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -476,7 +476,8 @@ static int __access_remote_tags(struct mm_struct *mm, unsigned long addr,
476476

477477
folio = page_folio(page);
478478
if (folio_test_hugetlb(folio))
479-
WARN_ON_ONCE(!folio_test_hugetlb_mte_tagged(folio));
479+
WARN_ON_ONCE(!folio_test_hugetlb_mte_tagged(folio) &&
480+
!is_huge_zero_folio(folio));
480481
else
481482
WARN_ON_ONCE(!page_mte_tagged(page) && !is_zero_page(page));
482483

arch/arm64/mm/fault.c

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -969,6 +969,16 @@ struct folio *vma_alloc_zeroed_movable_folio(struct vm_area_struct *vma,
969969

970970
void tag_clear_highpage(struct page *page)
971971
{
972+
/*
973+
* Check if MTE is supported and fall back to clear_highpage().
974+
* get_huge_zero_folio() unconditionally passes __GFP_ZEROTAGS and
975+
* post_alloc_hook() will invoke tag_clear_highpage().
976+
*/
977+
if (!system_supports_mte()) {
978+
clear_highpage(page);
979+
return;
980+
}
981+
972982
/* Newly allocated page, shouldn't have been tagged yet */
973983
WARN_ON_ONCE(!try_page_mte_tagging(page));
974984
mte_zero_clear_page_tags(page_address(page));

fs/nilfs2/segment.c

Lines changed: 6 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2768,7 +2768,12 @@ static void nilfs_segctor_destroy(struct nilfs_sc_info *sci)
27682768

27692769
if (sci->sc_task) {
27702770
wake_up(&sci->sc_wait_daemon);
2771-
kthread_stop(sci->sc_task);
2771+
if (kthread_stop(sci->sc_task)) {
2772+
spin_lock(&sci->sc_state_lock);
2773+
sci->sc_task = NULL;
2774+
timer_shutdown_sync(&sci->sc_timer);
2775+
spin_unlock(&sci->sc_state_lock);
2776+
}
27722777
}
27732778

27742779
spin_lock(&sci->sc_state_lock);

fs/proc/generic.c

Lines changed: 9 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -698,6 +698,12 @@ void pde_put(struct proc_dir_entry *pde)
698698
}
699699
}
700700

701+
static void pde_erase(struct proc_dir_entry *pde, struct proc_dir_entry *parent)
702+
{
703+
rb_erase(&pde->subdir_node, &parent->subdir);
704+
RB_CLEAR_NODE(&pde->subdir_node);
705+
}
706+
701707
/*
702708
* Remove a /proc entry and free it if it's not currently in use.
703709
*/
@@ -720,7 +726,7 @@ void remove_proc_entry(const char *name, struct proc_dir_entry *parent)
720726
WARN(1, "removing permanent /proc entry '%s'", de->name);
721727
de = NULL;
722728
} else {
723-
rb_erase(&de->subdir_node, &parent->subdir);
729+
pde_erase(de, parent);
724730
if (S_ISDIR(de->mode))
725731
parent->nlink--;
726732
}
@@ -764,7 +770,7 @@ int remove_proc_subtree(const char *name, struct proc_dir_entry *parent)
764770
root->parent->name, root->name);
765771
return -EINVAL;
766772
}
767-
rb_erase(&root->subdir_node, &parent->subdir);
773+
pde_erase(root, parent);
768774

769775
de = root;
770776
while (1) {
@@ -776,7 +782,7 @@ int remove_proc_subtree(const char *name, struct proc_dir_entry *parent)
776782
next->parent->name, next->name);
777783
return -EINVAL;
778784
}
779-
rb_erase(&next->subdir_node, &de->subdir);
785+
pde_erase(next, de);
780786
de = next;
781787
continue;
782788
}

include/linux/gfp.h

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -7,6 +7,7 @@
77
#include <linux/mmzone.h>
88
#include <linux/topology.h>
99
#include <linux/alloc_tag.h>
10+
#include <linux/cleanup.h>
1011
#include <linux/sched.h>
1112

1213
struct vm_area_struct;
@@ -463,4 +464,6 @@ static inline struct folio *folio_alloc_gigantic_noprof(int order, gfp_t gfp,
463464
/* This should be paired with folio_put() rather than free_contig_range(). */
464465
#define folio_alloc_gigantic(...) alloc_hooks(folio_alloc_gigantic_noprof(__VA_ARGS__))
465466

467+
DEFINE_FREE(free_page, void *, free_page((unsigned long)_T))
468+
466469
#endif /* __LINUX_GFP_H */

include/linux/huge_mm.h

Lines changed: 23 additions & 32 deletions
Original file line numberDiff line numberDiff line change
@@ -376,45 +376,30 @@ bool non_uniform_split_supported(struct folio *folio, unsigned int new_order,
376376
int folio_split(struct folio *folio, unsigned int new_order, struct page *page,
377377
struct list_head *list);
378378
/*
379-
* try_folio_split - try to split a @folio at @page using non uniform split.
379+
* try_folio_split_to_order - try to split a @folio at @page to @new_order using
380+
* non uniform split.
380381
* @folio: folio to be split
381-
* @page: split to order-0 at the given page
382-
* @list: store the after-split folios
382+
* @page: split to @new_order at the given page
383+
* @new_order: the target split order
383384
*
384-
* Try to split a @folio at @page using non uniform split to order-0, if
385-
* non uniform split is not supported, fall back to uniform split.
385+
* Try to split a @folio at @page using non uniform split to @new_order, if
386+
* non uniform split is not supported, fall back to uniform split. After-split
387+
* folios are put back to LRU list. Use min_order_for_split() to get the lower
388+
* bound of @new_order.
386389
*
387390
* Return: 0: split is successful, otherwise split failed.
388391
*/
389-
static inline int try_folio_split(struct folio *folio, struct page *page,
390-
struct list_head *list)
392+
static inline int try_folio_split_to_order(struct folio *folio,
393+
struct page *page, unsigned int new_order)
391394
{
392-
int ret = min_order_for_split(folio);
393-
394-
if (ret < 0)
395-
return ret;
396-
397-
if (!non_uniform_split_supported(folio, 0, false))
398-
return split_huge_page_to_list_to_order(&folio->page, list,
399-
ret);
400-
return folio_split(folio, ret, page, list);
395+
if (!non_uniform_split_supported(folio, new_order, /* warns= */ false))
396+
return split_huge_page_to_list_to_order(&folio->page, NULL,
397+
new_order);
398+
return folio_split(folio, new_order, page, NULL);
401399
}
402400
static inline int split_huge_page(struct page *page)
403401
{
404-
struct folio *folio = page_folio(page);
405-
int ret = min_order_for_split(folio);
406-
407-
if (ret < 0)
408-
return ret;
409-
410-
/*
411-
* split_huge_page() locks the page before splitting and
412-
* expects the same page that has been split to be locked when
413-
* returned. split_folio(page_folio(page)) cannot be used here
414-
* because it converts the page to folio and passes the head
415-
* page to be split.
416-
*/
417-
return split_huge_page_to_list_to_order(page, NULL, ret);
402+
return split_huge_page_to_list_to_order(page, NULL, 0);
418403
}
419404
void deferred_split_folio(struct folio *folio, bool partially_mapped);
420405

@@ -597,14 +582,20 @@ static inline int split_huge_page(struct page *page)
597582
return -EINVAL;
598583
}
599584

585+
static inline int min_order_for_split(struct folio *folio)
586+
{
587+
VM_WARN_ON_ONCE_FOLIO(1, folio);
588+
return -EINVAL;
589+
}
590+
600591
static inline int split_folio_to_list(struct folio *folio, struct list_head *list)
601592
{
602593
VM_WARN_ON_ONCE_FOLIO(1, folio);
603594
return -EINVAL;
604595
}
605596

606-
static inline int try_folio_split(struct folio *folio, struct page *page,
607-
struct list_head *list)
597+
static inline int try_folio_split_to_order(struct folio *folio,
598+
struct page *page, unsigned int new_order)
608599
{
609600
VM_WARN_ON_ONCE_FOLIO(1, folio);
610601
return -EINVAL;

kernel/Kconfig.kexec

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -109,6 +109,15 @@ config KEXEC_HANDOVER
109109
to keep data or state alive across the kexec. For this to work,
110110
both source and target kernels need to have this option enabled.
111111

112+
config KEXEC_HANDOVER_DEBUG
113+
bool "Enable Kexec Handover debug checks"
114+
depends on KEXEC_HANDOVER
115+
help
116+
This option enables extra sanity checks for the Kexec Handover
117+
subsystem. Since, KHO performance is crucial in live update
118+
scenarios and the extra code might be adding overhead it is
119+
only optionally enabled.
120+
112121
config CRASH_DUMP
113122
bool "kernel crash dumps"
114123
default ARCH_DEFAULT_CRASH_DUMP

kernel/Makefile

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -83,6 +83,7 @@ obj-$(CONFIG_KEXEC) += kexec.o
8383
obj-$(CONFIG_KEXEC_FILE) += kexec_file.o
8484
obj-$(CONFIG_KEXEC_ELF) += kexec_elf.o
8585
obj-$(CONFIG_KEXEC_HANDOVER) += kexec_handover.o
86+
obj-$(CONFIG_KEXEC_HANDOVER_DEBUG) += kexec_handover_debug.o
8687
obj-$(CONFIG_BACKTRACE_SELF_TEST) += backtracetest.o
8788
obj-$(CONFIG_COMPAT) += compat.o
8889
obj-$(CONFIG_CGROUPS) += cgroup/

kernel/gcov/gcc_4_7.c

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -18,7 +18,9 @@
1818
#include <linux/mm.h>
1919
#include "gcov.h"
2020

21-
#if (__GNUC__ >= 14)
21+
#if (__GNUC__ >= 15)
22+
#define GCOV_COUNTERS 10
23+
#elif (__GNUC__ >= 14)
2224
#define GCOV_COUNTERS 9
2325
#elif (__GNUC__ >= 10)
2426
#define GCOV_COUNTERS 8

0 commit comments

Comments
 (0)