Skip to content

Commit 9eb220e

Browse files
committed
Merge tag 'mm-hotfixes-stable-2025-11-26-11-51' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
Pull misc fixes from Andrew Morton: "8 hotfixes. 4 are cc:stable, 7 are against mm/. All are singletons - please see the respective changelogs for details" * tag 'mm-hotfixes-stable-2025-11-26-11-51' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm: mm/filemap: fix logic around SIGBUS in filemap_map_pages() mm/huge_memory: fix NULL pointer dereference when splitting folio MAINTAINERS: add test_kho to KHO's entry mailmap: add entry for Sam Protsenko selftests/mm: fix division-by-zero in uffd-unit-tests mm/mmap_lock: reset maple state on lock_vma_under_rcu() retry mm/memfd: fix information leak in hugetlb folios mm: swap: remove duplicate nr_swap_pages decrement in get_swap_page_of_type()
2 parents ad8cccc + 7c9580f commit 9eb220e

8 files changed

Lines changed: 63 additions & 36 deletions

File tree

.mailmap

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -691,6 +691,8 @@ Sachin Mokashi <sachin.mokashi@intel.com> <sachinx.mokashi@intel.com>
691691
Sachin P Sant <ssant@in.ibm.com>
692692
Sai Prakash Ranjan <quic_saipraka@quicinc.com> <saiprakash.ranjan@codeaurora.org>
693693
Sakari Ailus <sakari.ailus@linux.intel.com> <sakari.ailus@iki.fi>
694+
Sam Protsenko <semen.protsenko@linaro.org>
695+
Sam Protsenko <semen.protsenko@linaro.org> <semen.protsenko@globallogic.com>
694696
Sam Ravnborg <sam@mars.ravnborg.org>
695697
Sankeerth Billakanti <quic_sbillaka@quicinc.com> <sbillaka@codeaurora.org>
696698
Santosh Shilimkar <santosh.shilimkar@oracle.org>

MAINTAINERS

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -13799,6 +13799,7 @@ F: Documentation/admin-guide/mm/kho.rst
1379913799
F: Documentation/core-api/kho/*
1380013800
F: include/linux/kexec_handover.h
1380113801
F: kernel/kexec_handover.c
13802+
F: lib/test_kho.c
1380213803
F: tools/testing/selftests/kho/
1380313804

1380413805
KEYS-ENCRYPTED

mm/filemap.c

Lines changed: 14 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -3682,8 +3682,9 @@ static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf,
36823682
struct folio *folio, unsigned long start,
36833683
unsigned long addr, unsigned int nr_pages,
36843684
unsigned long *rss, unsigned short *mmap_miss,
3685-
bool can_map_large)
3685+
pgoff_t file_end)
36863686
{
3687+
struct address_space *mapping = folio->mapping;
36873688
unsigned int ref_from_caller = 1;
36883689
vm_fault_t ret = 0;
36893690
struct page *page = folio_page(folio, start);
@@ -3692,12 +3693,16 @@ static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf,
36923693
unsigned long addr0;
36933694

36943695
/*
3695-
* Map the large folio fully where possible.
3696+
* Map the large folio fully where possible:
36963697
*
3697-
* The folio must not cross VMA or page table boundary.
3698+
* - The folio is fully within size of the file or belong
3699+
* to shmem/tmpfs;
3700+
* - The folio doesn't cross VMA boundary;
3701+
* - The folio doesn't cross page table boundary;
36983702
*/
36993703
addr0 = addr - start * PAGE_SIZE;
3700-
if (can_map_large && folio_within_vma(folio, vmf->vma) &&
3704+
if ((file_end >= folio_next_index(folio) || shmem_mapping(mapping)) &&
3705+
folio_within_vma(folio, vmf->vma) &&
37013706
(addr0 & PMD_MASK) == ((addr0 + folio_size(folio) - 1) & PMD_MASK)) {
37023707
vmf->pte -= start;
37033708
page -= start;
@@ -3812,7 +3817,6 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf,
38123817
unsigned long rss = 0;
38133818
unsigned int nr_pages = 0, folio_type;
38143819
unsigned short mmap_miss = 0, mmap_miss_saved;
3815-
bool can_map_large;
38163820

38173821
rcu_read_lock();
38183822
folio = next_uptodate_folio(&xas, mapping, end_pgoff);
@@ -3823,16 +3827,14 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf,
38233827
end_pgoff = min(end_pgoff, file_end);
38243828

38253829
/*
3826-
* Do not allow to map with PTEs beyond i_size and with PMD
3827-
* across i_size to preserve SIGBUS semantics.
3830+
* Do not allow to map with PMD across i_size to preserve
3831+
* SIGBUS semantics.
38283832
*
38293833
* Make an exception for shmem/tmpfs that for long time
38303834
* intentionally mapped with PMDs across i_size.
38313835
*/
3832-
can_map_large = shmem_mapping(mapping) ||
3833-
file_end >= folio_next_index(folio);
3834-
3835-
if (can_map_large && filemap_map_pmd(vmf, folio, start_pgoff)) {
3836+
if ((file_end >= folio_next_index(folio) || shmem_mapping(mapping)) &&
3837+
filemap_map_pmd(vmf, folio, start_pgoff)) {
38363838
ret = VM_FAULT_NOPAGE;
38373839
goto out;
38383840
}
@@ -3861,8 +3863,7 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf,
38613863
else
38623864
ret |= filemap_map_folio_range(vmf, folio,
38633865
xas.xa_index - folio->index, addr,
3864-
nr_pages, &rss, &mmap_miss,
3865-
can_map_large);
3866+
nr_pages, &rss, &mmap_miss, file_end);
38663867

38673868
folio_unlock(folio);
38683869
} while ((folio = next_uptodate_folio(&xas, mapping, end_pgoff)) != NULL);

mm/huge_memory.c

Lines changed: 10 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -3619,6 +3619,16 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
36193619
if (folio != page_folio(split_at) || folio != page_folio(lock_at))
36203620
return -EINVAL;
36213621

3622+
/*
3623+
* Folios that just got truncated cannot get split. Signal to the
3624+
* caller that there was a race.
3625+
*
3626+
* TODO: this will also currently refuse shmem folios that are in the
3627+
* swapcache.
3628+
*/
3629+
if (!is_anon && !folio->mapping)
3630+
return -EBUSY;
3631+
36223632
if (new_order >= folio_order(folio))
36233633
return -EINVAL;
36243634

@@ -3659,18 +3669,6 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
36593669
gfp_t gfp;
36603670

36613671
mapping = folio->mapping;
3662-
3663-
/* Truncated ? */
3664-
/*
3665-
* TODO: add support for large shmem folio in swap cache.
3666-
* When shmem is in swap cache, mapping is NULL and
3667-
* folio_test_swapcache() is true.
3668-
*/
3669-
if (!mapping) {
3670-
ret = -EBUSY;
3671-
goto out;
3672-
}
3673-
36743672
min_order = mapping_min_folio_order(folio->mapping);
36753673
if (new_order < min_order) {
36763674
ret = -EINVAL;

mm/memfd.c

Lines changed: 27 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -96,9 +96,36 @@ struct folio *memfd_alloc_folio(struct file *memfd, pgoff_t idx)
9696
NULL,
9797
gfp_mask);
9898
if (folio) {
99+
u32 hash;
100+
101+
/*
102+
* Zero the folio to prevent information leaks to userspace.
103+
* Use folio_zero_user() which is optimized for huge/gigantic
104+
* pages. Pass 0 as addr_hint since this is not a faulting path
105+
* and we don't have a user virtual address yet.
106+
*/
107+
folio_zero_user(folio, 0);
108+
109+
/*
110+
* Mark the folio uptodate before adding to page cache,
111+
* as required by filemap.c and other hugetlb paths.
112+
*/
113+
__folio_mark_uptodate(folio);
114+
115+
/*
116+
* Serialize hugepage allocation and instantiation to prevent
117+
* races with concurrent allocations, as required by all other
118+
* callers of hugetlb_add_to_page_cache().
119+
*/
120+
hash = hugetlb_fault_mutex_hash(memfd->f_mapping, idx);
121+
mutex_lock(&hugetlb_fault_mutex_table[hash]);
122+
99123
err = hugetlb_add_to_page_cache(folio,
100124
memfd->f_mapping,
101125
idx);
126+
127+
mutex_unlock(&hugetlb_fault_mutex_table[hash]);
128+
102129
if (err) {
103130
folio_put(folio);
104131
goto err_unresv;

mm/mmap_lock.c

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -241,6 +241,7 @@ struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm,
241241
if (PTR_ERR(vma) == -EAGAIN) {
242242
count_vm_vma_lock_event(VMA_LOCK_MISS);
243243
/* The area was replaced with another one */
244+
mas_set(&mas, address);
244245
goto retry;
245246
}
246247

mm/swapfile.c

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -2005,10 +2005,8 @@ swp_entry_t get_swap_page_of_type(int type)
20052005
local_lock(&percpu_swap_cluster.lock);
20062006
offset = cluster_alloc_swap_entry(si, 0, 1);
20072007
local_unlock(&percpu_swap_cluster.lock);
2008-
if (offset) {
2008+
if (offset)
20092009
entry = swp_entry(si->type, offset);
2010-
atomic_long_dec(&nr_swap_pages);
2011-
}
20122010
}
20132011
put_swap_device(si);
20142012
}

tools/testing/selftests/mm/uffd-unit-tests.c

Lines changed: 7 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -1758,10 +1758,15 @@ int main(int argc, char *argv[])
17581758
uffd_test_ops = mem_type->mem_ops;
17591759
uffd_test_case_ops = test->test_case_ops;
17601760

1761-
if (mem_type->mem_flag & (MEM_HUGETLB_PRIVATE | MEM_HUGETLB))
1761+
if (mem_type->mem_flag & (MEM_HUGETLB_PRIVATE | MEM_HUGETLB)) {
17621762
gopts.page_size = default_huge_page_size();
1763-
else
1763+
if (gopts.page_size == 0) {
1764+
uffd_test_skip("huge page size is 0, feature missing?");
1765+
continue;
1766+
}
1767+
} else {
17641768
gopts.page_size = psize();
1769+
}
17651770

17661771
/* Ensure we have at least 2 pages */
17671772
gopts.nr_pages = MAX(UFFD_TEST_MEM_SIZE, gopts.page_size * 2)
@@ -1776,12 +1781,6 @@ int main(int argc, char *argv[])
17761781
continue;
17771782

17781783
uffd_test_start("%s on %s", test->name, mem_type->name);
1779-
if ((mem_type->mem_flag == MEM_HUGETLB ||
1780-
mem_type->mem_flag == MEM_HUGETLB_PRIVATE) &&
1781-
(default_huge_page_size() == 0)) {
1782-
uffd_test_skip("huge page size is 0, feature missing?");
1783-
continue;
1784-
}
17851784
if (!uffd_feature_supported(test)) {
17861785
uffd_test_skip("feature missing");
17871786
continue;

0 commit comments

Comments (0)