Skip to content

Commit 0f3ad9c

Browse files
committed
Merge tag 'mm-hotfixes-stable-2025-10-22-12-43' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
Pull hotfixes from Andrew Morton:
 "17 hotfixes. 12 are cc:stable and 14 are for MM.

  There's a two-patch DAMON series from SeongJae Park which addresses a
  missed check and possible memory leak.

  Apart from that it's all singletons - please see the changelogs for
  details"

* tag 'mm-hotfixes-stable-2025-10-22-12-43' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
  csky: abiv2: adapt to new folio flags field
  mm/damon/core: use damos_commit_quota_goal() for new goal commit
  mm/damon/core: fix potential memory leak by cleaning ops_filter in damon_destroy_scheme
  hugetlbfs: move lock assertions after early returns in huge_pmd_unshare()
  vmw_balloon: indicate success when effectively deflating during migration
  mm/damon/core: fix list_add_tail() call on damon_call()
  mm/mremap: correctly account old mapping after MREMAP_DONTUNMAP remap
  mm: prevent poison consumption when splitting THP
  ocfs2: clear extent cache after moving/defragmenting extents
  mm: don't spin in add_stack_record when gfp flags don't allow
  dma-debug: don't report false positives with DMA_BOUNCE_UNALIGNED_KMALLOC
  mm/damon/sysfs: dealloc commit test ctx always
  mm/damon/sysfs: catch commit test ctx alloc failure
  hung_task: fix warnings caused by unaligned lock pointers
2 parents dd72c8f + 9aa1216 commit 0f3ad9c

14 files changed

Lines changed: 45 additions & 39 deletions

File tree

arch/csky/abiv2/cacheflush.c

Lines changed: 1 addition & 1 deletion
@@ -21,7 +21,7 @@ void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,

 	folio = page_folio(pfn_to_page(pfn));

-	if (test_and_set_bit(PG_dcache_clean, &folio->flags))
+	if (test_and_set_bit(PG_dcache_clean, &folio->flags.f))
 		return;

 	icache_inv_range(address, address + nr*PAGE_SIZE);

arch/csky/abiv2/inc/abi/cacheflush.h

Lines changed: 2 additions & 2 deletions
@@ -20,8 +20,8 @@

 static inline void flush_dcache_folio(struct folio *folio)
 {
-	if (test_bit(PG_dcache_clean, &folio->flags))
-		clear_bit(PG_dcache_clean, &folio->flags);
+	if (test_bit(PG_dcache_clean, &folio->flags.f))
+		clear_bit(PG_dcache_clean, &folio->flags.f);
 }
 #define flush_dcache_folio flush_dcache_folio

drivers/misc/vmw_balloon.c

Lines changed: 3 additions & 5 deletions
@@ -1737,7 +1737,7 @@ static int vmballoon_migratepage(struct balloon_dev_info *b_dev_info,
 {
 	unsigned long status, flags;
 	struct vmballoon *b;
-	int ret;
+	int ret = 0;

 	b = container_of(b_dev_info, struct vmballoon, b_dev_info);

@@ -1796,17 +1796,15 @@ static int vmballoon_migratepage(struct balloon_dev_info *b_dev_info,
 		 * A failure happened. While we can deflate the page we just
 		 * inflated, this deflation can also encounter an error. Instead
 		 * we will decrease the size of the balloon to reflect the
-		 * change and report failure.
+		 * change.
 		 */
 		atomic64_dec(&b->size);
-		ret = -EBUSY;
 	} else {
 		/*
 		 * Success. Take a reference for the page, and we will add it to
 		 * the list after acquiring the lock.
 		 */
 		get_page(newpage);
-		ret = 0;
 	}

 	/* Update the balloon list under the @pages_lock */
@@ -1817,7 +1815,7 @@ static int vmballoon_migratepage(struct balloon_dev_info *b_dev_info,
 	 * If we succeed just insert it to the list and update the statistics
 	 * under the lock.
 	 */
-	if (!ret) {
+	if (status == VMW_BALLOON_SUCCESS) {
 		balloon_page_insert(&b->b_dev_info, newpage);
 		__count_vm_event(BALLOON_MIGRATE);
 	}

fs/hugetlbfs/inode.c

Lines changed: 0 additions & 9 deletions
@@ -478,14 +478,6 @@ hugetlb_vmdelete_list(struct rb_root_cached *root, pgoff_t start, pgoff_t end,
 		if (!hugetlb_vma_trylock_write(vma))
 			continue;

-		/*
-		 * Skip VMAs without shareable locks. Per the design in commit
-		 * 40549ba8f8e0, these will be handled by remove_inode_hugepages()
-		 * called after this function with proper locking.
-		 */
-		if (!__vma_shareable_lock(vma))
-			goto skip;
-
 		v_start = vma_offset_start(vma, start);
 		v_end = vma_offset_end(vma, end);

@@ -496,7 +488,6 @@ hugetlb_vmdelete_list(struct rb_root_cached *root, pgoff_t start, pgoff_t end,
 		 * vmas. Therefore, lock is not held when calling
 		 * unmap_hugepage_range for private vmas.
 		 */
-skip:
 		hugetlb_vma_unlock_write(vma);
 	}
 }

fs/ocfs2/move_extents.c

Lines changed: 5 additions & 0 deletions
@@ -867,6 +867,11 @@ static int __ocfs2_move_extents_range(struct buffer_head *di_bh,
 			mlog_errno(ret);
 			goto out;
 		}
+		/*
+		 * Invalidate extent cache after moving/defragging to prevent
+		 * stale cached data with outdated extent flags.
+		 */
+		ocfs2_extent_map_trunc(inode, cpos);

 		context->clusters_moved += alloc_size;
 next:

include/linux/hung_task.h

Lines changed: 5 additions & 3 deletions
@@ -20,6 +20,10 @@
  * always zero. So we can use these bits to encode the specific blocking
  * type.
  *
+ * Note that on architectures where this is not guaranteed, or for any
+ * unaligned lock, this tracking mechanism is silently skipped for that
+ * lock.
+ *
  * Type encoding:
  * 00 - Blocked on mutex (BLOCKER_TYPE_MUTEX)
  * 01 - Blocked on semaphore (BLOCKER_TYPE_SEM)
@@ -45,16 +49,14 @@ static inline void hung_task_set_blocker(void *lock, unsigned long type)
 	 * If the lock pointer matches the BLOCKER_TYPE_MASK, return
 	 * without writing anything.
 	 */
-	if (WARN_ON_ONCE(lock_ptr & BLOCKER_TYPE_MASK))
+	if (lock_ptr & BLOCKER_TYPE_MASK)
 		return;

 	WRITE_ONCE(current->blocker, lock_ptr | type);
 }

 static inline void hung_task_clear_blocker(void)
 {
-	WARN_ON_ONCE(!READ_ONCE(current->blocker));
-
 	WRITE_ONCE(current->blocker, 0UL);
 }

kernel/dma/debug.c

Lines changed: 4 additions & 1 deletion
@@ -23,6 +23,7 @@
 #include <linux/ctype.h>
 #include <linux/list.h>
 #include <linux/slab.h>
+#include <linux/swiotlb.h>
 #include <asm/sections.h>
 #include "debug.h"

@@ -594,7 +595,9 @@ static void add_dma_entry(struct dma_debug_entry *entry, unsigned long attrs)
 	if (rc == -ENOMEM) {
 		pr_err_once("cacheline tracking ENOMEM, dma-debug disabled\n");
 		global_disable = true;
-	} else if (rc == -EEXIST && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
+	} else if (rc == -EEXIST && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
+		   !(IS_ENABLED(CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC) &&
+		     is_swiotlb_active(entry->dev))) {
 		err_printk(entry->dev, entry,
 			   "cacheline tracking EEXIST, overlapping mappings aren't supported\n");
 	}

mm/damon/core.c

Lines changed: 5 additions & 2 deletions
@@ -452,6 +452,9 @@ void damon_destroy_scheme(struct damos *s)
 	damos_for_each_filter_safe(f, next, s)
 		damos_destroy_filter(f);

+	damos_for_each_ops_filter_safe(f, next, s)
+		damos_destroy_filter(f);
+
 	kfree(s->migrate_dests.node_id_arr);
 	kfree(s->migrate_dests.weight_arr);
 	damon_del_scheme(s);
@@ -832,7 +835,7 @@ int damos_commit_quota_goals(struct damos_quota *dst, struct damos_quota *src)
 				src_goal->metric, src_goal->target_value);
 		if (!new_goal)
 			return -ENOMEM;
-		damos_commit_quota_goal_union(new_goal, src_goal);
+		damos_commit_quota_goal(new_goal, src_goal);
 		damos_add_quota_goal(dst, new_goal);
 	}
 	return 0;
@@ -1450,7 +1453,7 @@ int damon_call(struct damon_ctx *ctx, struct damon_call_control *control)
 	INIT_LIST_HEAD(&control->list);

 	mutex_lock(&ctx->call_controls_lock);
-	list_add_tail(&ctx->call_controls, &control->list);
+	list_add_tail(&control->list, &ctx->call_controls);
 	mutex_unlock(&ctx->call_controls_lock);
 	if (!damon_is_running(ctx))
 		return -EINVAL;

mm/damon/sysfs.c

Lines changed: 4 additions & 3 deletions
@@ -1473,13 +1473,14 @@ static int damon_sysfs_commit_input(void *data)
 	if (IS_ERR(param_ctx))
 		return PTR_ERR(param_ctx);
 	test_ctx = damon_new_ctx();
+	if (!test_ctx)
+		return -ENOMEM;
 	err = damon_commit_ctx(test_ctx, param_ctx);
-	if (err) {
-		damon_destroy_ctx(test_ctx);
+	if (err)
 		goto out;
-	}
 	err = damon_commit_ctx(kdamond->damon_ctx, param_ctx);
 out:
+	damon_destroy_ctx(test_ctx);
 	damon_destroy_ctx(param_ctx);
 	return err;
 }

mm/huge_memory.c

Lines changed: 3 additions & 0 deletions
@@ -4109,6 +4109,9 @@ static bool thp_underused(struct folio *folio)
 	if (khugepaged_max_ptes_none == HPAGE_PMD_NR - 1)
 		return false;

+	if (folio_contain_hwpoisoned_page(folio))
+		return false;
+
 	for (i = 0; i < folio_nr_pages(folio); i++) {
 		if (pages_identical(folio_page(folio, i), ZERO_PAGE(0))) {
 			if (++num_zero_pages > khugepaged_max_ptes_none)

0 commit comments

Comments
 (0)