Skip to content

Commit 2f031c6

Browse files
Author: Matthew Wilcox (Oracle) — committed
mm/rmap: Convert rmap_walk() to take a folio
This ripples all the way through to every calling and called function from rmap.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
1 parent e05b345 commit 2f031c6

9 files changed

Lines changed: 80 additions & 99 deletions

File tree

include/linux/ksm.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -51,7 +51,7 @@ static inline void ksm_exit(struct mm_struct *mm)
5151
struct page *ksm_might_need_to_copy(struct page *page,
5252
struct vm_area_struct *vma, unsigned long address);
5353

54-
void rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc);
54+
void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc);
5555
void folio_migrate_ksm(struct folio *newfolio, struct folio *folio);
5656

5757
#else /* !CONFIG_KSM */
@@ -78,7 +78,7 @@ static inline struct page *ksm_might_need_to_copy(struct page *page,
7878
return page;
7979
}
8080

81-
static inline void rmap_walk_ksm(struct page *page,
81+
static inline void rmap_walk_ksm(struct folio *folio,
8282
struct rmap_walk_control *rwc)
8383
{
8484
}

include/linux/rmap.h

Lines changed: 5 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -266,7 +266,6 @@ void remove_migration_ptes(struct folio *src, struct folio *dst, bool locked);
266266
/*
267267
* Called by memory-failure.c to kill processes.
268268
*/
269-
struct anon_vma *page_lock_anon_vma_read(struct page *page);
270269
struct anon_vma *folio_lock_anon_vma_read(struct folio *folio);
271270
void page_unlock_anon_vma_read(struct anon_vma *anon_vma);
272271
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);
@@ -286,15 +285,15 @@ struct rmap_walk_control {
286285
* Return false if page table scanning in rmap_walk should be stopped.
287286
* Otherwise, return true.
288287
*/
289-
bool (*rmap_one)(struct page *page, struct vm_area_struct *vma,
288+
bool (*rmap_one)(struct folio *folio, struct vm_area_struct *vma,
290289
unsigned long addr, void *arg);
291-
int (*done)(struct page *page);
292-
struct anon_vma *(*anon_lock)(struct page *page);
290+
int (*done)(struct folio *folio);
291+
struct anon_vma *(*anon_lock)(struct folio *folio);
293292
bool (*invalid_vma)(struct vm_area_struct *vma, void *arg);
294293
};
295294

296-
void rmap_walk(struct page *page, struct rmap_walk_control *rwc);
297-
void rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc);
295+
void rmap_walk(struct folio *folio, struct rmap_walk_control *rwc);
296+
void rmap_walk_locked(struct folio *folio, struct rmap_walk_control *rwc);
298297

299298
#else /* !CONFIG_MMU */
300299

mm/damon/paddr.c

Lines changed: 7 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -16,10 +16,10 @@
1616
#include "../internal.h"
1717
#include "prmtv-common.h"
1818

19-
static bool __damon_pa_mkold(struct page *page, struct vm_area_struct *vma,
19+
static bool __damon_pa_mkold(struct folio *folio, struct vm_area_struct *vma,
2020
unsigned long addr, void *arg)
2121
{
22-
DEFINE_PAGE_VMA_WALK(pvmw, page, vma, addr, 0);
22+
DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);
2323

2424
while (page_vma_mapped_walk(&pvmw)) {
2525
addr = pvmw.address;
@@ -37,7 +37,7 @@ static void damon_pa_mkold(unsigned long paddr)
3737
struct page *page = damon_get_page(PHYS_PFN(paddr));
3838
struct rmap_walk_control rwc = {
3939
.rmap_one = __damon_pa_mkold,
40-
.anon_lock = page_lock_anon_vma_read,
40+
.anon_lock = folio_lock_anon_vma_read,
4141
};
4242
bool need_lock;
4343

@@ -54,7 +54,7 @@ static void damon_pa_mkold(unsigned long paddr)
5454
if (need_lock && !folio_trylock(folio))
5555
goto out;
5656

57-
rmap_walk(&folio->page, &rwc);
57+
rmap_walk(folio, &rwc);
5858

5959
if (need_lock)
6060
folio_unlock(folio);
@@ -87,10 +87,9 @@ struct damon_pa_access_chk_result {
8787
bool accessed;
8888
};
8989

90-
static bool __damon_pa_young(struct page *page, struct vm_area_struct *vma,
90+
static bool __damon_pa_young(struct folio *folio, struct vm_area_struct *vma,
9191
unsigned long addr, void *arg)
9292
{
93-
struct folio *folio = page_folio(page);
9493
struct damon_pa_access_chk_result *result = arg;
9594
DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);
9695

@@ -133,7 +132,7 @@ static bool damon_pa_young(unsigned long paddr, unsigned long *page_sz)
133132
struct rmap_walk_control rwc = {
134133
.arg = &result,
135134
.rmap_one = __damon_pa_young,
136-
.anon_lock = page_lock_anon_vma_read,
135+
.anon_lock = folio_lock_anon_vma_read,
137136
};
138137
bool need_lock;
139138

@@ -156,7 +155,7 @@ static bool damon_pa_young(unsigned long paddr, unsigned long *page_sz)
156155
return NULL;
157156
}
158157

159-
rmap_walk(&folio->page, &rwc);
158+
rmap_walk(folio, &rwc);
160159

161160
if (need_lock)
162161
folio_unlock(folio);

mm/folio-compat.c

Lines changed: 0 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -164,10 +164,3 @@ void putback_lru_page(struct page *page)
164164
{
165165
folio_putback_lru(page_folio(page));
166166
}
167-
168-
#ifdef CONFIG_MMU
169-
struct anon_vma *page_lock_anon_vma_read(struct page *page)
170-
{
171-
return folio_lock_anon_vma_read(page_folio(page));
172-
}
173-
#endif

mm/huge_memory.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2572,7 +2572,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
25722572
* The caller does not necessarily hold an mmap_lock that would
25732573
* prevent the anon_vma disappearing so we first we take a
25742574
* reference to it and then lock the anon_vma for write. This
2575-
* is similar to page_lock_anon_vma_read except the write lock
2575+
* is similar to folio_lock_anon_vma_read except the write lock
25762576
* is taken to serialise against parallel split or collapse
25772577
* operations.
25782578
*/

mm/ksm.c

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -2588,21 +2588,21 @@ struct page *ksm_might_need_to_copy(struct page *page,
25882588
return new_page;
25892589
}
25902590

2591-
void rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc)
2591+
void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc)
25922592
{
25932593
struct stable_node *stable_node;
25942594
struct rmap_item *rmap_item;
25952595
int search_new_forks = 0;
25962596

2597-
VM_BUG_ON_PAGE(!PageKsm(page), page);
2597+
VM_BUG_ON_FOLIO(!folio_test_ksm(folio), folio);
25982598

25992599
/*
26002600
* Rely on the page lock to protect against concurrent modifications
26012601
* to that page's node of the stable tree.
26022602
*/
2603-
VM_BUG_ON_PAGE(!PageLocked(page), page);
2603+
VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
26042604

2605-
stable_node = page_stable_node(page);
2605+
stable_node = folio_stable_node(folio);
26062606
if (!stable_node)
26072607
return;
26082608
again:
@@ -2637,11 +2637,11 @@ void rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc)
26372637
if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
26382638
continue;
26392639

2640-
if (!rwc->rmap_one(page, vma, addr, rwc->arg)) {
2640+
if (!rwc->rmap_one(folio, vma, addr, rwc->arg)) {
26412641
anon_vma_unlock_read(anon_vma);
26422642
return;
26432643
}
2644-
if (rwc->done && rwc->done(page)) {
2644+
if (rwc->done && rwc->done(folio)) {
26452645
anon_vma_unlock_read(anon_vma);
26462646
return;
26472647
}

mm/migrate.c

Lines changed: 4 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -171,13 +171,11 @@ void putback_movable_pages(struct list_head *l)
171171
/*
172172
* Restore a potential migration pte to a working pte entry
173173
*/
174-
static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma,
175-
unsigned long addr, void *old)
174+
static bool remove_migration_pte(struct folio *folio,
175+
struct vm_area_struct *vma, unsigned long addr, void *old)
176176
{
177-
struct folio *folio = page_folio(page);
178177
DEFINE_FOLIO_VMA_WALK(pvmw, old, vma, addr, PVMW_SYNC | PVMW_MIGRATION);
179178

180-
VM_BUG_ON_PAGE(PageTail(page), page);
181179
while (page_vma_mapped_walk(&pvmw)) {
182180
pte_t pte;
183181
swp_entry_t entry;
@@ -269,9 +267,9 @@ void remove_migration_ptes(struct folio *src, struct folio *dst, bool locked)
269267
};
270268

271269
if (locked)
272-
rmap_walk_locked(&dst->page, &rwc);
270+
rmap_walk_locked(dst, &rwc);
273271
else
274-
rmap_walk(&dst->page, &rwc);
272+
rmap_walk(dst, &rwc);
275273
}
276274

277275
/*

mm/page_idle.c

Lines changed: 3 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -46,11 +46,10 @@ static struct page *page_idle_get_page(unsigned long pfn)
4646
return page;
4747
}
4848

49-
static bool page_idle_clear_pte_refs_one(struct page *page,
49+
static bool page_idle_clear_pte_refs_one(struct folio *folio,
5050
struct vm_area_struct *vma,
5151
unsigned long addr, void *arg)
5252
{
53-
struct folio *folio = page_folio(page);
5453
DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);
5554
bool referenced = false;
5655

@@ -93,7 +92,7 @@ static void page_idle_clear_pte_refs(struct page *page)
9392
*/
9493
static const struct rmap_walk_control rwc = {
9594
.rmap_one = page_idle_clear_pte_refs_one,
96-
.anon_lock = page_lock_anon_vma_read,
95+
.anon_lock = folio_lock_anon_vma_read,
9796
};
9897
bool need_lock;
9998

@@ -104,7 +103,7 @@ static void page_idle_clear_pte_refs(struct page *page)
104103
if (need_lock && !folio_trylock(folio))
105104
return;
106105

107-
rmap_walk(&folio->page, (struct rmap_walk_control *)&rwc);
106+
rmap_walk(folio, (struct rmap_walk_control *)&rwc);
108107

109108
if (need_lock)
110109
folio_unlock(folio);

0 commit comments

Comments (0)