
Commit 4eecb8b

Author: Matthew Wilcox (Oracle)

mm/migrate: Convert remove_migration_ptes() to folios

Convert the implementation and all callers.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>

1 parent 0d25148 commit 4eecb8b

4 files changed: 54 additions & 42 deletions

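For callers, the conversion is mechanical: each struct page is turned into its folio with page_folio() and passed to the new signature. Below is a minimal sketch of the new calling convention; the wrapper and variable names are illustrative, not code from this commit.

/*
 * Illustrative sketch only: how a caller uses the folio-based
 * remove_migration_ptes().  restore_migration_entries() is a made-up name.
 */
#include <linux/mm.h>
#include <linux/rmap.h>

static void restore_migration_entries(struct page *old_page, struct page *new_page)
{
	struct folio *src = page_folio(old_page);
	struct folio *dst = page_folio(new_page);

	/* Replace migration entries for src with mappings of dst. */
	remove_migration_ptes(src, dst, false);
}

When migration fails, callers pass the same folio as both src and dst so the original mappings are re-established; the writeout() and migrate_vma paths in the diff below do exactly that.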

include/linux/rmap.h

Lines changed: 1 addition & 1 deletion
@@ -261,7 +261,7 @@ unsigned long page_address_in_vma(struct page *, struct vm_area_struct *);
  */
 int folio_mkclean(struct folio *);

-void remove_migration_ptes(struct page *old, struct page *new, bool locked);
+void remove_migration_ptes(struct folio *src, struct folio *dst, bool locked);

 /*
  * Called by memory-failure.c to kill processes.

mm/huge_memory.c

Lines changed: 13 additions & 11 deletions
@@ -2270,18 +2270,19 @@ static void unmap_page(struct page *page)
 	VM_WARN_ON_ONCE_PAGE(page_mapped(page), page);
 }

-static void remap_page(struct page *page, unsigned int nr)
+static void remap_page(struct folio *folio, unsigned long nr)
 {
-	int i;
+	int i = 0;

 	/* If unmap_page() uses try_to_migrate() on file, remove this check */
-	if (!PageAnon(page))
+	if (!folio_test_anon(folio))
 		return;
-	if (PageTransHuge(page)) {
-		remove_migration_ptes(page, page, true);
-	} else {
-		for (i = 0; i < nr; i++)
-			remove_migration_ptes(page + i, page + i, true);
+	for (;;) {
+		remove_migration_ptes(folio, folio, true);
+		i += folio_nr_pages(folio);
+		if (i >= nr)
+			break;
+		folio = folio_next(folio);
 	}
 }

@@ -2441,7 +2442,7 @@ static void __split_huge_page(struct page *page, struct list_head *list,
 	}
 	local_irq_enable();

-	remap_page(head, nr);
+	remap_page(folio, nr);

 	if (PageSwapCache(head)) {
 		swp_entry_t entry = { .val = page_private(head) };
@@ -2550,7 +2551,8 @@ bool can_split_huge_page(struct page *page, int *pextra_pins)
  */
 int split_huge_page_to_list(struct page *page, struct list_head *list)
 {
-	struct page *head = compound_head(page);
+	struct folio *folio = page_folio(page);
+	struct page *head = &folio->page;
 	struct deferred_split *ds_queue = get_deferred_split_queue(head);
 	XA_STATE(xas, &head->mapping->i_pages, head->index);
 	struct anon_vma *anon_vma = NULL;
@@ -2667,7 +2669,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
 		if (mapping)
 			xas_unlock(&xas);
 		local_irq_enable();
-		remap_page(head, thp_nr_pages(head));
+		remap_page(folio, folio_nr_pages(folio));
 		ret = -EBUSY;
 	}
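The new remap_page() no longer special-cases a still-compound page versus a run of split pages: it walks nr pages one folio at a time, advancing by folio_nr_pages() and stepping with folio_next(). Here is a standalone sketch of that iteration idiom, assuming nr >= 1; the counting helper is hypothetical and not part of the commit.

/*
 * Illustrative sketch of the folio_next()/folio_nr_pages() walk that the
 * new remap_page() relies on; count_folios() is a made-up helper.
 */
#include <linux/mm.h>

static unsigned long count_folios(struct folio *folio, unsigned long nr)
{
	unsigned long i = 0, folios = 0;

	for (;;) {
		folios++;			/* visit this folio once */
		i += folio_nr_pages(folio);	/* it may span many pages */
		if (i >= nr)
			break;
		folio = folio_next(folio);	/* step to the next folio */
	}
	return folios;
}

After a successful split the original THP has become nr single-page folios, so the walk visits each of them; if the split failed, the single large folio is visited once and the loop terminates.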

mm/migrate.c

Lines changed: 30 additions & 25 deletions
@@ -174,30 +174,32 @@ void putback_movable_pages(struct list_head *l)
 static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma,
 				 unsigned long addr, void *old)
 {
-	DEFINE_PAGE_VMA_WALK(pvmw, (struct page *)old, vma, addr,
-				PVMW_SYNC | PVMW_MIGRATION);
-	struct page *new;
-	pte_t pte;
-	swp_entry_t entry;
+	struct folio *folio = page_folio(page);
+	DEFINE_FOLIO_VMA_WALK(pvmw, old, vma, addr, PVMW_SYNC | PVMW_MIGRATION);

 	VM_BUG_ON_PAGE(PageTail(page), page);
 	while (page_vma_mapped_walk(&pvmw)) {
-		if (PageKsm(page))
-			new = page;
-		else
-			new = page - pvmw.pgoff +
-				linear_page_index(vma, pvmw.address);
+		pte_t pte;
+		swp_entry_t entry;
+		struct page *new;
+		unsigned long idx = 0;
+
+		/* pgoff is invalid for ksm pages, but they are never large */
+		if (folio_test_large(folio) && !folio_test_hugetlb(folio))
+			idx = linear_page_index(vma, pvmw.address) - pvmw.pgoff;
+		new = folio_page(folio, idx);

 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
 		/* PMD-mapped THP migration entry */
 		if (!pvmw.pte) {
-			VM_BUG_ON_PAGE(PageHuge(page) || !PageTransCompound(page), page);
+			VM_BUG_ON_FOLIO(folio_test_hugetlb(folio) ||
+					!folio_test_pmd_mappable(folio), folio);
 			remove_migration_pmd(&pvmw, new);
 			continue;
 		}
 #endif

-		get_page(new);
+		folio_get(folio);
 		pte = pte_mkold(mk_pte(new, READ_ONCE(vma->vm_page_prot)));
 		if (pte_swp_soft_dirty(*pvmw.pte))
 			pte = pte_mksoft_dirty(pte);
@@ -226,20 +228,20 @@ static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma,
 		}

 #ifdef CONFIG_HUGETLB_PAGE
-		if (PageHuge(new)) {
+		if (folio_test_hugetlb(folio)) {
 			unsigned int shift = huge_page_shift(hstate_vma(vma));

 			pte = pte_mkhuge(pte);
 			pte = arch_make_huge_pte(pte, shift, vma->vm_flags);
-			if (PageAnon(new))
+			if (folio_test_anon(folio))
 				hugepage_add_anon_rmap(new, vma, pvmw.address);
 			else
 				page_dup_rmap(new, true);
 			set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
 		} else
 #endif
 		{
-			if (PageAnon(new))
+			if (folio_test_anon(folio))
 				page_add_anon_rmap(new, vma, pvmw.address, false);
 			else
 				page_add_file_rmap(new, vma, false);
@@ -259,17 +261,17 @@ static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma,
  * Get rid of all migration entries and replace them by
  * references to the indicated page.
  */
-void remove_migration_ptes(struct page *old, struct page *new, bool locked)
+void remove_migration_ptes(struct folio *src, struct folio *dst, bool locked)
 {
 	struct rmap_walk_control rwc = {
 		.rmap_one = remove_migration_pte,
-		.arg = old,
+		.arg = src,
 	};

 	if (locked)
-		rmap_walk_locked(new, &rwc);
+		rmap_walk_locked(&dst->page, &rwc);
 	else
-		rmap_walk(new, &rwc);
+		rmap_walk(&dst->page, &rwc);
 }

 /*
@@ -756,6 +758,7 @@ int buffer_migrate_page_norefs(struct address_space *mapping,
  */
 static int writeout(struct address_space *mapping, struct page *page)
 {
+	struct folio *folio = page_folio(page);
 	struct writeback_control wbc = {
 		.sync_mode = WB_SYNC_NONE,
 		.nr_to_write = 1,
@@ -781,7 +784,7 @@ static int writeout(struct address_space *mapping, struct page *page)
 	 * At this point we know that the migration attempt cannot
 	 * be successful.
 	 */
-	remove_migration_ptes(page, page, false);
+	remove_migration_ptes(folio, folio, false);

 	rc = mapping->a_ops->writepage(page, &wbc);

@@ -913,6 +916,7 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
 				int force, enum migrate_mode mode)
 {
 	struct folio *folio = page_folio(page);
+	struct folio *dst = page_folio(newpage);
 	int rc = -EAGAIN;
 	bool page_was_mapped = false;
 	struct anon_vma *anon_vma = NULL;
@@ -1039,8 +1043,8 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
 	}

 	if (page_was_mapped)
-		remove_migration_ptes(page,
-			rc == MIGRATEPAGE_SUCCESS ? newpage : page, false);
+		remove_migration_ptes(folio,
+			rc == MIGRATEPAGE_SUCCESS ? dst : folio, false);

 out_unlock_both:
 	unlock_page(newpage);
@@ -1166,7 +1170,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
 				enum migrate_mode mode, int reason,
 				struct list_head *ret)
 {
-	struct folio *src = page_folio(hpage);
+	struct folio *dst, *src = page_folio(hpage);
 	int rc = -EAGAIN;
 	int page_was_mapped = 0;
 	struct page *new_hpage;
@@ -1194,6 +1198,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
 	new_hpage = get_new_page(hpage, private);
 	if (!new_hpage)
 		return -ENOMEM;
+	dst = page_folio(new_hpage);

 	if (!trylock_page(hpage)) {
 		if (!force)
@@ -1254,8 +1259,8 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
 	rc = move_to_new_page(new_hpage, hpage, mode);

 	if (page_was_mapped)
-		remove_migration_ptes(hpage,
-			rc == MIGRATEPAGE_SUCCESS ? new_hpage : hpage, false);
+		remove_migration_ptes(src,
+			rc == MIGRATEPAGE_SUCCESS ? dst : src, false);

 unlock_put_anon:
 	unlock_page(new_hpage);
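The part of remove_migration_pte() that changes most is how the target page is found: instead of arithmetic on struct page pointers, the per-PTE subpage is looked up inside the folio by index. Below is a hedged sketch of that lookup; the function and parameter names are hypothetical, with pgoff standing in for pvmw.pgoff from the real page_vma_mapped_walk().

/*
 * Illustrative sketch (not the commit's code) of picking the subpage of a
 * folio that corresponds to one mapped address.
 */
#include <linux/mm.h>
#include <linux/pagemap.h>

static struct page *subpage_for_address(struct folio *folio,
					struct vm_area_struct *vma,
					unsigned long address, pgoff_t pgoff)
{
	unsigned long idx = 0;

	/*
	 * pgoff is invalid for KSM pages, but KSM pages are never large,
	 * so idx stays 0 for them; hugetlb folios are mapped by a single
	 * huge entry and likewise use the head page.
	 */
	if (folio_test_large(folio) && !folio_test_hugetlb(folio))
		idx = linear_page_index(vma, address) - pgoff;

	return folio_page(folio, idx);
}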

mm/migrate_device.c

Lines changed: 10 additions & 5 deletions
@@ -376,15 +376,17 @@ static void migrate_vma_unmap(struct migrate_vma *migrate)

 	for (i = 0; i < npages && restore; i++) {
 		struct page *page = migrate_pfn_to_page(migrate->src[i]);
+		struct folio *folio;

 		if (!page || (migrate->src[i] & MIGRATE_PFN_MIGRATE))
 			continue;

-		remove_migration_ptes(page, page, false);
+		folio = page_folio(page);
+		remove_migration_ptes(folio, folio, false);

 		migrate->src[i] = 0;
-		unlock_page(page);
-		put_page(page);
+		folio_unlock(folio);
+		folio_put(folio);
 		restore--;
 	}
 }
@@ -729,6 +731,7 @@ void migrate_vma_finalize(struct migrate_vma *migrate)
 	unsigned long i;

 	for (i = 0; i < npages; i++) {
+		struct folio *dst, *src;
 		struct page *newpage = migrate_pfn_to_page(migrate->dst[i]);
 		struct page *page = migrate_pfn_to_page(migrate->src[i]);

@@ -748,8 +751,10 @@ void migrate_vma_finalize(struct migrate_vma *migrate)
 			newpage = page;
 		}

-		remove_migration_ptes(page, newpage, false);
-		unlock_page(page);
+		src = page_folio(page);
+		dst = page_folio(newpage);
+		remove_migration_ptes(src, dst, false);
+		folio_unlock(src);

 		if (is_zone_device_page(page))
 			put_page(page);
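The migrate_vma paths show the same restore idiom in folio terms: when a page in the batch cannot migrate, its migration entries are pointed back at the original page, which is then unlocked and released through its folio. A minimal sketch of that cleanup step follows; the function name is illustrative and not taken from the commit.

/*
 * Illustrative sketch (not the commit's code) of the restore path for a
 * page that stays where it is after a failed migration attempt.
 */
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>

static void restore_unmigrated_page(struct page *page)
{
	struct folio *folio = page_folio(page);

	/* Point the migration entries back at the original page. */
	remove_migration_ptes(folio, folio, false);

	/* Drop the page lock and the reference held for migration. */
	folio_unlock(folio);
	folio_put(folio);
}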
