
Commit 4b8554c

Author: Matthew Wilcox (Oracle)
mm/rmap: Convert try_to_migrate() to folios
Convert the callers to pass a folio and the try_to_migrate_one() worker
to use a folio throughout.  Fixes an assumption that a folio must be
<= PMD size.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Parent: 869f7ee

5 files changed, 42 additions & 35 deletions
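The interface change itself is the one-line prototype switch in include/linux/rmap.h below: try_to_migrate() now takes a struct folio * rather than a struct page *. As a rough sketch of the new calling convention (illustrative only, not code from this commit; the wrapper name is invented), a caller that still works in terms of struct page converts it with page_folio() first, which is exactly the pattern the mm/migrate.c and mm/migrate_device.c hunks adopt:

#include <linux/mm.h>
#include <linux/rmap.h>

/* Illustrative sketch only: adapting a page-based caller to the new
 * folio-taking try_to_migrate().  page_folio() maps any page, head or
 * tail, to the folio that contains it.
 */
static void unmap_page_for_migration(struct page *page, enum ttu_flags flags)
{
        struct folio *folio = page_folio(page);

        /* Before this commit: try_to_migrate(page, flags); */
        try_to_migrate(folio, flags);
}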


include/linux/rmap.h

Lines changed: 1 addition & 1 deletion
@@ -193,7 +193,7 @@ static inline void page_dup_rmap(struct page *page, bool compound)
 int folio_referenced(struct folio *, int is_locked,
                      struct mem_cgroup *memcg, unsigned long *vm_flags);
 
-void try_to_migrate(struct page *page, enum ttu_flags flags);
+void try_to_migrate(struct folio *folio, enum ttu_flags flags);
 void try_to_unmap(struct folio *, enum ttu_flags flags);
 
 int make_device_exclusive_range(struct mm_struct *mm, unsigned long start,

mm/huge_memory.c

Lines changed: 2 additions & 2 deletions
@@ -2262,8 +2262,8 @@ static void unmap_page(struct page *page)
          * pages can simply be left unmapped, then faulted back on demand.
          * If that is ever changed (perhaps for mlock), update remap_page().
          */
-        if (PageAnon(page))
-                try_to_migrate(page, ttu_flags);
+        if (folio_test_anon(folio))
+                try_to_migrate(folio, ttu_flags);
         else
                 try_to_unmap(folio, ttu_flags | TTU_IGNORE_MLOCK);

mm/migrate.c

Lines changed: 4 additions & 2 deletions
@@ -912,6 +912,7 @@ static int move_to_new_page(struct page *newpage, struct page *page,
 static int __unmap_and_move(struct page *page, struct page *newpage,
                                 int force, enum migrate_mode mode)
 {
+        struct folio *folio = page_folio(page);
         int rc = -EAGAIN;
         bool page_was_mapped = false;
         struct anon_vma *anon_vma = NULL;
@@ -1015,7 +1016,7 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
                 /* Establish migration ptes */
                 VM_BUG_ON_PAGE(PageAnon(page) && !PageKsm(page) && !anon_vma,
                                 page);
-                try_to_migrate(page, 0);
+                try_to_migrate(folio, 0);
                 page_was_mapped = true;
         }
 
@@ -1165,6 +1166,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
                                 enum migrate_mode mode, int reason,
                                 struct list_head *ret)
 {
+        struct folio *src = page_folio(hpage);
         int rc = -EAGAIN;
         int page_was_mapped = 0;
         struct page *new_hpage;
@@ -1241,7 +1243,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
                         ttu |= TTU_RMAP_LOCKED;
                 }
 
-                try_to_migrate(hpage, ttu);
+                try_to_migrate(src, ttu);
                 page_was_mapped = 1;
 
                 if (mapping_locked)

mm/migrate_device.c

Lines changed: 4 additions & 2 deletions
@@ -333,6 +333,7 @@ static void migrate_vma_unmap(struct migrate_vma *migrate)
 
         for (i = 0; i < npages; i++) {
                 struct page *page = migrate_pfn_to_page(migrate->src[i]);
+                struct folio *folio;
 
                 if (!page)
                         continue;
@@ -356,8 +357,9 @@ static void migrate_vma_unmap(struct migrate_vma *migrate)
                         put_page(page);
                 }
 
-                if (page_mapped(page))
-                        try_to_migrate(page, 0);
+                folio = page_folio(page);
+                if (folio_mapped(folio))
+                        try_to_migrate(folio, 0);
 
                 if (page_mapped(page) || !migrate_vma_check_page(page)) {
                         if (!is_zone_device_page(page)) {
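In the device-migration path the conversion happens per source page inside the loop; the retained context line above still tests page_mapped(page), which agrees with folio_mapped(folio) when the folio is a single page. A condensed sketch of the per-page shape (illustrative only; unmap_sources() is an invented helper, and the real loop in migrate_vma_unmap() does more per-page work than shown):

#include <linux/migrate.h>
#include <linux/rmap.h>

/* Illustrative sketch: install migration entries for every mapped
 * source page, converting each page to its folio first.
 */
static void unmap_sources(struct migrate_vma *migrate, unsigned long npages)
{
        unsigned long i;

        for (i = 0; i < npages; i++) {
                struct page *page = migrate_pfn_to_page(migrate->src[i]);
                struct folio *folio;

                if (!page)
                        continue;

                folio = page_folio(page);
                if (folio_mapped(folio))
                        try_to_migrate(folio, 0);
        }
}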

mm/rmap.c

Lines changed: 31 additions & 28 deletions
@@ -1706,7 +1706,7 @@ static bool try_to_migrate_one(struct page *page, struct vm_area_struct *vma,
 {
         struct folio *folio = page_folio(page);
         struct mm_struct *mm = vma->vm_mm;
-        DEFINE_PAGE_VMA_WALK(pvmw, page, vma, address, 0);
+        DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
         pte_t pteval;
         struct page *subpage;
         bool ret = true;
@@ -1740,7 +1740,7 @@ static bool try_to_migrate_one(struct page *page, struct vm_area_struct *vma,
         range.end = vma_address_end(&pvmw);
         mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
                                 address, range.end);
-        if (PageHuge(page)) {
+        if (folio_test_hugetlb(folio)) {
                 /*
                  * If sharing is possible, start and end will be adjusted
                  * accordingly.
@@ -1754,21 +1754,24 @@ static bool try_to_migrate_one(struct page *page, struct vm_area_struct *vma,
 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
                 /* PMD-mapped THP migration entry */
                 if (!pvmw.pte) {
-                        VM_BUG_ON_PAGE(PageHuge(page) ||
-                                       !PageTransCompound(page), page);
+                        subpage = folio_page(folio,
+                                pmd_pfn(*pvmw.pmd) - folio_pfn(folio));
+                        VM_BUG_ON_FOLIO(folio_test_hugetlb(folio) ||
+                                        !folio_test_pmd_mappable(folio), folio);
 
-                        set_pmd_migration_entry(&pvmw, page);
+                        set_pmd_migration_entry(&pvmw, subpage);
                         continue;
                 }
 #endif
 
                 /* Unexpected PMD-mapped THP? */
-                VM_BUG_ON_PAGE(!pvmw.pte, page);
+                VM_BUG_ON_FOLIO(!pvmw.pte, folio);
 
-                subpage = page - page_to_pfn(page) + pte_pfn(*pvmw.pte);
+                subpage = folio_page(folio,
+                                pte_pfn(*pvmw.pte) - folio_pfn(folio));
                 address = pvmw.address;
 
-                if (PageHuge(page) && !PageAnon(page)) {
+                if (folio_test_hugetlb(folio) && !folio_test_anon(folio)) {
                         /*
                          * To call huge_pmd_unshare, i_mmap_rwsem must be
                          * held in write mode. Caller needs to explicitly
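The subpage computation above is what removes the implicit assumption that a folio is at most PMD sized, as noted in the commit message: rather than handing set_pmd_migration_entry() the folio's first page, the mapped pfn is turned into an index within the folio. A worked example with invented numbers (assuming 4KiB pages and 2MiB PMDs):

        folio_pfn(folio)   = 0x100000   (first pfn of an order-10, 1024-page folio)
        pmd_pfn(*pvmw.pmd) = 0x100200   (this PMD maps the folio's second 2MiB)
        index              = 0x100200 - 0x100000 = 512
        subpage            = folio_page(folio, 512)

The migration entry therefore refers to the page actually mapped by that PMD rather than to the folio's head page; the two are only the same thing when the folio is no larger than a PMD.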
@@ -1806,15 +1809,15 @@ static bool try_to_migrate_one(struct page *page, struct vm_area_struct *vma,
                 flush_cache_page(vma, address, pte_pfn(*pvmw.pte));
                 pteval = ptep_clear_flush(vma, address, pvmw.pte);
 
-                /* Move the dirty bit to the page. Now the pte is gone. */
+                /* Set the dirty flag on the folio now the pte is gone. */
                 if (pte_dirty(pteval))
-                        set_page_dirty(page);
+                        folio_mark_dirty(folio);
 
                 /* Update high watermark before we lower rss */
                 update_hiwater_rss(mm);
 
-                if (is_zone_device_page(page)) {
-                        unsigned long pfn = page_to_pfn(page);
+                if (folio_is_zone_device(folio)) {
+                        unsigned long pfn = folio_pfn(folio);
                         swp_entry_t entry;
                         pte_t swp_pte;
 
@@ -1850,16 +1853,16 @@ static bool try_to_migrate_one(struct page *page, struct vm_area_struct *vma,
                          * changed when hugepage migrations to device private
                          * memory are supported.
                          */
-                        subpage = page;
-                } else if (PageHWPoison(page)) {
+                        subpage = &folio->page;
+                } else if (PageHWPoison(subpage)) {
                         pteval = swp_entry_to_pte(make_hwpoison_entry(subpage));
-                        if (PageHuge(page)) {
-                                hugetlb_count_sub(compound_nr(page), mm);
+                        if (folio_test_hugetlb(folio)) {
+                                hugetlb_count_sub(folio_nr_pages(folio), mm);
                                 set_huge_swap_pte_at(mm, address,
                                                      pvmw.pte, pteval,
                                                      vma_mmu_pagesize(vma));
                         } else {
-                                dec_mm_counter(mm, mm_counter(page));
+                                dec_mm_counter(mm, mm_counter(&folio->page));
                                 set_pte_at(mm, address, pvmw.pte, pteval);
                         }
 
@@ -1874,7 +1877,7 @@ static bool try_to_migrate_one(struct page *page, struct vm_area_struct *vma,
                          * migration) will not expect userfaults on already
                          * copied pages.
                          */
-                        dec_mm_counter(mm, mm_counter(page));
+                        dec_mm_counter(mm, mm_counter(&folio->page));
                         /* We have to invalidate as we cleared the pte */
                         mmu_notifier_invalidate_range(mm, address,
                                                       address + PAGE_SIZE);
@@ -1920,10 +1923,10 @@ static bool try_to_migrate_one(struct page *page, struct vm_area_struct *vma,
                  *
                  * See Documentation/vm/mmu_notifier.rst
                  */
-                page_remove_rmap(subpage, vma, PageHuge(page));
+                page_remove_rmap(subpage, vma, folio_test_hugetlb(folio));
                 if (vma->vm_flags & VM_LOCKED)
                         mlock_page_drain(smp_processor_id());
-                put_page(page);
+                folio_put(folio);
         }
 
         mmu_notifier_invalidate_range_end(&range);
@@ -1933,13 +1936,13 @@ static bool try_to_migrate_one(struct page *page, struct vm_area_struct *vma,
 
 /**
  * try_to_migrate - try to replace all page table mappings with swap entries
- * @page: the page to replace page table entries for
+ * @folio: the folio to replace page table entries for
  * @flags: action and flags
  *
- * Tries to remove all the page table entries which are mapping this page and
- * replace them with special swap entries. Caller must hold the page lock.
+ * Tries to remove all the page table entries which are mapping this folio and
+ * replace them with special swap entries. Caller must hold the folio lock.
  */
-void try_to_migrate(struct page *page, enum ttu_flags flags)
+void try_to_migrate(struct folio *folio, enum ttu_flags flags)
 {
         struct rmap_walk_control rwc = {
                 .rmap_one = try_to_migrate_one,
@@ -1956,7 +1959,7 @@ void try_to_migrate(struct page *page, enum ttu_flags flags)
                                         TTU_SYNC)))
                 return;
 
-        if (is_zone_device_page(page) && !is_device_private_page(page))
+        if (folio_is_zone_device(folio) && !folio_is_device_private(folio))
                 return;
 
         /*
@@ -1967,13 +1970,13 @@ void try_to_migrate(struct page *page, enum ttu_flags flags)
          * locking requirements of exec(), migration skips
          * temporary VMAs until after exec() completes.
          */
-        if (!PageKsm(page) && PageAnon(page))
+        if (!folio_test_ksm(folio) && folio_test_anon(folio))
                 rwc.invalid_vma = invalid_migration_vma;
 
         if (flags & TTU_RMAP_LOCKED)
-                rmap_walk_locked(page, &rwc);
+                rmap_walk_locked(&folio->page, &rwc);
         else
-                rmap_walk(page, &rwc);
+                rmap_walk(&folio->page, &rwc);
 }
 
 #ifdef CONFIG_DEVICE_PRIVATE
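The updated kernel-doc makes the locking requirement explicit: the caller must hold the folio lock. A minimal usage sketch (illustrative only; install_migration_entries() is an invented wrapper, and real callers typically already hold the lock as part of a larger migration sequence):

#include <linux/pagemap.h>
#include <linux/rmap.h>

/* Illustrative sketch: replace every mapping of @folio with migration
 * entries, taking the folio lock that try_to_migrate() requires.
 */
static void install_migration_entries(struct folio *folio)
{
        folio_lock(folio);
        try_to_migrate(folio, 0);
        folio_unlock(folio);
}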
