Commit 869f7ee

Author: Matthew Wilcox (Oracle)
mm/rmap: Convert try_to_unmap() to take a folio
Change all three callers and the worker function try_to_unmap_one().

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Parent: af28a98

7 files changed: 62 additions & 53 deletions
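
The conversion pattern is the same at every call site: a caller that already has a folio passes it straight through, and a caller that still holds only a struct page bridges to the new interface with page_folio(). A minimal before/after sketch (hypothetical caller for illustration; example_unmap() is not part of the commit):

        #include <linux/rmap.h>

        /* Hypothetical caller, for illustration only. */
        static void example_unmap(struct page *page)
        {
                /* page_folio() maps any page, head or tail, to its folio. */
                struct folio *folio = page_folio(page);

                /* Before this commit: try_to_unmap(page, TTU_IGNORE_MLOCK); */
                try_to_unmap(folio, TTU_IGNORE_MLOCK);
        }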

include/linux/rmap.h

Lines changed: 2 additions & 2 deletions
@@ -194,7 +194,7 @@ int folio_referenced(struct folio *, int is_locked,
                  struct mem_cgroup *memcg, unsigned long *vm_flags);
 
 void try_to_migrate(struct page *page, enum ttu_flags flags);
-void try_to_unmap(struct page *, enum ttu_flags flags);
+void try_to_unmap(struct folio *, enum ttu_flags flags);
 
 int make_device_exclusive_range(struct mm_struct *mm, unsigned long start,
                 unsigned long end, struct page **pages,

@@ -309,7 +309,7 @@ static inline int folio_referenced(struct folio *folio, int is_locked,
         return 0;
 }
 
-static inline void try_to_unmap(struct page *page, enum ttu_flags flags)
+static inline void try_to_unmap(struct folio *folio, enum ttu_flags flags)
 {
 }

mm/huge_memory.c

Lines changed: 2 additions & 1 deletion
@@ -2251,6 +2251,7 @@ void vma_adjust_trans_huge(struct vm_area_struct *vma,
 
 static void unmap_page(struct page *page)
 {
+        struct folio *folio = page_folio(page);
         enum ttu_flags ttu_flags = TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD |
                 TTU_SYNC;
 
@@ -2264,7 +2265,7 @@ static void unmap_page(struct page *page)
         if (PageAnon(page))
                 try_to_migrate(page, ttu_flags);
         else
-                try_to_unmap(page, ttu_flags | TTU_IGNORE_MLOCK);
+                try_to_unmap(folio, ttu_flags | TTU_IGNORE_MLOCK);
 
         VM_WARN_ON_ONCE_PAGE(page_mapped(page), page);
 }

mm/khugepaged.c

Lines changed: 2 additions & 1 deletion
@@ -1834,7 +1834,8 @@ static void collapse_file(struct mm_struct *mm,
                 }
 
                 if (page_mapped(page))
-                        try_to_unmap(page, TTU_IGNORE_MLOCK | TTU_BATCH_FLUSH);
+                        try_to_unmap(page_folio(page),
+                                        TTU_IGNORE_MLOCK | TTU_BATCH_FLUSH);
 
                 xas_lock_irq(&xas);
                 xas_set(&xas, index);

mm/memory-failure.c

Lines changed: 4 additions & 3 deletions
@@ -1347,6 +1347,7 @@ static int get_hwpoison_page(struct page *p, unsigned long flags)
 static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
                                   int flags, struct page *hpage)
 {
+        struct folio *folio = page_folio(hpage);
         enum ttu_flags ttu = TTU_IGNORE_MLOCK | TTU_SYNC;
         struct address_space *mapping;
         LIST_HEAD(tokill);

@@ -1412,7 +1413,7 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
         collect_procs(hpage, &tokill, flags & MF_ACTION_REQUIRED);
 
         if (!PageHuge(hpage)) {
-                try_to_unmap(hpage, ttu);
+                try_to_unmap(folio, ttu);
         } else {
                 if (!PageAnon(hpage)) {
                         /*

@@ -1424,12 +1425,12 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
                  */
                 mapping = hugetlb_page_mapping_lock_write(hpage);
                 if (mapping) {
-                        try_to_unmap(hpage, ttu|TTU_RMAP_LOCKED);
+                        try_to_unmap(folio, ttu|TTU_RMAP_LOCKED);
                         i_mmap_unlock_write(mapping);
                 } else
                         pr_info("Memory failure: %#lx: could not lock mapping for mapped huge page\n", pfn);
         } else {
-                try_to_unmap(hpage, ttu);
+                try_to_unmap(folio, ttu);
         }
 }

mm/memory_hotplug.c

Lines changed: 8 additions & 5 deletions
@@ -1690,10 +1690,13 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
                               DEFAULT_RATELIMIT_BURST);
 
         for (pfn = start_pfn; pfn < end_pfn; pfn++) {
+                struct folio *folio;
+
                 if (!pfn_valid(pfn))
                         continue;
                 page = pfn_to_page(pfn);
-                head = compound_head(page);
+                folio = page_folio(page);
+                head = &folio->page;
 
                 if (PageHuge(page)) {
                         pfn = page_to_pfn(head) + compound_nr(head) - 1;

@@ -1710,10 +1713,10 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
                  * the unmap as the catch all safety net).
                  */
                 if (PageHWPoison(page)) {
-                        if (WARN_ON(PageLRU(page)))
-                                isolate_lru_page(page);
-                        if (page_mapped(page))
-                                try_to_unmap(page, TTU_IGNORE_MLOCK);
+                        if (WARN_ON(folio_test_lru(folio)))
+                                folio_isolate_lru(folio);
+                        if (folio_mapped(folio))
+                                try_to_unmap(folio, TTU_IGNORE_MLOCK);
                         continue;
                 }
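
Note the head = &folio->page idiom above: page_folio() already resolves a tail page to its containing folio, so the address of the folio's first page is the same pointer compound_head() used to return. A short sketch of the equivalence (illustrative only):

        struct folio *folio = page_folio(page);
        struct page *head = &folio->page;

        /* Same result as the old code: head == compound_head(page). */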

mm/rmap.c

Lines changed: 43 additions & 40 deletions
@@ -1412,7 +1412,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 {
         struct folio *folio = page_folio(page);
         struct mm_struct *mm = vma->vm_mm;
-        DEFINE_PAGE_VMA_WALK(pvmw, page, vma, address, 0);
+        DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
         pte_t pteval;
         struct page *subpage;
         bool ret = true;

@@ -1436,13 +1436,13 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
          * For hugetlb, it could be much worse if we need to do pud
          * invalidation in the case of pmd sharing.
          *
-         * Note that the page can not be free in this function as call of
-         * try_to_unmap() must hold a reference on the page.
+         * Note that the folio can not be freed in this function as call of
+         * try_to_unmap() must hold a reference on the folio.
          */
         range.end = vma_address_end(&pvmw);
         mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
                                 address, range.end);
-        if (PageHuge(page)) {
+        if (folio_test_hugetlb(folio)) {
                 /*
                  * If sharing is possible, start and end will be adjusted
                  * accordingly.

@@ -1454,24 +1454,25 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 
         while (page_vma_mapped_walk(&pvmw)) {
                 /* Unexpected PMD-mapped THP? */
-                VM_BUG_ON_PAGE(!pvmw.pte, page);
+                VM_BUG_ON_FOLIO(!pvmw.pte, folio);
 
                 /*
-                 * If the page is in an mlock()d vma, we must not swap it out.
+                 * If the folio is in an mlock()d vma, we must not swap it out.
                  */
                 if (!(flags & TTU_IGNORE_MLOCK) &&
                     (vma->vm_flags & VM_LOCKED)) {
                         /* Restore the mlock which got missed */
-                        mlock_vma_page(page, vma, false);
+                        mlock_vma_folio(folio, vma, false);
                         page_vma_mapped_walk_done(&pvmw);
                         ret = false;
                         break;
                 }
 
-                subpage = page - page_to_pfn(page) + pte_pfn(*pvmw.pte);
+                subpage = folio_page(folio,
+                                        pte_pfn(*pvmw.pte) - folio_pfn(folio));
                 address = pvmw.address;
 
-                if (PageHuge(page) && !PageAnon(page)) {
+                if (folio_test_hugetlb(folio) && !folio_test_anon(folio)) {
                         /*
                          * To call huge_pmd_unshare, i_mmap_rwsem must be
                          * held in write mode. Caller needs to explicitly
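
The new subpage calculation is the old pointer arithmetic restated in folio terms: rather than walking the memmap back to PFN 0 and forward to the mapped PFN, it indexes into the folio by that PFN's offset from the folio's first page, so it only assumes the memmap is contiguous within a single folio. A sketch of the equivalence (illustrative, using the variables of try_to_unmap_one()):

        unsigned long pfn = pte_pfn(*pvmw.pte);

        /* Old: memmap-wide pointer arithmetic. */
        subpage = page - page_to_pfn(page) + pfn;

        /* New: folio_page(folio, n) returns the nth page of the folio. */
        subpage = folio_page(folio, pfn - folio_pfn(folio));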
@@ -1510,7 +1511,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                         if (should_defer_flush(mm, flags)) {
                                 /*
                                  * We clear the PTE but do not flush so potentially
-                                 * a remote CPU could still be writing to the page.
+                                 * a remote CPU could still be writing to the folio.
                                  * If the entry was previously clean then the
                                  * architecture must guarantee that a clear->dirty
                                  * transition on a cached TLB entry is written through

@@ -1523,22 +1524,22 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                         pteval = ptep_clear_flush(vma, address, pvmw.pte);
                 }
 
-                /* Move the dirty bit to the page. Now the pte is gone. */
+                /* Set the dirty flag on the folio now the pte is gone. */
                 if (pte_dirty(pteval))
-                        set_page_dirty(page);
+                        folio_mark_dirty(folio);
 
                 /* Update high watermark before we lower rss */
                 update_hiwater_rss(mm);
 
-                if (PageHWPoison(page) && !(flags & TTU_IGNORE_HWPOISON)) {
+                if (PageHWPoison(subpage) && !(flags & TTU_IGNORE_HWPOISON)) {
                         pteval = swp_entry_to_pte(make_hwpoison_entry(subpage));
-                        if (PageHuge(page)) {
-                                hugetlb_count_sub(compound_nr(page), mm);
+                        if (folio_test_hugetlb(folio)) {
+                                hugetlb_count_sub(folio_nr_pages(folio), mm);
                                 set_huge_swap_pte_at(mm, address,
                                                      pvmw.pte, pteval,
                                                      vma_mmu_pagesize(vma));
                         } else {
-                                dec_mm_counter(mm, mm_counter(page));
+                                dec_mm_counter(mm, mm_counter(&folio->page));
                                 set_pte_at(mm, address, pvmw.pte, pteval);
                         }
 

@@ -1553,18 +1554,19 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                          * migration) will not expect userfaults on already
                          * copied pages.
                          */
-                        dec_mm_counter(mm, mm_counter(page));
+                        dec_mm_counter(mm, mm_counter(&folio->page));
                         /* We have to invalidate as we cleared the pte */
                         mmu_notifier_invalidate_range(mm, address,
                                                       address + PAGE_SIZE);
-                } else if (PageAnon(page)) {
+                } else if (folio_test_anon(folio)) {
                         swp_entry_t entry = { .val = page_private(subpage) };
                         pte_t swp_pte;
                         /*
                          * Store the swap location in the pte.
                          * See handle_pte_fault() ...
                          */
-                        if (unlikely(PageSwapBacked(page) != PageSwapCache(page))) {
+                        if (unlikely(folio_test_swapbacked(folio) !=
+                                        folio_test_swapcache(folio))) {
                                 WARN_ON_ONCE(1);
                                 ret = false;
                                 /* We have to invalidate as we cleared the pte */

@@ -1575,8 +1577,8 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                         }
 
                         /* MADV_FREE page check */
-                        if (!PageSwapBacked(page)) {
-                                if (!PageDirty(page)) {
+                        if (!folio_test_swapbacked(folio)) {
+                                if (!folio_test_dirty(folio)) {
                                         /* Invalidate as we cleared the pte */
                                         mmu_notifier_invalidate_range(mm,
                                                 address, address + PAGE_SIZE);

@@ -1585,11 +1587,11 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                                 }
 
                                 /*
-                                 * If the page was redirtied, it cannot be
+                                 * If the folio was redirtied, it cannot be
                                  * discarded. Remap the page to page table.
                                  */
                                 set_pte_at(mm, address, pvmw.pte, pteval);
-                                SetPageSwapBacked(page);
+                                folio_set_swapbacked(folio);
                                 ret = false;
                                 page_vma_mapped_walk_done(&pvmw);
                                 break;

@@ -1626,16 +1628,17 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                                                       address + PAGE_SIZE);
                         } else {
                                 /*
-                                 * This is a locked file-backed page, thus it cannot
-                                 * be removed from the page cache and replaced by a new
-                                 * page before mmu_notifier_invalidate_range_end, so no
-                                 * concurrent thread might update its page table to
-                                 * point at new page while a device still is using this
-                                 * page.
+                                 * This is a locked file-backed folio,
+                                 * so it cannot be removed from the page
+                                 * cache and replaced by a new folio before
+                                 * mmu_notifier_invalidate_range_end, so no
+                                 * concurrent thread might update its page table
+                                 * to point at a new folio while a device is
+                                 * still using this folio.
                                  *
                                  * See Documentation/vm/mmu_notifier.rst
                                  */
-                                dec_mm_counter(mm, mm_counter_file(page));
+                                dec_mm_counter(mm, mm_counter_file(&folio->page));
                         }
 discard:
                 /*

@@ -1645,10 +1648,10 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                  *
                  * See Documentation/vm/mmu_notifier.rst
                  */
-                page_remove_rmap(subpage, vma, PageHuge(page));
+                page_remove_rmap(subpage, vma, folio_test_hugetlb(folio));
                 if (vma->vm_flags & VM_LOCKED)
                         mlock_page_drain(smp_processor_id());
-                put_page(page);
+                folio_put(folio);
         }
 
         mmu_notifier_invalidate_range_end(&range);

@@ -1667,17 +1670,17 @@ static int page_not_mapped(struct page *page)
 }
 
 /**
- * try_to_unmap - try to remove all page table mappings to a page
- * @page: the page to get unmapped
+ * try_to_unmap - Try to remove all page table mappings to a folio.
+ * @folio: The folio to unmap.
  * @flags: action and flags
  *
  * Tries to remove all the page table entries which are mapping this
- * page, used in the pageout path. Caller must hold the page lock.
+ * folio. It is the caller's responsibility to check if the folio is
+ * still mapped if needed (use TTU_SYNC to prevent accounting races).
  *
- * It is the caller's responsibility to check if the page is still
- * mapped when needed (use TTU_SYNC to prevent accounting races).
+ * Context: Caller must hold the folio lock.
  */
-void try_to_unmap(struct page *page, enum ttu_flags flags)
+void try_to_unmap(struct folio *folio, enum ttu_flags flags)
 {
         struct rmap_walk_control rwc = {
                 .rmap_one = try_to_unmap_one,

@@ -1687,9 +1690,9 @@ void try_to_unmap(struct page *page, enum ttu_flags flags)
         };
 
         if (flags & TTU_RMAP_LOCKED)
-                rmap_walk_locked(page, &rwc);
+                rmap_walk_locked(&folio->page, &rwc);
         else
-                rmap_walk(page, &rwc);
 }
 
 /*
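
rmap_walk() and rmap_walk_locked() still take a struct page at this point in the series, so try_to_unmap() bridges back with &folio->page, pending their own folio conversion later in the series. On the caller side, the kernel-doc now spells out the locking contract; a hedged usage sketch under that contract (hypothetical caller):

        struct folio *folio = page_folio(page);

        folio_lock(folio);              /* try_to_unmap() requires the folio lock */
        if (folio_mapped(folio))
                try_to_unmap(folio, TTU_IGNORE_MLOCK);
        folio_unlock(folio);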

mm/vmscan.c

Lines changed: 1 addition & 1 deletion
@@ -1768,7 +1768,7 @@ static unsigned int shrink_page_list(struct list_head *page_list,
                 if (unlikely(PageTransHuge(page)))
                         flags |= TTU_SPLIT_HUGE_PMD;
 
-                try_to_unmap(page, flags);
+                try_to_unmap(folio, flags);
                 if (page_mapped(page)) {
                         stat->nr_unmap_fail += nr_pages;
                         if (!was_swapbacked && PageSwapBacked(page))
