@@ -1412,7 +1412,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 {
 	struct folio *folio = page_folio(page);
 	struct mm_struct *mm = vma->vm_mm;
-	DEFINE_PAGE_VMA_WALK(pvmw, page, vma, address, 0);
+	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
 	pte_t pteval;
 	struct page *subpage;
 	bool ret = true;
@@ -1436,13 +1436,13 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 	 * For hugetlb, it could be much worse if we need to do pud
 	 * invalidation in the case of pmd sharing.
 	 *
-	 * Note that the page can not be free in this function as call of
-	 * try_to_unmap() must hold a reference on the page.
+	 * Note that the folio can not be freed in this function as call of
+	 * try_to_unmap() must hold a reference on the folio.
 	 */
 	range.end = vma_address_end(&pvmw);
 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
 				address, range.end);
-	if (PageHuge(page)) {
+	if (folio_test_hugetlb(folio)) {
 		/*
 		 * If sharing is possible, start and end will be adjusted
 		 * accordingly.
@@ -1454,24 +1454,25 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 
 	while (page_vma_mapped_walk(&pvmw)) {
 		/* Unexpected PMD-mapped THP? */
-		VM_BUG_ON_PAGE(!pvmw.pte, page);
+		VM_BUG_ON_FOLIO(!pvmw.pte, folio);
 
 		/*
-		 * If the page is in an mlock()d vma, we must not swap it out.
+		 * If the folio is in an mlock()d vma, we must not swap it out.
 		 */
 		if (!(flags & TTU_IGNORE_MLOCK) &&
 		    (vma->vm_flags & VM_LOCKED)) {
 			/* Restore the mlock which got missed */
-			mlock_vma_page(page, vma, false);
+			mlock_vma_folio(folio, vma, false);
 			page_vma_mapped_walk_done(&pvmw);
 			ret = false;
 			break;
 		}
 
-		subpage = page - page_to_pfn(page) + pte_pfn(*pvmw.pte);
+		subpage = folio_page(folio,
+					pte_pfn(*pvmw.pte) - folio_pfn(folio));
 		address = pvmw.address;
 
-		if (PageHuge(page) && !PageAnon(page)) {
+		if (folio_test_hugetlb(folio) && !folio_test_anon(folio)) {
 			/*
 			 * To call huge_pmd_unshare, i_mmap_rwsem must be
 			 * held in write mode.  Caller needs to explicitly
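A note on the subpage calculation in the hunk above: both the old and the new expression pick out the struct page within the compound page whose pfn matches the pte; the new form simply phrases it as an index into the folio. A minimal worked sketch of the equivalence, assuming a vmemmap layout where nth_page() is plain pointer addition (so folio_page(folio, n) is &folio->page + n):

    /* Illustrative only, not part of the patch. */
    old: subpage = page - page_to_pfn(page) + pte_pfn(*pvmw.pte);
    new: subpage = folio_page(folio, pte_pfn(*pvmw.pte) - folio_pfn(folio))
                 = &folio->page + (pte_pfn(*pvmw.pte) - folio_pfn(folio))
                 = &folio->page - folio_pfn(folio) + pte_pfn(*pvmw.pte);

Since page and &folio->page lie in the same folio, page - page_to_pfn(page) equals &folio->page - folio_pfn(folio), so both expressions yield the same subpage.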
@@ -1510,7 +1511,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 		if (should_defer_flush(mm, flags)) {
 			/*
 			 * We clear the PTE but do not flush so potentially
-			 * a remote CPU could still be writing to the page.
+			 * a remote CPU could still be writing to the folio.
 			 * If the entry was previously clean then the
 			 * architecture must guarantee that a clear->dirty
 			 * transition on a cached TLB entry is written through
@@ -1523,22 +1524,22 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 			pteval = ptep_clear_flush(vma, address, pvmw.pte);
 		}
 
-		/* Move the dirty bit to the page. Now the pte is gone. */
+		/* Set the dirty flag on the folio now the pte is gone. */
 		if (pte_dirty(pteval))
-			set_page_dirty(page);
+			folio_mark_dirty(folio);
 
 		/* Update high watermark before we lower rss */
 		update_hiwater_rss(mm);
 
-		if (PageHWPoison(page) && !(flags & TTU_IGNORE_HWPOISON)) {
+		if (PageHWPoison(subpage) && !(flags & TTU_IGNORE_HWPOISON)) {
 			pteval = swp_entry_to_pte(make_hwpoison_entry(subpage));
-			if (PageHuge(page)) {
-				hugetlb_count_sub(compound_nr(page), mm);
+			if (folio_test_hugetlb(folio)) {
+				hugetlb_count_sub(folio_nr_pages(folio), mm);
 				set_huge_swap_pte_at(mm, address,
 						     pvmw.pte, pteval,
 						     vma_mmu_pagesize(vma));
 			} else {
-				dec_mm_counter(mm, mm_counter(page));
+				dec_mm_counter(mm, mm_counter(&folio->page));
 				set_pte_at(mm, address, pvmw.pte, pteval);
 			}
 
@@ -1553,18 +1554,19 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 			 * migration) will not expect userfaults on already
 			 * copied pages.
 			 */
-			dec_mm_counter(mm, mm_counter(page));
+			dec_mm_counter(mm, mm_counter(&folio->page));
 			/* We have to invalidate as we cleared the pte */
 			mmu_notifier_invalidate_range(mm, address,
 						      address + PAGE_SIZE);
-		} else if (PageAnon(page)) {
+		} else if (folio_test_anon(folio)) {
 			swp_entry_t entry = { .val = page_private(subpage) };
 			pte_t swp_pte;
 			/*
 			 * Store the swap location in the pte.
 			 * See handle_pte_fault() ...
 			 */
-			if (unlikely(PageSwapBacked(page) != PageSwapCache(page))) {
+			if (unlikely(folio_test_swapbacked(folio) !=
+					folio_test_swapcache(folio))) {
 				WARN_ON_ONCE(1);
 				ret = false;
 				/* We have to invalidate as we cleared the pte */
@@ -1575,8 +1577,8 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 			}
 
 			/* MADV_FREE page check */
-			if (!PageSwapBacked(page)) {
-				if (!PageDirty(page)) {
+			if (!folio_test_swapbacked(folio)) {
+				if (!folio_test_dirty(folio)) {
 					/* Invalidate as we cleared the pte */
 					mmu_notifier_invalidate_range(mm,
 						address, address + PAGE_SIZE);
@@ -1585,11 +1587,11 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 				}
 
 				/*
-				 * If the page was redirtied, it cannot be
+				 * If the folio was redirtied, it cannot be
 				 * discarded. Remap the page to page table.
 				 */
 				set_pte_at(mm, address, pvmw.pte, pteval);
-				SetPageSwapBacked(page);
+				folio_set_swapbacked(folio);
 				ret = false;
 				page_vma_mapped_walk_done(&pvmw);
 				break;
@@ -1626,16 +1628,17 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 						      address + PAGE_SIZE);
 		} else {
 			/*
-			 * This is a locked file-backed page, thus it cannot
-			 * be removed from the page cache and replaced by a new
-			 * page before mmu_notifier_invalidate_range_end, so no
-			 * concurrent thread might update its page table to
-			 * point at new page while a device still is using this
-			 * page.
+			 * This is a locked file-backed folio,
+			 * so it cannot be removed from the page
+			 * cache and replaced by a new folio before
+			 * mmu_notifier_invalidate_range_end, so no
+			 * concurrent thread might update its page table
+			 * to point at a new folio while a device is
+			 * still using this folio.
 			 *
 			 * See Documentation/vm/mmu_notifier.rst
 			 */
-			dec_mm_counter(mm, mm_counter_file(page));
+			dec_mm_counter(mm, mm_counter_file(&folio->page));
 		}
 discard:
 		/*
@@ -1645,10 +1648,10 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 		 *
 		 * See Documentation/vm/mmu_notifier.rst
 		 */
-		page_remove_rmap(subpage, vma, PageHuge(page));
+		page_remove_rmap(subpage, vma, folio_test_hugetlb(folio));
 		if (vma->vm_flags & VM_LOCKED)
 			mlock_page_drain(smp_processor_id());
-		put_page(page);
+		folio_put(folio);
 	}
 
 	mmu_notifier_invalidate_range_end(&range);
@@ -1667,17 +1670,17 @@ static int page_not_mapped(struct page *page)
 }
 
 /**
- * try_to_unmap - try to remove all page table mappings to a page
- * @page: the page to get unmapped
+ * try_to_unmap - Try to remove all page table mappings to a folio.
+ * @folio: The folio to unmap.
  * @flags: action and flags
  *
  * Tries to remove all the page table entries which are mapping this
- * page, used in the pageout path. Caller must hold the page lock.
+ * folio. It is the caller's responsibility to check if the folio is
+ * still mapped if needed (use TTU_SYNC to prevent accounting races).
  *
- * It is the caller's responsibility to check if the page is still
- * mapped when needed (use TTU_SYNC to prevent accounting races).
+ * Context: Caller must hold the folio lock.
  */
-void try_to_unmap(struct page *page, enum ttu_flags flags)
+void try_to_unmap(struct folio *folio, enum ttu_flags flags)
 {
 	struct rmap_walk_control rwc = {
 		.rmap_one = try_to_unmap_one,
@@ -1687,9 +1690,9 @@ void try_to_unmap(struct page *page, enum ttu_flags flags)
 	};
 
 	if (flags & TTU_RMAP_LOCKED)
-		rmap_walk_locked(page, &rwc);
+		rmap_walk_locked(&folio->page, &rwc);
 	else
-		rmap_walk(page, &rwc);
+		rmap_walk(&folio->page, &rwc);
 }
 
 /*
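With this change try_to_unmap() takes the folio directly and still returns void, so a caller checks the outcome itself via folio_mapped(). A minimal caller-side sketch, not taken from this commit (the TTU_SYNC flag and the recheck pattern follow the kernel-doc above; the surrounding reclaim logic is assumed):

    /* Illustrative sketch only -- the caller must already hold the folio lock. */
    VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
    if (folio_mapped(folio))
            try_to_unmap(folio, TTU_SYNC);
    if (folio_mapped(folio)) {
            /* Still mapped (e.g. a racing fault); leave it for a later pass. */
    }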