@@ -1706,7 +1706,7 @@ static bool try_to_migrate_one(struct page *page, struct vm_area_struct *vma,
 {
	struct folio *folio = page_folio(page);
	struct mm_struct *mm = vma->vm_mm;
-	DEFINE_PAGE_VMA_WALK(pvmw, page, vma, address, 0);
+	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
	pte_t pteval;
	struct page *subpage;
	bool ret = true;
@@ -1740,7 +1740,7 @@ static bool try_to_migrate_one(struct page *page, struct vm_area_struct *vma,
	range.end = vma_address_end(&pvmw);
	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
				address, range.end);
-	if (PageHuge(page)) {
+	if (folio_test_hugetlb(folio)) {
		/*
		 * If sharing is possible, start and end will be adjusted
		 * accordingly.
@@ -1754,21 +1754,24 @@ static bool try_to_migrate_one(struct page *page, struct vm_area_struct *vma,
 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
		/* PMD-mapped THP migration entry */
		if (!pvmw.pte) {
-			VM_BUG_ON_PAGE(PageHuge(page) ||
-				       !PageTransCompound(page), page);
+			subpage = folio_page(folio,
+					pmd_pfn(*pvmw.pmd) - folio_pfn(folio));
+			VM_BUG_ON_FOLIO(folio_test_hugetlb(folio) ||
+					!folio_test_pmd_mappable(folio), folio);

-			set_pmd_migration_entry(&pvmw, page);
+			set_pmd_migration_entry(&pvmw, subpage);
			continue;
		}
 #endif

		/* Unexpected PMD-mapped THP? */
-		VM_BUG_ON_PAGE(!pvmw.pte, page);
+		VM_BUG_ON_FOLIO(!pvmw.pte, folio);

-		subpage = page - page_to_pfn(page) + pte_pfn(*pvmw.pte);
+		subpage = folio_page(folio,
+				pte_pfn(*pvmw.pte) - folio_pfn(folio));
		address = pvmw.address;

-		if (PageHuge(page) && !PageAnon(page)) {
+		if (folio_test_hugetlb(folio) && !folio_test_anon(folio)) {
			/*
			 * To call huge_pmd_unshare, i_mmap_rwsem must be
			 * held in write mode. Caller needs to explicitly
@@ -1806,15 +1809,15 @@ static bool try_to_migrate_one(struct page *page, struct vm_area_struct *vma,
		flush_cache_page(vma, address, pte_pfn(*pvmw.pte));
		pteval = ptep_clear_flush(vma, address, pvmw.pte);

-		/* Move the dirty bit to the page. Now the pte is gone. */
+		/* Set the dirty flag on the folio now the pte is gone. */
		if (pte_dirty(pteval))
-			set_page_dirty(page);
+			folio_mark_dirty(folio);

		/* Update high watermark before we lower rss */
		update_hiwater_rss(mm);

-		if (is_zone_device_page(page)) {
-			unsigned long pfn = page_to_pfn(page);
+		if (folio_is_zone_device(folio)) {
+			unsigned long pfn = folio_pfn(folio);
			swp_entry_t entry;
			pte_t swp_pte;
@@ -1850,16 +1853,16 @@ static bool try_to_migrate_one(struct page *page, struct vm_area_struct *vma,
			 * changed when hugepage migrations to device private
			 * memory are supported.
			 */
-			subpage = page;
-		} else if (PageHWPoison(page)) {
+			subpage = &folio->page;
+		} else if (PageHWPoison(subpage)) {
			pteval = swp_entry_to_pte(make_hwpoison_entry(subpage));
-			if (PageHuge(page)) {
-				hugetlb_count_sub(compound_nr(page), mm);
+			if (folio_test_hugetlb(folio)) {
+				hugetlb_count_sub(folio_nr_pages(folio), mm);
				set_huge_swap_pte_at(mm, address,
						     pvmw.pte, pteval,
						     vma_mmu_pagesize(vma));
			} else {
-				dec_mm_counter(mm, mm_counter(page));
+				dec_mm_counter(mm, mm_counter(&folio->page));
				set_pte_at(mm, address, pvmw.pte, pteval);
			}

@@ -1874,7 +1877,7 @@ static bool try_to_migrate_one(struct page *page, struct vm_area_struct *vma,
			 * migration) will not expect userfaults on already
			 * copied pages.
			 */
-			dec_mm_counter(mm, mm_counter(page));
+			dec_mm_counter(mm, mm_counter(&folio->page));
			/* We have to invalidate as we cleared the pte */
			mmu_notifier_invalidate_range(mm, address,
						      address + PAGE_SIZE);
@@ -1920,10 +1923,10 @@ static bool try_to_migrate_one(struct page *page, struct vm_area_struct *vma,
		 *
		 * See Documentation/vm/mmu_notifier.rst
		 */
-		page_remove_rmap(subpage, vma, PageHuge(page));
+		page_remove_rmap(subpage, vma, folio_test_hugetlb(folio));
		if (vma->vm_flags & VM_LOCKED)
			mlock_page_drain(smp_processor_id());
-		put_page(page);
+		folio_put(folio);
	}

	mmu_notifier_invalidate_range_end(&range);
@@ -1933,13 +1936,13 @@ static bool try_to_migrate_one(struct page *page, struct vm_area_struct *vma,

 /**
  * try_to_migrate - try to replace all page table mappings with swap entries
- * @page: the page to replace page table entries for
+ * @folio: the folio to replace page table entries for
  * @flags: action and flags
  *
- * Tries to remove all the page table entries which are mapping this page and
- * replace them with special swap entries. Caller must hold the page lock.
+ * Tries to remove all the page table entries which are mapping this folio and
+ * replace them with special swap entries. Caller must hold the folio lock.
  */
-void try_to_migrate(struct page *page, enum ttu_flags flags)
+void try_to_migrate(struct folio *folio, enum ttu_flags flags)
 {
	struct rmap_walk_control rwc = {
		.rmap_one = try_to_migrate_one,
@@ -1956,7 +1959,7 @@ void try_to_migrate(struct page *page, enum ttu_flags flags)
					TTU_SYNC)))
		return;

-	if (is_zone_device_page(page) && !is_device_private_page(page))
+	if (folio_is_zone_device(folio) && !folio_is_device_private(folio))
		return;

	/*
@@ -1967,13 +1970,13 @@ void try_to_migrate(struct page *page, enum ttu_flags flags)
	 * locking requirements of exec(), migration skips
	 * temporary VMAs until after exec() completes.
	 */
-	if (!PageKsm(page) && PageAnon(page))
+	if (!folio_test_ksm(folio) && folio_test_anon(folio))
		rwc.invalid_vma = invalid_migration_vma;

	if (flags & TTU_RMAP_LOCKED)
-		rmap_walk_locked(page, &rwc);
+		rmap_walk_locked(&folio->page, &rwc);
	else
-		rmap_walk(page, &rwc);
+		rmap_walk(&folio->page, &rwc);
 }

 #ifdef CONFIG_DEVICE_PRIVATE
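
For orientation (an illustrative note, not part of the commit): after this change, a caller that still holds a struct page * converts it to its folio before calling try_to_migrate(). Below is a minimal sketch under that assumption; example_migrate_unmap() is a hypothetical helper, and passing zero flags is only for illustration.

#include <linux/rmap.h>		/* try_to_migrate(), TTU_* flags */
#include <linux/pagemap.h>	/* folio_lock(), folio_unlock() */

/* Hypothetical caller showing the page -> folio conversion. */
static void example_migrate_unmap(struct page *page)
{
	struct folio *folio = page_folio(page);

	/* The kerneldoc above requires the caller to hold the folio lock. */
	folio_lock(folio);
	try_to_migrate(folio, 0);
	folio_unlock(folio);
}

Note that the rmap walk itself still traverses struct page (rmap_walk(&folio->page, &rwc) above), so try_to_migrate_one() keeps its page-based callback signature and derives the folio internally via page_folio().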