@@ -174,30 +174,32 @@ void putback_movable_pages(struct list_head *l)
 static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma,
 				 unsigned long addr, void *old)
 {
-	DEFINE_PAGE_VMA_WALK(pvmw, (struct page *)old, vma, addr,
-				PVMW_SYNC | PVMW_MIGRATION);
-	struct page *new;
-	pte_t pte;
-	swp_entry_t entry;
+	struct folio *folio = page_folio(page);
+	DEFINE_FOLIO_VMA_WALK(pvmw, old, vma, addr, PVMW_SYNC | PVMW_MIGRATION);
 
 	VM_BUG_ON_PAGE(PageTail(page), page);
 	while (page_vma_mapped_walk(&pvmw)) {
-		if (PageKsm(page))
-			new = page;
-		else
-			new = page - pvmw.pgoff +
-				linear_page_index(vma, pvmw.address);
+		pte_t pte;
+		swp_entry_t entry;
+		struct page *new;
+		unsigned long idx = 0;
+
+		/* pgoff is invalid for ksm pages, but they are never large */
+		if (folio_test_large(folio) && !folio_test_hugetlb(folio))
+			idx = linear_page_index(vma, pvmw.address) - pvmw.pgoff;
+		new = folio_page(folio, idx);
 
 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
 		/* PMD-mapped THP migration entry */
 		if (!pvmw.pte) {
-			VM_BUG_ON_PAGE(PageHuge(page) || !PageTransCompound(page), page);
+			VM_BUG_ON_FOLIO(folio_test_hugetlb(folio) ||
+					!folio_test_pmd_mappable(folio), folio);
 			remove_migration_pmd(&pvmw, new);
 			continue;
 		}
 #endif
 
-		get_page(new);
+		folio_get(folio);
 		pte = pte_mkold(mk_pte(new, READ_ONCE(vma->vm_page_prot)));
 		if (pte_swp_soft_dirty(*pvmw.pte))
 			pte = pte_mksoft_dirty(pte);
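
[Annotation, not part of the patch] The new idx computation replaces the old pointer arithmetic on struct page: pvmw.pgoff is the mapping index of the folio's first page, and linear_page_index() is the index the current PTE's address maps to, so their difference selects the subpage under this PTE. A worked sketch with illustrative numbers:

	/*
	 * Suppose a PMD-sized folio of 512 pages whose first page sits at
	 * mapping index 512 (so pvmw.pgoff == 512), and a migration PTE at
	 * an address for which linear_page_index(vma, pvmw.address)
	 * returns 515.  Then:
	 */
	idx = 515 - 512;		/* subpage 3 of the folio */
	new = folio_page(folio, idx);	/* the page this PTE must map again */

KSM pages carry no usable pgoff, but since they are always order-0, idx correctly stays 0 and folio_page(folio, 0) is the page itself.
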
@@ -226,20 +228,20 @@ static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma,
 		}
 
 #ifdef CONFIG_HUGETLB_PAGE
-		if (PageHuge(new)) {
+		if (folio_test_hugetlb(folio)) {
 			unsigned int shift = huge_page_shift(hstate_vma(vma));
 
 			pte = pte_mkhuge(pte);
 			pte = arch_make_huge_pte(pte, shift, vma->vm_flags);
-			if (PageAnon(new))
+			if (folio_test_anon(folio))
 				hugepage_add_anon_rmap(new, vma, pvmw.address);
 			else
 				page_dup_rmap(new, true);
 			set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
 		} else
 #endif
 		{
-			if (PageAnon(new))
+			if (folio_test_anon(folio))
 				page_add_anon_rmap(new, vma, pvmw.address, false);
 			else
 				page_add_file_rmap(new, vma, false);
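
[Annotation, not part of the patch] The anon and hugetlb tests can move from new (a possibly-tail subpage) to folio because both are whole-folio properties; PageAnon() and PageHuge() resolve through the compound head anyway. A sanity sketch of the equivalence being relied on:

	/* For any subpage 'new' of 'folio', these tests agree: */
	VM_BUG_ON_PAGE(PageAnon(new) != folio_test_anon(folio), new);
	VM_BUG_ON_PAGE(PageHuge(new) != folio_test_hugetlb(folio), new);
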
@@ -259,17 +261,17 @@ static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma,
  * Get rid of all migration entries and replace them by
  * references to the indicated page.
  */
-void remove_migration_ptes(struct page *old, struct page *new, bool locked)
+void remove_migration_ptes(struct folio *src, struct folio *dst, bool locked)
 {
 	struct rmap_walk_control rwc = {
 		.rmap_one = remove_migration_pte,
-		.arg = old,
+		.arg = src,
 	};
 
 	if (locked)
-		rmap_walk_locked(new, &rwc);
+		rmap_walk_locked(&dst->page, &rwc);
 	else
-		rmap_walk(new, &rwc);
+		rmap_walk(&dst->page, &rwc);
 }
 
 /*
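
[Annotation, not part of the patch] remove_migration_ptes() now takes source and destination folios rather than pages. A minimal caller sketch under that assumption (page and newpage are hypothetical variables):

	struct folio *src = page_folio(page);		/* folio being migrated */
	struct folio *dst = page_folio(newpage);	/* its replacement */

	/* Rewrite every migration entry that referenced src to map dst. */
	remove_migration_ptes(src, dst, false);

The &dst->page conversions in the body look like an interim seam: rmap_walk() and rmap_walk_locked() still take a struct page at this point in the series, so the folio is unwrapped again at that boundary.
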
@@ -756,6 +758,7 @@ int buffer_migrate_page_norefs(struct address_space *mapping,
  */
 static int writeout(struct address_space *mapping, struct page *page)
 {
+	struct folio *folio = page_folio(page);
 	struct writeback_control wbc = {
 		.sync_mode = WB_SYNC_NONE,
 		.nr_to_write = 1,
@@ -781,7 +784,7 @@ static int writeout(struct address_space *mapping, struct page *page)
 	 * At this point we know that the migration attempt cannot
 	 * be successful.
 	 */
-	remove_migration_ptes(page, page, false);
+	remove_migration_ptes(folio, folio, false);
 
 	rc = mapping->a_ops->writepage(page, &wbc);
 
@@ -913,6 +916,7 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
 				int force, enum migrate_mode mode)
 {
 	struct folio *folio = page_folio(page);
+	struct folio *dst = page_folio(newpage);
 	int rc = -EAGAIN;
 	bool page_was_mapped = false;
 	struct anon_vma *anon_vma = NULL;
@@ -1039,8 +1043,8 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
 	}
 
 	if (page_was_mapped)
-		remove_migration_ptes(page,
-			rc == MIGRATEPAGE_SUCCESS ? newpage : page, false);
+		remove_migration_ptes(folio,
+			rc == MIGRATEPAGE_SUCCESS ? dst : folio, false);
 
 out_unlock_both:
 	unlock_page(newpage);
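
[Annotation, not part of the patch] The ternary encodes the two possible outcomes; spelled out for clarity:

	if (page_was_mapped) {
		if (rc == MIGRATEPAGE_SUCCESS)
			/* migration worked: map the destination folio */
			remove_migration_ptes(folio, dst, false);
		else
			/* migration failed: remap the source folio itself */
			remove_migration_ptes(folio, folio, false);
	}

The hugetlb path in unmap_and_move_huge_page() below follows the same pattern with src and dst, and writeout() above uses the failure form unconditionally because migration is already known to be impossible there.
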
@@ -1166,7 +1170,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
 				enum migrate_mode mode, int reason,
 				struct list_head *ret)
 {
-	struct folio *src = page_folio(hpage);
+	struct folio *dst, *src = page_folio(hpage);
 	int rc = -EAGAIN;
 	int page_was_mapped = 0;
 	struct page *new_hpage;
@@ -1194,6 +1198,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
 	new_hpage = get_new_page(hpage, private);
 	if (!new_hpage)
 		return -ENOMEM;
+	dst = page_folio(new_hpage);
 
 	if (!trylock_page(hpage)) {
 		if (!force)
@@ -1254,8 +1259,8 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
 	rc = move_to_new_page(new_hpage, hpage, mode);
 
 	if (page_was_mapped)
-		remove_migration_ptes(hpage,
-			rc == MIGRATEPAGE_SUCCESS ? new_hpage : hpage, false);
+		remove_migration_ptes(src,
+			rc == MIGRATEPAGE_SUCCESS ? dst : src, false);
 
 unlock_put_anon:
 	unlock_page(new_hpage);