@@ -737,8 +737,9 @@ static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
  */
 unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
 {
-	if (PageAnon(page)) {
-		struct anon_vma *page__anon_vma = page_anon_vma(page);
+	struct folio *folio = page_folio(page);
+	if (folio_test_anon(folio)) {
+		struct anon_vma *page__anon_vma = folio_anon_vma(folio);
 		/*
 		 * Note: swapoff's unuse_vma() is more efficient with this
 		 * check, and needs it to match anon_vma when KSM is active.
@@ -748,7 +749,7 @@ unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
 		return -EFAULT;
 	} else if (!vma->vm_file) {
 		return -EFAULT;
-	} else if (vma->vm_file->f_mapping != compound_head(page)->mapping) {
+	} else if (vma->vm_file->f_mapping != folio->mapping) {
 		return -EFAULT;
 	}
 
@@ -1103,6 +1104,7 @@ static void __page_set_anon_rmap(struct page *page,
 static void __page_check_anon_rmap(struct page *page,
 	struct vm_area_struct *vma, unsigned long address)
 {
+	struct folio *folio = page_folio(page);
 	/*
 	 * The page's anon-rmap details (mapping and index) are guaranteed to
 	 * be set up correctly at this point.
@@ -1114,7 +1116,8 @@ static void __page_check_anon_rmap(struct page *page,
 	 * are initially only visible via the pagetables, and the pte is locked
 	 * over the call to page_add_new_anon_rmap.
 	 */
-	VM_BUG_ON_PAGE(page_anon_vma(page)->root != vma->anon_vma->root, page);
+	VM_BUG_ON_FOLIO(folio_anon_vma(folio)->root != vma->anon_vma->root,
+			folio);
 	VM_BUG_ON_PAGE(page_to_pgoff(page) != linear_page_index(vma, address),
 		       page);
 }
@@ -2177,6 +2180,7 @@ void __put_anon_vma(struct anon_vma *anon_vma)
 static struct anon_vma *rmap_walk_anon_lock(struct page *page,
 					struct rmap_walk_control *rwc)
 {
+	struct folio *folio = page_folio(page);
 	struct anon_vma *anon_vma;
 
 	if (rwc->anon_lock)
@@ -2188,7 +2192,7 @@ static struct anon_vma *rmap_walk_anon_lock(struct page *page,
 	 * are holding mmap_lock. Users without mmap_lock are required to
 	 * take a reference count to prevent the anon_vma disappearing
 	 */
-	anon_vma = page_anon_vma(page);
+	anon_vma = folio_anon_vma(folio);
 	if (!anon_vma)
 		return NULL;
 
@@ -2208,14 +2212,15 @@ static struct anon_vma *rmap_walk_anon_lock(struct page *page,
 static void rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc,
 		bool locked)
 {
+	struct folio *folio = page_folio(page);
 	struct anon_vma *anon_vma;
 	pgoff_t pgoff_start, pgoff_end;
 	struct anon_vma_chain *avc;
 
 	if (locked) {
-		anon_vma = page_anon_vma(page);
+		anon_vma = folio_anon_vma(folio);
 		/* anon_vma disappear under us? */
-		VM_BUG_ON_PAGE(!anon_vma, page);
+		VM_BUG_ON_FOLIO(!anon_vma, folio);
 	} else {
 		anon_vma = rmap_walk_anon_lock(page, rwc);
 	}
0 commit comments