@@ -90,21 +90,22 @@ struct damon_pa_access_chk_result {
9090static bool __damon_pa_young (struct page * page , struct vm_area_struct * vma ,
9191 unsigned long addr , void * arg )
9292{
93+ struct folio * folio = page_folio (page );
9394 struct damon_pa_access_chk_result * result = arg ;
94- DEFINE_PAGE_VMA_WALK (pvmw , page , vma , addr , 0 );
95+ DEFINE_FOLIO_VMA_WALK (pvmw , folio , vma , addr , 0 );
9596
9697 result -> accessed = false;
9798 result -> page_sz = PAGE_SIZE ;
9899 while (page_vma_mapped_walk (& pvmw )) {
99100 addr = pvmw .address ;
100101 if (pvmw .pte ) {
101102 result -> accessed = pte_young (* pvmw .pte ) ||
102- !page_is_idle ( page ) ||
103+ !folio_test_idle ( folio ) ||
103104 mmu_notifier_test_young (vma -> vm_mm , addr );
104105 } else {
105106#ifdef CONFIG_TRANSPARENT_HUGEPAGE
106107 result -> accessed = pmd_young (* pvmw .pmd ) ||
107- !page_is_idle ( page ) ||
108+ !folio_test_idle ( folio ) ||
108109 mmu_notifier_test_young (vma -> vm_mm , addr );
109110 result -> page_sz = ((1UL ) << HPAGE_PMD_SHIFT );
110111#else
@@ -123,6 +124,7 @@ static bool __damon_pa_young(struct page *page, struct vm_area_struct *vma,
123124
124125static bool damon_pa_young (unsigned long paddr , unsigned long * page_sz )
125126{
127+ struct folio * folio ;
126128 struct page * page = damon_get_page (PHYS_PFN (paddr ));
127129 struct damon_pa_access_chk_result result = {
128130 .page_sz = PAGE_SIZE ,
@@ -137,27 +139,28 @@ static bool damon_pa_young(unsigned long paddr, unsigned long *page_sz)
137139
138140 if (!page )
139141 return false;
142+ folio = page_folio (page );
140143
141- if (!page_mapped ( page ) || !page_rmapping ( page )) {
142- if (page_is_idle ( page ))
144+ if (!folio_mapped ( folio ) || !folio_raw_mapping ( folio )) {
145+ if (folio_test_idle ( folio ))
143146 result .accessed = false;
144147 else
145148 result .accessed = true;
146- put_page ( page );
149+ folio_put ( folio );
147150 goto out ;
148151 }
149152
150- need_lock = !PageAnon ( page ) || PageKsm ( page );
151- if (need_lock && !trylock_page ( page )) {
152- put_page ( page );
153+ need_lock = !folio_test_anon ( folio ) || folio_test_ksm ( folio );
154+ if (need_lock && !folio_trylock ( folio )) {
155+ folio_put ( folio );
153156 return NULL ;
154157 }
155158
156- rmap_walk (page , & rwc );
159+ rmap_walk (& folio -> page , & rwc );
157160
158161 if (need_lock )
159- unlock_page ( page );
160- put_page ( page );
162+ folio_unlock ( folio );
163+ folio_put ( folio );
161164
162165out :
163166 * page_sz = result .page_sz ;