@@ -822,6 +822,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
822822 struct lruvec * lruvec ;
823823 unsigned long flags = 0 ;
824824 struct lruvec * locked = NULL ;
825+ struct folio * folio = NULL ;
825826 struct page * page = NULL , * valid_page = NULL ;
826827 struct address_space * mapping ;
827828 unsigned long start_pfn = low_pfn ;
@@ -918,7 +919,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
918919 if (!valid_page && pageblock_aligned (low_pfn )) {
919920 if (!isolation_suitable (cc , page )) {
920921 low_pfn = end_pfn ;
921- page = NULL ;
922+ folio = NULL ;
922923 goto isolate_abort ;
923924 }
924925 valid_page = page ;
@@ -950,7 +951,8 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
950951 * Hugepage was successfully isolated and placed
951952 * on the cc->migratepages list.
952953 */
953- low_pfn += compound_nr (page ) - 1 ;
954+ folio = page_folio (page );
955+ low_pfn += folio_nr_pages (folio ) - 1 ;
954956 goto isolate_success_no_list ;
955957 }
956958
@@ -1018,8 +1020,10 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
10181020 locked = NULL ;
10191021 }
10201022
1021- if (isolate_movable_page (page , mode ))
1023+ if (isolate_movable_page (page , mode )) {
1024+ folio = page_folio (page );
10221025 goto isolate_success ;
1026+ }
10231027 }
10241028
10251029 goto isolate_fail ;
@@ -1030,16 +1034,17 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
10301034 * sure the page is not being freed elsewhere -- the
10311035 * page release code relies on it.
10321036 */
1033- if (unlikely (!get_page_unless_zero (page )))
1037+ folio = folio_get_nontail_page (page );
1038+ if (unlikely (!folio ))
10341039 goto isolate_fail ;
10351040
10361041 /*
10371042 * Migration will fail if an anonymous page is pinned in memory,
10381043 * so avoid taking lru_lock and isolating it unnecessarily in an
10391044 * admittedly racy check.
10401045 */
1041- mapping = page_mapping ( page );
1042- if (!mapping && (page_count ( page ) - 1 ) > total_mapcount ( page ))
1046+ mapping = folio_mapping ( folio );
1047+ if (!mapping && (folio_ref_count ( folio ) - 1 ) > folio_mapcount ( folio ))
10431048 goto isolate_fail_put ;
10441049
10451050 /*
@@ -1050,11 +1055,11 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
10501055 goto isolate_fail_put ;
10511056
10521057 /* Only take pages on LRU: a check now makes later tests safe */
1053- if (!PageLRU ( page ))
1058+ if (!folio_test_lru ( folio ))
10541059 goto isolate_fail_put ;
10551060
10561061 /* Compaction might skip unevictable pages but CMA takes them */
1057- if (!(mode & ISOLATE_UNEVICTABLE ) && PageUnevictable ( page ))
1062+ if (!(mode & ISOLATE_UNEVICTABLE ) && folio_test_unevictable ( folio ))
10581063 goto isolate_fail_put ;
10591064
10601065 /*
@@ -1063,10 +1068,10 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
10631068 * it will be able to migrate without blocking - clean pages
10641069 * for the most part. PageWriteback would require blocking.
10651070 */
1066- if ((mode & ISOLATE_ASYNC_MIGRATE ) && PageWriteback ( page ))
1071+ if ((mode & ISOLATE_ASYNC_MIGRATE ) && folio_test_writeback ( folio ))
10671072 goto isolate_fail_put ;
10681073
1069- if ((mode & ISOLATE_ASYNC_MIGRATE ) && PageDirty ( page )) {
1074+ if ((mode & ISOLATE_ASYNC_MIGRATE ) && folio_test_dirty ( folio )) {
10701075 bool migrate_dirty ;
10711076
10721077 /*
@@ -1078,22 +1083,22 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
10781083 * the page lock until after the page is removed
10791084 * from the page cache.
10801085 */
1081- if (!trylock_page ( page ))
1086+ if (!folio_trylock ( folio ))
10821087 goto isolate_fail_put ;
10831088
1084- mapping = page_mapping ( page );
1089+ mapping = folio_mapping ( folio );
10851090 migrate_dirty = !mapping ||
10861091 mapping -> a_ops -> migrate_folio ;
1087- unlock_page ( page );
1092+ folio_unlock ( folio );
10881093 if (!migrate_dirty )
10891094 goto isolate_fail_put ;
10901095 }
10911096
1092- /* Try isolate the page */
1093- if (!TestClearPageLRU ( page ))
1097+ /* Try isolate the folio */
1098+ if (!folio_test_clear_lru ( folio ))
10941099 goto isolate_fail_put ;
10951100
1096- lruvec = folio_lruvec (page_folio ( page ) );
1101+ lruvec = folio_lruvec (folio );
10971102
10981103 /* If we already hold the lock, we can skip some rechecking */
10991104 if (lruvec != locked ) {
@@ -1103,7 +1108,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
11031108 compact_lock_irqsave (& lruvec -> lru_lock , & flags , cc );
11041109 locked = lruvec ;
11051110
1106- lruvec_memcg_debug (lruvec , page_folio ( page ) );
1111+ lruvec_memcg_debug (lruvec , folio );
11071112
11081113 /*
11091114 * Try get exclusive access under lock. If marked for
@@ -1119,34 +1124,33 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
11191124 }
11201125
11211126 /*
1122- * Page become compound since the non-locked check,
1123- * and it's on LRU. It can only be a THP so the order
1124- * is safe to read and it's 0 for tail pages.
 1127+ * The folio became large since the non-locked check,
 1128+ * and it's on the LRU.
11251129 */
1126- if (unlikely (PageCompound ( page ) && !cc -> alloc_contig )) {
1127- low_pfn += compound_nr ( page ) - 1 ;
1128- nr_scanned += compound_nr ( page ) - 1 ;
1129- SetPageLRU ( page );
1130+ if (unlikely (folio_test_large ( folio ) && !cc -> alloc_contig )) {
1131+ low_pfn += folio_nr_pages ( folio ) - 1 ;
1132+ nr_scanned += folio_nr_pages ( folio ) - 1 ;
1133+ folio_set_lru ( folio );
11301134 goto isolate_fail_put ;
11311135 }
11321136 }
11331137
1134- /* The whole page is taken off the LRU; skip the tail pages. */
1135- if (PageCompound ( page ))
1136- low_pfn += compound_nr ( page ) - 1 ;
1138+ /* The folio is taken off the LRU */
1139+ if (folio_test_large ( folio ))
1140+ low_pfn += folio_nr_pages ( folio ) - 1 ;
11371141
11381142 /* Successfully isolated */
1139- del_page_from_lru_list ( page , lruvec );
1140- mod_node_page_state ( page_pgdat ( page ) ,
1141- NR_ISOLATED_ANON + page_is_file_lru ( page ),
1142- thp_nr_pages ( page ));
1143+ lruvec_del_folio ( lruvec , folio );
1144+ node_stat_mod_folio ( folio ,
1145+ NR_ISOLATED_ANON + folio_is_file_lru ( folio ),
1146+ folio_nr_pages ( folio ));
11431147
11441148isolate_success :
1145- list_add (& page -> lru , & cc -> migratepages );
1149+ list_add (& folio -> lru , & cc -> migratepages );
11461150isolate_success_no_list :
1147- cc -> nr_migratepages += compound_nr ( page );
1148- nr_isolated += compound_nr ( page );
1149- nr_scanned += compound_nr ( page ) - 1 ;
1151+ cc -> nr_migratepages += folio_nr_pages ( folio );
1152+ nr_isolated += folio_nr_pages ( folio );
1153+ nr_scanned += folio_nr_pages ( folio ) - 1 ;
11501154
11511155 /*
11521156 * Avoid isolating too much unless this block is being
@@ -1168,7 +1172,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
11681172 unlock_page_lruvec_irqrestore (locked , flags );
11691173 locked = NULL ;
11701174 }
1171- put_page ( page );
1175+ folio_put ( folio );
11721176
11731177isolate_fail :
11741178 if (!skip_on_failure && ret != - ENOMEM )
@@ -1209,14 +1213,14 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
12091213 if (unlikely (low_pfn > end_pfn ))
12101214 low_pfn = end_pfn ;
12111215
1212- page = NULL ;
1216+ folio = NULL ;
12131217
12141218isolate_abort :
12151219 if (locked )
12161220 unlock_page_lruvec_irqrestore (locked , flags );
1217- if (page ) {
1218- SetPageLRU ( page );
1219- put_page ( page );
1221+ if (folio ) {
1222+ folio_set_lru ( folio );
1223+ folio_put ( folio );
12201224 }
12211225
12221226 /*
0 commit comments