@@ -122,7 +122,7 @@ struct kioctx {
 	unsigned long		mmap_base;
 	unsigned long		mmap_size;
 
-	struct page		**ring_pages;
+	struct folio		**ring_folios;
 	long			nr_pages;
 
 	struct rcu_work		free_rwork;	/* see free_ioctx() */
@@ -160,7 +160,7 @@ struct kioctx {
 		spinlock_t	completion_lock;
 	} ____cacheline_aligned_in_smp;
 
-	struct page		*internal_pages[AIO_RING_PAGES];
+	struct folio		*internal_folios[AIO_RING_PAGES];
 	struct file		*aio_ring_file;
 
 	unsigned		id;
@@ -334,20 +334,20 @@ static void aio_free_ring(struct kioctx *ctx)
 	put_aio_ring_file(ctx);
 
 	for (i = 0; i < ctx->nr_pages; i++) {
-		struct folio *folio = page_folio(ctx->ring_pages[i]);
+		struct folio *folio = ctx->ring_folios[i];
 
 		if (!folio)
 			continue;
 
 		pr_debug("pid(%d) [%d] folio->count=%d\n", current->pid, i,
 			 folio_ref_count(folio));
-		ctx->ring_pages[i] = NULL;
+		ctx->ring_folios[i] = NULL;
 		folio_put(folio);
 	}
 
-	if (ctx->ring_pages && ctx->ring_pages != ctx->internal_pages) {
-		kfree(ctx->ring_pages);
-		ctx->ring_pages = NULL;
+	if (ctx->ring_folios && ctx->ring_folios != ctx->internal_folios) {
+		kfree(ctx->ring_folios);
+		ctx->ring_folios = NULL;
 	}
 }
 
@@ -442,7 +442,7 @@ static int aio_migrate_folio(struct address_space *mapping, struct folio *dst,
 	idx = src->index;
 	if (idx < (pgoff_t)ctx->nr_pages) {
 		/* Make sure the old folio hasn't already been changed */
-		if (ctx->ring_pages[idx] != &src->page)
+		if (ctx->ring_folios[idx] != src)
 			rc = -EAGAIN;
 	} else
 		rc = -EINVAL;
@@ -466,8 +466,8 @@ static int aio_migrate_folio(struct address_space *mapping, struct folio *dst,
 	 */
 	spin_lock_irqsave(&ctx->completion_lock, flags);
 	folio_migrate_copy(dst, src);
-	BUG_ON(ctx->ring_pages[idx] != &src->page);
-	ctx->ring_pages[idx] = &dst->page;
+	BUG_ON(ctx->ring_folios[idx] != src);
+	ctx->ring_folios[idx] = dst;
 	spin_unlock_irqrestore(&ctx->completion_lock, flags);
 
 	/* The old folio is no longer accessible. */
@@ -517,11 +517,11 @@ static int aio_setup_ring(struct kioctx *ctx, unsigned int nr_events)
 	nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring))
 			/ sizeof(struct io_event);
 
-	ctx->ring_pages = ctx->internal_pages;
+	ctx->ring_folios = ctx->internal_folios;
 	if (nr_pages > AIO_RING_PAGES) {
-		ctx->ring_pages = kcalloc(nr_pages, sizeof(struct page *),
-					  GFP_KERNEL);
-		if (!ctx->ring_pages) {
+		ctx->ring_folios = kcalloc(nr_pages, sizeof(struct folio *),
+					   GFP_KERNEL);
+		if (!ctx->ring_folios) {
 			put_aio_ring_file(ctx);
 			return -ENOMEM;
 		}
@@ -540,7 +540,7 @@ static int aio_setup_ring(struct kioctx *ctx, unsigned int nr_events)
 			 folio_ref_count(folio));
 		folio_end_read(folio, true);
 
-		ctx->ring_pages[i] = &folio->page;
+		ctx->ring_folios[i] = folio;
 	}
 	ctx->nr_pages = i;
 
@@ -573,15 +573,15 @@ static int aio_setup_ring(struct kioctx *ctx, unsigned int nr_events)
 	ctx->user_id = ctx->mmap_base;
 	ctx->nr_events = nr_events; /* trusted copy */
 
-	ring = page_address(ctx->ring_pages[0]);
+	ring = folio_address(ctx->ring_folios[0]);
 	ring->nr = nr_events;	/* user copy */
 	ring->id = ~0U;
 	ring->head = ring->tail = 0;
 	ring->magic = AIO_RING_MAGIC;
 	ring->compat_features = AIO_RING_COMPAT_FEATURES;
 	ring->incompat_features = AIO_RING_INCOMPAT_FEATURES;
 	ring->header_length = sizeof(struct aio_ring);
-	flush_dcache_page(ctx->ring_pages[0]);
+	flush_dcache_folio(ctx->ring_folios[0]);
 
 	return 0;
 }
@@ -692,9 +692,9 @@ static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm)
 
 			/* While kioctx setup is in progress,
 			 * we are protected from page migration
-			 * changes ring_pages by ->ring_lock.
+			 * changes ring_folios by ->ring_lock.
 			 */
-			ring = page_address(ctx->ring_pages[0]);
+			ring = folio_address(ctx->ring_folios[0]);
 			ring->id = ctx->id;
 			return 0;
 		}
@@ -1036,7 +1036,7 @@ static void user_refill_reqs_available(struct kioctx *ctx)
 	 * against ctx->completed_events below will make sure we do the
 	 * safe/right thing.
 	 */
-	ring = page_address(ctx->ring_pages[0]);
+	ring = folio_address(ctx->ring_folios[0]);
 	head = ring->head;
 
 	refill_reqs_available(ctx, head, ctx->tail);
@@ -1148,12 +1148,12 @@ static void aio_complete(struct aio_kiocb *iocb)
 	if (++tail >= ctx->nr_events)
 		tail = 0;
 
-	ev_page = page_address(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);
+	ev_page = folio_address(ctx->ring_folios[pos / AIO_EVENTS_PER_PAGE]);
 	event = ev_page + pos % AIO_EVENTS_PER_PAGE;
 
 	*event = iocb->ki_res;
 
-	flush_dcache_page(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);
+	flush_dcache_folio(ctx->ring_folios[pos / AIO_EVENTS_PER_PAGE]);
 
 	pr_debug("%p[%u]: %p: %p %Lx %Lx %Lx\n", ctx, tail, iocb,
 		 (void __user *)(unsigned long)iocb->ki_res.obj,
@@ -1166,10 +1166,10 @@ static void aio_complete(struct aio_kiocb *iocb)
 
 	ctx->tail = tail;
 
-	ring = page_address(ctx->ring_pages[0]);
+	ring = folio_address(ctx->ring_folios[0]);
 	head = ring->head;
 	ring->tail = tail;
-	flush_dcache_page(ctx->ring_pages[0]);
+	flush_dcache_folio(ctx->ring_folios[0]);
 
 	ctx->completed_events++;
 	if (ctx->completed_events > 1)
@@ -1241,8 +1241,8 @@ static long aio_read_events_ring(struct kioctx *ctx,
 	sched_annotate_sleep();
 	mutex_lock(&ctx->ring_lock);
 
-	/* Access to ->ring_pages here is protected by ctx->ring_lock. */
-	ring = page_address(ctx->ring_pages[0]);
+	/* Access to ->ring_folios here is protected by ctx->ring_lock. */
+	ring = folio_address(ctx->ring_folios[0]);
 	head = ring->head;
 	tail = ring->tail;
 
@@ -1263,20 +1263,20 @@ static long aio_read_events_ring(struct kioctx *ctx,
 	while (ret < nr) {
 		long avail;
 		struct io_event *ev;
-		struct page *page;
+		struct folio *folio;
 
 		avail = (head <= tail ?  tail : ctx->nr_events) - head;
 		if (head == tail)
 			break;
 
 		pos = head + AIO_EVENTS_OFFSET;
-		page = ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE];
+		folio = ctx->ring_folios[pos / AIO_EVENTS_PER_PAGE];
 		pos %= AIO_EVENTS_PER_PAGE;
 
 		avail = min(avail, nr - ret);
 		avail = min_t(long, avail, AIO_EVENTS_PER_PAGE - pos);
 
-		ev = page_address(page);
+		ev = folio_address(folio);
 		copy_ret = copy_to_user(event + ret, ev + pos,
 					sizeof(*ev) * avail);
 
@@ -1290,9 +1290,9 @@ static long aio_read_events_ring(struct kioctx *ctx,
 		head %= ctx->nr_events;
 	}
 
-	ring = page_address(ctx->ring_pages[0]);
+	ring = folio_address(ctx->ring_folios[0]);
 	ring->head = head;
-	flush_dcache_page(ctx->ring_pages[0]);
+	flush_dcache_folio(ctx->ring_folios[0]);
 
 	pr_debug("%li h%u t%u\n", ret, head, tail);
 out: