@@ -122,7 +122,7 @@ struct kioctx {
122122 unsigned long mmap_base ;
123123 unsigned long mmap_size ;
124124
125- struct page * * ring_pages ;
125+ struct folio * * ring_folios ;
126126 long nr_pages ;
127127
128128 struct rcu_work free_rwork ; /* see free_ioctx() */
@@ -160,7 +160,7 @@ struct kioctx {
160160 spinlock_t completion_lock ;
161161 } ____cacheline_aligned_in_smp ;
162162
163- struct page * internal_pages [AIO_RING_PAGES ];
163+ struct folio * internal_folios [AIO_RING_PAGES ];
164164 struct file * aio_ring_file ;
165165
166166 unsigned id ;
@@ -334,19 +334,20 @@ static void aio_free_ring(struct kioctx *ctx)
334334 put_aio_ring_file (ctx );
335335
336336 for (i = 0 ; i < ctx -> nr_pages ; i ++ ) {
337- struct page * page ;
338- pr_debug ("pid(%d) [%d] page->count=%d\n" , current -> pid , i ,
339- page_count (ctx -> ring_pages [i ]));
340- page = ctx -> ring_pages [i ];
341- if (!page )
337+ struct folio * folio = ctx -> ring_folios [i ];
338+
339+ if (!folio )
342340 continue ;
343- ctx -> ring_pages [i ] = NULL ;
344- put_page (page );
341+
342+ pr_debug ("pid(%d) [%d] folio->count=%d\n" , current -> pid , i ,
343+ folio_ref_count (folio ));
344+ ctx -> ring_folios [i ] = NULL ;
345+ folio_put (folio );
345346 }
346347
347- if (ctx -> ring_pages && ctx -> ring_pages != ctx -> internal_pages ) {
348- kfree (ctx -> ring_pages );
349- ctx -> ring_pages = NULL ;
348+ if (ctx -> ring_folios && ctx -> ring_folios != ctx -> internal_folios ) {
349+ kfree (ctx -> ring_folios );
350+ ctx -> ring_folios = NULL ;
350351 }
351352}
352353
@@ -441,7 +442,7 @@ static int aio_migrate_folio(struct address_space *mapping, struct folio *dst,
441442 idx = src -> index ;
442443 if (idx < (pgoff_t )ctx -> nr_pages ) {
443444 /* Make sure the old folio hasn't already been changed */
444- if (ctx -> ring_pages [idx ] != & src -> page )
445+ if (ctx -> ring_folios [idx ] != src )
445446 rc = - EAGAIN ;
446447 } else
447448 rc = - EINVAL ;
@@ -465,8 +466,8 @@ static int aio_migrate_folio(struct address_space *mapping, struct folio *dst,
465466 */
466467 spin_lock_irqsave (& ctx -> completion_lock , flags );
467468 folio_migrate_copy (dst , src );
468- BUG_ON (ctx -> ring_pages [idx ] != & src -> page );
469- ctx -> ring_pages [idx ] = & dst -> page ;
469+ BUG_ON (ctx -> ring_folios [idx ] != src );
470+ ctx -> ring_folios [idx ] = dst ;
470471 spin_unlock_irqrestore (& ctx -> completion_lock , flags );
471472
472473 /* The old folio is no longer accessible. */
@@ -516,28 +517,30 @@ static int aio_setup_ring(struct kioctx *ctx, unsigned int nr_events)
516517 nr_events = (PAGE_SIZE * nr_pages - sizeof (struct aio_ring ))
517518 / sizeof (struct io_event );
518519
519- ctx -> ring_pages = ctx -> internal_pages ;
520+ ctx -> ring_folios = ctx -> internal_folios ;
520521 if (nr_pages > AIO_RING_PAGES ) {
521- ctx -> ring_pages = kcalloc (nr_pages , sizeof (struct page * ),
522- GFP_KERNEL );
523- if (!ctx -> ring_pages ) {
522+ ctx -> ring_folios = kcalloc (nr_pages , sizeof (struct folio * ),
523+ GFP_KERNEL );
524+ if (!ctx -> ring_folios ) {
524525 put_aio_ring_file (ctx );
525526 return - ENOMEM ;
526527 }
527528 }
528529
529530 for (i = 0 ; i < nr_pages ; i ++ ) {
530- struct page * page ;
531- page = find_or_create_page (file -> f_mapping ,
532- i , GFP_USER | __GFP_ZERO );
533- if (!page )
531+ struct folio * folio ;
532+
533+ folio = __filemap_get_folio (file -> f_mapping , i ,
534+ FGP_LOCK | FGP_ACCESSED | FGP_CREAT ,
535+ GFP_USER | __GFP_ZERO );
536+ if (IS_ERR (folio ))
534537 break ;
535- pr_debug ("pid(%d) page[%d]->count=%d\n" ,
536- current -> pid , i , page_count (page ));
537- SetPageUptodate (page );
538- unlock_page (page );
539538
540- ctx -> ring_pages [i ] = page ;
539+ pr_debug ("pid(%d) [%d] folio->count=%d\n" , current -> pid , i ,
540+ folio_ref_count (folio ));
541+ folio_end_read (folio , true);
542+
543+ ctx -> ring_folios [i ] = folio ;
541544 }
542545 ctx -> nr_pages = i ;
543546
@@ -570,15 +573,15 @@ static int aio_setup_ring(struct kioctx *ctx, unsigned int nr_events)
570573 ctx -> user_id = ctx -> mmap_base ;
571574 ctx -> nr_events = nr_events ; /* trusted copy */
572575
573- ring = page_address (ctx -> ring_pages [0 ]);
576+ ring = folio_address (ctx -> ring_folios [0 ]);
574577 ring -> nr = nr_events ; /* user copy */
575578 ring -> id = ~0U ;
576579 ring -> head = ring -> tail = 0 ;
577580 ring -> magic = AIO_RING_MAGIC ;
578581 ring -> compat_features = AIO_RING_COMPAT_FEATURES ;
579582 ring -> incompat_features = AIO_RING_INCOMPAT_FEATURES ;
580583 ring -> header_length = sizeof (struct aio_ring );
581- flush_dcache_page (ctx -> ring_pages [0 ]);
584+ flush_dcache_folio (ctx -> ring_folios [0 ]);
582585
583586 return 0 ;
584587}
@@ -689,9 +692,9 @@ static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm)
689692
690693 /* While kioctx setup is in progress,
691694 * we are protected from page migration
692- * changes ring_pages by ->ring_lock.
695+ * changes ring_folios by ->ring_lock.
693696 */
694- ring = page_address (ctx -> ring_pages [0 ]);
697+ ring = folio_address (ctx -> ring_folios [0 ]);
695698 ring -> id = ctx -> id ;
696699 return 0 ;
697700 }
@@ -1033,7 +1036,7 @@ static void user_refill_reqs_available(struct kioctx *ctx)
10331036 * against ctx->completed_events below will make sure we do the
10341037 * safe/right thing.
10351038 */
1036- ring = page_address (ctx -> ring_pages [0 ]);
1039+ ring = folio_address (ctx -> ring_folios [0 ]);
10371040 head = ring -> head ;
10381041
10391042 refill_reqs_available (ctx , head , ctx -> tail );
@@ -1145,12 +1148,12 @@ static void aio_complete(struct aio_kiocb *iocb)
11451148 if (++ tail >= ctx -> nr_events )
11461149 tail = 0 ;
11471150
1148- ev_page = page_address (ctx -> ring_pages [pos / AIO_EVENTS_PER_PAGE ]);
1151+ ev_page = folio_address (ctx -> ring_folios [pos / AIO_EVENTS_PER_PAGE ]);
11491152 event = ev_page + pos % AIO_EVENTS_PER_PAGE ;
11501153
11511154 * event = iocb -> ki_res ;
11521155
1153- flush_dcache_page (ctx -> ring_pages [pos / AIO_EVENTS_PER_PAGE ]);
1156+ flush_dcache_folio (ctx -> ring_folios [pos / AIO_EVENTS_PER_PAGE ]);
11541157
11551158 pr_debug ("%p[%u]: %p: %p %Lx %Lx %Lx\n" , ctx , tail , iocb ,
11561159 (void __user * )(unsigned long )iocb -> ki_res .obj ,
@@ -1163,10 +1166,10 @@ static void aio_complete(struct aio_kiocb *iocb)
11631166
11641167 ctx -> tail = tail ;
11651168
1166- ring = page_address (ctx -> ring_pages [0 ]);
1169+ ring = folio_address (ctx -> ring_folios [0 ]);
11671170 head = ring -> head ;
11681171 ring -> tail = tail ;
1169- flush_dcache_page (ctx -> ring_pages [0 ]);
1172+ flush_dcache_folio (ctx -> ring_folios [0 ]);
11701173
11711174 ctx -> completed_events ++ ;
11721175 if (ctx -> completed_events > 1 )
@@ -1238,8 +1241,8 @@ static long aio_read_events_ring(struct kioctx *ctx,
12381241 sched_annotate_sleep ();
12391242 mutex_lock (& ctx -> ring_lock );
12401243
1241- /* Access to ->ring_pages here is protected by ctx->ring_lock. */
1242- ring = page_address (ctx -> ring_pages [0 ]);
1244+ /* Access to ->ring_folios here is protected by ctx->ring_lock. */
1245+ ring = folio_address (ctx -> ring_folios [0 ]);
12431246 head = ring -> head ;
12441247 tail = ring -> tail ;
12451248
@@ -1260,20 +1263,20 @@ static long aio_read_events_ring(struct kioctx *ctx,
12601263 while (ret < nr ) {
12611264 long avail ;
12621265 struct io_event * ev ;
1263- struct page * page ;
1266+ struct folio * folio ;
12641267
12651268 avail = (head <= tail ? tail : ctx -> nr_events ) - head ;
12661269 if (head == tail )
12671270 break ;
12681271
12691272 pos = head + AIO_EVENTS_OFFSET ;
1270- page = ctx -> ring_pages [pos / AIO_EVENTS_PER_PAGE ];
1273+ folio = ctx -> ring_folios [pos / AIO_EVENTS_PER_PAGE ];
12711274 pos %= AIO_EVENTS_PER_PAGE ;
12721275
12731276 avail = min (avail , nr - ret );
12741277 avail = min_t (long , avail , AIO_EVENTS_PER_PAGE - pos );
12751278
1276- ev = page_address ( page );
1279+ ev = folio_address ( folio );
12771280 copy_ret = copy_to_user (event + ret , ev + pos ,
12781281 sizeof (* ev ) * avail );
12791282
@@ -1287,9 +1290,9 @@ static long aio_read_events_ring(struct kioctx *ctx,
12871290 head %= ctx -> nr_events ;
12881291 }
12891292
1290- ring = page_address (ctx -> ring_pages [0 ]);
1293+ ring = folio_address (ctx -> ring_folios [0 ]);
12911294 ring -> head = head ;
1292- flush_dcache_page (ctx -> ring_pages [0 ]);
1295+ flush_dcache_folio (ctx -> ring_folios [0 ]);
12931296
12941297 pr_debug ("%li h%u t%u\n" , ret , head , tail );
12951298out :