@@ -243,7 +243,7 @@ static int orangefs_writepages(struct address_space *mapping,
 	return ret;
 }
 
-static int orangefs_launder_page(struct page *);
+static int orangefs_launder_folio(struct folio *);
 
 static void orangefs_readahead(struct readahead_control *rac)
 {
@@ -290,14 +290,15 @@ static void orangefs_readahead(struct readahead_control *rac)
 
 static int orangefs_readpage(struct file *file, struct page *page)
 {
+	struct folio *folio = page_folio(page);
 	struct inode *inode = page->mapping->host;
 	struct iov_iter iter;
 	struct bio_vec bv;
 	ssize_t ret;
 	loff_t off; /* offset into this page */
 
-	if (PageDirty(page))
-		orangefs_launder_page(page);
+	if (folio_test_dirty(folio))
+		orangefs_launder_folio(folio);
 
 	off = page_offset(page);
 	bv.bv_page = page;
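
A note on the conversion idiom in this hunk: page_folio() returns the folio that contains a given page, and folio_test_dirty() is the folio counterpart of PageDirty(), so the new code inspects exactly the state the old code did. A minimal sketch of the correspondence, assuming kernel context and using only helpers that appear in this patch (the function name is illustrative, not part of the commit):

	#include <linux/pagemap.h>

	/* Illustrative only: the page -> folio conversion idiom. */
	static void example_dirty_check(struct page *page)
	{
		struct folio *folio = page_folio(page);

		if (folio_test_dirty(folio))	/* old: PageDirty(page) */
			pr_debug("dirty folio at index %lu\n", folio->index);
	}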
@@ -330,6 +331,7 @@ static int orangefs_write_begin(struct file *file,
 		void **fsdata)
 {
 	struct orangefs_write_range *wr;
+	struct folio *folio;
 	struct page *page;
 	pgoff_t index;
 	int ret;
@@ -341,27 +343,28 @@ static int orangefs_write_begin(struct file *file,
 		return -ENOMEM;
 
 	*pagep = page;
+	folio = page_folio(page);
 
-	if (PageDirty(page) && !PagePrivate(page)) {
+	if (folio_test_dirty(folio) && !folio_test_private(folio)) {
 		/*
 		 * Should be impossible. If it happens, launder the page
 		 * since we don't know what's dirty. This will WARN in
 		 * orangefs_writepage_locked.
 		 */
-		ret = orangefs_launder_page(page);
+		ret = orangefs_launder_folio(folio);
 		if (ret)
 			return ret;
 	}
-	if (PagePrivate(page)) {
+	if (folio_test_private(folio)) {
 		struct orangefs_write_range *wr;
-		wr = (struct orangefs_write_range *)page_private(page);
+		wr = folio_get_private(folio);
 		if (wr->pos + wr->len == pos &&
 		    uid_eq(wr->uid, current_fsuid()) &&
 		    gid_eq(wr->gid, current_fsgid())) {
 			wr->len += len;
 			goto okay;
 		} else {
-			ret = orangefs_launder_page(page);
+			ret = orangefs_launder_folio(folio);
 			if (ret)
 				return ret;
 		}
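
The logic being converted here coalesces writes: the folio's private data records one pending byte range plus the fsuid/fsgid that dirtied it, and a new write may only extend that range when it starts exactly at wr->pos + wr->len and comes from the same credentials; anything else forces a launder first. A hedged sketch of that test as a standalone predicate (the helper name wr_can_extend is mine, not in the patch):

	/* Hypothetical helper; mirrors the test in orangefs_write_begin(). */
	static bool wr_can_extend(const struct orangefs_write_range *wr,
				  loff_t pos)
	{
		return wr->pos + wr->len == pos &&
		       uid_eq(wr->uid, current_fsuid()) &&
		       gid_eq(wr->gid, current_fsgid());
	}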
@@ -375,7 +378,7 @@ static int orangefs_write_begin(struct file *file,
 	wr->len = len;
 	wr->uid = current_fsuid();
 	wr->gid = current_fsgid();
-	attach_page_private(page, wr);
+	folio_attach_private(folio, wr);
 okay:
 	return 0;
 }
@@ -481,7 +484,7 @@ static void orangefs_invalidate_folio(struct folio *folio,
 	 * Thus the following runs if wr was modified above.
 	 */
 
-	orangefs_launder_page(&folio->page);
+	orangefs_launder_folio(folio);
 }
 
 static int orangefs_releasepage(struct page *page, gfp_t foo)
@@ -494,17 +497,17 @@ static void orangefs_freepage(struct page *page)
 	kfree(detach_page_private(page));
 }
 
-static int orangefs_launder_page(struct page *page)
+static int orangefs_launder_folio(struct folio *folio)
 {
 	int r = 0;
 	struct writeback_control wbc = {
 		.sync_mode = WB_SYNC_ALL,
 		.nr_to_write = 0,
 	};
-	wait_on_page_writeback(page);
-	if (clear_page_dirty_for_io(page)) {
-		r = orangefs_writepage_locked(page, &wbc);
-		end_page_writeback(page);
+	folio_wait_writeback(folio);
+	if (folio_clear_dirty_for_io(folio)) {
+		r = orangefs_writepage_locked(&folio->page, &wbc);
+		folio_end_writeback(folio);
 	}
 	return r;
 }
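
The reimplemented hook keeps the standard launder sequence: wait out any writeback in flight, clear the dirty bit for I/O, write the folio synchronously, end writeback. Only the page-flag calls change names; orangefs_writepage_locked() is still page-based, so the folio is unwrapped with &folio->page at the boundary, a transitional step typical of these conversions. For context, the VFS invokes ->launder_folio on dirty locked folios during invalidate_inode_pages2(); roughly like this (a sketch from memory, not the actual mm/truncate.c code):

	/* Approximate shape of the caller in mm/truncate.c. */
	static int do_launder_folio(struct address_space *mapping,
				    struct folio *folio)
	{
		if (folio->mapping != mapping || !folio_test_dirty(folio))
			return 0;
		if (!mapping->a_ops->launder_folio)
			return 0;
		return mapping->a_ops->launder_folio(folio);
	}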
@@ -637,13 +640,13 @@ static const struct address_space_operations orangefs_address_operations = {
 	.invalidate_folio = orangefs_invalidate_folio,
 	.releasepage = orangefs_releasepage,
 	.freepage = orangefs_freepage,
-	.launder_page = orangefs_launder_page,
+	.launder_folio = orangefs_launder_folio,
 	.direct_IO = orangefs_direct_IO,
 };
 
 vm_fault_t orangefs_page_mkwrite(struct vm_fault *vmf)
 {
-	struct page *page = vmf->page;
+	struct folio *folio = page_folio(vmf->page);
 	struct inode *inode = file_inode(vmf->vma->vm_file);
 	struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode);
 	unsigned long *bitlock = &orangefs_inode->bitlock;
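
The method-table change above is the visible API break: launder_page took a struct page, launder_folio takes a struct folio. As introduced by the folio conversion series, the new slot in struct address_space_operations has this prototype:

	int (*launder_folio)(struct folio *folio);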
@@ -657,27 +660,27 @@ vm_fault_t orangefs_page_mkwrite(struct vm_fault *vmf)
 		goto out;
 	}
 
-	lock_page(page);
-	if (PageDirty(page) && !PagePrivate(page)) {
+	folio_lock(folio);
+	if (folio_test_dirty(folio) && !folio_test_private(folio)) {
 		/*
-		 * Should be impossible. If it happens, launder the page
+		 * Should be impossible. If it happens, launder the folio
 		 * since we don't know what's dirty. This will WARN in
 		 * orangefs_writepage_locked.
 		 */
-		if (orangefs_launder_page(page)) {
+		if (orangefs_launder_folio(folio)) {
 			ret = VM_FAULT_LOCKED|VM_FAULT_RETRY;
 			goto out;
 		}
 	}
-	if (PagePrivate(page)) {
-		wr = (struct orangefs_write_range *)page_private(page);
+	if (folio_test_private(folio)) {
+		wr = folio_get_private(folio);
 		if (uid_eq(wr->uid, current_fsuid()) &&
 		    gid_eq(wr->gid, current_fsgid())) {
-			wr->pos = page_offset(page);
+			wr->pos = page_offset(vmf->page);
 			wr->len = PAGE_SIZE;
 			goto okay;
 		} else {
-			if (orangefs_launder_page(page)) {
+			if (orangefs_launder_folio(folio)) {
 				ret = VM_FAULT_LOCKED|VM_FAULT_RETRY;
 				goto out;
 			}
@@ -688,27 +691,27 @@ vm_fault_t orangefs_page_mkwrite(struct vm_fault *vmf)
 		ret = VM_FAULT_LOCKED|VM_FAULT_RETRY;
 		goto out;
 	}
-	wr->pos = page_offset(page);
+	wr->pos = page_offset(vmf->page);
 	wr->len = PAGE_SIZE;
 	wr->uid = current_fsuid();
 	wr->gid = current_fsgid();
-	attach_page_private(page, wr);
+	folio_attach_private(folio, wr);
 okay:
 
 	file_update_time(vmf->vma->vm_file);
-	if (page->mapping != inode->i_mapping) {
-		unlock_page(page);
+	if (folio->mapping != inode->i_mapping) {
+		folio_unlock(folio);
 		ret = VM_FAULT_LOCKED|VM_FAULT_NOPAGE;
 		goto out;
 	}
 
 	/*
-	 * We mark the page dirty already here so that when freeze is in
+	 * We mark the folio dirty already here so that when freeze is in
 	 * progress, we are guaranteed that writeback during freezing will
-	 * see the dirty page and writeprotect it again.
+	 * see the dirty folio and writeprotect it again.
 	 */
-	set_page_dirty(page);
-	wait_for_stable_page(page);
+	folio_mark_dirty(folio);
+	folio_wait_stable(folio);
 	ret = VM_FAULT_LOCKED;
 out:
 	sb_end_pagefault(inode->i_sb);
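
The tail of orangefs_page_mkwrite() follows the usual ->page_mkwrite discipline: dirty the folio while it is still locked, so writeback during a filesystem freeze is guaranteed to see it and write-protect it again, then wait for the folio to become stable on devices that require stable writes. Condensed, the generic pattern looks roughly like this (a sketch of the pattern, not orangefs-specific code):

	/* Generic mkwrite tail: illustrative, not a real kernel API. */
	static vm_fault_t mkwrite_done(struct folio *folio)
	{
		folio_mark_dirty(folio);	/* must happen under the folio lock */
		folio_wait_stable(folio);	/* no-op unless stable writes required */
		return VM_FAULT_LOCKED;
	}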