Skip to content

Commit fa589ac

Browse files
CFSworks and idryomov
authored and committed
ceph: remove error return from ceph_process_folio_batch()
Following an earlier commit, ceph_process_folio_batch() no longer returns errors because the writeback loop cannot handle them. Since this function already indicates failure to lock any pages by leaving `ceph_wbc.locked_pages == 0`, and the writeback loop has no way to handle abandonment of a locked batch, change the return type of ceph_process_folio_batch() to `void` and remove the pathological goto in the writeback loop. The lack of a return code emphasizes that ceph_process_folio_batch() is designed to be abort-free: that is, once it commits a folio for writeback, it will not later abandon it or propagate an error for that folio. Any future changes requiring "abort" logic should follow this invariant by cleaning up its array and resetting ceph_wbc.locked_pages appropriately. Signed-off-by: Sam Edwards <CFSworks@gmail.com> Reviewed-by: Ilya Dryomov <idryomov@gmail.com> Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
1 parent cac190c commit fa589ac

1 file changed

Lines changed: 5 additions & 12 deletions

File tree

fs/ceph/addr.c

Lines changed: 5 additions & 12 deletions
Original file line number | Diff line number | Diff line change
@@ -1284,16 +1284,16 @@ static inline int move_dirty_folio_in_page_array(struct address_space *mapping,
12841284
}
12851285

12861286
static
1287-
int ceph_process_folio_batch(struct address_space *mapping,
1288-
struct writeback_control *wbc,
1289-
struct ceph_writeback_ctl *ceph_wbc)
1287+
void ceph_process_folio_batch(struct address_space *mapping,
1288+
struct writeback_control *wbc,
1289+
struct ceph_writeback_ctl *ceph_wbc)
12901290
{
12911291
struct inode *inode = mapping->host;
12921292
struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
12931293
struct ceph_client *cl = fsc->client;
12941294
struct folio *folio = NULL;
12951295
unsigned i;
1296-
int rc = 0;
1296+
int rc;
12971297

12981298
for (i = 0; can_next_page_be_processed(ceph_wbc, i); i++) {
12991299
folio = ceph_wbc->fbatch.folios[i];
@@ -1323,12 +1323,10 @@ int ceph_process_folio_batch(struct address_space *mapping,
13231323
rc = ceph_check_page_before_write(mapping, wbc,
13241324
ceph_wbc, folio);
13251325
if (rc == -ENODATA) {
1326-
rc = 0;
13271326
folio_unlock(folio);
13281327
ceph_wbc->fbatch.folios[i] = NULL;
13291328
continue;
13301329
} else if (rc == -E2BIG) {
1331-
rc = 0;
13321330
folio_unlock(folio);
13331331
ceph_wbc->fbatch.folios[i] = NULL;
13341332
break;
@@ -1370,7 +1368,6 @@ int ceph_process_folio_batch(struct address_space *mapping,
13701368
rc = move_dirty_folio_in_page_array(mapping, wbc, ceph_wbc,
13711369
folio);
13721370
if (rc) {
1373-
rc = 0;
13741371
folio_redirty_for_writepage(wbc, folio);
13751372
folio_unlock(folio);
13761373
break;
@@ -1381,8 +1378,6 @@ int ceph_process_folio_batch(struct address_space *mapping,
13811378
}
13821379

13831380
ceph_wbc->processed_in_fbatch = i;
1384-
1385-
return rc;
13861381
}
13871382

13881383
static inline
@@ -1686,10 +1681,8 @@ static int ceph_writepages_start(struct address_space *mapping,
16861681
break;
16871682

16881683
process_folio_batch:
1689-
rc = ceph_process_folio_batch(mapping, wbc, &ceph_wbc);
1684+
ceph_process_folio_batch(mapping, wbc, &ceph_wbc);
16901685
ceph_shift_unused_folios_left(&ceph_wbc.fbatch);
1691-
if (rc)
1692-
goto release_folios;
16931686

16941687
/* did we get anything? */
16951688
if (!ceph_wbc.locked_pages)

0 commit comments

Comments (0)