Skip to content

Commit 786da5d

Browse files
committed
Merge tag 'ceph-for-5.20-rc1' of https://github.com/ceph/ceph-client
Pull ceph updates from Ilya Dryomov: "We have a good pile of various fixes and cleanups from Xiubo, Jeff, Luis and others, almost exclusively in the filesystem. Several patches touch files outside of our normal purview to set the stage for bringing in Jeff's long awaited ceph+fscrypt series in the near future. All of them have appropriate acks and sat in linux-next for a while" * tag 'ceph-for-5.20-rc1' of https://github.com/ceph/ceph-client: (27 commits) libceph: clean up ceph_osdc_start_request prototype libceph: fix ceph_pagelist_reserve() comment typo ceph: remove useless check for the folio ceph: don't truncate file in atomic_open ceph: make f_bsize always equal to f_frsize ceph: flush the dirty caps immediately when quota is approaching libceph: print fsid and epoch with osd id libceph: check pointer before assigned to "c->rules[]" ceph: don't get the inline data for new creating files ceph: update the auth cap when the async create req is forwarded ceph: make change_auth_cap_ses a global symbol ceph: fix incorrect old_size length in ceph_mds_request_args ceph: switch back to testing for NULL folio->private in ceph_dirty_folio ceph: call netfs_subreq_terminated with was_async == false ceph: convert to generic_file_llseek ceph: fix the incorrect comment for the ceph_mds_caps struct ceph: don't leak snap_rwsem in handle_cap_grant ceph: prevent a client from exceeding the MDS maximum xattr size ceph: choose auth MDS for getxattr with the Xs caps ceph: add session already open notify support ...
2 parents e18a904 + a8af0d6 commit 786da5d

27 files changed

Lines changed: 538 additions & 233 deletions

drivers/block/rbd.c

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1297,7 +1297,7 @@ static void rbd_osd_submit(struct ceph_osd_request *osd_req)
12971297
dout("%s osd_req %p for obj_req %p objno %llu %llu~%llu\n",
12981298
__func__, osd_req, obj_req, obj_req->ex.oe_objno,
12991299
obj_req->ex.oe_off, obj_req->ex.oe_len);
1300-
ceph_osdc_start_request(osd_req->r_osdc, osd_req, false);
1300+
ceph_osdc_start_request(osd_req->r_osdc, osd_req);
13011301
}
13021302

13031303
/*
@@ -2081,7 +2081,7 @@ static int rbd_object_map_update(struct rbd_obj_request *obj_req, u64 snap_id,
20812081
if (ret)
20822082
return ret;
20832083

2084-
ceph_osdc_start_request(osdc, req, false);
2084+
ceph_osdc_start_request(osdc, req);
20852085
return 0;
20862086
}
20872087

@@ -4768,7 +4768,7 @@ static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
47684768
if (ret)
47694769
goto out_req;
47704770

4771-
ceph_osdc_start_request(osdc, req, false);
4771+
ceph_osdc_start_request(osdc, req);
47724772
ret = ceph_osdc_wait_request(osdc, req);
47734773
if (ret >= 0)
47744774
ceph_copy_from_page_vector(pages, buf, 0, ret);

fs/ceph/addr.c

Lines changed: 24 additions & 35 deletions
Original file line numberDiff line numberDiff line change
@@ -122,7 +122,7 @@ static bool ceph_dirty_folio(struct address_space *mapping, struct folio *folio)
122122
* Reference snap context in folio->private. Also set
123123
* PagePrivate so that we get invalidate_folio callback.
124124
*/
125-
VM_BUG_ON_FOLIO(folio_test_private(folio), folio);
125+
VM_WARN_ON_FOLIO(folio->private, folio);
126126
folio_attach_private(folio, snapc);
127127

128128
return ceph_fscache_dirty_folio(mapping, folio);
@@ -237,7 +237,7 @@ static void finish_netfs_read(struct ceph_osd_request *req)
237237
if (err >= 0 && err < subreq->len)
238238
__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
239239

240-
netfs_subreq_terminated(subreq, err, true);
240+
netfs_subreq_terminated(subreq, err, false);
241241

242242
num_pages = calc_pages_for(osd_data->alignment, osd_data->length);
243243
ceph_put_page_vector(osd_data->pages, num_pages, false);
@@ -313,8 +313,7 @@ static void ceph_netfs_issue_read(struct netfs_io_subrequest *subreq)
313313
int err = 0;
314314
u64 len = subreq->len;
315315

316-
if (ci->i_inline_version != CEPH_INLINE_NONE &&
317-
ceph_netfs_issue_op_inline(subreq))
316+
if (ceph_has_inline_data(ci) && ceph_netfs_issue_op_inline(subreq))
318317
return;
319318

320319
req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout, vino, subreq->start, &len,
@@ -338,16 +337,15 @@ static void ceph_netfs_issue_read(struct netfs_io_subrequest *subreq)
338337
/* should always give us a page-aligned read */
339338
WARN_ON_ONCE(page_off);
340339
len = err;
340+
err = 0;
341341

342342
osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0, false, false);
343343
req->r_callback = finish_netfs_read;
344344
req->r_priv = subreq;
345345
req->r_inode = inode;
346346
ihold(inode);
347347

348-
err = ceph_osdc_start_request(req->r_osdc, req, false);
349-
if (err)
350-
iput(inode);
348+
ceph_osdc_start_request(req->r_osdc, req);
351349
out:
352350
ceph_osdc_put_request(req);
353351
if (err)
@@ -621,9 +619,8 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
621619
dout("writepage %llu~%llu (%llu bytes)\n", page_off, len, len);
622620

623621
req->r_mtime = inode->i_mtime;
624-
err = ceph_osdc_start_request(osdc, req, true);
625-
if (!err)
626-
err = ceph_osdc_wait_request(osdc, req);
622+
ceph_osdc_start_request(osdc, req);
623+
err = ceph_osdc_wait_request(osdc, req);
627624

628625
ceph_update_write_metrics(&fsc->mdsc->metric, req->r_start_latency,
629626
req->r_end_latency, len, err);
@@ -1151,8 +1148,7 @@ static int ceph_writepages_start(struct address_space *mapping,
11511148
}
11521149

11531150
req->r_mtime = inode->i_mtime;
1154-
rc = ceph_osdc_start_request(&fsc->client->osdc, req, true);
1155-
BUG_ON(rc);
1151+
ceph_osdc_start_request(&fsc->client->osdc, req);
11561152
req = NULL;
11571153

11581154
wbc->nr_to_write -= i;
@@ -1327,16 +1323,13 @@ static int ceph_write_begin(struct file *file, struct address_space *mapping,
13271323
int r;
13281324

13291325
r = netfs_write_begin(&ci->netfs, file, inode->i_mapping, pos, len, &folio, NULL);
1330-
if (r == 0)
1331-
folio_wait_fscache(folio);
1332-
if (r < 0) {
1333-
if (folio)
1334-
folio_put(folio);
1335-
} else {
1336-
WARN_ON_ONCE(!folio_test_locked(folio));
1337-
*pagep = &folio->page;
1338-
}
1339-
return r;
1326+
if (r < 0)
1327+
return r;
1328+
1329+
folio_wait_fscache(folio);
1330+
WARN_ON_ONCE(!folio_test_locked(folio));
1331+
*pagep = &folio->page;
1332+
return 0;
13401333
}
13411334

13421335
/*
@@ -1439,7 +1432,7 @@ static vm_fault_t ceph_filemap_fault(struct vm_fault *vmf)
14391432
inode, off, ceph_cap_string(got));
14401433

14411434
if ((got & (CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO)) ||
1442-
ci->i_inline_version == CEPH_INLINE_NONE) {
1435+
!ceph_has_inline_data(ci)) {
14431436
CEPH_DEFINE_RW_CONTEXT(rw_ctx, got);
14441437
ceph_add_rw_context(fi, &rw_ctx);
14451438
ret = filemap_fault(vmf);
@@ -1696,9 +1689,8 @@ int ceph_uninline_data(struct file *file)
16961689
}
16971690

16981691
req->r_mtime = inode->i_mtime;
1699-
err = ceph_osdc_start_request(&fsc->client->osdc, req, false);
1700-
if (!err)
1701-
err = ceph_osdc_wait_request(&fsc->client->osdc, req);
1692+
ceph_osdc_start_request(&fsc->client->osdc, req);
1693+
err = ceph_osdc_wait_request(&fsc->client->osdc, req);
17021694
ceph_osdc_put_request(req);
17031695
if (err < 0)
17041696
goto out_unlock;
@@ -1739,9 +1731,8 @@ int ceph_uninline_data(struct file *file)
17391731
}
17401732

17411733
req->r_mtime = inode->i_mtime;
1742-
err = ceph_osdc_start_request(&fsc->client->osdc, req, false);
1743-
if (!err)
1744-
err = ceph_osdc_wait_request(&fsc->client->osdc, req);
1734+
ceph_osdc_start_request(&fsc->client->osdc, req);
1735+
err = ceph_osdc_wait_request(&fsc->client->osdc, req);
17451736

17461737
ceph_update_write_metrics(&fsc->mdsc->metric, req->r_start_latency,
17471738
req->r_end_latency, len, err);
@@ -1912,15 +1903,13 @@ static int __ceph_pool_perm_get(struct ceph_inode_info *ci,
19121903

19131904
osd_req_op_raw_data_in_pages(rd_req, 0, pages, PAGE_SIZE,
19141905
0, false, true);
1915-
err = ceph_osdc_start_request(&fsc->client->osdc, rd_req, false);
1906+
ceph_osdc_start_request(&fsc->client->osdc, rd_req);
19161907

19171908
wr_req->r_mtime = ci->netfs.inode.i_mtime;
1918-
err2 = ceph_osdc_start_request(&fsc->client->osdc, wr_req, false);
1909+
ceph_osdc_start_request(&fsc->client->osdc, wr_req);
19191910

1920-
if (!err)
1921-
err = ceph_osdc_wait_request(&fsc->client->osdc, rd_req);
1922-
if (!err2)
1923-
err2 = ceph_osdc_wait_request(&fsc->client->osdc, wr_req);
1911+
err = ceph_osdc_wait_request(&fsc->client->osdc, rd_req);
1912+
err2 = ceph_osdc_wait_request(&fsc->client->osdc, wr_req);
19241913

19251914
if (err >= 0 || err == -ENOENT)
19261915
have |= POOL_READ;

fs/ceph/caps.c

Lines changed: 19 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -602,8 +602,8 @@ static void __check_cap_issue(struct ceph_inode_info *ci, struct ceph_cap *cap,
602602
* @ci: inode to be moved
603603
* @session: new auth caps session
604604
*/
605-
static void change_auth_cap_ses(struct ceph_inode_info *ci,
606-
struct ceph_mds_session *session)
605+
void change_auth_cap_ses(struct ceph_inode_info *ci,
606+
struct ceph_mds_session *session)
607607
{
608608
lockdep_assert_held(&ci->i_ceph_lock);
609609

@@ -1978,14 +1978,15 @@ void ceph_check_caps(struct ceph_inode_info *ci, int flags,
19781978
}
19791979

19801980
dout("check_caps %llx.%llx file_want %s used %s dirty %s flushing %s"
1981-
" issued %s revoking %s retain %s %s%s\n", ceph_vinop(inode),
1981+
" issued %s revoking %s retain %s %s%s%s\n", ceph_vinop(inode),
19821982
ceph_cap_string(file_wanted),
19831983
ceph_cap_string(used), ceph_cap_string(ci->i_dirty_caps),
19841984
ceph_cap_string(ci->i_flushing_caps),
19851985
ceph_cap_string(issued), ceph_cap_string(revoking),
19861986
ceph_cap_string(retain),
19871987
(flags & CHECK_CAPS_AUTHONLY) ? " AUTHONLY" : "",
1988-
(flags & CHECK_CAPS_FLUSH) ? " FLUSH" : "");
1988+
(flags & CHECK_CAPS_FLUSH) ? " FLUSH" : "",
1989+
(flags & CHECK_CAPS_NOINVAL) ? " NOINVAL" : "");
19891990

19901991
/*
19911992
* If we no longer need to hold onto old our caps, and we may
@@ -3005,7 +3006,7 @@ int ceph_get_caps(struct file *filp, int need, int want, loff_t endoff, int *got
30053006
}
30063007

30073008
if (S_ISREG(ci->netfs.inode.i_mode) &&
3008-
ci->i_inline_version != CEPH_INLINE_NONE &&
3009+
ceph_has_inline_data(ci) &&
30093010
(_got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) &&
30103011
i_size_read(inode) > 0) {
30113012
struct page *page =
@@ -3578,24 +3579,23 @@ static void handle_cap_grant(struct inode *inode,
35783579
fill_inline = true;
35793580
}
35803581

3581-
if (ci->i_auth_cap == cap &&
3582-
le32_to_cpu(grant->op) == CEPH_CAP_OP_IMPORT) {
3583-
if (newcaps & ~extra_info->issued)
3584-
wake = true;
3582+
if (le32_to_cpu(grant->op) == CEPH_CAP_OP_IMPORT) {
3583+
if (ci->i_auth_cap == cap) {
3584+
if (newcaps & ~extra_info->issued)
3585+
wake = true;
35853586

3586-
if (ci->i_requested_max_size > max_size ||
3587-
!(le32_to_cpu(grant->wanted) & CEPH_CAP_ANY_FILE_WR)) {
3588-
/* re-request max_size if necessary */
3589-
ci->i_requested_max_size = 0;
3590-
wake = true;
3591-
}
3587+
if (ci->i_requested_max_size > max_size ||
3588+
!(le32_to_cpu(grant->wanted) & CEPH_CAP_ANY_FILE_WR)) {
3589+
/* re-request max_size if necessary */
3590+
ci->i_requested_max_size = 0;
3591+
wake = true;
3592+
}
35923593

3593-
ceph_kick_flushing_inode_caps(session, ci);
3594-
spin_unlock(&ci->i_ceph_lock);
3594+
ceph_kick_flushing_inode_caps(session, ci);
3595+
}
35953596
up_read(&session->s_mdsc->snap_rwsem);
3596-
} else {
3597-
spin_unlock(&ci->i_ceph_lock);
35983597
}
3598+
spin_unlock(&ci->i_ceph_lock);
35993599

36003600
if (fill_inline)
36013601
ceph_fill_inline_data(inode, NULL, extra_info->inline_data,

0 commit comments

Comments
 (0)