Skip to content

Commit 47a7ce6

Browse files
committed
Merge tag 'ceph-for-5.14-rc1' of git://github.com/ceph/ceph-client
Pull ceph updates from Ilya Dryomov:
 "We have new filesystem client metrics for reporting I/O sizes from
  Xiubo, two patchsets from Jeff that begin to untangle some heavyweight
  blocking locks in the filesystem and a bunch of code cleanups"

* tag 'ceph-for-5.14-rc1' of git://github.com/ceph/ceph-client:
  ceph: take reference to req->r_parent at point of assignment
  ceph: eliminate ceph_async_iput()
  ceph: don't take s_mutex in ceph_flush_snaps
  ceph: don't take s_mutex in try_flush_caps
  ceph: don't take s_mutex or snap_rwsem in ceph_check_caps
  ceph: eliminate session->s_gen_ttl_lock
  ceph: allow ceph_put_mds_session to take NULL or ERR_PTR
  ceph: clean up locking annotation for ceph_get_snap_realm and __lookup_snap_realm
  ceph: add some lockdep assertions around snaprealm handling
  ceph: decoding error in ceph_update_snap_realm should return -EIO
  ceph: add IO size metrics support
  ceph: update and rename __update_latency helper to __update_stdev
  ceph: simplify the metrics struct
  libceph: fix doc warnings in cls_lock_client.c
  libceph: remove unnecessary ret variable in ceph_auth_init()
  libceph: fix some spelling mistakes
  libceph: kill ceph_none_authorizer::reply_buf
  ceph: make ceph_queue_cap_snap static
  ceph: make ceph_netfs_read_ops static
  ceph: remove bogus checks and WARN_ONs from ceph_set_page_dirty
2 parents 96890bc + 4c18347 commit 47a7ce6

18 files changed

Lines changed: 312 additions & 352 deletions

fs/ceph/addr.c

Lines changed: 10 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -82,10 +82,6 @@ static int ceph_set_page_dirty(struct page *page)
8282
struct inode *inode;
8383
struct ceph_inode_info *ci;
8484
struct ceph_snap_context *snapc;
85-
int ret;
86-
87-
if (unlikely(!mapping))
88-
return !TestSetPageDirty(page);
8985

9086
if (PageDirty(page)) {
9187
dout("%p set_page_dirty %p idx %lu -- already dirty\n",
@@ -130,11 +126,7 @@ static int ceph_set_page_dirty(struct page *page)
130126
BUG_ON(PagePrivate(page));
131127
attach_page_private(page, snapc);
132128

133-
ret = __set_page_dirty_nobuffers(page);
134-
WARN_ON(!PageLocked(page));
135-
WARN_ON(!page->mapping);
136-
137-
return ret;
129+
return __set_page_dirty_nobuffers(page);
138130
}
139131

140132
/*
@@ -226,7 +218,7 @@ static void finish_netfs_read(struct ceph_osd_request *req)
226218
int err = req->r_result;
227219

228220
ceph_update_read_metrics(&fsc->mdsc->metric, req->r_start_latency,
229-
req->r_end_latency, err);
221+
req->r_end_latency, osd_data->length, err);
230222

231223
dout("%s: result %d subreq->len=%zu i_size=%lld\n", __func__, req->r_result,
232224
subreq->len, i_size_read(req->r_inode));
@@ -313,7 +305,7 @@ static void ceph_readahead_cleanup(struct address_space *mapping, void *priv)
313305
ceph_put_cap_refs(ci, got);
314306
}
315307

316-
const struct netfs_read_request_ops ceph_netfs_read_ops = {
308+
static const struct netfs_read_request_ops ceph_netfs_read_ops = {
317309
.init_rreq = ceph_init_rreq,
318310
.is_cache_enabled = ceph_is_cache_enabled,
319311
.begin_cache_operation = ceph_begin_cache_operation,
@@ -560,7 +552,7 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
560552
err = ceph_osdc_wait_request(osdc, req);
561553

562554
ceph_update_write_metrics(&fsc->mdsc->metric, req->r_start_latency,
563-
req->r_end_latency, err);
555+
req->r_end_latency, len, err);
564556

565557
ceph_osdc_put_request(req);
566558
if (err == 0)
@@ -635,6 +627,7 @@ static void writepages_finish(struct ceph_osd_request *req)
635627
struct ceph_snap_context *snapc = req->r_snapc;
636628
struct address_space *mapping = inode->i_mapping;
637629
struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
630+
unsigned int len = 0;
638631
bool remove_page;
639632

640633
dout("writepages_finish %p rc %d\n", inode, rc);
@@ -647,9 +640,6 @@ static void writepages_finish(struct ceph_osd_request *req)
647640
ceph_clear_error_write(ci);
648641
}
649642

650-
ceph_update_write_metrics(&fsc->mdsc->metric, req->r_start_latency,
651-
req->r_end_latency, rc);
652-
653643
/*
654644
* We lost the cache cap, need to truncate the page before
655645
* it is unlocked, otherwise we'd truncate it later in the
@@ -666,6 +656,7 @@ static void writepages_finish(struct ceph_osd_request *req)
666656

667657
osd_data = osd_req_op_extent_osd_data(req, i);
668658
BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_PAGES);
659+
len += osd_data->length;
669660
num_pages = calc_pages_for((u64)osd_data->alignment,
670661
(u64)osd_data->length);
671662
total_pages += num_pages;
@@ -696,6 +687,9 @@ static void writepages_finish(struct ceph_osd_request *req)
696687
release_pages(osd_data->pages, num_pages);
697688
}
698689

690+
ceph_update_write_metrics(&fsc->mdsc->metric, req->r_start_latency,
691+
req->r_end_latency, len, rc);
692+
699693
ceph_put_wrbuffer_cap_refs(ci, total_pages, snapc);
700694

701695
osd_data = osd_req_op_extent_osd_data(req, 0);
@@ -1711,7 +1705,7 @@ int ceph_uninline_data(struct file *filp, struct page *locked_page)
17111705
err = ceph_osdc_wait_request(&fsc->client->osdc, req);
17121706

17131707
ceph_update_write_metrics(&fsc->mdsc->metric, req->r_start_latency,
1714-
req->r_end_latency, err);
1708+
req->r_end_latency, len, err);
17151709

17161710
out_put:
17171711
ceph_osdc_put_request(req);

0 commit comments

Comments
 (0)