
Commit ae67831

netfs: Remove deprecated use of PG_private_2 as a second writeback flag
Remove the deprecated use of PG_private_2 in netfslib.

Signed-off-by: David Howells <dhowells@redhat.com>
Reviewed-by: Jeff Layton <jlayton@kernel.org>
cc: Matthew Wilcox (Oracle) <willy@infradead.org>
cc: linux-cachefs@redhat.com
cc: linux-fsdevel@vger.kernel.org
cc: linux-mm@kvack.org
1 parent 2e9d7e4 commit ae67831

3 files changed

Lines changed: 2 additions & 169 deletions
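For background, the usage being removed treated the folio's private-2 bit (PG_private_2, historically PG_fscache) as a second writeback flag covering the copy of data to the local cache, alongside the ordinary writeback flag that covers the write to the server. A minimal sketch of that deprecated pattern, using hypothetical example_* helpers rather than code from this commit, might look like this:

/* Illustrative sketch only: the example_* helpers are hypothetical and not
 * part of this patch.  Shows the deprecated "PG_private_2 as a second
 * writeback flag" pattern that netfslib is dropping. */
static void example_start_cache_write(struct folio *folio)
{
    folio_start_writeback(folio);   /* normal writeback flag */
    folio_start_private_2(folio);   /* [DEPRECATED] cache-write flag */
    /* ... submit the folio to the server and to fscache; the cache
     * completion handler calls folio_end_private_2(folio) ... */
}

static void example_wait_before_reuse(struct folio *folio)
{
    /* Callers had to wait on both flags before modifying or reclaiming
     * the folio. */
    folio_wait_writeback(folio);
    folio_wait_private_2(folio);    /* [DEPRECATED] */
}

The hunks below delete exactly this kind of marking, waiting and clearing from ceph and netfslib, leaving the ordinary writeback flag as the only thing readers and reclaim need to wait on.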

File tree

fs/ceph/addr.c
fs/netfs/buffered_read.c
fs/netfs/io.c

fs/ceph/addr.c

Lines changed: 1 addition & 18 deletions
@@ -498,11 +498,6 @@ const struct netfs_request_ops ceph_netfs_ops = {
 };
 
 #ifdef CONFIG_CEPH_FSCACHE
-static void ceph_set_page_fscache(struct page *page)
-{
-    folio_start_private_2(page_folio(page)); /* [DEPRECATED] */
-}
-
 static void ceph_fscache_write_terminated(void *priv, ssize_t error, bool was_async)
 {
     struct inode *inode = priv;
@@ -520,10 +515,6 @@ static void ceph_fscache_write_to_cache(struct inode *inode, u64 off, u64 len, bool caching)
                 ceph_fscache_write_terminated, inode, true, caching);
 }
 #else
-static inline void ceph_set_page_fscache(struct page *page)
-{
-}
-
 static inline void ceph_fscache_write_to_cache(struct inode *inode, u64 off, u64 len, bool caching)
 {
 }
@@ -715,8 +706,6 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
     len = wlen;
 
     set_page_writeback(page);
-    if (caching)
-        ceph_set_page_fscache(page);
     ceph_fscache_write_to_cache(inode, page_off, len, caching);
 
     if (IS_ENCRYPTED(inode)) {
@@ -800,8 +789,6 @@ static int ceph_writepage(struct page *page, struct writeback_control *wbc)
         return AOP_WRITEPAGE_ACTIVATE;
     }
 
-    folio_wait_private_2(page_folio(page)); /* [DEPRECATED] */
-
     err = writepage_nounlock(page, wbc);
     if (err == -ERESTARTSYS) {
         /* direct memory reclaimer was killed by SIGKILL. return 0
@@ -1075,16 +1062,14 @@ static int ceph_writepages_start(struct address_space *mapping,
                 unlock_page(page);
                 break;
             }
-            if (PageWriteback(page) ||
-                PagePrivate2(page) /* [DEPRECATED] */) {
+            if (PageWriteback(page)) {
                 if (wbc->sync_mode == WB_SYNC_NONE) {
                     doutc(cl, "%p under writeback\n", page);
                     unlock_page(page);
                     continue;
                 }
                 doutc(cl, "waiting on writeback %p\n", page);
                 wait_on_page_writeback(page);
-                folio_wait_private_2(page_folio(page)); /* [DEPRECATED] */
             }
 
             if (!clear_page_dirty_for_io(page)) {
@@ -1269,8 +1254,6 @@ static int ceph_writepages_start(struct address_space *mapping,
             }
 
             set_page_writeback(page);
-            if (caching)
-                ceph_set_page_fscache(page);
             len += thp_size(page);
         }
         ceph_fscache_write_to_cache(inode, offset, len, caching);
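A small consequence visible in the hunks above: once nothing calls ceph_set_page_fscache(), both its CONFIG_CEPH_FSCACHE implementation and its empty !CONFIG_CEPH_FSCACHE stub can go. The only helper that still needs a stub is ceph_fscache_write_to_cache(), which already takes the caching decision as a parameter; reconstructed here for reference from the context lines of the second hunk:

/* Reconstructed from the surviving context lines; not an extra hunk. */
#else /* !CONFIG_CEPH_FSCACHE */
static inline void ceph_fscache_write_to_cache(struct inode *inode, u64 off,
                                               u64 len, bool caching)
{
}
#endif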

fs/netfs/buffered_read.c

Lines changed: 1 addition & 7 deletions
@@ -464,7 +464,7 @@ int netfs_write_begin(struct netfs_inode *ctx,
     if (!netfs_is_cache_enabled(ctx) &&
         netfs_skip_folio_read(folio, pos, len, false)) {
         netfs_stat(&netfs_n_rh_write_zskip);
-        goto have_folio_no_wait;
+        goto have_folio;
     }
 
     rreq = netfs_alloc_request(mapping, file,
@@ -505,12 +505,6 @@ int netfs_write_begin(struct netfs_inode *ctx,
     netfs_put_request(rreq, false, netfs_rreq_trace_put_return);
 
 have_folio:
-    if (test_bit(NETFS_ICTX_USE_PGPRIV2, &ctx->flags)) {
-        ret = folio_wait_private_2_killable(folio);
-        if (ret < 0)
-            goto error;
-    }
-have_folio_no_wait:
     *_folio = folio;
     _leave(" = 0");
     return 0;
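Reconstructed from the two hunks above (shown only as a summary of the result, not as part of the patch), the tail of netfs_write_begin() now funnels the cache-skip path and the normal completion path through a single label, with no private-2 wait in between:

    /* Reconstructed result of the two hunks above. */
    if (!netfs_is_cache_enabled(ctx) &&
        netfs_skip_folio_read(folio, pos, len, false)) {
        netfs_stat(&netfs_n_rh_write_zskip);
        goto have_folio;
    }
    /* ... otherwise read what is needed into the folio ... */
have_folio:
    *_folio = folio;
    _leave(" = 0");
    return 0;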

fs/netfs/io.c

Lines changed: 0 additions & 144 deletions
@@ -98,146 +98,6 @@ static void netfs_rreq_completed(struct netfs_io_request *rreq, bool was_async)
     netfs_put_request(rreq, was_async, netfs_rreq_trace_put_complete);
 }
 
-/*
- * [DEPRECATED] Deal with the completion of writing the data to the cache. We
- * have to clear the PG_fscache bits on the folios involved and release the
- * caller's ref.
- *
- * May be called in softirq mode and we inherit a ref from the caller.
- */
-static void netfs_rreq_unmark_after_write(struct netfs_io_request *rreq,
-                                          bool was_async)
-{
-    struct netfs_io_subrequest *subreq;
-    struct folio *folio;
-    pgoff_t unlocked = 0;
-    bool have_unlocked = false;
-
-    rcu_read_lock();
-
-    list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
-        XA_STATE(xas, &rreq->mapping->i_pages, subreq->start / PAGE_SIZE);
-
-        xas_for_each(&xas, folio, (subreq->start + subreq->len - 1) / PAGE_SIZE) {
-            if (xas_retry(&xas, folio))
-                continue;
-
-            /* We might have multiple writes from the same huge
-             * folio, but we mustn't unlock a folio more than once.
-             */
-            if (have_unlocked && folio->index <= unlocked)
-                continue;
-            unlocked = folio_next_index(folio) - 1;
-            trace_netfs_folio(folio, netfs_folio_trace_end_copy);
-            folio_end_private_2(folio);
-            have_unlocked = true;
-        }
-    }
-
-    rcu_read_unlock();
-    netfs_rreq_completed(rreq, was_async);
-}
-
-static void netfs_rreq_copy_terminated(void *priv, ssize_t transferred_or_error,
-                                       bool was_async) /* [DEPRECATED] */
-{
-    struct netfs_io_subrequest *subreq = priv;
-    struct netfs_io_request *rreq = subreq->rreq;
-
-    if (IS_ERR_VALUE(transferred_or_error)) {
-        netfs_stat(&netfs_n_rh_write_failed);
-        trace_netfs_failure(rreq, subreq, transferred_or_error,
-                            netfs_fail_copy_to_cache);
-    } else {
-        netfs_stat(&netfs_n_rh_write_done);
-    }
-
-    trace_netfs_sreq(subreq, netfs_sreq_trace_write_term);
-
-    /* If we decrement nr_copy_ops to 0, the ref belongs to us. */
-    if (atomic_dec_and_test(&rreq->nr_copy_ops))
-        netfs_rreq_unmark_after_write(rreq, was_async);
-
-    netfs_put_subrequest(subreq, was_async, netfs_sreq_trace_put_terminated);
-}
-
-/*
- * [DEPRECATED] Perform any outstanding writes to the cache. We inherit a ref
- * from the caller.
- */
-static void netfs_rreq_do_write_to_cache(struct netfs_io_request *rreq)
-{
-    struct netfs_cache_resources *cres = &rreq->cache_resources;
-    struct netfs_io_subrequest *subreq, *next, *p;
-    struct iov_iter iter;
-    int ret;
-
-    trace_netfs_rreq(rreq, netfs_rreq_trace_copy);
-
-    /* We don't want terminating writes trying to wake us up whilst we're
-     * still going through the list.
-     */
-    atomic_inc(&rreq->nr_copy_ops);
-
-    list_for_each_entry_safe(subreq, p, &rreq->subrequests, rreq_link) {
-        if (!test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags)) {
-            list_del_init(&subreq->rreq_link);
-            netfs_put_subrequest(subreq, false,
-                                 netfs_sreq_trace_put_no_copy);
-        }
-    }
-
-    list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
-        /* Amalgamate adjacent writes */
-        while (!list_is_last(&subreq->rreq_link, &rreq->subrequests)) {
-            next = list_next_entry(subreq, rreq_link);
-            if (next->start != subreq->start + subreq->len)
-                break;
-            subreq->len += next->len;
-            list_del_init(&next->rreq_link);
-            netfs_put_subrequest(next, false,
-                                 netfs_sreq_trace_put_merged);
-        }
-
-        ret = cres->ops->prepare_write(cres, &subreq->start, &subreq->len,
-                                       subreq->len, rreq->i_size, true);
-        if (ret < 0) {
-            trace_netfs_failure(rreq, subreq, ret, netfs_fail_prepare_write);
-            trace_netfs_sreq(subreq, netfs_sreq_trace_write_skip);
-            continue;
-        }
-
-        iov_iter_xarray(&iter, ITER_SOURCE, &rreq->mapping->i_pages,
-                        subreq->start, subreq->len);
-
-        atomic_inc(&rreq->nr_copy_ops);
-        netfs_stat(&netfs_n_rh_write);
-        netfs_get_subrequest(subreq, netfs_sreq_trace_get_copy_to_cache);
-        trace_netfs_sreq(subreq, netfs_sreq_trace_write);
-        cres->ops->write(cres, subreq->start, &iter,
-                         netfs_rreq_copy_terminated, subreq);
-    }
-
-    /* If we decrement nr_copy_ops to 0, the usage ref belongs to us. */
-    if (atomic_dec_and_test(&rreq->nr_copy_ops))
-        netfs_rreq_unmark_after_write(rreq, false);
-}
-
-static void netfs_rreq_write_to_cache_work(struct work_struct *work) /* [DEPRECATED] */
-{
-    struct netfs_io_request *rreq =
-        container_of(work, struct netfs_io_request, work);
-
-    netfs_rreq_do_write_to_cache(rreq);
-}
-
-static void netfs_rreq_write_to_cache(struct netfs_io_request *rreq) /* [DEPRECATED] */
-{
-    rreq->work.func = netfs_rreq_write_to_cache_work;
-    if (!queue_work(system_unbound_wq, &rreq->work))
-        BUG();
-}
-
 /*
  * Handle a short read.
  */
@@ -410,10 +270,6 @@ static void netfs_rreq_assess(struct netfs_io_request *rreq, bool was_async)
     clear_bit_unlock(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
     wake_up_bit(&rreq->flags, NETFS_RREQ_IN_PROGRESS);
 
-    if (test_bit(NETFS_RREQ_COPY_TO_CACHE, &rreq->flags) &&
-        test_bit(NETFS_RREQ_USE_PGPRIV2, &rreq->flags))
-        return netfs_rreq_write_to_cache(rreq);
-
     netfs_rreq_completed(rreq, was_async);
 }
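With the whole copy-to-cache block removed, netfs_rreq_assess() now ends unconditionally in completion; reconstructed from the final hunk (a summary of the result, not an additional change), its tail is simply:

    /* Reconstructed result: read completion no longer dispatches writes to
     * the cache itself, so there is no nr_copy_ops bookkeeping and no
     * PG_private_2 bit to set or clear on the folios involved. */
    clear_bit_unlock(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
    wake_up_bit(&rreq->flags, NETFS_RREQ_IN_PROGRESS);

    netfs_rreq_completed(rreq, was_async);

As the commit title indicates, any copying of read data into the cache is expected to go through the regular writeback machinery rather than this read-completion path, which is what makes the second writeback flag redundant.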
