Commit 796a404

dhowells authored and brauner committed
netfs: In readahead, put the folio refs as soon as extracted
netfslib currently defers dropping the ref on the folios it obtains during readahead until after it has started I/O, on the basis that the drop can be done whilst waiting for the I/O to complete; however, this runs the risk of the I/O collection racing with the ref drop in future. Furthermore, Matthew Wilcox strongly suggests that the refs should be dropped immediately, as readahead_folio() does (netfslib is using __readahead_batch(), which doesn't drop the refs).

Fixes: ee4cdf7 ("netfs: Speed up buffered reading")
Suggested-by: Matthew Wilcox <willy@infradead.org>
Signed-off-by: David Howells <dhowells@redhat.com>
Link: https://lore.kernel.org/r/3771538.1728052438@warthog.procyon.org.uk
cc: Jeff Layton <jlayton@kernel.org>
cc: netfs@lists.linux.dev
cc: linux-fsdevel@vger.kernel.org
Signed-off-by: Christian Brauner <brauner@kernel.org>
1 parent 8cf0b93 commit 796a404
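
For context, here is a minimal sketch of the batched put idiom the patch adopts, assuming the folio_batch API from <linux/pagevec.h>: folio_batch_add() returns the number of slots still free (zero once the batch is full) and folio_batch_release() drops a reference on every batched folio and reinitialises the batch. The helper name and the folios array are hypothetical illustrations, not part of the patch:

#include <linux/pagevec.h>

/*
 * Hypothetical helper (illustration only): drop one reference on each
 * folio in @folios, batching the puts so that references are released
 * a batch at a time rather than one folio at a time.
 */
static void put_folio_refs_batched(struct folio **folios, unsigned int nr)
{
	struct folio_batch put_batch;
	unsigned int i;

	folio_batch_init(&put_batch);
	for (i = 0; i < nr; i++) {
		/* Queue the put; flush if the batch just became full. */
		if (!folio_batch_add(&put_batch, folios[i]))
			folio_batch_release(&put_batch);
	}
	/* Flush any puts still queued. */
	folio_batch_release(&put_batch);
}

In the patch itself, netfs_load_buffer_from_ra() does the equivalent per folio as it decants the readahead folios, and its callers perform the final folio_batch_release() once the buffer is loaded.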

3 files changed: 16 additions & 34 deletions

fs/netfs/buffered_read.c

Lines changed: 14 additions & 33 deletions
@@ -67,7 +67,8 @@ static int netfs_begin_cache_read(struct netfs_io_request *rreq, struct netfs_in
  * Decant the list of folios to read into a rolling buffer.
  */
 static size_t netfs_load_buffer_from_ra(struct netfs_io_request *rreq,
-					struct folio_queue *folioq)
+					struct folio_queue *folioq,
+					struct folio_batch *put_batch)
 {
 	unsigned int order, nr;
 	size_t size = 0;
@@ -82,6 +83,9 @@ static size_t netfs_load_buffer_from_ra(struct netfs_io_request *rreq,
 		order = folio_order(folio);
 		folioq->orders[i] = order;
 		size += PAGE_SIZE << order;
+
+		if (!folio_batch_add(put_batch, folio))
+			folio_batch_release(put_batch);
 	}

 	for (int i = nr; i < folioq_nr_slots(folioq); i++)
@@ -120,6 +124,9 @@ static ssize_t netfs_prepare_read_iterator(struct netfs_io_subrequest *subreq)
 		 * that we will need to release later - but we don't want to do
 		 * that until after we've started the I/O.
 		 */
+		struct folio_batch put_batch;
+
+		folio_batch_init(&put_batch);
 		while (rreq->submitted < subreq->start + rsize) {
 			struct folio_queue *tail = rreq->buffer_tail, *new;
 			size_t added;
@@ -132,10 +139,11 @@ static ssize_t netfs_prepare_read_iterator(struct netfs_io_subrequest *subreq)
 			new->prev = tail;
 			tail->next = new;
 			rreq->buffer_tail = new;
-			added = netfs_load_buffer_from_ra(rreq, new);
+			added = netfs_load_buffer_from_ra(rreq, new, &put_batch);
 			rreq->iter.count += added;
 			rreq->submitted += added;
 		}
+		folio_batch_release(&put_batch);
 	}

 	subreq->len = rsize;
@@ -348,6 +356,7 @@ static int netfs_wait_for_read(struct netfs_io_request *rreq)
 static int netfs_prime_buffer(struct netfs_io_request *rreq)
 {
 	struct folio_queue *folioq;
+	struct folio_batch put_batch;
 	size_t added;

 	folioq = kmalloc(sizeof(*folioq), GFP_KERNEL);
@@ -360,39 +369,14 @@ static int netfs_prime_buffer(struct netfs_io_request *rreq)
 	rreq->submitted = rreq->start;
 	iov_iter_folio_queue(&rreq->iter, ITER_DEST, folioq, 0, 0, 0);

-	added = netfs_load_buffer_from_ra(rreq, folioq);
+	folio_batch_init(&put_batch);
+	added = netfs_load_buffer_from_ra(rreq, folioq, &put_batch);
+	folio_batch_release(&put_batch);
 	rreq->iter.count += added;
 	rreq->submitted += added;
 	return 0;
 }

-/*
- * Drop the ref on each folio that we inherited from the VM readahead code. We
- * still have the folio locks to pin the page until we complete the I/O.
- *
- * Note that we can't just release the batch in each queue struct as we use the
- * occupancy count in other places.
- */
-static void netfs_put_ra_refs(struct folio_queue *folioq)
-{
-	struct folio_batch fbatch;
-
-	folio_batch_init(&fbatch);
-	while (folioq) {
-		for (unsigned int slot = 0; slot < folioq_count(folioq); slot++) {
-			struct folio *folio = folioq_folio(folioq, slot);
-			if (!folio)
-				continue;
-			trace_netfs_folio(folio, netfs_folio_trace_read_put);
-			if (!folio_batch_add(&fbatch, folio))
-				folio_batch_release(&fbatch);
-		}
-		folioq = folioq->next;
-	}
-
-	folio_batch_release(&fbatch);
-}
-
 /**
  * netfs_readahead - Helper to manage a read request
  * @ractl: The description of the readahead request
@@ -436,9 +420,6 @@ void netfs_readahead(struct readahead_control *ractl)
 		goto cleanup_free;
 	netfs_read_to_pagecache(rreq);

-	/* Release the folio refs whilst we're waiting for the I/O. */
-	netfs_put_ra_refs(rreq->buffer);
-
 	netfs_put_request(rreq, true, netfs_rreq_trace_put_return);
 	return;

fs/netfs/read_collect.c

Lines changed: 2 additions & 0 deletions
@@ -77,6 +77,8 @@ static void netfs_unlock_read_folio(struct netfs_io_subrequest *subreq,
 			folio_unlock(folio);
 		}
 	}
+
+	folioq_clear(folioq, slot);
 }

 /*

include/trace/events/netfs.h

Lines changed: 0 additions & 1 deletion
@@ -172,7 +172,6 @@
 	EM(netfs_folio_trace_read,		"read")		\
 	EM(netfs_folio_trace_read_done,		"read-done")	\
 	EM(netfs_folio_trace_read_gaps,		"read-gaps")	\
-	EM(netfs_folio_trace_read_put,		"read-put")	\
 	EM(netfs_folio_trace_read_unlock,	"read-unlock")	\
 	EM(netfs_folio_trace_redirtied,		"redirtied")	\
 	EM(netfs_folio_trace_store,		"store")	\
