Skip to content

Commit c17a1c0

Browse files
Christoph Hellwig authored and cmaiolino committed
xfs: use a separate member to track space available in the GC scratch buffer
When scratch_head wraps back to 0 and scratch_tail is also 0 because no I/O has completed yet, the ring buffer could be mistaken for empty. Fix this by introducing a separate scratch_available member in struct xfs_zone_gc_data. This actually ends up simplifying the code as well. Reported-by: Chris Mason <clm@meta.com> Signed-off-by: Christoph Hellwig <hch@lst.de> Reviewed-by: Hans Holmberg <hans.holmberg@wdc.com> Signed-off-by: Carlos Maiolino <cem@kernel.org>
1 parent 692243c commit c17a1c0

1 file changed

Lines changed: 9 additions & 16 deletions

File tree

fs/xfs/xfs_zone_gc.c

Lines changed: 9 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -131,10 +131,13 @@ struct xfs_zone_gc_data {
131131
/*
132132
* Scratchpad to buffer GC data, organized as a ring buffer over
133133
* discontiguous folios. scratch_head is where the buffer is filled,
134-
* and scratch_tail tracks the buffer space freed.
134+
* scratch_tail tracks the buffer space freed, and scratch_available
135+
* counts the space available in the ring buffer between the head and
136+
* the tail.
135137
*/
136138
struct folio *scratch_folios[XFS_GC_NR_BUFS];
137139
unsigned int scratch_size;
140+
unsigned int scratch_available;
138141
unsigned int scratch_head;
139142
unsigned int scratch_tail;
140143

@@ -212,6 +215,7 @@ xfs_zone_gc_data_alloc(
212215
goto out_free_scratch;
213216
}
214217
data->scratch_size = XFS_GC_BUF_SIZE * XFS_GC_NR_BUFS;
218+
data->scratch_available = data->scratch_size;
215219
INIT_LIST_HEAD(&data->reading);
216220
INIT_LIST_HEAD(&data->writing);
217221
INIT_LIST_HEAD(&data->resetting);
@@ -574,18 +578,6 @@ xfs_zone_gc_ensure_target(
574578
return oz;
575579
}
576580

577-
static unsigned int
578-
xfs_zone_gc_scratch_available(
579-
struct xfs_zone_gc_data *data)
580-
{
581-
if (!data->scratch_tail)
582-
return data->scratch_size - data->scratch_head;
583-
584-
if (!data->scratch_head)
585-
return data->scratch_tail;
586-
return (data->scratch_size - data->scratch_head) + data->scratch_tail;
587-
}
588-
589581
static bool
590582
xfs_zone_gc_space_available(
591583
struct xfs_zone_gc_data *data)
@@ -596,7 +588,7 @@ xfs_zone_gc_space_available(
596588
if (!oz)
597589
return false;
598590
return oz->oz_allocated < rtg_blocks(oz->oz_rtg) &&
599-
xfs_zone_gc_scratch_available(data);
591+
data->scratch_available;
600592
}
601593

602594
static void
@@ -625,8 +617,7 @@ xfs_zone_gc_alloc_blocks(
625617
if (!oz)
626618
return NULL;
627619

628-
*count_fsb = min(*count_fsb,
629-
XFS_B_TO_FSB(mp, xfs_zone_gc_scratch_available(data)));
620+
*count_fsb = min(*count_fsb, XFS_B_TO_FSB(mp, data->scratch_available));
630621

631622
/*
632623
* Directly allocate GC blocks from the reserved pool.
@@ -730,6 +721,7 @@ xfs_zone_gc_start_chunk(
730721
bio->bi_end_io = xfs_zone_gc_end_io;
731722
xfs_zone_gc_add_data(chunk);
732723
data->scratch_head = (data->scratch_head + len) % data->scratch_size;
724+
data->scratch_available -= len;
733725

734726
WRITE_ONCE(chunk->state, XFS_GC_BIO_NEW);
735727
list_add_tail(&chunk->entry, &data->reading);
@@ -862,6 +854,7 @@ xfs_zone_gc_finish_chunk(
862854

863855
data->scratch_tail =
864856
(data->scratch_tail + chunk->len) % data->scratch_size;
857+
data->scratch_available += chunk->len;
865858

866859
/*
867860
* Cycle through the iolock and wait for direct I/O and layouts to

0 commit comments

Comments
 (0)