Skip to content

Commit e7cef2f

Browse files
dchinner authored and dgchinner committed
Merge tag 'scrub-detect-refcount-gaps-6.4_2023-04-11' of git://git.kernel.org/pub/scm/linux/kernel/git/djwong/xfs-linux into guilt/xfs-for-next
xfs: detect incorrect gaps in refcount btree [v24.5]

The next few patchsets address a deficiency in scrub that I found while QAing the refcount btree scrubber. If there's a gap between refcount records, we need to cross-reference that gap with the reverse mappings to ensure that there are no overlapping records in the rmap btree. If we find any, then the refcount btree is not consistent. This is not a property that is specific to the refcount btree; they all need to have this sort of keyspace scanning logic to detect inconsistencies.

To do this accurately, we need to be able to scan the keyspace of a btree (which we already do) to be able to tell the caller if the keyspace is empty, sparse, or fully covered by records. The first few patches add the keyspace scanner to the generic btree code, along with the ability to mask off parts of btree keys because when we scan the rmapbt, we only care about space usage, not the owners. The final patch closes the scanning gap in the refcountbt scanner.

v23.1: create helpers for the key extraction and comparison functions, improve documentation, and eliminate the ->mask_key indirect calls

Signed-off-by: Darrick J. Wong <djwong@kernel.org>
Signed-off-by: Dave Chinner <david@fromorbit.com>
2 parents 6858c88 + 7ac14fa commit e7cef2f

23 files changed

Lines changed: 610 additions & 129 deletions

fs/xfs/libxfs/xfs_alloc.c

Lines changed: 7 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -3745,13 +3745,16 @@ xfs_alloc_query_all(
37453745
return xfs_btree_query_all(cur, xfs_alloc_query_range_helper, &query);
37463746
}
37473747

3748-
/* Is there a record covering a given extent? */
3748+
/*
3749+
* Scan part of the keyspace of the free space and tell us if the area has no
3750+
* records, is fully mapped by records, or is partially filled.
3751+
*/
37493752
int
3750-
xfs_alloc_has_record(
3753+
xfs_alloc_has_records(
37513754
struct xfs_btree_cur *cur,
37523755
xfs_agblock_t bno,
37533756
xfs_extlen_t len,
3754-
bool *exists)
3757+
enum xbtree_recpacking *outcome)
37553758
{
37563759
union xfs_btree_irec low;
37573760
union xfs_btree_irec high;
@@ -3761,7 +3764,7 @@ xfs_alloc_has_record(
37613764
memset(&high, 0xFF, sizeof(high));
37623765
high.a.ar_startblock = bno + len - 1;
37633766

3764-
return xfs_btree_has_record(cur, &low, &high, exists);
3767+
return xfs_btree_has_records(cur, &low, &high, NULL, outcome);
37653768
}
37663769

37673770
/*

fs/xfs/libxfs/xfs_alloc.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -213,8 +213,8 @@ int xfs_alloc_query_range(struct xfs_btree_cur *cur,
213213
int xfs_alloc_query_all(struct xfs_btree_cur *cur, xfs_alloc_query_range_fn fn,
214214
void *priv);
215215

216-
int xfs_alloc_has_record(struct xfs_btree_cur *cur, xfs_agblock_t bno,
217-
xfs_extlen_t len, bool *exist);
216+
int xfs_alloc_has_records(struct xfs_btree_cur *cur, xfs_agblock_t bno,
217+
xfs_extlen_t len, enum xbtree_recpacking *outcome);
218218

219219
typedef int (*xfs_agfl_walk_fn)(struct xfs_mount *mp, xfs_agblock_t bno,
220220
void *priv);

fs/xfs/libxfs/xfs_alloc_btree.c

Lines changed: 25 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -260,20 +260,27 @@ STATIC int64_t
260260
xfs_bnobt_diff_two_keys(
261261
struct xfs_btree_cur *cur,
262262
const union xfs_btree_key *k1,
263-
const union xfs_btree_key *k2)
263+
const union xfs_btree_key *k2,
264+
const union xfs_btree_key *mask)
264265
{
266+
ASSERT(!mask || mask->alloc.ar_startblock);
267+
265268
return (int64_t)be32_to_cpu(k1->alloc.ar_startblock) -
266-
be32_to_cpu(k2->alloc.ar_startblock);
269+
be32_to_cpu(k2->alloc.ar_startblock);
267270
}
268271

269272
STATIC int64_t
270273
xfs_cntbt_diff_two_keys(
271274
struct xfs_btree_cur *cur,
272275
const union xfs_btree_key *k1,
273-
const union xfs_btree_key *k2)
276+
const union xfs_btree_key *k2,
277+
const union xfs_btree_key *mask)
274278
{
275279
int64_t diff;
276280

281+
ASSERT(!mask || (mask->alloc.ar_blockcount &&
282+
mask->alloc.ar_startblock));
283+
277284
diff = be32_to_cpu(k1->alloc.ar_blockcount) -
278285
be32_to_cpu(k2->alloc.ar_blockcount);
279286
if (diff)
@@ -423,6 +430,19 @@ xfs_cntbt_recs_inorder(
423430
be32_to_cpu(r2->alloc.ar_startblock));
424431
}
425432

433+
STATIC enum xbtree_key_contig
434+
xfs_allocbt_keys_contiguous(
435+
struct xfs_btree_cur *cur,
436+
const union xfs_btree_key *key1,
437+
const union xfs_btree_key *key2,
438+
const union xfs_btree_key *mask)
439+
{
440+
ASSERT(!mask || mask->alloc.ar_startblock);
441+
442+
return xbtree_key_contig(be32_to_cpu(key1->alloc.ar_startblock),
443+
be32_to_cpu(key2->alloc.ar_startblock));
444+
}
445+
426446
static const struct xfs_btree_ops xfs_bnobt_ops = {
427447
.rec_len = sizeof(xfs_alloc_rec_t),
428448
.key_len = sizeof(xfs_alloc_key_t),
@@ -443,6 +463,7 @@ static const struct xfs_btree_ops xfs_bnobt_ops = {
443463
.diff_two_keys = xfs_bnobt_diff_two_keys,
444464
.keys_inorder = xfs_bnobt_keys_inorder,
445465
.recs_inorder = xfs_bnobt_recs_inorder,
466+
.keys_contiguous = xfs_allocbt_keys_contiguous,
446467
};
447468

448469
static const struct xfs_btree_ops xfs_cntbt_ops = {
@@ -465,6 +486,7 @@ static const struct xfs_btree_ops xfs_cntbt_ops = {
465486
.diff_two_keys = xfs_cntbt_diff_two_keys,
466487
.keys_inorder = xfs_cntbt_keys_inorder,
467488
.recs_inorder = xfs_cntbt_recs_inorder,
489+
.keys_contiguous = NULL, /* not needed right now */
468490
};
469491

470492
/* Allocate most of a new allocation btree cursor. */

fs/xfs/libxfs/xfs_bmap_btree.c

Lines changed: 18 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -382,11 +382,14 @@ STATIC int64_t
382382
xfs_bmbt_diff_two_keys(
383383
struct xfs_btree_cur *cur,
384384
const union xfs_btree_key *k1,
385-
const union xfs_btree_key *k2)
385+
const union xfs_btree_key *k2,
386+
const union xfs_btree_key *mask)
386387
{
387388
uint64_t a = be64_to_cpu(k1->bmbt.br_startoff);
388389
uint64_t b = be64_to_cpu(k2->bmbt.br_startoff);
389390

391+
ASSERT(!mask || mask->bmbt.br_startoff);
392+
390393
/*
391394
* Note: This routine previously casted a and b to int64 and subtracted
392395
* them to generate a result. This lead to problems if b was the
@@ -500,6 +503,19 @@ xfs_bmbt_recs_inorder(
500503
xfs_bmbt_disk_get_startoff(&r2->bmbt);
501504
}
502505

506+
STATIC enum xbtree_key_contig
507+
xfs_bmbt_keys_contiguous(
508+
struct xfs_btree_cur *cur,
509+
const union xfs_btree_key *key1,
510+
const union xfs_btree_key *key2,
511+
const union xfs_btree_key *mask)
512+
{
513+
ASSERT(!mask || mask->bmbt.br_startoff);
514+
515+
return xbtree_key_contig(be64_to_cpu(key1->bmbt.br_startoff),
516+
be64_to_cpu(key2->bmbt.br_startoff));
517+
}
518+
503519
static const struct xfs_btree_ops xfs_bmbt_ops = {
504520
.rec_len = sizeof(xfs_bmbt_rec_t),
505521
.key_len = sizeof(xfs_bmbt_key_t),
@@ -520,6 +536,7 @@ static const struct xfs_btree_ops xfs_bmbt_ops = {
520536
.buf_ops = &xfs_bmbt_buf_ops,
521537
.keys_inorder = xfs_bmbt_keys_inorder,
522538
.recs_inorder = xfs_bmbt_recs_inorder,
539+
.keys_contiguous = xfs_bmbt_keys_contiguous,
523540
};
524541

525542
/*

0 commit comments

Comments (0)