Skip to content

Commit 634d4a7

Browse files
author
Darrick J. Wong
committed
xfs: accumulate iextent records when checking bmap
Currently, the bmap scrubber checks file fork mappings individually. In the case that the file uses multiple mappings to a single contiguous piece of space, the scrubber repeatedly locks the AG to check the existence of a reverse mapping that overlaps this file mapping. If the reverse mapping starts before or ends after the mapping we're checking, it will also crawl around in the bmbt checking correspondence for adjacent extents. This is not very time efficient because it does the crawling while holding the AGF buffer, and checks the middle mappings multiple times. Instead, create a custom iextent record iterator function that combines multiple adjacent allocated mappings into one large incore bmbt record. This is feasible because the incore bmbt record length is 64-bits wide. Signed-off-by: Darrick J. Wong <djwong@kernel.org> Reviewed-by: Dave Chinner <dchinner@redhat.com>
1 parent 971ee3a commit 634d4a7

2 files changed

Lines changed: 107 additions & 78 deletions

File tree

fs/xfs/libxfs/xfs_bmap.h

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -145,7 +145,7 @@ static inline int xfs_bmapi_whichfork(uint32_t bmapi_flags)
145145
{ BMAP_COWFORK, "COW" }
146146

147147
/* Return true if the extent is an allocated extent, written or not. */
148-
static inline bool xfs_bmap_is_real_extent(struct xfs_bmbt_irec *irec)
148+
static inline bool xfs_bmap_is_real_extent(const struct xfs_bmbt_irec *irec)
149149
{
150150
return irec->br_startblock != HOLESTARTBLOCK &&
151151
irec->br_startblock != DELAYSTARTBLOCK &&

fs/xfs/scrub/bmap.c

Lines changed: 106 additions & 77 deletions
Original file line number | Diff line number | Diff line change
@@ -165,48 +165,6 @@ xchk_bmap_get_rmap(
165165
return has_rmap;
166166
}
167167

168-
static inline bool
169-
xchk_bmap_has_prev(
170-
struct xchk_bmap_info *info,
171-
struct xfs_bmbt_irec *irec)
172-
{
173-
struct xfs_bmbt_irec got;
174-
struct xfs_ifork *ifp;
175-
176-
ifp = xfs_ifork_ptr(info->sc->ip, info->whichfork);
177-
178-
if (!xfs_iext_peek_prev_extent(ifp, &info->icur, &got))
179-
return false;
180-
if (got.br_startoff + got.br_blockcount != irec->br_startoff)
181-
return false;
182-
if (got.br_startblock + got.br_blockcount != irec->br_startblock)
183-
return false;
184-
if (got.br_state != irec->br_state)
185-
return false;
186-
return true;
187-
}
188-
189-
static inline bool
190-
xchk_bmap_has_next(
191-
struct xchk_bmap_info *info,
192-
struct xfs_bmbt_irec *irec)
193-
{
194-
struct xfs_bmbt_irec got;
195-
struct xfs_ifork *ifp;
196-
197-
ifp = xfs_ifork_ptr(info->sc->ip, info->whichfork);
198-
199-
if (!xfs_iext_peek_next_extent(ifp, &info->icur, &got))
200-
return false;
201-
if (irec->br_startoff + irec->br_blockcount != got.br_startoff)
202-
return false;
203-
if (irec->br_startblock + irec->br_blockcount != got.br_startblock)
204-
return false;
205-
if (got.br_state != irec->br_state)
206-
return false;
207-
return true;
208-
}
209-
210168
/* Make sure that we have rmapbt records for this extent. */
211169
STATIC void
212170
xchk_bmap_xref_rmap(
@@ -277,31 +235,20 @@ xchk_bmap_xref_rmap(
277235
irec->br_startoff);
278236

279237
/*
280-
* If the rmap starts before this bmbt record, make sure there's a bmbt
281-
* record for the previous offset that is contiguous with this mapping.
282-
* Skip this for CoW fork extents because the refcount btree (and not
283-
* the inode) is the ondisk owner for those extents.
284-
*/
285-
if (info->whichfork != XFS_COW_FORK && rmap.rm_startblock < agbno &&
286-
!xchk_bmap_has_prev(info, irec)) {
287-
xchk_fblock_xref_set_corrupt(info->sc, info->whichfork,
288-
irec->br_startoff);
289-
return;
290-
}
291-
292-
/*
293-
* If the rmap ends after this bmbt record, make sure there's a bmbt
294-
* record for the next offset that is contiguous with this mapping.
295-
* Skip this for CoW fork extents because the refcount btree (and not
296-
* the inode) is the ondisk owner for those extents.
238+
* The rmap must correspond exactly with this bmbt record. Skip this
239+
* for CoW fork extents because the refcount btree (and not the inode)
240+
* is the ondisk owner for those extents.
297241
*/
298-
rmap_end = (unsigned long long)rmap.rm_startblock + rmap.rm_blockcount;
299-
if (info->whichfork != XFS_COW_FORK &&
300-
rmap_end > agbno + irec->br_blockcount &&
301-
!xchk_bmap_has_next(info, irec)) {
302-
xchk_fblock_xref_set_corrupt(info->sc, info->whichfork,
303-
irec->br_startoff);
304-
return;
242+
if (info->whichfork != XFS_COW_FORK) {
243+
if (rmap.rm_startblock != agbno)
244+
xchk_fblock_xref_set_corrupt(info->sc, info->whichfork,
245+
irec->br_startoff);
246+
247+
rmap_end = (unsigned long long)rmap.rm_startblock +
248+
rmap.rm_blockcount;
249+
if (rmap_end != agbno + irec->br_blockcount)
250+
xchk_fblock_xref_set_corrupt(info->sc, info->whichfork,
251+
irec->br_startoff);
305252
}
306253
}
307254

@@ -428,15 +375,7 @@ xchk_bmap_iextent(
428375

429376
xchk_bmap_dirattr_extent(ip, info, irec);
430377

431-
/* There should never be a "hole" extent in either extent list. */
432-
if (irec->br_startblock == HOLESTARTBLOCK)
433-
xchk_fblock_set_corrupt(info->sc, info->whichfork,
434-
irec->br_startoff);
435-
436378
/* Make sure the extent points to a valid place. */
437-
if (irec->br_blockcount > XFS_MAX_BMBT_EXTLEN)
438-
xchk_fblock_set_corrupt(info->sc, info->whichfork,
439-
irec->br_startoff);
440379
if (info->is_rt &&
441380
!xfs_verify_rtext(mp, irec->br_startblock, irec->br_blockcount))
442381
xchk_fblock_set_corrupt(info->sc, info->whichfork,
@@ -740,6 +679,90 @@ xchk_bmap_iextent_delalloc(
740679
irec->br_startoff);
741680
}
742681

682+
/* Decide if this individual fork mapping is ok. */
683+
static bool
684+
xchk_bmap_iext_mapping(
685+
struct xchk_bmap_info *info,
686+
const struct xfs_bmbt_irec *irec)
687+
{
688+
/* There should never be a "hole" extent in either extent list. */
689+
if (irec->br_startblock == HOLESTARTBLOCK)
690+
return false;
691+
if (irec->br_blockcount > XFS_MAX_BMBT_EXTLEN)
692+
return false;
693+
return true;
694+
}
695+
696+
/* Are these two mappings contiguous with each other? */
697+
static inline bool
698+
xchk_are_bmaps_contiguous(
699+
const struct xfs_bmbt_irec *b1,
700+
const struct xfs_bmbt_irec *b2)
701+
{
702+
/* Don't try to combine unallocated mappings. */
703+
if (!xfs_bmap_is_real_extent(b1))
704+
return false;
705+
if (!xfs_bmap_is_real_extent(b2))
706+
return false;
707+
708+
/* Does b2 come right after b1 in the logical and physical range? */
709+
if (b1->br_startoff + b1->br_blockcount != b2->br_startoff)
710+
return false;
711+
if (b1->br_startblock + b1->br_blockcount != b2->br_startblock)
712+
return false;
713+
if (b1->br_state != b2->br_state)
714+
return false;
715+
return true;
716+
}
717+
718+
/*
719+
* Walk the incore extent records, accumulating consecutive contiguous records
720+
* into a single incore mapping. Returns true if @irec has been set to a
721+
* mapping or false if there are no more mappings. Caller must ensure that
722+
* @info.icur is zeroed before the first call.
723+
*/
724+
static int
725+
xchk_bmap_iext_iter(
726+
struct xchk_bmap_info *info,
727+
struct xfs_bmbt_irec *irec)
728+
{
729+
struct xfs_bmbt_irec got;
730+
struct xfs_ifork *ifp;
731+
732+
ifp = xfs_ifork_ptr(info->sc->ip, info->whichfork);
733+
734+
/* Advance to the next iextent record and check the mapping. */
735+
xfs_iext_next(ifp, &info->icur);
736+
if (!xfs_iext_get_extent(ifp, &info->icur, irec))
737+
return false;
738+
739+
if (!xchk_bmap_iext_mapping(info, irec)) {
740+
xchk_fblock_set_corrupt(info->sc, info->whichfork,
741+
irec->br_startoff);
742+
return false;
743+
}
744+
745+
/*
746+
* Iterate subsequent iextent records and merge them with the one
747+
* that we just read, if possible.
748+
*/
749+
while (xfs_iext_peek_next_extent(ifp, &info->icur, &got)) {
750+
if (!xchk_are_bmaps_contiguous(irec, &got))
751+
break;
752+
753+
if (!xchk_bmap_iext_mapping(info, &got)) {
754+
xchk_fblock_set_corrupt(info->sc, info->whichfork,
755+
got.br_startoff);
756+
return false;
757+
}
758+
759+
irec->br_blockcount += got.br_blockcount;
760+
xfs_iext_next(ifp, &info->icur);
761+
}
762+
763+
return true;
764+
}
765+
743766
/*
744767
* Scrub an inode fork's block mappings.
745768
*
@@ -819,9 +842,15 @@ xchk_bmap(
819842
if (!xchk_fblock_process_error(sc, whichfork, 0, &error))
820843
goto out;
821844

822-
/* Scrub extent records. */
823-
ifp = xfs_ifork_ptr(ip, whichfork);
824-
for_each_xfs_iext(ifp, &info.icur, &irec) {
845+
/*
846+
* Scrub extent records. We use a special iterator function here that
847+
* combines adjacent mappings if they are logically and physically
848+
* contiguous. For large allocations that require multiple bmbt
849+
* records, this reduces the number of cross-referencing calls, which
850+
* reduces runtime. Cross referencing with the rmap is simpler because
851+
* the rmap must match the combined mapping exactly.
852+
*/
853+
while (xchk_bmap_iext_iter(&info, &irec)) {
825854
if (xchk_should_terminate(sc, &error) ||
826855
(sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
827856
goto out;

0 commit comments

Comments (0)