Skip to content

Commit 9ba67fd

Browse files
adam900710 authored and kdave committed
btrfs: raid56: prepare recover_vertical() to support bs > ps cases
Currently recover_vertical() assumes that every fs block can be mapped by one page, this is blocking bs > ps support for raid56. Prepare recover_vertical() to support bs > ps cases by: - Introduce recover_vertical_step() helper Which will recover a full step (min(PAGE_SIZE, sectorsize)). Now recover_vertical() will do the error check for the specified sector, do the recover step by step, then do the sector verification. - Fix a spelling error of get_rbio_vertical_errors() The old name has a typo: "veritical". Signed-off-by: Qu Wenruo <wqu@suse.com> Signed-off-by: David Sterba <dsterba@suse.com>
1 parent 826325b commit 9ba67fd

1 file changed

Lines changed: 68 additions & 73 deletions

File tree

fs/btrfs/raid56.c

Lines changed: 68 additions & 73 deletions
Original file line numberDiff line numberDiff line change
@@ -1007,21 +1007,13 @@ static phys_addr_t sector_paddr_in_rbio(struct btrfs_raid_bio *rbio,
10071007
* Similar to sector_paddr_in_rbio(), but with extra consideration for
10081008
* bs > ps cases, where we can have multiple steps for a fs block.
10091009
*/
1010-
static phys_addr_t step_paddr_in_rbio(struct btrfs_raid_bio *rbio,
1011-
int stripe_nr, int sector_nr, int step_nr,
1012-
bool bio_list_only)
1010+
static phys_addr_t sector_step_paddr_in_rbio(struct btrfs_raid_bio *rbio,
1011+
int stripe_nr, int sector_nr, int step_nr,
1012+
bool bio_list_only)
10131013
{
10141014
phys_addr_t ret = INVALID_PADDR;
1015-
int index;
1015+
const int index = rbio_paddr_index(rbio, stripe_nr, sector_nr, step_nr);
10161016

1017-
ASSERT_RBIO_STRIPE(stripe_nr >= 0 && stripe_nr < rbio->real_stripes,
1018-
rbio, stripe_nr);
1019-
ASSERT_RBIO_SECTOR(sector_nr >= 0 && sector_nr < rbio->stripe_nsectors,
1020-
rbio, sector_nr);
1021-
ASSERT_RBIO_SECTOR(step_nr >= 0 && step_nr < rbio->sector_nsteps,
1022-
rbio, sector_nr);
1023-
1024-
index = (stripe_nr * rbio->stripe_nsectors + sector_nr) * rbio->sector_nsteps + step_nr;
10251017
ASSERT(index >= 0 && index < rbio->nr_sectors * rbio->sector_nsteps);
10261018

10271019
scoped_guard(spinlock, &rbio->bio_list_lock) {
@@ -1147,8 +1139,8 @@ static int alloc_rbio_parity_pages(struct btrfs_raid_bio *rbio)
11471139
* @faila and @failb will also be updated to the first and second stripe
11481140
* number of the errors.
11491141
*/
1150-
static int get_rbio_veritical_errors(struct btrfs_raid_bio *rbio, int sector_nr,
1151-
int *faila, int *failb)
1142+
static int get_rbio_vertical_errors(struct btrfs_raid_bio *rbio, int sector_nr,
1143+
int *faila, int *failb)
11521144
{
11531145
int stripe_nr;
11541146
int found_errors = 0;
@@ -1219,8 +1211,8 @@ static int rbio_add_io_paddr(struct btrfs_raid_bio *rbio, struct bio_list *bio_l
12191211
rbio->error_bitmap);
12201212

12211213
/* Check if we have reached tolerance early. */
1222-
found_errors = get_rbio_veritical_errors(rbio, sector_nr,
1223-
NULL, NULL);
1214+
found_errors = get_rbio_vertical_errors(rbio, sector_nr,
1215+
NULL, NULL);
12241216
if (unlikely(found_errors > rbio->bioc->max_errors))
12251217
return -EIO;
12261218
return 0;
@@ -1367,7 +1359,7 @@ static void generate_pq_vertical_step(struct btrfs_raid_bio *rbio, unsigned int
13671359
/* First collect one sector from each data stripe */
13681360
for (stripe = 0; stripe < rbio->nr_data; stripe++)
13691361
pointers[stripe] = kmap_local_paddr(
1370-
step_paddr_in_rbio(rbio, stripe, sector_nr, step_nr, 0));
1362+
sector_step_paddr_in_rbio(rbio, stripe, sector_nr, step_nr, 0));
13711363

13721364
/* Then add the parity stripe */
13731365
pointers[stripe++] = kmap_local_paddr(rbio_pstripe_step_paddr(rbio, sector_nr, step_nr));
@@ -1868,41 +1860,18 @@ static int verify_one_sector(struct btrfs_raid_bio *rbio,
18681860
return ret;
18691861
}
18701862

1871-
/*
1872-
* Recover a vertical stripe specified by @sector_nr.
1873-
* @*pointers are the pre-allocated pointers by the caller, so we don't
1874-
* need to allocate/free the pointers again and again.
1875-
*/
1876-
static int recover_vertical(struct btrfs_raid_bio *rbio, int sector_nr,
1877-
void **pointers, void **unmap_array)
1863+
static void recover_vertical_step(struct btrfs_raid_bio *rbio,
1864+
unsigned int sector_nr,
1865+
unsigned int step_nr,
1866+
int faila, int failb,
1867+
void **pointers, void **unmap_array)
18781868
{
18791869
struct btrfs_fs_info *fs_info = rbio->bioc->fs_info;
1880-
const u32 sectorsize = fs_info->sectorsize;
1881-
int found_errors;
1882-
int faila;
1883-
int failb;
1870+
const u32 step = min(fs_info->sectorsize, PAGE_SIZE);
18841871
int stripe_nr;
1885-
int ret = 0;
18861872

1887-
/*
1888-
* Now we just use bitmap to mark the horizontal stripes in
1889-
* which we have data when doing parity scrub.
1890-
*/
1891-
if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB &&
1892-
!test_bit(sector_nr, &rbio->dbitmap))
1893-
return 0;
1894-
1895-
found_errors = get_rbio_veritical_errors(rbio, sector_nr, &faila,
1896-
&failb);
1897-
/*
1898-
* No errors in the vertical stripe, skip it. Can happen for recovery
1899-
* which only part of a stripe failed csum check.
1900-
*/
1901-
if (!found_errors)
1902-
return 0;
1903-
1904-
if (unlikely(found_errors > rbio->bioc->max_errors))
1905-
return -EIO;
1873+
ASSERT(step_nr < rbio->sector_nsteps);
1874+
ASSERT(sector_nr < rbio->stripe_nsectors);
19061875

19071876
/*
19081877
* Setup our array of pointers with sectors from each stripe
@@ -1918,9 +1887,9 @@ static int recover_vertical(struct btrfs_raid_bio *rbio, int sector_nr,
19181887
* bio list if possible.
19191888
*/
19201889
if (rbio->operation == BTRFS_RBIO_READ_REBUILD) {
1921-
paddr = sector_paddr_in_rbio(rbio, stripe_nr, sector_nr, 0);
1890+
paddr = sector_step_paddr_in_rbio(rbio, stripe_nr, sector_nr, step_nr, 0);
19221891
} else {
1923-
paddr = rbio_stripe_paddr(rbio, stripe_nr, sector_nr);
1892+
paddr = rbio_stripe_step_paddr(rbio, stripe_nr, sector_nr, step_nr);
19241893
}
19251894
pointers[stripe_nr] = kmap_local_paddr(paddr);
19261895
unmap_array[stripe_nr] = pointers[stripe_nr];
@@ -1968,10 +1937,10 @@ static int recover_vertical(struct btrfs_raid_bio *rbio, int sector_nr,
19681937
}
19691938

19701939
if (failb == rbio->real_stripes - 2) {
1971-
raid6_datap_recov(rbio->real_stripes, sectorsize,
1940+
raid6_datap_recov(rbio->real_stripes, step,
19721941
faila, pointers);
19731942
} else {
1974-
raid6_2data_recov(rbio->real_stripes, sectorsize,
1943+
raid6_2data_recov(rbio->real_stripes, step,
19751944
faila, failb, pointers);
19761945
}
19771946
} else {
@@ -1981,7 +1950,7 @@ static int recover_vertical(struct btrfs_raid_bio *rbio, int sector_nr,
19811950
ASSERT(failb == -1);
19821951
pstripe:
19831952
/* Copy parity block into failed block to start with */
1984-
memcpy(pointers[faila], pointers[rbio->nr_data], sectorsize);
1953+
memcpy(pointers[faila], pointers[rbio->nr_data], step);
19851954

19861955
/* Rearrange the pointer array */
19871956
p = pointers[faila];
@@ -1991,40 +1960,66 @@ static int recover_vertical(struct btrfs_raid_bio *rbio, int sector_nr,
19911960
pointers[rbio->nr_data - 1] = p;
19921961

19931962
/* Xor in the rest */
1994-
run_xor(pointers, rbio->nr_data - 1, sectorsize);
1995-
1963+
run_xor(pointers, rbio->nr_data - 1, step);
19961964
}
19971965

1966+
cleanup:
1967+
for (stripe_nr = rbio->real_stripes - 1; stripe_nr >= 0; stripe_nr--)
1968+
kunmap_local(unmap_array[stripe_nr]);
1969+
}
1970+
1971+
/*
1972+
* Recover a vertical stripe specified by @sector_nr.
1973+
* @*pointers are the pre-allocated pointers by the caller, so we don't
1974+
* need to allocate/free the pointers again and again.
1975+
*/
1976+
static int recover_vertical(struct btrfs_raid_bio *rbio, int sector_nr,
1977+
void **pointers, void **unmap_array)
1978+
{
1979+
int found_errors;
1980+
int faila;
1981+
int failb;
1982+
int ret = 0;
1983+
19981984
/*
1999-
* No matter if this is a RMW or recovery, we should have all
2000-
* failed sectors repaired in the vertical stripe, thus they are now
2001-
* uptodate.
2002-
* Especially if we determine to cache the rbio, we need to
2003-
* have at least all data sectors uptodate.
2004-
*
2005-
* If possible, also check if the repaired sector matches its data
2006-
* checksum.
1985+
* Now we just use bitmap to mark the horizontal stripes in
1986+
* which we have data when doing parity scrub.
20071987
*/
1988+
if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB &&
1989+
!test_bit(sector_nr, &rbio->dbitmap))
1990+
return 0;
1991+
1992+
found_errors = get_rbio_vertical_errors(rbio, sector_nr, &faila,
1993+
&failb);
1994+
/*
1995+
* No errors in the vertical stripe, skip it. Can happen for recovery
1996+
* which only part of a stripe failed csum check.
1997+
*/
1998+
if (!found_errors)
1999+
return 0;
2000+
2001+
if (unlikely(found_errors > rbio->bioc->max_errors))
2002+
return -EIO;
2003+
2004+
for (int i = 0; i < rbio->sector_nsteps; i++)
2005+
recover_vertical_step(rbio, sector_nr, i, faila, failb,
2006+
pointers, unmap_array);
20082007
if (faila >= 0) {
20092008
ret = verify_one_sector(rbio, faila, sector_nr);
20102009
if (ret < 0)
2011-
goto cleanup;
2010+
return ret;
20122011

20132012
set_bit(rbio_sector_index(rbio, faila, sector_nr),
20142013
rbio->stripe_uptodate_bitmap);
20152014
}
20162015
if (failb >= 0) {
20172016
ret = verify_one_sector(rbio, failb, sector_nr);
20182017
if (ret < 0)
2019-
goto cleanup;
2018+
return ret;
20202019

20212020
set_bit(rbio_sector_index(rbio, failb, sector_nr),
20222021
rbio->stripe_uptodate_bitmap);
20232022
}
2024-
2025-
cleanup:
2026-
for (stripe_nr = rbio->real_stripes - 1; stripe_nr >= 0; stripe_nr--)
2027-
kunmap_local(unmap_array[stripe_nr]);
20282023
return ret;
20292024
}
20302025

@@ -2162,7 +2157,7 @@ static void set_rbio_raid6_extra_error(struct btrfs_raid_bio *rbio, int mirror_n
21622157
int faila;
21632158
int failb;
21642159

2165-
found_errors = get_rbio_veritical_errors(rbio, sector_nr,
2160+
found_errors = get_rbio_vertical_errors(rbio, sector_nr,
21662161
&faila, &failb);
21672162
/* This vertical stripe doesn't have errors. */
21682163
if (!found_errors)
@@ -2455,7 +2450,7 @@ static void rmw_rbio(struct btrfs_raid_bio *rbio)
24552450
for (sectornr = 0; sectornr < rbio->stripe_nsectors; sectornr++) {
24562451
int found_errors;
24572452

2458-
found_errors = get_rbio_veritical_errors(rbio, sectornr, NULL, NULL);
2453+
found_errors = get_rbio_vertical_errors(rbio, sectornr, NULL, NULL);
24592454
if (unlikely(found_errors > rbio->bioc->max_errors)) {
24602455
ret = -EIO;
24612456
break;
@@ -2735,7 +2730,7 @@ static int recover_scrub_rbio(struct btrfs_raid_bio *rbio)
27352730
int failb;
27362731
int found_errors;
27372732

2738-
found_errors = get_rbio_veritical_errors(rbio, sector_nr,
2733+
found_errors = get_rbio_vertical_errors(rbio, sector_nr,
27392734
&faila, &failb);
27402735
if (unlikely(found_errors > rbio->bioc->max_errors)) {
27412736
ret = -EIO;
@@ -2869,7 +2864,7 @@ static void scrub_rbio(struct btrfs_raid_bio *rbio)
28692864
for (sector_nr = 0; sector_nr < rbio->stripe_nsectors; sector_nr++) {
28702865
int found_errors;
28712866

2872-
found_errors = get_rbio_veritical_errors(rbio, sector_nr, NULL, NULL);
2867+
found_errors = get_rbio_vertical_errors(rbio, sector_nr, NULL, NULL);
28732868
if (unlikely(found_errors > rbio->bioc->max_errors)) {
28742869
ret = -EIO;
28752870
break;

0 commit comments

Comments (0)