Skip to content

Commit 89ca1a4

Browse files
adam900710 authored and kdave committed
btrfs: raid56: prepare finish_parity_scrub() to support bs > ps cases
The function finish_parity_scrub() assume each fs block can be mapped by one page, blocking bs > ps support for raid56. Prepare it for bs > ps cases by: - Introduce a helper, verify_one_parity_step() Since the P/Q generation is always done in a vertical stripe, we have to handle the range step by step. - Only clear the rbio->dbitmap if all steps of an fs block match - Remove rbio_stripe_paddr() and sector_paddr_in_rbio() helpers Now we either use the paddrs version for checksum, or the step version for P/Q generation/recovery. - Make alloc_rbio_essential_pages() to handle bs > ps cases Since for bs > ps cases, one fs block needs multiple pages, the existing simple check against rbio->stripe_pages[] is not enough. Extract a dedicated helper, alloc_rbio_sector_pages(), for the existing alloc_rbio_essential_pages(), which is still based on sector number. Signed-off-by: Qu Wenruo <wqu@suse.com> Signed-off-by: David Sterba <dsterba@suse.com>
1 parent ba88278 commit 89ca1a4

1 file changed

Lines changed: 87 additions & 89 deletions

File tree

fs/btrfs/raid56.c

Lines changed: 87 additions & 89 deletions
Original file line numberDiff line numberDiff line change
@@ -735,13 +735,6 @@ static unsigned int rbio_paddr_index(const struct btrfs_raid_bio *rbio,
735735
return ret;
736736
}
737737

738-
/* Return a paddr from rbio->stripe_sectors, not from the bio list */
739-
static phys_addr_t rbio_stripe_paddr(const struct btrfs_raid_bio *rbio,
740-
unsigned int stripe_nr, unsigned int sector_nr)
741-
{
742-
return rbio->stripe_paddrs[rbio_paddr_index(rbio, stripe_nr, sector_nr, 0)];
743-
}
744-
745738
static phys_addr_t rbio_stripe_step_paddr(const struct btrfs_raid_bio *rbio,
746739
unsigned int stripe_nr, unsigned int sector_nr,
747740
unsigned int step_nr)
@@ -1001,46 +994,6 @@ static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, blk_status_t status)
1001994
rbio_endio_bio_list(extra, status);
1002995
}
1003996

1004-
/*
1005-
* Get the paddr specified by its @stripe_nr and @sector_nr.
1006-
*
1007-
* @rbio: The raid bio
1008-
* @stripe_nr: Stripe number, valid range [0, real_stripe)
1009-
* @sector_nr: Sector number inside the stripe,
1010-
* valid range [0, stripe_nsectors)
1011-
* @bio_list_only: Whether to use sectors inside the bio list only.
1012-
*
1013-
* The read/modify/write code wants to reuse the original bio page as much
1014-
* as possible, and only use stripe_sectors as fallback.
1015-
*/
1016-
static phys_addr_t sector_paddr_in_rbio(struct btrfs_raid_bio *rbio,
1017-
int stripe_nr, int sector_nr,
1018-
bool bio_list_only)
1019-
{
1020-
phys_addr_t ret = INVALID_PADDR;
1021-
int index;
1022-
1023-
ASSERT_RBIO_STRIPE(stripe_nr >= 0 && stripe_nr < rbio->real_stripes,
1024-
rbio, stripe_nr);
1025-
ASSERT_RBIO_SECTOR(sector_nr >= 0 && sector_nr < rbio->stripe_nsectors,
1026-
rbio, sector_nr);
1027-
1028-
index = stripe_nr * rbio->stripe_nsectors + sector_nr;
1029-
ASSERT(index >= 0 && index < rbio->nr_sectors);
1030-
1031-
spin_lock(&rbio->bio_list_lock);
1032-
if (rbio->bio_paddrs[index] != INVALID_PADDR || bio_list_only) {
1033-
/* Don't return sector without a valid page pointer */
1034-
if (rbio->bio_paddrs[index] != INVALID_PADDR)
1035-
ret = rbio->bio_paddrs[index];
1036-
spin_unlock(&rbio->bio_list_lock);
1037-
return ret;
1038-
}
1039-
spin_unlock(&rbio->bio_list_lock);
1040-
1041-
return rbio->stripe_paddrs[index];
1042-
}
1043-
1044997
/*
1045998
* Get paddr pointer for the sector specified by its @stripe_nr and @sector_nr.
1046999
*
@@ -2635,42 +2588,116 @@ struct btrfs_raid_bio *raid56_parity_alloc_scrub_rbio(struct bio *bio,
26352588
return rbio;
26362589
}
26372590

2591+
static int alloc_rbio_sector_pages(struct btrfs_raid_bio *rbio,
2592+
int sector_nr)
2593+
{
2594+
const u32 step = min(PAGE_SIZE, rbio->bioc->fs_info->sectorsize);
2595+
const u32 base = sector_nr * rbio->sector_nsteps;
2596+
2597+
for (int i = base; i < base + rbio->sector_nsteps; i++) {
2598+
const unsigned int page_index = (i * step) >> PAGE_SHIFT;
2599+
struct page *page;
2600+
2601+
if (rbio->stripe_pages[page_index])
2602+
continue;
2603+
page = alloc_page(GFP_NOFS);
2604+
if (!page)
2605+
return -ENOMEM;
2606+
rbio->stripe_pages[page_index] = page;
2607+
}
2608+
return 0;
2609+
}
2610+
26382611
/*
26392612
* We just scrub the parity that we have correct data on the same horizontal,
26402613
* so we needn't allocate all pages for all the stripes.
26412614
*/
26422615
static int alloc_rbio_essential_pages(struct btrfs_raid_bio *rbio)
26432616
{
2644-
const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
26452617
int total_sector_nr;
26462618

26472619
for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors;
26482620
total_sector_nr++) {
2649-
struct page *page;
26502621
int sectornr = total_sector_nr % rbio->stripe_nsectors;
2651-
int index = (total_sector_nr * sectorsize) >> PAGE_SHIFT;
2622+
int ret;
26522623

26532624
if (!test_bit(sectornr, &rbio->dbitmap))
26542625
continue;
2655-
if (rbio->stripe_pages[index])
2656-
continue;
2657-
page = alloc_page(GFP_NOFS);
2658-
if (!page)
2659-
return -ENOMEM;
2660-
rbio->stripe_pages[index] = page;
2626+
ret = alloc_rbio_sector_pages(rbio, total_sector_nr);
2627+
if (ret < 0)
2628+
return ret;
26612629
}
26622630
index_stripe_sectors(rbio);
26632631
return 0;
26642632
}
26652633

2634+
/* Return true if the content of the step matches the calculated one. */
2635+
static bool verify_one_parity_step(struct btrfs_raid_bio *rbio,
2636+
void *pointers[], unsigned int sector_nr,
2637+
unsigned int step_nr)
2638+
{
2639+
const unsigned int nr_data = rbio->nr_data;
2640+
const bool has_qstripe = (rbio->real_stripes - rbio->nr_data == 2);
2641+
const u32 step = min(rbio->bioc->fs_info->sectorsize, PAGE_SIZE);
2642+
void *parity;
2643+
bool ret = false;
2644+
2645+
ASSERT(step_nr < rbio->sector_nsteps);
2646+
2647+
/* First collect one page from each data stripe. */
2648+
for (int stripe = 0; stripe < nr_data; stripe++)
2649+
pointers[stripe] = kmap_local_paddr(
2650+
sector_step_paddr_in_rbio(rbio, stripe, sector_nr,
2651+
step_nr, 0));
2652+
2653+
if (has_qstripe) {
2654+
assert_rbio(rbio);
2655+
/* RAID6, call the library function to fill in our P/Q. */
2656+
raid6_call.gen_syndrome(rbio->real_stripes, step, pointers);
2657+
} else {
2658+
/* RAID5. */
2659+
memcpy(pointers[nr_data], pointers[0], step);
2660+
run_xor(pointers + 1, nr_data - 1, step);
2661+
}
2662+
2663+
/* Check scrubbing parity and repair it. */
2664+
parity = kmap_local_paddr(rbio_stripe_step_paddr(rbio, rbio->scrubp, sector_nr, step_nr));
2665+
if (memcmp(parity, pointers[rbio->scrubp], step) != 0)
2666+
memcpy(parity, pointers[rbio->scrubp], step);
2667+
else
2668+
ret = true;
2669+
kunmap_local(parity);
2670+
2671+
for (int stripe = nr_data - 1; stripe >= 0; stripe--)
2672+
kunmap_local(pointers[stripe]);
2673+
return ret;
2674+
}
2675+
2676+
/*
2677+
* The @pointers array should have the P/Q parity already mapped.
2678+
*/
2679+
static void verify_one_parity_sector(struct btrfs_raid_bio *rbio,
2680+
void *pointers[], unsigned int sector_nr)
2681+
{
2682+
bool found_error = false;
2683+
2684+
for (int step_nr = 0; step_nr < rbio->sector_nsteps; step_nr++) {
2685+
bool match;
2686+
2687+
match = verify_one_parity_step(rbio, pointers, sector_nr, step_nr);
2688+
if (!match)
2689+
found_error = true;
2690+
}
2691+
if (!found_error)
2692+
bitmap_clear(&rbio->dbitmap, sector_nr, 1);
2693+
}
2694+
26662695
static int finish_parity_scrub(struct btrfs_raid_bio *rbio)
26672696
{
26682697
struct btrfs_io_context *bioc = rbio->bioc;
2669-
const u32 sectorsize = bioc->fs_info->sectorsize;
26702698
void **pointers = rbio->finish_pointers;
26712699
unsigned long *pbitmap = &rbio->finish_pbitmap;
26722700
int nr_data = rbio->nr_data;
2673-
int stripe;
26742701
int sectornr;
26752702
bool has_qstripe;
26762703
struct page *page;
@@ -2729,37 +2756,8 @@ static int finish_parity_scrub(struct btrfs_raid_bio *rbio)
27292756

27302757
/* Map the parity stripe just once */
27312758

2732-
for_each_set_bit(sectornr, &rbio->dbitmap, rbio->stripe_nsectors) {
2733-
void *parity;
2734-
2735-
/* first collect one page from each data stripe */
2736-
for (stripe = 0; stripe < nr_data; stripe++)
2737-
pointers[stripe] = kmap_local_paddr(
2738-
sector_paddr_in_rbio(rbio, stripe, sectornr, 0));
2739-
2740-
if (has_qstripe) {
2741-
assert_rbio(rbio);
2742-
/* RAID6, call the library function to fill in our P/Q */
2743-
raid6_call.gen_syndrome(rbio->real_stripes, sectorsize,
2744-
pointers);
2745-
} else {
2746-
/* raid5 */
2747-
memcpy(pointers[nr_data], pointers[0], sectorsize);
2748-
run_xor(pointers + 1, nr_data - 1, sectorsize);
2749-
}
2750-
2751-
/* Check scrubbing parity and repair it */
2752-
parity = kmap_local_paddr(rbio_stripe_paddr(rbio, rbio->scrubp, sectornr));
2753-
if (memcmp(parity, pointers[rbio->scrubp], sectorsize) != 0)
2754-
memcpy(parity, pointers[rbio->scrubp], sectorsize);
2755-
else
2756-
/* Parity is right, needn't writeback */
2757-
bitmap_clear(&rbio->dbitmap, sectornr, 1);
2758-
kunmap_local(parity);
2759-
2760-
for (stripe = nr_data - 1; stripe >= 0; stripe--)
2761-
kunmap_local(pointers[stripe]);
2762-
}
2759+
for_each_set_bit(sectornr, &rbio->dbitmap, rbio->stripe_nsectors)
2760+
verify_one_parity_sector(rbio, pointers, sectornr);
27632761

27642762
kunmap_local(pointers[nr_data]);
27652763
__free_page(phys_to_page(p_paddr));

0 commit comments

Comments
 (0)