Skip to content

Commit 16f9399

Browse files
adam900710 authored and kdave committed
btrfs: scrub: remove the old writeback infrastructure
Since the whole scrub path has been switched to scrub_stripe based solution, the old writeback path can be removed completely, which involves: - scrub_ctx::wr_curr_bio member - scrub_ctx::flush_all_writes member - function scrub_write_block_to_dev_replace() - function scrub_write_sector_to_dev_replace() - function scrub_add_sector_to_wr_bio() - function scrub_wr_submit() - function scrub_wr_bio_end_io() - function scrub_wr_bio_end_io_worker() And one more function needs to be exported temporarily: - scrub_sector_get() Signed-off-by: Qu Wenruo <wqu@suse.com> Reviewed-by: David Sterba <dsterba@suse.com> Signed-off-by: David Sterba <dsterba@suse.com>
1 parent 5dc96f8 commit 16f9399

2 files changed

Lines changed: 3 additions & 219 deletions

File tree

fs/btrfs/scrub.c

Lines changed: 2 additions & 219 deletions
Original file line numberDiff line numberDiff line change
@@ -275,10 +275,8 @@ struct scrub_ctx {
275275
int is_dev_replace;
276276
u64 write_pointer;
277277

278-
struct scrub_bio *wr_curr_bio;
279278
struct mutex wr_lock;
280279
struct btrfs_device *wr_tgtdev;
281-
bool flush_all_writes;
282280

283281
/*
284282
* statistics
@@ -547,23 +545,14 @@ static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
547545
static int scrub_repair_sector_from_good_copy(struct scrub_block *sblock_bad,
548546
struct scrub_block *sblock_good,
549547
int sector_num, int force_write);
550-
static void scrub_write_block_to_dev_replace(struct scrub_block *sblock);
551-
static int scrub_write_sector_to_dev_replace(struct scrub_block *sblock,
552-
int sector_num);
553548
static int scrub_checksum_data(struct scrub_block *sblock);
554549
static int scrub_checksum_tree_block(struct scrub_block *sblock);
555550
static int scrub_checksum_super(struct scrub_block *sblock);
556551
static void scrub_block_put(struct scrub_block *sblock);
557-
static void scrub_sector_get(struct scrub_sector *sector);
558552
static void scrub_sector_put(struct scrub_sector *sector);
559553
static void scrub_bio_end_io(struct bio *bio);
560554
static void scrub_bio_end_io_worker(struct work_struct *work);
561555
static void scrub_block_complete(struct scrub_block *sblock);
562-
static int scrub_add_sector_to_wr_bio(struct scrub_ctx *sctx,
563-
struct scrub_sector *sector);
564-
static void scrub_wr_submit(struct scrub_ctx *sctx);
565-
static void scrub_wr_bio_end_io(struct bio *bio);
566-
static void scrub_wr_bio_end_io_worker(struct work_struct *work);
567556
static void scrub_put_ctx(struct scrub_ctx *sctx);
568557

569558
static inline int scrub_is_page_on_raid56(struct scrub_sector *sector)
@@ -872,7 +861,6 @@ static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx)
872861
for (i = 0; i < SCRUB_STRIPES_PER_SCTX; i++)
873862
release_scrub_stripe(&sctx->stripes[i]);
874863

875-
kfree(sctx->wr_curr_bio);
876864
scrub_free_csums(sctx);
877865
kfree(sctx);
878866
}
@@ -934,13 +922,10 @@ static noinline_for_stack struct scrub_ctx *scrub_setup_ctx(
934922
init_waitqueue_head(&sctx->list_wait);
935923
sctx->throttle_deadline = 0;
936924

937-
WARN_ON(sctx->wr_curr_bio != NULL);
938925
mutex_init(&sctx->wr_lock);
939-
sctx->wr_curr_bio = NULL;
940926
if (is_dev_replace) {
941927
WARN_ON(!fs_info->dev_replace.tgtdev);
942928
sctx->wr_tgtdev = fs_info->dev_replace.tgtdev;
943-
sctx->flush_all_writes = false;
944929
}
945930

946931
return sctx;
@@ -1304,8 +1289,6 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
13041289
sblock_to_check->data_corrected = 1;
13051290
spin_unlock(&sctx->stat_lock);
13061291

1307-
if (sctx->is_dev_replace)
1308-
scrub_write_block_to_dev_replace(sblock_bad);
13091292
goto out;
13101293
}
13111294

@@ -1394,7 +1377,6 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
13941377
!sblock_other->checksum_error &&
13951378
sblock_other->no_io_error_seen) {
13961379
if (sctx->is_dev_replace) {
1397-
scrub_write_block_to_dev_replace(sblock_other);
13981380
goto corrected_error;
13991381
} else {
14001382
ret = scrub_repair_block_from_good_copy(
@@ -1476,13 +1458,6 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
14761458
*/
14771459
if (!sblock_other)
14781460
sblock_other = sblock_bad;
1479-
1480-
if (scrub_write_sector_to_dev_replace(sblock_other,
1481-
sector_num) != 0) {
1482-
atomic64_inc(
1483-
&fs_info->dev_replace.num_write_errors);
1484-
success = 0;
1485-
}
14861461
} else if (sblock_other) {
14871462
ret = scrub_repair_sector_from_good_copy(sblock_bad,
14881463
sblock_other,
@@ -1904,31 +1879,6 @@ static int scrub_repair_sector_from_good_copy(struct scrub_block *sblock_bad,
19041879
return 0;
19051880
}
19061881

1907-
static void scrub_write_block_to_dev_replace(struct scrub_block *sblock)
1908-
{
1909-
struct btrfs_fs_info *fs_info = sblock->sctx->fs_info;
1910-
int i;
1911-
1912-
for (i = 0; i < sblock->sector_count; i++) {
1913-
int ret;
1914-
1915-
ret = scrub_write_sector_to_dev_replace(sblock, i);
1916-
if (ret)
1917-
atomic64_inc(&fs_info->dev_replace.num_write_errors);
1918-
}
1919-
}
1920-
1921-
static int scrub_write_sector_to_dev_replace(struct scrub_block *sblock, int sector_num)
1922-
{
1923-
const u32 sectorsize = sblock->sctx->fs_info->sectorsize;
1924-
struct scrub_sector *sector = sblock->sectors[sector_num];
1925-
1926-
if (sector->io_error)
1927-
memset(scrub_sector_get_kaddr(sector), 0, sectorsize);
1928-
1929-
return scrub_add_sector_to_wr_bio(sblock->sctx, sector);
1930-
}
1931-
19321882
static int fill_writer_pointer_gap(struct scrub_ctx *sctx, u64 physical)
19331883
{
19341884
int ret = 0;
@@ -1956,150 +1906,6 @@ static void scrub_block_get(struct scrub_block *sblock)
19561906
refcount_inc(&sblock->refs);
19571907
}
19581908

1959-
static int scrub_add_sector_to_wr_bio(struct scrub_ctx *sctx,
1960-
struct scrub_sector *sector)
1961-
{
1962-
struct scrub_block *sblock = sector->sblock;
1963-
struct scrub_bio *sbio;
1964-
int ret;
1965-
const u32 sectorsize = sctx->fs_info->sectorsize;
1966-
1967-
mutex_lock(&sctx->wr_lock);
1968-
again:
1969-
if (!sctx->wr_curr_bio) {
1970-
sctx->wr_curr_bio = kzalloc(sizeof(*sctx->wr_curr_bio),
1971-
GFP_KERNEL);
1972-
if (!sctx->wr_curr_bio) {
1973-
mutex_unlock(&sctx->wr_lock);
1974-
return -ENOMEM;
1975-
}
1976-
sctx->wr_curr_bio->sctx = sctx;
1977-
sctx->wr_curr_bio->sector_count = 0;
1978-
}
1979-
sbio = sctx->wr_curr_bio;
1980-
if (sbio->sector_count == 0) {
1981-
ret = fill_writer_pointer_gap(sctx, sector->offset +
1982-
sblock->physical_for_dev_replace);
1983-
if (ret) {
1984-
mutex_unlock(&sctx->wr_lock);
1985-
return ret;
1986-
}
1987-
1988-
sbio->physical = sblock->physical_for_dev_replace + sector->offset;
1989-
sbio->logical = sblock->logical + sector->offset;
1990-
sbio->dev = sctx->wr_tgtdev;
1991-
if (!sbio->bio) {
1992-
sbio->bio = bio_alloc(sbio->dev->bdev, sctx->sectors_per_bio,
1993-
REQ_OP_WRITE, GFP_NOFS);
1994-
}
1995-
sbio->bio->bi_private = sbio;
1996-
sbio->bio->bi_end_io = scrub_wr_bio_end_io;
1997-
sbio->bio->bi_iter.bi_sector = sbio->physical >> 9;
1998-
sbio->status = 0;
1999-
} else if (sbio->physical + sbio->sector_count * sectorsize !=
2000-
sblock->physical_for_dev_replace + sector->offset ||
2001-
sbio->logical + sbio->sector_count * sectorsize !=
2002-
sblock->logical + sector->offset) {
2003-
scrub_wr_submit(sctx);
2004-
goto again;
2005-
}
2006-
2007-
ret = bio_add_scrub_sector(sbio->bio, sector, sectorsize);
2008-
if (ret != sectorsize) {
2009-
if (sbio->sector_count < 1) {
2010-
bio_put(sbio->bio);
2011-
sbio->bio = NULL;
2012-
mutex_unlock(&sctx->wr_lock);
2013-
return -EIO;
2014-
}
2015-
scrub_wr_submit(sctx);
2016-
goto again;
2017-
}
2018-
2019-
sbio->sectors[sbio->sector_count] = sector;
2020-
scrub_sector_get(sector);
2021-
/*
2022-
* Since ssector no longer holds a page, but uses sblock::pages, we
2023-
* have to ensure the sblock had not been freed before our write bio
2024-
* finished.
2025-
*/
2026-
scrub_block_get(sector->sblock);
2027-
2028-
sbio->sector_count++;
2029-
if (sbio->sector_count == sctx->sectors_per_bio)
2030-
scrub_wr_submit(sctx);
2031-
mutex_unlock(&sctx->wr_lock);
2032-
2033-
return 0;
2034-
}
2035-
2036-
static void scrub_wr_submit(struct scrub_ctx *sctx)
2037-
{
2038-
struct scrub_bio *sbio;
2039-
2040-
if (!sctx->wr_curr_bio)
2041-
return;
2042-
2043-
sbio = sctx->wr_curr_bio;
2044-
sctx->wr_curr_bio = NULL;
2045-
scrub_pending_bio_inc(sctx);
2046-
/* process all writes in a single worker thread. Then the block layer
2047-
* orders the requests before sending them to the driver which
2048-
* doubled the write performance on spinning disks when measured
2049-
* with Linux 3.5 */
2050-
btrfsic_check_bio(sbio->bio);
2051-
submit_bio(sbio->bio);
2052-
2053-
if (btrfs_is_zoned(sctx->fs_info))
2054-
sctx->write_pointer = sbio->physical + sbio->sector_count *
2055-
sctx->fs_info->sectorsize;
2056-
}
2057-
2058-
static void scrub_wr_bio_end_io(struct bio *bio)
2059-
{
2060-
struct scrub_bio *sbio = bio->bi_private;
2061-
struct btrfs_fs_info *fs_info = sbio->dev->fs_info;
2062-
2063-
sbio->status = bio->bi_status;
2064-
sbio->bio = bio;
2065-
2066-
INIT_WORK(&sbio->work, scrub_wr_bio_end_io_worker);
2067-
queue_work(fs_info->scrub_wr_completion_workers, &sbio->work);
2068-
}
2069-
2070-
static void scrub_wr_bio_end_io_worker(struct work_struct *work)
2071-
{
2072-
struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
2073-
struct scrub_ctx *sctx = sbio->sctx;
2074-
int i;
2075-
2076-
ASSERT(sbio->sector_count <= SCRUB_SECTORS_PER_BIO);
2077-
if (sbio->status) {
2078-
struct btrfs_dev_replace *dev_replace =
2079-
&sbio->sctx->fs_info->dev_replace;
2080-
2081-
for (i = 0; i < sbio->sector_count; i++) {
2082-
struct scrub_sector *sector = sbio->sectors[i];
2083-
2084-
sector->io_error = 1;
2085-
atomic64_inc(&dev_replace->num_write_errors);
2086-
}
2087-
}
2088-
2089-
/*
2090-
* In scrub_add_sector_to_wr_bio() we grab extra ref for sblock, now in
2091-
* endio we should put the sblock.
2092-
*/
2093-
for (i = 0; i < sbio->sector_count; i++) {
2094-
scrub_block_put(sbio->sectors[i]->sblock);
2095-
scrub_sector_put(sbio->sectors[i]);
2096-
}
2097-
2098-
bio_put(sbio->bio);
2099-
kfree(sbio);
2100-
scrub_pending_bio_dec(sctx);
2101-
}
2102-
21031909
static int scrub_checksum(struct scrub_block *sblock)
21041910
{
21051911
u64 flags;
@@ -2904,7 +2710,7 @@ static void scrub_block_put(struct scrub_block *sblock)
29042710
}
29052711
}
29062712

2907-
static void scrub_sector_get(struct scrub_sector *sector)
2713+
void scrub_sector_get(struct scrub_sector *sector)
29082714
{
29092715
atomic_inc(&sector->refs);
29102716
}
@@ -3105,31 +2911,20 @@ static void scrub_bio_end_io_worker(struct work_struct *work)
31052911
sctx->first_free = sbio->index;
31062912
spin_unlock(&sctx->list_lock);
31072913

3108-
if (sctx->is_dev_replace && sctx->flush_all_writes) {
3109-
mutex_lock(&sctx->wr_lock);
3110-
scrub_wr_submit(sctx);
3111-
mutex_unlock(&sctx->wr_lock);
3112-
}
3113-
31142914
scrub_pending_bio_dec(sctx);
31152915
}
31162916

31172917
static void scrub_block_complete(struct scrub_block *sblock)
31182918
{
3119-
int corrupted = 0;
3120-
31212919
if (!sblock->no_io_error_seen) {
3122-
corrupted = 1;
31232920
scrub_handle_errored_block(sblock);
31242921
} else {
31252922
/*
31262923
* if has checksum error, write via repair mechanism in
31272924
* dev replace case, otherwise write here in dev replace
31282925
* case.
31292926
*/
3130-
corrupted = scrub_checksum(sblock);
3131-
if (!corrupted && sblock->sctx->is_dev_replace)
3132-
scrub_write_block_to_dev_replace(sblock);
2927+
scrub_checksum(sblock);
31332928
}
31342929
}
31352930

@@ -3904,14 +3699,11 @@ static int scrub_simple_mirror(struct scrub_ctx *sctx,
39043699
/* Paused? */
39053700
if (atomic_read(&fs_info->scrub_pause_req)) {
39063701
/* Push queued extents */
3907-
sctx->flush_all_writes = true;
39083702
scrub_submit(sctx);
39093703
mutex_lock(&sctx->wr_lock);
3910-
scrub_wr_submit(sctx);
39113704
mutex_unlock(&sctx->wr_lock);
39123705
wait_event(sctx->list_wait,
39133706
atomic_read(&sctx->bios_in_flight) == 0);
3914-
sctx->flush_all_writes = false;
39153707
scrub_blocked_if_needed(fs_info);
39163708
}
39173709
/* Block group removed? */
@@ -4048,7 +3840,6 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
40483840
mutex_lock(&sctx->wr_lock);
40493841
sctx->write_pointer = physical;
40503842
mutex_unlock(&sctx->wr_lock);
4051-
sctx->flush_all_writes = true;
40523843
}
40533844

40543845
/* Prepare the extra data stripes used by RAID56. */
@@ -4159,9 +3950,6 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
41593950
out:
41603951
/* push queued extents */
41613952
scrub_submit(sctx);
4162-
mutex_lock(&sctx->wr_lock);
4163-
scrub_wr_submit(sctx);
4164-
mutex_unlock(&sctx->wr_lock);
41653953
flush_scrub_stripes(sctx);
41663954
if (sctx->raid56_data_stripes) {
41673955
for (int i = 0; i < nr_data_stripes(map); i++)
@@ -4497,11 +4285,7 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
44974285
* write requests are really completed when bios_in_flight
44984286
* changes to 0.
44994287
*/
4500-
sctx->flush_all_writes = true;
45014288
scrub_submit(sctx);
4502-
mutex_lock(&sctx->wr_lock);
4503-
scrub_wr_submit(sctx);
4504-
mutex_unlock(&sctx->wr_lock);
45054289

45064290
wait_event(sctx->list_wait,
45074291
atomic_read(&sctx->bios_in_flight) == 0);
@@ -4515,7 +4299,6 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
45154299
*/
45164300
wait_event(sctx->list_wait,
45174301
atomic_read(&sctx->workers_pending) == 0);
4518-
sctx->flush_all_writes = false;
45194302

45204303
scrub_pause_off(fs_info);
45214304

fs/btrfs/scrub.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -19,5 +19,6 @@ struct scrub_sector;
1919
int scrub_find_csum(struct scrub_ctx *sctx, u64 logical, u8 *csum);
2020
int scrub_add_sector_to_rd_bio(struct scrub_ctx *sctx,
2121
struct scrub_sector *sector);
22+
void scrub_sector_get(struct scrub_sector *sector);
2223

2324
#endif

0 commit comments

Comments (0)