Skip to content

Commit 89f94b6

Browse files
Christoph Hellwig authored and liu-song-6 committed
raid5-cache: statically allocate the recovery ra bio
There is no need to preallocate the bio and reset it when used. Just allocate it on-stack and use a bvec placed next to the pages used for it. Signed-off-by: Christoph Hellwig <hch@lst.de> Signed-off-by: Song Liu <song@kernel.org>
1 parent 0dd00cb commit 89f94b6

1 file changed

Lines changed: 13 additions & 15 deletions

File tree

drivers/md/raid5-cache.c

Lines changed: 13 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -1623,22 +1623,17 @@ struct r5l_recovery_ctx {
16231623
* just copy data from the pool.
16241624
*/
16251625
struct page *ra_pool[R5L_RECOVERY_PAGE_POOL_SIZE];
1626+
struct bio_vec ra_bvec[R5L_RECOVERY_PAGE_POOL_SIZE];
16261627
sector_t pool_offset; /* offset of first page in the pool */
16271628
int total_pages; /* total allocated pages */
16281629
int valid_pages; /* pages with valid data */
1629-
struct bio *ra_bio; /* bio to do the read ahead */
16301630
};
16311631

16321632
static int r5l_recovery_allocate_ra_pool(struct r5l_log *log,
16331633
struct r5l_recovery_ctx *ctx)
16341634
{
16351635
struct page *page;
16361636

1637-
ctx->ra_bio = bio_alloc_bioset(NULL, BIO_MAX_VECS, 0, GFP_KERNEL,
1638-
&log->bs);
1639-
if (!ctx->ra_bio)
1640-
return -ENOMEM;
1641-
16421637
ctx->valid_pages = 0;
16431638
ctx->total_pages = 0;
16441639
while (ctx->total_pages < R5L_RECOVERY_PAGE_POOL_SIZE) {
@@ -1650,10 +1645,8 @@ static int r5l_recovery_allocate_ra_pool(struct r5l_log *log,
16501645
ctx->total_pages += 1;
16511646
}
16521647

1653-
if (ctx->total_pages == 0) {
1654-
bio_put(ctx->ra_bio);
1648+
if (ctx->total_pages == 0)
16551649
return -ENOMEM;
1656-
}
16571650

16581651
ctx->pool_offset = 0;
16591652
return 0;
@@ -1666,7 +1659,6 @@ static void r5l_recovery_free_ra_pool(struct r5l_log *log,
16661659

16671660
for (i = 0; i < ctx->total_pages; ++i)
16681661
put_page(ctx->ra_pool[i]);
1669-
bio_put(ctx->ra_bio);
16701662
}
16711663

16721664
/*
@@ -1679,15 +1671,19 @@ static int r5l_recovery_fetch_ra_pool(struct r5l_log *log,
16791671
struct r5l_recovery_ctx *ctx,
16801672
sector_t offset)
16811673
{
1682-
bio_reset(ctx->ra_bio, log->rdev->bdev, REQ_OP_READ);
1683-
ctx->ra_bio->bi_iter.bi_sector = log->rdev->data_offset + offset;
1674+
struct bio bio;
1675+
int ret;
1676+
1677+
bio_init(&bio, log->rdev->bdev, ctx->ra_bvec,
1678+
R5L_RECOVERY_PAGE_POOL_SIZE, REQ_OP_READ);
1679+
bio.bi_iter.bi_sector = log->rdev->data_offset + offset;
16841680

16851681
ctx->valid_pages = 0;
16861682
ctx->pool_offset = offset;
16871683

16881684
while (ctx->valid_pages < ctx->total_pages) {
1689-
bio_add_page(ctx->ra_bio,
1690-
ctx->ra_pool[ctx->valid_pages], PAGE_SIZE, 0);
1685+
__bio_add_page(&bio, ctx->ra_pool[ctx->valid_pages], PAGE_SIZE,
1686+
0);
16911687
ctx->valid_pages += 1;
16921688

16931689
offset = r5l_ring_add(log, offset, BLOCK_SECTORS);
@@ -1696,7 +1692,9 @@ static int r5l_recovery_fetch_ra_pool(struct r5l_log *log,
16961692
break;
16971693
}
16981694

1699-
return submit_bio_wait(ctx->ra_bio);
1695+
ret = submit_bio_wait(&bio);
1696+
bio_uninit(&bio);
1697+
return ret;
17001698
}
17011699

17021700
/*

0 commit comments

Comments (0)