Skip to content

Commit 48f22f8

Browse files
Fengnan Chang authored and axboe committed
block: enable per-cpu bio cache by default
Since after commit 12e4e8c ("io_uring/rw: enable bio caches for IRQ rw"), bio_put is safe for task and irq context, bio_alloc_bioset is safe for task context and no one calls in irq context, so we can enable per cpu bio cache by default. Benchmarked with t/io_uring and ext4+nvme: taskset -c 6 /root/fio/t/io_uring -p0 -d128 -b4096 -s1 -c1 -F1 -B1 -R1 -X1 -n1 -P1 /mnt/testfile base IOPS is 562K, patch IOPS is 574K. The CPU usage of bio_alloc_bioset decrease from 1.42% to 1.22%. The worst case is allocate bio in CPU A but free in CPU B, still use t/io_uring and ext4+nvme: base IOPS is 648K, patch IOPS is 647K. Also use fio test ext4/xfs with libaio/sync/io_uring on null_blk and nvme, no obvious performance regression. Signed-off-by: Fengnan Chang <changfengnan@bytedance.com> Signed-off-by: Jens Axboe <axboe@kernel.dk>
1 parent 05ce4c5 commit 48f22f8

3 files changed

Lines changed: 12 additions & 19 deletions

File tree

block/bio.c

Lines changed: 12 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -517,20 +517,18 @@ struct bio *bio_alloc_bioset(struct block_device *bdev, unsigned short nr_vecs,
517517
if (WARN_ON_ONCE(!mempool_initialized(&bs->bvec_pool) && nr_vecs > 0))
518518
return NULL;
519519

520-
if (opf & REQ_ALLOC_CACHE) {
521-
if (bs->cache && nr_vecs <= BIO_INLINE_VECS) {
522-
bio = bio_alloc_percpu_cache(bdev, nr_vecs, opf,
523-
gfp_mask, bs);
524-
if (bio)
525-
return bio;
526-
/*
527-
* No cached bio available, bio returned below marked with
528-
* REQ_ALLOC_CACHE to particpate in per-cpu alloc cache.
529-
*/
530-
} else {
531-
opf &= ~REQ_ALLOC_CACHE;
532-
}
533-
}
520+
if (bs->cache && nr_vecs <= BIO_INLINE_VECS) {
521+
opf |= REQ_ALLOC_CACHE;
522+
bio = bio_alloc_percpu_cache(bdev, nr_vecs, opf,
523+
gfp_mask, bs);
524+
if (bio)
525+
return bio;
526+
/*
527+
* No cached bio available, bio returned below marked with
528+
* REQ_ALLOC_CACHE to participate in per-cpu alloc cache.
529+
*/
530+
} else
531+
opf &= ~REQ_ALLOC_CACHE;
534532

535533
/*
536534
* submit_bio_noacct() converts recursion to iteration; this means if

block/fops.c

Lines changed: 0 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -184,8 +184,6 @@ static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
184184
loff_t pos = iocb->ki_pos;
185185
int ret = 0;
186186

187-
if (iocb->ki_flags & IOCB_ALLOC_CACHE)
188-
opf |= REQ_ALLOC_CACHE;
189187
bio = bio_alloc_bioset(bdev, nr_pages, opf, GFP_KERNEL,
190188
&blkdev_dio_pool);
191189
dio = container_of(bio, struct blkdev_dio, bio);
@@ -333,8 +331,6 @@ static ssize_t __blkdev_direct_IO_async(struct kiocb *iocb,
333331
loff_t pos = iocb->ki_pos;
334332
int ret = 0;
335333

336-
if (iocb->ki_flags & IOCB_ALLOC_CACHE)
337-
opf |= REQ_ALLOC_CACHE;
338334
bio = bio_alloc_bioset(bdev, nr_pages, opf, GFP_KERNEL,
339335
&blkdev_dio_pool);
340336
dio = container_of(bio, struct blkdev_dio, bio);

io_uring/rw.c

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -855,7 +855,6 @@ static int io_rw_init_file(struct io_kiocb *req, fmode_t mode, int rw_type)
855855
ret = kiocb_set_rw_flags(kiocb, rw->flags, rw_type);
856856
if (unlikely(ret))
857857
return ret;
858-
kiocb->ki_flags |= IOCB_ALLOC_CACHE;
859858

860859
/*
861860
* If the file is marked O_NONBLOCK, still allow retry for it if it

0 commit comments

Comments (0)