Skip to content

Commit 05ce4c5

Browse files
Fengnan Chang authored and axboe committed
block: use bio_alloc_bioset for passthru IO by default
Use bio_alloc_bioset for passthru IO by default, so that we can enable bio cache for irq and polled passthru IO in later. Signed-off-by: Fengnan Chang <changfengnan@bytedance.com> Signed-off-by: Jens Axboe <axboe@kernel.dk>
1 parent 559e608 commit 05ce4c5

2 files changed

Lines changed: 37 additions & 55 deletions

File tree

block/blk-map.c

Lines changed: 36 additions & 54 deletions
Original file line numberDiff line numberDiff line change
@@ -37,6 +37,25 @@ static struct bio_map_data *bio_alloc_map_data(struct iov_iter *data,
3737
return bmd;
3838
}
3939

40+
static inline void blk_mq_map_bio_put(struct bio *bio)
41+
{
42+
bio_put(bio);
43+
}
44+
45+
static struct bio *blk_rq_map_bio_alloc(struct request *rq,
46+
unsigned int nr_vecs, gfp_t gfp_mask)
47+
{
48+
struct block_device *bdev = rq->q->disk ? rq->q->disk->part0 : NULL;
49+
struct bio *bio;
50+
51+
bio = bio_alloc_bioset(bdev, nr_vecs, rq->cmd_flags, gfp_mask,
52+
&fs_bio_set);
53+
if (!bio)
54+
return NULL;
55+
56+
return bio;
57+
}
58+
4059
/**
4160
* bio_copy_from_iter - copy all pages from iov_iter to bio
4261
* @bio: The &struct bio which describes the I/O as destination
@@ -154,10 +173,9 @@ static int bio_copy_user_iov(struct request *rq, struct rq_map_data *map_data,
154173
nr_pages = bio_max_segs(DIV_ROUND_UP(offset + len, PAGE_SIZE));
155174

156175
ret = -ENOMEM;
157-
bio = bio_kmalloc(nr_pages, gfp_mask);
176+
bio = blk_rq_map_bio_alloc(rq, nr_pages, gfp_mask);
158177
if (!bio)
159178
goto out_bmd;
160-
bio_init_inline(bio, NULL, nr_pages, req_op(rq));
161179

162180
if (map_data) {
163181
nr_pages = 1U << map_data->page_order;
@@ -233,43 +251,12 @@ static int bio_copy_user_iov(struct request *rq, struct rq_map_data *map_data,
233251
cleanup:
234252
if (!map_data)
235253
bio_free_pages(bio);
236-
bio_uninit(bio);
237-
kfree(bio);
254+
blk_mq_map_bio_put(bio);
238255
out_bmd:
239256
kfree(bmd);
240257
return ret;
241258
}
242259

243-
static void blk_mq_map_bio_put(struct bio *bio)
244-
{
245-
if (bio->bi_opf & REQ_ALLOC_CACHE) {
246-
bio_put(bio);
247-
} else {
248-
bio_uninit(bio);
249-
kfree(bio);
250-
}
251-
}
252-
253-
static struct bio *blk_rq_map_bio_alloc(struct request *rq,
254-
unsigned int nr_vecs, gfp_t gfp_mask)
255-
{
256-
struct block_device *bdev = rq->q->disk ? rq->q->disk->part0 : NULL;
257-
struct bio *bio;
258-
259-
if (rq->cmd_flags & REQ_ALLOC_CACHE && (nr_vecs <= BIO_INLINE_VECS)) {
260-
bio = bio_alloc_bioset(bdev, nr_vecs, rq->cmd_flags, gfp_mask,
261-
&fs_bio_set);
262-
if (!bio)
263-
return NULL;
264-
} else {
265-
bio = bio_kmalloc(nr_vecs, gfp_mask);
266-
if (!bio)
267-
return NULL;
268-
bio_init_inline(bio, bdev, nr_vecs, req_op(rq));
269-
}
270-
return bio;
271-
}
272-
273260
static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
274261
gfp_t gfp_mask)
275262
{
@@ -318,25 +305,23 @@ static void bio_invalidate_vmalloc_pages(struct bio *bio)
318305
static void bio_map_kern_endio(struct bio *bio)
319306
{
320307
bio_invalidate_vmalloc_pages(bio);
321-
bio_uninit(bio);
322-
kfree(bio);
308+
blk_mq_map_bio_put(bio);
323309
}
324310

325-
static struct bio *bio_map_kern(void *data, unsigned int len, enum req_op op,
311+
static struct bio *bio_map_kern(struct request *rq, void *data, unsigned int len,
326312
gfp_t gfp_mask)
327313
{
328314
unsigned int nr_vecs = bio_add_max_vecs(data, len);
329315
struct bio *bio;
330316

331-
bio = bio_kmalloc(nr_vecs, gfp_mask);
317+
bio = blk_rq_map_bio_alloc(rq, nr_vecs, gfp_mask);
332318
if (!bio)
333319
return ERR_PTR(-ENOMEM);
334-
bio_init_inline(bio, NULL, nr_vecs, op);
320+
335321
if (is_vmalloc_addr(data)) {
336322
bio->bi_private = data;
337323
if (!bio_add_vmalloc(bio, data, len)) {
338-
bio_uninit(bio);
339-
kfree(bio);
324+
blk_mq_map_bio_put(bio);
340325
return ERR_PTR(-EINVAL);
341326
}
342327
} else {
@@ -349,8 +334,7 @@ static struct bio *bio_map_kern(void *data, unsigned int len, enum req_op op,
349334
static void bio_copy_kern_endio(struct bio *bio)
350335
{
351336
bio_free_pages(bio);
352-
bio_uninit(bio);
353-
kfree(bio);
337+
blk_mq_map_bio_put(bio);
354338
}
355339

356340
static void bio_copy_kern_endio_read(struct bio *bio)
@@ -369,6 +353,7 @@ static void bio_copy_kern_endio_read(struct bio *bio)
369353

370354
/**
371355
* bio_copy_kern - copy kernel address into bio
356+
* @rq: request to fill
372357
* @data: pointer to buffer to copy
373358
* @len: length in bytes
374359
* @op: bio/request operation
@@ -377,9 +362,10 @@ static void bio_copy_kern_endio_read(struct bio *bio)
377362
* copy the kernel address into a bio suitable for io to a block
378363
* device. Returns an error pointer in case of error.
379364
*/
380-
static struct bio *bio_copy_kern(void *data, unsigned int len, enum req_op op,
365+
static struct bio *bio_copy_kern(struct request *rq, void *data, unsigned int len,
381366
gfp_t gfp_mask)
382367
{
368+
enum req_op op = req_op(rq);
383369
unsigned long kaddr = (unsigned long)data;
384370
unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
385371
unsigned long start = kaddr >> PAGE_SHIFT;
@@ -394,10 +380,9 @@ static struct bio *bio_copy_kern(void *data, unsigned int len, enum req_op op,
394380
return ERR_PTR(-EINVAL);
395381

396382
nr_pages = end - start;
397-
bio = bio_kmalloc(nr_pages, gfp_mask);
383+
bio = blk_rq_map_bio_alloc(rq, nr_pages, gfp_mask);
398384
if (!bio)
399385
return ERR_PTR(-ENOMEM);
400-
bio_init_inline(bio, NULL, nr_pages, op);
401386

402387
while (len) {
403388
struct page *page;
@@ -431,8 +416,7 @@ static struct bio *bio_copy_kern(void *data, unsigned int len, enum req_op op,
431416

432417
cleanup:
433418
bio_free_pages(bio);
434-
bio_uninit(bio);
435-
kfree(bio);
419+
blk_mq_map_bio_put(bio);
436420
return ERR_PTR(-ENOMEM);
437421
}
438422

@@ -679,18 +663,16 @@ int blk_rq_map_kern(struct request *rq, void *kbuf, unsigned int len,
679663
return -EINVAL;
680664

681665
if (!blk_rq_aligned(rq->q, addr, len) || object_is_on_stack(kbuf))
682-
bio = bio_copy_kern(kbuf, len, req_op(rq), gfp_mask);
666+
bio = bio_copy_kern(rq, kbuf, len, gfp_mask);
683667
else
684-
bio = bio_map_kern(kbuf, len, req_op(rq), gfp_mask);
668+
bio = bio_map_kern(rq, kbuf, len, gfp_mask);
685669

686670
if (IS_ERR(bio))
687671
return PTR_ERR(bio);
688672

689673
ret = blk_rq_append_bio(rq, bio);
690-
if (unlikely(ret)) {
691-
bio_uninit(bio);
692-
kfree(bio);
693-
}
674+
if (unlikely(ret))
675+
blk_mq_map_bio_put(bio);
694676
return ret;
695677
}
696678
EXPORT_SYMBOL(blk_rq_map_kern);

drivers/nvme/host/ioctl.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -447,7 +447,7 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
447447
struct iov_iter iter;
448448
struct iov_iter *map_iter = NULL;
449449
struct request *req;
450-
blk_opf_t rq_flags = REQ_ALLOC_CACHE;
450+
blk_opf_t rq_flags = 0;
451451
blk_mq_req_flags_t blk_flags = 0;
452452
int ret;
453453

0 commit comments

Comments (0)