Skip to content

Commit f6c80cf

Browse files
keithbusch authored and axboe committed
block: add request polling helper
Provide a direct request polling interface for drivers. The interface does not require a bio, and can skip the overhead associated with polling those. The biggest gain is from skipping the relatively expensive xarray lookup, which is unnecessary when you already have the request. With this, the simple rq/qc conversion functions have only one caller each, so open code them and remove the helpers. Signed-off-by: Keith Busch <kbusch@kernel.org> Reviewed-by: Kanchan Joshi <joshi.k@samsung.com> Reviewed-by: Sagi Grimberg <sagi@grimberg.me> Reviewed-by: Christoph Hellwig <hch@lst.de> Link: https://lore.kernel.org/r/20230612190343.2087040-2-kbusch@meta.com Signed-off-by: Jens Axboe <axboe@kernel.dk>
1 parent 3a08284 commit f6c80cf

2 files changed

Lines changed: 34 additions & 16 deletions

File tree

block/blk-mq.c

Lines changed: 32 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -49,17 +49,8 @@ static void blk_mq_request_bypass_insert(struct request *rq,
4949
blk_insert_t flags);
5050
static void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
5151
struct list_head *list);
52-
53-
static inline struct blk_mq_hw_ctx *blk_qc_to_hctx(struct request_queue *q,
54-
blk_qc_t qc)
55-
{
56-
return xa_load(&q->hctx_table, qc);
57-
}
58-
59-
static inline blk_qc_t blk_rq_to_qc(struct request *rq)
60-
{
61-
return rq->mq_hctx->queue_num;
62-
}
52+
static int blk_hctx_poll(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
53+
struct io_comp_batch *iob, unsigned int flags);
6354

6455
/*
6556
* Check if any of the ctx, dispatch list or elevator
@@ -1248,7 +1239,7 @@ void blk_mq_start_request(struct request *rq)
12481239
q->integrity.profile->prepare_fn(rq);
12491240
#endif
12501241
if (rq->bio && rq->bio->bi_opf & REQ_POLLED)
1251-
WRITE_ONCE(rq->bio->bi_cookie, blk_rq_to_qc(rq));
1242+
WRITE_ONCE(rq->bio->bi_cookie, rq->mq_hctx->queue_num);
12521243
}
12531244
EXPORT_SYMBOL(blk_mq_start_request);
12541245

@@ -1354,7 +1345,7 @@ EXPORT_SYMBOL_GPL(blk_rq_is_poll);
13541345
static void blk_rq_poll_completion(struct request *rq, struct completion *wait)
13551346
{
13561347
do {
1357-
blk_mq_poll(rq->q, blk_rq_to_qc(rq), NULL, 0);
1348+
blk_hctx_poll(rq->q, rq->mq_hctx, NULL, 0);
13581349
cond_resched();
13591350
} while (!completion_done(wait));
13601351
}
@@ -4749,10 +4740,9 @@ void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
47494740
}
47504741
EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues);
47514742

4752-
int blk_mq_poll(struct request_queue *q, blk_qc_t cookie, struct io_comp_batch *iob,
4753-
unsigned int flags)
4743+
static int blk_hctx_poll(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
4744+
struct io_comp_batch *iob, unsigned int flags)
47544745
{
4755-
struct blk_mq_hw_ctx *hctx = blk_qc_to_hctx(q, cookie);
47564746
long state = get_current_state();
47574747
int ret;
47584748

@@ -4777,6 +4767,32 @@ int blk_mq_poll(struct request_queue *q, blk_qc_t cookie, struct io_comp_batch *
47774767
return 0;
47784768
}
47794769

4770+
int blk_mq_poll(struct request_queue *q, blk_qc_t cookie,
4771+
struct io_comp_batch *iob, unsigned int flags)
4772+
{
4773+
struct blk_mq_hw_ctx *hctx = xa_load(&q->hctx_table, cookie);
4774+
4775+
return blk_hctx_poll(q, hctx, iob, flags);
4776+
}
4777+
4778+
int blk_rq_poll(struct request *rq, struct io_comp_batch *iob,
4779+
unsigned int poll_flags)
4780+
{
4781+
struct request_queue *q = rq->q;
4782+
int ret;
4783+
4784+
if (!blk_rq_is_poll(rq))
4785+
return 0;
4786+
if (!percpu_ref_tryget(&q->q_usage_counter))
4787+
return 0;
4788+
4789+
ret = blk_hctx_poll(q, rq->mq_hctx, iob, poll_flags);
4790+
blk_queue_exit(q);
4791+
4792+
return ret;
4793+
}
4794+
EXPORT_SYMBOL_GPL(blk_rq_poll);
4795+
47804796
unsigned int blk_mq_rq_cpu(struct request *rq)
47814797
{
47824798
return rq->mq_ctx->cpu;

include/linux/blk-mq.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -715,6 +715,8 @@ int blk_mq_alloc_sq_tag_set(struct blk_mq_tag_set *set,
715715
void blk_mq_free_tag_set(struct blk_mq_tag_set *set);
716716

717717
void blk_mq_free_request(struct request *rq);
718+
int blk_rq_poll(struct request *rq, struct io_comp_batch *iob,
719+
unsigned int poll_flags);
718720

719721
bool blk_mq_queue_inflight(struct request_queue *q);
720722

0 commit comments

Comments
 (0)