Commit f0dbe6e

Christoph Hellwig authored and axboe committed
blk-mq: don't run the hw_queue from blk_mq_insert_request
blk_mq_insert_request takes two bool parameters to control how to run
the queue at the end of the function.  Move the blk_mq_run_hw_queue call
to the callers that want it instead.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Bart Van Assche <bvanassche@acm.org>
Reviewed-by: Damien Le Moal <dlemoal@kernel.org>
Link: https://lore.kernel.org/r/20230413064057.707578-15-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
1 parent e1f44ac commit f0dbe6e
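
For illustration only (not part of this commit): a minimal, standalone C sketch of the calling-convention change the message describes. The struct and function names below are invented stand-ins, not the kernel APIs; the actual change to blk_mq_insert_request() and its callers is in the diff that follows.

/*
 * Toy model of the refactor: the "old" insert helper takes run_queue/async
 * flags and may run the queue itself; the "new" helper only inserts, and
 * callers that want the queue run do it as a separate, explicit step.
 * Illustrative names only -- not kernel code.
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_request { int id; };

/* Old convention: behavior controlled by trailing bool arguments. */
static void toy_insert_request_old(struct toy_request *rq, bool at_head,
                                   bool run_queue, bool async)
{
        printf("insert rq %d (at_head=%d)\n", rq->id, at_head);
        if (run_queue)
                printf("run hw queue (async=%d)\n", async);
}

/* New convention: insert only inserts... */
static void toy_insert_request(struct toy_request *rq, bool at_head)
{
        printf("insert rq %d (at_head=%d)\n", rq->id, at_head);
}

/* ...and running the queue is an explicit call made by the caller. */
static void toy_run_hw_queue(bool async)
{
        printf("run hw queue (async=%d)\n", async);
}

int main(void)
{
        struct toy_request rq = { .id = 1 };

        /* Before: what happens is hidden in the trailing bools. */
        toy_insert_request_old(&rq, false, true, false);

        /* After: the caller spells out both steps. */
        toy_insert_request(&rq, false);
        toy_run_hw_queue(false);
        return 0;
}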

1 file changed

block/blk-mq.c

Lines changed: 32 additions & 24 deletions
@@ -44,8 +44,7 @@
 
 static DEFINE_PER_CPU(struct llist_head, blk_cpu_done);
 
-static void blk_mq_insert_request(struct request *rq, bool at_head,
-		bool run_queue, bool async);
+static void blk_mq_insert_request(struct request *rq, bool at_head);
 static void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
 		struct list_head *list);
 
@@ -1292,6 +1291,8 @@ static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
  */
 void blk_execute_rq_nowait(struct request *rq, bool at_head)
 {
+	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
+
 	WARN_ON(irqs_disabled());
 	WARN_ON(!blk_rq_is_passthrough(rq));
 
@@ -1302,10 +1303,13 @@ void blk_execute_rq_nowait(struct request *rq, bool at_head)
 	 * device, directly accessing the plug instead of using blk_mq_plug()
 	 * should not have any consequences.
 	 */
-	if (current->plug && !at_head)
+	if (current->plug && !at_head) {
 		blk_add_rq_to_plug(current->plug, rq);
-	else
-		blk_mq_insert_request(rq, at_head, true, false);
+		return;
+	}
+
+	blk_mq_insert_request(rq, at_head);
+	blk_mq_run_hw_queue(hctx, false);
 }
 EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
 
@@ -1355,6 +1359,7 @@ static void blk_rq_poll_completion(struct request *rq, struct completion *wait)
  */
 blk_status_t blk_execute_rq(struct request *rq, bool at_head)
 {
+	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
 	struct blk_rq_wait wait = {
 		.done = COMPLETION_INITIALIZER_ONSTACK(wait.done),
 	};
@@ -1366,7 +1371,8 @@ blk_status_t blk_execute_rq(struct request *rq, bool at_head)
 	rq->end_io = blk_end_sync_rq;
 
 	blk_account_io_start(rq);
-	blk_mq_insert_request(rq, at_head, true, false);
+	blk_mq_insert_request(rq, at_head);
+	blk_mq_run_hw_queue(hctx, false);
 
 	if (blk_rq_is_poll(rq)) {
 		blk_rq_poll_completion(rq, &wait.done);
@@ -1440,14 +1446,14 @@ static void blk_mq_requeue_work(struct work_struct *work)
 		} else if (rq->rq_flags & RQF_SOFTBARRIER) {
 			rq->rq_flags &= ~RQF_SOFTBARRIER;
 			list_del_init(&rq->queuelist);
-			blk_mq_insert_request(rq, true, false, false);
+			blk_mq_insert_request(rq, true);
 		}
 	}
 
 	while (!list_empty(&rq_list)) {
 		rq = list_entry(rq_list.next, struct request, queuelist);
 		list_del_init(&rq->queuelist);
-		blk_mq_insert_request(rq, false, false, false);
+		blk_mq_insert_request(rq, false);
 	}
 
 	blk_mq_run_hw_queues(q, false);
@@ -2507,8 +2513,7 @@ static void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx,
 		blk_mq_run_hw_queue(hctx, run_queue_async);
 }
 
-static void blk_mq_insert_request(struct request *rq, bool at_head,
-		bool run_queue, bool async)
+static void blk_mq_insert_request(struct request *rq, bool at_head)
 {
 	struct request_queue *q = rq->q;
 	struct blk_mq_ctx *ctx = rq->mq_ctx;
@@ -2568,9 +2573,6 @@ static void blk_mq_insert_request(struct request *rq, bool at_head,
 		blk_mq_hctx_mark_pending(hctx, ctx);
 		spin_unlock(&ctx->lock);
 	}
-
-	if (run_queue)
-		blk_mq_run_hw_queue(hctx, async);
 }
 
 static void blk_mq_bio_to_request(struct request *rq, struct bio *bio,
@@ -2655,12 +2657,13 @@ static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
 	blk_status_t ret;
 
 	if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(rq->q)) {
-		blk_mq_insert_request(rq, false, false, false);
+		blk_mq_insert_request(rq, false);
 		return;
 	}
 
 	if ((rq->rq_flags & RQF_ELV) || !blk_mq_get_budget_and_tag(rq)) {
-		blk_mq_insert_request(rq, false, true, false);
+		blk_mq_insert_request(rq, false);
+		blk_mq_run_hw_queue(hctx, false);
 		return;
 	}
 
@@ -2683,7 +2686,7 @@ static blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last)
 	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
 
 	if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(rq->q)) {
-		blk_mq_insert_request(rq, false, false, false);
+		blk_mq_insert_request(rq, false);
 		return BLK_STS_OK;
 	}
 
@@ -2963,6 +2966,7 @@ void blk_mq_submit_bio(struct bio *bio)
 	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
 	struct blk_plug *plug = blk_mq_plug(bio);
 	const int is_sync = op_is_sync(bio->bi_opf);
+	struct blk_mq_hw_ctx *hctx;
 	struct request *rq;
 	unsigned int nr_segs = 1;
 	blk_status_t ret;
@@ -3007,15 +3011,19 @@ void blk_mq_submit_bio(struct bio *bio)
 		return;
 	}
 
-	if (plug)
+	if (plug) {
 		blk_add_rq_to_plug(plug, rq);
-	else if ((rq->rq_flags & RQF_ELV) ||
-		 (rq->mq_hctx->dispatch_busy &&
-		 (q->nr_hw_queues == 1 || !is_sync)))
-		blk_mq_insert_request(rq, false, true, true);
-	else
-		blk_mq_run_dispatch_ops(rq->q,
-				blk_mq_try_issue_directly(rq->mq_hctx, rq));
+		return;
+	}
+
+	hctx = rq->mq_hctx;
+	if ((rq->rq_flags & RQF_ELV) ||
+	    (hctx->dispatch_busy && (q->nr_hw_queues == 1 || !is_sync))) {
+		blk_mq_insert_request(rq, false);
+		blk_mq_run_hw_queue(hctx, true);
+	} else {
+		blk_mq_run_dispatch_ops(q, blk_mq_try_issue_directly(hctx, rq));
+	}
 }
 
 #ifdef CONFIG_BLK_MQ_STACKING
