Skip to content

Commit 2394395

Browse files
Christoph Hellwig authored and axboe committed
blk-mq: don't run the hw_queue from blk_mq_request_bypass_insert
blk_mq_request_bypass_insert takes a bool parameter to control how to run
the queue at the end of the function. Move the blk_mq_run_hw_queue call
to the callers that want it instead.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Damien Le Moal <dlemoal@kernel.org>
Link: https://lore.kernel.org/r/20230413064057.707578-16-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
1 parent f0dbe6e commit 2394395

3 files changed

Lines changed: 15 additions & 16 deletions

File tree

block/blk-flush.c

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -389,6 +389,7 @@ void blk_insert_flush(struct request *rq)
389389
unsigned long fflags = q->queue_flags; /* may change, cache */
390390
unsigned int policy = blk_flush_policy(fflags, rq);
391391
struct blk_flush_queue *fq = blk_get_flush_queue(q, rq->mq_ctx);
392+
struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
392393

393394
/*
394395
* @policy now records what operations need to be done. Adjust
@@ -425,7 +426,8 @@ void blk_insert_flush(struct request *rq)
425426
*/
426427
if ((policy & REQ_FSEQ_DATA) &&
427428
!(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
428-
blk_mq_request_bypass_insert(rq, false, true);
429+
blk_mq_request_bypass_insert(rq, false);
430+
blk_mq_run_hw_queue(hctx, false);
429431
return;
430432
}
431433

block/blk-mq.c

Lines changed: 11 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -1442,7 +1442,7 @@ static void blk_mq_requeue_work(struct work_struct *work)
14421442
if (rq->rq_flags & RQF_DONTPREP) {
14431443
rq->rq_flags &= ~RQF_SOFTBARRIER;
14441444
list_del_init(&rq->queuelist);
1445-
blk_mq_request_bypass_insert(rq, false, false);
1445+
blk_mq_request_bypass_insert(rq, false);
14461446
} else if (rq->rq_flags & RQF_SOFTBARRIER) {
14471447
rq->rq_flags &= ~RQF_SOFTBARRIER;
14481448
list_del_init(&rq->queuelist);
@@ -2457,13 +2457,11 @@ static void blk_mq_run_work_fn(struct work_struct *work)
24572457
* blk_mq_request_bypass_insert - Insert a request at dispatch list.
24582458
* @rq: Pointer to request to be inserted.
24592459
* @at_head: true if the request should be inserted at the head of the list.
2460-
* @run_queue: If we should run the hardware queue after inserting the request.
24612460
*
24622461
* Should only be used carefully, when the caller knows we want to
24632462
* bypass a potential IO scheduler on the target device.
24642463
*/
2465-
void blk_mq_request_bypass_insert(struct request *rq, bool at_head,
2466-
bool run_queue)
2464+
void blk_mq_request_bypass_insert(struct request *rq, bool at_head)
24672465
{
24682466
struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
24692467

@@ -2473,9 +2471,6 @@ void blk_mq_request_bypass_insert(struct request *rq, bool at_head,
24732471
else
24742472
list_add_tail(&rq->queuelist, &hctx->dispatch);
24752473
spin_unlock(&hctx->lock);
2476-
2477-
if (run_queue)
2478-
blk_mq_run_hw_queue(hctx, false);
24792474
}
24802475

24812476
static void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx,
@@ -2530,7 +2525,7 @@ static void blk_mq_insert_request(struct request *rq, bool at_head)
25302525
* and it is added to the scheduler queue, there is no chance to
25312526
* dispatch it given we prioritize requests in hctx->dispatch.
25322527
*/
2533-
blk_mq_request_bypass_insert(rq, at_head, false);
2528+
blk_mq_request_bypass_insert(rq, at_head);
25342529
} else if (rq->rq_flags & RQF_FLUSH_SEQ) {
25352530
/*
25362531
* Firstly normal IO request is inserted to scheduler queue or
@@ -2553,7 +2548,7 @@ static void blk_mq_insert_request(struct request *rq, bool at_head)
25532548
* Simply queue flush rq to the front of hctx->dispatch so that
25542549
* intensive flush workloads can benefit in case of NCQ HW.
25552550
*/
2556-
blk_mq_request_bypass_insert(rq, true, false);
2551+
blk_mq_request_bypass_insert(rq, true);
25572552
} else if (q->elevator) {
25582553
LIST_HEAD(list);
25592554

@@ -2673,7 +2668,8 @@ static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
26732668
break;
26742669
case BLK_STS_RESOURCE:
26752670
case BLK_STS_DEV_RESOURCE:
2676-
blk_mq_request_bypass_insert(rq, false, true);
2671+
blk_mq_request_bypass_insert(rq, false);
2672+
blk_mq_run_hw_queue(hctx, false);
26772673
break;
26782674
default:
26792675
blk_mq_end_request(rq, ret);
@@ -2720,7 +2716,8 @@ static void blk_mq_plug_issue_direct(struct blk_plug *plug)
27202716
break;
27212717
case BLK_STS_RESOURCE:
27222718
case BLK_STS_DEV_RESOURCE:
2723-
blk_mq_request_bypass_insert(rq, false, true);
2719+
blk_mq_request_bypass_insert(rq, false);
2720+
blk_mq_run_hw_queue(hctx, false);
27242721
goto out;
27252722
default:
27262723
blk_mq_end_request(rq, ret);
@@ -2838,8 +2835,9 @@ static void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
28382835
break;
28392836
case BLK_STS_RESOURCE:
28402837
case BLK_STS_DEV_RESOURCE:
2841-
blk_mq_request_bypass_insert(rq, false,
2842-
list_empty(list));
2838+
blk_mq_request_bypass_insert(rq, false);
2839+
if (list_empty(list))
2840+
blk_mq_run_hw_queue(hctx, false);
28432841
goto out;
28442842
default:
28452843
blk_mq_end_request(rq, ret);

block/blk-mq.h

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -65,8 +65,7 @@ void blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set,
6565
/*
6666
* Internal helpers for request insertion into sw queues
6767
*/
68-
void blk_mq_request_bypass_insert(struct request *rq, bool at_head,
69-
bool run_queue);
68+
void blk_mq_request_bypass_insert(struct request *rq, bool at_head);
7069

7170
/*
7271
* CPU -> queue mappings

0 commit comments

Comments (0)