Skip to content

Commit 2b59761

Browse files
Christoph Hellwig authored and axboe committed
blk-mq: pass a flags argument to blk_mq_request_bypass_insert
Replace the boolean at_head argument with the same flags that are already passed to blk_mq_insert_request. Signed-off-by: Christoph Hellwig <hch@lst.de> Reviewed-by: Bart Van Assche <bvanassche@acm.org> Reviewed-by: Damien Le Moal <dlemoal@kernel.org> Link: https://lore.kernel.org/r/20230413064057.707578-19-hch@lst.de Signed-off-by: Jens Axboe <axboe@kernel.dk>
1 parent 710fa37 commit 2b59761

3 files changed

Lines changed: 11 additions & 11 deletions

File tree

block/blk-flush.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -428,7 +428,7 @@ void blk_insert_flush(struct request *rq)
428428
*/
429429
if ((policy & REQ_FSEQ_DATA) &&
430430
!(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
431-
blk_mq_request_bypass_insert(rq, false);
431+
blk_mq_request_bypass_insert(rq, 0);
432432
blk_mq_run_hw_queue(hctx, false);
433433
return;
434434
}

block/blk-mq.c

Lines changed: 9 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -1447,7 +1447,7 @@ static void blk_mq_requeue_work(struct work_struct *work)
14471447
if (rq->rq_flags & RQF_DONTPREP) {
14481448
rq->rq_flags &= ~RQF_SOFTBARRIER;
14491449
list_del_init(&rq->queuelist);
1450-
blk_mq_request_bypass_insert(rq, false);
1450+
blk_mq_request_bypass_insert(rq, 0);
14511451
} else if (rq->rq_flags & RQF_SOFTBARRIER) {
14521452
rq->rq_flags &= ~RQF_SOFTBARRIER;
14531453
list_del_init(&rq->queuelist);
@@ -2457,17 +2457,17 @@ static void blk_mq_run_work_fn(struct work_struct *work)
24572457
/**
24582458
* blk_mq_request_bypass_insert - Insert a request at dispatch list.
24592459
* @rq: Pointer to request to be inserted.
2460-
* @at_head: true if the request should be inserted at the head of the list.
2460+
* @flags: BLK_MQ_INSERT_*
24612461
*
24622462
* Should only be used carefully, when the caller knows we want to
24632463
* bypass a potential IO scheduler on the target device.
24642464
*/
2465-
void blk_mq_request_bypass_insert(struct request *rq, bool at_head)
2465+
void blk_mq_request_bypass_insert(struct request *rq, blk_insert_t flags)
24662466
{
24672467
struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
24682468

24692469
spin_lock(&hctx->lock);
2470-
if (at_head)
2470+
if (flags & BLK_MQ_INSERT_AT_HEAD)
24712471
list_add(&rq->queuelist, &hctx->dispatch);
24722472
else
24732473
list_add_tail(&rq->queuelist, &hctx->dispatch);
@@ -2526,7 +2526,7 @@ static void blk_mq_insert_request(struct request *rq, blk_insert_t flags)
25262526
* and it is added to the scheduler queue, there is no chance to
25272527
* dispatch it given we prioritize requests in hctx->dispatch.
25282528
*/
2529-
blk_mq_request_bypass_insert(rq, flags & BLK_MQ_INSERT_AT_HEAD);
2529+
blk_mq_request_bypass_insert(rq, flags);
25302530
} else if (rq->rq_flags & RQF_FLUSH_SEQ) {
25312531
/*
25322532
* Firstly normal IO request is inserted to scheduler queue or
@@ -2549,7 +2549,7 @@ static void blk_mq_insert_request(struct request *rq, blk_insert_t flags)
25492549
* Simply queue flush rq to the front of hctx->dispatch so that
25502550
* intensive flush workloads can benefit in case of NCQ HW.
25512551
*/
2552-
blk_mq_request_bypass_insert(rq, true);
2552+
blk_mq_request_bypass_insert(rq, BLK_MQ_INSERT_AT_HEAD);
25532553
} else if (q->elevator) {
25542554
LIST_HEAD(list);
25552555

@@ -2670,7 +2670,7 @@ static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
26702670
break;
26712671
case BLK_STS_RESOURCE:
26722672
case BLK_STS_DEV_RESOURCE:
2673-
blk_mq_request_bypass_insert(rq, false);
2673+
blk_mq_request_bypass_insert(rq, 0);
26742674
blk_mq_run_hw_queue(hctx, false);
26752675
break;
26762676
default:
@@ -2718,7 +2718,7 @@ static void blk_mq_plug_issue_direct(struct blk_plug *plug)
27182718
break;
27192719
case BLK_STS_RESOURCE:
27202720
case BLK_STS_DEV_RESOURCE:
2721-
blk_mq_request_bypass_insert(rq, false);
2721+
blk_mq_request_bypass_insert(rq, 0);
27222722
blk_mq_run_hw_queue(hctx, false);
27232723
goto out;
27242724
default:
@@ -2837,7 +2837,7 @@ static void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
28372837
break;
28382838
case BLK_STS_RESOURCE:
28392839
case BLK_STS_DEV_RESOURCE:
2840-
blk_mq_request_bypass_insert(rq, false);
2840+
blk_mq_request_bypass_insert(rq, 0);
28412841
if (list_empty(list))
28422842
blk_mq_run_hw_queue(hctx, false);
28432843
goto out;

block/blk-mq.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -67,7 +67,7 @@ void blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set,
6767
/*
6868
* Internal helpers for request insertion into sw queues
6969
*/
70-
void blk_mq_request_bypass_insert(struct request *rq, bool at_head);
70+
void blk_mq_request_bypass_insert(struct request *rq, blk_insert_t flags);
7171

7272
/*
7373
* CPU -> queue mappings

0 commit comments

Comments (0)