Skip to content

Commit e1f44ac

Browse files
Christoph Hellwig authored and axboe committed
blk-mq: fold __blk_mq_try_issue_directly into its two callers
Due to the wildly different behavior based on the bypass_insert argument, not a whole lot of code in __blk_mq_try_issue_directly is actually shared between blk_mq_try_issue_directly and blk_mq_request_issue_directly. Remove __blk_mq_try_issue_directly and fold the code into the two callers instead. Signed-off-by: Christoph Hellwig <hch@lst.de> Reviewed-by: Bart Van Assche <bvanassche@acm.org> Reviewed-by: Damien Le Moal <dlemoal@kernel.org> Link: https://lore.kernel.org/r/20230413064057.707578-14-hch@lst.de Signed-off-by: Jens Axboe <axboe@kernel.dk>
1 parent 2b71b87 commit e1f44ac

1 file changed

Lines changed: 31 additions & 41 deletions

File tree

block/blk-mq.c

Lines changed: 31 additions & 41 deletions
Original file line numberDiff line numberDiff line change
@@ -2639,42 +2639,6 @@ static bool blk_mq_get_budget_and_tag(struct request *rq)
26392639
return true;
26402640
}
26412641

2642-
static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
2643-
struct request *rq,
2644-
bool bypass_insert, bool last)
2645-
{
2646-
struct request_queue *q = rq->q;
2647-
bool run_queue = true;
2648-
2649-
/*
2650-
* RCU or SRCU read lock is needed before checking quiesced flag.
2651-
*
2652-
* When queue is stopped or quiesced, ignore 'bypass_insert' from
2653-
* blk_mq_request_issue_directly(), and return BLK_STS_OK to caller,
2654-
* and avoid driver to try to dispatch again.
2655-
*/
2656-
if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)) {
2657-
run_queue = false;
2658-
bypass_insert = false;
2659-
goto insert;
2660-
}
2661-
2662-
if ((rq->rq_flags & RQF_ELV) && !bypass_insert)
2663-
goto insert;
2664-
2665-
if (!blk_mq_get_budget_and_tag(rq))
2666-
goto insert;
2667-
2668-
return __blk_mq_issue_directly(hctx, rq, last);
2669-
insert:
2670-
if (bypass_insert)
2671-
return BLK_STS_RESOURCE;
2672-
2673-
blk_mq_insert_request(rq, false, run_queue, false);
2674-
2675-
return BLK_STS_OK;
2676-
}
2677-
26782642
/**
26792643
* blk_mq_try_issue_directly - Try to send a request directly to device driver.
26802644
* @hctx: Pointer of the associated hardware queue.
@@ -2688,18 +2652,44 @@ static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
26882652
static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
26892653
struct request *rq)
26902654
{
2691-
blk_status_t ret =
2692-
__blk_mq_try_issue_directly(hctx, rq, false, true);
2655+
blk_status_t ret;
2656+
2657+
if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(rq->q)) {
2658+
blk_mq_insert_request(rq, false, false, false);
2659+
return;
2660+
}
2661+
2662+
if ((rq->rq_flags & RQF_ELV) || !blk_mq_get_budget_and_tag(rq)) {
2663+
blk_mq_insert_request(rq, false, true, false);
2664+
return;
2665+
}
26932666

2694-
if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE)
2667+
ret = __blk_mq_issue_directly(hctx, rq, true);
2668+
switch (ret) {
2669+
case BLK_STS_OK:
2670+
break;
2671+
case BLK_STS_RESOURCE:
2672+
case BLK_STS_DEV_RESOURCE:
26952673
blk_mq_request_bypass_insert(rq, false, true);
2696-
else if (ret != BLK_STS_OK)
2674+
break;
2675+
default:
26972676
blk_mq_end_request(rq, ret);
2677+
break;
2678+
}
26982679
}
26992680

27002681
static blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last)
27012682
{
2702-
return __blk_mq_try_issue_directly(rq->mq_hctx, rq, true, last);
2683+
struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
2684+
2685+
if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(rq->q)) {
2686+
blk_mq_insert_request(rq, false, false, false);
2687+
return BLK_STS_OK;
2688+
}
2689+
2690+
if (!blk_mq_get_budget_and_tag(rq))
2691+
return BLK_STS_RESOURCE;
2692+
return __blk_mq_issue_directly(hctx, rq, last);
27032693
}
27042694

27052695
static void blk_mq_plug_issue_direct(struct blk_plug *plug)

0 commit comments

Comments
 (0)