@@ -1442,7 +1442,7 @@ static void blk_mq_requeue_work(struct work_struct *work)
 		if (rq->rq_flags & RQF_DONTPREP) {
 			rq->rq_flags &= ~RQF_SOFTBARRIER;
 			list_del_init(&rq->queuelist);
-			blk_mq_request_bypass_insert(rq, false, false);
+			blk_mq_request_bypass_insert(rq, false);
 		} else if (rq->rq_flags & RQF_SOFTBARRIER) {
 			rq->rq_flags &= ~RQF_SOFTBARRIER;
 			list_del_init(&rq->queuelist);
@@ -2457,13 +2457,11 @@ static void blk_mq_run_work_fn(struct work_struct *work)
  * blk_mq_request_bypass_insert - Insert a request at dispatch list.
  * @rq: Pointer to request to be inserted.
  * @at_head: true if the request should be inserted at the head of the list.
- * @run_queue: If we should run the hardware queue after inserting the request.
  *
  * Should only be used carefully, when the caller knows we want to
  * bypass a potential IO scheduler on the target device.
  */
-void blk_mq_request_bypass_insert(struct request *rq, bool at_head,
-				  bool run_queue)
+void blk_mq_request_bypass_insert(struct request *rq, bool at_head)
 {
 	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
 
@@ -2473,9 +2471,6 @@ void blk_mq_request_bypass_insert(struct request *rq, bool at_head,
 	else
 		list_add_tail(&rq->queuelist, &hctx->dispatch);
 	spin_unlock(&hctx->lock);
-
-	if (run_queue)
-		blk_mq_run_hw_queue(hctx, false);
 }
 
 static void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx,
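With the run_queue argument gone, running the hardware queue becomes the caller's responsibility. A minimal sketch of the resulting calling convention, assuming only the signatures visible in this diff (example_bypass_and_run is a hypothetical helper, not part of the patch):

/*
 * Hypothetical caller sketch, not kernel code: a site that previously
 * passed run_queue=true now pairs the insert with an explicit queue run.
 */
static void example_bypass_and_run(struct request *rq)
{
	/* Put the request straight on hctx->dispatch, bypassing the scheduler. */
	blk_mq_request_bypass_insert(rq, false);

	/* Formerly implied by run_queue=true; now written out at the call site. */
	blk_mq_run_hw_queue(rq->mq_hctx, false);
}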
@@ -2530,7 +2525,7 @@ static void blk_mq_insert_request(struct request *rq, bool at_head)
 		 * and it is added to the scheduler queue, there is no chance to
 		 * dispatch it given we prioritize requests in hctx->dispatch.
 		 */
-		blk_mq_request_bypass_insert(rq, at_head, false);
+		blk_mq_request_bypass_insert(rq, at_head);
 	} else if (rq->rq_flags & RQF_FLUSH_SEQ) {
 		/*
 		 * Firstly normal IO request is inserted to scheduler queue or
@@ -2553,7 +2548,7 @@ static void blk_mq_insert_request(struct request *rq, bool at_head)
 		 * Simply queue flush rq to the front of hctx->dispatch so that
 		 * intensive flush workloads can benefit in case of NCQ HW.
 		 */
-		blk_mq_request_bypass_insert(rq, true, false);
+		blk_mq_request_bypass_insert(rq, true);
 	} else if (q->elevator) {
 		LIST_HEAD(list);
 
@@ -2673,7 +2668,8 @@ static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
 		break;
 	case BLK_STS_RESOURCE:
 	case BLK_STS_DEV_RESOURCE:
-		blk_mq_request_bypass_insert(rq, false, true);
+		blk_mq_request_bypass_insert(rq, false);
+		blk_mq_run_hw_queue(hctx, false);
 		break;
 	default:
 		blk_mq_end_request(rq, ret);
@@ -2720,7 +2716,8 @@ static void blk_mq_plug_issue_direct(struct blk_plug *plug)
 			break;
 		case BLK_STS_RESOURCE:
 		case BLK_STS_DEV_RESOURCE:
-			blk_mq_request_bypass_insert(rq, false, true);
+			blk_mq_request_bypass_insert(rq, false);
+			blk_mq_run_hw_queue(hctx, false);
 			goto out;
 		default:
 			blk_mq_end_request(rq, ret);
@@ -2838,8 +2835,9 @@ static void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
 			break;
 		case BLK_STS_RESOURCE:
 		case BLK_STS_DEV_RESOURCE:
-			blk_mq_request_bypass_insert(rq, false,
-						     list_empty(list));
+			blk_mq_request_bypass_insert(rq, false);
+			if (list_empty(list))
+				blk_mq_run_hw_queue(hctx, false);
 			goto out;
 		default:
 			blk_mq_end_request(rq, ret);
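The list-issue hunk above replaces the list_empty(list) value that was folded into the run_queue argument with an explicit conditional run. A hedged sketch of that error-path pattern, assuming the same semantics as the diff (example_list_issue_error_path is hypothetical):

/*
 * Hypothetical sketch of the BLK_STS_RESOURCE error path above, not
 * kernel code: park the request on hctx->dispatch for a later retry,
 * then only kick the hardware queue when this batch has nothing left
 * to issue; otherwise the remaining requests will trigger the run.
 */
static void example_list_issue_error_path(struct blk_mq_hw_ctx *hctx,
					  struct request *rq,
					  struct list_head *list)
{
	blk_mq_request_bypass_insert(rq, false);
	if (list_empty(list))
		blk_mq_run_hw_queue(hctx, false);
}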