@@ -1447,7 +1447,7 @@ static void blk_mq_requeue_work(struct work_struct *work)
 		if (rq->rq_flags & RQF_DONTPREP) {
 			rq->rq_flags &= ~RQF_SOFTBARRIER;
 			list_del_init(&rq->queuelist);
-			blk_mq_request_bypass_insert(rq, false);
+			blk_mq_request_bypass_insert(rq, 0);
 		} else if (rq->rq_flags & RQF_SOFTBARRIER) {
 			rq->rq_flags &= ~RQF_SOFTBARRIER;
 			list_del_init(&rq->queuelist);
@@ -2457,17 +2457,17 @@ static void blk_mq_run_work_fn(struct work_struct *work)
 /**
  * blk_mq_request_bypass_insert - Insert a request at dispatch list.
  * @rq: Pointer to request to be inserted.
- * @at_head: true if the request should be inserted at the head of the list.
+ * @flags: BLK_MQ_INSERT_*
  *
  * Should only be used carefully, when the caller knows we want to
  * bypass a potential IO scheduler on the target device.
  */
-void blk_mq_request_bypass_insert(struct request *rq, bool at_head)
+void blk_mq_request_bypass_insert(struct request *rq, blk_insert_t flags)
 {
 	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
 
 	spin_lock(&hctx->lock);
-	if (at_head)
+	if (flags & BLK_MQ_INSERT_AT_HEAD)
 		list_add(&rq->queuelist, &hctx->dispatch);
 	else
 		list_add_tail(&rq->queuelist, &hctx->dispatch);
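
The new argument is typed as blk_insert_t rather than a plain bool, so further insert behaviours can be added later without touching the signature again. Neither the typedef nor BLK_MQ_INSERT_AT_HEAD is defined in this hunk; a minimal sketch of how they are assumed to be declared in block/blk-mq.h (the __bitwise/__force annotations only matter for sparse type checking):

	/* Assumed declarations; not part of this diff. */
	typedef unsigned int __bitwise blk_insert_t;
	#define BLK_MQ_INSERT_AT_HEAD	((__force blk_insert_t)0x01)

With that encoding, passing 0 preserves the old at_head == false behaviour and passing BLK_MQ_INSERT_AT_HEAD preserves at_head == true, which is exactly the substitution the remaining hunks perform.
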
@@ -2526,7 +2526,7 @@ static void blk_mq_insert_request(struct request *rq, blk_insert_t flags)
 		 * and it is added to the scheduler queue, there is no chance to
 		 * dispatch it given we prioritize requests in hctx->dispatch.
 		 */
-		blk_mq_request_bypass_insert(rq, flags & BLK_MQ_INSERT_AT_HEAD);
+		blk_mq_request_bypass_insert(rq, flags);
 	} else if (rq->rq_flags & RQF_FLUSH_SEQ) {
 		/*
 		 * Firstly normal IO request is inserted to scheduler queue or
@@ -2549,7 +2549,7 @@ static void blk_mq_insert_request(struct request *rq, blk_insert_t flags)
 		 * Simply queue flush rq to the front of hctx->dispatch so that
 		 * intensive flush workloads can benefit in case of NCQ HW.
 		 */
-		blk_mq_request_bypass_insert(rq, true);
+		blk_mq_request_bypass_insert(rq, BLK_MQ_INSERT_AT_HEAD);
 	} else if (q->elevator) {
 		LIST_HEAD(list);
 
@@ -2670,7 +2670,7 @@ static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
 		break;
 	case BLK_STS_RESOURCE:
 	case BLK_STS_DEV_RESOURCE:
-		blk_mq_request_bypass_insert(rq, false);
+		blk_mq_request_bypass_insert(rq, 0);
 		blk_mq_run_hw_queue(hctx, false);
 		break;
 	default:
@@ -2718,7 +2718,7 @@ static void blk_mq_plug_issue_direct(struct blk_plug *plug)
 			break;
 		case BLK_STS_RESOURCE:
 		case BLK_STS_DEV_RESOURCE:
-			blk_mq_request_bypass_insert(rq, false);
+			blk_mq_request_bypass_insert(rq, 0);
 			blk_mq_run_hw_queue(hctx, false);
 			goto out;
 		default:
@@ -2837,7 +2837,7 @@ static void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
 			break;
 		case BLK_STS_RESOURCE:
 		case BLK_STS_DEV_RESOURCE:
-			blk_mq_request_bypass_insert(rq, false);
+			blk_mq_request_bypass_insert(rq, 0);
 			if (list_empty(list))
 				blk_mq_run_hw_queue(hctx, false);
 			goto out;
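
For readers outside the kernel tree, the head/tail semantics selected by the flag can be exercised with a small stand-alone mock; the type and flag names mirror the kernel's, but the singly linked dispatch list and the main() harness are purely illustrative assumptions, not kernel code:

/* Stand-alone mock of the flags-based insert (illustrative only). */
#include <stdio.h>

typedef unsigned int blk_insert_t;
#define BLK_MQ_INSERT_AT_HEAD ((blk_insert_t)0x01)

struct request {
	int id;
	struct request *next;
};

static struct request *dispatch;	/* stands in for hctx->dispatch */

/* Head insertion when the flag is set, tail insertion otherwise. */
static void mock_bypass_insert(struct request *rq, blk_insert_t flags)
{
	if (flags & BLK_MQ_INSERT_AT_HEAD) {
		rq->next = dispatch;
		dispatch = rq;
	} else {
		struct request **p = &dispatch;

		while (*p)
			p = &(*p)->next;
		rq->next = NULL;
		*p = rq;
	}
}

int main(void)
{
	struct request a = { .id = 1 }, b = { .id = 2 }, c = { .id = 3 };

	mock_bypass_insert(&a, 0);			/* old "false" path */
	mock_bypass_insert(&b, 0);
	mock_bypass_insert(&c, BLK_MQ_INSERT_AT_HEAD);	/* old "true" path */

	for (struct request *rq = dispatch; rq; rq = rq->next)
		printf("rq %d\n", rq->id);		/* prints 3, 1, 2 */
	return 0;
}

Calling the mock with 0 appends to the list while BLK_MQ_INSERT_AT_HEAD prepends, so the program prints requests 3, 1, 2, matching the list_add()/list_add_tail() choice in the patched function.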