@@ -44,7 +44,7 @@
 
 static DEFINE_PER_CPU(struct llist_head, blk_cpu_done);
 
-static void blk_mq_insert_request(struct request *rq, bool at_head);
+static void blk_mq_insert_request(struct request *rq, blk_insert_t flags);
 static void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
 		struct list_head *list);
 
@@ -1308,7 +1308,7 @@ void blk_execute_rq_nowait(struct request *rq, bool at_head)
 		return;
 	}
 
-	blk_mq_insert_request(rq, at_head);
+	blk_mq_insert_request(rq, at_head ? BLK_MQ_INSERT_AT_HEAD : 0);
 	blk_mq_run_hw_queue(hctx, false);
 }
 EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
@@ -1371,7 +1371,7 @@ blk_status_t blk_execute_rq(struct request *rq, bool at_head)
 	rq->end_io = blk_end_sync_rq;
 
 	blk_account_io_start(rq);
-	blk_mq_insert_request(rq, at_head);
+	blk_mq_insert_request(rq, at_head ? BLK_MQ_INSERT_AT_HEAD : 0);
 	blk_mq_run_hw_queue(hctx, false);
 
 	if (blk_rq_is_poll(rq)) {
@@ -1451,14 +1451,14 @@ static void blk_mq_requeue_work(struct work_struct *work)
 		} else if (rq->rq_flags & RQF_SOFTBARRIER) {
 			rq->rq_flags &= ~RQF_SOFTBARRIER;
 			list_del_init(&rq->queuelist);
-			blk_mq_insert_request(rq, true);
+			blk_mq_insert_request(rq, BLK_MQ_INSERT_AT_HEAD);
 		}
 	}
 
 	while (!list_empty(&rq_list)) {
 		rq = list_entry(rq_list.next, struct request, queuelist);
 		list_del_init(&rq->queuelist);
-		blk_mq_insert_request(rq, false);
+		blk_mq_insert_request(rq, 0);
 	}
 
 	blk_mq_run_hw_queues(q, false);
@@ -2509,7 +2509,7 @@ static void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx,
 		blk_mq_run_hw_queue(hctx, run_queue_async);
 }
 
-static void blk_mq_insert_request(struct request *rq, bool at_head)
+static void blk_mq_insert_request(struct request *rq, blk_insert_t flags)
 {
 	struct request_queue *q = rq->q;
 	struct blk_mq_ctx *ctx = rq->mq_ctx;
@@ -2526,7 +2526,7 @@ static void blk_mq_insert_request(struct request *rq, bool at_head)
 		 * and it is added to the scheduler queue, there is no chance to
 		 * dispatch it given we prioritize requests in hctx->dispatch.
 		 */
-		blk_mq_request_bypass_insert(rq, at_head);
+		blk_mq_request_bypass_insert(rq, flags & BLK_MQ_INSERT_AT_HEAD);
 	} else if (rq->rq_flags & RQF_FLUSH_SEQ) {
 		/*
 		 * Firstly normal IO request is inserted to scheduler queue or
@@ -2556,12 +2556,13 @@ static void blk_mq_insert_request(struct request *rq, bool at_head)
 		WARN_ON_ONCE(rq->tag != BLK_MQ_NO_TAG);
 
 		list_add(&rq->queuelist, &list);
-		q->elevator->type->ops.insert_requests(hctx, &list, at_head);
+		q->elevator->type->ops.insert_requests(hctx, &list,
+				flags & BLK_MQ_INSERT_AT_HEAD);
 	} else {
 		trace_block_rq_insert(rq);
 
 		spin_lock(&ctx->lock);
-		if (at_head)
+		if (flags & BLK_MQ_INSERT_AT_HEAD)
 			list_add(&rq->queuelist, &ctx->rq_lists[hctx->type]);
 		else
 			list_add_tail(&rq->queuelist,
@@ -2653,12 +2654,12 @@ static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
 	blk_status_t ret;
 
 	if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(rq->q)) {
-		blk_mq_insert_request(rq, false);
+		blk_mq_insert_request(rq, 0);
 		return;
 	}
 
 	if ((rq->rq_flags & RQF_ELV) || !blk_mq_get_budget_and_tag(rq)) {
-		blk_mq_insert_request(rq, false);
+		blk_mq_insert_request(rq, 0);
 		blk_mq_run_hw_queue(hctx, false);
 		return;
 	}
@@ -2683,7 +2684,7 @@ static blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last)
 	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
 
 	if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(rq->q)) {
-		blk_mq_insert_request(rq, false);
+		blk_mq_insert_request(rq, 0);
 		return BLK_STS_OK;
 	}
 
@@ -3018,7 +3019,7 @@ void blk_mq_submit_bio(struct bio *bio)
 	hctx = rq->mq_hctx;
 	if ((rq->rq_flags & RQF_ELV) ||
 	    (hctx->dispatch_busy && (q->nr_hw_queues == 1 || !is_sync))) {
-		blk_mq_insert_request(rq, false);
+		blk_mq_insert_request(rq, 0);
 		blk_mq_run_hw_queue(hctx, true);
 	} else {
 		blk_mq_run_dispatch_ops(q, blk_mq_try_issue_directly(hctx, rq));
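
This change replaces blk_mq_insert_request()'s bool at_head parameter with a blk_insert_t flags word: callers now pass either 0 or BLK_MQ_INSERT_AT_HEAD, and the insert path tests flags & BLK_MQ_INSERT_AT_HEAD wherever it previously branched on at_head. The definitions of blk_insert_t and BLK_MQ_INSERT_AT_HEAD are not part of this diff; a minimal sketch of what it assumes, with the typedef and flag value shown here as an illustration of the idiom rather than a verbatim quote of block/blk-mq.h, would look roughly like this:

/*
 * Sketch of the declarations the diff relies on (assumed to live in
 * block/blk-mq.h).  __bitwise/__force are the sparse annotations from
 * linux/compiler_types.h, so mixing the flags word with plain integers
 * or bools triggers a sparse warning.  The names match the diff; the
 * exact value is an assumption for illustration.
 */
typedef unsigned int __bitwise blk_insert_t;
#define BLK_MQ_INSERT_AT_HEAD	((__force blk_insert_t)0x01)

Using a flags word instead of a bool leaves room for further insert-time flags without changing every caller's signature again and makes head insertions easy to grep for; a caller that still has a boolean simply maps it, as blk_execute_rq() does above with blk_mq_insert_request(rq, at_head ? BLK_MQ_INSERT_AT_HEAD : 0).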