@@ -639,13 +639,8 @@ static inline void nvme_clear_nvme_request(struct request *req)
 	req->rq_flags |= RQF_DONTPREP;
 }
 
-static inline unsigned int nvme_req_op(struct nvme_command *cmd)
-{
-	return nvme_is_write(cmd) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN;
-}
-
-static inline void nvme_init_request(struct request *req,
-		struct nvme_command *cmd)
+/* initialize a passthrough request */
+void nvme_init_request(struct request *req, struct nvme_command *cmd)
 {
 	if (req->q->queuedata)
 		req->timeout = NVME_IO_TIMEOUT;
@@ -661,30 +656,7 @@ static inline void nvme_init_request(struct request *req,
 	nvme_clear_nvme_request(req);
 	memcpy(nvme_req(req)->cmd, cmd, sizeof(*cmd));
 }
-
-struct request *nvme_alloc_request(struct request_queue *q,
-		struct nvme_command *cmd, blk_mq_req_flags_t flags)
-{
-	struct request *req;
-
-	req = blk_mq_alloc_request(q, nvme_req_op(cmd), flags);
-	if (!IS_ERR(req))
-		nvme_init_request(req, cmd);
-	return req;
-}
-EXPORT_SYMBOL_GPL(nvme_alloc_request);
-
-static struct request *nvme_alloc_request_qid(struct request_queue *q,
-		struct nvme_command *cmd, blk_mq_req_flags_t flags, int qid)
-{
-	struct request *req;
-
-	req = blk_mq_alloc_request_hctx(q, nvme_req_op(cmd), flags,
-			qid ? qid - 1 : 0);
-	if (!IS_ERR(req))
-		nvme_init_request(req, cmd);
-	return req;
-}
+EXPORT_SYMBOL_GPL(nvme_init_request);
 
 /*
  * For something we're not in a state to send to the device the default action
@@ -1110,11 +1082,14 @@ int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
 	int ret;
 
 	if (qid == NVME_QID_ANY)
-		req = nvme_alloc_request(q, cmd, flags);
+		req = blk_mq_alloc_request(q, nvme_req_op(cmd), flags);
 	else
-		req = nvme_alloc_request_qid(q, cmd, flags, qid);
+		req = blk_mq_alloc_request_hctx(q, nvme_req_op(cmd), flags,
+						qid ? qid - 1 : 0);
+
 	if (IS_ERR(req))
 		return PTR_ERR(req);
+	nvme_init_request(req, cmd);
 
 	if (timeout)
 		req->timeout = timeout;
@@ -1304,14 +1279,15 @@ static void nvme_keep_alive_work(struct work_struct *work)
 		return;
 	}
 
-	rq = nvme_alloc_request(ctrl->admin_q, &ctrl->ka_cmd,
-			BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
+	rq = blk_mq_alloc_request(ctrl->admin_q, nvme_req_op(&ctrl->ka_cmd),
+				  BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
 	if (IS_ERR(rq)) {
 		/* allocation failure, reset the controller */
 		dev_err(ctrl->device, "keep-alive failed: %ld\n", PTR_ERR(rq));
 		nvme_reset_ctrl(ctrl);
 		return;
 	}
+	nvme_init_request(rq, &ctrl->ka_cmd);
 
 	rq->timeout = ctrl->kato * HZ;
 	rq->end_io_data = ctrl;
0 commit comments