@@ -1084,13 +1084,10 @@ static void __ublk_fail_req(struct ublk_queue *ubq, struct ublk_io *io,
 {
 	WARN_ON_ONCE(io->flags & UBLK_IO_FLAG_ACTIVE);
 
-	if (!(io->flags & UBLK_IO_FLAG_ABORTED)) {
-		io->flags |= UBLK_IO_FLAG_ABORTED;
-		if (ublk_queue_can_use_recovery_reissue(ubq))
-			blk_mq_requeue_request(req, false);
-		else
-			ublk_put_req_ref(ubq, req);
-	}
+	if (ublk_queue_can_use_recovery_reissue(ubq))
+		blk_mq_requeue_request(req, false);
+	else
+		ublk_put_req_ref(ubq, req);
 }
 
 static void ubq_complete_io_cmd(struct ublk_io *io, int res,
@@ -1231,27 +1228,10 @@ static void ublk_rq_task_work_cb(struct io_uring_cmd *cmd, unsigned issue_flags)
 static void ublk_queue_cmd(struct ublk_queue *ubq, struct request *rq)
 {
 	struct ublk_rq_data *data = blk_mq_rq_to_pdu(rq);
-	struct ublk_io *io;
 
-	if (!llist_add(&data->node, &ubq->io_cmds))
-		return;
+	if (llist_add(&data->node, &ubq->io_cmds)) {
+		struct ublk_io *io = &ubq->ios[rq->tag];
 
-	io = &ubq->ios[rq->tag];
-	/*
-	 * If the check pass, we know that this is a re-issued request aborted
-	 * previously in cancel fn because the ubq_daemon(cmd's task) is
-	 * PF_EXITING. We cannot call io_uring_cmd_complete_in_task() anymore
-	 * because this ioucmd's io_uring context may be freed now if no inflight
-	 * ioucmd exists. Otherwise we may cause null-deref in ctx->fallback_work.
-	 *
-	 * Note: cancel fn sets UBLK_IO_FLAG_ABORTED and ends this request(releasing
-	 * the tag). Then the request is re-started(allocating the tag) and we are here.
-	 * Since releasing/allocating a tag implies smp_mb(), finding UBLK_IO_FLAG_ABORTED
-	 * guarantees that here is a re-issued request aborted previously.
-	 */
-	if (unlikely(io->flags & UBLK_IO_FLAG_ABORTED)) {
-		ublk_abort_io_cmds(ubq);
-	} else {
 		io_uring_cmd_complete_in_task(io->cmd, ublk_rq_task_work_cb);
 	}
 }
@@ -1321,13 +1301,12 @@ static blk_status_t ublk_queue_rq(struct blk_mq_hw_ctx *hctx,
 	if (ublk_queue_can_use_recovery(ubq) && unlikely(ubq->force_abort))
 		return BLK_STS_IOERR;
 
-	blk_mq_start_request(bd->rq);
-
 	if (unlikely(ubq->canceling)) {
 		__ublk_abort_rq(ubq, rq);
 		return BLK_STS_OK;
 	}
 
+	blk_mq_start_request(bd->rq);
 	ublk_queue_cmd(ubq, rq);
 
 	return BLK_STS_OK;
@@ -1450,8 +1429,10 @@ static void ublk_abort_queue(struct ublk_device *ub, struct ublk_queue *ubq)
 			 * will do it
 			 */
 			rq = blk_mq_tag_to_rq(ub->tag_set.tags[ubq->q_id], i);
-			if (rq)
+			if (rq && blk_mq_request_started(rq)) {
+				io->flags |= UBLK_IO_FLAG_ABORTED;
 				__ublk_fail_req(ubq, io, rq);
+			}
 		}
 	}
 }
@@ -1535,7 +1516,7 @@ static void ublk_uring_cmd_cancel_fn(struct io_uring_cmd *cmd,
 
 	io = &ubq->ios[pdu->tag];
 	WARN_ON_ONCE(io->cmd != cmd);
-	ublk_cancel_cmd(ubq, &ubq->ios[pdu->tag], issue_flags);
+	ublk_cancel_cmd(ubq, io, issue_flags);
 
 	if (need_schedule) {
 		if (ublk_can_use_recovery(ub))
0 commit comments