Skip to content

Commit 79525b5

Browse files
keithbusch authored and axboe committed
io_uring: fix nvme's 32b cqes on mixed cq
The nvme uring_cmd only uses 32b CQEs. If the ring uses a mixed CQ, then we need to make sure we flag the completion as a 32b CQE. On the other hand, if nvme uring_cmd was using a dedicated 32b CQE, the posting was missing the extra memcpy because it only applied to 32b CQEs on a mixed CQ. Fixes: e26dca6 ("io_uring: add support for IORING_SETUP_CQE_MIXED") Signed-off-by: Keith Busch <kbusch@kernel.org> Signed-off-by: Jens Axboe <axboe@kernel.dk>
1 parent 7ea2432 commit 79525b5

4 files changed

Lines changed: 25 additions & 10 deletions

File tree

drivers/nvme/host/ioctl.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -410,7 +410,7 @@ static void nvme_uring_task_cb(struct io_uring_cmd *ioucmd,
410410

411411
if (pdu->bio)
412412
blk_rq_unmap_user(pdu->bio);
413-
io_uring_cmd_done(ioucmd, pdu->status, pdu->result, issue_flags);
413+
io_uring_cmd_done32(ioucmd, pdu->status, pdu->result, issue_flags);
414414
}
415415

416416
static enum rq_end_io_ret nvme_uring_cmd_end_io(struct request *req,

include/linux/io_uring/cmd.h

Lines changed: 16 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -56,8 +56,8 @@ int io_uring_cmd_import_fixed_vec(struct io_uring_cmd *ioucmd,
5656
* Note: the caller should never hard code @issue_flags and is only allowed
5757
* to pass the mask provided by the core io_uring code.
5858
*/
59-
void io_uring_cmd_done(struct io_uring_cmd *cmd, s32 ret, u64 res2,
60-
unsigned issue_flags);
59+
void __io_uring_cmd_done(struct io_uring_cmd *cmd, s32 ret, u64 res2,
60+
unsigned issue_flags, bool is_cqe32);
6161

6262
void __io_uring_cmd_do_in_task(struct io_uring_cmd *ioucmd,
6363
io_uring_cmd_tw_t task_work_cb,
@@ -104,8 +104,8 @@ static inline int io_uring_cmd_import_fixed_vec(struct io_uring_cmd *ioucmd,
104104
{
105105
return -EOPNOTSUPP;
106106
}
107-
static inline void io_uring_cmd_done(struct io_uring_cmd *cmd, s32 ret,
108-
u64 ret2, unsigned issue_flags)
107+
static inline void __io_uring_cmd_done(struct io_uring_cmd *cmd, s32 ret,
108+
u64 ret2, unsigned issue_flags, bool is_cqe32)
109109
{
110110
}
111111
static inline void __io_uring_cmd_do_in_task(struct io_uring_cmd *ioucmd,
@@ -159,6 +159,18 @@ static inline void *io_uring_cmd_ctx_handle(struct io_uring_cmd *cmd)
159159
return cmd_to_io_kiocb(cmd)->ctx;
160160
}
161161

162+
static inline void io_uring_cmd_done(struct io_uring_cmd *ioucmd, s32 ret,
163+
u64 res2, unsigned issue_flags)
164+
{
165+
return __io_uring_cmd_done(ioucmd, ret, res2, issue_flags, false);
166+
}
167+
168+
static inline void io_uring_cmd_done32(struct io_uring_cmd *ioucmd, s32 ret,
169+
u64 res2, unsigned issue_flags)
170+
{
171+
return __io_uring_cmd_done(ioucmd, ret, res2, issue_flags, true);
172+
}
173+
162174
int io_buffer_register_bvec(struct io_uring_cmd *cmd, struct request *rq,
163175
void (*release)(void *), unsigned int index,
164176
unsigned int issue_flags);

io_uring/io_uring.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -275,7 +275,7 @@ static __always_inline bool io_fill_cqe_req(struct io_ring_ctx *ctx,
275275
return false;
276276

277277
memcpy(cqe, &req->cqe, sizeof(*cqe));
278-
if (is_cqe32) {
278+
if (ctx->flags & IORING_SETUP_CQE32 || is_cqe32) {
279279
memcpy(cqe->big_cqe, &req->big_cqe, sizeof(*cqe));
280280
memset(&req->big_cqe, 0, sizeof(req->big_cqe));
281281
}

io_uring/uring_cmd.c

Lines changed: 7 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -151,8 +151,8 @@ static inline void io_req_set_cqe32_extra(struct io_kiocb *req,
151151
* Called by consumers of io_uring_cmd, if they originally returned
152152
* -EIOCBQUEUED upon receiving the command.
153153
*/
154-
void io_uring_cmd_done(struct io_uring_cmd *ioucmd, s32 ret, u64 res2,
155-
unsigned issue_flags)
154+
void __io_uring_cmd_done(struct io_uring_cmd *ioucmd, s32 ret, u64 res2,
155+
unsigned issue_flags, bool is_cqe32)
156156
{
157157
struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);
158158

@@ -165,8 +165,11 @@ void io_uring_cmd_done(struct io_uring_cmd *ioucmd, s32 ret, u64 res2,
165165
req_set_fail(req);
166166

167167
io_req_set_res(req, ret, 0);
168-
if (req->ctx->flags & IORING_SETUP_CQE32)
168+
if (is_cqe32) {
169+
if (req->ctx->flags & IORING_SETUP_CQE_MIXED)
170+
req->cqe.flags |= IORING_CQE_F_32;
169171
io_req_set_cqe32_extra(req, res2, 0);
172+
}
170173
io_req_uring_cleanup(req, issue_flags);
171174
if (req->ctx->flags & IORING_SETUP_IOPOLL) {
172175
/* order with io_iopoll_req_issued() checking ->iopoll_complete */
@@ -180,7 +183,7 @@ void io_uring_cmd_done(struct io_uring_cmd *ioucmd, s32 ret, u64 res2,
180183
io_req_task_work_add(req);
181184
}
182185
}
183-
EXPORT_SYMBOL_GPL(io_uring_cmd_done);
186+
EXPORT_SYMBOL_GPL(__io_uring_cmd_done);
184187

185188
int io_uring_cmd_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
186189
{

0 commit comments

Comments
 (0)