Skip to content

Commit 584b018

Browse files
committed
io_uring: move read/write file prep state into actual opcode handler
In preparation for not necessarily having a file assigned at prep time, defer any initialization associated with the file to when the opcode handler is run.

Cc: stable@vger.kernel.org # v5.15+
Signed-off-by: Jens Axboe <axboe@kernel.dk>
1 parent a3e4bc2 commit 584b018

1 file changed

Lines changed: 53 additions & 48 deletions

File tree

fs/io_uring.c

Lines changed: 53 additions & 48 deletions
Original file line number | Diff line number | Diff line change
@@ -592,7 +592,8 @@ struct io_rw {
592592
/* NOTE: kiocb has the file as the first member, so don't do it here */
593593
struct kiocb kiocb;
594594
u64 addr;
595-
u64 len;
595+
u32 len;
596+
u32 flags;
596597
};
597598

598599
struct io_connect {
@@ -3178,42 +3179,11 @@ static inline bool io_file_supports_nowait(struct io_kiocb *req)
31783179

31793180
static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
31803181
{
3181-
struct io_ring_ctx *ctx = req->ctx;
31823182
struct kiocb *kiocb = &req->rw.kiocb;
3183-
struct file *file = req->file;
31843183
unsigned ioprio;
31853184
int ret;
31863185

3187-
if (!io_req_ffs_set(req))
3188-
req->flags |= io_file_get_flags(file) << REQ_F_SUPPORT_NOWAIT_BIT;
3189-
31903186
kiocb->ki_pos = READ_ONCE(sqe->off);
3191-
kiocb->ki_flags = iocb_flags(file);
3192-
ret = kiocb_set_rw_flags(kiocb, READ_ONCE(sqe->rw_flags));
3193-
if (unlikely(ret))
3194-
return ret;
3195-
3196-
/*
3197-
* If the file is marked O_NONBLOCK, still allow retry for it if it
3198-
* supports async. Otherwise it's impossible to use O_NONBLOCK files
3199-
* reliably. If not, or it IOCB_NOWAIT is set, don't retry.
3200-
*/
3201-
if ((kiocb->ki_flags & IOCB_NOWAIT) ||
3202-
((file->f_flags & O_NONBLOCK) && !io_file_supports_nowait(req)))
3203-
req->flags |= REQ_F_NOWAIT;
3204-
3205-
if (ctx->flags & IORING_SETUP_IOPOLL) {
3206-
if (!(kiocb->ki_flags & IOCB_DIRECT) || !file->f_op->iopoll)
3207-
return -EOPNOTSUPP;
3208-
3209-
kiocb->ki_flags |= IOCB_HIPRI | IOCB_ALLOC_CACHE;
3210-
kiocb->ki_complete = io_complete_rw_iopoll;
3211-
req->iopoll_completed = 0;
3212-
} else {
3213-
if (kiocb->ki_flags & IOCB_HIPRI)
3214-
return -EINVAL;
3215-
kiocb->ki_complete = io_complete_rw;
3216-
}
32173187

32183188
ioprio = READ_ONCE(sqe->ioprio);
32193189
if (ioprio) {
@@ -3229,6 +3199,7 @@ static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
32293199
req->imu = NULL;
32303200
req->rw.addr = READ_ONCE(sqe->addr);
32313201
req->rw.len = READ_ONCE(sqe->len);
3202+
req->rw.flags = READ_ONCE(sqe->rw_flags);
32323203
req->buf_index = READ_ONCE(sqe->buf_index);
32333204
return 0;
32343205
}
@@ -3732,13 +3703,6 @@ static inline int io_rw_prep_async(struct io_kiocb *req, int rw)
37323703
return 0;
37333704
}
37343705

3735-
static int io_read_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3736-
{
3737-
if (unlikely(!(req->file->f_mode & FMODE_READ)))
3738-
return -EBADF;
3739-
return io_prep_rw(req, sqe);
3740-
}
3741-
37423706
/*
37433707
* This is our waitqueue callback handler, registered through __folio_lock_async()
37443708
* when we initially tried to do the IO with the iocb armed our waitqueue.
@@ -3826,6 +3790,49 @@ static bool need_read_all(struct io_kiocb *req)
38263790
S_ISBLK(file_inode(req->file)->i_mode);
38273791
}
38283792

3793+
static int io_rw_init_file(struct io_kiocb *req, fmode_t mode)
3794+
{
3795+
struct kiocb *kiocb = &req->rw.kiocb;
3796+
struct io_ring_ctx *ctx = req->ctx;
3797+
struct file *file = req->file;
3798+
int ret;
3799+
3800+
if (unlikely(!file || !(file->f_mode & mode)))
3801+
return -EBADF;
3802+
3803+
if (!io_req_ffs_set(req))
3804+
req->flags |= io_file_get_flags(file) << REQ_F_SUPPORT_NOWAIT_BIT;
3805+
3806+
kiocb->ki_flags = iocb_flags(file);
3807+
ret = kiocb_set_rw_flags(kiocb, req->rw.flags);
3808+
if (unlikely(ret))
3809+
return ret;
3810+
3811+
/*
3812+
* If the file is marked O_NONBLOCK, still allow retry for it if it
3813+
* supports async. Otherwise it's impossible to use O_NONBLOCK files
3814+
* reliably. If not, or it IOCB_NOWAIT is set, don't retry.
3815+
*/
3816+
if ((kiocb->ki_flags & IOCB_NOWAIT) ||
3817+
((file->f_flags & O_NONBLOCK) && !io_file_supports_nowait(req)))
3818+
req->flags |= REQ_F_NOWAIT;
3819+
3820+
if (ctx->flags & IORING_SETUP_IOPOLL) {
3821+
if (!(kiocb->ki_flags & IOCB_DIRECT) || !file->f_op->iopoll)
3822+
return -EOPNOTSUPP;
3823+
3824+
kiocb->ki_flags |= IOCB_HIPRI | IOCB_ALLOC_CACHE;
3825+
kiocb->ki_complete = io_complete_rw_iopoll;
3826+
req->iopoll_completed = 0;
3827+
} else {
3828+
if (kiocb->ki_flags & IOCB_HIPRI)
3829+
return -EINVAL;
3830+
kiocb->ki_complete = io_complete_rw;
3831+
}
3832+
3833+
return 0;
3834+
}
3835+
38293836
static int io_read(struct io_kiocb *req, unsigned int issue_flags)
38303837
{
38313838
struct io_rw_state __s, *s = &__s;
@@ -3861,6 +3868,9 @@ static int io_read(struct io_kiocb *req, unsigned int issue_flags)
38613868
iov_iter_restore(&s->iter, &s->iter_state);
38623869
iovec = NULL;
38633870
}
3871+
ret = io_rw_init_file(req, FMODE_READ);
3872+
if (unlikely(ret))
3873+
return ret;
38643874
req->result = iov_iter_count(&s->iter);
38653875

38663876
if (force_nonblock) {
@@ -3964,13 +3974,6 @@ static int io_read(struct io_kiocb *req, unsigned int issue_flags)
39643974
return 0;
39653975
}
39663976

3967-
static int io_write_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3968-
{
3969-
if (unlikely(!(req->file->f_mode & FMODE_WRITE)))
3970-
return -EBADF;
3971-
return io_prep_rw(req, sqe);
3972-
}
3973-
39743977
static int io_write(struct io_kiocb *req, unsigned int issue_flags)
39753978
{
39763979
struct io_rw_state __s, *s = &__s;
@@ -3991,6 +3994,9 @@ static int io_write(struct io_kiocb *req, unsigned int issue_flags)
39913994
iov_iter_restore(&s->iter, &s->iter_state);
39923995
iovec = NULL;
39933996
}
3997+
ret = io_rw_init_file(req, FMODE_WRITE);
3998+
if (unlikely(ret))
3999+
return ret;
39944000
req->result = iov_iter_count(&s->iter);
39954001

39964002
if (force_nonblock) {
@@ -6987,11 +6993,10 @@ static int io_req_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
69876993
case IORING_OP_READV:
69886994
case IORING_OP_READ_FIXED:
69896995
case IORING_OP_READ:
6990-
return io_read_prep(req, sqe);
69916996
case IORING_OP_WRITEV:
69926997
case IORING_OP_WRITE_FIXED:
69936998
case IORING_OP_WRITE:
6994-
return io_write_prep(req, sqe);
6999+
return io_prep_rw(req, sqe);
69957000
case IORING_OP_POLL_ADD:
69967001
return io_poll_add_prep(req, sqe);
69977002
case IORING_OP_POLL_REMOVE:

0 commit comments

Comments (0)