Skip to content

Commit 11c0239

Browse files
committed
Merge tag 'io_uring-5.11-2021-01-16' of git://git.kernel.dk/linux-block
Pull io_uring fixes from Jens Axboe:
 "We still have a pending fix for a cancelation issue, but it's still
  being investigated. In the meantime:

   - Dead mm handling fix (Pavel)

   - SQPOLL setup error handling (Pavel)

   - Flush timeout sequence fix (Marcelo)

   - Missing finish_wait() for one exit case"

* tag 'io_uring-5.11-2021-01-16' of git://git.kernel.dk/linux-block:
  io_uring: ensure finish_wait() is always called in __io_uring_task_cancel()
  io_uring: flush timeouts that should already have expired
  io_uring: do sqo disable on install_fd error
  io_uring: fix null-deref in io_disable_sqo_submit
  io_uring: don't take files/mm for a dead task
  io_uring: drop mm and files after task_work_run
2 parents acda701 + a8d13db commit 11c0239

1 file changed

Lines changed: 41 additions & 5 deletions

File tree

fs/io_uring.c

Lines changed: 41 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -354,6 +354,7 @@ struct io_ring_ctx {
 	unsigned		cq_entries;
 	unsigned		cq_mask;
 	atomic_t		cq_timeouts;
+	unsigned		cq_last_tm_flush;
 	unsigned long		cq_check_overflow;
 	struct wait_queue_head	cq_wait;
 	struct fasync_struct	*cq_fasync;
@@ -1106,6 +1107,9 @@ static void io_sq_thread_drop_mm_files(void)
 
 static int __io_sq_thread_acquire_files(struct io_ring_ctx *ctx)
 {
+	if (current->flags & PF_EXITING)
+		return -EFAULT;
+
 	if (!current->files) {
 		struct files_struct *files;
 		struct nsproxy *nsproxy;
@@ -1133,6 +1137,8 @@ static int __io_sq_thread_acquire_mm(struct io_ring_ctx *ctx)
 {
 	struct mm_struct *mm;
 
+	if (current->flags & PF_EXITING)
+		return -EFAULT;
 	if (current->mm)
 		return 0;
 
@@ -1634,19 +1640,38 @@ static void __io_queue_deferred(struct io_ring_ctx *ctx)
 
 static void io_flush_timeouts(struct io_ring_ctx *ctx)
 {
-	while (!list_empty(&ctx->timeout_list)) {
+	u32 seq;
+
+	if (list_empty(&ctx->timeout_list))
+		return;
+
+	seq = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
+
+	do {
+		u32 events_needed, events_got;
 		struct io_kiocb *req = list_first_entry(&ctx->timeout_list,
 						struct io_kiocb, timeout.list);
 
 		if (io_is_timeout_noseq(req))
 			break;
-		if (req->timeout.target_seq != ctx->cached_cq_tail
-					- atomic_read(&ctx->cq_timeouts))
+
+		/*
+		 * Since seq can easily wrap around over time, subtract
+		 * the last seq at which timeouts were flushed before comparing.
+		 * Assuming not more than 2^31-1 events have happened since,
+		 * these subtractions won't have wrapped, so we can check if
+		 * target is in [last_seq, current_seq] by comparing the two.
+		 */
+		events_needed = req->timeout.target_seq - ctx->cq_last_tm_flush;
+		events_got = seq - ctx->cq_last_tm_flush;
+		if (events_got < events_needed)
 			break;
 
 		list_del_init(&req->timeout.list);
 		io_kill_timeout(req);
-	}
+	} while (!list_empty(&ctx->timeout_list));
+
+	ctx->cq_last_tm_flush = seq;
 }
 
 static void io_commit_cqring(struct io_ring_ctx *ctx)
@@ -5832,6 +5857,12 @@ static int io_timeout(struct io_kiocb *req)
 	tail = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
 	req->timeout.target_seq = tail + off;
 
+	/* Update the last seq here in case io_flush_timeouts() hasn't.
+	 * This is safe because ->completion_lock is held, and submissions
+	 * and completions are never mixed in the same ->completion_lock section.
+	 */
+	ctx->cq_last_tm_flush = tail;
+
 	/*
 	 * Insertion sort, ensuring the first entry in the list is always
 	 * the one we need first.
@@ -7056,6 +7087,7 @@ static int io_sq_thread(void *data)
 
 		if (sqt_spin || !time_after(jiffies, timeout)) {
 			io_run_task_work();
+			io_sq_thread_drop_mm_files();
 			cond_resched();
 			if (sqt_spin)
 				timeout = jiffies + sqd->sq_thread_idle;
@@ -7093,6 +7125,7 @@ static int io_sq_thread(void *data)
 	}
 
 	io_run_task_work();
+	io_sq_thread_drop_mm_files();
 
 	if (cur_css)
 		io_sq_thread_unassociate_blkcg();
@@ -8888,7 +8921,8 @@ static void io_disable_sqo_submit(struct io_ring_ctx *ctx)
 	mutex_unlock(&ctx->uring_lock);
 
 	/* make sure callers enter the ring to get error */
-	io_ring_set_wakeup_flag(ctx);
+	if (ctx->rings)
+		io_ring_set_wakeup_flag(ctx);
 }
 
 /*
@@ -9067,6 +9101,7 @@ void __io_uring_task_cancel(void)
 		finish_wait(&tctx->wait, &wait);
 	} while (1);
 
+	finish_wait(&tctx->wait, &wait);
 	atomic_dec(&tctx->in_idle);
 
 	io_uring_remove_task_files(tctx);
@@ -9700,6 +9735,7 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p,
 	 */
 	ret = io_uring_install_fd(ctx, file);
 	if (ret < 0) {
+		io_disable_sqo_submit(ctx);
 		/* fput will clean it up */
 		fput(file);
 		return ret;

0 commit comments

Comments
 (0)