Commit ca0a265

io_uring: don't keep looping for more events if we can't flush overflow
It doesn't make sense to wait for more events to come in, if we can't
even flush the overflow we already have to the ring. Return -EBUSY for
that condition, just like we do for attempts to submit with overflow
pending.

Cc: stable@vger.kernel.org # 5.11
Signed-off-by: Jens Axboe <axboe@kernel.dk>
1 parent 46fe18b
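
For context (not part of this commit's diff): with this change, a wait for
completions via io_uring_enter(2) with IORING_ENTER_GETEVENTS can now fail
with -EBUSY when overflowed CQEs cannot be flushed back into an already-full
CQ ring. The sketch below is a hypothetical raw-syscall caller showing one
way userspace might react; the helper name wait_for_cqes() and the ring
setup it relies on are assumptions for illustration, not part of the kernel
change.

        #include <errno.h>
        #include <unistd.h>
        #include <sys/syscall.h>
        #include <linux/io_uring.h>

        /* Hypothetical helper: wait for at least min_complete completions
         * on an io_uring instance identified by ring_fd (from io_uring_setup).
         */
        static int wait_for_cqes(int ring_fd, unsigned int min_complete)
        {
                int ret;

                do {
                        ret = syscall(__NR_io_uring_enter, ring_fd, 0,
                                      min_complete, IORING_ENTER_GETEVENTS,
                                      NULL, 0);
                } while (ret < 0 && errno == EINTR);

                if (ret < 0 && errno == EBUSY) {
                        /*
                         * Overflowed CQEs could not be flushed because the
                         * CQ ring is full: consume pending CQEs (advance the
                         * CQ head) to make room, then wait again.
                         */
                        return -EBUSY;
                }
                return ret < 0 ? -errno : ret;
        }

On -EBUSY the expected reaction mirrors the existing submit-side behavior
the commit message refers to: reap completions already in the ring so the
kernel can flush the overflow, then retry the wait.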

1 file changed

fs/io_uring.c: 12 additions & 3 deletions
@@ -1451,18 +1451,22 @@ static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force,
 	return all_flushed;
 }
 
-static void io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force,
+static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force,
 				     struct task_struct *tsk,
 				     struct files_struct *files)
 {
+	bool ret = true;
+
 	if (test_bit(0, &ctx->cq_check_overflow)) {
 		/* iopoll syncs against uring_lock, not completion_lock */
 		if (ctx->flags & IORING_SETUP_IOPOLL)
 			mutex_lock(&ctx->uring_lock);
-		__io_cqring_overflow_flush(ctx, force, tsk, files);
+		ret = __io_cqring_overflow_flush(ctx, force, tsk, files);
 		if (ctx->flags & IORING_SETUP_IOPOLL)
 			mutex_unlock(&ctx->uring_lock);
 	}
+
+	return ret;
 }
 
 static void __io_cqring_fill_event(struct io_kiocb *req, long res, long cflags)
@@ -6883,11 +6887,16 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
 	iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts);
 	trace_io_uring_cqring_wait(ctx, min_events);
 	do {
-		io_cqring_overflow_flush(ctx, false, NULL, NULL);
+		/* if we can't even flush overflow, don't wait for more */
+		if (!io_cqring_overflow_flush(ctx, false, NULL, NULL)) {
+			ret = -EBUSY;
+			break;
+		}
 		prepare_to_wait_exclusive(&ctx->wait, &iowq.wq,
					  TASK_INTERRUPTIBLE);
 		ret = io_cqring_wait_schedule(ctx, &iowq, &timeout);
 		finish_wait(&ctx->wait, &iowq.wq);
+		cond_resched();
 	} while (ret > 0);
 
 	restore_saved_sigmask_unless(ret == -EINTR);
