Skip to content

Commit 0d64ebf

Browse files
committed
Merge tag 'io_uring-6.17-20250919' of git://git.kernel.dk/linux
Pull io_uring fixes from Jens Axboe:

 - Fix for a regression introduced in the io-wq worker creation logic.

 - Remove the allocation cache for the msg_ring io_kiocb allocations. I
   have a suspicion that there's a bug there, and since we just fixed
   one in that area, let's just yank the use of that cache entirely.
   It's not that important, and it kills some code.

 - Treat a closed ring like task exiting in that any requests that
   trigger post that condition should just get canceled. Doesn't fix
   any real issues, outside of having tasks being able to rely on that
   guarantee.

 - Fix for a bug in the network zero-copy notification mechanism, where
   a comparison for matching tctx/ctx for notifications was buggy in
   that it didn't correctly compare with the previous notification.

* tag 'io_uring-6.17-20250919' of git://git.kernel.dk/linux:
  io_uring: fix incorrect io_kiocb reference in io_link_skb
  io_uring/msg_ring: kill alloc_cache for io_kiocb allocations
  io_uring: include dying ring in task_work "should cancel" state
  io_uring/io-wq: fix `max_workers` breakage and `nr_workers` underflow
2 parents 0903d09 + 2c139a4 commit 0d64ebf

9 files changed

Lines changed: 15 additions & 40 deletions

File tree

include/linux/io_uring_types.h

Lines changed: 0 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -420,9 +420,6 @@ struct io_ring_ctx {
420420
struct list_head defer_list;
421421
unsigned nr_drained;
422422

423-
struct io_alloc_cache msg_cache;
424-
spinlock_t msg_lock;
425-
426423
#ifdef CONFIG_NET_RX_BUSY_POLL
427424
struct list_head napi_list; /* track busy poll napi_id */
428425
spinlock_t napi_lock; /* napi_list lock */

io_uring/io-wq.c

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -352,16 +352,16 @@ static void create_worker_cb(struct callback_head *cb)
352352
struct io_wq *wq;
353353

354354
struct io_wq_acct *acct;
355-
bool do_create = false;
355+
bool activated_free_worker, do_create = false;
356356

357357
worker = container_of(cb, struct io_worker, create_work);
358358
wq = worker->wq;
359359
acct = worker->acct;
360360

361361
rcu_read_lock();
362-
do_create = !io_acct_activate_free_worker(acct);
362+
activated_free_worker = io_acct_activate_free_worker(acct);
363363
rcu_read_unlock();
364-
if (!do_create)
364+
if (activated_free_worker)
365365
goto no_need_create;
366366

367367
raw_spin_lock(&acct->workers_lock);

io_uring/io_uring.c

Lines changed: 4 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -290,7 +290,6 @@ static void io_free_alloc_caches(struct io_ring_ctx *ctx)
290290
io_alloc_cache_free(&ctx->netmsg_cache, io_netmsg_cache_free);
291291
io_alloc_cache_free(&ctx->rw_cache, io_rw_cache_free);
292292
io_alloc_cache_free(&ctx->cmd_cache, io_cmd_cache_free);
293-
io_alloc_cache_free(&ctx->msg_cache, kfree);
294293
io_futex_cache_free(ctx);
295294
io_rsrc_cache_free(ctx);
296295
}
@@ -337,9 +336,6 @@ static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
337336
ret |= io_alloc_cache_init(&ctx->cmd_cache, IO_ALLOC_CACHE_MAX,
338337
sizeof(struct io_async_cmd),
339338
sizeof(struct io_async_cmd));
340-
spin_lock_init(&ctx->msg_lock);
341-
ret |= io_alloc_cache_init(&ctx->msg_cache, IO_ALLOC_CACHE_MAX,
342-
sizeof(struct io_kiocb), 0);
343339
ret |= io_futex_cache_init(ctx);
344340
ret |= io_rsrc_cache_init(ctx);
345341
if (ret)
@@ -1406,8 +1402,10 @@ static void io_req_task_cancel(struct io_kiocb *req, io_tw_token_t tw)
14061402

14071403
void io_req_task_submit(struct io_kiocb *req, io_tw_token_t tw)
14081404
{
1409-
io_tw_lock(req->ctx, tw);
1410-
if (unlikely(io_should_terminate_tw()))
1405+
struct io_ring_ctx *ctx = req->ctx;
1406+
1407+
io_tw_lock(ctx, tw);
1408+
if (unlikely(io_should_terminate_tw(ctx)))
14111409
io_req_defer_failed(req, -EFAULT);
14121410
else if (req->flags & REQ_F_FORCE_ASYNC)
14131411
io_queue_iowq(req);

io_uring/io_uring.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -476,9 +476,9 @@ static inline bool io_allowed_run_tw(struct io_ring_ctx *ctx)
476476
* 2) PF_KTHREAD is set, in which case the invoker of the task_work is
477477
* our fallback task_work.
478478
*/
479-
static inline bool io_should_terminate_tw(void)
479+
static inline bool io_should_terminate_tw(struct io_ring_ctx *ctx)
480480
{
481-
return current->flags & (PF_KTHREAD | PF_EXITING);
481+
return (current->flags & (PF_KTHREAD | PF_EXITING)) || percpu_ref_is_dying(&ctx->refs);
482482
}
483483

484484
static inline void io_req_queue_tw_complete(struct io_kiocb *req, s32 res)

io_uring/msg_ring.c

Lines changed: 2 additions & 22 deletions
Original file line numberDiff line numberDiff line change
@@ -11,7 +11,6 @@
1111
#include "io_uring.h"
1212
#include "rsrc.h"
1313
#include "filetable.h"
14-
#include "alloc_cache.h"
1514
#include "msg_ring.h"
1615

1716
/* All valid masks for MSG_RING */
@@ -76,13 +75,7 @@ static void io_msg_tw_complete(struct io_kiocb *req, io_tw_token_t tw)
7675
struct io_ring_ctx *ctx = req->ctx;
7776

7877
io_add_aux_cqe(ctx, req->cqe.user_data, req->cqe.res, req->cqe.flags);
79-
if (spin_trylock(&ctx->msg_lock)) {
80-
if (io_alloc_cache_put(&ctx->msg_cache, req))
81-
req = NULL;
82-
spin_unlock(&ctx->msg_lock);
83-
}
84-
if (req)
85-
kfree_rcu(req, rcu_head);
78+
kfree_rcu(req, rcu_head);
8679
percpu_ref_put(&ctx->refs);
8780
}
8881

@@ -104,26 +97,13 @@ static int io_msg_remote_post(struct io_ring_ctx *ctx, struct io_kiocb *req,
10497
return 0;
10598
}
10699

107-
static struct io_kiocb *io_msg_get_kiocb(struct io_ring_ctx *ctx)
108-
{
109-
struct io_kiocb *req = NULL;
110-
111-
if (spin_trylock(&ctx->msg_lock)) {
112-
req = io_alloc_cache_get(&ctx->msg_cache);
113-
spin_unlock(&ctx->msg_lock);
114-
if (req)
115-
return req;
116-
}
117-
return kmem_cache_alloc(req_cachep, GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
118-
}
119-
120100
static int io_msg_data_remote(struct io_ring_ctx *target_ctx,
121101
struct io_msg *msg)
122102
{
123103
struct io_kiocb *target;
124104
u32 flags = 0;
125105

126-
target = io_msg_get_kiocb(target_ctx);
106+
target = kmem_cache_alloc(req_cachep, GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
127107
if (unlikely(!target))
128108
return -ENOMEM;
129109

io_uring/notif.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -85,7 +85,7 @@ static int io_link_skb(struct sk_buff *skb, struct ubuf_info *uarg)
8585
return -EEXIST;
8686

8787
prev_nd = container_of(prev_uarg, struct io_notif_data, uarg);
88-
prev_notif = cmd_to_io_kiocb(nd);
88+
prev_notif = cmd_to_io_kiocb(prev_nd);
8989

9090
/* make sure all notifications can be finished in the same task_work */
9191
if (unlikely(notif->ctx != prev_notif->ctx ||

io_uring/poll.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -224,7 +224,7 @@ static int io_poll_check_events(struct io_kiocb *req, io_tw_token_t tw)
224224
{
225225
int v;
226226

227-
if (unlikely(io_should_terminate_tw()))
227+
if (unlikely(io_should_terminate_tw(req->ctx)))
228228
return -ECANCELED;
229229

230230
do {

io_uring/timeout.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -324,7 +324,7 @@ static void io_req_task_link_timeout(struct io_kiocb *req, io_tw_token_t tw)
324324
int ret;
325325

326326
if (prev) {
327-
if (!io_should_terminate_tw()) {
327+
if (!io_should_terminate_tw(req->ctx)) {
328328
struct io_cancel_data cd = {
329329
.ctx = req->ctx,
330330
.data = prev->cqe.user_data,

io_uring/uring_cmd.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -118,7 +118,7 @@ static void io_uring_cmd_work(struct io_kiocb *req, io_tw_token_t tw)
118118
struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
119119
unsigned int flags = IO_URING_F_COMPLETE_DEFER;
120120

121-
if (io_should_terminate_tw())
121+
if (io_should_terminate_tw(req->ctx))
122122
flags |= IO_URING_F_TASK_DEAD;
123123

124124
/* task_work executor checks the deferred list completion */

0 commit comments

Comments
 (0)