@@ -719,7 +719,7 @@ static void io_put_task_remote(struct task_struct *task, int nr)
 	struct io_uring_task *tctx = task->io_uring;
 
 	percpu_counter_sub(&tctx->inflight, nr);
-	if (unlikely(atomic_read(&tctx->in_idle)))
+	if (unlikely(atomic_read(&tctx->in_cancel)))
 		wake_up(&tctx->wait);
 	put_task_struct_many(task, nr);
 }
@@ -1258,8 +1258,8 @@ void tctx_task_work(struct callback_head *cb)
 
 	ctx_flush_and_put(ctx, &uring_locked);
 
-	/* relaxed read is enough as only the task itself sets ->in_idle */
-	if (unlikely(atomic_read(&tctx->in_idle)))
+	/* relaxed read is enough as only the task itself sets ->in_cancel */
+	if (unlikely(atomic_read(&tctx->in_cancel)))
 		io_uring_drop_tctx_refs(current);
 
 	trace_io_uring_task_work_run(tctx, count, loops);
@@ -1291,7 +1291,7 @@ static void io_req_local_work_add(struct io_kiocb *req)
 	/* needed for the following wake up */
 	smp_mb__after_atomic();
 
-	if (unlikely(atomic_read(&req->task->io_uring->in_idle))) {
+	if (unlikely(atomic_read(&req->task->io_uring->in_cancel))) {
 		io_move_task_work_from_local(ctx);
 		goto put_ref;
 	}
@@ -2937,12 +2937,12 @@ static __cold void io_tctx_exit_cb(struct callback_head *cb)
 
 	work = container_of(cb, struct io_tctx_exit, task_work);
 	/*
-	 * When @in_idle, we're in cancellation and it's racy to remove the
+	 * When @in_cancel, we're in cancellation and it's racy to remove the
 	 * node. It'll be removed by the end of cancellation, just ignore it.
 	 * tctx can be NULL if the queueing of this task_work raced with
 	 * work cancelation off the exec path.
 	 */
-	if (tctx && !atomic_read(&tctx->in_idle))
+	if (tctx && !atomic_read(&tctx->in_cancel))
 		io_uring_del_tctx_node((unsigned long)work->ctx);
 	complete(&work->completion);
 }
@@ -3210,7 +3210,7 @@ __cold void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd)
 	if (tctx->io_wq)
 		io_wq_exit_start(tctx->io_wq);
 
-	atomic_inc(&tctx->in_idle);
+	atomic_inc(&tctx->in_cancel);
 	do {
 		bool loop = false;
@@ -3261,9 +3261,9 @@ __cold void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd)
 	if (cancel_all) {
 		/*
 		 * We shouldn't run task_works after cancel, so just leave
-		 * ->in_idle set for normal exit.
+		 * ->in_cancel set for normal exit.
 		 */
-		atomic_dec(&tctx->in_idle);
+		atomic_dec(&tctx->in_cancel);
 		/* for exec all current's requests should be gone, kill tctx */
 		__io_uring_free(current);
 	}
0 commit comments