Skip to content

Commit 38aa434

Browse files
FirstLoveLife authored and axboe committed
io_uring/io-wq: add exit-on-idle state
io-wq uses an idle timeout to shrink the pool, but keeps the last worker around indefinitely to avoid churn. For tasks that used io_uring for file I/O and then stop using io_uring, this can leave an iou-wrk-* thread behind even after all io_uring instances are gone. This is unnecessary overhead and also gets in the way of process checkpoint/restore. Add an exit-on-idle state that makes all io-wq workers exit as soon as they become idle, and provide io_wq_set_exit_on_idle() to toggle it. Signed-off-by: Li Chen <me@linux.beauty> Signed-off-by: Jens Axboe <axboe@kernel.dk>
1 parent 806ae93 commit 38aa434

2 files changed

Lines changed: 26 additions & 2 deletions

File tree

io_uring/io-wq.c

Lines changed: 25 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -35,6 +35,7 @@ enum {
3535

3636
/* Bits for io_wq->state, manipulated with the atomic bitops. */
enum {
	IO_WQ_BIT_EXIT		= 0,	/* wq exiting */
	IO_WQ_BIT_EXIT_ON_IDLE	= 1,	/* allow all workers to exit on idle */
};
3940

4041
enum {
@@ -707,9 +708,13 @@ static int io_wq_worker(void *data)
707708
raw_spin_lock(&acct->workers_lock);
708709
/*
709710
* Last sleep timed out. Exit if we're not the last worker,
710-
* or if someone modified our affinity.
711+
* or if someone modified our affinity. If wq is marked
712+
* idle-exit, drop the worker as well. This is used to avoid
713+
* keeping io-wq workers around for tasks that no longer have
714+
* any active io_uring instances.
711715
*/
712-
if (last_timeout && (exit_mask || acct->nr_workers > 1)) {
716+
if ((last_timeout && (exit_mask || acct->nr_workers > 1)) ||
717+
test_bit(IO_WQ_BIT_EXIT_ON_IDLE, &wq->state)) {
713718
acct->nr_workers--;
714719
raw_spin_unlock(&acct->workers_lock);
715720
__set_current_state(TASK_RUNNING);
@@ -967,6 +972,24 @@ static bool io_wq_worker_wake(struct io_worker *worker, void *data)
967972
return false;
968973
}
969974

975+
/*
 * Toggle the exit-on-idle state of @wq. When enabled, every io-wq worker
 * exits as soon as it goes idle instead of the pool keeping the last
 * worker around. On the disable->enable transition, idle workers are
 * woken up so they notice the new state and exit. A no-op if the wq has
 * no backing task.
 */
void io_wq_set_exit_on_idle(struct io_wq *wq, bool enable)
{
	if (!wq->task)
		return;

	if (enable) {
		/* Only wake workers on the 0 -> 1 transition */
		if (!test_and_set_bit(IO_WQ_BIT_EXIT_ON_IDLE, &wq->state)) {
			rcu_read_lock();
			io_wq_for_each_worker(wq, io_wq_worker_wake, NULL);
			rcu_read_unlock();
		}
	} else {
		clear_bit(IO_WQ_BIT_EXIT_ON_IDLE, &wq->state);
	}
}
992+
970993
static void io_run_cancel(struct io_wq_work *work, struct io_wq *wq)
971994
{
972995
do {

io_uring/io-wq.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -41,6 +41,7 @@ struct io_wq_data {
4141
struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data);
4242
void io_wq_exit_start(struct io_wq *wq);
4343
void io_wq_put_and_exit(struct io_wq *wq);
44+
void io_wq_set_exit_on_idle(struct io_wq *wq, bool enable);
4445

4546
void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work);
4647
void io_wq_hash_work(struct io_wq_work *work, void *val);

0 commit comments

Comments
 (0)