
Commit 5b0a6ac

isilence authored and axboe committed
io_uring: simplify task_work func
Since we don't really use req->task_work anymore, get rid of it together with the nasty ->func aliasing between ->io_task_work and ->task_work, and hide ->fallback_node inside of io_task_work.

Also, as task_work is gone now, replace the task_work_func_t callback type with a function taking an io_kiocb directly, which avoids casting and simplifies the code.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
1 parent 9011bf9 commit 5b0a6ac

1 file changed: fs/io_uring.c (28 additions, 44 deletions)
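To see the shape of the change before reading the diff, here is a minimal before/after sketch (illustrative only; the callback name is hypothetical): the old task_work callbacks received a struct callback_head and had to recover the request via container_of(), while the new io_req_tw_func_t callbacks are handed the io_kiocb directly.

	/* Before: callback gets a callback_head and must dig out the request. */
	static void example_tw_cb_old(struct callback_head *cb)
	{
		struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
		/* ... operate on req ... */
	}

	/* After: no aliasing, no container_of(); the request is the argument. */
	static void example_tw_cb_new(struct io_kiocb *req)
	{
		/* ... operate on req ... */
	}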
@@ -785,9 +785,14 @@ struct async_poll {
 	struct io_poll_iocb	*double_poll;
 };
 
+typedef void (*io_req_tw_func_t)(struct io_kiocb *req);
+
 struct io_task_work {
-	struct io_wq_work_node	node;
-	task_work_func_t	func;
+	union {
+		struct io_wq_work_node	node;
+		struct llist_node	fallback_node;
+	};
+	io_req_tw_func_t	func;
 };
 
 enum {
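The union above is safe because the two list hooks are never live at the same time: ->node links a request into the per-task work list on the normal path, while ->fallback_node links it into ctx->fallback_llist only when adding task_work fails. A rough sketch of the two enqueue paths (the normal-path line is an assumption about io_req_task_work_add(), which this diff does not touch; the fallback lines come from the io_req_task_work_add_fallback() hunk further below):

	/* normal path (assumed, inside io_req_task_work_add()): */
	wq_list_add_tail(&req->io_task_work.node, &tctx->task_list);

	/* fallback path (from io_req_task_work_add_fallback() below): */
	req->io_task_work.func = cb;
	if (llist_add(&req->io_task_work.fallback_node, &req->ctx->fallback_llist))
		schedule_delayed_work(&req->ctx->fallback_work, 1);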
@@ -850,18 +855,13 @@ struct io_kiocb {
 
 	/* used with ctx->iopoll_list with reads/writes */
 	struct list_head		inflight_entry;
-	union {
-		struct io_task_work	io_task_work;
-		struct callback_head	task_work;
-	};
+	struct io_task_work		io_task_work;
 	/* for polled requests, i.e. IORING_OP_POLL_ADD and async armed poll */
 	struct hlist_node		hash_node;
 	struct async_poll		*apoll;
 	struct io_wq_work		work;
 	const struct cred		*creds;
 
-	struct llist_node		fallback_node;
-
 	/* store used ubuf, so we can prevent reloading */
 	struct io_mapped_ubuf	*imu;
 };
@@ -1935,7 +1935,7 @@ static void tctx_task_work(struct callback_head *cb)
 			ctx = req->ctx;
 			percpu_ref_get(&ctx->refs);
 		}
-		req->task_work.func(&req->task_work);
+		req->io_task_work.func(req);
 		node = next;
 	}
 	if (wq_list_empty(&tctx->task_list)) {
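With the callback typed as io_req_tw_func_t, the dispatch loop above can pass req straight through; the request itself is recovered once, by the loop, from the embedded list node (presumably via container_of(), as the surrounding loop code is not part of this diff):

	/* assumed loop-body context in tctx_task_work(): */
	struct io_kiocb *req = container_of(node, struct io_kiocb,
					    io_task_work.node);
	req->io_task_work.func(req);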
@@ -2006,16 +2006,16 @@ static int io_req_task_work_add(struct io_kiocb *req)
 }
 
 static void io_req_task_work_add_fallback(struct io_kiocb *req,
-					  task_work_func_t cb)
+					  io_req_tw_func_t cb)
 {
-	init_task_work(&req->task_work, cb);
-	if (llist_add(&req->fallback_node, &req->ctx->fallback_llist))
+	req->io_task_work.func = cb;
+	if (llist_add(&req->io_task_work.fallback_node,
+		      &req->ctx->fallback_llist))
 		schedule_delayed_work(&req->ctx->fallback_work, 1);
 }
 
-static void io_req_task_cancel(struct callback_head *cb)
+static void io_req_task_cancel(struct io_kiocb *req)
 {
-	struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
 	struct io_ring_ctx *ctx = req->ctx;
 
 	/* ctx is guaranteed to stay alive while we hold uring_lock */
@@ -2024,7 +2024,7 @@ static void io_req_task_cancel(struct callback_head *cb)
 	mutex_unlock(&ctx->uring_lock);
 }
 
-static void __io_req_task_submit(struct io_kiocb *req)
+static void io_req_task_submit(struct io_kiocb *req)
 {
 	struct io_ring_ctx *ctx = req->ctx;
 
@@ -2037,25 +2037,18 @@ static void __io_req_task_submit(struct io_kiocb *req)
 	mutex_unlock(&ctx->uring_lock);
 }
 
-static void io_req_task_submit(struct callback_head *cb)
-{
-	struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
-
-	__io_req_task_submit(req);
-}
-
 static void io_req_task_queue_fail(struct io_kiocb *req, int ret)
 {
 	req->result = ret;
-	req->task_work.func = io_req_task_cancel;
+	req->io_task_work.func = io_req_task_cancel;
 
 	if (unlikely(io_req_task_work_add(req)))
 		io_req_task_work_add_fallback(req, io_req_task_cancel);
 }
 
 static void io_req_task_queue(struct io_kiocb *req)
 {
-	req->task_work.func = io_req_task_submit;
+	req->io_task_work.func = io_req_task_submit;
 
 	if (unlikely(io_req_task_work_add(req)))
 		io_req_task_queue_fail(req, -ECANCELED);
@@ -2169,18 +2162,11 @@ static inline void io_put_req(struct io_kiocb *req)
 		io_free_req(req);
 }
 
-static void io_put_req_deferred_cb(struct callback_head *cb)
-{
-	struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
-
-	io_free_req(req);
-}
-
 static void io_free_req_deferred(struct io_kiocb *req)
 {
-	req->task_work.func = io_put_req_deferred_cb;
+	req->io_task_work.func = io_free_req;
 	if (unlikely(io_req_task_work_add(req)))
-		io_req_task_work_add_fallback(req, io_put_req_deferred_cb);
+		io_req_task_work_add_fallback(req, io_free_req);
 }
 
 static inline void io_put_req_deferred(struct io_kiocb *req, int refs)
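A nice knock-on effect in the hunk above: io_free_req() already takes a struct io_kiocb *, so it matches io_req_tw_func_t as-is. The io_put_req_deferred_cb() trampoline existed only to do the container_of() conversion, and with that gone the function itself can be stored as the callback (sketch; the io_free_req() declaration is assumed from elsewhere in io_uring.c):

	static void io_free_req(struct io_kiocb *req);	/* defined earlier in the file */

	req->io_task_work.func = io_free_req;		/* no wrapper needed */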
@@ -2466,8 +2452,8 @@ static void io_fallback_req_func(struct work_struct *work)
 	struct llist_node *node = llist_del_all(&ctx->fallback_llist);
 	struct io_kiocb *req, *tmp;
 
-	llist_for_each_entry_safe(req, tmp, node, fallback_node)
-		req->task_work.func(&req->task_work);
+	llist_for_each_entry_safe(req, tmp, node, io_task_work.fallback_node)
+		req->io_task_work.func(req);
 }
 
 static void __io_complete_rw(struct io_kiocb *req, long res, long res2,
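Moving fallback_node inside io_task_work costs nothing on the consumer side, because the llist iterator accepts a nested member path: llist_entry() is just container_of(), so io_task_work.fallback_node resolves to the enclosing io_kiocb the same way a top-level member would. From <linux/llist.h> (quoted from memory, so treat as a sketch):

	#define llist_entry(ptr, type, member)		\
		container_of(ptr, type, member)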
@@ -4835,7 +4821,7 @@ struct io_poll_table {
 };
 
 static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
-			   __poll_t mask, task_work_func_t func)
+			   __poll_t mask, io_req_tw_func_t func)
 {
 	int ret;
 
@@ -4848,7 +4834,7 @@ static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
 	list_del_init(&poll->wait.entry);
 
 	req->result = mask;
-	req->task_work.func = func;
+	req->io_task_work.func = func;
 
 	/*
 	 * If this fails, then the task is exiting. When a task exits, the
@@ -4945,9 +4931,8 @@ static bool io_poll_complete(struct io_kiocb *req, __poll_t mask)
 	return !(flags & IORING_CQE_F_MORE);
 }
 
-static void io_poll_task_func(struct callback_head *cb)
+static void io_poll_task_func(struct io_kiocb *req)
 {
-	struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
 	struct io_ring_ctx *ctx = req->ctx;
 	struct io_kiocb *nxt;
 
@@ -4969,7 +4954,7 @@ static void io_poll_task_func(struct callback_head *cb)
 		if (done) {
 			nxt = io_put_req_find_next(req);
 			if (nxt)
-				__io_req_task_submit(nxt);
+				io_req_task_submit(nxt);
 		}
 	}
 }
@@ -5078,9 +5063,8 @@ static void io_async_queue_proc(struct file *file, struct wait_queue_head *head,
 	__io_queue_proc(&apoll->poll, pt, head, &apoll->double_poll);
 }
 
-static void io_async_task_func(struct callback_head *cb)
+static void io_async_task_func(struct io_kiocb *req)
 {
-	struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
 	struct async_poll *apoll = req->apoll;
 	struct io_ring_ctx *ctx = req->ctx;
 
@@ -5096,7 +5080,7 @@ static void io_async_task_func(struct callback_head *cb)
 	spin_unlock_irq(&ctx->completion_lock);
 
 	if (!READ_ONCE(apoll->poll.canceled))
-		__io_req_task_submit(req);
+		io_req_task_submit(req);
 	else
 		io_req_complete_failed(req, -ECANCELED);
 }
@@ -8817,7 +8801,7 @@ static void io_ring_exit_work(struct work_struct *work)
 	/*
 	 * Some may use context even when all refs and requests have been put,
 	 * and they are free to do so while still holding uring_lock or
-	 * completion_lock, see __io_req_task_submit(). Apart from other work,
+	 * completion_lock, see io_req_task_submit(). Apart from other work,
 	 * this lock/unlock section also waits them to finish.
 	 */
 	mutex_lock(&ctx->uring_lock);
