@@ -465,7 +465,8 @@ struct io_ring_ctx {
465465 struct mm_struct * mm_account ;
466466
467467 /* ctx exit and cancelation */
468- struct callback_head * exit_task_work ;
468+ struct llist_head fallback_llist ;
469+ struct delayed_work fallback_work ;
469470 struct work_struct exit_work ;
470471 struct list_head tctx_list ;
471472 struct completion ref_comp ;
@@ -859,6 +860,8 @@ struct io_kiocb {
859860 struct io_wq_work work ;
860861 const struct cred * creds ;
861862
863+ struct llist_node fallback_node ;
864+
862865 /* store used ubuf, so we can prevent reloading */
863866 struct io_mapped_ubuf * imu ;
864867};
@@ -1071,6 +1074,8 @@ static void io_submit_flush_completions(struct io_ring_ctx *ctx);
10711074static bool io_poll_remove_waitqs (struct io_kiocb * req );
10721075static int io_req_prep_async (struct io_kiocb * req );
10731076
1077+ static void io_fallback_req_func (struct work_struct * unused );
1078+
10741079static struct kmem_cache * req_cachep ;
10751080
10761081static const struct file_operations io_uring_fops ;
@@ -1202,6 +1207,7 @@ static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
12021207 INIT_LIST_HEAD (& ctx -> tctx_list );
12031208 INIT_LIST_HEAD (& ctx -> submit_state .comp .free_list );
12041209 INIT_LIST_HEAD (& ctx -> locked_free_list );
1210+ INIT_DELAYED_WORK (& ctx -> fallback_work , io_fallback_req_func );
12051211 return ctx ;
12061212err :
12071213 kfree (ctx -> dummy_ubuf );
@@ -1999,44 +2005,12 @@ static int io_req_task_work_add(struct io_kiocb *req)
19992005 return ret ;
20002006}
20012007
/*
 * Atomically detach and run every task_work callback queued on *work_head.
 *
 * The outer loop re-checks the head after each drain so that work queued
 * concurrently (while callbacks were running) is also executed before we
 * return.  Returns true if at least one callback was run.
 */
static bool io_run_task_work_head(struct callback_head **work_head)
{
	struct callback_head *work, *next;
	bool executed = false;

	do {
		/* xchg() detaches the whole list atomically vs concurrent adders */
		work = xchg(work_head, NULL);
		if (!work)
			break;

		do {
			/* read ->next before ->func(): the callback may free 'work' */
			next = work->next;
			work->func(work);
			work = next;
			/* callbacks can be numerous; don't hog the CPU */
			cond_resched();
		} while (work);
		executed = true;
	} while (1);

	return executed;
}
2023-
/*
 * Lock-free push of @task_work onto the singly-linked stack rooted at
 * *work_head.  Standard cmpxchg() retry loop: re-read the head and retry
 * if another CPU updated it between the READ_ONCE() and the cmpxchg().
 */
static void io_task_work_add_head(struct callback_head **work_head,
				  struct callback_head *task_work)
{
	struct callback_head *head;

	do {
		head = READ_ONCE(*work_head);
		task_work->next = head;
	} while (cmpxchg(work_head, head, task_work) != head);
}
2034-
/*
 * Queue @req's task_work on the per-ctx fallback list, used when the
 * normal task_work delivery path is unavailable (e.g. the task is
 * exiting).  The callback @cb will be invoked later from workqueue
 * context by io_fallback_req_func().
 */
static void io_req_task_work_add_fallback(struct io_kiocb *req,
					  task_work_func_t cb)
{
	init_task_work(&req->task_work, cb);
	/*
	 * llist_add() returns true only when the list was previously
	 * empty, so the delayed work is scheduled exactly once per
	 * batch of queued requests.
	 */
	if (llist_add(&req->fallback_node, &req->ctx->fallback_llist))
		schedule_delayed_work(&req->ctx->fallback_work, 1);
}
20412015
20422016static void io_req_task_cancel (struct callback_head * cb )
@@ -2485,6 +2459,17 @@ static bool io_rw_should_reissue(struct io_kiocb *req)
24852459}
24862460#endif
24872461
/*
 * Workqueue handler for the ctx fallback list: runs the task_work of
 * every request queued via io_req_task_work_add_fallback().
 *
 * llist_del_all() atomically detaches the entire pending list, so new
 * arrivals after this point re-arm the delayed work themselves.  The
 * _safe iterator is required because invoking ->func() may complete and
 * free the request, invalidating its fallback_node link.
 */
static void io_fallback_req_func(struct work_struct *work)
{
	struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx,
						fallback_work.work);
	struct llist_node *node = llist_del_all(&ctx->fallback_llist);
	struct io_kiocb *req, *tmp;

	llist_for_each_entry_safe(req, tmp, node, fallback_node)
		req->task_work.func(&req->task_work);
}
2472+
24882473static void __io_complete_rw (struct io_kiocb * req , long res , long res2 ,
24892474 unsigned int issue_flags )
24902475{
@@ -8767,11 +8752,6 @@ static int io_unregister_personality(struct io_ring_ctx *ctx, unsigned id)
87678752 return - EINVAL ;
87688753}
87698754
8770- static inline bool io_run_ctx_fallback (struct io_ring_ctx * ctx )
8771- {
8772- return io_run_task_work_head (& ctx -> exit_task_work );
8773- }
8774-
87758755struct io_tctx_exit {
87768756 struct callback_head task_work ;
87778757 struct completion completion ;
@@ -9036,7 +9016,6 @@ static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
90369016 ret |= io_kill_timeouts (ctx , task , cancel_all );
90379017 if (task )
90389018 ret |= io_run_task_work ();
9039- ret |= io_run_ctx_fallback (ctx );
90409019 if (!ret )
90419020 break ;
90429021 cond_resched ();
0 commit comments