@@ -146,7 +146,6 @@ static bool io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
146146 struct task_struct * task ,
147147 bool cancel_all );
148148
149- static void io_clean_op (struct io_kiocb * req );
150149static void io_queue_sqe (struct io_kiocb * req );
151150static void io_move_task_work_from_local (struct io_ring_ctx * ctx );
152151static void __io_submit_flush_completions (struct io_ring_ctx * ctx );
@@ -367,6 +366,39 @@ static bool req_need_defer(struct io_kiocb *req, u32 seq)
367366 return false;
368367}
369368
/*
 * Release any per-request resources flagged on req->flags, then clear
 * the cleanup flags.  Called once a request no longer needs the
 * resources it accumulated during issue.
 */
static void io_clean_op(struct io_kiocb *req)
{
	/* Selected provided buffers are returned under the completion lock. */
	if (req->flags & REQ_F_BUFFER_SELECTED) {
		spin_lock(&req->ctx->completion_lock);
		io_put_kbuf_comp(req);
		spin_unlock(&req->ctx->completion_lock);
	}

	/* Opcode-specific teardown, if the opcode registered one. */
	if (req->flags & REQ_F_NEED_CLEANUP) {
		const struct io_cold_def *def = &io_cold_defs[req->opcode];

		if (def->cleanup)
			def->cleanup(req);
	}
	/*
	 * Async poll state: free the nested double_poll entry before the
	 * apoll container itself, then clear the pointer.
	 */
	if ((req->flags & REQ_F_POLLED) && req->apoll) {
		kfree(req->apoll->double_poll);
		kfree(req->apoll);
		req->apoll = NULL;
	}
	/* Drop this request from the task's inflight accounting. */
	if (req->flags & REQ_F_INFLIGHT) {
		struct io_uring_task *tctx = req->task->io_uring;

		atomic_dec(&tctx->inflight_tracked);
	}
	/* Release the credential reference taken at submission, if any. */
	if (req->flags & REQ_F_CREDS)
		put_cred(req->creds);
	if (req->flags & REQ_F_ASYNC_DATA) {
		kfree(req->async_data);
		req->async_data = NULL;
	}
	req->flags &= ~IO_REQ_CLEAN_FLAGS;
}
401+
370402static inline void io_req_track_inflight (struct io_kiocb * req )
371403{
372404 if (!(req -> flags & REQ_F_INFLIGHT )) {
@@ -1823,39 +1855,6 @@ static __cold void io_drain_req(struct io_kiocb *req)
18231855 spin_unlock (& ctx -> completion_lock );
18241856}
18251857
/*
 * Release any per-request resources flagged on req->flags, then clear
 * the cleanup flags.  Called once a request no longer needs the
 * resources it accumulated during issue.
 */
static void io_clean_op(struct io_kiocb *req)
{
	/* Selected provided buffers are returned under the completion lock. */
	if (req->flags & REQ_F_BUFFER_SELECTED) {
		spin_lock(&req->ctx->completion_lock);
		io_put_kbuf_comp(req);
		spin_unlock(&req->ctx->completion_lock);
	}

	/* Opcode-specific teardown, if the opcode registered one. */
	if (req->flags & REQ_F_NEED_CLEANUP) {
		const struct io_cold_def *def = &io_cold_defs[req->opcode];

		if (def->cleanup)
			def->cleanup(req);
	}
	/*
	 * Async poll state: free the nested double_poll entry before the
	 * apoll container itself, then clear the pointer.
	 */
	if ((req->flags & REQ_F_POLLED) && req->apoll) {
		kfree(req->apoll->double_poll);
		kfree(req->apoll);
		req->apoll = NULL;
	}
	/* Drop this request from the task's inflight accounting. */
	if (req->flags & REQ_F_INFLIGHT) {
		struct io_uring_task *tctx = req->task->io_uring;

		atomic_dec(&tctx->inflight_tracked);
	}
	/* Release the credential reference taken at submission, if any. */
	if (req->flags & REQ_F_CREDS)
		put_cred(req->creds);
	if (req->flags & REQ_F_ASYNC_DATA) {
		kfree(req->async_data);
		req->async_data = NULL;
	}
	req->flags &= ~IO_REQ_CLEAN_FLAGS;
}
1858-
18591858static bool io_assign_file (struct io_kiocb * req , const struct io_issue_def * def ,
18601859 unsigned int issue_flags )
18611860{
0 commit comments