@@ -2464,8 +2464,8 @@ static inline bool __io_fill_cqe(struct io_ring_ctx *ctx, u64 user_data,
 	return io_cqring_event_overflow(ctx, user_data, res, cflags, 0, 0);
 }
 
-static inline bool __io_fill_cqe_req_filled(struct io_ring_ctx *ctx,
-					    struct io_kiocb *req)
+static inline bool __io_fill_cqe_req(struct io_ring_ctx *ctx,
+				     struct io_kiocb *req)
 {
 	struct io_uring_cqe *cqe;
 
@@ -2486,8 +2486,8 @@ static inline bool __io_fill_cqe_req_filled(struct io_ring_ctx *ctx,
 				req->cqe.res, req->cqe.flags, 0, 0);
 }
 
-static inline bool __io_fill_cqe32_req_filled(struct io_ring_ctx *ctx,
-					      struct io_kiocb *req)
+static inline bool __io_fill_cqe32_req(struct io_ring_ctx *ctx,
+				       struct io_kiocb *req)
 {
 	struct io_uring_cqe *cqe;
 	u64 extra1 = req->extra1;
@@ -2513,44 +2513,6 @@ static inline bool __io_fill_cqe32_req_filled(struct io_ring_ctx *ctx,
 				req->cqe.flags, extra1, extra2);
 }
 
-static inline bool __io_fill_cqe_req(struct io_kiocb *req, s32 res, u32 cflags)
-{
-	trace_io_uring_complete(req->ctx, req, req->cqe.user_data, res, cflags, 0, 0);
-	return __io_fill_cqe(req->ctx, req->cqe.user_data, res, cflags);
-}
-
-static inline void __io_fill_cqe32_req(struct io_kiocb *req, s32 res, u32 cflags,
-				       u64 extra1, u64 extra2)
-{
-	struct io_ring_ctx *ctx = req->ctx;
-	struct io_uring_cqe *cqe;
-
-	if (WARN_ON_ONCE(!(ctx->flags & IORING_SETUP_CQE32)))
-		return;
-	if (req->flags & REQ_F_CQE_SKIP)
-		return;
-
-	trace_io_uring_complete(ctx, req, req->cqe.user_data, res, cflags,
-				extra1, extra2);
-
-	/*
-	 * If we can't get a cq entry, userspace overflowed the
-	 * submission (by quite a lot). Increment the overflow count in
-	 * the ring.
-	 */
-	cqe = io_get_cqe(ctx);
-	if (likely(cqe)) {
-		WRITE_ONCE(cqe->user_data, req->cqe.user_data);
-		WRITE_ONCE(cqe->res, res);
-		WRITE_ONCE(cqe->flags, cflags);
-		WRITE_ONCE(cqe->big_cqe[0], extra1);
-		WRITE_ONCE(cqe->big_cqe[1], extra2);
-		return;
-	}
-
-	io_cqring_event_overflow(ctx, req->cqe.user_data, res, cflags, extra1, extra2);
-}
-
 static noinline bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data,
 				     s32 res, u32 cflags)
 {
@@ -2593,16 +2555,24 @@ static void __io_req_complete_put(struct io_kiocb *req)
 static void __io_req_complete_post(struct io_kiocb *req, s32 res,
 				   u32 cflags)
 {
-	if (!(req->flags & REQ_F_CQE_SKIP))
-		__io_fill_cqe_req(req, res, cflags);
+	if (!(req->flags & REQ_F_CQE_SKIP)) {
+		req->cqe.res = res;
+		req->cqe.flags = cflags;
+		__io_fill_cqe_req(req->ctx, req);
+	}
 	__io_req_complete_put(req);
 }
 
 static void __io_req_complete_post32(struct io_kiocb *req, s32 res,
 				     u32 cflags, u64 extra1, u64 extra2)
 {
-	if (!(req->flags & REQ_F_CQE_SKIP))
-		__io_fill_cqe32_req(req, res, cflags, extra1, extra2);
+	if (!(req->flags & REQ_F_CQE_SKIP)) {
+		req->cqe.res = res;
+		req->cqe.flags = cflags;
+		req->extra1 = extra1;
+		req->extra2 = extra2;
+		__io_fill_cqe32_req(req->ctx, req);
+	}
 	__io_req_complete_put(req);
 }
 
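
The two hunks above change the calling convention at the immediate-completion sites: rather than handing res/cflags (and the CQE32 extras) to the fill helper as arguments, the caller now stages them in the request's embedded cqe (and extra1/extra2) fields and passes only ctx and req. A minimal, self-contained userspace sketch of that staging pattern follows; the struct layout and the fill_cqe_* names are simplified stand-ins, not the kernel's io_uring definitions.

/*
 * Simplified model of the "stage completion data in the request, then
 * fill from req->cqe" pattern shown in the hunk above. Types and names
 * are illustrative stand-ins, not the kernel's io_uring definitions.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct cqe {
	uint64_t user_data;
	int32_t  res;
	uint32_t flags;
};

struct req {
	struct cqe cqe;     /* completion data staged by the caller */
	uint64_t   extra1;  /* CQE32 extras, staged the same way */
	uint64_t   extra2;
};

/* Old shape: completion values travel as parameters. */
static bool fill_cqe_old(struct req *r, int32_t res, uint32_t cflags)
{
	printf("old: user_data=%llu res=%d flags=%u\n",
	       (unsigned long long)r->cqe.user_data, res, cflags);
	return true;
}

/* New shape: the helper takes only the request and reads req->cqe. */
static bool fill_cqe_new(struct req *r)
{
	printf("new: user_data=%llu res=%d flags=%u\n",
	       (unsigned long long)r->cqe.user_data, r->cqe.res, r->cqe.flags);
	return true;
}

int main(void)
{
	struct req r = { .cqe = { .user_data = 42 } };

	fill_cqe_old(&r, 0, 1);

	/* Caller stages the values first, mirroring __io_req_complete_post(). */
	r.cqe.res = 0;
	r.cqe.flags = 1;
	fill_cqe_new(&r);
	return 0;
}

As far as the diff shows, the payoff is that the immediate-completion path and the deferred flush path below can share one pair of fill helpers once the completion data always lives in the request.
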
@@ -3207,9 +3177,9 @@ static void __io_submit_flush_completions(struct io_ring_ctx *ctx)
 
 		if (!(req->flags & REQ_F_CQE_SKIP)) {
 			if (!(ctx->flags & IORING_SETUP_CQE32))
-				__io_fill_cqe_req_filled(ctx, req);
+				__io_fill_cqe_req(ctx, req);
 			else
-				__io_fill_cqe32_req_filled(ctx, req);
+				__io_fill_cqe32_req(ctx, req);
 		}
 	}
 
@@ -3329,7 +3299,9 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
 		nr_events++;
 		if (unlikely(req->flags & REQ_F_CQE_SKIP))
 			continue;
-		__io_fill_cqe_req(req, req->cqe.res, io_put_kbuf(req, 0));
+
+		req->cqe.flags = io_put_kbuf(req, 0);
+		__io_fill_cqe_req(req->ctx, req);
 	}
 
 	if (unlikely(!nr_events))
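
The iopoll hunk applies the same staging idea to the poll-reap loop: it no longer passes res and the io_put_kbuf() result down as arguments, it only refreshes req->cqe.flags (res is already stored in the request by the time the loop reaps it) and calls the two-argument helper. A hypothetical, reduced model of such a reap loop, with a stand-in for io_put_kbuf(), could look like this:

/*
 * Hypothetical, reduced model of the reworked iopoll reap loop. The
 * REQ_F_CQE_SKIP check and the flags staging mirror the hunk above;
 * everything else (types, put_kbuf_flags) is made up for illustration.
 */
#include <stdint.h>
#include <stdio.h>

#define REQ_F_CQE_SKIP (1u << 0)

struct req {
	unsigned flags;
	struct { uint64_t user_data; int32_t res; uint32_t flags; } cqe;
};

/* Stand-in for io_put_kbuf(): returns buffer-related completion flags. */
static uint32_t put_kbuf_flags(struct req *r)
{
	(void)r;
	return 0;
}

static void fill_cqe(struct req *r)
{
	printf("cqe: user_data=%llu res=%d flags=%u\n",
	       (unsigned long long)r->cqe.user_data, r->cqe.res, r->cqe.flags);
}

static unsigned reap_completed(struct req *reqs, unsigned n)
{
	unsigned nr_events = 0;

	for (unsigned i = 0; i < n; i++) {
		struct req *r = &reqs[i];

		nr_events++;
		if (r->flags & REQ_F_CQE_SKIP)
			continue;

		/* Stage the flags, then post from req->cqe. */
		r->cqe.flags = put_kbuf_flags(r);
		fill_cqe(r);
	}
	return nr_events;
}

int main(void)
{
	struct req reqs[2] = {
		{ .cqe = { .user_data = 1 } },
		{ .flags = REQ_F_CQE_SKIP, .cqe = { .user_data = 2 } },
	};

	printf("events: %u\n", reap_completed(reqs, 2));
	return 0;
}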