@@ -544,6 +544,48 @@ static void io_put_zcrx_ifq(struct io_zcrx_ifq *ifq)
 	io_zcrx_ifq_free(ifq);
 }
 
+static void io_zcrx_return_niov_freelist(struct net_iov *niov)
+{
+	struct io_zcrx_area *area = io_zcrx_iov_to_area(niov);
+
+	spin_lock_bh(&area->freelist_lock);
+	area->freelist[area->free_count++] = net_iov_idx(niov);
+	spin_unlock_bh(&area->freelist_lock);
+}
+
+static void io_zcrx_return_niov(struct net_iov *niov)
+{
+	netmem_ref netmem = net_iov_to_netmem(niov);
+
+	if (!niov->desc.pp) {
+		/* copy fallback allocated niovs */
+		io_zcrx_return_niov_freelist(niov);
+		return;
+	}
+	page_pool_put_unrefed_netmem(niov->desc.pp, netmem, -1, false);
+}
+
+static void io_zcrx_scrub(struct io_zcrx_ifq *ifq)
+{
+	struct io_zcrx_area *area = ifq->area;
+	int i;
+
+	if (!area)
+		return;
+
+	/* Reclaim back all buffers given to the user space. */
+	for (i = 0; i < area->nia.num_niovs; i++) {
+		struct net_iov *niov = &area->nia.niovs[i];
+		int nr;
+
+		if (!atomic_read(io_get_user_counter(niov)))
+			continue;
+		nr = atomic_xchg(io_get_user_counter(niov), 0);
+		if (nr && !page_pool_unref_netmem(net_iov_to_netmem(niov), nr))
+			io_zcrx_return_niov(niov);
+	}
+}
+
 struct io_mapped_region *io_zcrx_get_region(struct io_ring_ctx *ctx,
 					    unsigned int id)
 {
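
Not part of the patch: a minimal userspace sketch of the two return paths in io_zcrx_return_niov() above, assuming hypothetical fake_* types in place of page_pool, io_zcrx_area and net_iov, and a pthread mutex in place of the bh spinlock. The shape is the same: a buffer with no owning page pool is a copy-fallback allocation and goes back onto the area's locked freelist; pool-backed buffers are handed back to their pool. Builds with cc -std=c11 -pthread.

#include <pthread.h>
#include <stdio.h>

struct fake_pool {			/* hypothetical stand-in for struct page_pool */
	const char *name;
};

struct fake_area {			/* hypothetical stand-in for struct io_zcrx_area */
	pthread_mutex_t freelist_lock;	/* kernel side uses spin_lock_bh() here */
	int freelist[16];
	int free_count;
};

struct fake_buf {			/* hypothetical stand-in for struct net_iov */
	struct fake_pool *pp;		/* NULL for copy-fallback buffers */
	struct fake_area *area;
	int idx;
};

/* Copy-fallback path: push the buffer index back on the area's freelist. */
static void buf_return_freelist(struct fake_buf *b)
{
	pthread_mutex_lock(&b->area->freelist_lock);
	b->area->freelist[b->area->free_count++] = b->idx;
	pthread_mutex_unlock(&b->area->freelist_lock);
}

static void buf_return(struct fake_buf *b)
{
	if (!b->pp) {
		buf_return_freelist(b);
		return;
	}
	/* Pool-backed path: the pool reclaims the buffer itself. */
	printf("buf %d returned to pool %s\n", b->idx, b->pp->name);
}

int main(void)
{
	struct fake_pool pool = { "rxpool" };
	struct fake_area area = { PTHREAD_MUTEX_INITIALIZER, {0}, 0 };
	struct fake_buf pooled = { &pool, &area, 0 };
	struct fake_buf fallback = { NULL, &area, 1 };

	buf_return(&pooled);	/* goes through the pool */
	buf_return(&fallback);	/* lands on area.freelist */
	printf("freelist count: %d\n", area.free_count);
	return 0;
}
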
@@ -684,48 +726,6 @@ static struct net_iov *__io_zcrx_get_free_niov(struct io_zcrx_area *area)
 	return &area->nia.niovs[niov_idx];
 }
 
-static void io_zcrx_return_niov_freelist(struct net_iov *niov)
-{
-	struct io_zcrx_area *area = io_zcrx_iov_to_area(niov);
-
-	spin_lock_bh(&area->freelist_lock);
-	area->freelist[area->free_count++] = net_iov_idx(niov);
-	spin_unlock_bh(&area->freelist_lock);
-}
-
-static void io_zcrx_return_niov(struct net_iov *niov)
-{
-	netmem_ref netmem = net_iov_to_netmem(niov);
-
-	if (!niov->desc.pp) {
-		/* copy fallback allocated niovs */
-		io_zcrx_return_niov_freelist(niov);
-		return;
-	}
-	page_pool_put_unrefed_netmem(niov->desc.pp, netmem, -1, false);
-}
-
-static void io_zcrx_scrub(struct io_zcrx_ifq *ifq)
-{
-	struct io_zcrx_area *area = ifq->area;
-	int i;
-
-	if (!area)
-		return;
-
-	/* Reclaim back all buffers given to the user space. */
-	for (i = 0; i < area->nia.num_niovs; i++) {
-		struct net_iov *niov = &area->nia.niovs[i];
-		int nr;
-
-		if (!atomic_read(io_get_user_counter(niov)))
-			continue;
-		nr = atomic_xchg(io_get_user_counter(niov), 0);
-		if (nr && !page_pool_unref_netmem(net_iov_to_netmem(niov), nr))
-			io_zcrx_return_niov(niov);
-	}
-}
-
 void io_unregister_zcrx_ifqs(struct io_ring_ctx *ctx)
 {
 	struct io_zcrx_ifq *ifq;
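
Likewise hypothetical, a standalone sketch of the reclaim pattern in io_zcrx_scrub() above: each buffer keeps an atomic count of references handed out to user space, and teardown steals that whole count with an atomic exchange, then drops it against the pool's reference count in a single call, reclaiming the buffer if those were the last references. Exchanging instead of reading and resetting separately means every user reference is dropped exactly once even if the counter moves concurrently, which appears to be the point of atomic_xchg() in the kernel code.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct buf {				/* hypothetical stand-in for struct net_iov */
	atomic_int user_refs;		/* references handed out to user space */
	atomic_int pool_refs;		/* references tracked by the buffer pool */
};

/* Drop @nr pool references; true when the buffer just became free. */
static bool buf_unref(struct buf *b, int nr)
{
	return atomic_fetch_sub(&b->pool_refs, nr) == nr;
}

static void scrub(struct buf *bufs, int n)
{
	for (int i = 0; i < n; i++) {
		int nr;

		/* Fast path: user space never saw this buffer. */
		if (!atomic_load(&bufs[i].user_refs))
			continue;
		/* Steal the entire user-held count atomically ... */
		nr = atomic_exchange(&bufs[i].user_refs, 0);
		/* ... and drop it in one go, reclaiming on the last ref. */
		if (nr && buf_unref(&bufs[i], nr))
			printf("buf %d reclaimed\n", i);
	}
}

int main(void)
{
	struct buf bufs[2] = {
		{ 3, 3 },	/* three references held by user space */
		{ 0, 1 },	/* never handed out; scrub skips it */
	};

	scrub(bufs, 2);		/* prints: buf 0 reclaimed */
	return 0;
}
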