 #define TSNEP_TX_TYPE_SKB_FRAG	BIT(1)
 #define TSNEP_TX_TYPE_XDP_TX	BIT(2)
 #define TSNEP_TX_TYPE_XDP_NDO	BIT(3)
+#define TSNEP_TX_TYPE_XDP	(TSNEP_TX_TYPE_XDP_TX | TSNEP_TX_TYPE_XDP_NDO)
+#define TSNEP_TX_TYPE_XSK	BIT(4)
 
 #define TSNEP_XDP_TX		BIT(0)
 #define TSNEP_XDP_REDIRECT	BIT(1)
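
The new TSNEP_TX_TYPE_XSK type bit, together with the entry->zc, entry->len and tx->xsk_pool accesses further down, implies matching additions in the driver's private header (tsnep.h) that are not part of this excerpt. A minimal sketch of what those structures would have to provide, with field names inferred purely from their usage in this patch, not taken from the real header:

/* Sketch only: just the fields this patch touches, other members omitted. */
struct tsnep_tx_entry {
	struct tsnep_tx_desc *desc;	/* hardware descriptor backing this slot */
	u32 type;			/* TSNEP_TX_TYPE_* */
	union {				/* "xdpf and zc are union with skb" */
		struct sk_buff *skb;
		struct xdp_frame *xdpf;
		bool zc;		/* slot holds an XSK pool buffer */
	};
	u32 len;
	/* remaining fields (DMA handle, properties, ...) omitted here */
};

struct tsnep_tx {
	/* ... existing members ... */
	struct xsk_buff_pool *xsk_pool;	/* set while AF_XDP zero-copy is active */
};
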
@@ -322,13 +324,47 @@ static void tsnep_tx_init(struct tsnep_tx *tx)
 	tx->increment_owner_counter = TSNEP_RING_SIZE - 1;
 }
 
+static void tsnep_tx_enable(struct tsnep_tx *tx)
+{
+	struct netdev_queue *nq;
+
+	nq = netdev_get_tx_queue(tx->adapter->netdev, tx->queue_index);
+
+	__netif_tx_lock_bh(nq);
+	netif_tx_wake_queue(nq);
+	__netif_tx_unlock_bh(nq);
+}
+
+static void tsnep_tx_disable(struct tsnep_tx *tx, struct napi_struct *napi)
+{
+	struct netdev_queue *nq;
+	u32 val;
+
+	nq = netdev_get_tx_queue(tx->adapter->netdev, tx->queue_index);
+
+	__netif_tx_lock_bh(nq);
+	netif_tx_stop_queue(nq);
+	__netif_tx_unlock_bh(nq);
+
+	/* wait until TX is done in hardware */
+	readx_poll_timeout(ioread32, tx->addr + TSNEP_CONTROL, val,
+			   ((val & TSNEP_CONTROL_TX_ENABLE) == 0), 10000,
+			   1000000);
+
+	/* wait until TX is also done in software */
+	while (READ_ONCE(tx->read) != tx->write) {
+		napi_schedule(napi);
+		napi_synchronize(napi);
+	}
+}
+
 static void tsnep_tx_activate(struct tsnep_tx *tx, int index, int length,
 			      bool last)
 {
 	struct tsnep_tx_entry *entry = &tx->entry[index];
 
 	entry->properties = 0;
-	/* xdpf is union with skb */
+	/* xdpf and zc are union with skb */
 	if (entry->skb) {
 		entry->properties = length & TSNEP_DESC_LENGTH_MASK;
 		entry->properties |= TSNEP_DESC_INTERRUPT_FLAG;
@@ -646,10 +682,69 @@ static bool tsnep_xdp_xmit_back(struct tsnep_adapter *adapter,
 	return xmit;
 }
 
+static int tsnep_xdp_tx_map_zc(struct xdp_desc *xdpd, struct tsnep_tx *tx)
+{
+	struct tsnep_tx_entry *entry;
+	dma_addr_t dma;
+
+	entry = &tx->entry[tx->write];
+	entry->zc = true;
+
+	dma = xsk_buff_raw_get_dma(tx->xsk_pool, xdpd->addr);
+	xsk_buff_raw_dma_sync_for_device(tx->xsk_pool, dma, xdpd->len);
+
+	entry->type = TSNEP_TX_TYPE_XSK;
+	entry->len = xdpd->len;
+
+	entry->desc->tx = __cpu_to_le64(dma);
+
+	return xdpd->len;
+}
+
+static void tsnep_xdp_xmit_frame_ring_zc(struct xdp_desc *xdpd,
+					 struct tsnep_tx *tx)
+{
+	int length;
+
+	length = tsnep_xdp_tx_map_zc(xdpd, tx);
+
+	tsnep_tx_activate(tx, tx->write, length, true);
+	tx->write = (tx->write + 1) & TSNEP_RING_MASK;
+}
+
+static void tsnep_xdp_xmit_zc(struct tsnep_tx *tx)
+{
+	int desc_available = tsnep_tx_desc_available(tx);
+	struct xdp_desc *descs = tx->xsk_pool->tx_descs;
+	int batch, i;
+
+	/* ensure that TX ring is not filled up by XDP, always MAX_SKB_FRAGS
+	 * will be available for normal TX path and queue is stopped there if
+	 * necessary
+	 */
+	if (desc_available <= (MAX_SKB_FRAGS + 1))
+		return;
+	desc_available -= MAX_SKB_FRAGS + 1;
+
+	batch = xsk_tx_peek_release_desc_batch(tx->xsk_pool, desc_available);
+	for (i = 0; i < batch; i++)
+		tsnep_xdp_xmit_frame_ring_zc(&descs[i], tx);
+
+	if (batch) {
+		/* descriptor properties shall be valid before hardware is
+		 * notified
+		 */
+		dma_wmb();
+
+		tsnep_xdp_xmit_flush(tx);
+	}
+}
+
 static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget)
 {
 	struct tsnep_tx_entry *entry;
 	struct netdev_queue *nq;
+	int xsk_frames = 0;
 	int budget = 128;
 	int length;
 	int count;
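
tsnep_xdp_xmit_zc() above only runs from NAPI context; it is called at the end of tsnep_tx_poll() (see the hunks below). The other entry point an AF_XDP zero-copy driver normally provides is the .ndo_xsk_wakeup callback, which is not part of this hunk; its usual job is just to make sure NAPI gets scheduled so the poll routine picks up the descriptors userspace produced. A hedged sketch of that common pattern, where the handler name and the adapter/queue lookup fields are assumptions, not taken from this patch:

/* Hypothetical wakeup handler; only the napi_schedule() part is essential. */
static int tsnep_xsk_wakeup(struct net_device *netdev, u32 queue_id, u32 flags)
{
	struct tsnep_adapter *adapter = netdev_priv(netdev);
	struct tsnep_queue *queue;

	/* assumed bounds check; the real field names may differ */
	if (queue_id >= adapter->num_queues)
		return -EINVAL;

	queue = &adapter->queue[queue_id];
	if (!queue->tx || !queue->tx->xsk_pool)
		return -EINVAL;

	/* kick NAPI so tsnep_tx_poll() -> tsnep_xdp_xmit_zc() runs soon */
	if (!napi_if_scheduled_mark_missed(&queue->napi))
		napi_schedule(&queue->napi);

	return 0;
}
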
@@ -676,7 +771,7 @@ static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget)
 		if ((entry->type & TSNEP_TX_TYPE_SKB) &&
 		    skb_shinfo(entry->skb)->nr_frags > 0)
 			count += skb_shinfo(entry->skb)->nr_frags;
-		else if (!(entry->type & TSNEP_TX_TYPE_SKB) &&
+		else if ((entry->type & TSNEP_TX_TYPE_XDP) &&
 			 xdp_frame_has_frags(entry->xdpf))
 			count += xdp_get_shared_info_from_frame(entry->xdpf)->nr_frags;
 
@@ -705,9 +800,11 @@ static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget)
 
 		if (entry->type & TSNEP_TX_TYPE_SKB)
 			napi_consume_skb(entry->skb, napi_budget);
-		else
+		else if (entry->type & TSNEP_TX_TYPE_XDP)
 			xdp_return_frame_rx_napi(entry->xdpf);
-		/* xdpf is union with skb */
+		else
+			xsk_frames++;
+		/* xdpf and zc are union with skb */
 		entry->skb = NULL;
 
 		tx->read = (tx->read + count) & TSNEP_RING_MASK;
@@ -718,6 +815,14 @@ static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget)
 		budget--;
 	} while (likely(budget));
 
+	if (tx->xsk_pool) {
+		if (xsk_frames)
+			xsk_tx_completed(tx->xsk_pool, xsk_frames);
+		if (xsk_uses_need_wakeup(tx->xsk_pool))
+			xsk_set_tx_need_wakeup(tx->xsk_pool);
+		tsnep_xdp_xmit_zc(tx);
+	}
+
 	if ((tsnep_tx_desc_available(tx) >= ((MAX_SKB_FRAGS + 1) * 2)) &&
 	    netif_tx_queue_stopped(nq)) {
 		netif_tx_wake_queue(nq);
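
The block added above completes sent descriptors back to the pool (xsk_tx_completed()) and, if the socket uses the need_wakeup optimization, tells userspace that the next TX kick must be explicit. The application side of that contract, sketched with the libxdp xsk helpers (socket and ring setup omitted; only the wakeup check matters here):

#include <sys/socket.h>
#include <xdp/xsk.h>

/* After producing descriptors on the AF_XDP TX ring, syscall into the kernel
 * only when the driver asked for it via xsk_set_tx_need_wakeup().
 */
static void kick_tx_if_needed(struct xsk_socket *xsk, struct xsk_ring_prod *tx)
{
	if (xsk_ring_prod__needs_wakeup(tx))
		sendto(xsk_socket__fd(xsk), NULL, 0, MSG_DONTWAIT, NULL, 0);
}
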
@@ -765,12 +870,6 @@ static int tsnep_tx_open(struct tsnep_tx *tx)
 
 static void tsnep_tx_close(struct tsnep_tx *tx)
 {
-	u32 val;
-
-	readx_poll_timeout(ioread32, tx->addr + TSNEP_CONTROL, val,
-			   ((val & TSNEP_CONTROL_TX_ENABLE) == 0), 10000,
-			   1000000);
-
 	tsnep_tx_ring_cleanup(tx);
 }
 
@@ -1786,12 +1885,18 @@ static void tsnep_queue_enable(struct tsnep_queue *queue)
 	napi_enable(&queue->napi);
 	tsnep_enable_irq(queue->adapter, queue->irq_mask);
 
+	if (queue->tx)
+		tsnep_tx_enable(queue->tx);
+
 	if (queue->rx)
 		tsnep_rx_enable(queue->rx);
 }
 
 static void tsnep_queue_disable(struct tsnep_queue *queue)
 {
+	if (queue->tx)
+		tsnep_tx_disable(queue->tx, &queue->napi);
+
 	napi_disable(&queue->napi);
 	tsnep_disable_irq(queue->adapter, queue->irq_mask);
 
@@ -1908,6 +2013,7 @@ int tsnep_enable_xsk(struct tsnep_queue *queue, struct xsk_buff_pool *pool)
 	if (running)
 		tsnep_queue_disable(queue);
 
+	queue->tx->xsk_pool = pool;
 	queue->rx->xsk_pool = pool;
 
 	if (running) {
@@ -1928,6 +2034,7 @@ void tsnep_disable_xsk(struct tsnep_queue *queue)
 	tsnep_rx_free_zc(queue->rx);
 
 	queue->rx->xsk_pool = NULL;
+	queue->tx->xsk_pool = NULL;
 
 	if (running) {
 		tsnep_rx_reopen(queue->rx);
@@ -2438,7 +2545,8 @@ static int tsnep_probe(struct platform_device *pdev)
 
 	netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
 			       NETDEV_XDP_ACT_NDO_XMIT |
-			       NETDEV_XDP_ACT_NDO_XMIT_SG;
+			       NETDEV_XDP_ACT_NDO_XMIT_SG |
+			       NETDEV_XDP_ACT_XSK_ZEROCOPY;
 
 	/* carrier off reporting is important to ethtool even BEFORE open */
 	netif_carrier_off(netdev);
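
Advertising NETDEV_XDP_ACT_XSK_ZEROCOPY lets an AF_XDP application bind its socket with XDP_ZEROCOPY instead of falling back to copy mode. A sketch of such a bind on the userspace side (libxdp xsk API; the interface name, queue id and UMEM/ring setup are placeholders, not part of this patch):

#include <linux/if_xdp.h>
#include <xdp/xsk.h>

/* Sketch only: UMEM and ring structures are assumed to be set up elsewhere. */
static int bind_zc_socket(struct xsk_umem *umem, struct xsk_ring_cons *rx,
			  struct xsk_ring_prod *tx, struct xsk_socket **xsk)
{
	const struct xsk_socket_config cfg = {
		.rx_size = XSK_RING_CONS__DEFAULT_NUM_DESCS,
		.tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS,
		/* with XDP_ZEROCOPY the bind fails instead of silently using
		 * copy mode if the driver cannot do zero-copy on this queue
		 */
		.bind_flags = XDP_ZEROCOPY | XDP_USE_NEED_WAKEUP,
	};

	return xsk_socket__create(xsk, "eth0", 0, umem, rx, tx, &cfg);
}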