@@ -295,6 +295,10 @@ struct send_queue {
 
 	/* Record whether sq is in reset state. */
 	bool reset;
+
+	struct xsk_buff_pool *xsk_pool;
+
+	dma_addr_t xsk_hdr_dma_addr;
 };
 
 /* Internal representation of a receive virtqueue */
@@ -495,6 +499,8 @@ struct virtio_net_common_hdr {
 	};
 };
 
+static struct virtio_net_common_hdr xsk_hdr;
+
 static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);
 static int virtnet_xdp_handler(struct bpf_prog *xdp_prog, struct xdp_buff *xdp,
 			       struct net_device *dev,
@@ -5561,6 +5567,29 @@ static int virtnet_rq_bind_xsk_pool(struct virtnet_info *vi, struct receive_queu
 	return err;
 }
 
+static int virtnet_sq_bind_xsk_pool(struct virtnet_info *vi,
+				    struct send_queue *sq,
+				    struct xsk_buff_pool *pool)
+{
+	int err, qindex;
+
+	qindex = sq - vi->sq;
+
+	virtnet_tx_pause(vi, sq);
+
+	err = virtqueue_reset(sq->vq, virtnet_sq_free_unused_buf);
+	if (err) {
+		netdev_err(vi->dev, "reset tx fail: tx queue index: %d err: %d\n", qindex, err);
+		pool = NULL;
+	}
+
+	sq->xsk_pool = pool;
+
+	virtnet_tx_resume(vi, sq);
+
+	return err;
+}
+
 static int virtnet_xsk_pool_enable(struct net_device *dev,
 				   struct xsk_buff_pool *pool,
 				   u16 qid)
@@ -5569,6 +5598,7 @@ static int virtnet_xsk_pool_enable(struct net_device *dev,
 	struct receive_queue *rq;
 	struct device *dma_dev;
 	struct send_queue *sq;
+	dma_addr_t hdr_dma;
 	int err, size;
 
 	if (vi->hdr_len > xsk_pool_get_headroom(pool))
@@ -5606,6 +5636,11 @@ static int virtnet_xsk_pool_enable(struct net_device *dev,
 	if (!rq->xsk_buffs)
 		return -ENOMEM;
 
+	hdr_dma = virtqueue_dma_map_single_attrs(sq->vq, &xsk_hdr, vi->hdr_len,
+						 DMA_TO_DEVICE, 0);
+	if (virtqueue_dma_mapping_error(sq->vq, hdr_dma))
+		return -ENOMEM;
+
 	err = xsk_pool_dma_map(pool, dma_dev, 0);
 	if (err)
 		goto err_xsk_map;
@@ -5614,11 +5649,24 @@ static int virtnet_xsk_pool_enable(struct net_device *dev,
 	if (err)
 		goto err_rq;
 
+	err = virtnet_sq_bind_xsk_pool(vi, sq, pool);
+	if (err)
+		goto err_sq;
+
+	/* We do not support tx offloads (such as tx csum) yet, so the tx
+	 * virtnet hdr is always zero and all tx packets can share a single hdr.
+	 */
+	sq->xsk_hdr_dma_addr = hdr_dma;
+
 	return 0;
 
+err_sq:
+	virtnet_rq_bind_xsk_pool(vi, rq, NULL);
 err_rq:
 	xsk_pool_dma_unmap(pool, 0);
 err_xsk_map:
+	virtqueue_dma_unmap_single_attrs(rq->vq, hdr_dma, vi->hdr_len,
+					 DMA_TO_DEVICE, 0);
 	return err;
 }
 
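Note: the shared header set up above is meant to be consumed later in the XSK TX datapath, which is not part of this diff. Each AF_XDP descriptor is posted to the send virtqueue as a two-entry scatterlist, with entry 0 pointing at the premapped sq->xsk_hdr_dma_addr and entry 1 at the frame's DMA address from the pool. A minimal sketch of that step follows; the helper names (virtnet_xsk_xmit_one, sg_fill_dma, virtnet_xsk_to_ptr) and the premapped add API are assumptions based on the rest of this series, not confirmed by this hunk.

/* Sketch only (assumed helper, not part of this diff): post one AF_XDP
 * descriptor, reusing the single premapped all-zero virtio-net header.
 */
static int virtnet_xsk_xmit_one(struct send_queue *sq,
				struct xsk_buff_pool *pool,
				struct xdp_desc *desc)
{
	struct virtnet_info *vi = sq->vq->vdev->priv;
	dma_addr_t addr;

	/* Frame DMA address inside the umem; the whole umem was mapped by
	 * xsk_pool_dma_map() in virtnet_xsk_pool_enable().
	 */
	addr = xsk_buff_raw_get_dma(pool, desc->addr);
	xsk_buff_raw_dma_sync_for_device(pool, addr, desc->len);

	sg_init_table(sq->sg, 2);
	/* sg_fill_dma() is assumed to write a DMA address/length straight
	 * into a scatterlist entry, skipping the usual page-based setup.
	 */
	sg_fill_dma(sq->sg, sq->xsk_hdr_dma_addr, vi->hdr_len);
	sg_fill_dma(sq->sg + 1, addr, desc->len);

	/* Assumed premapped variant of virtqueue_add_outbuf(): the addresses
	 * above are already DMA addresses, so the core must not map them again.
	 * virtnet_xsk_to_ptr() is a hypothetical cookie encoding the length.
	 */
	return virtqueue_add_outbuf_premapped(sq->vq, sq->sg, 2,
					      virtnet_xsk_to_ptr(desc->len),
					      GFP_ATOMIC);
}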
@@ -5627,19 +5675,24 @@ static int virtnet_xsk_pool_disable(struct net_device *dev, u16 qid)
 	struct virtnet_info *vi = netdev_priv(dev);
 	struct xsk_buff_pool *pool;
 	struct receive_queue *rq;
+	struct send_queue *sq;
 	int err;
 
 	if (qid >= vi->curr_queue_pairs)
 		return -EINVAL;
 
+	sq = &vi->sq[qid];
 	rq = &vi->rq[qid];
 
 	pool = rq->xsk_pool;
 
 	err = virtnet_rq_bind_xsk_pool(vi, rq, NULL);
+	err |= virtnet_sq_bind_xsk_pool(vi, sq, NULL);
 
 	xsk_pool_dma_unmap(pool, 0);
 
+	virtqueue_dma_unmap_single_attrs(sq->vq, sq->xsk_hdr_dma_addr,
+					 vi->hdr_len, DMA_TO_DEVICE, 0);
 	kvfree(rq->xsk_buffs);
 
 	return err;