@@ -356,6 +356,9 @@ struct receive_queue {
356356 struct xdp_rxq_info xsk_rxq_info ;
357357
358358 struct xdp_buff * * xsk_buffs ;
359+
360+	/* Do the DMA mapping ourselves (premapped mode) */
361+ bool do_dma ;
359362};
360363
361364/* This structure can contain rss message with maximum settings for indirection table and keysize
@@ -885,7 +888,7 @@ static void *virtnet_rq_get_buf(struct receive_queue *rq, u32 *len, void **ctx)
885888 void * buf ;
886889
887890 buf = virtqueue_get_buf_ctx (rq -> vq , len , ctx );
888- if (buf )
891+ if (buf && rq -> do_dma )
889892 virtnet_rq_unmap (rq , buf , * len );
890893
891894 return buf ;
@@ -898,6 +901,11 @@ static void virtnet_rq_init_one_sg(struct receive_queue *rq, void *buf, u32 len)
898901 u32 offset ;
899902 void * head ;
900903
904+ if (!rq -> do_dma ) {
905+ sg_init_one (rq -> sg , buf , len );
906+ return ;
907+ }
908+
901909 head = page_address (rq -> alloc_frag .page );
902910
903911 offset = buf - head ;
@@ -923,42 +931,44 @@ static void *virtnet_rq_alloc(struct receive_queue *rq, u32 size, gfp_t gfp)
923931
924932 head = page_address (alloc_frag -> page );
925933
926- dma = head ;
934+ if (rq -> do_dma ) {
935+ dma = head ;
936+
937+ /* new pages */
938+ if (!alloc_frag -> offset ) {
939+ if (rq -> last_dma ) {
940+ /* Now, the new page is allocated, the last dma
941+ * will not be used. So the dma can be unmapped
942+ * if the ref is 0.
943+ */
944+ virtnet_rq_unmap (rq , rq -> last_dma , 0 );
945+ rq -> last_dma = NULL ;
946+ }
927947
928- /* new pages */
929- if (!alloc_frag -> offset ) {
930- if (rq -> last_dma ) {
931- /* Now, the new page is allocated, the last dma
932- * will not be used. So the dma can be unmapped
933- * if the ref is 0.
934- */
935- virtnet_rq_unmap (rq , rq -> last_dma , 0 );
936- rq -> last_dma = NULL ;
937- }
948+ dma -> len = alloc_frag -> size - sizeof (* dma );
938949
939- dma -> len = alloc_frag -> size - sizeof (* dma );
950+ addr = virtqueue_dma_map_single_attrs (rq -> vq , dma + 1 ,
951+ dma -> len , DMA_FROM_DEVICE , 0 );
952+ if (virtqueue_dma_mapping_error (rq -> vq , addr ))
953+ return NULL ;
940954
941- addr = virtqueue_dma_map_single_attrs (rq -> vq , dma + 1 ,
942- dma -> len , DMA_FROM_DEVICE , 0 );
943- if (virtqueue_dma_mapping_error (rq -> vq , addr ))
944- return NULL ;
955+ dma -> addr = addr ;
956+ dma -> need_sync = virtqueue_dma_need_sync (rq -> vq , addr );
945957
946- dma -> addr = addr ;
947- dma -> need_sync = virtqueue_dma_need_sync (rq -> vq , addr );
958+ /* Add a reference to dma to prevent the entire dma from
959+ * being released during error handling. This reference
960+ * will be freed after the pages are no longer used.
961+ */
962+ get_page (alloc_frag -> page );
963+ dma -> ref = 1 ;
964+ alloc_frag -> offset = sizeof (* dma );
948965
949- /* Add a reference to dma to prevent the entire dma from
950- * being released during error handling. This reference
951- * will be freed after the pages are no longer used.
952- */
953- get_page (alloc_frag -> page );
954- dma -> ref = 1 ;
955- alloc_frag -> offset = sizeof (* dma );
966+ rq -> last_dma = dma ;
967+ }
956968
957- rq -> last_dma = dma ;
969+ ++ dma -> ref ;
958970 }
959971
960- ++ dma -> ref ;
961-
962972 buf = head + alloc_frag -> offset ;
963973
964974 get_page (alloc_frag -> page );
@@ -967,19 +977,6 @@ static void *virtnet_rq_alloc(struct receive_queue *rq, u32 size, gfp_t gfp)
967977 return buf ;
968978}
969979
970- static void virtnet_rq_set_premapped (struct virtnet_info * vi )
971- {
972- int i ;
973-
974- /* disable for big mode */
975- if (!vi -> mergeable_rx_bufs && vi -> big_packets )
976- return ;
977-
978- for (i = 0 ; i < vi -> max_queue_pairs ; i ++ )
979- /* error should never happen */
980- BUG_ON (virtqueue_set_dma_premapped (vi -> rq [i ].vq ));
981- }
982-
983980static void virtnet_rq_unmap_free_buf (struct virtqueue * vq , void * buf )
984981{
985982 struct virtnet_info * vi = vq -> vdev -> priv ;
@@ -993,7 +990,7 @@ static void virtnet_rq_unmap_free_buf(struct virtqueue *vq, void *buf)
993990 return ;
994991 }
995992
996- if (! vi -> big_packets || vi -> mergeable_rx_bufs )
993+ if (rq -> do_dma )
997994 virtnet_rq_unmap (rq , buf , 0 );
998995
999996 virtnet_rq_free_buf (vi , rq , buf );
@@ -2430,7 +2427,8 @@ static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
24302427
24312428 err = virtqueue_add_inbuf_ctx (rq -> vq , rq -> sg , 1 , buf , ctx , gfp );
24322429 if (err < 0 ) {
2433- virtnet_rq_unmap (rq , buf , 0 );
2430+ if (rq -> do_dma )
2431+ virtnet_rq_unmap (rq , buf , 0 );
24342432 put_page (virt_to_head_page (buf ));
24352433 }
24362434
@@ -2544,7 +2542,8 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi,
25442542 ctx = mergeable_len_to_ctx (len + room , headroom );
25452543 err = virtqueue_add_inbuf_ctx (rq -> vq , rq -> sg , 1 , buf , ctx , gfp );
25462544 if (err < 0 ) {
2547- virtnet_rq_unmap (rq , buf , 0 );
2545+ if (rq -> do_dma )
2546+ virtnet_rq_unmap (rq , buf , 0 );
25482547 put_page (virt_to_head_page (buf ));
25492548 }
25502549
@@ -2701,7 +2700,7 @@ static int virtnet_receive_packets(struct virtnet_info *vi,
27012700 }
27022701 } else {
27032702 while (packets < budget &&
2704- (buf = virtqueue_get_buf (rq -> vq , & len )) != NULL ) {
2703+ (buf = virtnet_rq_get_buf (rq , & len , NULL )) != NULL ) {
27052704 receive_buf (vi , rq , buf , len , NULL , xdp_xmit , stats );
27062705 packets ++ ;
27072706 }
@@ -5892,7 +5891,7 @@ static void free_receive_page_frags(struct virtnet_info *vi)
58925891 int i ;
58935892 for (i = 0 ; i < vi -> max_queue_pairs ; i ++ )
58945893 if (vi -> rq [i ].alloc_frag .page ) {
5895- if (vi -> rq [i ].last_dma )
5894+ if (vi -> rq [i ].do_dma && vi -> rq [ i ]. last_dma )
58965895 virtnet_rq_unmap (& vi -> rq [i ], vi -> rq [i ].last_dma , 0 );
58975896 put_page (vi -> rq [i ].alloc_frag .page );
58985897 }
@@ -6090,8 +6089,6 @@ static int init_vqs(struct virtnet_info *vi)
60906089 if (ret )
60916090 goto err_free ;
60926091
6093- virtnet_rq_set_premapped (vi );
6094-
60956092 cpus_read_lock ();
60966093 virtnet_set_affinity (vi );
60976094 cpus_read_unlock ();
0 commit comments