@@ -175,6 +175,11 @@ struct vring_virtqueue {
175175 /* Do DMA mapping by driver */
176176 bool premapped ;
177177
178+ /* Whether to unmap descriptors: true only when premapped is
179+ * false and use_dma_api is true.
180+ */
181+ bool do_unmap ;
182+
178183 /* Head of free buffer list. */
179184 unsigned int free_head ;
180185 /* Number we've added since last sync. */
@@ -440,7 +445,7 @@ static void vring_unmap_one_split_indirect(const struct vring_virtqueue *vq,
440445{
441446 u16 flags ;
442447
443- if (!vq -> use_dma_api )
448+ if (!vq -> do_unmap )
444449 return ;
445450
446451 flags = virtio16_to_cpu (vq -> vq .vdev , desc -> flags );
@@ -458,18 +463,21 @@ static unsigned int vring_unmap_one_split(const struct vring_virtqueue *vq,
458463 struct vring_desc_extra * extra = vq -> split .desc_extra ;
459464 u16 flags ;
460465
461- if (!vq -> use_dma_api )
462- goto out ;
463-
464466 flags = extra [i ].flags ;
465467
466468 if (flags & VRING_DESC_F_INDIRECT ) {
469+ if (!vq -> use_dma_api )
470+ goto out ;
471+
467472 dma_unmap_single (vring_dma_dev (vq ),
468473 extra [i ].addr ,
469474 extra [i ].len ,
470475 (flags & VRING_DESC_F_WRITE ) ?
471476 DMA_FROM_DEVICE : DMA_TO_DEVICE );
472477 } else {
478+ if (!vq -> do_unmap )
479+ goto out ;
480+
473481 dma_unmap_page (vring_dma_dev (vq ),
474482 extra [i ].addr ,
475483 extra [i ].len ,
@@ -635,7 +643,7 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
635643 }
636644 /* Last one doesn't continue. */
637645 desc [prev ].flags &= cpu_to_virtio16 (_vq -> vdev , ~VRING_DESC_F_NEXT );
638- if (!indirect && vq -> use_dma_api )
646+ if (!indirect && vq -> do_unmap )
639647 vq -> split .desc_extra [prev & (vq -> split .vring .num - 1 )].flags &=
640648 ~VRING_DESC_F_NEXT ;
641649
@@ -794,7 +802,7 @@ static void detach_buf_split(struct vring_virtqueue *vq, unsigned int head,
794802 VRING_DESC_F_INDIRECT ));
795803 BUG_ON (len == 0 || len % sizeof (struct vring_desc ));
796804
797- if (vq -> use_dma_api ) {
805+ if (vq -> do_unmap ) {
798806 for (j = 0 ; j < len / sizeof (struct vring_desc ); j ++ )
799807 vring_unmap_one_split_indirect (vq , & indir_desc [j ]);
800808 }
@@ -1217,17 +1225,20 @@ static void vring_unmap_extra_packed(const struct vring_virtqueue *vq,
12171225{
12181226 u16 flags ;
12191227
1220- if (!vq -> use_dma_api )
1221- return ;
1222-
12231228 flags = extra -> flags ;
12241229
12251230 if (flags & VRING_DESC_F_INDIRECT ) {
1231+ if (!vq -> use_dma_api )
1232+ return ;
1233+
12261234 dma_unmap_single (vring_dma_dev (vq ),
12271235 extra -> addr , extra -> len ,
12281236 (flags & VRING_DESC_F_WRITE ) ?
12291237 DMA_FROM_DEVICE : DMA_TO_DEVICE );
12301238 } else {
1239+ if (!vq -> do_unmap )
1240+ return ;
1241+
12311242 dma_unmap_page (vring_dma_dev (vq ),
12321243 extra -> addr , extra -> len ,
12331244 (flags & VRING_DESC_F_WRITE ) ?
@@ -1240,7 +1251,7 @@ static void vring_unmap_desc_packed(const struct vring_virtqueue *vq,
12401251{
12411252 u16 flags ;
12421253
1243- if (!vq -> use_dma_api )
1254+ if (!vq -> do_unmap )
12441255 return ;
12451256
12461257 flags = le16_to_cpu (desc -> flags );
@@ -1329,7 +1340,7 @@ static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
13291340 sizeof (struct vring_packed_desc ));
13301341 vq -> packed .vring .desc [head ].id = cpu_to_le16 (id );
13311342
1332- if (vq -> use_dma_api ) {
1343+ if (vq -> do_unmap ) {
13331344 vq -> packed .desc_extra [id ].addr = addr ;
13341345 vq -> packed .desc_extra [id ].len = total_sg *
13351346 sizeof (struct vring_packed_desc );
@@ -1470,7 +1481,7 @@ static inline int virtqueue_add_packed(struct virtqueue *_vq,
14701481 desc [i ].len = cpu_to_le32 (sg -> length );
14711482 desc [i ].id = cpu_to_le16 (id );
14721483
1473- if (unlikely (vq -> use_dma_api )) {
1484+ if (unlikely (vq -> do_unmap )) {
14741485 vq -> packed .desc_extra [curr ].addr = addr ;
14751486 vq -> packed .desc_extra [curr ].len = sg -> length ;
14761487 vq -> packed .desc_extra [curr ].flags =
@@ -1604,7 +1615,7 @@ static void detach_buf_packed(struct vring_virtqueue *vq,
16041615 vq -> free_head = id ;
16051616 vq -> vq .num_free += state -> num ;
16061617
1607- if (unlikely (vq -> use_dma_api )) {
1618+ if (unlikely (vq -> do_unmap )) {
16081619 curr = id ;
16091620 for (i = 0 ; i < state -> num ; i ++ ) {
16101621 vring_unmap_extra_packed (vq ,
@@ -1621,7 +1632,7 @@ static void detach_buf_packed(struct vring_virtqueue *vq,
16211632 if (!desc )
16221633 return ;
16231634
1624- if (vq -> use_dma_api ) {
1635+ if (vq -> do_unmap ) {
16251636 len = vq -> packed .desc_extra [id ].len ;
16261637 for (i = 0 ; i < len / sizeof (struct vring_packed_desc );
16271638 i ++ )
@@ -2080,6 +2091,7 @@ static struct virtqueue *vring_create_virtqueue_packed(
20802091 vq -> dma_dev = dma_dev ;
20812092 vq -> use_dma_api = vring_use_dma_api (vdev );
20822093 vq -> premapped = false;
2094+ vq -> do_unmap = vq -> use_dma_api ;
20832095
20842096 vq -> indirect = virtio_has_feature (vdev , VIRTIO_RING_F_INDIRECT_DESC ) &&
20852097 !context ;
@@ -2587,6 +2599,7 @@ static struct virtqueue *__vring_new_virtqueue(unsigned int index,
25872599 vq -> dma_dev = dma_dev ;
25882600 vq -> use_dma_api = vring_use_dma_api (vdev );
25892601 vq -> premapped = false;
2602+ vq -> do_unmap = vq -> use_dma_api ;
25902603
25912604 vq -> indirect = virtio_has_feature (vdev , VIRTIO_RING_F_INDIRECT_DESC ) &&
25922605 !context ;
@@ -2771,6 +2784,7 @@ int virtqueue_set_dma_premapped(struct virtqueue *_vq)
27712784 }
27722785
27732786 vq -> premapped = true;
2787+ vq -> do_unmap = false;
27742788
27752789 END_USE (vq );
27762790
0 commit comments