@@ -355,9 +355,8 @@ static struct device *vring_dma_dev(const struct vring_virtqueue *vq)
355355}
356356
357357/* Map one sg entry. */
358- static dma_addr_t vring_map_one_sg(const struct vring_virtqueue *vq,
359- 				    struct scatterlist *sg,
360- 				    enum dma_data_direction direction)
358+ static int vring_map_one_sg(const struct vring_virtqueue *vq, struct scatterlist *sg,
359+ 			     enum dma_data_direction direction, dma_addr_t *addr)
361360{
362361	if (!vq->use_dma_api) {
363362 /*
@@ -366,17 +365,23 @@ static dma_addr_t vring_map_one_sg(const struct vring_virtqueue *vq,
366365 * depending on the direction.
367366 */
368367		kmsan_handle_dma(sg_page(sg), sg->offset, sg->length, direction);
369- 		return (dma_addr_t)sg_phys(sg);
368+ 		*addr = (dma_addr_t)sg_phys(sg);
369+ 		return 0;
370370 }
371371
372372 /*
373373 * We can't use dma_map_sg, because we don't use scatterlists in
374374 * the way it expects (we don't guarantee that the scatterlist
375375 * will exist for the lifetime of the mapping).
376376 */
377- 	return dma_map_page(vring_dma_dev(vq),
377+ 	*addr = dma_map_page(vring_dma_dev(vq),
378378 			    sg_page(sg), sg->offset, sg->length,
379379 			    direction);
380+
381+ 	if (dma_mapping_error(vring_dma_dev(vq), *addr))
382+ 		return -ENOMEM;
383+
384+ 	return 0;
380385}
381386
382387static dma_addr_t vring_map_single(const struct vring_virtqueue *vq,
@@ -588,8 +593,9 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
588593
589594	for (n = 0; n < out_sgs; n++) {
590595		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
591- 			dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_TO_DEVICE);
592- 			if (vring_mapping_error(vq, addr))
596+ 			dma_addr_t addr;
597+
598+ 			if (vring_map_one_sg(vq, sg, DMA_TO_DEVICE, &addr))
593599				goto unmap_release;
594600
595601			prev = i;
@@ -603,8 +609,9 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
603609 }
604610	for (; n < (out_sgs + in_sgs); n++) {
605611		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
606- 			dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_FROM_DEVICE);
607- 			if (vring_mapping_error(vq, addr))
612+ 			dma_addr_t addr;
613+
614+ 			if (vring_map_one_sg(vq, sg, DMA_FROM_DEVICE, &addr))
608615				goto unmap_release;
609616
610617			prev = i;
@@ -1281,9 +1288,8 @@ static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
12811288
12821289 for (n = 0 ; n < out_sgs + in_sgs ; n ++ ) {
12831290 for (sg = sgs [n ]; sg ; sg = sg_next (sg )) {
1284- addr = vring_map_one_sg (vq , sg , n < out_sgs ?
1285- DMA_TO_DEVICE : DMA_FROM_DEVICE );
1286- if (vring_mapping_error (vq , addr ))
1291+ if (vring_map_one_sg (vq , sg , n < out_sgs ?
1292+ DMA_TO_DEVICE : DMA_FROM_DEVICE , & addr ))
12871293 goto unmap_release ;
12881294
12891295 desc [i ].flags = cpu_to_le16 (n < out_sgs ?
@@ -1428,9 +1434,10 @@ static inline int virtqueue_add_packed(struct virtqueue *_vq,
14281434	c = 0;
14291435	for (n = 0; n < out_sgs + in_sgs; n++) {
14301436		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
1431- 			dma_addr_t addr = vring_map_one_sg(vq, sg, n < out_sgs ?
1432- 							   DMA_TO_DEVICE : DMA_FROM_DEVICE);
1433- 			if (vring_mapping_error(vq, addr))
1437+ 			dma_addr_t addr;
1438+
1439+ 			if (vring_map_one_sg(vq, sg, n < out_sgs ?
1440+ 					     DMA_TO_DEVICE : DMA_FROM_DEVICE, &addr))
14341441				goto unmap_release;
14351442
14361443			flags = cpu_to_le16(vq->packed.avail_used_flags |
0 commit comments