Skip to content

Commit bee8c7c

Browse files
jasowang authored and mstsirkin committed
virtio: introduce map ops in virtio core
This patch introduces map operations for virtio device. Virtio used to use DMA API which is not necessarily the case since some devices doesn't do DMA. Instead of using tricks and abusing DMA API, let's simply abstract the current mapping logic into a virtio specific mapping operations. For the device or transport that doesn't do DMA, they can implement their own mapping logic without the need to trick DMA core. In this case the mapping metadata is opaque to the virtio core that will be passed back to the transport or device specific map operations. For other devices, DMA API will still be used, so map token will still be the dma device to minimize the changeset and performance impact. The mapping operations are abstracted as a independent structure instead of reusing virtio_config_ops. This allows the transport can simply reuse the structure for lower layers like vDPA. A set of new mapping helpers were introduced for the device that want to do mapping by themselves. Signed-off-by: Jason Wang <jasowang@redhat.com> Message-Id: <20250821064641.5025-7-jasowang@redhat.com> Signed-off-by: Michael S. Tsirkin <mst@redhat.com> Tested-by: Lei Yang <leiyang@redhat.com> Reviewed-by: Eugenio Pérez <eperezma@redhat.com>
1 parent 201e52f commit bee8c7c

4 files changed

Lines changed: 269 additions & 42 deletions

File tree

drivers/virtio/virtio_ring.c

Lines changed: 169 additions & 42 deletions
Original file line numberDiff line numberDiff line change
@@ -297,8 +297,14 @@ size_t virtio_max_dma_size(const struct virtio_device *vdev)
297297
{
298298
size_t max_segment_size = SIZE_MAX;
299299

300-
if (vring_use_map_api(vdev))
301-
max_segment_size = dma_max_mapping_size(vdev->dev.parent);
300+
if (vring_use_map_api(vdev)) {
301+
if (vdev->map) {
302+
max_segment_size =
303+
vdev->map->max_mapping_size(vdev->vmap);
304+
} else
305+
max_segment_size =
306+
dma_max_mapping_size(vdev->dev.parent);
307+
}
302308

303309
return max_segment_size;
304310
}
@@ -309,8 +315,8 @@ static void *vring_alloc_queue(struct virtio_device *vdev, size_t size,
309315
union virtio_map map)
310316
{
311317
if (vring_use_map_api(vdev)) {
312-
return dma_alloc_coherent(map.dma_dev, size,
313-
map_handle, flag);
318+
return virtqueue_map_alloc_coherent(vdev, map, size,
319+
map_handle, flag);
314320
} else {
315321
void *queue = alloc_pages_exact(PAGE_ALIGN(size), flag);
316322

@@ -343,7 +349,8 @@ static void vring_free_queue(struct virtio_device *vdev, size_t size,
343349
union virtio_map map)
344350
{
345351
if (vring_use_map_api(vdev))
346-
dma_free_coherent(map.dma_dev, size, queue, map_handle);
352+
virtqueue_map_free_coherent(vdev, map, size,
353+
queue, map_handle);
347354
else
348355
free_pages_exact(queue, PAGE_ALIGN(size));
349356
}
@@ -358,6 +365,20 @@ static struct device *vring_dma_dev(const struct vring_virtqueue *vq)
358365
return vq->map.dma_dev;
359366
}
360367

368+
static int vring_mapping_error(const struct vring_virtqueue *vq,
369+
dma_addr_t addr)
370+
{
371+
struct virtio_device *vdev = vq->vq.vdev;
372+
373+
if (!vq->use_map_api)
374+
return 0;
375+
376+
if (vdev->map)
377+
return vdev->map->mapping_error(vq->map, addr);
378+
else
379+
return dma_mapping_error(vring_dma_dev(vq), addr);
380+
}
381+
361382
/* Map one sg entry. */
362383
static int vring_map_one_sg(const struct vring_virtqueue *vq, struct scatterlist *sg,
363384
enum dma_data_direction direction, dma_addr_t *addr,
@@ -387,11 +408,11 @@ static int vring_map_one_sg(const struct vring_virtqueue *vq, struct scatterlist
387408
* the way it expects (we don't guarantee that the scatterlist
388409
* will exist for the lifetime of the mapping).
389410
*/
390-
*addr = dma_map_page(vring_dma_dev(vq),
391-
sg_page(sg), sg->offset, sg->length,
392-
direction);
411+
*addr = virtqueue_map_page_attrs(&vq->vq, sg_page(sg),
412+
sg->offset, sg->length,
413+
direction, 0);
393414

394-
if (dma_mapping_error(vring_dma_dev(vq), *addr))
415+
if (vring_mapping_error(vq, *addr))
395416
return -ENOMEM;
396417

397418
return 0;
@@ -408,15 +429,6 @@ static dma_addr_t vring_map_single(const struct vring_virtqueue *vq,
408429
size, direction, 0);
409430
}
410431

411-
static int vring_mapping_error(const struct vring_virtqueue *vq,
412-
dma_addr_t addr)
413-
{
414-
if (!vq->use_map_api)
415-
return 0;
416-
417-
return dma_mapping_error(vring_dma_dev(vq), addr);
418-
}
419-
420432
static void virtqueue_init(struct vring_virtqueue *vq, u32 num)
421433
{
422434
vq->vq.num_free = num;
@@ -453,11 +465,12 @@ static unsigned int vring_unmap_one_split(const struct vring_virtqueue *vq,
453465
} else if (!vring_need_unmap_buffer(vq, extra))
454466
goto out;
455467

456-
dma_unmap_page(vring_dma_dev(vq),
457-
extra->addr,
458-
extra->len,
459-
(flags & VRING_DESC_F_WRITE) ?
460-
DMA_FROM_DEVICE : DMA_TO_DEVICE);
468+
virtqueue_unmap_page_attrs(&vq->vq,
469+
extra->addr,
470+
extra->len,
471+
(flags & VRING_DESC_F_WRITE) ?
472+
DMA_FROM_DEVICE : DMA_TO_DEVICE,
473+
0);
461474

462475
out:
463476
return extra->next;
@@ -1271,10 +1284,11 @@ static void vring_unmap_extra_packed(const struct vring_virtqueue *vq,
12711284
} else if (!vring_need_unmap_buffer(vq, extra))
12721285
return;
12731286

1274-
dma_unmap_page(vring_dma_dev(vq),
1275-
extra->addr, extra->len,
1276-
(flags & VRING_DESC_F_WRITE) ?
1277-
DMA_FROM_DEVICE : DMA_TO_DEVICE);
1287+
virtqueue_unmap_page_attrs(&vq->vq,
1288+
extra->addr, extra->len,
1289+
(flags & VRING_DESC_F_WRITE) ?
1290+
DMA_FROM_DEVICE : DMA_TO_DEVICE,
1291+
0);
12781292
}
12791293

12801294
static struct vring_packed_desc *alloc_indirect_packed(unsigned int total_sg,
@@ -2433,7 +2447,7 @@ struct device *virtqueue_dma_dev(struct virtqueue *_vq)
24332447
{
24342448
struct vring_virtqueue *vq = to_vvq(_vq);
24352449

2436-
if (vq->use_map_api)
2450+
if (vq->use_map_api && !_vq->vdev->map)
24372451
return vq->map.dma_dev;
24382452
else
24392453
return NULL;
@@ -3123,6 +3137,107 @@ const struct vring *virtqueue_get_vring(const struct virtqueue *vq)
31233137
}
31243138
EXPORT_SYMBOL_GPL(virtqueue_get_vring);
31253139

3140+
/**
3141+
* virtqueue_map_alloc_coherent - alloc coherent mapping
3142+
* @vdev: the virtio device we are talking to
3143+
* @map: metadata for performing mapping
3144+
* @size: the size of the buffer
3145+
* @map_handle: the pointer to the mapped address
3146+
* @gfp: allocation flag (GFP_XXX)
3147+
*
3148+
* return virtual address or NULL on error
3149+
*/
3150+
void *virtqueue_map_alloc_coherent(struct virtio_device *vdev,
3151+
union virtio_map map,
3152+
size_t size, dma_addr_t *map_handle,
3153+
gfp_t gfp)
3154+
{
3155+
if (vdev->map)
3156+
return vdev->map->alloc(map, size,
3157+
map_handle, gfp);
3158+
else
3159+
return dma_alloc_coherent(map.dma_dev, size,
3160+
map_handle, gfp);
3161+
}
3162+
EXPORT_SYMBOL_GPL(virtqueue_map_alloc_coherent);
3163+
3164+
/**
3165+
* virtqueue_map_free_coherent - free coherent mapping
3166+
* @vdev: the virtio device we are talking to
3167+
* @map: metadata for performing mapping
3168+
* @size: the size of the buffer
3169+
* @vaddr: the virtual address of the buffer to free
* @map_handle: the mapped address that needs to be freed
3170+
*
3171+
*/
3172+
void virtqueue_map_free_coherent(struct virtio_device *vdev,
3173+
union virtio_map map, size_t size, void *vaddr,
3174+
dma_addr_t map_handle)
3175+
{
3176+
if (vdev->map)
3177+
vdev->map->free(map, size, vaddr,
3178+
map_handle, 0);
3179+
else
3180+
dma_free_coherent(map.dma_dev, size, vaddr, map_handle);
3181+
}
3182+
EXPORT_SYMBOL_GPL(virtqueue_map_free_coherent);
3183+
3184+
/**
3185+
* virtqueue_map_page_attrs - map a page to the device
3186+
* @_vq: the virtqueue we are talking to
3187+
* @page: the page that will be mapped by the device
3188+
* @offset: the offset in the page for a buffer
3189+
* @size: the buffer size
3190+
* @dir: mapping direction
3191+
* @attrs: mapping attributes
3192+
*
3193+
* Returns mapped address. Caller should check that by virtqueue_mapping_error().
3194+
*/
3195+
dma_addr_t virtqueue_map_page_attrs(const struct virtqueue *_vq,
3196+
struct page *page,
3197+
unsigned long offset,
3198+
size_t size,
3199+
enum dma_data_direction dir,
3200+
unsigned long attrs)
3201+
{
3202+
const struct vring_virtqueue *vq = to_vvq(_vq);
3203+
struct virtio_device *vdev = _vq->vdev;
3204+
3205+
if (vdev->map)
3206+
return vdev->map->map_page(vq->map,
3207+
page, offset, size,
3208+
dir, attrs);
3209+
3210+
return dma_map_page_attrs(vring_dma_dev(vq),
3211+
page, offset, size,
3212+
dir, attrs);
3213+
}
3214+
EXPORT_SYMBOL_GPL(virtqueue_map_page_attrs);
3215+
3216+
/**
3217+
* virtqueue_unmap_page_attrs - unmap a page previously mapped to the device
3218+
* @_vq: the virtqueue we are talking to
3219+
* @map_handle: the mapped address
3220+
* @size: the buffer size
3221+
* @dir: mapping direction
3222+
* @attrs: unmapping attributes
3223+
*/
3224+
void virtqueue_unmap_page_attrs(const struct virtqueue *_vq,
3225+
dma_addr_t map_handle,
3226+
size_t size, enum dma_data_direction dir,
3227+
unsigned long attrs)
3228+
{
3229+
const struct vring_virtqueue *vq = to_vvq(_vq);
3230+
struct virtio_device *vdev = _vq->vdev;
3231+
3232+
if (vdev->map)
3233+
vdev->map->unmap_page(vq->map,
3234+
map_handle, size, dir, attrs);
3235+
else
3236+
dma_unmap_page_attrs(vring_dma_dev(vq), map_handle,
3237+
size, dir, attrs);
3238+
}
3239+
EXPORT_SYMBOL_GPL(virtqueue_unmap_page_attrs);
3240+
31263241
/**
31273242
* virtqueue_map_single_attrs - map DMA for _vq
31283243
* @_vq: the struct virtqueue we're talking about.
@@ -3134,7 +3249,7 @@ EXPORT_SYMBOL_GPL(virtqueue_get_vring);
31343249
* The caller calls this to do dma mapping in advance. The DMA address can be
31353250
* passed to this _vq when it is in pre-mapped mode.
31363251
*
3137-
* return DMA address. Caller should check that by virtqueue_mapping_error().
3252+
* return mapped address. Caller should check that by virtqueue_mapping_error().
31383253
*/
31393254
dma_addr_t virtqueue_map_single_attrs(const struct virtqueue *_vq, void *ptr,
31403255
size_t size,
@@ -3153,8 +3268,8 @@ dma_addr_t virtqueue_map_single_attrs(const struct virtqueue *_vq, void *ptr,
31533268
"rejecting DMA map of vmalloc memory\n"))
31543269
return DMA_MAPPING_ERROR;
31553270

3156-
return dma_map_page_attrs(vring_dma_dev(vq), virt_to_page(ptr),
3157-
offset_in_page(ptr), size, dir, attrs);
3271+
return virtqueue_map_page_attrs(&vq->vq, virt_to_page(ptr),
3272+
offset_in_page(ptr), size, dir, attrs);
31583273
}
31593274
EXPORT_SYMBOL_GPL(virtqueue_map_single_attrs);
31603275

@@ -3179,12 +3294,12 @@ void virtqueue_unmap_single_attrs(const struct virtqueue *_vq,
31793294
if (!vq->use_map_api)
31803295
return;
31813296

3182-
dma_unmap_page_attrs(vring_dma_dev(vq), addr, size, dir, attrs);
3297+
virtqueue_unmap_page_attrs(_vq, addr, size, dir, attrs);
31833298
}
31843299
EXPORT_SYMBOL_GPL(virtqueue_unmap_single_attrs);
31853300

31863301
/**
3187-
* virtqueue_map_mapping_error - check dma address
3302+
* virtqueue_mapping_error - check dma address
31883303
* @_vq: the struct virtqueue we're talking about.
31893304
* @addr: DMA address
31903305
*
@@ -3194,10 +3309,7 @@ int virtqueue_map_mapping_error(const struct virtqueue *_vq, dma_addr_t addr)
31943309
{
31953310
const struct vring_virtqueue *vq = to_vvq(_vq);
31963311

3197-
if (!vq->use_map_api)
3198-
return 0;
3199-
3200-
return dma_mapping_error(vring_dma_dev(vq), addr);
3312+
return vring_mapping_error(vq, addr);
32013313
}
32023314
EXPORT_SYMBOL_GPL(virtqueue_map_mapping_error);
32033315

@@ -3214,11 +3326,15 @@ EXPORT_SYMBOL_GPL(virtqueue_map_mapping_error);
32143326
bool virtqueue_map_need_sync(const struct virtqueue *_vq, dma_addr_t addr)
32153327
{
32163328
const struct vring_virtqueue *vq = to_vvq(_vq);
3329+
struct virtio_device *vdev = _vq->vdev;
32173330

32183331
if (!vq->use_map_api)
32193332
return false;
32203333

3221-
return dma_need_sync(vring_dma_dev(vq), addr);
3334+
if (vdev->map)
3335+
return vdev->map->need_sync(vq->map, addr);
3336+
else
3337+
return dma_need_sync(vring_dma_dev(vq), addr);
32223338
}
32233339
EXPORT_SYMBOL_GPL(virtqueue_map_need_sync);
32243340

@@ -3240,12 +3356,17 @@ void virtqueue_map_sync_single_range_for_cpu(const struct virtqueue *_vq,
32403356
enum dma_data_direction dir)
32413357
{
32423358
const struct vring_virtqueue *vq = to_vvq(_vq);
3243-
struct device *dev = vring_dma_dev(vq);
3359+
struct virtio_device *vdev = _vq->vdev;
32443360

32453361
if (!vq->use_map_api)
32463362
return;
32473363

3248-
dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
3364+
if (vdev->map)
3365+
vdev->map->sync_single_for_cpu(vq->map,
3366+
addr + offset, size, dir);
3367+
else
3368+
dma_sync_single_range_for_cpu(vring_dma_dev(vq),
3369+
addr, offset, size, dir);
32493370
}
32503371
EXPORT_SYMBOL_GPL(virtqueue_map_sync_single_range_for_cpu);
32513372

@@ -3266,12 +3387,18 @@ void virtqueue_map_sync_single_range_for_device(const struct virtqueue *_vq,
32663387
enum dma_data_direction dir)
32673388
{
32683389
const struct vring_virtqueue *vq = to_vvq(_vq);
3269-
struct device *dev = vring_dma_dev(vq);
3390+
struct virtio_device *vdev = _vq->vdev;
32703391

32713392
if (!vq->use_map_api)
32723393
return;
32733394

3274-
dma_sync_single_range_for_device(dev, addr, offset, size, dir);
3395+
if (vdev->map)
3396+
vdev->map->sync_single_for_device(vq->map,
3397+
addr + offset,
3398+
size, dir);
3399+
else
3400+
dma_sync_single_range_for_device(vring_dma_dev(vq), addr,
3401+
offset, size, dir);
32753402
}
32763403
EXPORT_SYMBOL_GPL(virtqueue_map_sync_single_range_for_device);
32773404

drivers/virtio/virtio_vdpa.c

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -195,6 +195,9 @@ virtio_vdpa_setup_vq(struct virtio_device *vdev, unsigned int index,
195195
goto error_new_virtqueue;
196196
}
197197

198+
if (index == 0)
199+
vdev->vmap = map;
200+
198201
vq->num_max = max_num;
199202

200203
/* Setup virtqueue callback */

include/linux/virtio.h

Lines changed: 25 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -166,9 +166,11 @@ struct virtio_device {
166166
struct virtio_device_id id;
167167
const struct virtio_config_ops *config;
168168
const struct vringh_config_ops *vringh_config;
169+
const struct virtio_map_ops *map;
169170
struct list_head vqs;
170171
VIRTIO_DECLARE_FEATURES(features);
171172
void *priv;
173+
union virtio_map vmap;
172174
#ifdef CONFIG_VIRTIO_DEBUG
173175
struct dentry *debugfs_dir;
174176
u64 debugfs_filter_features[VIRTIO_FEATURES_DWORDS];
@@ -267,6 +269,29 @@ void unregister_virtio_driver(struct virtio_driver *drv);
267269
module_driver(__virtio_driver, register_virtio_driver, \
268270
unregister_virtio_driver)
269271

272+
273+
void *virtqueue_map_alloc_coherent(struct virtio_device *vdev,
274+
union virtio_map mapping_token,
275+
size_t size, dma_addr_t *dma_handle,
276+
gfp_t gfp);
277+
278+
void virtqueue_map_free_coherent(struct virtio_device *vdev,
279+
union virtio_map mapping_token,
280+
size_t size, void *vaddr,
281+
dma_addr_t dma_handle);
282+
283+
dma_addr_t virtqueue_map_page_attrs(const struct virtqueue *_vq,
284+
struct page *page,
285+
unsigned long offset,
286+
size_t size,
287+
enum dma_data_direction dir,
288+
unsigned long attrs);
289+
290+
void virtqueue_unmap_page_attrs(const struct virtqueue *_vq,
291+
dma_addr_t dma_handle,
292+
size_t size, enum dma_data_direction dir,
293+
unsigned long attrs);
294+
270295
dma_addr_t virtqueue_map_single_attrs(const struct virtqueue *_vq, void *ptr, size_t size,
271296
enum dma_data_direction dir, unsigned long attrs);
272297
void virtqueue_unmap_single_attrs(const struct virtqueue *_vq, dma_addr_t addr,

0 commit comments

Comments
 (0)