Skip to content

Commit 1c14b0e

Browse files
jasowang and mstsirkin
authored and committed
vduse: switch to use virtio map API instead of DMA API
Lacking the support of device specific mapping supported in virtio, VDUSE must trick the DMA API in order to make virtio-vdpa transport work. This is done by advertising vDPA device as dma device with a VDUSE specific dma_ops even if it doesn't do DMA at all. This will be fixed by this patch. Thanks to the new mapping operations support by virtio and vDPA. VDUSE can simply switch to advertise its specific mappings operations to virtio via virtio-vdpa then DMA API is not needed for VDUSE any more and iova domain could be used as the mapping token instead. Signed-off-by: Jason Wang <jasowang@redhat.com> Message-Id: <20250924070045.10361-3-jasowang@redhat.com> Signed-off-by: Michael S. Tsirkin <mst@redhat.com> Reviewed-by: Eugenio Pérez <eperezma@redhat.com>
1 parent 0d16cc4 commit 1c14b0e

5 files changed

Lines changed: 46 additions & 48 deletions

File tree

drivers/vdpa/Kconfig

Lines changed: 1 addition & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -34,13 +34,7 @@ config VDPA_SIM_BLOCK
3434

3535
config VDPA_USER
3636
tristate "VDUSE (vDPA Device in Userspace) support"
37-
depends on EVENTFD && MMU && HAS_DMA
38-
#
39-
# This driver incorrectly tries to override the dma_ops. It should
40-
# never have done that, but for now keep it working on architectures
41-
# that use dma ops
42-
#
43-
depends on ARCH_HAS_DMA_OPS
37+
depends on EVENTFD && MMU
4438
select VHOST_IOTLB
4539
select IOMMU_IOVA
4640
help

drivers/vdpa/vdpa_user/iova_domain.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -447,7 +447,7 @@ void vduse_domain_unmap_page(struct vduse_iova_domain *domain,
447447

448448
void *vduse_domain_alloc_coherent(struct vduse_iova_domain *domain,
449449
size_t size, dma_addr_t *dma_addr,
450-
gfp_t flag, unsigned long attrs)
450+
gfp_t flag)
451451
{
452452
struct iova_domain *iovad = &domain->consistent_iovad;
453453
unsigned long limit = domain->iova_limit;

drivers/vdpa/vdpa_user/iova_domain.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -64,7 +64,7 @@ void vduse_domain_unmap_page(struct vduse_iova_domain *domain,
6464

6565
void *vduse_domain_alloc_coherent(struct vduse_iova_domain *domain,
6666
size_t size, dma_addr_t *dma_addr,
67-
gfp_t flag, unsigned long attrs);
67+
gfp_t flag);
6868

6969
void vduse_domain_free_coherent(struct vduse_iova_domain *domain, size_t size,
7070
void *vaddr, dma_addr_t dma_addr,

drivers/vdpa/vdpa_user/vduse_dev.c

Lines changed: 39 additions & 39 deletions
Original file line numberDiff line numberDiff line change
@@ -814,59 +814,53 @@ static const struct vdpa_config_ops vduse_vdpa_config_ops = {
814814
.free = vduse_vdpa_free,
815815
};
816816

817-
static void vduse_dev_sync_single_for_device(struct device *dev,
817+
static void vduse_dev_sync_single_for_device(union virtio_map token,
818818
dma_addr_t dma_addr, size_t size,
819819
enum dma_data_direction dir)
820820
{
821-
struct vduse_dev *vdev = dev_to_vduse(dev);
822-
struct vduse_iova_domain *domain = vdev->domain;
821+
struct vduse_iova_domain *domain = token.iova_domain;
823822

824823
vduse_domain_sync_single_for_device(domain, dma_addr, size, dir);
825824
}
826825

827-
static void vduse_dev_sync_single_for_cpu(struct device *dev,
826+
static void vduse_dev_sync_single_for_cpu(union virtio_map token,
828827
dma_addr_t dma_addr, size_t size,
829828
enum dma_data_direction dir)
830829
{
831-
struct vduse_dev *vdev = dev_to_vduse(dev);
832-
struct vduse_iova_domain *domain = vdev->domain;
830+
struct vduse_iova_domain *domain = token.iova_domain;
833831

834832
vduse_domain_sync_single_for_cpu(domain, dma_addr, size, dir);
835833
}
836834

837-
static dma_addr_t vduse_dev_map_page(struct device *dev, struct page *page,
835+
static dma_addr_t vduse_dev_map_page(union virtio_map token, struct page *page,
838836
unsigned long offset, size_t size,
839837
enum dma_data_direction dir,
840838
unsigned long attrs)
841839
{
842-
struct vduse_dev *vdev = dev_to_vduse(dev);
843-
struct vduse_iova_domain *domain = vdev->domain;
840+
struct vduse_iova_domain *domain = token.iova_domain;
844841

845842
return vduse_domain_map_page(domain, page, offset, size, dir, attrs);
846843
}
847844

848-
static void vduse_dev_unmap_page(struct device *dev, dma_addr_t dma_addr,
849-
size_t size, enum dma_data_direction dir,
850-
unsigned long attrs)
845+
static void vduse_dev_unmap_page(union virtio_map token, dma_addr_t dma_addr,
846+
size_t size, enum dma_data_direction dir,
847+
unsigned long attrs)
851848
{
852-
struct vduse_dev *vdev = dev_to_vduse(dev);
853-
struct vduse_iova_domain *domain = vdev->domain;
849+
struct vduse_iova_domain *domain = token.iova_domain;
854850

855851
return vduse_domain_unmap_page(domain, dma_addr, size, dir, attrs);
856852
}
857853

858-
static void *vduse_dev_alloc_coherent(struct device *dev, size_t size,
859-
dma_addr_t *dma_addr, gfp_t flag,
860-
unsigned long attrs)
854+
static void *vduse_dev_alloc_coherent(union virtio_map token, size_t size,
855+
dma_addr_t *dma_addr, gfp_t flag)
861856
{
862-
struct vduse_dev *vdev = dev_to_vduse(dev);
863-
struct vduse_iova_domain *domain = vdev->domain;
857+
struct vduse_iova_domain *domain = token.iova_domain;
864858
unsigned long iova;
865859
void *addr;
866860

867861
*dma_addr = DMA_MAPPING_ERROR;
868862
addr = vduse_domain_alloc_coherent(domain, size,
869-
(dma_addr_t *)&iova, flag, attrs);
863+
(dma_addr_t *)&iova, flag);
870864
if (!addr)
871865
return NULL;
872866

@@ -875,31 +869,45 @@ static void *vduse_dev_alloc_coherent(struct device *dev, size_t size,
875869
return addr;
876870
}
877871

878-
static void vduse_dev_free_coherent(struct device *dev, size_t size,
879-
void *vaddr, dma_addr_t dma_addr,
880-
unsigned long attrs)
872+
static void vduse_dev_free_coherent(union virtio_map token, size_t size,
873+
void *vaddr, dma_addr_t dma_addr,
874+
unsigned long attrs)
881875
{
882-
struct vduse_dev *vdev = dev_to_vduse(dev);
883-
struct vduse_iova_domain *domain = vdev->domain;
876+
struct vduse_iova_domain *domain = token.iova_domain;
884877

885878
vduse_domain_free_coherent(domain, size, vaddr, dma_addr, attrs);
886879
}
887880

888-
static size_t vduse_dev_max_mapping_size(struct device *dev)
881+
static bool vduse_dev_need_sync(union virtio_map token, dma_addr_t dma_addr)
889882
{
890-
struct vduse_dev *vdev = dev_to_vduse(dev);
891-
struct vduse_iova_domain *domain = vdev->domain;
883+
struct vduse_iova_domain *domain = token.iova_domain;
884+
885+
return dma_addr < domain->bounce_size;
886+
}
887+
888+
static int vduse_dev_mapping_error(union virtio_map token, dma_addr_t dma_addr)
889+
{
890+
if (unlikely(dma_addr == DMA_MAPPING_ERROR))
891+
return -ENOMEM;
892+
return 0;
893+
}
894+
895+
static size_t vduse_dev_max_mapping_size(union virtio_map token)
896+
{
897+
struct vduse_iova_domain *domain = token.iova_domain;
892898

893899
return domain->bounce_size;
894900
}
895901

896-
static const struct dma_map_ops vduse_dev_dma_ops = {
902+
static const struct virtio_map_ops vduse_map_ops = {
897903
.sync_single_for_device = vduse_dev_sync_single_for_device,
898904
.sync_single_for_cpu = vduse_dev_sync_single_for_cpu,
899905
.map_page = vduse_dev_map_page,
900906
.unmap_page = vduse_dev_unmap_page,
901907
.alloc = vduse_dev_alloc_coherent,
902908
.free = vduse_dev_free_coherent,
909+
.need_sync = vduse_dev_need_sync,
910+
.mapping_error = vduse_dev_mapping_error,
903911
.max_mapping_size = vduse_dev_max_mapping_size,
904912
};
905913

@@ -2003,27 +2011,18 @@ static struct vduse_mgmt_dev *vduse_mgmt;
20032011
static int vduse_dev_init_vdpa(struct vduse_dev *dev, const char *name)
20042012
{
20052013
struct vduse_vdpa *vdev;
2006-
int ret;
20072014

20082015
if (dev->vdev)
20092016
return -EEXIST;
20102017

20112018
vdev = vdpa_alloc_device(struct vduse_vdpa, vdpa, dev->dev,
2012-
&vduse_vdpa_config_ops, NULL,
2019+
&vduse_vdpa_config_ops, &vduse_map_ops,
20132020
1, 1, name, true);
20142021
if (IS_ERR(vdev))
20152022
return PTR_ERR(vdev);
20162023

20172024
dev->vdev = vdev;
20182025
vdev->dev = dev;
2019-
vdev->vdpa.dev.dma_mask = &vdev->vdpa.dev.coherent_dma_mask;
2020-
ret = dma_set_mask_and_coherent(&vdev->vdpa.dev, DMA_BIT_MASK(64));
2021-
if (ret) {
2022-
put_device(&vdev->vdpa.dev);
2023-
return ret;
2024-
}
2025-
set_dma_ops(&vdev->vdpa.dev, &vduse_dev_dma_ops);
2026-
vdev->vdpa.vmap.dma_dev = &vdev->vdpa.dev;
20272026
vdev->vdpa.mdev = &vduse_mgmt->mgmt_dev;
20282027

20292028
return 0;
@@ -2056,6 +2055,7 @@ static int vdpa_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
20562055
return -ENOMEM;
20572056
}
20582057

2058+
dev->vdev->vdpa.vmap.iova_domain = dev->domain;
20592059
ret = _vdpa_register_device(&dev->vdev->vdpa, dev->vq_num);
20602060
if (ret) {
20612061
put_device(&dev->vdev->vdpa.dev);

include/linux/virtio.h

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -41,9 +41,13 @@ struct virtqueue {
4141
void *priv;
4242
};
4343

44+
struct vduse_iova_domain;
45+
4446
union virtio_map {
4547
/* Device that performs DMA */
4648
struct device *dma_dev;
49+
/* VDUSE specific mapping data */
50+
struct vduse_iova_domain *iova_domain;
4751
};
4852

4953
int virtqueue_add_outbuf(struct virtqueue *vq,

0 commit comments

Comments (0)