Skip to content

Commit 02e3f7f

Browse files
eugper
authored and mstsirkin committed
vduse: return internal vq group struct as map token
Return the internal struct that represents the vq group as the virtqueue map token, instead of the device. This allows the map functions to access the information per group. At this moment all the virtqueues share the same vq group, which can only point to ASID 0. This change prepares the infrastructure for actual per-group address space handling. Acked-by: Jason Wang <jasowang@redhat.com> Signed-off-by: Eugenio Pérez <eperezma@redhat.com> Signed-off-by: Michael S. Tsirkin <mst@redhat.com> Message-Id: <20260119143306.1818855-5-eperezma@redhat.com>
1 parent 9350a09 commit 02e3f7f

2 files changed

Lines changed: 94 additions & 12 deletions

File tree

drivers/vdpa/vdpa_user/vduse_dev.c

Lines changed: 91 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -22,6 +22,7 @@
2222
#include <linux/uio.h>
2323
#include <linux/vdpa.h>
2424
#include <linux/nospec.h>
25+
#include <linux/virtio.h>
2526
#include <linux/vmalloc.h>
2627
#include <linux/sched/mm.h>
2728
#include <uapi/linux/vduse.h>
@@ -85,6 +86,10 @@ struct vduse_umem {
8586
struct mm_struct *mm;
8687
};
8788

89+
struct vduse_vq_group {
90+
struct vduse_dev *dev;
91+
};
92+
8893
struct vduse_dev {
8994
struct vduse_vdpa *vdev;
9095
struct device *dev;
@@ -118,6 +123,7 @@ struct vduse_dev {
118123
u32 vq_align;
119124
u32 ngroups;
120125
struct vduse_umem *umem;
126+
struct vduse_vq_group *groups;
121127
struct mutex mem_lock;
122128
unsigned int bounce_size;
123129
struct mutex domain_lock;
@@ -605,6 +611,17 @@ static u32 vduse_get_vq_group(struct vdpa_device *vdpa, u16 idx)
605611
return dev->vqs[idx]->group;
606612
}
607613

614+
static union virtio_map vduse_get_vq_map(struct vdpa_device *vdpa, u16 idx)
615+
{
616+
struct vduse_dev *dev = vdpa_to_vduse(vdpa);
617+
u32 vq_group = vduse_get_vq_group(vdpa, idx);
618+
union virtio_map ret = {
619+
.group = &dev->groups[vq_group],
620+
};
621+
622+
return ret;
623+
}
624+
608625
static int vduse_vdpa_get_vq_state(struct vdpa_device *vdpa, u16 idx,
609626
struct vdpa_vq_state *state)
610627
{
@@ -825,14 +842,22 @@ static const struct vdpa_config_ops vduse_vdpa_config_ops = {
825842
.get_vq_affinity = vduse_vdpa_get_vq_affinity,
826843
.reset = vduse_vdpa_reset,
827844
.set_map = vduse_vdpa_set_map,
845+
.get_vq_map = vduse_get_vq_map,
828846
.free = vduse_vdpa_free,
829847
};
830848

831849
static void vduse_dev_sync_single_for_device(union virtio_map token,
832850
dma_addr_t dma_addr, size_t size,
833851
enum dma_data_direction dir)
834852
{
835-
struct vduse_iova_domain *domain = token.iova_domain;
853+
struct vduse_dev *vdev;
854+
struct vduse_iova_domain *domain;
855+
856+
if (!token.group)
857+
return;
858+
859+
vdev = token.group->dev;
860+
domain = vdev->domain;
836861

837862
vduse_domain_sync_single_for_device(domain, dma_addr, size, dir);
838863
}
@@ -841,7 +866,14 @@ static void vduse_dev_sync_single_for_cpu(union virtio_map token,
841866
dma_addr_t dma_addr, size_t size,
842867
enum dma_data_direction dir)
843868
{
844-
struct vduse_iova_domain *domain = token.iova_domain;
869+
struct vduse_dev *vdev;
870+
struct vduse_iova_domain *domain;
871+
872+
if (!token.group)
873+
return;
874+
875+
vdev = token.group->dev;
876+
domain = vdev->domain;
845877

846878
vduse_domain_sync_single_for_cpu(domain, dma_addr, size, dir);
847879
}
@@ -851,7 +883,14 @@ static dma_addr_t vduse_dev_map_page(union virtio_map token, struct page *page,
851883
enum dma_data_direction dir,
852884
unsigned long attrs)
853885
{
854-
struct vduse_iova_domain *domain = token.iova_domain;
886+
struct vduse_dev *vdev;
887+
struct vduse_iova_domain *domain;
888+
889+
if (!token.group)
890+
return DMA_MAPPING_ERROR;
891+
892+
vdev = token.group->dev;
893+
domain = vdev->domain;
855894

856895
return vduse_domain_map_page(domain, page, offset, size, dir, attrs);
857896
}
@@ -860,19 +899,32 @@ static void vduse_dev_unmap_page(union virtio_map token, dma_addr_t dma_addr,
860899
size_t size, enum dma_data_direction dir,
861900
unsigned long attrs)
862901
{
863-
struct vduse_iova_domain *domain = token.iova_domain;
902+
struct vduse_dev *vdev;
903+
struct vduse_iova_domain *domain;
904+
905+
if (!token.group)
906+
return;
907+
908+
vdev = token.group->dev;
909+
domain = vdev->domain;
864910

865911
return vduse_domain_unmap_page(domain, dma_addr, size, dir, attrs);
866912
}
867913

868914
static void *vduse_dev_alloc_coherent(union virtio_map token, size_t size,
869915
dma_addr_t *dma_addr, gfp_t flag)
870916
{
871-
struct vduse_iova_domain *domain = token.iova_domain;
917+
struct vduse_dev *vdev;
918+
struct vduse_iova_domain *domain;
872919
unsigned long iova;
873920
void *addr;
874921

875922
*dma_addr = DMA_MAPPING_ERROR;
923+
if (!token.group)
924+
return NULL;
925+
926+
vdev = token.group->dev;
927+
domain = vdev->domain;
876928
addr = vduse_domain_alloc_coherent(domain, size,
877929
(dma_addr_t *)&iova, flag);
878930
if (!addr)
@@ -887,14 +939,28 @@ static void vduse_dev_free_coherent(union virtio_map token, size_t size,
887939
void *vaddr, dma_addr_t dma_addr,
888940
unsigned long attrs)
889941
{
890-
struct vduse_iova_domain *domain = token.iova_domain;
942+
struct vduse_dev *vdev;
943+
struct vduse_iova_domain *domain;
944+
945+
if (!token.group)
946+
return;
947+
948+
vdev = token.group->dev;
949+
domain = vdev->domain;
891950

892951
vduse_domain_free_coherent(domain, size, vaddr, dma_addr, attrs);
893952
}
894953

895954
static bool vduse_dev_need_sync(union virtio_map token, dma_addr_t dma_addr)
896955
{
897-
struct vduse_iova_domain *domain = token.iova_domain;
956+
struct vduse_dev *vdev;
957+
struct vduse_iova_domain *domain;
958+
959+
if (!token.group)
960+
return false;
961+
962+
vdev = token.group->dev;
963+
domain = vdev->domain;
898964

899965
return dma_addr < domain->bounce_size;
900966
}
@@ -908,7 +974,14 @@ static int vduse_dev_mapping_error(union virtio_map token, dma_addr_t dma_addr)
908974

909975
static size_t vduse_dev_max_mapping_size(union virtio_map token)
910976
{
911-
struct vduse_iova_domain *domain = token.iova_domain;
977+
struct vduse_dev *vdev;
978+
struct vduse_iova_domain *domain;
979+
980+
if (!token.group)
981+
return 0;
982+
983+
vdev = token.group->dev;
984+
domain = vdev->domain;
912985

913986
return domain->bounce_size;
914987
}
@@ -1726,6 +1799,7 @@ static int vduse_destroy_dev(char *name)
17261799
if (dev->domain)
17271800
vduse_domain_destroy(dev->domain);
17281801
kfree(dev->name);
1802+
kfree(dev->groups);
17291803
vduse_dev_destroy(dev);
17301804
module_put(THIS_MODULE);
17311805

@@ -1895,6 +1969,13 @@ static int vduse_create_dev(struct vduse_dev_config *config,
18951969
dev->ngroups = (dev->api_version < VDUSE_API_VERSION_1)
18961970
? 1
18971971
: config->ngroups;
1972+
dev->groups = kcalloc(dev->ngroups, sizeof(dev->groups[0]),
1973+
GFP_KERNEL);
1974+
if (!dev->groups)
1975+
goto err_vq_groups;
1976+
for (u32 i = 0; i < dev->ngroups; ++i)
1977+
dev->groups[i].dev = dev;
1978+
18981979
dev->name = kstrdup(config->name, GFP_KERNEL);
18991980
if (!dev->name)
19001981
goto err_str;
@@ -1931,6 +2012,8 @@ static int vduse_create_dev(struct vduse_dev_config *config,
19312012
err_idr:
19322013
kfree(dev->name);
19332014
err_str:
2015+
kfree(dev->groups);
2016+
err_vq_groups:
19342017
vduse_dev_destroy(dev);
19352018
err:
19362019
return ret;
@@ -2092,7 +2175,6 @@ static int vdpa_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
20922175
return -ENOMEM;
20932176
}
20942177

2095-
dev->vdev->vdpa.vmap.iova_domain = dev->domain;
20962178
ret = _vdpa_register_device(&dev->vdev->vdpa, dev->vq_num);
20972179
if (ret) {
20982180
put_device(&dev->vdev->vdpa.dev);

include/linux/virtio.h

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -43,13 +43,13 @@ struct virtqueue {
4343
void *priv;
4444
};
4545

46-
struct vduse_iova_domain;
46+
struct vduse_vq_group;
4747

4848
union virtio_map {
4949
/* Device that performs DMA */
5050
struct device *dma_dev;
51-
/* VDUSE specific mapping data */
52-
struct vduse_iova_domain *iova_domain;
51+
/* VDUSE specific virtqueue group for doing map */
52+
struct vduse_vq_group *group;
5353
};
5454

5555
int virtqueue_add_outbuf(struct virtqueue *vq,

0 commit comments

Comments
 (0)