Skip to content

Commit b16060c

Browse files
jasowang and mstsirkin
authored and committed
virtio: introduce virtio_map container union
Following patch will introduce the mapping operations for virtio device. In order to achieve this, besides the dma device, virtio core needs to support a transport or device specific mapping metadata as well. So this patch introduces a union container of a dma device. The idea is the allow the transport layer to pass device specific mapping metadata which will be used as a parameter for the virtio mapping operations. For the transport or device that is using DMA, dma device is still being used. Signed-off-by: Jason Wang <jasowang@redhat.com> Message-Id: <20250821064641.5025-5-jasowang@redhat.com> Signed-off-by: Michael S. Tsirkin <mst@redhat.com> Tested-by: Lei Yang <leiyang@redhat.com> Reviewed-by: Eugenio Pérez <eperezma@redhat.com>
1 parent b41cb3b commit b16060c

4 files changed

Lines changed: 66 additions & 56 deletions

File tree

drivers/virtio/virtio_ring.c

Lines changed: 53 additions & 51 deletions
Original file line numberDiff line numberDiff line change
@@ -210,8 +210,7 @@ struct vring_virtqueue {
210210
/* DMA, allocation, and size information */
211211
bool we_own_ring;
212212

213-
/* Device used for doing DMA */
214-
struct device *dma_dev;
213+
union virtio_map map;
215214

216215
#ifdef DEBUG
217216
/* They're supposed to lock for us. */
@@ -307,10 +306,10 @@ EXPORT_SYMBOL_GPL(virtio_max_dma_size);
307306

308307
static void *vring_alloc_queue(struct virtio_device *vdev, size_t size,
309308
dma_addr_t *dma_handle, gfp_t flag,
310-
struct device *dma_dev)
309+
union virtio_map map)
311310
{
312311
if (vring_use_map_api(vdev)) {
313-
return dma_alloc_coherent(dma_dev, size,
312+
return dma_alloc_coherent(map.dma_dev, size,
314313
dma_handle, flag);
315314
} else {
316315
void *queue = alloc_pages_exact(PAGE_ALIGN(size), flag);
@@ -341,10 +340,10 @@ static void *vring_alloc_queue(struct virtio_device *vdev, size_t size,
341340

342341
static void vring_free_queue(struct virtio_device *vdev, size_t size,
343342
void *queue, dma_addr_t dma_handle,
344-
struct device *dma_dev)
343+
union virtio_map map)
345344
{
346345
if (vring_use_map_api(vdev))
347-
dma_free_coherent(dma_dev, size, queue, dma_handle);
346+
dma_free_coherent(map.dma_dev, size, queue, dma_handle);
348347
else
349348
free_pages_exact(queue, PAGE_ALIGN(size));
350349
}
@@ -356,7 +355,7 @@ static void vring_free_queue(struct virtio_device *vdev, size_t size,
356355
*/
357356
static struct device *vring_dma_dev(const struct vring_virtqueue *vq)
358357
{
359-
return vq->dma_dev;
358+
return vq->map.dma_dev;
360359
}
361360

362361
/* Map one sg entry. */
@@ -1056,12 +1055,13 @@ static int vring_alloc_state_extra_split(struct vring_virtqueue_split *vring_spl
10561055
}
10571056

10581057
static void vring_free_split(struct vring_virtqueue_split *vring_split,
1059-
struct virtio_device *vdev, struct device *dma_dev)
1058+
struct virtio_device *vdev,
1059+
union virtio_map map)
10601060
{
10611061
vring_free_queue(vdev, vring_split->queue_size_in_bytes,
10621062
vring_split->vring.desc,
10631063
vring_split->queue_dma_addr,
1064-
dma_dev);
1064+
map);
10651065

10661066
kfree(vring_split->desc_state);
10671067
kfree(vring_split->desc_extra);
@@ -1072,7 +1072,7 @@ static int vring_alloc_queue_split(struct vring_virtqueue_split *vring_split,
10721072
u32 num,
10731073
unsigned int vring_align,
10741074
bool may_reduce_num,
1075-
struct device *dma_dev)
1075+
union virtio_map map)
10761076
{
10771077
void *queue = NULL;
10781078
dma_addr_t dma_addr;
@@ -1088,7 +1088,7 @@ static int vring_alloc_queue_split(struct vring_virtqueue_split *vring_split,
10881088
queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
10891089
&dma_addr,
10901090
GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO,
1091-
dma_dev);
1091+
map);
10921092
if (queue)
10931093
break;
10941094
if (!may_reduce_num)
@@ -1102,7 +1102,7 @@ static int vring_alloc_queue_split(struct vring_virtqueue_split *vring_split,
11021102
/* Try to get a single page. You are my only hope! */
11031103
queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
11041104
&dma_addr, GFP_KERNEL | __GFP_ZERO,
1105-
dma_dev);
1105+
map);
11061106
}
11071107
if (!queue)
11081108
return -ENOMEM;
@@ -1126,7 +1126,7 @@ static struct virtqueue *__vring_new_virtqueue_split(unsigned int index,
11261126
bool (*notify)(struct virtqueue *),
11271127
void (*callback)(struct virtqueue *),
11281128
const char *name,
1129-
struct device *dma_dev)
1129+
union virtio_map map)
11301130
{
11311131
struct vring_virtqueue *vq;
11321132
int err;
@@ -1149,7 +1149,7 @@ static struct virtqueue *__vring_new_virtqueue_split(unsigned int index,
11491149
#else
11501150
vq->broken = false;
11511151
#endif
1152-
vq->dma_dev = dma_dev;
1152+
vq->map = map;
11531153
vq->use_map_api = vring_use_map_api(vdev);
11541154

11551155
vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
@@ -1187,21 +1187,21 @@ static struct virtqueue *vring_create_virtqueue_split(
11871187
bool (*notify)(struct virtqueue *),
11881188
void (*callback)(struct virtqueue *),
11891189
const char *name,
1190-
struct device *dma_dev)
1190+
union virtio_map map)
11911191
{
11921192
struct vring_virtqueue_split vring_split = {};
11931193
struct virtqueue *vq;
11941194
int err;
11951195

11961196
err = vring_alloc_queue_split(&vring_split, vdev, num, vring_align,
1197-
may_reduce_num, dma_dev);
1197+
may_reduce_num, map);
11981198
if (err)
11991199
return NULL;
12001200

12011201
vq = __vring_new_virtqueue_split(index, &vring_split, vdev, weak_barriers,
1202-
context, notify, callback, name, dma_dev);
1202+
context, notify, callback, name, map);
12031203
if (!vq) {
1204-
vring_free_split(&vring_split, vdev, dma_dev);
1204+
vring_free_split(&vring_split, vdev, map);
12051205
return NULL;
12061206
}
12071207

@@ -1220,7 +1220,7 @@ static int virtqueue_resize_split(struct virtqueue *_vq, u32 num)
12201220
err = vring_alloc_queue_split(&vring_split, vdev, num,
12211221
vq->split.vring_align,
12221222
vq->split.may_reduce_num,
1223-
vring_dma_dev(vq));
1223+
vq->map);
12241224
if (err)
12251225
goto err;
12261226

@@ -1238,7 +1238,7 @@ static int virtqueue_resize_split(struct virtqueue *_vq, u32 num)
12381238
return 0;
12391239

12401240
err_state_extra:
1241-
vring_free_split(&vring_split, vdev, vring_dma_dev(vq));
1241+
vring_free_split(&vring_split, vdev, vq->map);
12421242
err:
12431243
virtqueue_reinit_split(vq);
12441244
return -ENOMEM;
@@ -1947,33 +1947,33 @@ static struct vring_desc_extra *vring_alloc_desc_extra(unsigned int num)
19471947

19481948
static void vring_free_packed(struct vring_virtqueue_packed *vring_packed,
19491949
struct virtio_device *vdev,
1950-
struct device *dma_dev)
1950+
union virtio_map map)
19511951
{
19521952
if (vring_packed->vring.desc)
19531953
vring_free_queue(vdev, vring_packed->ring_size_in_bytes,
19541954
vring_packed->vring.desc,
19551955
vring_packed->ring_dma_addr,
1956-
dma_dev);
1956+
map);
19571957

19581958
if (vring_packed->vring.driver)
19591959
vring_free_queue(vdev, vring_packed->event_size_in_bytes,
19601960
vring_packed->vring.driver,
19611961
vring_packed->driver_event_dma_addr,
1962-
dma_dev);
1962+
map);
19631963

19641964
if (vring_packed->vring.device)
19651965
vring_free_queue(vdev, vring_packed->event_size_in_bytes,
19661966
vring_packed->vring.device,
19671967
vring_packed->device_event_dma_addr,
1968-
dma_dev);
1968+
map);
19691969

19701970
kfree(vring_packed->desc_state);
19711971
kfree(vring_packed->desc_extra);
19721972
}
19731973

19741974
static int vring_alloc_queue_packed(struct vring_virtqueue_packed *vring_packed,
19751975
struct virtio_device *vdev,
1976-
u32 num, struct device *dma_dev)
1976+
u32 num, union virtio_map map)
19771977
{
19781978
struct vring_packed_desc *ring;
19791979
struct vring_packed_desc_event *driver, *device;
@@ -1985,7 +1985,7 @@ static int vring_alloc_queue_packed(struct vring_virtqueue_packed *vring_packed,
19851985
ring = vring_alloc_queue(vdev, ring_size_in_bytes,
19861986
&ring_dma_addr,
19871987
GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO,
1988-
dma_dev);
1988+
map);
19891989
if (!ring)
19901990
goto err;
19911991

@@ -1998,7 +1998,7 @@ static int vring_alloc_queue_packed(struct vring_virtqueue_packed *vring_packed,
19981998
driver = vring_alloc_queue(vdev, event_size_in_bytes,
19991999
&driver_event_dma_addr,
20002000
GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO,
2001-
dma_dev);
2001+
map);
20022002
if (!driver)
20032003
goto err;
20042004

@@ -2009,7 +2009,7 @@ static int vring_alloc_queue_packed(struct vring_virtqueue_packed *vring_packed,
20092009
device = vring_alloc_queue(vdev, event_size_in_bytes,
20102010
&device_event_dma_addr,
20112011
GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO,
2012-
dma_dev);
2012+
map);
20132013
if (!device)
20142014
goto err;
20152015

@@ -2021,7 +2021,7 @@ static int vring_alloc_queue_packed(struct vring_virtqueue_packed *vring_packed,
20212021
return 0;
20222022

20232023
err:
2024-
vring_free_packed(vring_packed, vdev, dma_dev);
2024+
vring_free_packed(vring_packed, vdev, map);
20252025
return -ENOMEM;
20262026
}
20272027

@@ -2097,7 +2097,7 @@ static struct virtqueue *__vring_new_virtqueue_packed(unsigned int index,
20972097
bool (*notify)(struct virtqueue *),
20982098
void (*callback)(struct virtqueue *),
20992099
const char *name,
2100-
struct device *dma_dev)
2100+
union virtio_map map)
21012101
{
21022102
struct vring_virtqueue *vq;
21032103
int err;
@@ -2120,7 +2120,7 @@ static struct virtqueue *__vring_new_virtqueue_packed(unsigned int index,
21202120
vq->broken = false;
21212121
#endif
21222122
vq->packed_ring = true;
2123-
vq->dma_dev = dma_dev;
2123+
vq->map = map;
21242124
vq->use_map_api = vring_use_map_api(vdev);
21252125

21262126
vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
@@ -2158,18 +2158,18 @@ static struct virtqueue *vring_create_virtqueue_packed(
21582158
bool (*notify)(struct virtqueue *),
21592159
void (*callback)(struct virtqueue *),
21602160
const char *name,
2161-
struct device *dma_dev)
2161+
union virtio_map map)
21622162
{
21632163
struct vring_virtqueue_packed vring_packed = {};
21642164
struct virtqueue *vq;
21652165

2166-
if (vring_alloc_queue_packed(&vring_packed, vdev, num, dma_dev))
2166+
if (vring_alloc_queue_packed(&vring_packed, vdev, num, map))
21672167
return NULL;
21682168

21692169
vq = __vring_new_virtqueue_packed(index, &vring_packed, vdev, weak_barriers,
2170-
context, notify, callback, name, dma_dev);
2170+
context, notify, callback, name, map);
21712171
if (!vq) {
2172-
vring_free_packed(&vring_packed, vdev, dma_dev);
2172+
vring_free_packed(&vring_packed, vdev, map);
21732173
return NULL;
21742174
}
21752175

@@ -2185,7 +2185,7 @@ static int virtqueue_resize_packed(struct virtqueue *_vq, u32 num)
21852185
struct virtio_device *vdev = _vq->vdev;
21862186
int err;
21872187

2188-
if (vring_alloc_queue_packed(&vring_packed, vdev, num, vring_dma_dev(vq)))
2188+
if (vring_alloc_queue_packed(&vring_packed, vdev, num, vq->map))
21892189
goto err_ring;
21902190

21912191
err = vring_alloc_state_extra_packed(&vring_packed);
@@ -2202,7 +2202,7 @@ static int virtqueue_resize_packed(struct virtqueue *_vq, u32 num)
22022202
return 0;
22032203

22042204
err_state_extra:
2205-
vring_free_packed(&vring_packed, vdev, vring_dma_dev(vq));
2205+
vring_free_packed(&vring_packed, vdev, vq->map);
22062206
err_ring:
22072207
virtqueue_reinit_packed(vq);
22082208
return -ENOMEM;
@@ -2434,7 +2434,7 @@ struct device *virtqueue_dma_dev(struct virtqueue *_vq)
24342434
struct vring_virtqueue *vq = to_vvq(_vq);
24352435

24362436
if (vq->use_map_api)
2437-
return vring_dma_dev(vq);
2437+
return vq->map.dma_dev;
24382438
else
24392439
return NULL;
24402440
}
@@ -2719,19 +2719,20 @@ struct virtqueue *vring_create_virtqueue(
27192719
void (*callback)(struct virtqueue *),
27202720
const char *name)
27212721
{
2722+
union virtio_map map = {.dma_dev = vdev->dev.parent};
27222723

27232724
if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
27242725
return vring_create_virtqueue_packed(index, num, vring_align,
27252726
vdev, weak_barriers, may_reduce_num,
2726-
context, notify, callback, name, vdev->dev.parent);
2727+
context, notify, callback, name, map);
27272728

27282729
return vring_create_virtqueue_split(index, num, vring_align,
27292730
vdev, weak_barriers, may_reduce_num,
2730-
context, notify, callback, name, vdev->dev.parent);
2731+
context, notify, callback, name, map);
27312732
}
27322733
EXPORT_SYMBOL_GPL(vring_create_virtqueue);
27332734

2734-
struct virtqueue *vring_create_virtqueue_dma(
2735+
struct virtqueue *vring_create_virtqueue_map(
27352736
unsigned int index,
27362737
unsigned int num,
27372738
unsigned int vring_align,
@@ -2742,19 +2743,19 @@ struct virtqueue *vring_create_virtqueue_dma(
27422743
bool (*notify)(struct virtqueue *),
27432744
void (*callback)(struct virtqueue *),
27442745
const char *name,
2745-
struct device *dma_dev)
2746+
union virtio_map map)
27462747
{
27472748

27482749
if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
27492750
return vring_create_virtqueue_packed(index, num, vring_align,
27502751
vdev, weak_barriers, may_reduce_num,
2751-
context, notify, callback, name, dma_dev);
2752+
context, notify, callback, name, map);
27522753

27532754
return vring_create_virtqueue_split(index, num, vring_align,
27542755
vdev, weak_barriers, may_reduce_num,
2755-
context, notify, callback, name, dma_dev);
2756+
context, notify, callback, name, map);
27562757
}
2757-
EXPORT_SYMBOL_GPL(vring_create_virtqueue_dma);
2758+
EXPORT_SYMBOL_GPL(vring_create_virtqueue_map);
27582759

27592760
/**
27602761
* virtqueue_resize - resize the vring of vq
@@ -2865,6 +2866,7 @@ struct virtqueue *vring_new_virtqueue(unsigned int index,
28652866
const char *name)
28662867
{
28672868
struct vring_virtqueue_split vring_split = {};
2869+
union virtio_map map = {.dma_dev = vdev->dev.parent};
28682870

28692871
if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
28702872
struct vring_virtqueue_packed vring_packed = {};
@@ -2874,13 +2876,13 @@ struct virtqueue *vring_new_virtqueue(unsigned int index,
28742876
return __vring_new_virtqueue_packed(index, &vring_packed,
28752877
vdev, weak_barriers,
28762878
context, notify, callback,
2877-
name, vdev->dev.parent);
2879+
name, map);
28782880
}
28792881

28802882
vring_init(&vring_split.vring, num, pages, vring_align);
28812883
return __vring_new_virtqueue_split(index, &vring_split, vdev, weak_barriers,
28822884
context, notify, callback, name,
2883-
vdev->dev.parent);
2885+
map);
28842886
}
28852887
EXPORT_SYMBOL_GPL(vring_new_virtqueue);
28862888

@@ -2894,19 +2896,19 @@ static void vring_free(struct virtqueue *_vq)
28942896
vq->packed.ring_size_in_bytes,
28952897
vq->packed.vring.desc,
28962898
vq->packed.ring_dma_addr,
2897-
vring_dma_dev(vq));
2899+
vq->map);
28982900

28992901
vring_free_queue(vq->vq.vdev,
29002902
vq->packed.event_size_in_bytes,
29012903
vq->packed.vring.driver,
29022904
vq->packed.driver_event_dma_addr,
2903-
vring_dma_dev(vq));
2905+
vq->map);
29042906

29052907
vring_free_queue(vq->vq.vdev,
29062908
vq->packed.event_size_in_bytes,
29072909
vq->packed.vring.device,
29082910
vq->packed.device_event_dma_addr,
2909-
vring_dma_dev(vq));
2911+
vq->map);
29102912

29112913
kfree(vq->packed.desc_state);
29122914
kfree(vq->packed.desc_extra);
@@ -2915,7 +2917,7 @@ static void vring_free(struct virtqueue *_vq)
29152917
vq->split.queue_size_in_bytes,
29162918
vq->split.vring.desc,
29172919
vq->split.queue_dma_addr,
2918-
vring_dma_dev(vq));
2920+
vq->map);
29192921
}
29202922
}
29212923
if (!vq->packed_ring) {

0 commit comments

Comments (0)