Skip to content

Commit 5fc6dd1

Browse files
committed
virtio: add virtqueue_add_inbuf_cache_clean API
Add virtqueue_add_inbuf_cache_clean() for passing DMA_ATTR_CPU_CACHE_CLEAN to virtqueue operations. This suppresses DMA debug cacheline overlap warnings for buffers where proper cache management is ensured by the caller.

Message-ID: <e50d38c974859e731e50bda7a0ee5691debf5bc4.1767601130.git.mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
1 parent d5d8465 commit 5fc6dd1

2 files changed

Lines changed: 65 additions & 23 deletions

File tree

drivers/virtio/virtio_ring.c

Lines changed: 60 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -174,7 +174,8 @@ struct virtqueue_ops {
174174
int (*add)(struct vring_virtqueue *vq, struct scatterlist *sgs[],
175175
unsigned int total_sg, unsigned int out_sgs,
176176
unsigned int in_sgs, void *data,
177-
void *ctx, bool premapped, gfp_t gfp);
177+
void *ctx, bool premapped, gfp_t gfp,
178+
unsigned long attr);
178179
void *(*get)(struct vring_virtqueue *vq, unsigned int *len, void **ctx);
179180
bool (*kick_prepare)(struct vring_virtqueue *vq);
180181
void (*disable_cb)(struct vring_virtqueue *vq);
@@ -444,7 +445,7 @@ static int vring_mapping_error(const struct vring_virtqueue *vq,
444445
/* Map one sg entry. */
445446
static int vring_map_one_sg(const struct vring_virtqueue *vq, struct scatterlist *sg,
446447
enum dma_data_direction direction, dma_addr_t *addr,
447-
u32 *len, bool premapped)
448+
u32 *len, bool premapped, unsigned long attr)
448449
{
449450
if (premapped) {
450451
*addr = sg_dma_address(sg);
@@ -472,7 +473,7 @@ static int vring_map_one_sg(const struct vring_virtqueue *vq, struct scatterlist
472473
*/
473474
*addr = virtqueue_map_page_attrs(&vq->vq, sg_page(sg),
474475
sg->offset, sg->length,
475-
direction, 0);
476+
direction, attr);
476477

477478
if (vring_mapping_error(vq, *addr))
478479
return -ENOMEM;
@@ -603,7 +604,8 @@ static inline int virtqueue_add_split(struct vring_virtqueue *vq,
603604
void *data,
604605
void *ctx,
605606
bool premapped,
606-
gfp_t gfp)
607+
gfp_t gfp,
608+
unsigned long attr)
607609
{
608610
struct vring_desc_extra *extra;
609611
struct scatterlist *sg;
@@ -675,7 +677,8 @@ static inline int virtqueue_add_split(struct vring_virtqueue *vq,
675677
if (++sg_count != total_sg)
676678
flags |= VRING_DESC_F_NEXT;
677679

678-
if (vring_map_one_sg(vq, sg, DMA_TO_DEVICE, &addr, &len, premapped))
680+
if (vring_map_one_sg(vq, sg, DMA_TO_DEVICE, &addr, &len,
681+
premapped, attr))
679682
goto unmap_release;
680683

681684
/* Note that we trust indirect descriptor
@@ -694,7 +697,8 @@ static inline int virtqueue_add_split(struct vring_virtqueue *vq,
694697
if (++sg_count != total_sg)
695698
flags |= VRING_DESC_F_NEXT;
696699

697-
if (vring_map_one_sg(vq, sg, DMA_FROM_DEVICE, &addr, &len, premapped))
700+
if (vring_map_one_sg(vq, sg, DMA_FROM_DEVICE, &addr, &len,
701+
premapped, attr))
698702
goto unmap_release;
699703

700704
/* Note that we trust indirect descriptor
@@ -1487,7 +1491,8 @@ static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
14871491
void *data,
14881492
bool premapped,
14891493
gfp_t gfp,
1490-
u16 id)
1494+
u16 id,
1495+
unsigned long attr)
14911496
{
14921497
struct vring_desc_extra *extra;
14931498
struct vring_packed_desc *desc;
@@ -1516,7 +1521,7 @@ static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
15161521
for (sg = sgs[n]; sg; sg = sg_next(sg)) {
15171522
if (vring_map_one_sg(vq, sg, n < out_sgs ?
15181523
DMA_TO_DEVICE : DMA_FROM_DEVICE,
1519-
&addr, &len, premapped))
1524+
&addr, &len, premapped, attr))
15201525
goto unmap_release;
15211526

15221527
desc[i].flags = cpu_to_le16(n < out_sgs ?
@@ -1615,7 +1620,8 @@ static inline int virtqueue_add_packed(struct vring_virtqueue *vq,
16151620
void *data,
16161621
void *ctx,
16171622
bool premapped,
1618-
gfp_t gfp)
1623+
gfp_t gfp,
1624+
unsigned long attr)
16191625
{
16201626
struct vring_packed_desc *desc;
16211627
struct scatterlist *sg;
@@ -1642,8 +1648,8 @@ static inline int virtqueue_add_packed(struct vring_virtqueue *vq,
16421648
id = vq->free_head;
16431649
BUG_ON(id == vq->packed.vring.num);
16441650
err = virtqueue_add_indirect_packed(vq, sgs, total_sg, out_sgs,
1645-
in_sgs, data, premapped,
1646-
gfp, id);
1651+
in_sgs, data, premapped, gfp,
1652+
id, attr);
16471653
if (err != -ENOMEM) {
16481654
END_USE(vq);
16491655
return err;
@@ -1679,7 +1685,7 @@ static inline int virtqueue_add_packed(struct vring_virtqueue *vq,
16791685

16801686
if (vring_map_one_sg(vq, sg, n < out_sgs ?
16811687
DMA_TO_DEVICE : DMA_FROM_DEVICE,
1682-
&addr, &len, premapped))
1688+
&addr, &len, premapped, attr))
16831689
goto unmap_release;
16841690

16851691
flags = cpu_to_le16(vq->packed.avail_used_flags |
@@ -1772,7 +1778,8 @@ static inline int virtqueue_add_packed_in_order(struct vring_virtqueue *vq,
17721778
void *data,
17731779
void *ctx,
17741780
bool premapped,
1775-
gfp_t gfp)
1781+
gfp_t gfp,
1782+
unsigned long attr)
17761783
{
17771784
struct vring_packed_desc *desc;
17781785
struct scatterlist *sg;
@@ -1799,7 +1806,8 @@ static inline int virtqueue_add_packed_in_order(struct vring_virtqueue *vq,
17991806
if (virtqueue_use_indirect(vq, total_sg)) {
18001807
err = virtqueue_add_indirect_packed(vq, sgs, total_sg, out_sgs,
18011808
in_sgs, data, premapped, gfp,
1802-
vq->packed.next_avail_idx);
1809+
vq->packed.next_avail_idx,
1810+
attr);
18031811
if (err != -ENOMEM) {
18041812
END_USE(vq);
18051813
return err;
@@ -1838,7 +1846,7 @@ static inline int virtqueue_add_packed_in_order(struct vring_virtqueue *vq,
18381846

18391847
if (vring_map_one_sg(vq, sg, n < out_sgs ?
18401848
DMA_TO_DEVICE : DMA_FROM_DEVICE,
1841-
&addr, &len, premapped))
1849+
&addr, &len, premapped, attr))
18421850
goto unmap_release;
18431851

18441852
flags |= cpu_to_le16(vq->packed.avail_used_flags);
@@ -2781,13 +2789,14 @@ static inline int virtqueue_add(struct virtqueue *_vq,
27812789
void *data,
27822790
void *ctx,
27832791
bool premapped,
2784-
gfp_t gfp)
2792+
gfp_t gfp,
2793+
unsigned long attr)
27852794
{
27862795
struct vring_virtqueue *vq = to_vvq(_vq);
27872796

27882797
return VIRTQUEUE_CALL(vq, add, sgs, total_sg,
27892798
out_sgs, in_sgs, data,
2790-
ctx, premapped, gfp);
2799+
ctx, premapped, gfp, attr);
27912800
}
27922801

27932802
/**
@@ -2825,7 +2834,7 @@ int virtqueue_add_sgs(struct virtqueue *_vq,
28252834
total_sg++;
28262835
}
28272836
return virtqueue_add(_vq, sgs, total_sg, out_sgs, in_sgs,
2828-
data, NULL, false, gfp);
2837+
data, NULL, false, gfp, 0);
28292838
}
28302839
EXPORT_SYMBOL_GPL(virtqueue_add_sgs);
28312840

@@ -2847,7 +2856,7 @@ int virtqueue_add_outbuf(struct virtqueue *vq,
28472856
void *data,
28482857
gfp_t gfp)
28492858
{
2850-
return virtqueue_add(vq, &sg, num, 1, 0, data, NULL, false, gfp);
2859+
return virtqueue_add(vq, &sg, num, 1, 0, data, NULL, false, gfp, 0);
28512860
}
28522861
EXPORT_SYMBOL_GPL(virtqueue_add_outbuf);
28532862

@@ -2870,7 +2879,7 @@ int virtqueue_add_outbuf_premapped(struct virtqueue *vq,
28702879
void *data,
28712880
gfp_t gfp)
28722881
{
2873-
return virtqueue_add(vq, &sg, num, 1, 0, data, NULL, true, gfp);
2882+
return virtqueue_add(vq, &sg, num, 1, 0, data, NULL, true, gfp, 0);
28742883
}
28752884
EXPORT_SYMBOL_GPL(virtqueue_add_outbuf_premapped);
28762885

@@ -2892,10 +2901,38 @@ int virtqueue_add_inbuf(struct virtqueue *vq,
28922901
void *data,
28932902
gfp_t gfp)
28942903
{
2895-
return virtqueue_add(vq, &sg, num, 0, 1, data, NULL, false, gfp);
2904+
return virtqueue_add(vq, &sg, num, 0, 1, data, NULL, false, gfp, 0);
28962905
}
28972906
EXPORT_SYMBOL_GPL(virtqueue_add_inbuf);
28982907

2908+
/**
2909+
* virtqueue_add_inbuf_cache_clean - expose input buffers with cache clean
2910+
* @vq: the struct virtqueue we're talking about.
2911+
* @sg: scatterlist (must be well-formed and terminated!)
2912+
* @num: the number of entries in @sg writable by other side
2913+
* @data: the token identifying the buffer.
2914+
* @gfp: how to do memory allocations (if necessary).
2915+
*
2916+
* Same as virtqueue_add_inbuf but passes DMA_ATTR_CPU_CACHE_CLEAN to indicate
2917+
* that the CPU will not dirty any cacheline overlapping this buffer while it
2918+
* is available, and to suppress overlapping cacheline warnings in DMA debug
2919+
* builds.
2920+
*
2921+
* Caller must ensure we don't call this with other virtqueue operations
2922+
* at the same time (except where noted).
2923+
*
2924+
* Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
2925+
*/
2926+
int virtqueue_add_inbuf_cache_clean(struct virtqueue *vq,
2927+
struct scatterlist *sg, unsigned int num,
2928+
void *data,
2929+
gfp_t gfp)
2930+
{
2931+
return virtqueue_add(vq, &sg, num, 0, 1, data, NULL, false, gfp,
2932+
DMA_ATTR_CPU_CACHE_CLEAN);
2933+
}
2934+
EXPORT_SYMBOL_GPL(virtqueue_add_inbuf_cache_clean);
2935+
28992936
/**
29002937
* virtqueue_add_inbuf_ctx - expose input buffers to other end
29012938
* @vq: the struct virtqueue we're talking about.
@@ -2916,7 +2953,7 @@ int virtqueue_add_inbuf_ctx(struct virtqueue *vq,
29162953
void *ctx,
29172954
gfp_t gfp)
29182955
{
2919-
return virtqueue_add(vq, &sg, num, 0, 1, data, ctx, false, gfp);
2956+
return virtqueue_add(vq, &sg, num, 0, 1, data, ctx, false, gfp, 0);
29202957
}
29212958
EXPORT_SYMBOL_GPL(virtqueue_add_inbuf_ctx);
29222959

@@ -2941,7 +2978,7 @@ int virtqueue_add_inbuf_premapped(struct virtqueue *vq,
29412978
void *ctx,
29422979
gfp_t gfp)
29432980
{
2944-
return virtqueue_add(vq, &sg, num, 0, 1, data, ctx, true, gfp);
2981+
return virtqueue_add(vq, &sg, num, 0, 1, data, ctx, true, gfp, 0);
29452982
}
29462983
EXPORT_SYMBOL_GPL(virtqueue_add_inbuf_premapped);
29472984

include/linux/virtio.h

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -62,6 +62,11 @@ int virtqueue_add_inbuf(struct virtqueue *vq,
6262
void *data,
6363
gfp_t gfp);
6464

65+
int virtqueue_add_inbuf_cache_clean(struct virtqueue *vq,
66+
struct scatterlist sg[], unsigned int num,
67+
void *data,
68+
gfp_t gfp);
69+
6570
int virtqueue_add_inbuf_ctx(struct virtqueue *vq,
6671
struct scatterlist sg[], unsigned int num,
6772
void *data,

0 commit comments

Comments
 (0)