Skip to content

Commit 71b263e

Browse files
author
Paolo Abeni
committed
Merge branch 'vsock-virtio-vhost-msg_zerocopy-preparations'
Arseniy Krasnov says: ==================== vsock/virtio/vhost: MSG_ZEROCOPY preparations this patchset is the first of three parts of another big patchset for MSG_ZEROCOPY flag support: https://lore.kernel.org/netdev/20230701063947.3422088-1-AVKrasnov@sberdevices.ru/ During review of this series, Stefano Garzarella <sgarzare@redhat.com> suggested splitting it into three parts to simplify review and merging: 1) virtio and vhost updates (for fragged skbs) <--- this patchset 2) AF_VSOCK updates (allows enabling MSG_ZEROCOPY mode and reading tx completions) and an update for Documentation/. 3) Updates for tests and utils. This series enables handling of fragged skbs in the virtio and vhost parts. The new logic won't be triggered yet, because the SO_ZEROCOPY option is still impossible to enable at this moment (the next bunch of patches from the big set above will enable it). ==================== Link: https://lore.kernel.org/r/20230916130918.4105122-1-avkrasnov@salutedevices.com Signed-off-by: Paolo Abeni <pabeni@redhat.com>
2 parents b3af9c0 + 581512a commit 71b263e

5 files changed

Lines changed: 348 additions & 87 deletions

File tree

drivers/vhost/vsock.c

Lines changed: 9 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -114,6 +114,7 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
114114
struct sk_buff *skb;
115115
unsigned out, in;
116116
size_t nbytes;
117+
u32 offset;
117118
int head;
118119

119120
skb = virtio_vsock_skb_dequeue(&vsock->send_pkt_queue);
@@ -156,7 +157,8 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
156157
}
157158

158159
iov_iter_init(&iov_iter, ITER_DEST, &vq->iov[out], in, iov_len);
159-
payload_len = skb->len;
160+
offset = VIRTIO_VSOCK_SKB_CB(skb)->offset;
161+
payload_len = skb->len - offset;
160162
hdr = virtio_vsock_hdr(skb);
161163

162164
/* If the packet is greater than the space available in the
@@ -197,8 +199,10 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
197199
break;
198200
}
199201

200-
nbytes = copy_to_iter(skb->data, payload_len, &iov_iter);
201-
if (nbytes != payload_len) {
202+
if (skb_copy_datagram_iter(skb,
203+
offset,
204+
&iov_iter,
205+
payload_len)) {
202206
kfree_skb(skb);
203207
vq_err(vq, "Faulted on copying pkt buf\n");
204208
break;
@@ -212,13 +216,13 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
212216
vhost_add_used(vq, head, sizeof(*hdr) + payload_len);
213217
added = true;
214218

215-
skb_pull(skb, payload_len);
219+
VIRTIO_VSOCK_SKB_CB(skb)->offset += payload_len;
216220
total_len += payload_len;
217221

218222
/* If we didn't send all the payload we can requeue the packet
219223
* to send it with the next available buffer.
220224
*/
221-
if (skb->len > 0) {
225+
if (VIRTIO_VSOCK_SKB_CB(skb)->offset < skb->len) {
222226
hdr->flags |= cpu_to_le32(flags_to_restore);
223227

224228
/* We are queueing the same skb to handle

include/linux/virtio_vsock.h

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -12,6 +12,7 @@
1212
struct virtio_vsock_skb_cb {
1313
bool reply;
1414
bool tap_delivered;
15+
u32 offset;
1516
};
1617

1718
#define VIRTIO_VSOCK_SKB_CB(skb) ((struct virtio_vsock_skb_cb *)((skb)->cb))
@@ -159,6 +160,15 @@ struct virtio_transport {
159160

160161
/* Takes ownership of the packet */
161162
int (*send_pkt)(struct sk_buff *skb);
163+
164+
/* Used in MSG_ZEROCOPY mode. Checks, that provided data
165+
* (number of buffers) could be transmitted with zerocopy
166+
* mode. If this callback is not implemented for the current
167+
* transport - this means that this transport doesn't need
168+
* extra checks and can perform zerocopy transmission by
169+
* default.
170+
*/
171+
bool (*can_msgzerocopy)(int bufs_num);
162172
};
163173

164174
ssize_t

include/trace/events/vsock_virtio_transport_common.h

Lines changed: 8 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -43,15 +43,17 @@ TRACE_EVENT(virtio_transport_alloc_pkt,
4343
__u32 len,
4444
__u16 type,
4545
__u16 op,
46-
__u32 flags
46+
__u32 flags,
47+
bool zcopy
4748
),
4849
TP_ARGS(
4950
src_cid, src_port,
5051
dst_cid, dst_port,
5152
len,
5253
type,
5354
op,
54-
flags
55+
flags,
56+
zcopy
5557
),
5658
TP_STRUCT__entry(
5759
__field(__u32, src_cid)
@@ -62,6 +64,7 @@ TRACE_EVENT(virtio_transport_alloc_pkt,
6264
__field(__u16, type)
6365
__field(__u16, op)
6466
__field(__u32, flags)
67+
__field(bool, zcopy)
6568
),
6669
TP_fast_assign(
6770
__entry->src_cid = src_cid;
@@ -72,14 +75,15 @@ TRACE_EVENT(virtio_transport_alloc_pkt,
7275
__entry->type = type;
7376
__entry->op = op;
7477
__entry->flags = flags;
78+
__entry->zcopy = zcopy;
7579
),
76-
TP_printk("%u:%u -> %u:%u len=%u type=%s op=%s flags=%#x",
80+
TP_printk("%u:%u -> %u:%u len=%u type=%s op=%s flags=%#x zcopy=%s",
7781
__entry->src_cid, __entry->src_port,
7882
__entry->dst_cid, __entry->dst_port,
7983
__entry->len,
8084
show_type(__entry->type),
8185
show_op(__entry->op),
82-
__entry->flags)
86+
__entry->flags, __entry->zcopy ? "true" : "false")
8387
);
8488

8589
TRACE_EVENT(virtio_transport_recv_pkt,

net/vmw_vsock/virtio_transport.c

Lines changed: 85 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -63,6 +63,17 @@ struct virtio_vsock {
6363

6464
u32 guest_cid;
6565
bool seqpacket_allow;
66+
67+
/* These fields are used only in tx path in function
68+
* 'virtio_transport_send_pkt_work()', so to save
69+
* stack space in it, place both of them here. Each
70+
* pointer from 'out_sgs' points to the corresponding
71+
* element in 'out_bufs' - this is initialized in
72+
* 'virtio_vsock_probe()'. Both fields are protected
73+
* by 'tx_lock'. +1 is needed for packet header.
74+
*/
75+
struct scatterlist *out_sgs[MAX_SKB_FRAGS + 1];
76+
struct scatterlist out_bufs[MAX_SKB_FRAGS + 1];
6677
};
6778

6879
static u32 virtio_transport_get_local_cid(void)
@@ -100,8 +111,8 @@ virtio_transport_send_pkt_work(struct work_struct *work)
100111
vq = vsock->vqs[VSOCK_VQ_TX];
101112

102113
for (;;) {
103-
struct scatterlist hdr, buf, *sgs[2];
104114
int ret, in_sg = 0, out_sg = 0;
115+
struct scatterlist **sgs;
105116
struct sk_buff *skb;
106117
bool reply;
107118

@@ -111,12 +122,43 @@ virtio_transport_send_pkt_work(struct work_struct *work)
111122

112123
virtio_transport_deliver_tap_pkt(skb);
113124
reply = virtio_vsock_skb_reply(skb);
114-
115-
sg_init_one(&hdr, virtio_vsock_hdr(skb), sizeof(*virtio_vsock_hdr(skb)));
116-
sgs[out_sg++] = &hdr;
117-
if (skb->len > 0) {
118-
sg_init_one(&buf, skb->data, skb->len);
119-
sgs[out_sg++] = &buf;
125+
sgs = vsock->out_sgs;
126+
sg_init_one(sgs[out_sg], virtio_vsock_hdr(skb),
127+
sizeof(*virtio_vsock_hdr(skb)));
128+
out_sg++;
129+
130+
if (!skb_is_nonlinear(skb)) {
131+
if (skb->len > 0) {
132+
sg_init_one(sgs[out_sg], skb->data, skb->len);
133+
out_sg++;
134+
}
135+
} else {
136+
struct skb_shared_info *si;
137+
int i;
138+
139+
/* If skb is nonlinear, then its buffer must contain
140+
* only header and nothing more. Data is stored in
141+
* the fragged part.
142+
*/
143+
WARN_ON_ONCE(skb_headroom(skb) != sizeof(*virtio_vsock_hdr(skb)));
144+
145+
si = skb_shinfo(skb);
146+
147+
for (i = 0; i < si->nr_frags; i++) {
148+
skb_frag_t *skb_frag = &si->frags[i];
149+
void *va;
150+
151+
/* We will use 'page_to_virt()' for the userspace page
152+
* here, because virtio or dma-mapping layers will call
153+
* 'virt_to_phys()' later to fill the buffer descriptor.
154+
* We don't touch memory at "virtual" address of this page.
155+
*/
156+
va = page_to_virt(skb_frag->bv_page);
157+
sg_init_one(sgs[out_sg],
158+
va + skb_frag->bv_offset,
159+
skb_frag->bv_len);
160+
out_sg++;
161+
}
120162
}
121163

122164
ret = virtqueue_add_sgs(vq, sgs, out_sg, in_sg, skb, GFP_KERNEL);
@@ -413,6 +455,37 @@ static void virtio_vsock_rx_done(struct virtqueue *vq)
413455
queue_work(virtio_vsock_workqueue, &vsock->rx_work);
414456
}
415457

458+
static bool virtio_transport_can_msgzerocopy(int bufs_num)
459+
{
460+
struct virtio_vsock *vsock;
461+
bool res = false;
462+
463+
rcu_read_lock();
464+
465+
vsock = rcu_dereference(the_virtio_vsock);
466+
if (vsock) {
467+
struct virtqueue *vq = vsock->vqs[VSOCK_VQ_TX];
468+
469+
/* Check that tx queue is large enough to keep whole
470+
* data to send. This is needed, because when there is
471+
* not enough free space in the queue, current skb to
472+
* send will be reinserted to the head of tx list of
473+
* the socket to retry transmission later, so if skb
474+
* is bigger than whole queue, it will be reinserted
475+
* again and again, thus blocking other skbs to be sent.
476+
* Each page of the user provided buffer will be added
477+
* as a single buffer to the tx virtqueue, so compare
478+
* number of pages against maximum capacity of the queue.
479+
*/
480+
if (bufs_num <= vq->num_max)
481+
res = true;
482+
}
483+
484+
rcu_read_unlock();
485+
486+
return res;
487+
}
488+
416489
static bool virtio_transport_seqpacket_allow(u32 remote_cid);
417490

418491
static struct virtio_transport virtio_transport = {
@@ -462,6 +535,7 @@ static struct virtio_transport virtio_transport = {
462535
},
463536

464537
.send_pkt = virtio_transport_send_pkt,
538+
.can_msgzerocopy = virtio_transport_can_msgzerocopy,
465539
};
466540

467541
static bool virtio_transport_seqpacket_allow(u32 remote_cid)
@@ -621,6 +695,7 @@ static int virtio_vsock_probe(struct virtio_device *vdev)
621695
{
622696
struct virtio_vsock *vsock = NULL;
623697
int ret;
698+
int i;
624699

625700
ret = mutex_lock_interruptible(&the_virtio_vsock_mutex);
626701
if (ret)
@@ -663,6 +738,9 @@ static int virtio_vsock_probe(struct virtio_device *vdev)
663738
if (ret < 0)
664739
goto out;
665740

741+
for (i = 0; i < ARRAY_SIZE(vsock->out_sgs); i++)
742+
vsock->out_sgs[i] = &vsock->out_bufs[i];
743+
666744
rcu_assign_pointer(the_virtio_vsock, vsock);
667745

668746
mutex_unlock(&the_virtio_vsock_mutex);

0 commit comments

Comments
 (0)