Skip to content

Commit b465518

Browse files
stefano-garzarella authored and davem330 committed
vsock/loopback: use only sk_buff_head.lock to protect the packet queue
pkt_list_lock was used before commit 71dc9ec ("virtio/vsock: replace virtio_vsock_pkt with sk_buff") to protect the packet queue. After that commit we switched to sk_buff and we are using sk_buff_head.lock in almost every place to protect the packet queue except in vsock_loopback_work() when we call skb_queue_splice_init(). As reported by syzbot, this caused unlocked concurrent access to the packet queue between vsock_loopback_work() and vsock_loopback_cancel_pkt() since it is not holding pkt_list_lock. With the introduction of sk_buff_head, pkt_list_lock is redundant and can cause confusion, so let's remove it and use sk_buff_head.lock everywhere to protect the packet queue access. Fixes: 71dc9ec ("virtio/vsock: replace virtio_vsock_pkt with sk_buff") Cc: bobby.eshleman@bytedance.com Reported-and-tested-by: syzbot+befff0a9536049e7902e@syzkaller.appspotmail.com Signed-off-by: Stefano Garzarella <sgarzare@redhat.com> Reviewed-by: Bobby Eshleman <bobby.eshleman@bytedance.com> Reviewed-by: Arseniy Krasnov <AVKrasnov@sberdevices.ru> Signed-off-by: David S. Miller <davem@davemloft.net>
1 parent 6220358 commit b465518

1 file changed

Lines changed: 2 additions & 8 deletions

File tree

net/vmw_vsock/vsock_loopback.c

Lines changed: 2 additions & 8 deletions
Original file line number | Diff line number | Diff line change
@@ -15,7 +15,6 @@
1515
struct vsock_loopback {
1616
struct workqueue_struct *workqueue;
1717

18-
spinlock_t pkt_list_lock; /* protects pkt_list */
1918
struct sk_buff_head pkt_queue;
2019
struct work_struct pkt_work;
2120
};
@@ -32,9 +31,7 @@ static int vsock_loopback_send_pkt(struct sk_buff *skb)
3231
struct vsock_loopback *vsock = &the_vsock_loopback;
3332
int len = skb->len;
3433

35-
spin_lock_bh(&vsock->pkt_list_lock);
3634
skb_queue_tail(&vsock->pkt_queue, skb);
37-
spin_unlock_bh(&vsock->pkt_list_lock);
3835

3936
queue_work(vsock->workqueue, &vsock->pkt_work);
4037

@@ -113,9 +110,9 @@ static void vsock_loopback_work(struct work_struct *work)
113110

114111
skb_queue_head_init(&pkts);
115112

116-
spin_lock_bh(&vsock->pkt_list_lock);
113+
spin_lock_bh(&vsock->pkt_queue.lock);
117114
skb_queue_splice_init(&vsock->pkt_queue, &pkts);
118-
spin_unlock_bh(&vsock->pkt_list_lock);
115+
spin_unlock_bh(&vsock->pkt_queue.lock);
119116

120117
while ((skb = __skb_dequeue(&pkts))) {
121118
virtio_transport_deliver_tap_pkt(skb);
@@ -132,7 +129,6 @@ static int __init vsock_loopback_init(void)
132129
if (!vsock->workqueue)
133130
return -ENOMEM;
134131

135-
spin_lock_init(&vsock->pkt_list_lock);
136132
skb_queue_head_init(&vsock->pkt_queue);
137133
INIT_WORK(&vsock->pkt_work, vsock_loopback_work);
138134

@@ -156,9 +152,7 @@ static void __exit vsock_loopback_exit(void)
156152

157153
flush_work(&vsock->pkt_work);
158154

159-
spin_lock_bh(&vsock->pkt_list_lock);
160155
virtio_vsock_skb_queue_purge(&vsock->pkt_queue);
161-
spin_unlock_bh(&vsock->pkt_list_lock);
162156

163157
destroy_workqueue(vsock->workqueue);
164158
}

0 commit comments

Comments (0)