Skip to content

Commit 4174152

Browse files
committed
Revert "vhost/net: Defer TX queue re-enable until after sendmsg"
This reverts commit 8c2e6b2. That commit tries to defer enabling notifications by moving the logic out of the loop, after the vhost_tx_batch() call, when nothing new is spotted. This has side effects because the new logic is reused for several other error conditions. One example is the IOTLB: when there is an IOTLB miss, get_tx_bufs() might return -EAGAIN and exit the loop, then see that there are still available buffers, so it will queue the tx work again until userspace feeds the IOTLB entry correctly. This slows down tx processing and triggers the TX watchdog in the guest, as reported in https://lkml.org/lkml/2025/9/10/1596. To fix, revert the change. A follow-up patch will bring the performance back in a safe way. Reported-by: Jon Kohler <jon@nutanix.com> Cc: stable@vger.kernel.org Fixes: 8c2e6b2 ("vhost/net: Defer TX queue re-enable until after sendmsg") Signed-off-by: Jason Wang <jasowang@redhat.com> Signed-off-by: Michael S. Tsirkin <mst@redhat.com> Message-Id: <20250917063045.2042-2-jasowang@redhat.com>
1 parent 90beccb commit 4174152

1 file changed

Lines changed: 9 additions & 21 deletions

File tree

drivers/vhost/net.c

Lines changed: 9 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -765,11 +765,11 @@ static void handle_tx_copy(struct vhost_net *net, struct socket *sock)
765765
int err;
766766
int sent_pkts = 0;
767767
bool sock_can_batch = (sock->sk->sk_sndbuf == INT_MAX);
768-
bool busyloop_intr;
769768
bool in_order = vhost_has_feature(vq, VIRTIO_F_IN_ORDER);
770769

771770
do {
772-
busyloop_intr = false;
771+
bool busyloop_intr = false;
772+
773773
if (nvq->done_idx == VHOST_NET_BATCH)
774774
vhost_tx_batch(net, nvq, sock, &msg);
775775

@@ -780,10 +780,13 @@ static void handle_tx_copy(struct vhost_net *net, struct socket *sock)
780780
break;
781781
/* Nothing new? Wait for eventfd to tell us they refilled. */
782782
if (head == vq->num) {
783-
/* Kicks are disabled at this point, break loop and
784-
* process any remaining batched packets. Queue will
785-
* be re-enabled afterwards.
786-
*/
783+
if (unlikely(busyloop_intr)) {
784+
vhost_poll_queue(&vq->poll);
785+
} else if (unlikely(vhost_enable_notify(&net->dev,
786+
vq))) {
787+
vhost_disable_notify(&net->dev, vq);
788+
continue;
789+
}
787790
break;
788791
}
789792

@@ -839,22 +842,7 @@ static void handle_tx_copy(struct vhost_net *net, struct socket *sock)
839842
++nvq->done_idx;
840843
} while (likely(!vhost_exceeds_weight(vq, ++sent_pkts, total_len)));
841844

842-
/* Kicks are still disabled, dispatch any remaining batched msgs. */
843845
vhost_tx_batch(net, nvq, sock, &msg);
844-
845-
if (unlikely(busyloop_intr))
846-
/* If interrupted while doing busy polling, requeue the
847-
* handler to be fair handle_rx as well as other tasks
848-
* waiting on cpu.
849-
*/
850-
vhost_poll_queue(&vq->poll);
851-
else
852-
/* All of our work has been completed; however, before
853-
* leaving the TX handler, do one last check for work,
854-
* and requeue handler if necessary. If there is no work,
855-
* queue will be reenabled.
856-
*/
857-
vhost_net_busy_poll_try_queue(net, vq);
858846
}
859847

860848
static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)

0 commit comments

Comments (0)