Skip to content

Commit b9f7425

Browse files
jasowang authored and mstsirkin committed
virtio-net: convert rx mode setting to use workqueue
This patch converts rx mode setting to be done in a workqueue; this is a must to allow sleeping while waiting for the cvq command to get a response, since the current code is executed under the addr spin lock. Note that we need to disable and flush the workqueue during freeze, which means the rx mode setting is lost after resuming. This is not a bug introduced by this patch, as we never try to restore the rx mode setting during resume. Signed-off-by: Jason Wang <jasowang@redhat.com> Message-Id: <20230720083839.481487-2-jasowang@redhat.com> Signed-off-by: Michael S. Tsirkin <mst@redhat.com> Reviewed-by: Shannon Nelson <shannon.nelson@amd.com>
1 parent d5c0ed1 commit b9f7425

1 file changed

Lines changed: 52 additions & 3 deletions

File tree

drivers/net/virtio_net.c

Lines changed: 52 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -304,6 +304,12 @@ struct virtnet_info {
304304
/* Work struct for config space updates */
305305
struct work_struct config_work;
306306

307+
/* Work struct for setting rx mode */
308+
struct work_struct rx_mode_work;
309+
310+
/* OK to queue work setting RX mode? */
311+
bool rx_mode_work_enabled;
312+
307313
/* Does the affinity hint is set for virtqueues? */
308314
bool affinity_hint_set;
309315

@@ -447,6 +453,20 @@ static void disable_delayed_refill(struct virtnet_info *vi)
447453
spin_unlock_bh(&vi->refill_lock);
448454
}
449455

456+
static void enable_rx_mode_work(struct virtnet_info *vi)
457+
{
458+
rtnl_lock();
459+
vi->rx_mode_work_enabled = true;
460+
rtnl_unlock();
461+
}
462+
463+
static void disable_rx_mode_work(struct virtnet_info *vi)
464+
{
465+
rtnl_lock();
466+
vi->rx_mode_work_enabled = false;
467+
rtnl_unlock();
468+
}
469+
450470
static void virtqueue_napi_schedule(struct napi_struct *napi,
451471
struct virtqueue *vq)
452472
{
@@ -2706,9 +2726,11 @@ static int virtnet_close(struct net_device *dev)
27062726
return 0;
27072727
}
27082728

2709-
static void virtnet_set_rx_mode(struct net_device *dev)
2729+
static void virtnet_rx_mode_work(struct work_struct *work)
27102730
{
2711-
struct virtnet_info *vi = netdev_priv(dev);
2731+
struct virtnet_info *vi =
2732+
container_of(work, struct virtnet_info, rx_mode_work);
2733+
struct net_device *dev = vi->dev;
27122734
struct scatterlist sg[2];
27132735
struct virtio_net_ctrl_mac *mac_data;
27142736
struct netdev_hw_addr *ha;
@@ -2721,6 +2743,8 @@ static void virtnet_set_rx_mode(struct net_device *dev)
27212743
if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
27222744
return;
27232745

2746+
rtnl_lock();
2747+
27242748
vi->ctrl->promisc = ((dev->flags & IFF_PROMISC) != 0);
27252749
vi->ctrl->allmulti = ((dev->flags & IFF_ALLMULTI) != 0);
27262750

@@ -2738,14 +2762,19 @@ static void virtnet_set_rx_mode(struct net_device *dev)
27382762
dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
27392763
vi->ctrl->allmulti ? "en" : "dis");
27402764

2765+
netif_addr_lock_bh(dev);
2766+
27412767
uc_count = netdev_uc_count(dev);
27422768
mc_count = netdev_mc_count(dev);
27432769
/* MAC filter - use one buffer for both lists */
27442770
buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) +
27452771
(2 * sizeof(mac_data->entries)), GFP_ATOMIC);
27462772
mac_data = buf;
2747-
if (!buf)
2773+
if (!buf) {
2774+
netif_addr_unlock_bh(dev);
2775+
rtnl_unlock();
27482776
return;
2777+
}
27492778

27502779
sg_init_table(sg, 2);
27512780

@@ -2766,16 +2795,28 @@ static void virtnet_set_rx_mode(struct net_device *dev)
27662795
netdev_for_each_mc_addr(ha, dev)
27672796
memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);
27682797

2798+
netif_addr_unlock_bh(dev);
2799+
27692800
sg_set_buf(&sg[1], mac_data,
27702801
sizeof(mac_data->entries) + (mc_count * ETH_ALEN));
27712802

27722803
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
27732804
VIRTIO_NET_CTRL_MAC_TABLE_SET, sg))
27742805
dev_warn(&dev->dev, "Failed to set MAC filter table.\n");
27752806

2807+
rtnl_unlock();
2808+
27762809
kfree(buf);
27772810
}
27782811

2812+
static void virtnet_set_rx_mode(struct net_device *dev)
2813+
{
2814+
struct virtnet_info *vi = netdev_priv(dev);
2815+
2816+
if (vi->rx_mode_work_enabled)
2817+
schedule_work(&vi->rx_mode_work);
2818+
}
2819+
27792820
static int virtnet_vlan_rx_add_vid(struct net_device *dev,
27802821
__be16 proto, u16 vid)
27812822
{
@@ -3856,6 +3897,8 @@ static void virtnet_freeze_down(struct virtio_device *vdev)
38563897

38573898
/* Make sure no work handler is accessing the device */
38583899
flush_work(&vi->config_work);
3900+
disable_rx_mode_work(vi);
3901+
flush_work(&vi->rx_mode_work);
38593902

38603903
netif_tx_lock_bh(vi->dev);
38613904
netif_device_detach(vi->dev);
@@ -3878,6 +3921,7 @@ static int virtnet_restore_up(struct virtio_device *vdev)
38783921
virtio_device_ready(vdev);
38793922

38803923
enable_delayed_refill(vi);
3924+
enable_rx_mode_work(vi);
38813925

38823926
if (netif_running(vi->dev)) {
38833927
err = virtnet_open(vi->dev);
@@ -4676,6 +4720,7 @@ static int virtnet_probe(struct virtio_device *vdev)
46764720
vdev->priv = vi;
46774721

46784722
INIT_WORK(&vi->config_work, virtnet_config_changed_work);
4723+
INIT_WORK(&vi->rx_mode_work, virtnet_rx_mode_work);
46794724
spin_lock_init(&vi->refill_lock);
46804725

46814726
if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) {
@@ -4798,6 +4843,8 @@ static int virtnet_probe(struct virtio_device *vdev)
47984843
if (vi->has_rss || vi->has_rss_hash_report)
47994844
virtnet_init_default_rss(vi);
48004845

4846+
enable_rx_mode_work(vi);
4847+
48014848
/* serialize netdev register + virtio_device_ready() with ndo_open() */
48024849
rtnl_lock();
48034850

@@ -4895,6 +4942,8 @@ static void virtnet_remove(struct virtio_device *vdev)
48954942

48964943
/* Make sure no work handler is accessing the device. */
48974944
flush_work(&vi->config_work);
4945+
disable_rx_mode_work(vi);
4946+
flush_work(&vi->rx_mode_work);
48984947

48994948
unregister_netdev(vi->dev);
49004949

0 commit comments

Comments (0)