From: "Michael S. Tsirkin" <mst@redhat.com>
To: Jason Wang <jasowang@redhat.com>
Cc: xuanzhuo@linux.alibaba.com, davem@davemloft.net,
edumazet@google.com, kuba@kernel.org, pabeni@redhat.com,
virtualization@lists.linux-foundation.org,
netdev@vger.kernel.org, linux-kernel@vger.kernel.org,
alvaro.karsz@solid-run.com
Subject: Re: [PATCH V3 net-next 1/2] virtio-net: convert rx mode setting to use workqueue
Date: Wed, 24 May 2023 05:15:03 -0400
Message-ID: <20230524050604-mutt-send-email-mst@kernel.org>
In-Reply-To: <20230524081842.3060-2-jasowang@redhat.com>
On Wed, May 24, 2023 at 04:18:41PM +0800, Jason Wang wrote:
> This patch converts rx mode setting to be done in a workqueue. This is
> a must to allow sleeping while waiting for the cvq command to complete,
> since the current code runs under the addr spin lock.
>
> Signed-off-by: Jason Wang <jasowang@redhat.com>
> ---
> Changes since V1:
> - use RTNL to synchronize rx mode worker
> ---
> drivers/net/virtio_net.c | 55 +++++++++++++++++++++++++++++++++++++---
> 1 file changed, 52 insertions(+), 3 deletions(-)
>
> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> index 56ca1d270304..5d2f1da4eaa0 100644
> --- a/drivers/net/virtio_net.c
> +++ b/drivers/net/virtio_net.c
> @@ -265,6 +265,12 @@ struct virtnet_info {
> /* Work struct for config space updates */
> struct work_struct config_work;
>
> + /* Work struct for config rx mode */
With a bit less abbreviation maybe? E.g. "Work struct for setting rx mode"?
> + struct work_struct rx_mode_work;
> +
> + /* Is rx mode work enabled? */
Ugh, not a great comment - it just restates the field name.
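Maybe say what it actually guards instead, e.g. (just a suggestion):

	/* OK to schedule rx mode work; cleared around freeze and remove */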
> + bool rx_mode_work_enabled;
> +
> /* Does the affinity hint is set for virtqueues? */
> bool affinity_hint_set;
>
> @@ -388,6 +394,20 @@ static void disable_delayed_refill(struct virtnet_info *vi)
> spin_unlock_bh(&vi->refill_lock);
> }
>
> +static void enable_rx_mode_work(struct virtnet_info *vi)
> +{
> + rtnl_lock();
> + vi->rx_mode_work_enabled = true;
> + rtnl_unlock();
> +}
> +
> +static void disable_rx_mode_work(struct virtnet_info *vi)
> +{
> + rtnl_lock();
> + vi->rx_mode_work_enabled = false;
> + rtnl_unlock();
> +}
> +
> static void virtqueue_napi_schedule(struct napi_struct *napi,
> struct virtqueue *vq)
> {
> @@ -2341,9 +2361,11 @@ static int virtnet_close(struct net_device *dev)
> return 0;
> }
>
> -static void virtnet_set_rx_mode(struct net_device *dev)
> +static void virtnet_rx_mode_work(struct work_struct *work)
> {
> - struct virtnet_info *vi = netdev_priv(dev);
> + struct virtnet_info *vi =
> + container_of(work, struct virtnet_info, rx_mode_work);
> + struct net_device *dev = vi->dev;
> struct scatterlist sg[2];
> struct virtio_net_ctrl_mac *mac_data;
> struct netdev_hw_addr *ha;
> @@ -2356,6 +2378,8 @@ static void virtnet_set_rx_mode(struct net_device *dev)
> if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
> return;
>
> + rtnl_lock();
> +
> vi->ctrl->promisc = ((dev->flags & IFF_PROMISC) != 0);
> vi->ctrl->allmulti = ((dev->flags & IFF_ALLMULTI) != 0);
>
> @@ -2373,14 +2397,19 @@ static void virtnet_set_rx_mode(struct net_device *dev)
> dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
> vi->ctrl->allmulti ? "en" : "dis");
>
> + netif_addr_lock_bh(dev);
> +
> uc_count = netdev_uc_count(dev);
> mc_count = netdev_mc_count(dev);
> /* MAC filter - use one buffer for both lists */
> buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) +
> (2 * sizeof(mac_data->entries)), GFP_ATOMIC);
> mac_data = buf;
> - if (!buf)
> + if (!buf) {
> + netif_addr_unlock_bh(dev);
> + rtnl_unlock();
> return;
> + }
>
> sg_init_table(sg, 2);
>
> @@ -2401,6 +2430,8 @@ static void virtnet_set_rx_mode(struct net_device *dev)
> netdev_for_each_mc_addr(ha, dev)
> memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);
>
> + netif_addr_unlock_bh(dev);
> +
> sg_set_buf(&sg[1], mac_data,
> sizeof(mac_data->entries) + (mc_count * ETH_ALEN));
>
> @@ -2408,9 +2439,19 @@ static void virtnet_set_rx_mode(struct net_device *dev)
> VIRTIO_NET_CTRL_MAC_TABLE_SET, sg))
> dev_warn(&dev->dev, "Failed to set MAC filter table.\n");
>
> + rtnl_unlock();
> +
> kfree(buf);
> }
>
> +static void virtnet_set_rx_mode(struct net_device *dev)
> +{
> + struct virtnet_info *vi = netdev_priv(dev);
> +
> + if (vi->rx_mode_work_enabled)
> + schedule_work(&vi->rx_mode_work);
> +}
> +
> static int virtnet_vlan_rx_add_vid(struct net_device *dev,
> __be16 proto, u16 vid)
> {
> @@ -3181,6 +3222,8 @@ static void virtnet_freeze_down(struct virtio_device *vdev)
>
> /* Make sure no work handler is accessing the device */
> flush_work(&vi->config_work);
> + disable_rx_mode_work(vi);
> + flush_work(&vi->rx_mode_work);
>
> netif_tx_lock_bh(vi->dev);
> netif_device_detach(vi->dev);
Hmm, so an rx mode change requested while the work is disabled just gets
skipped, and after restore we are left with a stale rx mode.
Any way to make this more robust?
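One idea, just an untested sketch on top of this patch: requeue the work
in virtnet_restore_up() right after re-enabling it, so whatever rx mode
the core currently wants gets reapplied:

	enable_rx_mode_work(vi);
	/* Reapply the current rx mode in case a request was dropped
	 * while rx_mode_work_enabled was false.
	 */
	schedule_work(&vi->rx_mode_work);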
> @@ -3203,6 +3246,7 @@ static int virtnet_restore_up(struct virtio_device *vdev)
> virtio_device_ready(vdev);
>
> enable_delayed_refill(vi);
> + enable_rx_mode_work(vi);
>
> if (netif_running(vi->dev)) {
> err = virtnet_open(vi->dev);
> @@ -4002,6 +4046,7 @@ static int virtnet_probe(struct virtio_device *vdev)
> vdev->priv = vi;
>
> INIT_WORK(&vi->config_work, virtnet_config_changed_work);
> + INIT_WORK(&vi->rx_mode_work, virtnet_rx_mode_work);
> spin_lock_init(&vi->refill_lock);
>
> if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) {
> @@ -4110,6 +4155,8 @@ static int virtnet_probe(struct virtio_device *vdev)
> if (vi->has_rss || vi->has_rss_hash_report)
> virtnet_init_default_rss(vi);
>
> + enable_rx_mode_work(vi);
> +
> /* serialize netdev register + virtio_device_ready() with ndo_open() */
> rtnl_lock();
>
> @@ -4207,6 +4254,8 @@ static void virtnet_remove(struct virtio_device *vdev)
>
> /* Make sure no work handler is accessing the device. */
> flush_work(&vi->config_work);
> + disable_rx_mode_work(vi);
> + flush_work(&vi->rx_mode_work);
>
> unregister_netdev(vi->dev);
>
> --
> 2.25.1
Thread overview: 11+ messages
2023-05-24 8:18 [PATCH V3 net-next 0/2] virtio-net: don't busy poll for cvq command Jason Wang
2023-05-24 8:18 ` [PATCH V3 net-next 1/2] virtio-net: convert rx mode setting to use workqueue Jason Wang
2023-05-24 9:15 ` Michael S. Tsirkin [this message]
2023-05-25 3:43 ` Jason Wang
2023-05-25 7:41 ` Michael S. Tsirkin
2023-05-26 1:31 ` Jason Wang
2023-05-28 11:39 ` Michael S. Tsirkin
2023-05-29 1:21 ` Jason Wang
2023-05-31 1:07 ` Jason Wang
[not found] ` <20230628093334-mutt-send-email-mst@kernel.org>
2023-06-29 3:20 ` Jason Wang
2023-05-24 8:18 ` [PATCH V3 net-next 2/2] virtio-net: add cond_resched() to the command waiting loop Jason Wang