From: "Michael S. Tsirkin" <mst@redhat.com>
To: Eli Cohen <elic@nvidia.com>
Cc: eperezma@redhat.com, parav@mellanox.com,
virtualization@lists.linux-foundation.org
Subject: Re: [PATCH] vdpa/mlx5: Avoid losing link state updates
Date: Mon, 27 Mar 2023 08:38:35 -0400
Message-ID: <20230327083408-mutt-send-email-mst@kernel.org>
In-Reply-To: <20230320080105.2867-1-elic@nvidia.com>
On Mon, Mar 20, 2023 at 10:01:05AM +0200, Eli Cohen wrote:
> Current code ignores link state updates if VIRTIO_NET_F_STATUS was not
> negotiated. However, link state updates could be received before feature
> negotiation has completed, causing link state events to be lost and
> possibly leaving the link state down.
>
> Add code to detect when VIRTIO_NET_F_STATUS becomes set and update the
> link state at that point. We add a spinlock to serialize updates to
> config.status to maintain its integrity.
>
> Fixes: 033779a708f0 ("vdpa/mlx5: make MTU/STATUS presence conditional on feature bits")
> Signed-off-by: Eli Cohen <elic@nvidia.com>
Hmm, I don't get it.
Link state changes are config updates, are they not?
The probe sequence (virtio_dev_probe() in drivers/virtio/virtio.c) is:

        err = virtio_features_ok(dev);
        if (err)
                goto err;

        err = drv->probe(dev);
        if (err)
                goto err;

        /* If probe didn't do it, mark device DRIVER_OK ourselves. */
        if (!(dev->config->get_status(dev) & VIRTIO_CONFIG_S_DRIVER_OK))
                virtio_device_ready(dev);

        if (drv->scan)
                drv->scan(dev);

        virtio_config_enable(dev);
Looks like config changes that trigger before negotiation are queued,
not lost.
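
To spell out why: a config interrupt that fires while config is still
disabled is latched and replayed by virtio_config_enable(). From memory,
the core does roughly this (drivers/virtio/virtio.c, trimmed, so check
against your tree):

        static void __virtio_config_changed(struct virtio_device *dev)
        {
                struct virtio_driver *drv = drv_to_virtio(dev->dev.driver);

                /* Not enabled yet: remember the event instead of dropping it. */
                if (!dev->config_enabled)
                        dev->config_change_pending = true;
                else if (drv && drv->config_changed)
                        drv->config_changed(dev);
        }

        static void virtio_config_enable(struct virtio_device *dev)
        {
                spin_lock_irq(&dev->config_lock);
                dev->config_enabled = true;
                /* Replay a config change that arrived before enable. */
                if (dev->config_change_pending)
                        __virtio_config_changed(dev);
                dev->config_change_pending = false;
                spin_unlock_irq(&dev->config_lock);
        }

So a link state event raised during feature negotiation should still reach
the driver's config_changed callback once probe completes.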
Was there an actual bug you observed, or is this theoretical?
Thanks!
> ---
>  drivers/vdpa/mlx5/net/mlx5_vnet.c | 91 ++++++++++++++++++++-----------
>  drivers/vdpa/mlx5/net/mlx5_vnet.h |  2 +
>  2 files changed, 60 insertions(+), 33 deletions(-)
>
> diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
> index 520646ae7fa0..20d415e25aeb 100644
> --- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
> +++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
> @@ -2298,10 +2298,62 @@ static void update_cvq_info(struct mlx5_vdpa_dev *mvdev)
>          }
>  }
>
> +static bool f_status_was_set(u64 old, u64 new)
> +{
> +        if (!(old & BIT_ULL(VIRTIO_NET_F_STATUS)) &&
> +            (new & BIT_ULL(VIRTIO_NET_F_STATUS)))
> +                return true;
> +
> +        return false;
> +}
> +
> +static u8 query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport)
> +{
> +        u32 out[MLX5_ST_SZ_DW(query_vport_state_out)] = {};
> +        u32 in[MLX5_ST_SZ_DW(query_vport_state_in)] = {};
> +        int err;
> +
> +        MLX5_SET(query_vport_state_in, in, opcode, MLX5_CMD_OP_QUERY_VPORT_STATE);
> +        MLX5_SET(query_vport_state_in, in, op_mod, opmod);
> +        MLX5_SET(query_vport_state_in, in, vport_number, vport);
> +        if (vport)
> +                MLX5_SET(query_vport_state_in, in, other_vport, 1);
> +
> +        err = mlx5_cmd_exec_inout(mdev, query_vport_state, in, out);
> +        if (err)
> +                return 0;
> +
> +        return MLX5_GET(query_vport_state_out, out, state);
> +}
> +
> +static bool get_link_state(struct mlx5_vdpa_dev *mvdev)
> +{
> +        if (query_vport_state(mvdev->mdev, MLX5_VPORT_STATE_OP_MOD_VNIC_VPORT, 0) ==
> +            VPORT_STATE_UP)
> +                return true;
> +
> +        return false;
> +}
> +
> +static void update_link_state(struct mlx5_vdpa_dev *mvdev)
> +{
> +        struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
> +        bool up;
> +
> +        up = get_link_state(mvdev);
> +        spin_lock(&ndev->status_lock);
> +        if (up)
> +                ndev->config.status |= cpu_to_mlx5vdpa16(mvdev, VIRTIO_NET_S_LINK_UP);
> +        else
> +                ndev->config.status &= cpu_to_mlx5vdpa16(mvdev, ~VIRTIO_NET_S_LINK_UP);
> +        spin_unlock(&ndev->status_lock);
> +}
> +
>  static int mlx5_vdpa_set_driver_features(struct vdpa_device *vdev, u64 features)
>  {
>          struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
>          struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
> +        u64 cur_features;
>          int err;
>
>          print_features(mvdev, features, true);
> @@ -2310,7 +2362,11 @@ static int mlx5_vdpa_set_driver_features(struct vdpa_device *vdev, u64 features)
>          if (err)
>                  return err;
>
> +        cur_features = ndev->mvdev.actual_features;
>          ndev->mvdev.actual_features = features & ndev->mvdev.mlx_features;
> +        if (f_status_was_set(cur_features, ndev->mvdev.actual_features))
> +                update_link_state(mvdev);
> +
>          if (ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_MQ))
>                  ndev->rqt_size = mlx5vdpa16_to_cpu(mvdev, ndev->config.max_virtqueue_pairs);
>          else
> @@ -2995,34 +3051,6 @@ struct mlx5_vdpa_mgmtdev {
>          struct mlx5_vdpa_net *ndev;
>  };
>
> -static u8 query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport)
> -{
> -        u32 out[MLX5_ST_SZ_DW(query_vport_state_out)] = {};
> -        u32 in[MLX5_ST_SZ_DW(query_vport_state_in)] = {};
> -        int err;
> -
> -        MLX5_SET(query_vport_state_in, in, opcode, MLX5_CMD_OP_QUERY_VPORT_STATE);
> -        MLX5_SET(query_vport_state_in, in, op_mod, opmod);
> -        MLX5_SET(query_vport_state_in, in, vport_number, vport);
> -        if (vport)
> -                MLX5_SET(query_vport_state_in, in, other_vport, 1);
> -
> -        err = mlx5_cmd_exec_inout(mdev, query_vport_state, in, out);
> -        if (err)
> -                return 0;
> -
> -        return MLX5_GET(query_vport_state_out, out, state);
> -}
> -
> -static bool get_link_state(struct mlx5_vdpa_dev *mvdev)
> -{
> -        if (query_vport_state(mvdev->mdev, MLX5_VPORT_STATE_OP_MOD_VNIC_VPORT, 0) ==
> -            VPORT_STATE_UP)
> -                return true;
> -
> -        return false;
> -}
> -
>  static void update_carrier(struct work_struct *work)
>  {
>          struct mlx5_vdpa_wq_ent *wqent;
> @@ -3032,11 +3060,7 @@ static void update_carrier(struct work_struct *work)
>          wqent = container_of(work, struct mlx5_vdpa_wq_ent, work);
>          mvdev = wqent->mvdev;
>          ndev = to_mlx5_vdpa_ndev(mvdev);
> -        if (get_link_state(mvdev))
> -                ndev->config.status |= cpu_to_mlx5vdpa16(mvdev, VIRTIO_NET_S_LINK_UP);
> -        else
> -                ndev->config.status &= cpu_to_mlx5vdpa16(mvdev, ~VIRTIO_NET_S_LINK_UP);
> -
> +        update_link_state(mvdev);
>          if (ndev->nb_registered && ndev->config_cb.callback)
>                  ndev->config_cb.callback(ndev->config_cb.private);
>
> @@ -3172,6 +3196,7 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
>
>          init_mvqs(ndev);
>          init_rwsem(&ndev->reslock);
> +        spin_lock_init(&ndev->status_lock);
>          config = &ndev->config;
>
>          if (add_config->mask & BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MTU)) {
> diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.h b/drivers/vdpa/mlx5/net/mlx5_vnet.h
> index c90a89e1de4d..3666bbaa8822 100644
> --- a/drivers/vdpa/mlx5/net/mlx5_vnet.h
> +++ b/drivers/vdpa/mlx5/net/mlx5_vnet.h
> @@ -50,6 +50,8 @@ struct mlx5_vdpa_net {
>          struct mlx5_vdpa_wq_ent cvq_ent;
>          struct hlist_head macvlan_hash[MLX5V_MACVLAN_SIZE];
>          struct dentry *debugfs;
> +        /* serialize link status updates */
> +        spinlock_t status_lock;
>  };
>
> struct mlx5_vdpa_counter {
> --
> 2.38.1