* [PATCH 0/2] mlx5 address space and suspend support
From: Eli Cohen @ 2022-07-14 11:38 UTC
To: mst, jasowang, eperezma, virtualization
Cc: linux-kernel, si-wei.liu, parav, Eli Cohen

The following two patches add support for the recently added suspend
callback and for multiple address spaces. The patches are aimed at
supporting live migration on mlx5_vdpa.

Eli Cohen (2):
  vdpa/mlx5: Implement suspend virtqueue callback
  vdpa/mlx5: Support different address spaces for control and data

 drivers/vdpa/mlx5/core/mlx5_vdpa.h |  11 ++
 drivers/vdpa/mlx5/net/mlx5_vnet.c  | 171 ++++++++++++++++++++++++++---
 include/linux/mlx5/mlx5_ifc_vdpa.h |   8 ++
 3 files changed, 176 insertions(+), 14 deletions(-)

--
2.35.1
* [PATCH 1/2] vdpa/mlx5: Implement suspend virtqueue callback
From: Eli Cohen @ 2022-07-14 11:38 UTC
To: mst, jasowang, eperezma, virtualization
Cc: linux-kernel, si-wei.liu, parav, Eli Cohen

Implement the suspend callback, which suspends the virtqueues so they
stop processing descriptors. This is required to query a consistent
state of the virtqueues while live migration is taking place.

Signed-off-by: Eli Cohen <elic@nvidia.com>
---
 drivers/vdpa/mlx5/net/mlx5_vnet.c  | 83 ++++++++++++++++++++++++++++--
 include/linux/mlx5/mlx5_ifc_vdpa.h |  8 +++
 2 files changed, 88 insertions(+), 3 deletions(-)

diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
index e85c1d71f4ed..d0fff559ca15 100644
--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
@@ -164,6 +164,7 @@ struct mlx5_vdpa_net {
         bool setup;
         u32 cur_num_vqs;
         u32 rqt_size;
+        bool nb_registered;
         struct notifier_block nb;
         struct vdpa_callback config_cb;
         struct mlx5_vdpa_wq_ent cvq_ent;
@@ -895,6 +896,7 @@ static int create_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtque
         if (err)
                 goto err_cmd;
 
+        mvq->fw_state = MLX5_VIRTIO_NET_Q_OBJECT_STATE_INIT;
         kfree(in);
         mvq->virtq_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
 
@@ -922,6 +924,7 @@ static void destroy_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtq
                 mlx5_vdpa_warn(&ndev->mvdev, "destroy virtqueue 0x%x\n", mvq->virtq_id);
                 return;
         }
+        mvq->fw_state = MLX5_VIRTIO_NET_Q_OBJECT_NONE;
         umems_destroy(ndev, mvq);
 }
 
@@ -1121,6 +1124,20 @@ static int query_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueu
         return err;
 }
 
+static bool is_valid_state_change(int oldstate, int newstate)
+{
+        switch (oldstate) {
+        case MLX5_VIRTIO_NET_Q_OBJECT_STATE_INIT:
+                return newstate == MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY;
+        case MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY:
+                return newstate == MLX5_VIRTIO_NET_Q_OBJECT_STATE_SUSPEND;
+        case MLX5_VIRTIO_NET_Q_OBJECT_STATE_SUSPEND:
+        case MLX5_VIRTIO_NET_Q_OBJECT_STATE_ERR:
+        default:
+                return false;
+        }
+}
+
 static int modify_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, int state)
 {
         int inlen = MLX5_ST_SZ_BYTES(modify_virtio_net_q_in);
@@ -1130,6 +1147,12 @@ static int modify_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtque
         void *in;
         int err;
 
+        if (mvq->fw_state == MLX5_VIRTIO_NET_Q_OBJECT_NONE)
+                return 0;
+
+        if (!is_valid_state_change(mvq->fw_state, state))
+                return -EINVAL;
+
         in = kzalloc(inlen, GFP_KERNEL);
         if (!in)
                 return -ENOMEM;
@@ -1992,6 +2015,7 @@ static void mlx5_vdpa_set_vq_ready(struct vdpa_device *vdev, u16 idx, bool ready
         struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
         struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
         struct mlx5_vdpa_virtqueue *mvq;
+        int err;
 
         if (!mvdev->actual_features)
                 return;
@@ -2005,8 +2029,16 @@ static void mlx5_vdpa_set_vq_ready(struct vdpa_device *vdev, u16 idx, bool ready
         }
 
         mvq = &ndev->vqs[idx];
-        if (!ready)
+        if (!ready) {
                 suspend_vq(ndev, mvq);
+        } else {
+                err = modify_virtqueue(ndev, mvq, MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY);
+                if (err) {
+                        mlx5_vdpa_warn(mvdev, "modify VQ %d to ready failed (%d)\n", idx, err);
+                        ready = false;
+                }
+        }
+
         mvq->ready = ready;
 }
 
@@ -2733,6 +2765,37 @@ static int mlx5_vdpa_get_vendor_vq_stats(struct vdpa_device *vdev, u16 idx,
         return err;
 }
 
+static void mlx5_vdpa_cvq_suspend(struct mlx5_vdpa_dev *mvdev)
+{
+        struct mlx5_control_vq *cvq;
+
+        if (!(mvdev->actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ)))
+                return;
+
+        cvq = &mvdev->cvq;
+        cvq->ready = false;
+}
+
+static int mlx5_vdpa_suspend(struct vdpa_device *vdev)
+{
+        struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+        struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
+        struct mlx5_vdpa_virtqueue *mvq;
+        int i;
+
+        down_write(&ndev->reslock);
+        mlx5_notifier_unregister(mvdev->mdev, &ndev->nb);
+        ndev->nb_registered = false;
+        flush_workqueue(ndev->mvdev.wq);
+        for (i = 0; i < ndev->cur_num_vqs; i++) {
+                mvq = &ndev->vqs[i];
+                suspend_vq(ndev, mvq);
+        }
+        mlx5_vdpa_cvq_suspend(mvdev);
+        up_write(&ndev->reslock);
+        return 0;
+}
+
 static const struct vdpa_config_ops mlx5_vdpa_ops = {
         .set_vq_address = mlx5_vdpa_set_vq_address,
         .set_vq_num = mlx5_vdpa_set_vq_num,
@@ -2763,6 +2826,7 @@ static const struct vdpa_config_ops mlx5_vdpa_ops = {
         .get_generation = mlx5_vdpa_get_generation,
         .set_map = mlx5_vdpa_set_map,
         .free = mlx5_vdpa_free,
+        .suspend = mlx5_vdpa_suspend,
 };
 
 static int query_mtu(struct mlx5_core_dev *mdev, u16 *mtu)
@@ -2828,6 +2892,7 @@ static void init_mvqs(struct mlx5_vdpa_net *ndev)
                 mvq->index = i;
                 mvq->ndev = ndev;
                 mvq->fwqp.fw = true;
+                mvq->fw_state = MLX5_VIRTIO_NET_Q_OBJECT_NONE;
         }
         for (; i < ndev->mvdev.max_vqs; i++) {
                 mvq = &ndev->vqs[i];
@@ -2902,13 +2967,21 @@ static int event_handler(struct notifier_block *nb, unsigned long event, void *p
                 switch (eqe->sub_type) {
                 case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
                 case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
+                        down_read(&ndev->reslock);
+                        if (!ndev->nb_registered) {
+                                up_read(&ndev->reslock);
+                                return NOTIFY_DONE;
+                        }
                         wqent = kzalloc(sizeof(*wqent), GFP_ATOMIC);
-                        if (!wqent)
+                        if (!wqent) {
+                                up_read(&ndev->reslock);
                                 return NOTIFY_DONE;
+                        }
                         wqent->mvdev = &ndev->mvdev;
                         INIT_WORK(&wqent->work, update_carrier);
                         queue_work(ndev->mvdev.wq, &wqent->work);
+                        up_read(&ndev->reslock);
                         ret = NOTIFY_OK;
                         break;
                 default:
@@ -3062,6 +3135,7 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
 
         ndev->nb.notifier_call = event_handler;
         mlx5_notifier_register(mdev, &ndev->nb);
+        ndev->nb_registered = true;
         mvdev->vdev.mdev = &mgtdev->mgtdev;
         err = _vdpa_register_device(&mvdev->vdev, max_vqs + 1);
         if (err)
@@ -3093,7 +3167,10 @@ static void mlx5_vdpa_dev_del(struct vdpa_mgmt_dev *v_mdev, struct vdpa_device *
         struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
         struct workqueue_struct *wq;
 
-        mlx5_notifier_unregister(mvdev->mdev, &ndev->nb);
+        if (ndev->nb_registered) {
+                mlx5_notifier_unregister(mvdev->mdev, &ndev->nb);
+                ndev->nb_registered = false;
+        }
         wq = mvdev->wq;
         mvdev->wq = NULL;
         destroy_workqueue(wq);
diff --git a/include/linux/mlx5/mlx5_ifc_vdpa.h b/include/linux/mlx5/mlx5_ifc_vdpa.h
index 4414ed5b6ed2..9becdc3fa503 100644
--- a/include/linux/mlx5/mlx5_ifc_vdpa.h
+++ b/include/linux/mlx5/mlx5_ifc_vdpa.h
@@ -150,6 +150,14 @@ enum {
         MLX5_VIRTIO_NET_Q_OBJECT_STATE_ERR = 0x3,
 };
 
+/* This indicates that the object was not created or has already
+ * been destroyed. It is safe to assume that this object will never
+ * have so many states.
+ */
+enum {
+        MLX5_VIRTIO_NET_Q_OBJECT_NONE = 0xffffffff
+};
+
 enum {
         MLX5_RQTC_LIST_Q_TYPE_RQ = 0x0,
         MLX5_RQTC_LIST_Q_TYPE_VIRTIO_NET_Q = 0x1,
--
2.35.1
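The patch above tracks each firmware virtqueue object through a small state
machine (INIT -> RDY -> SUSPEND, plus ERR and the software-only NONE value as
dead ends) and has modify_virtqueue() reject any other transition. The
stand-alone C sketch below mirrors that rule set so it can be compiled and
experimented with outside the kernel; the numeric state values are assumed to
match mlx5_ifc_vdpa.h, and the program is illustrative, not driver code.

/* User-space model of the firmware virtqueue state machine used by the
 * suspend patch. Constants stand in for MLX5_VIRTIO_NET_Q_OBJECT_STATE_*
 * (values assumed); the logic follows is_valid_state_change().
 */
#include <stdbool.h>
#include <stdio.h>

enum {
        Q_STATE_INIT    = 0x0,
        Q_STATE_RDY     = 0x1,
        Q_STATE_SUSPEND = 0x2,
        Q_STATE_ERR     = 0x3,
};

static bool is_valid_state_change(int oldstate, int newstate)
{
        switch (oldstate) {
        case Q_STATE_INIT:
                return newstate == Q_STATE_RDY;         /* created -> ready */
        case Q_STATE_RDY:
                return newstate == Q_STATE_SUSPEND;     /* ready -> suspended */
        default:                                        /* SUSPEND and ERR are terminal */
                return false;
        }
}

int main(void)
{
        printf("INIT -> RDY     : %d\n", is_valid_state_change(Q_STATE_INIT, Q_STATE_RDY));
        printf("RDY  -> SUSPEND : %d\n", is_valid_state_change(Q_STATE_RDY, Q_STATE_SUSPEND));
        printf("SUSPEND -> RDY  : %d\n", is_valid_state_change(Q_STATE_SUSPEND, Q_STATE_RDY));
        return 0;
}

Note that SUSPEND -> RDY is rejected, so at this stage a suspended device is
expected to be reset (its queue objects destroyed and re-created) rather than
resumed in place.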
* [PATCH 2/2] vdpa/mlx5: Support different address spaces for control and data
From: Eli Cohen @ 2022-07-14 11:38 UTC
To: mst, jasowang, eperezma, virtualization
Cc: linux-kernel, si-wei.liu, parav, Eli Cohen

Partition the virtqueues into two address spaces: one for the control
virtqueue, which is implemented in software, and one for the data
virtqueues.

Based-on: <20220526124338.36247-1-eperezma@redhat.com>
Signed-off-by: Eli Cohen <elic@nvidia.com>
---
 drivers/vdpa/mlx5/core/mlx5_vdpa.h | 11 ++++
 drivers/vdpa/mlx5/net/mlx5_vnet.c  | 88 ++++++++++++++++++++++++++----
 2 files changed, 88 insertions(+), 11 deletions(-)

diff --git a/drivers/vdpa/mlx5/core/mlx5_vdpa.h b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
index 44104093163b..6af9fdbb86b7 100644
--- a/drivers/vdpa/mlx5/core/mlx5_vdpa.h
+++ b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
@@ -70,6 +70,16 @@ struct mlx5_vdpa_wq_ent {
         struct mlx5_vdpa_dev *mvdev;
 };
 
+enum {
+        MLX5_VDPA_DATAVQ_GROUP,
+        MLX5_VDPA_CVQ_GROUP,
+        MLX5_VDPA_NUMVQ_GROUPS
+};
+
+enum {
+        MLX5_VDPA_NUM_AS = MLX5_VDPA_NUMVQ_GROUPS
+};
+
 struct mlx5_vdpa_dev {
         struct vdpa_device vdev;
         struct mlx5_core_dev *mdev;
@@ -85,6 +95,7 @@ struct mlx5_vdpa_dev {
         struct mlx5_vdpa_mr mr;
         struct mlx5_control_vq cvq;
         struct workqueue_struct *wq;
+        unsigned int group2asid[MLX5_VDPA_NUMVQ_GROUPS];
 };
 
 int mlx5_vdpa_alloc_pd(struct mlx5_vdpa_dev *dev, u32 *pdn, u16 uid);
diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
index d0fff559ca15..834023e8e073 100644
--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
@@ -2127,9 +2127,14 @@ static u32 mlx5_vdpa_get_vq_align(struct vdpa_device *vdev)
         return PAGE_SIZE;
 }
 
-static u32 mlx5_vdpa_get_vq_group(struct vdpa_device *vdpa, u16 idx)
+static u32 mlx5_vdpa_get_vq_group(struct vdpa_device *vdev, u16 idx)
 {
-        return 0;
+        struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+
+        if (is_ctrl_vq_idx(mvdev, idx))
+                return MLX5_VDPA_CVQ_GROUP;
+
+        return MLX5_VDPA_DATAVQ_GROUP;
 }
 
 enum { MLX5_VIRTIO_NET_F_GUEST_CSUM = 1 << 9,
@@ -2543,6 +2548,15 @@ static void mlx5_vdpa_set_status(struct vdpa_device *vdev, u8 status)
         up_write(&ndev->reslock);
 }
 
+static void init_group_to_asid_map(struct mlx5_vdpa_dev *mvdev)
+{
+        int i;
+
+        /* default mapping all groups are mapped to asid 0 */
+        for (i = 0; i < MLX5_VDPA_NUMVQ_GROUPS; i++)
+                mvdev->group2asid[i] = 0;
+}
+
 static int mlx5_vdpa_reset(struct vdpa_device *vdev)
 {
         struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
@@ -2561,7 +2575,9 @@ static int mlx5_vdpa_reset(struct vdpa_device *vdev)
         ndev->mvdev.cvq.completed_desc = 0;
         memset(ndev->event_cbs, 0, sizeof(*ndev->event_cbs) * (mvdev->max_vqs + 1));
         ndev->mvdev.actual_features = 0;
+        init_group_to_asid_map(mvdev);
         ++mvdev->generation;
+
         if (MLX5_CAP_GEN(mvdev->mdev, umem_uid_0)) {
                 if (mlx5_vdpa_create_mr(mvdev, NULL))
                         mlx5_vdpa_warn(mvdev, "create MR failed\n");
@@ -2599,26 +2615,63 @@ static u32 mlx5_vdpa_get_generation(struct vdpa_device *vdev)
         return mvdev->generation;
 }
 
-static int mlx5_vdpa_set_map(struct vdpa_device *vdev, unsigned int asid,
-                             struct vhost_iotlb *iotlb)
+static int set_map_control(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb)
+{
+        u64 start = 0ULL, last = 0ULL - 1;
+        struct vhost_iotlb_map *map;
+        int err = 0;
+
+        spin_lock(&mvdev->cvq.iommu_lock);
+        vhost_iotlb_reset(mvdev->cvq.iotlb);
+
+        for (map = vhost_iotlb_itree_first(iotlb, start, last); map;
+             map = vhost_iotlb_itree_next(map, start, last)) {
+                err = vhost_iotlb_add_range(mvdev->cvq.iotlb, map->start,
+                                            map->last, map->addr, map->perm);
+                if (err)
+                        goto out;
+        }
+
+out:
+        spin_unlock(&mvdev->cvq.iommu_lock);
+        return err;
+}
+
+static int set_map_data(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb)
 {
-        struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
-        struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
         bool change_map;
         int err;
 
-        down_write(&ndev->reslock);
-
         err = mlx5_vdpa_handle_set_map(mvdev, iotlb, &change_map);
         if (err) {
                 mlx5_vdpa_warn(mvdev, "set map failed(%d)\n", err);
-                goto err;
+                return err;
         }
 
         if (change_map)
                 err = mlx5_vdpa_change_map(mvdev, iotlb);
 
-err:
+        return err;
+}
+
+static int mlx5_vdpa_set_map(struct vdpa_device *vdev, unsigned int asid,
+                             struct vhost_iotlb *iotlb)
+{
+        struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+        struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
+        int err;
+
+        down_write(&ndev->reslock);
+        if (mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP] == asid) {
+                err = set_map_data(mvdev, iotlb);
+                if (err)
+                        goto out;
+        }
+
+        if (mvdev->group2asid[MLX5_VDPA_CVQ_GROUP] == asid)
+                err = set_map_control(mvdev, iotlb);
+
+out:
         up_write(&ndev->reslock);
         return err;
 }
@@ -2796,6 +2849,18 @@ static int mlx5_vdpa_suspend(struct vdpa_device *vdev)
         return 0;
 }
 
+static int mlx5_set_group_asid(struct vdpa_device *vdev, u32 group,
+                               unsigned int asid)
+{
+        struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+
+        if (group >= MLX5_VDPA_NUMVQ_GROUPS)
+                return -EINVAL;
+
+        mvdev->group2asid[group] = asid;
+        return 0;
+}
+
 static const struct vdpa_config_ops mlx5_vdpa_ops = {
         .set_vq_address = mlx5_vdpa_set_vq_address,
         .set_vq_num = mlx5_vdpa_set_vq_num,
@@ -2825,6 +2890,7 @@ static const struct vdpa_config_ops mlx5_vdpa_ops = {
         .set_config = mlx5_vdpa_set_config,
         .get_generation = mlx5_vdpa_get_generation,
         .set_map = mlx5_vdpa_set_map,
+        .set_group_asid = mlx5_set_group_asid,
         .free = mlx5_vdpa_free,
         .suspend = mlx5_vdpa_suspend,
 };
@@ -3055,7 +3121,7 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
         }
 
         ndev = vdpa_alloc_device(struct mlx5_vdpa_net, mvdev.vdev, mdev->device, &mlx5_vdpa_ops,
-                                 1, 1, name, false);
+                                 MLX5_VDPA_NUMVQ_GROUPS, MLX5_VDPA_NUM_AS, name, false);
         if (IS_ERR(ndev))
                 return PTR_ERR(ndev);
 
--
2.35.1
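The group-to-ASID routing added above is easiest to see in isolation: each
virtqueue group carries an ASID, and an incoming set_map() call is applied
only to the groups bound to that ASID. The self-contained sketch below models
that bookkeeping with illustrative names (dev_state, set_map and the printf
placeholders are not part of the driver) and does not attempt the real MR and
iotlb updates.

#include <stdio.h>

enum {
        DATAVQ_GROUP,
        CVQ_GROUP,
        NUMVQ_GROUPS
};

struct dev_state {
        unsigned int group2asid[NUMVQ_GROUPS];
};

/* Modeled on mlx5_set_group_asid(): bind a virtqueue group to an ASID. */
static int set_group_asid(struct dev_state *d, unsigned int group, unsigned int asid)
{
        if (group >= NUMVQ_GROUPS)
                return -1;              /* the driver returns -EINVAL here */
        d->group2asid[group] = asid;
        return 0;
}

/* Modeled on mlx5_vdpa_set_map(): apply a mapping update only to the
 * groups whose ASID matches the one being updated. */
static void set_map(struct dev_state *d, unsigned int asid)
{
        if (d->group2asid[DATAVQ_GROUP] == asid)
                printf("asid %u: refresh hardware mappings for the data virtqueues\n", asid);
        if (d->group2asid[CVQ_GROUP] == asid)
                printf("asid %u: refresh the software iotlb for the control virtqueue\n", asid);
}

int main(void)
{
        struct dev_state d = { .group2asid = { 0, 0 } };        /* reset: all groups -> asid 0 */

        set_group_asid(&d, CVQ_GROUP, 1);       /* VMM gives the CVQ its own address space */
        set_map(&d, 0);                         /* guest memory map: reaches the data VQs only */
        set_map(&d, 1);                         /* VMM-owned map: reaches the CVQ only */
        return 0;
}

With this split, a VMM can keep guest memory in one ASID for the data
virtqueues while pointing the control virtqueue group at a buffer it owns,
which is the kind of arrangement shadow control virtqueue live migration
relies on.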