* Re: [PATCH v7 5/5] vdpa/mlx5: Use readers/writers semaphore instead of mutex
[not found] ` <20220517131348.498421-6-elic@nvidia.com>
@ 2022-05-17 23:59 ` Si-Wei Liu
0 siblings, 0 replies; 2+ messages in thread
From: Si-Wei Liu @ 2022-05-17 23:59 UTC (permalink / raw)
To: Eli Cohen, mst, jasowang; +Cc: linux-kernel, virtualization
On 5/17/2022 6:13 AM, Eli Cohen wrote:
> Reading statistics could be done intensively and by several processes
> concurrently. Reader's lock is sufficient in this case.
>
> Change reslock from mutex to a rwsem.
>
> Suggested-by: Si-Wei Liu <si-wei.liu@oracle.com>
> Signed-off-by: Eli Cohen <elic@nvidia.com>
Reviewed-by: Si-Wei Liu <si-wei.liu@oracle.com>
> ---
> drivers/vdpa/mlx5/net/mlx5_vnet.c | 41 ++++++++++++++-----------------
> 1 file changed, 19 insertions(+), 22 deletions(-)
>
> diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
> index 2b815ef850c8..57cfc64248b7 100644
> --- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
> +++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
> @@ -155,7 +155,7 @@ struct mlx5_vdpa_net {
> * since memory map might change and we need to destroy and create
> * resources while driver in operational.
> */
> - struct mutex reslock;
> + struct rw_semaphore reslock;
> struct mlx5_flow_table *rxft;
> struct mlx5_fc *rx_counter;
> struct mlx5_flow_handle *rx_rule_ucast;
> @@ -1695,7 +1695,7 @@ static void mlx5_cvq_kick_handler(struct work_struct *work)
> ndev = to_mlx5_vdpa_ndev(mvdev);
> cvq = &mvdev->cvq;
>
> - mutex_lock(&ndev->reslock);
> + down_write(&ndev->reslock);
>
> if (!(mvdev->status & VIRTIO_CONFIG_S_DRIVER_OK))
> goto out;
> @@ -1746,7 +1746,7 @@ static void mlx5_cvq_kick_handler(struct work_struct *work)
> }
>
> out:
> - mutex_unlock(&ndev->reslock);
> + up_write(&ndev->reslock);
> }
>
> static void mlx5_vdpa_kick_vq(struct vdpa_device *vdev, u16 idx)
> @@ -2244,7 +2244,7 @@ static int setup_driver(struct mlx5_vdpa_dev *mvdev)
> struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
> int err;
>
> - WARN_ON(!mutex_is_locked(&ndev->reslock));
> + WARN_ON(!rwsem_is_locked(&ndev->reslock));
>
> if (ndev->setup) {
> mlx5_vdpa_warn(mvdev, "setup driver called for already setup driver\n");
> @@ -2292,7 +2292,7 @@ static int setup_driver(struct mlx5_vdpa_dev *mvdev)
> static void teardown_driver(struct mlx5_vdpa_net *ndev)
> {
>
> - WARN_ON(!mutex_is_locked(&ndev->reslock));
> + WARN_ON(!rwsem_is_locked(&ndev->reslock));
>
> if (!ndev->setup)
> return;
> @@ -2322,7 +2322,7 @@ static void mlx5_vdpa_set_status(struct vdpa_device *vdev, u8 status)
>
> print_status(mvdev, status, true);
>
> - mutex_lock(&ndev->reslock);
> + down_write(&ndev->reslock);
>
> if ((status ^ ndev->mvdev.status) & VIRTIO_CONFIG_S_DRIVER_OK) {
> if (status & VIRTIO_CONFIG_S_DRIVER_OK) {
> @@ -2338,14 +2338,14 @@ static void mlx5_vdpa_set_status(struct vdpa_device *vdev, u8 status)
> }
>
> ndev->mvdev.status = status;
> - mutex_unlock(&ndev->reslock);
> + up_write(&ndev->reslock);
> return;
>
> err_setup:
> mlx5_vdpa_destroy_mr(&ndev->mvdev);
> ndev->mvdev.status |= VIRTIO_CONFIG_S_FAILED;
> err_clear:
> - mutex_unlock(&ndev->reslock);
> + up_write(&ndev->reslock);
> }
>
> static int mlx5_vdpa_reset(struct vdpa_device *vdev)
> @@ -2356,7 +2356,7 @@ static int mlx5_vdpa_reset(struct vdpa_device *vdev)
> print_status(mvdev, 0, true);
> mlx5_vdpa_info(mvdev, "performing device reset\n");
>
> - mutex_lock(&ndev->reslock);
> + down_write(&ndev->reslock);
> teardown_driver(ndev);
> clear_vqs_ready(ndev);
> mlx5_vdpa_destroy_mr(&ndev->mvdev);
> @@ -2371,7 +2371,7 @@ static int mlx5_vdpa_reset(struct vdpa_device *vdev)
> if (mlx5_vdpa_create_mr(mvdev, NULL))
> mlx5_vdpa_warn(mvdev, "create MR failed\n");
> }
> - mutex_unlock(&ndev->reslock);
> + up_write(&ndev->reslock);
>
> return 0;
> }
> @@ -2411,7 +2411,7 @@ static int mlx5_vdpa_set_map(struct vdpa_device *vdev, struct vhost_iotlb *iotlb
> bool change_map;
> int err;
>
> - mutex_lock(&ndev->reslock);
> + down_write(&ndev->reslock);
>
> err = mlx5_vdpa_handle_set_map(mvdev, iotlb, &change_map);
> if (err) {
> @@ -2423,7 +2423,7 @@ static int mlx5_vdpa_set_map(struct vdpa_device *vdev, struct vhost_iotlb *iotlb
> err = mlx5_vdpa_change_map(mvdev, iotlb);
>
> err:
> - mutex_unlock(&ndev->reslock);
> + up_write(&ndev->reslock);
> return err;
> }
>
> @@ -2442,7 +2442,6 @@ static void mlx5_vdpa_free(struct vdpa_device *vdev)
> mlx5_mpfs_del_mac(pfmdev, ndev->config.mac);
> }
> mlx5_vdpa_free_resources(&ndev->mvdev);
> - mutex_destroy(&ndev->reslock);
> kfree(ndev->event_cbs);
> kfree(ndev->vqs);
> }
> @@ -2527,7 +2526,7 @@ static int mlx5_vdpa_get_vendor_vq_stats(struct vdpa_device *vdev, u16 idx,
> u64 completed_desc;
> int err = 0;
>
> - mutex_lock(&ndev->reslock);
> + down_read(&ndev->reslock);
> if (!is_index_valid(mvdev, idx)) {
> NL_SET_ERR_MSG_MOD(extack, "virtqueue index is not valid");
> err = -EINVAL;
> @@ -2566,7 +2565,7 @@ static int mlx5_vdpa_get_vendor_vq_stats(struct vdpa_device *vdev, u16 idx,
>
> err = 0;
> out_err:
> - mutex_unlock(&ndev->reslock);
> + up_read(&ndev->reslock);
> return err;
> }
>
> @@ -2835,18 +2834,18 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
> }
>
> init_mvqs(ndev);
> - mutex_init(&ndev->reslock);
> + init_rwsem(&ndev->reslock);
> config = &ndev->config;
>
> if (add_config->mask & BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MTU)) {
> err = config_func_mtu(mdev, add_config->net.mtu);
> if (err)
> - goto err_mtu;
> + goto err_alloc;
> }
>
> err = query_mtu(mdev, &mtu);
> if (err)
> - goto err_mtu;
> + goto err_alloc;
>
> ndev->config.mtu = cpu_to_mlx5vdpa16(mvdev, mtu);
>
> @@ -2860,14 +2859,14 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
> } else {
> err = mlx5_query_nic_vport_mac_address(mdev, 0, 0, config->mac);
> if (err)
> - goto err_mtu;
> + goto err_alloc;
> }
>
> if (!is_zero_ether_addr(config->mac)) {
> pfmdev = pci_get_drvdata(pci_physfn(mdev->pdev));
> err = mlx5_mpfs_add_mac(pfmdev, config->mac);
> if (err)
> - goto err_mtu;
> + goto err_alloc;
>
> ndev->mvdev.mlx_features |= BIT_ULL(VIRTIO_NET_F_MAC);
> }
> @@ -2917,8 +2916,6 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
> err_mpfs:
> if (!is_zero_ether_addr(config->mac))
> mlx5_mpfs_del_mac(pfmdev, config->mac);
> -err_mtu:
> - mutex_destroy(&ndev->reslock);
> err_alloc:
> put_device(&mvdev->vdev.dev);
> return err;
_______________________________________________
Virtualization mailing list
Virtualization@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/virtualization
^ permalink raw reply [flat|nested] 2+ messages in thread
* Re: [PATCH v7 2/5] vdpa: Add support for querying vendor statistics
[not found] ` <20220517131348.498421-3-elic@nvidia.com>
@ 2022-05-18 1:03 ` Si-Wei Liu
0 siblings, 0 replies; 2+ messages in thread
From: Si-Wei Liu @ 2022-05-18 1:03 UTC (permalink / raw)
To: Eli Cohen, mst, jasowang; +Cc: linux-kernel, virtualization
On 5/17/2022 6:13 AM, Eli Cohen wrote:
> Allows to read vendor statistics of a vdpa device. The specific
> statistics data are received from the upstream driver in the form of an
> (attribute name, attribute value) pairs.
>
> An example of statistics for mlx5_vdpa device are:
>
> received_desc - number of descriptors received by the virtqueue
> completed_desc - number of descriptors completed by the virtqueue
>
> A descriptor using indirect buffers is still counted as 1. In addition,
> N chained descriptors are counted correctly N times as one would expect.
>
> A new callback was added to vdpa_config_ops which provides the means for
> the vdpa driver to return statistics results.
>
> The interface allows for reading all the supported virtqueues, including
> the control virtqueue if it exists.
>
> Below are some examples taken from mlx5_vdpa which are introduced in the
> following patch:
>
> 1. Read statistics for the virtqueue at index 1
>
> $ vdpa dev vstats show vdpa-a qidx 1
> vdpa-a:
> queue_type tx queue_index 1 received_desc 3844836 completed_desc 3844836
>
> 2. Read statistics for the virtqueue at index 32
> $ vdpa dev vstats show vdpa-a qidx 32
> vdpa-a:
> queue_type control_vq queue_index 32 received_desc 62 completed_desc 62
>
> 3. Read statistics for the virtqueue at index 0 with JSON output
> $ vdpa -j dev vstats show vdpa-a qidx 0
> {"vstats":{"vdpa-a":{
> "queue_type":"rx","queue_index":0,"name":"received_desc","value":417776,\
> "name":"completed_desc","value":417548}}}
>
> 4. Read statistics for the virtqueue at index 0 with pretty JSON output
> $ vdpa -jp dev vstats show vdpa-a qidx 0
> {
> "vstats": {
> "vdpa-a": {
>
> "queue_type": "rx",
> "queue_index": 0,
> "name": "received_desc",
> "value": 417776,
> "name": "completed_desc",
> "value": 417548
> }
> }
> }
>
> Signed-off-by: Eli Cohen <elic@nvidia.com>
> ---
> drivers/vdpa/vdpa.c | 160 ++++++++++++++++++++++++++++++++++++++
> include/linux/vdpa.h | 3 +
> include/uapi/linux/vdpa.h | 6 ++
> 3 files changed, 169 insertions(+)
>
> diff --git a/drivers/vdpa/vdpa.c b/drivers/vdpa/vdpa.c
> index fac89a0d8178..1b810961ccc3 100644
> --- a/drivers/vdpa/vdpa.c
> +++ b/drivers/vdpa/vdpa.c
> @@ -914,6 +914,106 @@ vdpa_dev_config_fill(struct vdpa_device *vdev, struct sk_buff *msg, u32 portid,
> return err;
> }
>
> +static int vdpa_fill_stats_rec(struct vdpa_device *vdev, struct sk_buff *msg,
> + struct genl_info *info, u32 index)
> +{
> + struct virtio_net_config config = {};
> + u64 features;
> + u16 max_vqp;
> + u8 status;
> + int err;
> +
This entire function perhaps needs to take cf_mutex (worth
converting to rwsem as well) to guard against concurrent modification
via .set_status() or .set_config() from the upper stack, similar to
vdpa_dev_config_fill().
> + status = vdev->config->get_status(vdev);
> + if (!(status & VIRTIO_CONFIG_S_FEATURES_OK)) {
> + NL_SET_ERR_MSG_MOD(info->extack, "feature negotiation not complete");
> + return -EAGAIN;
> + }
> + vdpa_get_config(vdev, 0, &config, sizeof(config));
This should use vdpa_get_config_unlocked() without lock.
Regards,
-Siwei
> +
> + max_vqp = le16_to_cpu(config.max_virtqueue_pairs);
> + if (nla_put_u16(msg, VDPA_ATTR_DEV_NET_CFG_MAX_VQP, max_vqp))
> + return -EMSGSIZE;
> +
> + features = vdev->config->get_driver_features(vdev);
> + if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_NEGOTIATED_FEATURES,
> + features, VDPA_ATTR_PAD))
> + return -EMSGSIZE;
> +
> + if (nla_put_u32(msg, VDPA_ATTR_DEV_QUEUE_INDEX, index))
> + return -EMSGSIZE;
> +
> + err = vdev->config->get_vendor_vq_stats(vdev, index, msg, info->extack);
> + if (err)
> + return err;
> +
> + return 0;
> +}
> +
> +static int vendor_stats_fill(struct vdpa_device *vdev, struct sk_buff *msg,
> + struct genl_info *info, u32 index)
> +{
> + int err;
> +
> + if (!vdev->config->get_vendor_vq_stats)
> + return -EOPNOTSUPP;
> +
> + err = vdpa_fill_stats_rec(vdev, msg, info, index);
> + if (err)
> + return err;
> +
> + return 0;
> +}
> +
> +static int vdpa_dev_vendor_stats_fill(struct vdpa_device *vdev,
> + struct sk_buff *msg,
> + struct genl_info *info, u32 index)
> +{
> + u32 device_id;
> + void *hdr;
> + int err;
> + u32 portid = info->snd_portid;
> + u32 seq = info->snd_seq;
> + u32 flags = 0;
> +
> + hdr = genlmsg_put(msg, portid, seq, &vdpa_nl_family, flags,
> + VDPA_CMD_DEV_VSTATS_GET);
> + if (!hdr)
> + return -EMSGSIZE;
> +
> + if (nla_put_string(msg, VDPA_ATTR_DEV_NAME, dev_name(&vdev->dev))) {
> + err = -EMSGSIZE;
> + goto undo_msg;
> + }
> +
> + device_id = vdev->config->get_device_id(vdev);
> + if (nla_put_u32(msg, VDPA_ATTR_DEV_ID, device_id)) {
> + err = -EMSGSIZE;
> + goto undo_msg;
> + }
> +
> + switch (device_id) {
> + case VIRTIO_ID_NET:
> + if (index > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX) {
> + NL_SET_ERR_MSG_MOD(info->extack, "queue index excceeds max value");
> + err = -ERANGE;
> + break;
> + }
> +
> + err = vendor_stats_fill(vdev, msg, info, index);
> + break;
> + default:
> + err = -EOPNOTSUPP;
> + break;
> + }
> + genlmsg_end(msg, hdr);
> +
> + return err;
> +
> +undo_msg:
> + genlmsg_cancel(msg, hdr);
> + return err;
> +}
> +
> static int vdpa_nl_cmd_dev_config_get_doit(struct sk_buff *skb, struct genl_info *info)
> {
> struct vdpa_device *vdev;
> @@ -995,6 +1095,60 @@ vdpa_nl_cmd_dev_config_get_dumpit(struct sk_buff *msg, struct netlink_callback *
> return msg->len;
> }
>
> +static int vdpa_nl_cmd_dev_stats_get_doit(struct sk_buff *skb,
> + struct genl_info *info)
> +{
> + struct vdpa_device *vdev;
> + struct sk_buff *msg;
> + const char *devname;
> + struct device *dev;
> + u32 index;
> + int err;
> +
> + if (!info->attrs[VDPA_ATTR_DEV_NAME])
> + return -EINVAL;
> +
> + if (!info->attrs[VDPA_ATTR_DEV_QUEUE_INDEX])
> + return -EINVAL;
> +
> + devname = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]);
> + msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
> + if (!msg)
> + return -ENOMEM;
> +
> + index = nla_get_u32(info->attrs[VDPA_ATTR_DEV_QUEUE_INDEX]);
> + mutex_lock(&vdpa_dev_mutex);
> + dev = bus_find_device(&vdpa_bus, NULL, devname, vdpa_name_match);
> + if (!dev) {
> + NL_SET_ERR_MSG_MOD(info->extack, "device not found");
> + err = -ENODEV;
> + goto dev_err;
> + }
> + vdev = container_of(dev, struct vdpa_device, dev);
> + if (!vdev->mdev) {
> + NL_SET_ERR_MSG_MOD(info->extack, "unmanaged vdpa device");
> + err = -EINVAL;
> + goto mdev_err;
> + }
> + err = vdpa_dev_vendor_stats_fill(vdev, msg, info, index);
> + if (err)
> + goto mdev_err;
> +
> + err = genlmsg_reply(msg, info);
> +
> + put_device(dev);
> + mutex_unlock(&vdpa_dev_mutex);
> +
> + return err;
> +
> +mdev_err:
> + put_device(dev);
> +dev_err:
> + nlmsg_free(msg);
> + mutex_unlock(&vdpa_dev_mutex);
> + return err;
> +}
> +
> static const struct nla_policy vdpa_nl_policy[VDPA_ATTR_MAX + 1] = {
> [VDPA_ATTR_MGMTDEV_BUS_NAME] = { .type = NLA_NUL_STRING },
> [VDPA_ATTR_MGMTDEV_DEV_NAME] = { .type = NLA_STRING },
> @@ -1035,6 +1189,12 @@ static const struct genl_ops vdpa_nl_ops[] = {
> .doit = vdpa_nl_cmd_dev_config_get_doit,
> .dumpit = vdpa_nl_cmd_dev_config_get_dumpit,
> },
> + {
> + .cmd = VDPA_CMD_DEV_VSTATS_GET,
> + .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
> + .doit = vdpa_nl_cmd_dev_stats_get_doit,
> + .flags = GENL_ADMIN_PERM,
> + },
> };
>
> static struct genl_family vdpa_nl_family __ro_after_init = {
> diff --git a/include/linux/vdpa.h b/include/linux/vdpa.h
> index 8943a209202e..2ae8443331e1 100644
> --- a/include/linux/vdpa.h
> +++ b/include/linux/vdpa.h
> @@ -276,6 +276,9 @@ struct vdpa_config_ops {
> const struct vdpa_vq_state *state);
> int (*get_vq_state)(struct vdpa_device *vdev, u16 idx,
> struct vdpa_vq_state *state);
> + int (*get_vendor_vq_stats)(struct vdpa_device *vdev, u16 idx,
> + struct sk_buff *msg,
> + struct netlink_ext_ack *extack);
> struct vdpa_notification_area
> (*get_vq_notification)(struct vdpa_device *vdev, u16 idx);
> /* vq irq is not expected to be changed once DRIVER_OK is set */
> diff --git a/include/uapi/linux/vdpa.h b/include/uapi/linux/vdpa.h
> index 1061d8d2d09d..25c55cab3d7c 100644
> --- a/include/uapi/linux/vdpa.h
> +++ b/include/uapi/linux/vdpa.h
> @@ -18,6 +18,7 @@ enum vdpa_command {
> VDPA_CMD_DEV_DEL,
> VDPA_CMD_DEV_GET, /* can dump */
> VDPA_CMD_DEV_CONFIG_GET, /* can dump */
> + VDPA_CMD_DEV_VSTATS_GET,
> };
>
> enum vdpa_attr {
> @@ -46,6 +47,11 @@ enum vdpa_attr {
> VDPA_ATTR_DEV_NEGOTIATED_FEATURES, /* u64 */
> VDPA_ATTR_DEV_MGMTDEV_MAX_VQS, /* u32 */
> VDPA_ATTR_DEV_SUPPORTED_FEATURES, /* u64 */
> +
> + VDPA_ATTR_DEV_QUEUE_INDEX, /* u32 */
> + VDPA_ATTR_DEV_VENDOR_ATTR_NAME, /* string */
> + VDPA_ATTR_DEV_VENDOR_ATTR_VALUE, /* u64 */
> +
> /* new attributes must be added above here */
> VDPA_ATTR_MAX,
> };
_______________________________________________
Virtualization mailing list
Virtualization@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/virtualization
^ permalink raw reply [flat|nested] 2+ messages in thread
end of thread, other threads:[~2022-05-18 1:03 UTC | newest]
Thread overview: 2+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
[not found] <20220517131348.498421-1-elic@nvidia.com>
[not found] ` <20220517131348.498421-6-elic@nvidia.com>
2022-05-17 23:59 ` [PATCH v7 5/5] vdpa/mlx5: Use readers/writers semaphore instead of mutex Si-Wei Liu
[not found] ` <20220517131348.498421-3-elic@nvidia.com>
2022-05-18 1:03 ` [PATCH v7 2/5] vdpa: Add support for querying vendor statistics Si-Wei Liu
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).