From: Paolo Abeni <pabeni@redhat.com>
To: Daniel Jurgens <danielj@nvidia.com>,
netdev@vger.kernel.org, mst@redhat.com, jasowang@redhat.com,
alex.williamson@redhat.com
Cc: virtualization@lists.linux.dev, parav@nvidia.com,
shshitrit@nvidia.com, yohadt@nvidia.com,
xuanzhuo@linux.alibaba.com, eperezma@redhat.com,
shameerali.kolothum.thodi@huawei.com, jgg@ziepe.ca,
kevin.tian@intel.com, kuba@kernel.org, andrew+netdev@lunn.ch,
edumazet@google.com
Subject: Re: [PATCH net-next v6 05/12] virtio_net: Query and set flow filter caps
Date: Thu, 30 Oct 2025 12:19:02 +0100 [thread overview]
Message-ID: <b55d868c-7bc1-4e25-aef4-cdf385208400@redhat.com> (raw)
In-Reply-To: <20251027173957.2334-6-danielj@nvidia.com>
On 10/27/25 6:39 PM, Daniel Jurgens wrote:
> When probing a virtnet device, attempt to read the flow filter
> capabilities. In order to use the feature the caps must also
> be set. For now setting what was read is sufficient.
>
> Signed-off-by: Daniel Jurgens <danielj@nvidia.com>
> Reviewed-by: Parav Pandit <parav@nvidia.com>
> Reviewed-by: Shahar Shitrit <shshitrit@nvidia.com>
> ---
> v4:
> - Validate the length in the selector caps
> - Removed __free usage.
> - Removed for(int.
> v5:
> - Remove unneed () after MAX_SEL_LEN macro (test bot)
> v6:
> - Fix sparse warning "array of flexible structures" Jakub K/Simon H
> - Use new variable and validate ff_mask_size before set_cap. MST
> ---
> drivers/net/virtio_net.c | 171 +++++++++++++++++++++++++++++
> include/linux/virtio_admin.h | 1 +
> include/uapi/linux/virtio_net_ff.h | 91 +++++++++++++++
> 3 files changed, 263 insertions(+)
> create mode 100644 include/uapi/linux/virtio_net_ff.h
>
> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> index a757cbcab87f..a9fde879fdbf 100644
> --- a/drivers/net/virtio_net.c
> +++ b/drivers/net/virtio_net.c
> @@ -26,6 +26,9 @@
> #include <net/netdev_rx_queue.h>
> #include <net/netdev_queues.h>
> #include <net/xdp_sock_drv.h>
> +#include <linux/virtio_admin.h>
> +#include <net/ipv6.h>
> +#include <net/ip.h>
>
> static int napi_weight = NAPI_POLL_WEIGHT;
> module_param(napi_weight, int, 0444);
> @@ -281,6 +284,14 @@ static const struct virtnet_stat_desc virtnet_stats_tx_speed_desc_qstat[] = {
> VIRTNET_STATS_DESC_TX_QSTAT(speed, ratelimit_packets, hw_drop_ratelimits),
> };
>
> +struct virtnet_ff {
> + struct virtio_device *vdev;
> + bool ff_supported;
> + struct virtio_net_ff_cap_data *ff_caps;
> + struct virtio_net_ff_cap_mask_data *ff_mask;
> + struct virtio_net_ff_actions *ff_actions;
> +};
> +
> #define VIRTNET_Q_TYPE_RX 0
> #define VIRTNET_Q_TYPE_TX 1
> #define VIRTNET_Q_TYPE_CQ 2
> @@ -493,6 +504,8 @@ struct virtnet_info {
> struct failover *failover;
>
> u64 device_stats_cap;
> +
> + struct virtnet_ff ff;
> };
>
> struct padded_vnet_hdr {
> @@ -6753,6 +6766,160 @@ static const struct xdp_metadata_ops virtnet_xdp_metadata_ops = {
> .xmo_rx_hash = virtnet_xdp_rx_hash,
> };
>
> +static size_t get_mask_size(u16 type)
> +{
> + switch (type) {
> + case VIRTIO_NET_FF_MASK_TYPE_ETH:
> + return sizeof(struct ethhdr);
> + case VIRTIO_NET_FF_MASK_TYPE_IPV4:
> + return sizeof(struct iphdr);
> + case VIRTIO_NET_FF_MASK_TYPE_IPV6:
> + return sizeof(struct ipv6hdr);
> + case VIRTIO_NET_FF_MASK_TYPE_TCP:
> + return sizeof(struct tcphdr);
> + case VIRTIO_NET_FF_MASK_TYPE_UDP:
> + return sizeof(struct udphdr);
> + }
> +
> + return 0;
> +}
> +
> +#define MAX_SEL_LEN (sizeof(struct ipv6hdr))
> +
> +static void virtnet_ff_init(struct virtnet_ff *ff, struct virtio_device *vdev)
> +{
> + size_t ff_mask_size = sizeof(struct virtio_net_ff_cap_mask_data) +
> + sizeof(struct virtio_net_ff_selector) *
> + VIRTIO_NET_FF_MASK_TYPE_MAX;
> + struct virtio_admin_cmd_query_cap_id_result *cap_id_list;
> + struct virtio_net_ff_selector *sel;
> + size_t real_ff_mask_size;
> + int err;
> + int i;
> +
> + cap_id_list = kzalloc(sizeof(*cap_id_list), GFP_KERNEL);
> + if (!cap_id_list)
> + return;
> +
> + err = virtio_admin_cap_id_list_query(vdev, cap_id_list);
> + if (err)
> + goto err_cap_list;
> +
> + if (!(VIRTIO_CAP_IN_LIST(cap_id_list,
> + VIRTIO_NET_FF_RESOURCE_CAP) &&
> + VIRTIO_CAP_IN_LIST(cap_id_list,
> + VIRTIO_NET_FF_SELECTOR_CAP) &&
> + VIRTIO_CAP_IN_LIST(cap_id_list,
> + VIRTIO_NET_FF_ACTION_CAP)))
> + goto err_cap_list;
> +
> + ff->ff_caps = kzalloc(sizeof(*ff->ff_caps), GFP_KERNEL);
> + if (!ff->ff_caps)
> + goto err_cap_list;
> +
> + err = virtio_admin_cap_get(vdev,
> + VIRTIO_NET_FF_RESOURCE_CAP,
> + ff->ff_caps,
> + sizeof(*ff->ff_caps));
> +
> + if (err)
> + goto err_ff;
> +
> + /* VIRTIO_NET_FF_MASK_TYPE start at 1 */
> + for (i = 1; i <= VIRTIO_NET_FF_MASK_TYPE_MAX; i++)
> + ff_mask_size += get_mask_size(i);
> +
> + ff->ff_mask = kzalloc(ff_mask_size, GFP_KERNEL);
> + if (!ff->ff_mask)
> + goto err_ff;
> +
> + err = virtio_admin_cap_get(vdev,
> + VIRTIO_NET_FF_SELECTOR_CAP,
> + ff->ff_mask,
> + ff_mask_size);
> +
> + if (err)
> + goto err_ff_mask;
> +
> + ff->ff_actions = kzalloc(sizeof(*ff->ff_actions) +
> + VIRTIO_NET_FF_ACTION_MAX,
> + GFP_KERNEL);
> + if (!ff->ff_actions)
> + goto err_ff_mask;
> +
> + err = virtio_admin_cap_get(vdev,
> + VIRTIO_NET_FF_ACTION_CAP,
> + ff->ff_actions,
> + sizeof(*ff->ff_actions) + VIRTIO_NET_FF_ACTION_MAX);
> +
> + if (err)
> + goto err_ff_action;
> +
> + err = virtio_admin_cap_set(vdev,
> + VIRTIO_NET_FF_RESOURCE_CAP,
> + ff->ff_caps,
> + sizeof(*ff->ff_caps));
> + if (err)
> + goto err_ff_action;
> +
> + real_ff_mask_size = sizeof(struct virtio_net_ff_cap_mask_data);
> + sel = (void *)&ff->ff_mask->selectors[0];
> +
> + for (i = 0; i < ff->ff_mask->count; i++) {
> + if (sel->length > MAX_SEL_LEN) {
> + err = -EINVAL;
> + goto err_ff_action;
> + }
> + real_ff_mask_size += sizeof(struct virtio_net_ff_selector) + sel->length;
> + sel = (void *)sel + sizeof(*sel) + sel->length;
> + }
> +
> + if (real_ff_mask_size > ff_mask_size) {
> + err = -EINVAL;
> + goto err_ff_action;
> + }
> +
> + err = virtio_admin_cap_set(vdev,
> + VIRTIO_NET_FF_SELECTOR_CAP,
> + ff->ff_mask,
> + ff_mask_size);
> + if (err)
> + goto err_ff_action;
> +
> + err = virtio_admin_cap_set(vdev,
> + VIRTIO_NET_FF_ACTION_CAP,
> + ff->ff_actions,
> + sizeof(*ff->ff_actions) + VIRTIO_NET_FF_ACTION_MAX);
> + if (err)
> + goto err_ff_action;
> +
> + ff->vdev = vdev;
> + ff->ff_supported = true;
> +
> + kfree(cap_id_list);
> +
> + return;
> +
> +err_ff_action:
> + kfree(ff->ff_actions);
> +err_ff_mask:
> + kfree(ff->ff_mask);
> +err_ff:
> + kfree(ff->ff_caps);
> +err_cap_list:
> + kfree(cap_id_list);
Minor nit: AFAICS the ff->ff_{caps,mask,actions} pointers can be left
non-NULL even after being freed. That should not cause issues at cleanup
time, as a double free is avoided via the 'ff_supported' flag.
Still, it could foul the kmemleak check. I think it would be better to
either clear such fields here on the error paths, or set them only on
success (working with local variables up to that point).
Not a blocker anyway.
/P
next prev parent reply other threads:[~2025-10-30 11:19 UTC|newest]
Thread overview: 29+ messages / expand[flat|nested] mbox.gz Atom feed top
2025-10-27 17:39 [PATCH net-next v6 00/12] virtio_net: Add ethtool flow rules support Daniel Jurgens
2025-10-27 17:39 ` [PATCH net-next v6 01/12] virtio_pci: Remove supported_cap size build assert Daniel Jurgens
2025-11-03 5:52 ` Xuan Zhuo
2025-10-27 17:39 ` [PATCH net-next v6 02/12] virtio: Add config_op for admin commands Daniel Jurgens
2025-11-03 5:52 ` Xuan Zhuo
2025-10-27 17:39 ` [PATCH net-next v6 03/12] virtio: Expose generic device capability operations Daniel Jurgens
2025-11-03 5:52 ` Xuan Zhuo
2025-10-27 17:39 ` [PATCH net-next v6 04/12] virtio: Expose object create and destroy API Daniel Jurgens
2025-11-03 5:52 ` Xuan Zhuo
2025-10-27 17:39 ` [PATCH net-next v6 05/12] virtio_net: Query and set flow filter caps Daniel Jurgens
2025-10-30 11:19 ` Paolo Abeni [this message]
2025-10-30 14:03 ` Dan Jurgens
2025-11-03 2:38 ` Xuan Zhuo
2025-10-27 17:39 ` [PATCH net-next v6 06/12] virtio_net: Create a FF group for ethtool steering Daniel Jurgens
2025-11-03 2:47 ` Xuan Zhuo
2025-10-27 17:39 ` [PATCH net-next v6 07/12] virtio_net: Implement layer 2 ethtool flow rules Daniel Jurgens
2025-11-03 5:47 ` Xuan Zhuo
2025-10-27 17:39 ` [PATCH net-next v6 08/12] virtio_net: Use existing classifier if possible Daniel Jurgens
2025-10-30 11:31 ` Paolo Abeni
2025-10-30 14:08 ` Dan Jurgens
2025-11-03 3:22 ` Xuan Zhuo
2025-10-27 17:39 ` [PATCH net-next v6 09/12] virtio_net: Implement IPv4 ethtool flow rules Daniel Jurgens
2025-11-03 3:20 ` Xuan Zhuo
2025-10-27 17:39 ` [PATCH net-next v6 10/12] virtio_net: Add support for IPv6 ethtool steering Daniel Jurgens
2025-11-03 3:19 ` Xuan Zhuo
2025-10-27 17:39 ` [PATCH net-next v6 11/12] virtio_net: Add support for TCP and UDP ethtool rules Daniel Jurgens
2025-11-03 3:19 ` Xuan Zhuo
2025-10-27 17:39 ` [PATCH net-next v6 12/12] virtio_net: Add get ethtool flow rules ops Daniel Jurgens
2025-11-03 3:17 ` Xuan Zhuo
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=b55d868c-7bc1-4e25-aef4-cdf385208400@redhat.com \
--to=pabeni@redhat.com \
--cc=alex.williamson@redhat.com \
--cc=andrew+netdev@lunn.ch \
--cc=danielj@nvidia.com \
--cc=edumazet@google.com \
--cc=eperezma@redhat.com \
--cc=jasowang@redhat.com \
--cc=jgg@ziepe.ca \
--cc=kevin.tian@intel.com \
--cc=kuba@kernel.org \
--cc=mst@redhat.com \
--cc=netdev@vger.kernel.org \
--cc=parav@nvidia.com \
--cc=shameerali.kolothum.thodi@huawei.com \
--cc=shshitrit@nvidia.com \
--cc=virtualization@lists.linux.dev \
--cc=xuanzhuo@linux.alibaba.com \
--cc=yohadt@nvidia.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).