From: "Michael S. Tsirkin" <mst@redhat.com>
To: Daniel Jurgens <danielj@nvidia.com>
Cc: netdev@vger.kernel.org, jasowang@redhat.com, pabeni@redhat.com,
virtualization@lists.linux.dev, parav@nvidia.com,
shshitrit@nvidia.com, yohadt@nvidia.com,
xuanzhuo@linux.alibaba.com, eperezma@redhat.com, jgg@ziepe.ca,
kevin.tian@intel.com, kuba@kernel.org, andrew+netdev@lunn.ch,
edumazet@google.com
Subject: Re: [PATCH net-next v16 09/12] virtio_net: Implement IPv4 ethtool flow rules
Date: Thu, 22 Jan 2026 12:21:19 -0500 [thread overview]
Message-ID: <20260122115857-mutt-send-email-mst@kernel.org> (raw)
In-Reply-To: <20260121220652.894364-10-danielj@nvidia.com>
On Wed, Jan 21, 2026 at 04:06:49PM -0600, Daniel Jurgens wrote:
> Add support for IP_USER type rules from ethtool.
>
> Example:
> $ ethtool -U ens9 flow-type ip4 src-ip 192.168.51.101 action -1
> Added rule with ID 1
>
> The example rule will drop packets with the source IP specified.
>
> Signed-off-by: Daniel Jurgens <danielj@nvidia.com>
> Reviewed-by: Parav Pandit <parav@nvidia.com>
> Reviewed-by: Shahar Shitrit <shshitrit@nvidia.com>
> Reviewed-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
> ---
> v4:
> - Fixed bug in protocol check of parse_ip4
> - (u8 *) to (void *) casting.
> - Alignment issues.
>
> v12
> - refactor calculate_flow_sizes to remove goto. MST
> - refactor build_and_insert to remove goto validate. MST
> - Move parse_ip4 l3_mask check to TCP/UDP patch. MST
> - Check saddr/daddr mask before copying in parse_ip4. MST
> - Remove tos check in setup_ip_key_mask.
> - check l4_4_bytes mask is 0 in setup_ip_key_mask. MST
> - changed return of setup_ip_key_mask to -EINVAL.
> - BUG_ON if key overflows u8 size in calculate_flow_sizes. MST
>
> v13:
> - Set tos field if applicable in parse_ip4. MST
> - Check tos in validate_ip4_mask. MST
> - check l3_mask before setting addr and mask in parse_ip4. MST
> - use has_ipv4 vs numhdrs for branching in build_and_insert. MST
> ---
> ---
> drivers/net/virtio_net.c | 129 +++++++++++++++++++++++++++++++++++++--
> 1 file changed, 123 insertions(+), 6 deletions(-)
>
> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> index ba231f10b803..7ce120baeb41 100644
> --- a/drivers/net/virtio_net.c
> +++ b/drivers/net/virtio_net.c
> @@ -5818,6 +5818,39 @@ static bool validate_eth_mask(const struct virtnet_ff *ff,
> return true;
> }
>
> +static bool validate_ip4_mask(const struct virtnet_ff *ff,
> + const struct virtio_net_ff_selector *sel,
> + const struct virtio_net_ff_selector *sel_cap)
> +{
> + bool partial_mask = !!(sel_cap->flags & VIRTIO_NET_FF_MASK_F_PARTIAL_MASK);
> + struct iphdr *cap, *mask;
> +
> + cap = (struct iphdr *)&sel_cap->mask;
> + mask = (struct iphdr *)&sel->mask;
this cast is only portable if sel and sel_cap are properly aligned.
Because if not, then at least the saddr/daddr accesses below are
not portable and need the treatment described in
Documentation/core-api/unaligned-memory-access.rst
> +
> + if (mask->saddr &&
> + !check_mask_vs_cap(&mask->saddr, &cap->saddr,
> + sizeof(__be32), partial_mask))
> + return false;
> +
> + if (mask->daddr &&
> + !check_mask_vs_cap(&mask->daddr, &cap->daddr,
> + sizeof(__be32), partial_mask))
> + return false;
> +
> + if (mask->protocol &&
> + !check_mask_vs_cap(&mask->protocol, &cap->protocol,
> + sizeof(u8), partial_mask))
> + return false;
> +
> + if (mask->tos &&
> + !check_mask_vs_cap(&mask->tos, &cap->tos,
> + sizeof(u8), partial_mask))
> + return false;
> +
> + return true;
> +}
> +
> static bool validate_mask(const struct virtnet_ff *ff,
> const struct virtio_net_ff_selector *sel)
> {
> @@ -5829,11 +5862,41 @@ static bool validate_mask(const struct virtnet_ff *ff,
> switch (sel->type) {
> case VIRTIO_NET_FF_MASK_TYPE_ETH:
> return validate_eth_mask(ff, sel, sel_cap);
> +
> + case VIRTIO_NET_FF_MASK_TYPE_IPV4:
> + return validate_ip4_mask(ff, sel, sel_cap);
> }
>
> return false;
> }
>
> +static void parse_ip4(struct iphdr *mask, struct iphdr *key,
> + const struct ethtool_rx_flow_spec *fs)
> +{
> + const struct ethtool_usrip4_spec *l3_mask = &fs->m_u.usr_ip4_spec;
> + const struct ethtool_usrip4_spec *l3_val = &fs->h_u.usr_ip4_spec;
this is only portable if mask and key are properly aligned.
Because if not, then at least the saddr/daddr accesses below are
not portable and need the treatment described in
Documentation/core-api/unaligned-memory-access.rst
> +
> + if (l3_mask->ip4src) {
> + mask->saddr = l3_mask->ip4src;
> + key->saddr = l3_val->ip4src;
> + }
> +
> + if (l3_mask->ip4dst) {
> + mask->daddr = l3_mask->ip4dst;
> + key->daddr = l3_val->ip4dst;
> + }
> +
> + if (l3_mask->tos) {
> + mask->tos = l3_mask->tos;
> + key->tos = l3_val->tos;
> + }
> +}
> +
> +static bool has_ipv4(u32 flow_type)
> +{
> + return flow_type == IP_USER_FLOW;
> +}
> +
> static int setup_classifier(struct virtnet_ff *ff,
> struct virtnet_classifier **c)
> {
> @@ -5969,6 +6032,7 @@ static bool supported_flow_type(const struct ethtool_rx_flow_spec *fs)
> {
> switch (fs->flow_type) {
> case ETHER_FLOW:
> + case IP_USER_FLOW:
> return true;
> }
>
> @@ -6000,8 +6064,18 @@ static void calculate_flow_sizes(struct ethtool_rx_flow_spec *fs,
> u8 *key_size, size_t *classifier_size,
> int *num_hdrs)
> {
> + size_t size = sizeof(struct ethhdr);
> +
> *num_hdrs = 1;
> - *key_size = sizeof(struct ethhdr);
> +
> + if (fs->flow_type != ETHER_FLOW) {
> + ++(*num_hdrs);
> + if (has_ipv4(fs->flow_type))
> + size += sizeof(struct iphdr);
> + }
> +
> + BUG_ON(size > 0xff);
> + *key_size = size;
> /*
> * The classifier size is the size of the classifier header, a selector
> * header for each type of header in the match criteria, and each header
> @@ -6013,8 +6087,9 @@ static void calculate_flow_sizes(struct ethtool_rx_flow_spec *fs,
> }
>
> static void setup_eth_hdr_key_mask(struct virtio_net_ff_selector *selector,
> - u8 *key,
> - const struct ethtool_rx_flow_spec *fs)
> + u8 *key,
> + const struct ethtool_rx_flow_spec *fs,
> + int num_hdrs)
> {
> struct ethhdr *eth_m = (struct ethhdr *)&selector->mask;
> struct ethhdr *eth_k = (struct ethhdr *)key;
> @@ -6022,8 +6097,35 @@ static void setup_eth_hdr_key_mask(struct virtio_net_ff_selector *selector,
> selector->type = VIRTIO_NET_FF_MASK_TYPE_ETH;
> selector->length = sizeof(struct ethhdr);
>
> - memcpy(eth_m, &fs->m_u.ether_spec, sizeof(*eth_m));
> - memcpy(eth_k, &fs->h_u.ether_spec, sizeof(*eth_k));
> + if (num_hdrs > 1) {
> + eth_m->h_proto = cpu_to_be16(0xffff);
> + eth_k->h_proto = cpu_to_be16(ETH_P_IP);
> + } else {
> + memcpy(eth_m, &fs->m_u.ether_spec, sizeof(*eth_m));
> + memcpy(eth_k, &fs->h_u.ether_spec, sizeof(*eth_k));
> + }
> +}
> +
> +static int setup_ip_key_mask(struct virtio_net_ff_selector *selector,
> + u8 *key,
> + const struct ethtool_rx_flow_spec *fs)
> +{
> + struct iphdr *v4_m = (struct iphdr *)&selector->mask;
> + struct iphdr *v4_k = (struct iphdr *)key;
Are mask and key guaranteed to be aligned here?
Because if not, then at least the saddr/daddr accesses in the
parse_ip4 call below are not portable and need the treatment
described in Documentation/core-api/unaligned-memory-access.rst
> +
> + selector->type = VIRTIO_NET_FF_MASK_TYPE_IPV4;
> + selector->length = sizeof(struct iphdr);
> +
> + if (fs->h_u.usr_ip4_spec.l4_4_bytes ||
> + fs->h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4 ||
> + fs->m_u.usr_ip4_spec.l4_4_bytes ||
> + fs->m_u.usr_ip4_spec.ip_ver ||
> + fs->m_u.usr_ip4_spec.proto)
> + return -EINVAL;
> +
> + parse_ip4(v4_m, v4_k, fs);
> +
> + return 0;
> }
>
> static int
> @@ -6045,6 +6147,13 @@ validate_classifier_selectors(struct virtnet_ff *ff,
> return 0;
> }
>
> +static
> +struct virtio_net_ff_selector *next_selector(struct virtio_net_ff_selector *sel)
> +{
> + return (void *)sel + sizeof(struct virtio_net_ff_selector) +
> + sel->length;
> +}
> +
> static int build_and_insert(struct virtnet_ff *ff,
> struct virtnet_ethtool_rule *eth_rule)
> {
> @@ -6082,7 +6191,15 @@ static int build_and_insert(struct virtnet_ff *ff,
> classifier->count = num_hdrs;
> selector = (void *)&classifier->selectors[0];
>
> - setup_eth_hdr_key_mask(selector, key, fs);
> + setup_eth_hdr_key_mask(selector, key, fs, num_hdrs);
So this will set sel->length to 14.
> +
> + if (has_ipv4(fs->flow_type)) {
> + selector = next_selector(selector);
And this will point the next one at an offset of 8 + 14 then?
> +
> + err = setup_ip_key_mask(selector, key + sizeof(struct ethhdr), fs);
and this will pass unaligned mask and key?
> + if (err)
> + goto err_classifier;
> + }
>
> err = validate_classifier_selectors(ff, classifier, num_hdrs);
> if (err)
> --
> 2.50.1
next prev parent reply other threads:[~2026-01-22 17:21 UTC|newest]
Thread overview: 21+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-01-21 22:06 [PATCH net-next v16 00/12] virtio_net: Add ethtool flow rules support Daniel Jurgens
2026-01-21 22:06 ` [PATCH net-next v16 01/12] virtio_pci: Remove supported_cap size build assert Daniel Jurgens
2026-01-21 22:06 ` [PATCH net-next v16 02/12] virtio: Add config_op for admin commands Daniel Jurgens
2026-01-21 22:06 ` [PATCH net-next v16 03/12] virtio: Expose generic device capability operations Daniel Jurgens
2026-01-22 17:32 ` Michael S. Tsirkin
2026-01-21 22:06 ` [PATCH net-next v16 04/12] virtio: Expose object create and destroy API Daniel Jurgens
2026-01-21 22:06 ` [PATCH net-next v16 05/12] virtio_net: Query and set flow filter caps Daniel Jurgens
2026-01-23 1:43 ` [net-next,v16,05/12] " Jakub Kicinski
2026-01-29 17:51 ` Dan Jurgens
2026-01-21 22:06 ` [PATCH net-next v16 06/12] virtio_net: Create a FF group for ethtool steering Daniel Jurgens
2026-01-22 21:51 ` Michael S. Tsirkin
2026-01-29 17:08 ` Dan Jurgens
2026-01-21 22:06 ` [PATCH net-next v16 07/12] virtio_net: Implement layer 2 ethtool flow rules Daniel Jurgens
2026-01-23 1:45 ` [net-next,v16,07/12] " Jakub Kicinski
2026-01-21 22:06 ` [PATCH net-next v16 08/12] virtio_net: Use existing classifier if possible Daniel Jurgens
2026-01-21 22:06 ` [PATCH net-next v16 09/12] virtio_net: Implement IPv4 ethtool flow rules Daniel Jurgens
2026-01-22 17:21 ` Michael S. Tsirkin [this message]
2026-01-29 18:46 ` Dan Jurgens
2026-01-21 22:06 ` [PATCH net-next v16 10/12] virtio_net: Add support for IPv6 ethtool steering Daniel Jurgens
2026-01-21 22:06 ` [PATCH net-next v16 11/12] virtio_net: Add support for TCP and UDP ethtool rules Daniel Jurgens
2026-01-21 22:06 ` [PATCH net-next v16 12/12] virtio_net: Add get ethtool flow rules ops Daniel Jurgens
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260122115857-mutt-send-email-mst@kernel.org \
--to=mst@redhat.com \
--cc=andrew+netdev@lunn.ch \
--cc=danielj@nvidia.com \
--cc=edumazet@google.com \
--cc=eperezma@redhat.com \
--cc=jasowang@redhat.com \
--cc=jgg@ziepe.ca \
--cc=kevin.tian@intel.com \
--cc=kuba@kernel.org \
--cc=netdev@vger.kernel.org \
--cc=pabeni@redhat.com \
--cc=parav@nvidia.com \
--cc=shshitrit@nvidia.com \
--cc=virtualization@lists.linux.dev \
--cc=xuanzhuo@linux.alibaba.com \
--cc=yohadt@nvidia.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox