From: Daniel Jurgens <danielj@nvidia.com>
To: <netdev@vger.kernel.org>, <mst@redhat.com>, <jasowang@redhat.com>,
<pabeni@redhat.com>
Cc: <virtualization@lists.linux.dev>, <parav@nvidia.com>,
<shshitrit@nvidia.com>, <yohadt@nvidia.com>,
<xuanzhuo@linux.alibaba.com>, <eperezma@redhat.com>,
<jgg@ziepe.ca>, <kevin.tian@intel.com>, <kuba@kernel.org>,
<andrew+netdev@lunn.ch>, <edumazet@google.com>,
"Daniel Jurgens" <danielj@nvidia.com>
Subject: [PATCH net-next v12 11/12] virtio_net: Add support for TCP and UDP ethtool rules
Date: Wed, 19 Nov 2025 13:15:22 -0600
Message-ID: <20251119191524.4572-12-danielj@nvidia.com>
In-Reply-To: <20251119191524.4572-1-danielj@nvidia.com>

Implement TCP and UDP V4/V6 ethtool flow types.

Examples:

$ ethtool -U ens9 flow-type udp4 dst-ip 192.168.5.2 dst-port\
4321 action 20
Added rule with ID 4

This example directs IPv4 UDP traffic with the specified address and
port to queue 20.

$ ethtool -U ens9 flow-type tcp6 src-ip 2001:db8::1 src-port 1234 dst-ip\
2001:db8::2 dst-port 4321 action 12
Added rule with ID 5

This example directs IPv6 TCP traffic with the specified address and
port to queue 12.
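
For reference (not specific to this patch), installed rules can be
listed and removed with the standard ethtool ntuple commands, using the
rule ID reported when the rule was added:

$ ethtool -u ens9
$ ethtool -U ens9 delete 4
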
Signed-off-by: Daniel Jurgens <danielj@nvidia.com>
Reviewed-by: Parav Pandit <parav@nvidia.com>
Reviewed-by: Shahar Shitrit <shshitrit@nvidia.com>
Reviewed-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
---
v4: (*num_hdrs)++ to ++(*num_hdrs)
v12:
- Refactor calculate_flow_sizes. MST
- Refactor build_and_insert to remove goto validate. MST
- Move parse_ip4/6 l3_mask check here. MST
---
drivers/net/virtio_net.c | 223 +++++++++++++++++++++++++++++++++++++--
1 file changed, 212 insertions(+), 11 deletions(-)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index bb8ec4265da5..e6c7e8cd4ab4 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -5950,6 +5950,52 @@ static bool validate_ip6_mask(const struct virtnet_ff *ff,
return true;
}
+static bool validate_tcp_mask(const struct virtnet_ff *ff,
+ const struct virtio_net_ff_selector *sel,
+ const struct virtio_net_ff_selector *sel_cap)
+{
+ bool partial_mask = !!(sel_cap->flags & VIRTIO_NET_FF_MASK_F_PARTIAL_MASK);
+ struct tcphdr *cap, *mask;
+
+ cap = (struct tcphdr *)&sel_cap->mask;
+ mask = (struct tcphdr *)&sel->mask;
+
+ if (mask->source &&
+ !check_mask_vs_cap(&mask->source, &cap->source,
+ sizeof(cap->source), partial_mask))
+ return false;
+
+ if (mask->dest &&
+ !check_mask_vs_cap(&mask->dest, &cap->dest,
+ sizeof(cap->dest), partial_mask))
+ return false;
+
+ return true;
+}
+
+static bool validate_udp_mask(const struct virtnet_ff *ff,
+ const struct virtio_net_ff_selector *sel,
+ const struct virtio_net_ff_selector *sel_cap)
+{
+ bool partial_mask = !!(sel_cap->flags & VIRTIO_NET_FF_MASK_F_PARTIAL_MASK);
+ struct udphdr *cap, *mask;
+
+ cap = (struct udphdr *)&sel_cap->mask;
+ mask = (struct udphdr *)&sel->mask;
+
+ if (mask->source &&
+ !check_mask_vs_cap(&mask->source, &cap->source,
+ sizeof(cap->source), partial_mask))
+ return false;
+
+ if (mask->dest &&
+ !check_mask_vs_cap(&mask->dest, &cap->dest,
+ sizeof(cap->dest), partial_mask))
+ return false;
+
+ return true;
+}
+
static bool validate_mask(const struct virtnet_ff *ff,
const struct virtio_net_ff_selector *sel)
{
@@ -5967,11 +6013,45 @@ static bool validate_mask(const struct virtnet_ff *ff,
case VIRTIO_NET_FF_MASK_TYPE_IPV6:
return validate_ip6_mask(ff, sel, sel_cap);
+
+ case VIRTIO_NET_FF_MASK_TYPE_TCP:
+ return validate_tcp_mask(ff, sel, sel_cap);
+
+ case VIRTIO_NET_FF_MASK_TYPE_UDP:
+ return validate_udp_mask(ff, sel, sel_cap);
}
return false;
}
+static void set_tcp(struct tcphdr *mask, struct tcphdr *key,
+ __be16 psrc_m, __be16 psrc_k,
+ __be16 pdst_m, __be16 pdst_k)
+{
+ if (psrc_m) {
+ mask->source = psrc_m;
+ key->source = psrc_k;
+ }
+ if (pdst_m) {
+ mask->dest = pdst_m;
+ key->dest = pdst_k;
+ }
+}
+
+static void set_udp(struct udphdr *mask, struct udphdr *key,
+ __be16 psrc_m, __be16 psrc_k,
+ __be16 pdst_m, __be16 pdst_k)
+{
+ if (psrc_m) {
+ mask->source = psrc_m;
+ key->source = psrc_k;
+ }
+ if (pdst_m) {
+ mask->dest = pdst_m;
+ key->dest = pdst_k;
+ }
+}
+
static void parse_ip4(struct iphdr *mask, struct iphdr *key,
const struct ethtool_rx_flow_spec *fs)
{
@@ -5987,6 +6067,11 @@ static void parse_ip4(struct iphdr *mask, struct iphdr *key,
mask->daddr = l3_mask->ip4dst;
key->daddr = l3_val->ip4dst;
}
+
+ if (l3_mask->proto) {
+ mask->protocol = l3_mask->proto;
+ key->protocol = l3_val->proto;
+ }
}
static void parse_ip6(struct ipv6hdr *mask, struct ipv6hdr *key,
@@ -6004,16 +6089,35 @@ static void parse_ip6(struct ipv6hdr *mask, struct ipv6hdr *key,
memcpy(&mask->daddr, l3_mask->ip6dst, sizeof(mask->daddr));
memcpy(&key->daddr, l3_val->ip6dst, sizeof(key->daddr));
}
+
+ if (l3_mask->l4_proto) {
+ mask->nexthdr = l3_mask->l4_proto;
+ key->nexthdr = l3_val->l4_proto;
+ }
}
static bool has_ipv4(u32 flow_type)
{
- return flow_type == IP_USER_FLOW;
+ return flow_type == TCP_V4_FLOW ||
+ flow_type == UDP_V4_FLOW ||
+ flow_type == IP_USER_FLOW;
}
static bool has_ipv6(u32 flow_type)
{
- return flow_type == IPV6_USER_FLOW;
+ return flow_type == TCP_V6_FLOW ||
+ flow_type == UDP_V6_FLOW ||
+ flow_type == IPV6_USER_FLOW;
+}
+
+static bool has_tcp(u32 flow_type)
+{
+ return flow_type == TCP_V4_FLOW || flow_type == TCP_V6_FLOW;
+}
+
+static bool has_udp(u32 flow_type)
+{
+ return flow_type == UDP_V4_FLOW || flow_type == UDP_V6_FLOW;
}
static int setup_classifier(struct virtnet_ff *ff,
@@ -6153,6 +6257,10 @@ static bool supported_flow_type(const struct ethtool_rx_flow_spec *fs)
case ETHER_FLOW:
case IP_USER_FLOW:
case IPV6_USER_FLOW:
+ case TCP_V4_FLOW:
+ case TCP_V6_FLOW:
+ case UDP_V4_FLOW:
+ case UDP_V6_FLOW:
return true;
}
@@ -6194,6 +6302,12 @@ static void calculate_flow_sizes(struct ethtool_rx_flow_spec *fs,
size += sizeof(struct iphdr);
else if (has_ipv6(fs->flow_type))
size += sizeof(struct ipv6hdr);
+
+ if (has_tcp(fs->flow_type) || has_udp(fs->flow_type)) {
+ ++(*num_hdrs);
+ size += has_tcp(fs->flow_type) ? sizeof(struct tcphdr) :
+ sizeof(struct udphdr);
+ }
}
BUG_ON(size > 0xff);
@@ -6233,7 +6347,8 @@ static void setup_eth_hdr_key_mask(struct virtio_net_ff_selector *selector,
static int setup_ip_key_mask(struct virtio_net_ff_selector *selector,
u8 *key,
- const struct ethtool_rx_flow_spec *fs)
+ const struct ethtool_rx_flow_spec *fs,
+ int num_hdrs)
{
struct ipv6hdr *v6_m = (struct ipv6hdr *)&selector->mask;
struct iphdr *v4_m = (struct iphdr *)&selector->mask;
@@ -6244,23 +6359,95 @@ static int setup_ip_key_mask(struct virtio_net_ff_selector *selector,
selector->type = VIRTIO_NET_FF_MASK_TYPE_IPV6;
selector->length = sizeof(struct ipv6hdr);
- if (fs->h_u.usr_ip6_spec.l4_4_bytes ||
- fs->m_u.usr_ip6_spec.l4_4_bytes)
+ if (num_hdrs == 2 && (fs->h_u.usr_ip6_spec.l4_4_bytes ||
+ fs->m_u.usr_ip6_spec.l4_4_bytes))
return -EINVAL;
parse_ip6(v6_m, v6_k, fs);
+
+ if (num_hdrs > 2) {
+ v6_m->nexthdr = 0xff;
+ if (has_tcp(fs->flow_type))
+ v6_k->nexthdr = IPPROTO_TCP;
+ else
+ v6_k->nexthdr = IPPROTO_UDP;
+ }
} else {
selector->type = VIRTIO_NET_FF_MASK_TYPE_IPV4;
selector->length = sizeof(struct iphdr);
- if (fs->h_u.usr_ip4_spec.l4_4_bytes ||
- fs->h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4 ||
- fs->m_u.usr_ip4_spec.l4_4_bytes ||
- fs->m_u.usr_ip4_spec.ip_ver ||
- fs->m_u.usr_ip4_spec.proto)
+ if (num_hdrs == 2 &&
+ (fs->h_u.usr_ip4_spec.l4_4_bytes ||
+ fs->h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4 ||
+ fs->m_u.usr_ip4_spec.l4_4_bytes ||
+ fs->m_u.usr_ip4_spec.ip_ver ||
+ fs->m_u.usr_ip4_spec.proto))
return -EINVAL;
parse_ip4(v4_m, v4_k, fs);
+
+ if (num_hdrs > 2) {
+ v4_m->protocol = 0xff;
+ if (has_tcp(fs->flow_type))
+ v4_k->protocol = IPPROTO_TCP;
+ else
+ v4_k->protocol = IPPROTO_UDP;
+ }
+ }
+
+ return 0;
+}
+
+static int setup_transport_key_mask(struct virtio_net_ff_selector *selector,
+ u8 *key,
+ struct ethtool_rx_flow_spec *fs)
+{
+ struct tcphdr *tcp_m = (struct tcphdr *)&selector->mask;
+ struct udphdr *udp_m = (struct udphdr *)&selector->mask;
+ const struct ethtool_tcpip6_spec *v6_l4_mask;
+ const struct ethtool_tcpip4_spec *v4_l4_mask;
+ const struct ethtool_tcpip6_spec *v6_l4_key;
+ const struct ethtool_tcpip4_spec *v4_l4_key;
+ struct tcphdr *tcp_k = (struct tcphdr *)key;
+ struct udphdr *udp_k = (struct udphdr *)key;
+
+ if (has_tcp(fs->flow_type)) {
+ selector->type = VIRTIO_NET_FF_MASK_TYPE_TCP;
+ selector->length = sizeof(struct tcphdr);
+
+ if (has_ipv6(fs->flow_type)) {
+ v6_l4_mask = &fs->m_u.tcp_ip6_spec;
+ v6_l4_key = &fs->h_u.tcp_ip6_spec;
+
+ set_tcp(tcp_m, tcp_k, v6_l4_mask->psrc, v6_l4_key->psrc,
+ v6_l4_mask->pdst, v6_l4_key->pdst);
+ } else {
+ v4_l4_mask = &fs->m_u.tcp_ip4_spec;
+ v4_l4_key = &fs->h_u.tcp_ip4_spec;
+
+ set_tcp(tcp_m, tcp_k, v4_l4_mask->psrc, v4_l4_key->psrc,
+ v4_l4_mask->pdst, v4_l4_key->pdst);
+ }
+
+ } else if (has_udp(fs->flow_type)) {
+ selector->type = VIRTIO_NET_FF_MASK_TYPE_UDP;
+ selector->length = sizeof(struct udphdr);
+
+ if (has_ipv6(fs->flow_type)) {
+ v6_l4_mask = &fs->m_u.udp_ip6_spec;
+ v6_l4_key = &fs->h_u.udp_ip6_spec;
+
+ set_udp(udp_m, udp_k, v6_l4_mask->psrc, v6_l4_key->psrc,
+ v6_l4_mask->pdst, v6_l4_key->pdst);
+ } else {
+ v4_l4_mask = &fs->m_u.udp_ip4_spec;
+ v4_l4_key = &fs->h_u.udp_ip4_spec;
+
+ set_udp(udp_m, udp_k, v4_l4_mask->psrc, v4_l4_key->psrc,
+ v4_l4_mask->pdst, v4_l4_key->pdst);
+ }
+ } else {
+ return -EOPNOTSUPP;
}
return 0;
@@ -6300,6 +6487,7 @@ static int build_and_insert(struct virtnet_ff *ff,
struct virtio_net_ff_selector *selector;
struct virtnet_classifier *c;
size_t classifier_size;
+ size_t key_offset;
int num_hdrs;
u8 key_size;
u8 *key;
@@ -6332,11 +6520,24 @@ static int build_and_insert(struct virtnet_ff *ff,
setup_eth_hdr_key_mask(selector, key, fs, num_hdrs);
if (num_hdrs != 1) {
+ key_offset = selector->length;
selector = next_selector(selector);
- err = setup_ip_key_mask(selector, key + sizeof(struct ethhdr), fs);
+ err = setup_ip_key_mask(selector, key + key_offset,
+ fs, num_hdrs);
if (err)
goto err_classifier;
+
+ if (num_hdrs > 2) {
+ key_offset += selector->length;
+ selector = next_selector(selector);
+
+ err = setup_transport_key_mask(selector,
+ key + key_offset,
+ fs);
+ if (err)
+ goto err_classifier;
+ }
}
err = validate_classifier_selectors(ff, classifier, num_hdrs);
--
2.50.1