From: Nimrod Oren <noren@nvidia.com>
To: "David S. Miller" <davem@davemloft.net>,
Eric Dumazet <edumazet@google.com>,
Jakub Kicinski <kuba@kernel.org>, Paolo Abeni <pabeni@redhat.com>,
Simon Horman <horms@kernel.org>, Shuah Khan <shuah@kernel.org>,
Alexei Starovoitov <ast@kernel.org>,
Daniel Borkmann <daniel@iogearbox.net>,
Jesper Dangaard Brouer <hawk@kernel.org>,
"John Fastabend" <john.fastabend@gmail.com>,
Stanislav Fomichev <sdf@fomichev.me>,
Mohsin Bashir <mohsin.bashr@gmail.com>
Cc: Dragos Tatulea <dtatulea@nvidia.com>,
Tariq Toukan <tariqt@nvidia.com>,
Carolina Jubran <cjubran@nvidia.com>, <netdev@vger.kernel.org>,
<linux-kselftest@vger.kernel.org>, <linux-kernel@vger.kernel.org>,
<bpf@vger.kernel.org>, Nimrod Oren <noren@nvidia.com>
Subject: [PATCH RFC net-next 5/5] selftests: drv-net: Check XDP header data with bpf_dynptr
Date: Tue, 9 Sep 2025 11:52:36 +0300 [thread overview]
Message-ID: <20250909085236.2234306-6-noren@nvidia.com> (raw)
In-Reply-To: <20250909085236.2234306-1-noren@nvidia.com>
Update filter_udphdr to use bpf_dynptr_slice to read the packet headers
instead of accessing them directly.
The function previously returned a pointer to the UDP header, which
callers then used to compute the total header length (Ethernet + IP +
UDP) by subtracting xdp->data. Since this only works when the UDP
header lies in the linear region, rework the function to return the
total header length itself instead of a pointer. Rename filter_udphdr()
to check_udphdr() to reflect the new behavior.
This makes the test viable for drivers that do not store any
packet data in the linear part when in multi-buffer mode.
Signed-off-by: Nimrod Oren <noren@nvidia.com>
Reviewed-by: Carolina Jubran <cjubran@nvidia.com>
Reviewed-by: Dragos Tatulea <dtatulea@nvidia.com>
---
.../selftests/net/lib/xdp_native.bpf.c | 80 +++++++------------
1 file changed, 31 insertions(+), 49 deletions(-)
diff --git a/tools/testing/selftests/net/lib/xdp_native.bpf.c b/tools/testing/selftests/net/lib/xdp_native.bpf.c
index ff63f572552b..6df5164e3791 100644
--- a/tools/testing/selftests/net/lib/xdp_native.bpf.c
+++ b/tools/testing/selftests/net/lib/xdp_native.bpf.c
@@ -63,53 +63,49 @@ static void record_stats(struct xdp_md *ctx, __u32 stat_type)
__sync_fetch_and_add(count, 1);
}
-static struct udphdr *filter_udphdr(struct xdp_md *ctx, __u16 port)
+static __u32 check_udphdr(struct xdp_md *ctx, __u16 port)
{
- void *data_end = (void *)(long)ctx->data_end;
- void *data = (void *)(long)ctx->data;
struct udphdr *udph = NULL;
- struct ethhdr *eth = data;
+ struct ethhdr *eth = NULL;
+ struct bpf_dynptr ptr;
- if (data + sizeof(*eth) > data_end)
- return NULL;
+ bpf_dynptr_from_xdp(ctx, 0, &ptr);
+ eth = bpf_dynptr_slice(&ptr, 0, NULL, sizeof(*eth));
+ if (!eth)
+ return 0;
if (eth->h_proto == bpf_htons(ETH_P_IP)) {
- struct iphdr *iph = data + sizeof(*eth);
+ struct iphdr *iph = bpf_dynptr_slice(&ptr, sizeof(*eth),
+ NULL, sizeof(*iph));
- if (iph + 1 > (struct iphdr *)data_end ||
- iph->protocol != IPPROTO_UDP)
- return NULL;
+ if (!iph || iph->protocol != IPPROTO_UDP)
+ return 0;
- udph = (void *)eth + sizeof(*iph) + sizeof(*eth);
- } else if (eth->h_proto == bpf_htons(ETH_P_IPV6)) {
- struct ipv6hdr *ipv6h = data + sizeof(*eth);
+ udph = bpf_dynptr_slice(&ptr, sizeof(*iph) + sizeof(*eth),
+ NULL, sizeof(*udph));
+ } else if (eth->h_proto == bpf_htons(ETH_P_IPV6)) {
+ struct ipv6hdr *ipv6h = bpf_dynptr_slice(&ptr, sizeof(*eth),
+ NULL, sizeof(*ipv6h));
- if (ipv6h + 1 > (struct ipv6hdr *)data_end ||
- ipv6h->nexthdr != IPPROTO_UDP)
- return NULL;
+ if (!ipv6h || ipv6h->nexthdr != IPPROTO_UDP)
+ return 0;
- udph = (void *)eth + sizeof(*ipv6h) + sizeof(*eth);
+ udph = bpf_dynptr_slice(&ptr, sizeof(*ipv6h) + sizeof(*eth),
+ NULL, sizeof(*udph));
} else {
- return NULL;
+ return 0;
}
- if (udph + 1 > (struct udphdr *)data_end)
- return NULL;
-
- if (udph->dest != bpf_htons(port))
- return NULL;
+ if (!udph || udph->dest != bpf_htons(port))
+ return 0;
record_stats(ctx, STATS_RX);
-
- return udph;
+ return (void *)udph - (void *)eth + sizeof(*udph);
}
static int xdp_mode_pass(struct xdp_md *ctx, __u16 port)
{
- struct udphdr *udph = NULL;
-
- udph = filter_udphdr(ctx, port);
- if (!udph)
+ if (!check_udphdr(ctx, port))
return XDP_PASS;
record_stats(ctx, STATS_PASS);
@@ -119,10 +115,7 @@ static int xdp_mode_pass(struct xdp_md *ctx, __u16 port)
static int xdp_mode_drop_handler(struct xdp_md *ctx, __u16 port)
{
- struct udphdr *udph = NULL;
-
- udph = filter_udphdr(ctx, port);
- if (!udph)
+ if (!check_udphdr(ctx, port))
return XDP_PASS;
record_stats(ctx, STATS_DROP);
@@ -363,19 +356,14 @@ static int xdp_adjst_tail_grow_data(struct xdp_md *ctx, __u16 offset)
static int xdp_adjst_tail(struct xdp_md *ctx, __u16 port)
{
- void *data = (void *)(long)ctx->data;
- struct udphdr *udph = NULL;
- __s32 *adjust_offset, *val;
+ __s32 *adjust_offset;
__u32 key, hdr_len;
- void *offset_ptr;
- __u8 tag;
int ret;
- udph = filter_udphdr(ctx, port);
- if (!udph)
+ hdr_len = check_udphdr(ctx, port);
+ if (!hdr_len)
return XDP_PASS;
- hdr_len = (void *)udph - data + sizeof(struct udphdr);
key = XDP_ADJST_OFFSET;
adjust_offset = bpf_map_lookup_elem(&map_xdp_setup, &key);
if (!adjust_offset)
@@ -504,20 +492,14 @@ static int xdp_adjst_head_grow_data(struct xdp_md *ctx, __u64 hdr_len,
static int xdp_head_adjst(struct xdp_md *ctx, __u16 port)
{
- void *data_end = (void *)(long)ctx->data_end;
- void *data = (void *)(long)ctx->data;
- struct udphdr *udph_ptr = NULL;
__u32 key, size, hdr_len;
__s32 *val;
int res;
- /* Filter packets based on UDP port */
- udph_ptr = filter_udphdr(ctx, port);
- if (!udph_ptr)
+ hdr_len = check_udphdr(ctx, port);
+ if (!hdr_len)
return XDP_PASS;
- hdr_len = (void *)udph_ptr - data + sizeof(struct udphdr);
-
key = XDP_ADJST_OFFSET;
val = bpf_map_lookup_elem(&map_xdp_setup, &key);
if (!val)
--
2.45.0
next prev parent reply other threads:[~2025-09-09 8:53 UTC|newest]
Thread overview: 8+ messages / expand[flat|nested] mbox.gz Atom feed top
2025-09-09 8:52 [PATCH RFC net-next 0/5] selftests: drv-net: Convert XDP program to bpf_dynptr Nimrod Oren
2025-09-09 8:52 ` [PATCH RFC net-next 1/5] selftests: drv-net: Test XDP_TX with bpf_dynptr Nimrod Oren
2025-09-09 8:52 ` [PATCH RFC net-next 2/5] selftests: drv-net: Test XDP tail adjustment " Nimrod Oren
2025-09-09 8:52 ` [PATCH RFC net-next 3/5] selftests: drv-net: Test XDP head " Nimrod Oren
2025-09-09 17:26 ` Martin KaFai Lau
2025-09-09 8:52 ` [PATCH RFC net-next 4/5] selftests: drv-net: Adjust XDP header data " Nimrod Oren
2025-09-09 8:52 ` Nimrod Oren [this message]
2025-09-09 21:12 ` [PATCH RFC net-next 0/5] selftests: drv-net: Convert XDP program to bpf_dynptr Jakub Kicinski
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20250909085236.2234306-6-noren@nvidia.com \
--to=noren@nvidia.com \
--cc=ast@kernel.org \
--cc=bpf@vger.kernel.org \
--cc=cjubran@nvidia.com \
--cc=daniel@iogearbox.net \
--cc=davem@davemloft.net \
--cc=dtatulea@nvidia.com \
--cc=edumazet@google.com \
--cc=hawk@kernel.org \
--cc=horms@kernel.org \
--cc=john.fastabend@gmail.com \
--cc=kuba@kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-kselftest@vger.kernel.org \
--cc=mohsin.bashr@gmail.com \
--cc=netdev@vger.kernel.org \
--cc=pabeni@redhat.com \
--cc=sdf@fomichev.me \
--cc=shuah@kernel.org \
--cc=tariqt@nvidia.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox