BPF List
 help / color / mirror / Atom feed
From: Amery Hung <ameryhung@gmail.com>
To: bpf@vger.kernel.org
Cc: netdev@vger.kernel.org, alexei.starovoitov@gmail.com,
	andrii@kernel.org, daniel@iogearbox.net, kuba@kernel.org,
	stfomichev@gmail.com, martin.lau@kernel.org,
	mohsin.bashr@gmail.com, noren@nvidia.com, dtatulea@nvidia.com,
	saeedm@nvidia.com, tariqt@nvidia.com, mbloch@nvidia.com,
	maciej.fijalkowski@intel.com, kernel-team@meta.com
Subject: [PATCH bpf-next v2 6/7] selftests/bpf: Test bpf_xdp_pull_data
Date: Fri,  5 Sep 2025 10:33:50 -0700	[thread overview]
Message-ID: <20250905173352.3759457-7-ameryhung@gmail.com> (raw)
In-Reply-To: <20250905173352.3759457-1-ameryhung@gmail.com>

Test bpf_xdp_pull_data() with xdp packets of different layouts. The
xdp bpf program first checks if the layout is as expected. Then, it
calls bpf_xdp_pull_data(). Finally, it checks the 0xbb marker at offset
1024 using direct packet access.

Signed-off-by: Amery Hung <ameryhung@gmail.com>
---
 .../selftests/bpf/prog_tests/xdp_pull_data.c  | 96 +++++++++++++++++++
 .../selftests/bpf/progs/test_xdp_pull_data.c  | 36 +++++++
 2 files changed, 132 insertions(+)
 create mode 100644 tools/testing/selftests/bpf/prog_tests/xdp_pull_data.c
 create mode 100644 tools/testing/selftests/bpf/progs/test_xdp_pull_data.c

diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_pull_data.c b/tools/testing/selftests/bpf/prog_tests/xdp_pull_data.c
new file mode 100644
index 000000000000..2cd18e15d47e
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/xdp_pull_data.c
@@ -0,0 +1,96 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <test_progs.h>
+#include <network_helpers.h>
+#include "test_xdp_pull_data.skel.h"
+
+/* xdp_pull_data_prog will directly read a marker 0xbb stored at buf[1024]
+ * so caller expecting XDP_PASS should always pass pull_len no less than 1024
+ */
+/* Build an xdp packet of buf_len total bytes, with linear_len bytes in
+ * the linear data area, run xdp_pull_data_prog with the given pull_len
+ * and check both the program's return value and the resulting layout.
+ *
+ * xdp_pull_data_prog directly reads a marker 0xbb stored at buf[1024],
+ * so callers expecting XDP_PASS must pass a pull_len no less than 1024.
+ */
+static void test_xdp_pull_data_common(struct test_xdp_pull_data *skel,
+				      int buf_len, int linear_len,
+				      int pull_len, int retval)
+{
+	LIBBPF_OPTS(bpf_test_run_opts, topts);
+	struct xdp_md ctx = {};
+	int prog_fd, err;
+	__u8 *buf;
+
+	buf = calloc(buf_len, sizeof(__u8));
+	if (!ASSERT_OK_PTR(buf, "calloc buf"))
+		return;
+
+	/* Markers around the byte read by the bpf program, so corruption
+	 * of the neighboring bytes can be detected after the pull
+	 */
+	buf[1023] = 0xaa;
+	buf[1024] = 0xbb;
+	buf[1025] = 0xcc;
+
+	topts.data_in = buf;
+	topts.data_out = buf;
+	topts.data_size_in = buf_len;
+	topts.data_size_out = buf_len;
+	/* ctx.data_end tells test_run how many bytes go into the linear
+	 * data area; the remainder is placed in fragments
+	 */
+	ctx.data_end = linear_len;
+	topts.ctx_in = &ctx;
+	topts.ctx_out = &ctx;
+	topts.ctx_size_in = sizeof(ctx);
+	topts.ctx_size_out = sizeof(ctx);
+
+	skel->bss->linear_len = linear_len;
+	skel->bss->pull_len = pull_len;
+
+	prog_fd = bpf_program__fd(skel->progs.xdp_pull_data_prog);
+	err = bpf_prog_test_run_opts(prog_fd, &topts);
+	ASSERT_OK(err, "bpf_prog_test_run_opts");
+	ASSERT_EQ(topts.retval, retval, "xdp_pull_data_prog retval");
+
+	if (retval == XDP_DROP)
+		goto out;
+
+	/* After a successful pull, the linear area must hold exactly
+	 * pull_len bytes while the total packet size is unchanged
+	 */
+	ASSERT_EQ(ctx.data_end, pull_len, "linear data size");
+	ASSERT_EQ(topts.data_size_out, buf_len, "linear + non-linear data size");
+	/* Make sure data around xdp->data_end was not messed up by
+	 * bpf_xdp_pull_data()
+	 */
+	ASSERT_EQ(buf[1023], 0xaa, "buf[1023]");
+	ASSERT_EQ(buf[1024], 0xbb, "buf[1024]");
+	ASSERT_EQ(buf[1025], 0xcc, "buf[1025]");
+out:
+	free(buf);
+}
+
+/* Exercise bpf_xdp_pull_data() across linear and multi-buf layouts,
+ * covering both success (XDP_PASS) and failure (XDP_DROP) cases
+ */
+static void test_xdp_pull_data_basic(void)
+{
+	struct test_xdp_pull_data *skel;
+	__u32 page_size;
+
+	skel = test_xdp_pull_data__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "test_xdp_pull_data__open_and_load"))
+		return;
+
+	page_size = sysconf(_SC_PAGE_SIZE);
+
+	/* linear xdp pkt, pull 0 byte */
+	test_xdp_pull_data_common(skel, 2048, 2048, 2048, XDP_PASS);
+	/* multi-buf pkt, pull results in linear xdp pkt */
+	test_xdp_pull_data_common(skel, 2048, 1024, 2048, XDP_PASS);
+	/* multi-buf pkt, pull 1 byte to linear data area */
+	test_xdp_pull_data_common(skel, 9000, 1024, 1025, XDP_PASS);
+	/* multi-buf pkt, pull 0 byte to linear data area */
+	test_xdp_pull_data_common(skel, 9000, 1025, 1025, XDP_PASS);
+
+	/* linear xdp pkt, pull more than total data len */
+	test_xdp_pull_data_common(skel, 2048, 2048, 2049, XDP_DROP);
+	/* multi-buf pkt with no space left in linear data area.
+	 * Since ctx.data_end (page_size) > max_data_sz, bpf_prog_test_run_xdp()
+	 * will fill the whole linear data area and put the rest into a
+	 * fragment.
+	 */
+	test_xdp_pull_data_common(skel, page_size, page_size, page_size, XDP_DROP);
+
+	test_xdp_pull_data__destroy(skel);
+}
+
+/* Test entry point discovered by the test_progs harness */
+void test_xdp_pull_data(void)
+{
+	if (!test__start_subtest("xdp_pull_data"))
+		return;
+
+	test_xdp_pull_data_basic();
+}
diff --git a/tools/testing/selftests/bpf/progs/test_xdp_pull_data.c b/tools/testing/selftests/bpf/progs/test_xdp_pull_data.c
new file mode 100644
index 000000000000..f32e6b4a79f5
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_xdp_pull_data.c
@@ -0,0 +1,36 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include  "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+
+/* Test parameters written by userspace before the program runs:
+ * linear_len is the expected size of the linear data area and
+ * pull_len is the number of bytes to pull into it.
+ * Note: the obsolete 'int _version SEC("version")' was dropped; libbpf
+ * no longer uses the "version" section.
+ */
+int linear_len;
+int pull_len;
+
+/* Check that the packet layout matches the expectation set by userspace,
+ * pull pull_len bytes into the linear data area, then verify the 0xbb
+ * marker at offset 1024 via direct packet access. Returns XDP_PASS on
+ * success, XDP_DROP on any mismatch or kfunc failure.
+ */
+SEC("xdp.frags")
+int xdp_pull_data_prog(struct xdp_md *xdp)
+{
+	__u8 *data_end = (void *)(long)xdp->data_end;
+	__u8 *data = (void *)(long)xdp->data;
+	__u8 *val_p;
+	int err;
+
+	/* Verify the linear data area is exactly as large as userspace
+	 * configured via the linear_len global
+	 */
+	if (linear_len != data_end - data)
+		return XDP_DROP;
+
+	err = bpf_xdp_pull_data(xdp, pull_len, 0);
+	if (err)
+		return XDP_DROP;
+
+	/* Re-read xdp->data/data_end after the pull and bounds-check the
+	 * access so the verifier accepts the direct packet read below
+	 */
+	val_p = (void *)(long)xdp->data + 1024;
+	if (val_p + 1 > (void *)(long)xdp->data_end)
+		return XDP_DROP;
+
+	/* 0xbb is the marker userspace stored at buf[1024]; it is only
+	 * readable here if at least 1025 bytes are linear after the pull
+	 */
+	if (*val_p != 0xbb)
+		return XDP_DROP;
+
+	return XDP_PASS;
+}
+
+char _license[] SEC("license") = "GPL";
-- 
2.47.3


  parent reply	other threads:[~2025-09-05 17:34 UTC|newest]

Thread overview: 16+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2025-09-05 17:33 [PATCH bpf-next v2 0/7] Add kfunc bpf_xdp_pull_data Amery Hung
2025-09-05 17:33 ` [PATCH bpf-next v2 1/7] net/mlx5e: Fix generating skb from nonlinear xdp_buff Amery Hung
2025-09-08 14:41   ` Dragos Tatulea
2025-09-08 17:23     ` Amery Hung
2025-09-05 17:33 ` [PATCH bpf-next v2 2/7] bpf: Allow bpf_xdp_shrink_data to shrink a frag from head and tail Amery Hung
2025-09-05 17:33 ` [PATCH bpf-next v2 3/7] bpf: Support pulling non-linear xdp data Amery Hung
2025-09-08 19:27   ` Martin KaFai Lau
2025-09-08 22:28     ` Amery Hung
2025-09-09  1:54   ` Jakub Kicinski
2025-09-10 15:17     ` Amery Hung
2025-09-10 18:04       ` Jakub Kicinski
2025-09-10 19:11         ` Amery Hung
2025-09-05 17:33 ` [PATCH bpf-next v2 4/7] bpf: Clear packet pointers after changing packet data in kfuncs Amery Hung
2025-09-05 17:33 ` [PATCH bpf-next v2 5/7] bpf: Support specifying linear xdp packet data size in test_run Amery Hung
2025-09-05 17:33 ` Amery Hung [this message]
2025-09-05 17:33 ` [PATCH bpf-next v2 7/7] selftests: drv-net: Pull data before parsing headers Amery Hung

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20250905173352.3759457-7-ameryhung@gmail.com \
    --to=ameryhung@gmail.com \
    --cc=alexei.starovoitov@gmail.com \
    --cc=andrii@kernel.org \
    --cc=bpf@vger.kernel.org \
    --cc=daniel@iogearbox.net \
    --cc=dtatulea@nvidia.com \
    --cc=kernel-team@meta.com \
    --cc=kuba@kernel.org \
    --cc=maciej.fijalkowski@intel.com \
    --cc=martin.lau@kernel.org \
    --cc=mbloch@nvidia.com \
    --cc=mohsin.bashr@gmail.com \
    --cc=netdev@vger.kernel.org \
    --cc=noren@nvidia.com \
    --cc=saeedm@nvidia.com \
    --cc=stfomichev@gmail.com \
    --cc=tariqt@nvidia.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox