public inbox for netdev@vger.kernel.org
 help / color / mirror / Atom feed
From: Leon Hwang <leon.hwang@linux.dev>
To: bpf@vger.kernel.org
Cc: "Alexei Starovoitov" <ast@kernel.org>,
	"Daniel Borkmann" <daniel@iogearbox.net>,
	"Andrii Nakryiko" <andrii@kernel.org>,
	"Martin KaFai Lau" <martin.lau@linux.dev>,
	"Eduard Zingerman" <eddyz87@gmail.com>,
	"Song Liu" <song@kernel.org>,
	"Yonghong Song" <yonghong.song@linux.dev>,
	"John Fastabend" <john.fastabend@gmail.com>,
	"KP Singh" <kpsingh@kernel.org>,
	"Stanislav Fomichev" <sdf@fomichev.me>,
	"Hao Luo" <haoluo@google.com>, "Jiri Olsa" <jolsa@kernel.org>,
	"Shuah Khan" <shuah@kernel.org>,
	"Feng Yang" <yangfeng@kylinos.cn>,
	"Leon Hwang" <leon.hwang@linux.dev>,
	"Menglong Dong" <menglong8.dong@gmail.com>,
	"Puranjay Mohan" <puranjay@kernel.org>,
	"Björn Töpel" <bjorn@kernel.org>, "Pu Lehui" <pulehui@huawei.com>,
	linux-kernel@vger.kernel.org, linux-kselftest@vger.kernel.org,
	netdev@vger.kernel.org, kernel-patches-bot@fb.com
Subject: [PATCH bpf-next v2 6/6] selftests/bpf: Add tests to verify prog_array map compatibility
Date: Mon,  2 Mar 2026 23:03:42 +0800	[thread overview]
Message-ID: <20260302150342.55709-7-leon.hwang@linux.dev> (raw)
In-Reply-To: <20260302150342.55709-1-leon.hwang@linux.dev>

Add tests to verify the following tail call restrictions:

* !kprobe_write_ctx progs are not compatible with kprobe_write_ctx progs.
* !call_get_func_ip progs are not compatible with call_get_func_ip progs.
* !call_session_cookie progs are not compatible with call_session_cookie
  progs.

For each of the kprobe_write_ctx, call_get_func_ip, and call_session_cookie
attributes, a prog_array map cannot be shared between progs whose values of
that attribute differ.

Signed-off-by: Leon Hwang <leon.hwang@linux.dev>
---
 .../selftests/bpf/prog_tests/tailcalls.c      | 319 ++++++++++++++++++
 .../bpf/progs/tailcall_map_compatible.c       | 103 ++++++
 2 files changed, 422 insertions(+)
 create mode 100644 tools/testing/selftests/bpf/progs/tailcall_map_compatible.c

diff --git a/tools/testing/selftests/bpf/prog_tests/tailcalls.c b/tools/testing/selftests/bpf/prog_tests/tailcalls.c
index 7d534fde0af9..1063e73ecffa 100644
--- a/tools/testing/selftests/bpf/prog_tests/tailcalls.c
+++ b/tools/testing/selftests/bpf/prog_tests/tailcalls.c
@@ -9,6 +9,7 @@
 #include "tc_bpf2bpf.skel.h"
 #include "tailcall_fail.skel.h"
 #include "tailcall_sleepable.skel.h"
+#include "tailcall_map_compatible.skel.h"
 
 /* test_tailcall_1 checks basic functionality by patching multiple locations
  * in a single program for a single tail call slot with nop->jmp, jmp->nop
@@ -1725,6 +1726,312 @@ static void test_tailcall_sleepable(void)
 	tailcall_sleepable__destroy(skel);
 }
 
+#ifdef __x86_64__
+/* uprobe attach point: the "kprobe" BPF prog is attached here as a uprobe */
+static noinline int trigger_uprobe_fn(int a)
+{
+	asm volatile ("" : "+r"(a)); /* keep 'a' live so the call is not optimized away */
+	return a;
+}
+
+static void test_map_compatible_update_kprobe_write_ctx(void)
+{
+	struct bpf_program *dummy, *kprobe, *fsession;
+	struct tailcall_map_compatible *skel;
+	struct bpf_link *link = NULL;
+	int err, prog_fd, key = 0;
+	struct bpf_map *map;
+	LIBBPF_OPTS(bpf_kprobe_opts, kprobe_opts);
+	LIBBPF_OPTS(bpf_uprobe_opts, uprobe_opts);
+	LIBBPF_OPTS(bpf_test_run_opts, topts);
+
+	skel = tailcall_map_compatible__open();
+	if (!ASSERT_OK_PTR(skel, "tailcall_map_compatible__open"))
+		return;
+
+	dummy = skel->progs.dummy_kprobe; /* does NOT write its ctx */
+	bpf_program__set_autoload(dummy, true);
+
+	kprobe = skel->progs.kprobe; /* writes regs->di, i.e. kprobe_write_ctx */
+	bpf_program__set_autoload(kprobe, true);
+
+	fsession = skel->progs.fsession_tailcall;
+	bpf_program__set_autoload(fsession, true);
+
+	skel->bss->data = 0xdeadbeef; /* sentinel: progs below may overwrite it */
+
+	err = tailcall_map_compatible__load(skel);
+	if (!ASSERT_OK(err, "tailcall_map_compatible__load"))
+		goto out;
+
+	prog_fd = bpf_program__fd(kprobe);
+	map = skel->maps.prog_array_dummy;
+	err = bpf_map_update_elem(bpf_map__fd(map), &key, &prog_fd, BPF_ANY);
+	ASSERT_ERR(err, "bpf_map_update_elem kprobe"); /* must fail: ctx-writing prog into map used by !kprobe_write_ctx prog */
+
+	skel->links.dummy_kprobe = bpf_program__attach_kprobe_opts(dummy, "bpf_fentry_test1",
+								   &kprobe_opts);
+	if (!ASSERT_OK_PTR(skel->links.dummy_kprobe, "bpf_program__attach_kprobe_opts"))
+		goto out;
+
+	skel->links.fsession_tailcall = bpf_program__attach_trace(fsession);
+	if (!ASSERT_OK_PTR(skel->links.fsession_tailcall, "bpf_program__attach_trace"))
+		goto out;
+
+	err = bpf_prog_test_run_opts(bpf_program__fd(fsession), &topts);
+	ASSERT_OK(err, "bpf_prog_test_run_opts fsession");
+
+	ASSERT_EQ(topts.retval, 0, "dummy retval");
+	ASSERT_EQ(skel->bss->dummy_run, 1, "dummy_run"); /* dummy_kprobe fired once */
+	ASSERT_EQ(skel->bss->data, 0xdeadbeef, "data"); /* rejected prog never ran: sentinel intact */
+
+	err = bpf_map_delete_elem(bpf_map__fd(map), &key);
+	ASSERT_TRUE(!err || err == -ENOENT, "bpf_map_delete_elem"); /* slot may already be empty */
+
+	uprobe_opts.func_name = "trigger_uprobe_fn";
+	link = bpf_program__attach_uprobe_opts(kprobe, 0, "/proc/self/exe", 0, &uprobe_opts);
+	if (!ASSERT_OK_PTR(link, "bpf_program__attach_uprobe_opts"))
+		goto out;
+
+	prog_fd = bpf_program__fd(dummy);
+	map = skel->maps.prog_array_kprobe;
+	err = bpf_map_update_elem(bpf_map__fd(map), &key, &prog_fd, BPF_ANY);
+	ASSERT_OK(err, "bpf_map_update_elem dummy"); /* reverse direction is allowed */
+
+	ASSERT_EQ(trigger_uprobe_fn(1), 0, "trigger_uprobe_fn retval"); /* modified by uprobe */
+
+	ASSERT_EQ(topts.retval, 0, "dummy retval");
+	ASSERT_EQ(skel->bss->dummy_run, 2, "dummy_run"); /* dummy ran again via tail call */
+	ASSERT_EQ(skel->bss->data, 0, "data"); /* "kprobe" zeroed regs->di and mirrored it into data */
+
+out:
+	bpf_link__destroy(link);
+	tailcall_map_compatible__destroy(skel);
+}
+#else
+static void test_map_compatible_update_kprobe_write_ctx(void)
+{
+	test__skip(); /* the ctx-write progs are guarded by __TARGET_ARCH_x86 */
+}
+#endif
+
+static void test_map_compatible_update_get_func_ip(void)
+{
+	struct tailcall_map_compatible *skel;
+	struct bpf_program *dummy, *fentry;
+	struct bpf_link *link = NULL;
+	int err, prog_fd, key = 0;
+	struct bpf_map *map;
+	__u64 func_ip;
+	LIBBPF_OPTS(bpf_test_run_opts, topts);
+
+	skel = tailcall_map_compatible__open();
+	if (!ASSERT_OK_PTR(skel, "tailcall_map_compatible__open"))
+		return;
+
+	dummy = skel->progs.dummy_fentry; /* does NOT call bpf_get_func_ip() */
+	bpf_program__set_autoload(dummy, true);
+
+	fentry = skel->progs.fentry; /* calls bpf_get_func_ip(), i.e. call_get_func_ip */
+	bpf_program__set_autoload(fentry, true);
+
+	err = tailcall_map_compatible__load(skel);
+	if (!ASSERT_OK(err, "tailcall_map_compatible__load"))
+		goto out;
+
+	link = bpf_program__attach_trace(fentry);
+	if (!ASSERT_OK_PTR(link, "bpf_program__attach_trace fentry"))
+		goto out;
+
+	err = bpf_prog_test_run_opts(bpf_program__fd(fentry), &topts);
+	if (!ASSERT_OK(err, "bpf_prog_test_run_opts fentry"))
+		goto out;
+
+	ASSERT_EQ(topts.retval, 0, "fentry retval");
+	ASSERT_EQ(skel->bss->dummy_run, 0, "dummy_run"); /* tail-call target empty, dummy never ran */
+	ASSERT_NEQ(skel->bss->data, 0, "data");
+	func_ip = skel->bss->data; /* remember the ip reported by bpf_get_func_ip() */
+
+	skel->bss->data = 0xdeadbeef; /* sentinel for the next step */
+
+	err = bpf_link__destroy(link);
+	link = NULL;
+	if (!ASSERT_OK(err, "bpf_link__destroy"))
+		goto out;
+
+	prog_fd = bpf_program__fd(fentry);
+	map = skel->maps.prog_array_dummy;
+	err = bpf_map_update_elem(bpf_map__fd(map), &key, &prog_fd, BPF_ANY);
+	ASSERT_ERR(err, "bpf_map_update_elem fentry"); /* must fail: get_func_ip prog into map used by !call_get_func_ip prog */
+
+	link = bpf_program__attach_trace(dummy);
+	if (!ASSERT_OK_PTR(link, "bpf_program__attach_trace dummy"))
+		goto out;
+
+	err = bpf_prog_test_run_opts(bpf_program__fd(dummy), &topts);
+	if (!ASSERT_OK(err, "bpf_prog_test_run_opts dummy"))
+		goto out;
+
+	ASSERT_EQ(topts.retval, 0, "dummy retval");
+	ASSERT_EQ(skel->bss->dummy_run, 1, "dummy_run");
+	ASSERT_EQ(skel->bss->data, 0xdeadbeef, "data"); /* rejected prog never ran: sentinel intact */
+	ASSERT_NEQ(skel->bss->data, func_ip, "data func_ip");
+
+	err = bpf_link__destroy(link);
+	link = NULL;
+	if (!ASSERT_OK(err, "bpf_link__destroy"))
+		goto out;
+
+	err = bpf_map_delete_elem(bpf_map__fd(map), &key);
+	ASSERT_TRUE(!err || err == -ENOENT, "bpf_map_delete_elem"); /* slot may already be empty */
+
+	prog_fd = bpf_program__fd(dummy);
+	map = skel->maps.prog_array_tracing;
+	err = bpf_map_update_elem(bpf_map__fd(map), &key, &prog_fd, BPF_ANY);
+	ASSERT_OK(err, "bpf_map_update_elem dummy"); /* reverse direction is allowed */
+
+	link = bpf_program__attach_trace(fentry);
+	if (!ASSERT_OK_PTR(link, "bpf_program__attach_trace fentry"))
+		goto out;
+
+	err = bpf_prog_test_run_opts(bpf_program__fd(fentry), &topts);
+	if (!ASSERT_OK(err, "bpf_prog_test_run_opts fentry"))
+		goto out;
+
+	ASSERT_EQ(topts.retval, 0, "fentry retval");
+	ASSERT_EQ(skel->bss->dummy_run, 2, "dummy_run"); /* dummy ran via fentry's tail call */
+	ASSERT_EQ(skel->bss->data, func_ip, "data"); /* fentry wrote the func ip before tail-calling */
+
+out:
+	bpf_link__destroy(link);
+	tailcall_map_compatible__destroy(skel);
+}
+
+static void test_map_compatible_update_session_cookie(void)
+{
+	struct tailcall_map_compatible *skel;
+	struct bpf_program *dummy, *fsession;
+	struct bpf_link *link = NULL;
+	int err, prog_fd, key = 0;
+	struct bpf_map *map;
+	LIBBPF_OPTS(bpf_test_run_opts, topts);
+
+	skel = tailcall_map_compatible__open();
+	if (!ASSERT_OK_PTR(skel, "tailcall_map_compatible__open"))
+		return;
+
+	dummy = skel->progs.dummy_fsession; /* does NOT call bpf_session_cookie() */
+	bpf_program__set_autoload(dummy, true);
+
+	fsession = skel->progs.fsession_cookie; /* calls bpf_session_cookie(), i.e. call_session_cookie */
+	bpf_program__set_autoload(fsession, true);
+
+	skel->bss->data = 0xdeadbeef; /* sentinel: progs below may overwrite it */
+
+	err = tailcall_map_compatible__load(skel);
+	if (err == -EOPNOTSUPP) { /* NOTE(review): presumably fsession is unsupported on this kernel — confirm */
+		test__skip();
+		goto out;
+	}
+	if (!ASSERT_OK(err, "tailcall_map_compatible__load"))
+		goto out;
+
+	prog_fd = bpf_program__fd(fsession);
+	map = skel->maps.prog_array_dummy;
+	err = bpf_map_update_elem(bpf_map__fd(map), &key, &prog_fd, BPF_ANY);
+	ASSERT_ERR(err, "bpf_map_update_elem fsession"); /* must fail: session-cookie prog into map used by !call_session_cookie prog */
+
+	link = bpf_program__attach_trace(dummy);
+	if (!ASSERT_OK_PTR(link, "bpf_program__attach_trace dummy"))
+		goto out;
+
+	err = bpf_prog_test_run_opts(bpf_program__fd(dummy), &topts);
+	ASSERT_OK(err, "bpf_prog_test_run_opts dummy");
+
+	ASSERT_EQ(topts.retval, 0, "dummy retval");
+	ASSERT_EQ(skel->bss->dummy_run, 2, "dummy_run"); /* assumes fsession fires on entry and exit (+2) — TODO confirm */
+	ASSERT_EQ(skel->bss->data, 0xdeadbeef, "data"); /* rejected prog never ran: sentinel intact */
+
+	err = bpf_link__destroy(link);
+	link = NULL;
+	if (!ASSERT_OK(err, "bpf_link__destroy"))
+		goto out;
+
+	err = bpf_map_delete_elem(bpf_map__fd(map), &key);
+	ASSERT_TRUE(!err || err == -ENOENT, "bpf_map_delete_elem"); /* slot may already be empty */
+
+	prog_fd = bpf_program__fd(dummy);
+	map = skel->maps.prog_array_tracing;
+	err = bpf_map_update_elem(bpf_map__fd(map), &key, &prog_fd, BPF_ANY);
+	ASSERT_OK(err, "bpf_map_update_elem dummy"); /* reverse direction is allowed */
+
+	link = bpf_program__attach_trace(fsession);
+	if (!ASSERT_OK_PTR(link, "bpf_program__attach_trace fsession"))
+		goto out;
+
+	err = bpf_prog_test_run_opts(bpf_program__fd(fsession), &topts);
+	if (!ASSERT_OK(err, "bpf_prog_test_run_opts fsession"))
+		goto out;
+
+	ASSERT_EQ(topts.retval, 0, "fsession retval");
+	ASSERT_EQ(skel->bss->dummy_run, 4, "dummy_run"); /* +2 again via fsession's tail calls — see note above */
+	ASSERT_EQ(skel->bss->data, 0, "data"); /* fsession_cookie zeroed the cookie and mirrored it into data */
+
+out:
+	bpf_link__destroy(link);
+	tailcall_map_compatible__destroy(skel);
+}
+
+static void test_map_compatible_init(const char *prog1, const char *prog2) /* load two progs sharing a prog_array; load must fail */
+{
+	struct tailcall_map_compatible *skel;
+	struct bpf_program *p1, *p2;
+	int err;
+
+	skel = tailcall_map_compatible__open();
+	if (!ASSERT_OK_PTR(skel, "tailcall_map_compatible__open"))
+		return;
+
+	p1 = bpf_object__find_program_by_name(skel->obj, prog1);
+	if (!ASSERT_OK_PTR(p1, "bpf_object__find_program_by_name prog1"))
+		goto out;
+	bpf_program__set_autoload(p1, true);
+
+	p2 = bpf_object__find_program_by_name(skel->obj, prog2);
+	if (!ASSERT_OK_PTR(p2, "bpf_object__find_program_by_name prog2"))
+		goto out;
+	bpf_program__set_autoload(p2, true);
+
+	err = tailcall_map_compatible__load(skel);
+	if (err == -EOPNOTSUPP) { /* NOTE(review): presumably the prog type is unsupported on this kernel — confirm */
+		test__skip();
+		goto out;
+	}
+	ASSERT_ERR(err, "tailcall_map_compatible__load"); /* incompatible progs must be rejected at load time */
+
+out:
+	tailcall_map_compatible__destroy(skel);
+}
+
+static void test_map_compatible_init_kprobe_write_ctx(void)
+{
+#ifdef __x86_64__
+	test_map_compatible_init("kprobe", "kprobe_tailcall"); /* both use prog_array_kprobe; "kprobe" writes its ctx */
+#else
+	test__skip(); /* the ctx-write progs are guarded by __TARGET_ARCH_x86 */
+#endif
+}
+
+static void test_map_compatible_init_call_get_func_ip(void)
+{
+	test_map_compatible_init("fentry", "fentry_tailcall"); /* both use prog_array_tracing; "fentry" calls bpf_get_func_ip() */
+}
+
+static void test_map_compatible_init_call_session_cookie(void)
+{
+	test_map_compatible_init("fsession_cookie", "fsession_tailcall"); /* both use prog_array_tracing; one calls bpf_session_cookie() */
+}
+
 void test_tailcalls(void)
 {
 	if (test__start_subtest("tailcall_1"))
@@ -1781,4 +2088,16 @@ void test_tailcalls(void)
 		test_tailcall_failure();
 	if (test__start_subtest("tailcall_sleepable"))
 		test_tailcall_sleepable();
+	if (test__start_subtest("map_compatible/update/kprobe_write_ctx"))
+		test_map_compatible_update_kprobe_write_ctx();
+	if (test__start_subtest("map_compatible/update/get_func_ip"))
+		test_map_compatible_update_get_func_ip();
+	if (test__start_subtest("map_compatible/update/session_cookie"))
+		test_map_compatible_update_session_cookie();
+	if (test__start_subtest("map_compatible/init/kprobe_write_ctx"))
+		test_map_compatible_init_kprobe_write_ctx();
+	if (test__start_subtest("map_compatible/init/call_get_func_ip"))
+		test_map_compatible_init_call_get_func_ip();
+	if (test__start_subtest("map_compatible/init/call_session_cookie"))
+		test_map_compatible_init_call_session_cookie();
 }
diff --git a/tools/testing/selftests/bpf/progs/tailcall_map_compatible.c b/tools/testing/selftests/bpf/progs/tailcall_map_compatible.c
new file mode 100644
index 000000000000..991b799c89ac
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/tailcall_map_compatible.c
@@ -0,0 +1,103 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+
+char _license[] SEC("license") = "GPL";
+
+int dummy_run; /* bumped by each dummy_* prog; asserted from userspace */
+u64 data; /* scratch value written by the progs; asserted from userspace */
+
+struct {
+	__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
+	__uint(max_entries, 1);
+	__uint(key_size, sizeof(__u32));
+	__uint(value_size, sizeof(__u32));
+} prog_array_dummy SEC(".maps"); /* tail-call map used by the dummy_* progs */
+
+#if defined(__TARGET_ARCH_x86)
+SEC("?kprobe") /* kprobe that does NOT write its ctx */
+int dummy_kprobe(void *ctx)
+{
+	dummy_run++;
+	bpf_tail_call_static(ctx, &prog_array_dummy, 0);
+	return 0;
+}
+
+struct {
+	__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
+	__uint(max_entries, 1);
+	__uint(key_size, sizeof(__u32));
+	__uint(value_size, sizeof(__u32));
+} prog_array_kprobe SEC(".maps"); /* tail-call map used by the ctx-writing kprobe progs */
+
+SEC("?kprobe")
+int kprobe(struct pt_regs *regs)
+{
+	data = regs->di = 0; /* writing regs makes this a kprobe_write_ctx prog; zeroed di is observed via data */
+	bpf_tail_call_static(regs, &prog_array_kprobe, 0);
+	return 0;
+}
+
+SEC("?kprobe") /* shares prog_array_kprobe with "kprobe" but does not write its ctx */
+int kprobe_tailcall(struct pt_regs *regs)
+{
+	bpf_tail_call_static(regs, &prog_array_kprobe, 0);
+	return 0;
+}
+#endif
+
+SEC("?fentry/bpf_fentry_test1") /* fentry that does NOT call bpf_get_func_ip() */
+int dummy_fentry(void *ctx)
+{
+	dummy_run++;
+	bpf_tail_call_static(ctx, &prog_array_dummy, 0);
+	return 0;
+}
+
+struct {
+	__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
+	__uint(max_entries, 1);
+	__uint(key_size, sizeof(__u32));
+	__uint(value_size, sizeof(__u32));
+} prog_array_tracing SEC(".maps"); /* tail-call map used by the fentry/fsession progs */
+
+SEC("?fentry/bpf_fentry_test1")
+int BPF_PROG(fentry)
+{
+	data = bpf_get_func_ip(ctx); /* calling bpf_get_func_ip() makes this a call_get_func_ip prog */
+	bpf_tail_call_static(ctx, &prog_array_tracing, 0);
+	return 0;
+}
+
+SEC("?fentry/bpf_fentry_test1") /* shares prog_array_tracing with "fentry" but does not call bpf_get_func_ip() */
+int BPF_PROG(fentry_tailcall)
+{
+	bpf_tail_call_static(ctx, &prog_array_tracing, 0);
+	return 0;
+}
+
+SEC("?fsession/bpf_fentry_test2") /* fsession that does NOT call bpf_session_cookie() */
+int dummy_fsession(void *ctx)
+{
+	dummy_run++;
+	bpf_tail_call_static(ctx, &prog_array_dummy, 0);
+	return 0;
+}
+
+SEC("?fsession/bpf_fentry_test2")
+int BPF_PROG(fsession_cookie)
+{
+	u64 *cookie = bpf_session_cookie(ctx); /* calling bpf_session_cookie() makes this a call_session_cookie prog */
+
+	data = *cookie = 0; /* zeroed cookie observed from userspace via data */
+	bpf_tail_call_static(ctx, &prog_array_tracing, 0);
+	return 0;
+}
+
+SEC("?fsession/bpf_fentry_test2") /* shares prog_array_tracing but does not call bpf_session_cookie() */
+int BPF_PROG(fsession_tailcall)
+{
+	bpf_tail_call_static(ctx, &prog_array_tracing, 0);
+	return 0;
+}
-- 
2.52.0


      parent reply	other threads:[~2026-03-02 15:05 UTC|newest]

Thread overview: 11+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2026-03-02 15:03 [PATCH bpf-next v2 0/6] bpf: Enhance __bpf_prog_map_compatible() Leon Hwang
2026-03-02 15:03 ` [PATCH bpf-next v2 1/6] bpf: Add fsession to verbose log in check_get_func_ip() Leon Hwang
2026-03-02 15:03 ` [PATCH bpf-next v2 2/6] bpf: Factor out bpf_map_owner_[init,matches]() helpers Leon Hwang
2026-03-02 15:03 ` [PATCH bpf-next v2 3/6] bpf: Disallow !kprobe_write_ctx progs tail-calling kprobe_write_ctx progs Leon Hwang
2026-03-02 15:53   ` bot+bpf-ci
2026-03-03  1:44     ` Leon Hwang
2026-03-02 15:03 ` [PATCH bpf-next v2 4/6] bpf: Disallow !call_get_func_ip progs tail-calling call_get_func_ip progs Leon Hwang
2026-03-02 15:53   ` bot+bpf-ci
2026-03-03  1:47     ` Leon Hwang
2026-03-02 15:03 ` [PATCH bpf-next v2 5/6] bpf: Disallow !call_session_cookie progs tail-calling call_session_cookie progs Leon Hwang
2026-03-02 15:03 ` Leon Hwang [this message]

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20260302150342.55709-7-leon.hwang@linux.dev \
    --to=leon.hwang@linux.dev \
    --cc=andrii@kernel.org \
    --cc=ast@kernel.org \
    --cc=bjorn@kernel.org \
    --cc=bpf@vger.kernel.org \
    --cc=daniel@iogearbox.net \
    --cc=eddyz87@gmail.com \
    --cc=haoluo@google.com \
    --cc=john.fastabend@gmail.com \
    --cc=jolsa@kernel.org \
    --cc=kernel-patches-bot@fb.com \
    --cc=kpsingh@kernel.org \
    --cc=linux-kernel@vger.kernel.org \
    --cc=linux-kselftest@vger.kernel.org \
    --cc=martin.lau@linux.dev \
    --cc=menglong8.dong@gmail.com \
    --cc=netdev@vger.kernel.org \
    --cc=pulehui@huawei.com \
    --cc=puranjay@kernel.org \
    --cc=sdf@fomichev.me \
    --cc=shuah@kernel.org \
    --cc=song@kernel.org \
    --cc=yangfeng@kylinos.cn \
    --cc=yonghong.song@linux.dev \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox