* [PATCH bpf-next] selftests/bpf: Add stacktrace test for kprobe multi
From: Jiri Olsa @ 2025-09-25 11:51 UTC
To: Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko
Cc: Feng Yang, bpf, linux-perf-users, Martin KaFai Lau,
Eduard Zingerman, Song Liu, Yonghong Song, John Fastabend,
Hao Luo
Adding stacktrace test for kprobe multi probe.
Cc: Feng Yang <yangfeng59949@163.com>
Signed-off-by: Jiri Olsa <jolsa@kernel.org>
---
test for the arm64 fix posted separately here:
https://lore.kernel.org/bpf/20250925020822.119302-1-yangfeng59949@163.com/
.../selftests/bpf/prog_tests/stacktrace_map.c | 107 +++++++++++++-----
.../selftests/bpf/progs/test_stacktrace_map.c | 28 ++++-
2 files changed, 106 insertions(+), 29 deletions(-)
diff --git a/tools/testing/selftests/bpf/prog_tests/stacktrace_map.c b/tools/testing/selftests/bpf/prog_tests/stacktrace_map.c
index 84a7e405e912..922224adc86b 100644
--- a/tools/testing/selftests/bpf/prog_tests/stacktrace_map.c
+++ b/tools/testing/selftests/bpf/prog_tests/stacktrace_map.c
@@ -1,13 +1,44 @@
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
+#include "test_stacktrace_map.skel.h"
-void test_stacktrace_map(void)
+static void check_stackmap(int control_map_fd, int stackid_hmap_fd,
+ int stackmap_fd, int stack_amap_fd)
+{
+ __u32 key, val, duration = 0;
+ int err, stack_trace_len;
+
+ /* disable stack trace collection */
+ key = 0;
+ val = 1;
+ bpf_map_update_elem(control_map_fd, &key, &val, 0);
+
+ /* for every element in stackid_hmap, we can find a corresponding one
+ * in stackmap, and vice versa.
+ */
+ err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
+ if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
+ "err %d errno %d\n", err, errno))
+ return;
+
+ err = compare_map_keys(stackmap_fd, stackid_hmap_fd);
+ if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap",
+ "err %d errno %d\n", err, errno))
+ return;
+
+ stack_trace_len = PERF_MAX_STACK_DEPTH * sizeof(__u64);
+ err = compare_stack_ips(stackmap_fd, stack_amap_fd, stack_trace_len);
+ CHECK(err, "compare_stack_ips stackmap vs. stack_amap",
+ "err %d errno %d\n", err, errno);
+}
+
+static void test_stacktrace_map_tp(void)
{
int control_map_fd, stackid_hmap_fd, stackmap_fd, stack_amap_fd;
const char *prog_name = "oncpu";
- int err, prog_fd, stack_trace_len;
+ int err, prog_fd;
const char *file = "./test_stacktrace_map.bpf.o";
- __u32 key, val, duration = 0;
+ __u32 duration = 0;
struct bpf_program *prog;
struct bpf_object *obj;
struct bpf_link *link;
@@ -44,32 +75,56 @@ void test_stacktrace_map(void)
/* give some time for bpf program run */
sleep(1);
- /* disable stack trace collection */
- key = 0;
- val = 1;
- bpf_map_update_elem(control_map_fd, &key, &val, 0);
-
- /* for every element in stackid_hmap, we can find a corresponding one
- * in stackmap, and vice versa.
- */
- err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
- if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
- "err %d errno %d\n", err, errno))
- goto disable_pmu;
-
- err = compare_map_keys(stackmap_fd, stackid_hmap_fd);
- if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap",
- "err %d errno %d\n", err, errno))
- goto disable_pmu;
-
- stack_trace_len = PERF_MAX_STACK_DEPTH * sizeof(__u64);
- err = compare_stack_ips(stackmap_fd, stack_amap_fd, stack_trace_len);
- if (CHECK(err, "compare_stack_ips stackmap vs. stack_amap",
- "err %d errno %d\n", err, errno))
- goto disable_pmu;
+ check_stackmap(control_map_fd, stackid_hmap_fd, stackmap_fd, stack_amap_fd);
disable_pmu:
bpf_link__destroy(link);
close_prog:
bpf_object__close(obj);
}
+
+static void test_stacktrace_map_kprobe_multi(bool retprobe)
+{
+ int control_map_fd, stackid_hmap_fd, stackmap_fd, stack_amap_fd;
+ LIBBPF_OPTS(bpf_kprobe_multi_opts, opts,
+ .retprobe = retprobe
+ );
+ LIBBPF_OPTS(bpf_test_run_opts, topts);
+ struct test_stacktrace_map *skel;
+ struct bpf_link *link;
+ int prog_fd, err;
+
+ skel = test_stacktrace_map__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "test_stacktrace_map__open_and_load"))
+ return;
+
+ link = bpf_program__attach_kprobe_multi_opts(skel->progs.kprobe,
+ "bpf_fentry_test1", &opts);
+ if (!ASSERT_OK_PTR(link, "bpf_program__attach_kprobe_multi_opts"))
+ goto cleanup;
+
+ prog_fd = bpf_program__fd(skel->progs.trigger);
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ ASSERT_OK(err, "test_run");
+ ASSERT_EQ(topts.retval, 0, "test_run");
+
+ control_map_fd = bpf_map__fd(skel->maps.control_map);
+ stackid_hmap_fd = bpf_map__fd(skel->maps.stackid_hmap);
+ stackmap_fd = bpf_map__fd(skel->maps.stackmap);
+ stack_amap_fd = bpf_map__fd(skel->maps.stack_amap);
+
+ check_stackmap(control_map_fd, stackid_hmap_fd, stackmap_fd, stack_amap_fd);
+
+cleanup:
+ test_stacktrace_map__destroy(skel);
+}
+
+void test_stacktrace_map(void)
+{
+ if (test__start_subtest("tp"))
+ test_stacktrace_map_tp();
+ if (test__start_subtest("kprobe_multi"))
+ test_stacktrace_map_kprobe_multi(false);
+ if (test__start_subtest("kretprobe_multi"))
+ test_stacktrace_map_kprobe_multi(true);
+}
diff --git a/tools/testing/selftests/bpf/progs/test_stacktrace_map.c b/tools/testing/selftests/bpf/progs/test_stacktrace_map.c
index 47568007b668..7a27e162a407 100644
--- a/tools/testing/selftests/bpf/progs/test_stacktrace_map.c
+++ b/tools/testing/selftests/bpf/progs/test_stacktrace_map.c
@@ -3,6 +3,7 @@
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
#ifndef PERF_MAX_STACK_DEPTH
#define PERF_MAX_STACK_DEPTH 127
@@ -50,8 +51,7 @@ struct sched_switch_args {
int next_prio;
};
-SEC("tracepoint/sched/sched_switch")
-int oncpu(struct sched_switch_args *ctx)
+static inline void test_stackmap(void *ctx)
{
__u32 max_len = PERF_MAX_STACK_DEPTH * sizeof(__u64);
__u32 key = 0, val = 0, *value_p;
@@ -59,7 +59,7 @@ int oncpu(struct sched_switch_args *ctx)
value_p = bpf_map_lookup_elem(&control_map, &key);
if (value_p && *value_p)
- return 0; /* skip if non-zero *value_p */
+ return; /* skip if non-zero *value_p */
/* The size of stackmap and stackid_hmap should be the same */
key = bpf_get_stackid(ctx, &stackmap, 0);
@@ -69,7 +69,29 @@ int oncpu(struct sched_switch_args *ctx)
if (stack_p)
bpf_get_stack(ctx, stack_p, max_len, 0);
}
+}
+
+SEC("tracepoint/sched/sched_switch")
+int oncpu(struct sched_switch_args *ctx)
+{
+ test_stackmap(ctx);
+ return 0;
+}
+/*
+ * No tests in here, just to trigger 'bpf_fentry_test*'
+ * through tracing test_run.
+ */
+SEC("fentry/bpf_modify_return_test")
+int BPF_PROG(trigger)
+{
+ return 0;
+}
+
+SEC("kprobe.multi")
+int kprobe(struct pt_regs *ctx)
+{
+ test_stackmap(ctx);
return 0;
}
--
2.51.0
* Re: [PATCH bpf-next] selftests/bpf: Add stacktrace test for kprobe multi
From: Andrii Nakryiko @ 2025-09-25 23:26 UTC
To: Jiri Olsa
Cc: Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko, Feng Yang,
bpf, linux-perf-users, Martin KaFai Lau, Eduard Zingerman,
Song Liu, Yonghong Song, John Fastabend, Hao Luo
On Thu, Sep 25, 2025 at 4:51 AM Jiri Olsa <jolsa@kernel.org> wrote:
>
> Adding stacktrace test for kprobe multi probe.
>
> [...]
>
> +#include "test_stacktrace_map.skel.h"
>
Tao just refactored this to skeleton, so please rebase and adjust accordingly
pw-bot: cr
> [...]
>
Can you please expand the set of program types you are testing:
kprobe/kretprobe, kprobe.multi/kretprobe.multi, fentry/fexit/fmod_ret,
maybe even uprobe/uretprobe?
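For illustration, a minimal sketch of what the expanded BPF side might
look like, reusing the existing test_stackmap() helper. The section
names follow libbpf conventions, but the probe target (bpf_fentry_test1)
and the program names here are illustrative assumptions, not necessarily
what the final patch will use:

SEC("kprobe/bpf_fentry_test1")
int kprobe_single(struct pt_regs *ctx)
{
	test_stackmap(ctx);
	return 0;
}

SEC("kretprobe/bpf_fentry_test1")
int kretprobe_single(struct pt_regs *ctx)
{
	test_stackmap(ctx);
	return 0;
}

SEC("fentry/bpf_fentry_test1")
int BPF_PROG(fentry_single)
{
	/* BPF_PROG() from bpf_tracing.h makes the raw context
	 * available as 'ctx'
	 */
	test_stackmap(ctx);
	return 0;
}

Each variant would then get its own subtest on the user space side,
attached and triggered the same way as the kprobe_multi one above.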
Also, for compare_stack_ips(), can we make sure that we look for the
expected >1 IPs there? We had (still have?) issues where we'd
(successfully) get a stack trace with just a single (but valid!) entry
corresponding to the traced function, but nothing beyond that, which
is clearly broken. So it would be good to be able to detect this going
forward.
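For illustration, one possible shape for that check, as a hypothetical
helper next to compare_stack_ips(); the name and the exact map walk are
assumptions, just to show the idea of requiring more than one non-zero
IP per collected stack trace:

static int check_stack_depth(int stackmap_fd)
{
	__u64 ips[PERF_MAX_STACK_DEPTH];
	__u32 key, next_key;
	int err, nonzero, i;

	err = bpf_map_get_next_key(stackmap_fd, NULL, &next_key);
	while (!err) {
		key = next_key;
		if (bpf_map_lookup_elem(stackmap_fd, &key, ips))
			return -1;
		nonzero = 0;
		for (i = 0; i < PERF_MAX_STACK_DEPTH; i++)
			nonzero += ips[i] != 0;
		/* a single-entry "stack" means unwinding stopped at
		 * the traced function itself, i.e. the broken case
		 * described above
		 */
		if (nonzero <= 1)
			return -1;
		err = bpf_map_get_next_key(stackmap_fd, &key, &next_key);
	}
	return 0;
}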
* Re: [PATCH bpf-next] selftests/bpf: Add stacktrace test for kprobe multi
From: Jiri Olsa @ 2025-09-26 10:54 UTC
To: Andrii Nakryiko
Cc: Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko, Feng Yang,
bpf, linux-perf-users, Martin KaFai Lau, Eduard Zingerman,
Song Liu, Yonghong Song, John Fastabend, Hao Luo
On Thu, Sep 25, 2025 at 04:26:08PM -0700, Andrii Nakryiko wrote:
> On Thu, Sep 25, 2025 at 4:51 AM Jiri Olsa <jolsa@kernel.org> wrote:
> > [...]
> > +#include "test_stacktrace_map.skel.h"
> >
>
> Tao just refactored this to skeleton, so please rebase and adjust accordingly
ok
> > [...]
>
> Can you please expand the set of program types you are testing:
> kprobe/kretprobe, kprobe.multi/kretprobe.multi, fentry/fexit/fmod_ret,
> maybe even uprobe/uretprobe?
right, will add those
>
> Also, for compare_stack_ips(), can we make sure that we look for the
> expected >1 IPs there? We had (still have?) issues where we'd
> (successfully) get a stack trace with just a single (but valid!) entry
> corresponding to the traced function, but nothing beyond that, which
> is clearly broken. So it would be good to be able to detect this going
> forward.
ok
thanks,
jirka