From: Jiri Olsa <jolsa@kernel.org>
To: Alexei Starovoitov <ast@kernel.org>,
Daniel Borkmann <daniel@iogearbox.net>,
Andrii Nakryiko <andrii@kernel.org>
Cc: bpf@vger.kernel.org, linux-trace-kernel@vger.kernel.org,
Martin KaFai Lau <kafai@fb.com>,
Eduard Zingerman <eddyz87@gmail.com>,
Song Liu <songliubraving@fb.com>, Yonghong Song <yhs@fb.com>,
Menglong Dong <menglong8.dong@gmail.com>,
Steven Rostedt <rostedt@kernel.org>
Subject: [PATCHv5 bpf-next 12/28] bpf: Add bpf_trampoline_multi_attach/detach functions
Date: Fri, 17 Apr 2026 21:24:46 +0200 [thread overview]
Message-ID: <20260417192502.194548-13-jolsa@kernel.org> (raw)
In-Reply-To: <20260417192502.194548-1-jolsa@kernel.org>
Adding bpf_trampoline_multi_attach/detach functions that allow us to
attach/detach a tracing program to/from multiple functions/trampolines.
The attachment is defined with a bpf_program and an array of BTF ids of
the functions to attach the bpf program to.
Adding bpf_tracing_multi_link object that holds all the attached
trampolines and is initialized in attach and used in detach.
The attachment allocates a new trampoline (or reuses an existing one)
for each function to attach to and links it with the bpf program.
The attach works as follows:
- we get all the needed trampolines
- lock them and add the bpf program to each (__bpf_trampoline_link_prog)
- the trampoline_multi_ops passed in __bpf_trampoline_link_prog gathers
ftrace_hash (ip -> trampoline) objects
- we call update_ftrace_direct_add/mod to update needed locations
- we unlock all the trampolines
The detach works as follows:
- we lock all the needed trampolines
- remove the program from each (__bpf_trampoline_unlink_prog)
- the trampoline_multi_ops passed in __bpf_trampoline_unlink_prog gathers
ftrace_hash (ip -> trampoline) objects
- we call update_ftrace_direct_del/mod to update needed locations
- we unlock and put all the trampolines
We store the old image/flags in the trampoline before the update
and use it in case we need to rollback the attachment.
We keep the ftrace_hash objects allocated during attach in the link
so they can be used for detach as well.
Adding trampoline_(un)lock_all functions to (un)lock all trampolines
to gate the tracing_multi attachment.
Note this is supported only on archs (currently x86_64) that have
ftrace direct call support and single ftrace ops support, meaning:
CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS &&
CONFIG_HAVE_SINGLE_FTRACE_DIRECT_OPS
It also needs CONFIG_BPF_SYSCALL enabled.
Signed-off-by: Jiri Olsa <jolsa@kernel.org>
---
include/linux/bpf.h | 43 ++++++
include/linux/bpf_verifier.h | 4 +
kernel/bpf/trampoline.c | 270 +++++++++++++++++++++++++++++++++++
kernel/bpf/verifier.c | 52 +++++++
4 files changed, 369 insertions(+)
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index c815227f3bc6..37c96daaae9a 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -31,6 +31,7 @@
#include <linux/static_call.h>
#include <linux/memcontrol.h>
#include <linux/cfi.h>
+#include <linux/ftrace.h>
#include <asm/rqspinlock.h>
struct bpf_verifier_env;
@@ -1355,6 +1356,11 @@ struct bpf_trampoline {
int progs_cnt[BPF_TRAMP_MAX];
/* Executable image of trampoline */
struct bpf_tramp_image *cur_image;
+ /* Used as temporary old image storage for multi_attach */
+ struct {
+ struct bpf_tramp_image *old_image;
+ u32 old_flags;
+ } multi_attach;
};
struct bpf_attach_target_info {
@@ -1452,6 +1458,8 @@ static inline int bpf_dynptr_check_off_len(const struct bpf_dynptr_kern *ptr, u6
return 0;
}
+struct bpf_tracing_multi_link;
+
#ifdef CONFIG_BPF_JIT
int bpf_trampoline_link_prog(struct bpf_tramp_node *node,
struct bpf_trampoline *tr,
@@ -1464,6 +1472,11 @@ struct bpf_trampoline *bpf_trampoline_get(u64 key,
void bpf_trampoline_put(struct bpf_trampoline *tr);
int arch_prepare_bpf_dispatcher(void *image, void *buf, s64 *funcs, int num_funcs);
+int bpf_trampoline_multi_attach(struct bpf_prog *prog, u32 *ids,
+ struct bpf_tracing_multi_link *link);
+int bpf_trampoline_multi_detach(struct bpf_prog *prog,
+ struct bpf_tracing_multi_link *link);
+
/*
* When the architecture supports STATIC_CALL replace the bpf_dispatcher_fn
* indirection with a direct call to the bpf program. If the architecture does
@@ -1573,6 +1586,16 @@ static inline bool bpf_prog_has_trampoline(const struct bpf_prog *prog)
{
return false;
}
+static inline int bpf_trampoline_multi_attach(struct bpf_prog *prog, u32 *ids,
+ struct bpf_tracing_multi_link *link)
+{
+ return -ENOTSUPP;
+}
+static inline int bpf_trampoline_multi_detach(struct bpf_prog *prog,
+ struct bpf_tracing_multi_link *link)
+{
+ return -ENOTSUPP;
+}
#endif
struct bpf_func_info_aux {
@@ -1892,6 +1915,26 @@ struct bpf_tracing_link {
struct bpf_prog *tgt_prog;
};
+struct bpf_tracing_multi_node {
+ struct bpf_tramp_node node;
+ struct bpf_trampoline *trampoline;
+ struct ftrace_func_entry entry;
+};
+
+struct bpf_tracing_multi_data {
+ struct ftrace_hash *unreg;
+ struct ftrace_hash *modify;
+ struct ftrace_hash *reg;
+ struct ftrace_func_entry *entry;
+};
+
+struct bpf_tracing_multi_link {
+ struct bpf_link link;
+ struct bpf_tracing_multi_data data;
+ int nodes_cnt;
+ struct bpf_tracing_multi_node nodes[] __counted_by(nodes_cnt);
+};
+
struct bpf_raw_tp_link {
struct bpf_link link;
struct bpf_raw_event_map *btp;
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 53e8664cb566..055450f8d718 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -1476,6 +1476,10 @@ int bpf_add_kfunc_call(struct bpf_verifier_env *env, u32 func_id, u16 offset);
int bpf_fixup_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
struct bpf_insn *insn_buf, int insn_idx, int *cnt);
+/* Functions exported from verifier.c, used by trampoline.c */
+int bpf_check_attach_btf_id_multi(struct btf *btf, struct bpf_prog *prog, u32 btf_id,
+ struct bpf_attach_target_info *tgt_info);
+
/* Functions in fixups.c, called from bpf_check() */
int bpf_remove_fastcall_spills_fills(struct bpf_verifier_env *env);
int bpf_optimize_bpf_loop(struct bpf_verifier_env *env);
diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c
index 23856bd28d59..f6180f15d5a4 100644
--- a/kernel/bpf/trampoline.c
+++ b/kernel/bpf/trampoline.c
@@ -1447,6 +1447,276 @@ int __weak arch_bpf_trampoline_size(const struct btf_func_model *m, u32 flags,
return -ENOTSUPP;
}
+#if defined(CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS) && \
+ defined(CONFIG_HAVE_SINGLE_FTRACE_DIRECT_OPS) && \
+ defined(CONFIG_BPF_SYSCALL)
+
+static void trampoline_lock_all(void)
+{
+ int i;
+
+ for (i = 0; i < TRAMPOLINE_LOCKS_TABLE_SIZE; i++)
+ mutex_lock(&trampoline_locks[i].mutex);
+}
+
+static void trampoline_unlock_all(void)
+{
+ int i;
+
+ for (i = 0; i < TRAMPOLINE_LOCKS_TABLE_SIZE; i++)
+ mutex_unlock(&trampoline_locks[i].mutex);
+}
+
+static void remove_tracing_multi_data(struct bpf_tracing_multi_data *data)
+{
+ ftrace_hash_remove(data->reg);
+ ftrace_hash_remove(data->unreg);
+ ftrace_hash_remove(data->modify);
+}
+
+static void clear_tracing_multi_data(struct bpf_tracing_multi_data *data)
+{
+ remove_tracing_multi_data(data);
+
+ free_ftrace_hash(data->reg);
+ free_ftrace_hash(data->unreg);
+ free_ftrace_hash(data->modify);
+}
+
+static int init_tracing_multi_data(struct bpf_tracing_multi_data *data)
+{
+ data->reg = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
+ data->unreg = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
+ data->modify = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
+
+ if (!data->reg || !data->unreg || !data->modify) {
+ clear_tracing_multi_data(data);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static void ftrace_hash_add(struct ftrace_hash *hash, struct ftrace_func_entry *entry,
+ unsigned long ip, unsigned long direct)
+{
+ entry->ip = ip;
+ entry->direct = direct;
+ add_ftrace_hash_entry(hash, entry);
+}
+
+static int register_fentry_multi(struct bpf_trampoline *tr, struct bpf_tramp_image *im, void *ptr)
+{
+ unsigned long addr = (unsigned long) im->image;
+ unsigned long ip = ftrace_location(tr->ip);
+ struct bpf_tracing_multi_data *data = ptr;
+
+ if (bpf_trampoline_use_jmp(tr->flags))
+ addr = ftrace_jmp_set(addr);
+
+ ftrace_hash_add(data->reg, data->entry, ip, addr);
+ tr->cur_image = im;
+ return 0;
+}
+
+static int unregister_fentry_multi(struct bpf_trampoline *tr, u32 orig_flags, void *ptr)
+{
+ unsigned long addr = (unsigned long) tr->cur_image->image;
+ unsigned long ip = ftrace_location(tr->ip);
+ struct bpf_tracing_multi_data *data = ptr;
+
+ if (bpf_trampoline_use_jmp(tr->flags))
+ addr = ftrace_jmp_set(addr);
+
+ ftrace_hash_add(data->unreg, data->entry, ip, addr);
+ tr->cur_image = NULL;
+ return 0;
+}
+
+static int modify_fentry_multi(struct bpf_trampoline *tr, u32 orig_flags, struct bpf_tramp_image *im,
+ bool lock_direct_mutex, void *ptr)
+{
+ unsigned long addr = (unsigned long) im->image;
+ unsigned long ip = ftrace_location(tr->ip);
+ struct bpf_tracing_multi_data *data = ptr;
+
+ if (bpf_trampoline_use_jmp(tr->flags))
+ addr = ftrace_jmp_set(addr);
+
+ ftrace_hash_add(data->modify, data->entry, ip, addr);
+ tr->cur_image = im;
+ return 0;
+}
+
+static const struct bpf_trampoline_ops trampoline_multi_ops = {
+ .register_fentry = register_fentry_multi,
+ .unregister_fentry = unregister_fentry_multi,
+ .modify_fentry = modify_fentry_multi,
+};
+
+static void bpf_trampoline_multi_attach_init(struct bpf_trampoline *tr)
+{
+ tr->multi_attach.old_image = tr->cur_image;
+ tr->multi_attach.old_flags = tr->flags;
+}
+
+static void bpf_trampoline_multi_attach_free(struct bpf_trampoline *tr)
+{
+ if (tr->multi_attach.old_image)
+ bpf_tramp_image_put(tr->multi_attach.old_image);
+
+ tr->multi_attach.old_image = NULL;
+ tr->multi_attach.old_flags = 0;
+}
+
+static void bpf_trampoline_multi_attach_rollback(struct bpf_trampoline *tr)
+{
+ if (tr->cur_image)
+ bpf_tramp_image_put(tr->cur_image);
+ tr->cur_image = tr->multi_attach.old_image;
+ tr->flags = tr->multi_attach.old_flags;
+
+ tr->multi_attach.old_image = NULL;
+ tr->multi_attach.old_flags = 0;
+}
+
+#define for_each_mnode_cnt(mnode, link, cnt) \
+ for (i = 0, mnode = &link->nodes[i]; i < cnt; i++, mnode = &link->nodes[i])
+
+#define for_each_mnode(mnode, link) \
+ for_each_mnode_cnt(mnode, link, link->nodes_cnt)
+
+int bpf_trampoline_multi_attach(struct bpf_prog *prog, u32 *ids,
+ struct bpf_tracing_multi_link *link)
+{
+ struct bpf_tracing_multi_data *data = &link->data;
+ struct bpf_attach_target_info tgt_info = {};
+ struct btf *btf = prog->aux->attach_btf;
+ struct bpf_tracing_multi_node *mnode;
+ struct bpf_trampoline *tr;
+ int i, err, rollback_cnt;
+ u64 key;
+
+ for_each_mnode(mnode, link) {
+ rollback_cnt = i;
+
+ err = bpf_check_attach_btf_id_multi(btf, prog, ids[i], &tgt_info);
+ if (err)
+ goto rollback_put;
+
+ key = bpf_trampoline_compute_key(NULL, btf, ids[i]);
+
+ tr = bpf_trampoline_get(key, &tgt_info);
+ if (!tr) {
+ err = -ENOMEM;
+ goto rollback_put;
+ }
+
+ mnode->trampoline = tr;
+ mnode->node.link = &link->link;
+
+ cond_resched();
+ }
+
+ err = init_tracing_multi_data(data);
+ if (err) {
+ rollback_cnt = link->nodes_cnt;
+ goto rollback_put;
+ }
+
+ trampoline_lock_all();
+
+ for_each_mnode(mnode, link) {
+ bpf_trampoline_multi_attach_init(mnode->trampoline);
+
+ data->entry = &mnode->entry;
+ err = __bpf_trampoline_link_prog(&mnode->node, mnode->trampoline, NULL,
+ &trampoline_multi_ops, data);
+ if (err) {
+ rollback_cnt = i;
+ goto rollback_unlink;
+ }
+ }
+
+ rollback_cnt = link->nodes_cnt;
+ if (ftrace_hash_count(data->reg)) {
+ err = update_ftrace_direct_add(&direct_ops, data->reg);
+ if (err)
+ goto rollback_unlink;
+ }
+
+ if (ftrace_hash_count(data->modify)) {
+ err = update_ftrace_direct_mod(&direct_ops, data->modify, true);
+ if (err) {
+ WARN_ON_ONCE(update_ftrace_direct_del(&direct_ops, data->reg));
+ goto rollback_unlink;
+ }
+ }
+
+ for_each_mnode(mnode, link)
+ bpf_trampoline_multi_attach_free(mnode->trampoline);
+
+ trampoline_unlock_all();
+
+ remove_tracing_multi_data(data);
+ return 0;
+
+rollback_unlink:
+ for_each_mnode_cnt(mnode, link, rollback_cnt) {
+ bpf_trampoline_remove_prog(mnode->trampoline, &mnode->node);
+ bpf_trampoline_multi_attach_rollback(mnode->trampoline);
+ }
+
+ trampoline_unlock_all();
+
+ clear_tracing_multi_data(data);
+ rollback_cnt = link->nodes_cnt;
+
+rollback_put:
+ for_each_mnode_cnt(mnode, link, rollback_cnt)
+ bpf_trampoline_put(mnode->trampoline);
+
+ return err;
+}
+
+int bpf_trampoline_multi_detach(struct bpf_prog *prog, struct bpf_tracing_multi_link *link)
+{
+ struct bpf_tracing_multi_data *data = &link->data;
+ struct bpf_tracing_multi_node *mnode;
+ int i;
+
+ trampoline_lock_all();
+
+ for_each_mnode(mnode, link) {
+ data->entry = &mnode->entry;
+ bpf_trampoline_multi_attach_init(mnode->trampoline);
+ WARN_ON_ONCE(__bpf_trampoline_unlink_prog(&mnode->node, mnode->trampoline,
+ NULL, &trampoline_multi_ops, data));
+ }
+
+ if (ftrace_hash_count(data->unreg))
+ WARN_ON_ONCE(update_ftrace_direct_del(&direct_ops, data->unreg));
+ if (ftrace_hash_count(data->modify))
+ WARN_ON_ONCE(update_ftrace_direct_mod(&direct_ops, data->modify, true));
+
+ for_each_mnode(mnode, link)
+ bpf_trampoline_multi_attach_free(mnode->trampoline);
+
+ trampoline_unlock_all();
+
+ for_each_mnode(mnode, link)
+ bpf_trampoline_put(mnode->trampoline);
+
+ clear_tracing_multi_data(data);
+ return 0;
+}
+
+#undef for_each_mnode_cnt
+#undef for_each_mnode
+
+#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS &&
+ CONFIG_HAVE_SINGLE_FTRACE_DIRECT_OPS &&
+ CONFIG_BPF_SYSCALL */
+
static int __init init_trampolines(void)
{
int i;
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 739e730add96..4f5a21a4eddc 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -19658,6 +19658,58 @@ static int check_attach_btf_id(struct bpf_verifier_env *env)
return 0;
}
+int bpf_check_attach_btf_id_multi(struct btf *btf, struct bpf_prog *prog, u32 btf_id,
+ struct bpf_attach_target_info *tgt_info)
+{
+ const struct btf_type *t;
+ unsigned long addr;
+ const char *tname;
+ int err;
+
+ if (!btf_id || !btf)
+ return -EINVAL;
+
+ /* Check noreturn attachment. */
+ if (prog->expected_attach_type == BPF_TRACE_FEXIT_MULTI ||
+ btf_id_set_contains(&noreturn_deny, btf_id))
+ return -EINVAL;
+
+ /* Check and get function target data. */
+ t = btf_type_by_id(btf, btf_id);
+ if (!t)
+ return -EINVAL;
+ tname = btf_name_by_offset(btf, t->name_off);
+ if (!tname)
+ return -EINVAL;
+ if (!btf_type_is_func(t))
+ return -EINVAL;
+ t = btf_type_by_id(btf, t->type);
+ if (!btf_type_is_func_proto(t))
+ return -EINVAL;
+ err = btf_distill_func_proto(NULL, btf, t, tname, &tgt_info->fmodel);
+ if (err < 0)
+ return err;
+ if (btf_is_module(btf)) {
+ /* The bpf program already holds reference to module. */
+ if (WARN_ON_ONCE(!prog->aux->mod))
+ return -EINVAL;
+ addr = find_kallsyms_symbol_value(prog->aux->mod, tname);
+ } else {
+ addr = kallsyms_lookup_name(tname);
+ }
+ if (!addr || !ftrace_location(addr))
+ return -ENOENT;
+
+ /* Check sleepable program attachment. */
+ if (prog->sleepable) {
+ err = btf_id_allow_sleepable(btf_id, addr, prog, btf);
+ if (err)
+ return err;
+ }
+ tgt_info->tgt_addr = addr;
+ return 0;
+}
+
struct btf *bpf_get_btf_vmlinux(void)
{
if (!btf_vmlinux && IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) {
--
2.53.0
next prev parent reply other threads:[~2026-04-17 19:27 UTC|newest]
Thread overview: 45+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-04-17 19:24 [PATCHv5 bpf-next 00/28] bpf: tracing_multi link Jiri Olsa
2026-04-17 19:24 ` [PATCHv5 bpf-next 01/28] ftrace: Add ftrace_hash_count function Jiri Olsa
2026-04-18 6:10 ` bot+bpf-ci
2026-04-17 19:24 ` [PATCHv5 bpf-next 02/28] ftrace: Add ftrace_hash_remove function Jiri Olsa
2026-04-18 6:10 ` bot+bpf-ci
2026-04-17 19:24 ` [PATCHv5 bpf-next 03/28] ftrace: Add add_ftrace_hash_entry function Jiri Olsa
2026-04-17 19:24 ` [PATCHv5 bpf-next 04/28] bpf: Use mutex lock pool for bpf trampolines Jiri Olsa
2026-04-17 20:10 ` bot+bpf-ci
2026-04-18 6:49 ` bot+bpf-ci
2026-04-17 19:24 ` [PATCHv5 bpf-next 05/28] bpf: Add struct bpf_trampoline_ops object Jiri Olsa
2026-04-17 19:24 ` [PATCHv5 bpf-next 06/28] bpf: Move trampoline image setup into bpf_trampoline_ops callbacks Jiri Olsa
2026-04-17 20:10 ` bot+bpf-ci
2026-04-18 6:10 ` bot+bpf-ci
2026-04-17 19:24 ` [PATCHv5 bpf-next 07/28] bpf: Add bpf_trampoline_add/remove_prog functions Jiri Olsa
2026-04-17 19:24 ` [PATCHv5 bpf-next 08/28] bpf: Add struct bpf_tramp_node object Jiri Olsa
2026-04-17 20:22 ` bot+bpf-ci
2026-04-18 6:10 ` bot+bpf-ci
2026-04-17 19:24 ` [PATCHv5 bpf-next 09/28] bpf: Factor fsession link to use struct bpf_tramp_node Jiri Olsa
2026-04-17 19:24 ` [PATCHv5 bpf-next 10/28] bpf: Add multi tracing attach types Jiri Olsa
2026-04-17 20:22 ` bot+bpf-ci
2026-04-18 6:49 ` bot+bpf-ci
2026-04-17 19:24 ` [PATCHv5 bpf-next 11/28] bpf: Move sleepable verification code to btf_id_allow_sleepable Jiri Olsa
2026-04-17 19:24 ` Jiri Olsa [this message]
2026-04-17 20:22 ` [PATCHv5 bpf-next 12/28] bpf: Add bpf_trampoline_multi_attach/detach functions bot+bpf-ci
2026-04-18 6:10 ` bot+bpf-ci
2026-04-17 19:24 ` [PATCHv5 bpf-next 13/28] bpf: Add support for tracing multi link Jiri Olsa
2026-04-17 19:24 ` [PATCHv5 bpf-next 14/28] bpf: Add support for tracing_multi link cookies Jiri Olsa
2026-04-17 19:24 ` [PATCHv5 bpf-next 15/28] bpf: Add support for tracing_multi link session Jiri Olsa
2026-04-17 19:24 ` [PATCHv5 bpf-next 16/28] bpf: Add support for tracing_multi link fdinfo Jiri Olsa
2026-04-17 19:24 ` [PATCHv5 bpf-next 17/28] libbpf: Add bpf_object_cleanup_btf function Jiri Olsa
2026-04-17 19:24 ` [PATCHv5 bpf-next 18/28] libbpf: Add bpf_link_create support for tracing_multi link Jiri Olsa
2026-04-17 19:24 ` [PATCHv5 bpf-next 19/28] libbpf: Add btf_type_is_traceable_func function Jiri Olsa
2026-04-18 5:59 ` bot+bpf-ci
2026-04-17 19:24 ` [PATCHv5 bpf-next 20/28] libbpf: Add support to create tracing multi link Jiri Olsa
2026-04-18 6:10 ` bot+bpf-ci
2026-04-17 19:24 ` [PATCHv5 bpf-next 21/28] selftests/bpf: Add tracing multi skel/pattern/ids attach tests Jiri Olsa
2026-04-17 20:10 ` bot+bpf-ci
2026-04-18 6:10 ` bot+bpf-ci
2026-04-17 19:24 ` [PATCHv5 bpf-next 22/28] selftests/bpf: Add tracing multi skel/pattern/ids module " Jiri Olsa
2026-04-17 19:24 ` [PATCHv5 bpf-next 23/28] selftests/bpf: Add tracing multi intersect tests Jiri Olsa
2026-04-17 19:24 ` [PATCHv5 bpf-next 24/28] selftests/bpf: Add tracing multi cookies test Jiri Olsa
2026-04-17 19:24 ` [PATCHv5 bpf-next 25/28] selftests/bpf: Add tracing multi session test Jiri Olsa
2026-04-17 19:25 ` [PATCHv5 bpf-next 26/28] selftests/bpf: Add tracing multi attach fails test Jiri Olsa
2026-04-17 19:25 ` [PATCHv5 bpf-next 27/28] selftests/bpf: Add tracing multi attach benchmark test Jiri Olsa
2026-04-17 19:25 ` [PATCHv5 bpf-next 28/28] selftests/bpf: Add tracing multi attach rollback tests Jiri Olsa
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260417192502.194548-13-jolsa@kernel.org \
--to=jolsa@kernel.org \
--cc=andrii@kernel.org \
--cc=ast@kernel.org \
--cc=bpf@vger.kernel.org \
--cc=daniel@iogearbox.net \
--cc=eddyz87@gmail.com \
--cc=kafai@fb.com \
--cc=linux-trace-kernel@vger.kernel.org \
--cc=menglong8.dong@gmail.com \
--cc=rostedt@kernel.org \
--cc=songliubraving@fb.com \
--cc=yhs@fb.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox