From: Mykyta Yatsenko <mykyta.yatsenko5@gmail.com>
To: bpf@vger.kernel.org, ast@kernel.org, andrii@kernel.org,
daniel@iogearbox.net, kafai@meta.com, kernel-team@meta.com,
eddyz87@gmail.com, memxor@gmail.com, peterz@infradead.org,
rostedt@goodmis.org
Cc: Mykyta Yatsenko <yatsenko@meta.com>
Subject: [PATCH bpf-next v13 1/6] bpf: Add sleepable support for raw tracepoint programs
Date: Wed, 22 Apr 2026 12:41:06 -0700 [thread overview]
Message-ID: <20260422-sleepable_tracepoints-v13-1-99005dff21ef@meta.com> (raw)
In-Reply-To: <20260422-sleepable_tracepoints-v13-0-99005dff21ef@meta.com>
From: Mykyta Yatsenko <yatsenko@meta.com>
Rework __bpf_trace_run() to support sleepable BPF programs by using
explicit RCU flavor selection, following the uprobe_prog_run() pattern.
For sleepable programs, use rcu_read_lock_tasks_trace() for lifetime
protection, combined with migrate_disable(). For non-sleepable programs,
use the regular rcu_read_lock_dont_migrate().
Remove the preempt_disable_notrace/preempt_enable_notrace pair from
the faultable tracepoint BPF probe wrapper in bpf_probe.h, since
migration protection and RCU locking are now handled per-program
inside __bpf_trace_run().
Adapt bpf_prog_test_run_raw_tp() for sleepable programs: reject
BPF_F_TEST_RUN_ON_CPU since sleepable programs cannot run in hardirq
or preempt-disabled context, and call __bpf_prog_test_run_raw_tp()
directly instead of via smp_call_function_single(). Rework
__bpf_prog_test_run_raw_tp() to select the RCU flavor per-program and
add a per-program recursion context guard for private stack safety.
Acked-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
Signed-off-by: Mykyta Yatsenko <yatsenko@meta.com>
---
include/trace/bpf_probe.h | 2 --
kernel/trace/bpf_trace.c | 20 ++++++++++++---
net/bpf/test_run.c | 65 ++++++++++++++++++++++++++++++++++++-----------
3 files changed, 67 insertions(+), 20 deletions(-)
diff --git a/include/trace/bpf_probe.h b/include/trace/bpf_probe.h
index 9391d54d3f12..d1de8f9aa07f 100644
--- a/include/trace/bpf_probe.h
+++ b/include/trace/bpf_probe.h
@@ -58,9 +58,7 @@ static notrace void \
__bpf_trace_##call(void *__data, proto) \
{ \
might_fault(); \
- preempt_disable_notrace(); \
CONCATENATE(bpf_trace_run, COUNT_ARGS(args))(__data, CAST_TO_U64(args)); \
- preempt_enable_notrace(); \
}
#undef DECLARE_EVENT_SYSCALL_CLASS
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index e916f0ccbed9..7276c72c1d31 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -2072,11 +2072,19 @@ void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp)
static __always_inline
void __bpf_trace_run(struct bpf_raw_tp_link *link, u64 *args)
{
+ struct srcu_ctr __percpu *scp = NULL;
struct bpf_prog *prog = link->link.prog;
+ bool sleepable = prog->sleepable;
struct bpf_run_ctx *old_run_ctx;
struct bpf_trace_run_ctx run_ctx;
- rcu_read_lock_dont_migrate();
+ if (sleepable) {
+ scp = rcu_read_lock_tasks_trace();
+ migrate_disable();
+ } else {
+ rcu_read_lock_dont_migrate();
+ }
+
if (unlikely(!bpf_prog_get_recursion_context(prog))) {
bpf_prog_inc_misses_counter(prog);
goto out;
@@ -2085,12 +2093,18 @@ void __bpf_trace_run(struct bpf_raw_tp_link *link, u64 *args)
run_ctx.bpf_cookie = link->cookie;
old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
- (void) bpf_prog_run(prog, args);
+ (void)bpf_prog_run(prog, args);
bpf_reset_run_ctx(old_run_ctx);
out:
bpf_prog_put_recursion_context(prog);
- rcu_read_unlock_migrate();
+
+ if (sleepable) {
+ migrate_enable();
+ rcu_read_unlock_tasks_trace(scp);
+ } else {
+ rcu_read_unlock_migrate();
+ }
}
#define UNPACK(...) __VA_ARGS__
diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
index 2bc04feadfab..c9aea7052ba7 100644
--- a/net/bpf/test_run.c
+++ b/net/bpf/test_run.c
@@ -748,14 +748,35 @@ static void
__bpf_prog_test_run_raw_tp(void *data)
{
struct bpf_raw_tp_test_run_info *info = data;
+ struct srcu_ctr __percpu *scp = NULL;
struct bpf_trace_run_ctx run_ctx = {};
struct bpf_run_ctx *old_run_ctx;
old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
- rcu_read_lock();
+ if (info->prog->sleepable) {
+ scp = rcu_read_lock_tasks_trace();
+ migrate_disable();
+ } else {
+ rcu_read_lock();
+ }
+
+ if (unlikely(!bpf_prog_get_recursion_context(info->prog))) {
+ bpf_prog_inc_misses_counter(info->prog);
+ goto out;
+ }
+
info->retval = bpf_prog_run(info->prog, info->ctx);
- rcu_read_unlock();
+
+out:
+ bpf_prog_put_recursion_context(info->prog);
+
+ if (info->prog->sleepable) {
+ migrate_enable();
+ rcu_read_unlock_tasks_trace(scp);
+ } else {
+ rcu_read_unlock();
+ }
bpf_reset_run_ctx(old_run_ctx);
}
@@ -783,6 +804,13 @@ int bpf_prog_test_run_raw_tp(struct bpf_prog *prog,
if ((kattr->test.flags & BPF_F_TEST_RUN_ON_CPU) == 0 && cpu != 0)
return -EINVAL;
+ /*
+ * Sleepable programs cannot run with preemption disabled or in
+ * hardirq context (smp_call_function_single), reject the flag.
+ */
+ if (prog->sleepable && (kattr->test.flags & BPF_F_TEST_RUN_ON_CPU))
+ return -EINVAL;
+
if (ctx_size_in) {
info.ctx = memdup_user(ctx_in, ctx_size_in);
if (IS_ERR(info.ctx))
@@ -791,24 +819,31 @@ int bpf_prog_test_run_raw_tp(struct bpf_prog *prog,
info.ctx = NULL;
}
+ info.retval = 0;
info.prog = prog;
- current_cpu = get_cpu();
- if ((kattr->test.flags & BPF_F_TEST_RUN_ON_CPU) == 0 ||
- cpu == current_cpu) {
+ if (prog->sleepable) {
__bpf_prog_test_run_raw_tp(&info);
- } else if (cpu >= nr_cpu_ids || !cpu_online(cpu)) {
- /* smp_call_function_single() also checks cpu_online()
- * after csd_lock(). However, since cpu is from user
- * space, let's do an extra quick check to filter out
- * invalid value before smp_call_function_single().
- */
- err = -ENXIO;
} else {
- err = smp_call_function_single(cpu, __bpf_prog_test_run_raw_tp,
- &info, 1);
+ current_cpu = get_cpu();
+ if ((kattr->test.flags & BPF_F_TEST_RUN_ON_CPU) == 0 ||
+ cpu == current_cpu) {
+ __bpf_prog_test_run_raw_tp(&info);
+ } else if (cpu >= nr_cpu_ids || !cpu_online(cpu)) {
+ /*
+ * smp_call_function_single() also checks cpu_online()
+ * after csd_lock(). However, since cpu is from user
+ * space, let's do an extra quick check to filter out
+ * invalid value before smp_call_function_single().
+ */
+ err = -ENXIO;
+ } else {
+ err = smp_call_function_single(cpu,
+ __bpf_prog_test_run_raw_tp,
+ &info, 1);
+ }
+ put_cpu();
}
- put_cpu();
if (!err &&
copy_to_user(&uattr->test.retval, &info.retval, sizeof(u32)))
--
2.52.0
next prev parent reply other threads:[~2026-04-22 19:42 UTC|newest]
Thread overview: 15+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-04-22 19:41 [PATCH bpf-next v13 0/6] bpf: Add support for sleepable tracepoint programs Mykyta Yatsenko
2026-04-22 19:41 ` Mykyta Yatsenko [this message]
2026-04-22 19:41 ` [PATCH bpf-next v13 2/6] bpf: Add bpf_prog_run_array_sleepable() Mykyta Yatsenko
2026-04-22 19:41 ` [PATCH bpf-next v13 3/6] bpf: Add sleepable support for classic tracepoint programs Mykyta Yatsenko
2026-04-22 20:57 ` bot+bpf-ci
2026-04-23 13:42 ` Mykyta Yatsenko
2026-04-22 23:35 ` sashiko-bot
2026-04-23 13:38 ` Mykyta Yatsenko
2026-04-22 19:41 ` [PATCH bpf-next v13 4/6] bpf: Verifier support for sleepable " Mykyta Yatsenko
2026-04-22 19:41 ` [PATCH bpf-next v13 5/6] libbpf: Add section handlers for sleepable tracepoints Mykyta Yatsenko
2026-04-22 19:41 ` [PATCH bpf-next v13 6/6] selftests/bpf: Add tests for sleepable tracepoint programs Mykyta Yatsenko
2026-04-22 20:30 ` bot+bpf-ci
2026-04-22 20:37 ` Mykyta Yatsenko
2026-04-22 20:49 ` Kumar Kartikeya Dwivedi
2026-04-22 21:00 ` [PATCH bpf-next v13 0/6] bpf: Add support " patchwork-bot+netdevbpf
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260422-sleepable_tracepoints-v13-1-99005dff21ef@meta.com \
--to=mykyta.yatsenko5@gmail.com \
--cc=andrii@kernel.org \
--cc=ast@kernel.org \
--cc=bpf@vger.kernel.org \
--cc=daniel@iogearbox.net \
--cc=eddyz87@gmail.com \
--cc=kafai@meta.com \
--cc=kernel-team@meta.com \
--cc=memxor@gmail.com \
--cc=peterz@infradead.org \
--cc=rostedt@goodmis.org \
--cc=yatsenko@meta.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox