From: Tao Chen <chen.dylane@linux.dev>
To: peterz@infradead.org, mingo@redhat.com, acme@kernel.org,
namhyung@kernel.org, mark.rutland@arm.com,
alexander.shishkin@linux.intel.com, jolsa@kernel.org,
irogers@google.com, adrian.hunter@intel.com,
kan.liang@linux.intel.com, song@kernel.org, ast@kernel.org,
daniel@iogearbox.net, andrii@kernel.org, martin.lau@linux.dev,
eddyz87@gmail.com, yonghong.song@linux.dev,
john.fastabend@gmail.com, kpsingh@kernel.org, sdf@fomichev.me,
haoluo@google.com
Cc: linux-perf-users@vger.kernel.org, linux-kernel@vger.kernel.org,
bpf@vger.kernel.org, Tao Chen <chen.dylane@linux.dev>
Subject: [PATCH bpf-next v4 2/2] bpf: Hold the perf callchain entry until used completely
Date: Wed, 29 Oct 2025 00:25:02 +0800
Message-ID: <20251028162502.3418817-3-chen.dylane@linux.dev>
In-Reply-To: <20251028162502.3418817-1-chen.dylane@linux.dev>
As Alexei noted, the entry returned by get_perf_callchain() may be
reused if the task is preempted after the BPF program enters the
migrate-disabled section. The per-CPU perf_callchain_entries pool only
holds a small number of entries, so hold the entry until BPF is done
with it (see the sketch after this list):
1. get the perf callchain entry
2. the BPF program uses the entry
3. put the perf callchain entry
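A minimal sketch of this get/use/put protocol, mirroring the
bpf_get_stackid() hunk in the diff below (error handling trimmed; the
wrapper name sketch_get_stackid() is only for illustration):

	static long sketch_get_stackid(struct bpf_map *map, struct pt_regs *regs,
				       u64 flags, u32 max_depth, bool kernel, bool user)
	{
		struct perf_callchain_entry *trace;
		long ret;
		int rctx;

		/* 1. get: reserve a per-CPU callchain entry; rctx identifies
		 *    the recursion context so it can be released later
		 */
		trace = bpf_get_perf_callchain(&rctx, regs, kernel, user, max_depth, false);
		if (unlikely(!trace))
			return -EFAULT;

		/* 2. use: the entry stays reserved while BPF consumes it */
		ret = __bpf_get_stackid(map, trace, flags);

		/* 3. put: release the entry only after BPF is done with it */
		bpf_put_callchain_entry(rctx);

		return ret;
	}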
Signed-off-by: Tao Chen <chen.dylane@linux.dev>
---
kernel/bpf/stackmap.c | 61 ++++++++++++++++++++++++++++++++++---------
1 file changed, 48 insertions(+), 13 deletions(-)
diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
index e28b35c7e0b..70d38249083 100644
--- a/kernel/bpf/stackmap.c
+++ b/kernel/bpf/stackmap.c
@@ -188,13 +188,12 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
}
static struct perf_callchain_entry *
-get_callchain_entry_for_task(struct task_struct *task, u32 max_depth)
+get_callchain_entry_for_task(int *rctx, struct task_struct *task, u32 max_depth)
{
#ifdef CONFIG_STACKTRACE
struct perf_callchain_entry *entry;
- int rctx;
- entry = get_callchain_entry(&rctx);
+ entry = get_callchain_entry(rctx);
if (!entry)
return NULL;
@@ -216,8 +215,6 @@ get_callchain_entry_for_task(struct task_struct *task, u32 max_depth)
to[i] = (u64)(from[i]);
}
- put_callchain_entry(rctx);
-
return entry;
#else /* CONFIG_STACKTRACE */
return NULL;
@@ -297,6 +294,31 @@ static long __bpf_get_stackid(struct bpf_map *map,
return id;
}
+static struct perf_callchain_entry *
+bpf_get_perf_callchain(int *rctx, struct pt_regs *regs, bool kernel, bool user,
+ int max_stack, bool crosstask)
+{
+ struct perf_callchain_entry_ctx ctx;
+ struct perf_callchain_entry *entry;
+
+ entry = get_callchain_entry(rctx);
+ if (unlikely(!entry))
+ return NULL;
+
+ __init_perf_callchain_ctx(&ctx, entry, max_stack, false);
+ if (kernel)
+ __get_perf_callchain_kernel(&ctx, regs);
+ if (user && !crosstask)
+ __get_perf_callchain_user(&ctx, regs);
+
+ return entry;
+}
+
+static void bpf_put_callchain_entry(int rctx)
+{
+ put_callchain_entry(rctx);
+}
+
BPF_CALL_3(bpf_get_stackid, struct pt_regs *, regs, struct bpf_map *, map,
u64, flags)
{
@@ -305,6 +327,7 @@ BPF_CALL_3(bpf_get_stackid, struct pt_regs *, regs, struct bpf_map *, map,
bool user = flags & BPF_F_USER_STACK;
struct perf_callchain_entry *trace;
bool kernel = !user;
+ int rctx, ret;
if (unlikely(flags & ~(BPF_F_SKIP_FIELD_MASK | BPF_F_USER_STACK |
BPF_F_FAST_STACK_CMP | BPF_F_REUSE_STACKID)))
@@ -314,14 +337,15 @@ BPF_CALL_3(bpf_get_stackid, struct pt_regs *, regs, struct bpf_map *, map,
if (max_depth > sysctl_perf_event_max_stack)
max_depth = sysctl_perf_event_max_stack;
- trace = get_perf_callchain(regs, kernel, user, max_depth,
- false);
-
+ trace = bpf_get_perf_callchain(&rctx, regs, kernel, user, max_depth, false);
if (unlikely(!trace))
/* couldn't fetch the stack trace */
return -EFAULT;
- return __bpf_get_stackid(map, trace, flags);
+ ret = __bpf_get_stackid(map, trace, flags);
+ bpf_put_callchain_entry(rctx);
+
+ return ret;
}
const struct bpf_func_proto bpf_get_stackid_proto = {
@@ -415,6 +439,7 @@ static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task,
bool kernel = !user;
int err = -EINVAL;
u64 *ips;
+ int rctx;
if (unlikely(flags & ~(BPF_F_SKIP_FIELD_MASK | BPF_F_USER_STACK |
BPF_F_USER_BUILD_ID)))
@@ -449,17 +474,24 @@ static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task,
if (trace_in)
trace = trace_in;
else if (kernel && task)
- trace = get_callchain_entry_for_task(task, max_depth);
+ trace = get_callchain_entry_for_task(&rctx, task, max_depth);
else
- trace = get_perf_callchain(regs, kernel, user, max_depth,
- crosstask);
+ trace = bpf_get_perf_callchain(&rctx, regs, kernel, user, max_depth, crosstask);
- if (unlikely(!trace) || trace->nr < skip) {
+ if (unlikely(!trace)) {
if (may_fault)
rcu_read_unlock();
goto err_fault;
}
+ if (trace->nr < skip) {
+ if (may_fault)
+ rcu_read_unlock();
+ if (!trace_in)
+ bpf_put_callchain_entry(rctx);
+ goto err_fault;
+ }
+
trace_nr = trace->nr - skip;
trace_nr = (trace_nr <= num_elem) ? trace_nr : num_elem;
copy_len = trace_nr * elem_size;
@@ -479,6 +511,9 @@ static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task,
if (may_fault)
rcu_read_unlock();
+ if (!trace_in)
+ bpf_put_callchain_entry(rctx);
+
if (user_build_id)
stack_map_get_build_id_offset(buf, trace_nr, user, may_fault);
--
2.48.1