public inbox for linux-kernel@vger.kernel.org
From: Tao Chen <chen.dylane@linux.dev>
To: peterz@infradead.org, mingo@redhat.com, acme@kernel.org,
	namhyung@kernel.org, mark.rutland@arm.com,
	alexander.shishkin@linux.intel.com, jolsa@kernel.org,
	irogers@google.com, adrian.hunter@intel.com,
	kan.liang@linux.intel.com, song@kernel.org, ast@kernel.org,
	daniel@iogearbox.net, andrii@kernel.org, martin.lau@linux.dev,
	eddyz87@gmail.com, yonghong.song@linux.dev,
	john.fastabend@gmail.com, kpsingh@kernel.org, sdf@fomichev.me,
	haoluo@google.com
Cc: linux-perf-users@vger.kernel.org, linux-kernel@vger.kernel.org,
	bpf@vger.kernel.org, Tao Chen <chen.dylane@linux.dev>
Subject: [PATCH bpf-next v8 3/3] bpf: Hold the perf callchain entry until used completely
Date: Mon, 26 Jan 2026 15:43:31 +0800	[thread overview]
Message-ID: <20260126074331.815684-4-chen.dylane@linux.dev> (raw)
In-Reply-To: <20260126074331.815684-1-chen.dylane@linux.dev>

As Alexei noted, the entry returned by get_perf_callchain() may be
reused if the task is preempted after the BPF program has entered the
migrate-disabled section. The perf callchain entries form a small
per-context stack, so hold each entry until BPF has finished with it:

1. get the perf callchain entry
2. the BPF program uses the entry
3. put the perf callchain entry

Peter also suggested that get_recursion_context() be used with
preemption disabled, so disable preemption around it on the BPF side;
a minimal sketch of the resulting pattern follows.
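
The snippet below is an illustrative sketch only; it assumes the
rctx-less get_callchain_entry()/put_callchain_entry() helpers
introduced in patch 1/3 of this series:

	struct perf_callchain_entry *entry;

	preempt_disable();              /* get_recursion_context() needs this */
	entry = get_callchain_entry();  /* 1. get */
	preempt_enable();
	if (unlikely(!entry))
		return -EFAULT;

	/* 2. the BPF program consumes entry->ip[0..entry->nr) */

	put_callchain_entry(entry);     /* 3. put */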

Acked-by: Yonghong Song <yonghong.song@linux.dev>
Signed-off-by: Tao Chen <chen.dylane@linux.dev>
---
 kernel/bpf/stackmap.c | 55 ++++++++++++++++++++++++++++++++++++-------
 1 file changed, 47 insertions(+), 8 deletions(-)

diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
index e77dcdc2164..6bdee6cc05f 100644
--- a/kernel/bpf/stackmap.c
+++ b/kernel/bpf/stackmap.c
@@ -215,7 +215,9 @@ get_callchain_entry_for_task(struct task_struct *task, u32 max_depth)
 #ifdef CONFIG_STACKTRACE
 	struct perf_callchain_entry *entry;
 
+	preempt_disable();
 	entry = get_callchain_entry();
+	preempt_enable();
 
 	if (!entry)
 		return NULL;
@@ -237,14 +239,40 @@ get_callchain_entry_for_task(struct task_struct *task, u32 max_depth)
 			to[i] = (u64)(from[i]);
 	}
 
-	put_callchain_entry(entry);
-
 	return entry;
 #else /* CONFIG_STACKTRACE */
 	return NULL;
 #endif
 }
 
+static struct perf_callchain_entry *
+bpf_get_perf_callchain(struct pt_regs *regs, bool kernel, bool user, int max_stack,
+		       bool crosstask)
+{
+	struct perf_callchain_entry *entry;
+	int ret;
+
+	preempt_disable();
+	entry = get_callchain_entry();
+	preempt_enable();
+
+	if (unlikely(!entry))
+		return NULL;
+
+	ret = __get_perf_callchain(entry, regs, kernel, user, max_stack, crosstask, false, 0);
+	if (ret) {
+		put_callchain_entry(entry);
+		return NULL;
+	}
+
+	return entry;
+}
+
+static void bpf_put_perf_callchain(struct perf_callchain_entry *entry)
+{
+	put_callchain_entry(entry);
+}
+
 static long __bpf_get_stackid(struct bpf_map *map,
 			      struct perf_callchain_entry *trace, u64 flags)
 {
@@ -327,20 +355,23 @@ BPF_CALL_3(bpf_get_stackid, struct pt_regs *, regs, struct bpf_map *, map,
 	struct perf_callchain_entry *trace;
 	bool kernel = !user;
 	u32 max_depth;
+	int ret;
 
 	if (unlikely(flags & ~(BPF_F_SKIP_FIELD_MASK | BPF_F_USER_STACK |
 			       BPF_F_FAST_STACK_CMP | BPF_F_REUSE_STACKID)))
 		return -EINVAL;
 
 	max_depth = stack_map_calculate_max_depth(map->value_size, elem_size, flags);
-	trace = get_perf_callchain(regs, kernel, user, max_depth,
-				   false, false, 0);
+	trace = bpf_get_perf_callchain(regs, kernel, user, max_depth, false);
 
 	if (unlikely(!trace))
 		/* couldn't fetch the stack trace */
 		return -EFAULT;
 
-	return __bpf_get_stackid(map, trace, flags);
+	ret = __bpf_get_stackid(map, trace, flags);
+	bpf_put_perf_callchain(trace);
+
+	return ret;
 }
 
 const struct bpf_func_proto bpf_get_stackid_proto = {
@@ -468,13 +499,19 @@ static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task,
 	} else if (kernel && task) {
 		trace = get_callchain_entry_for_task(task, max_depth);
 	} else {
-		trace = get_perf_callchain(regs, kernel, user, max_depth,
-					   crosstask, false, 0);
+		trace = bpf_get_perf_callchain(regs, kernel, user, max_depth, crosstask);
 	}
 
-	if (unlikely(!trace) || trace->nr < skip) {
+	if (unlikely(!trace)) {
+		if (may_fault)
+			rcu_read_unlock();
+		goto err_fault;
+	}
+	if (trace->nr < skip) {
 		if (may_fault)
 			rcu_read_unlock();
+		if (!trace_in)
+			bpf_put_perf_callchain(trace);
 		goto err_fault;
 	}
 
@@ -495,6 +532,8 @@ static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task,
 	/* trace/ips should not be dereferenced after this point */
 	if (may_fault)
 		rcu_read_unlock();
+	if (!trace_in)
+		bpf_put_perf_callchain(trace);
 
 	if (user_build_id)
 		stack_map_get_build_id_offset(buf, trace_nr, user, may_fault);
-- 
2.48.1


Thread overview: 28+ messages
2026-01-26  7:43 [PATCH bpf-next v8 0/3] Pass external callchain entry to get_perf_callchain Tao Chen
2026-01-26  7:43 ` [PATCH bpf-next v8 1/3] perf: Add rctx in perf_callchain_entry Tao Chen
2026-01-26  8:03   ` bot+bpf-ci
2026-01-26  8:51     ` Tao Chen
2026-01-27 21:01       ` Andrii Nakryiko
2026-01-28  2:41         ` Tao Chen
2026-01-28  8:59   ` Peter Zijlstra
2026-01-28 16:52     ` Tao Chen
2026-01-28 18:59       ` Andrii Nakryiko
2026-01-29  3:03         ` Tao Chen
2026-01-26  7:43 ` [PATCH bpf-next v8 2/3] perf: Refactor get_perf_callchain Tao Chen
2026-01-27 21:07   ` Andrii Nakryiko
2026-01-28  2:42     ` Tao Chen
2026-01-28  9:10   ` Peter Zijlstra
2026-01-28 16:49     ` Tao Chen
2026-01-28 19:12     ` Andrii Nakryiko
2026-01-30 11:31       ` Peter Zijlstra
2026-01-30 20:04         ` Andrii Nakryiko
2026-02-02 19:59           ` Peter Zijlstra
2026-02-04  0:24             ` Andrii Nakryiko
2026-02-04  1:08   ` Andrii Nakryiko
2026-02-05  6:16     ` Tao Chen
2026-02-05 17:34       ` Andrii Nakryiko
2026-02-06  9:20         ` Tao Chen
2026-01-26  7:43 ` Tao Chen [this message]
2026-01-27 21:35   ` [PATCH bpf-next v8 3/3] bpf: Hold the perf callchain entry until used completely Andrii Nakryiko
2026-01-28  4:21     ` Tao Chen
2026-01-28 19:13       ` Andrii Nakryiko
