From: Josh Poimboeuf <jpoimboe@kernel.org>
To: x86@kernel.org
Cc: Peter Zijlstra <peterz@infradead.org>,
	Steven Rostedt <rostedt@goodmis.org>,
	Ingo Molnar <mingo@kernel.org>,
	Arnaldo Carvalho de Melo <acme@kernel.org>,
	linux-kernel@vger.kernel.org,
	Indu Bhagat <indu.bhagat@oracle.com>,
	Mark Rutland <mark.rutland@arm.com>,
	Alexander Shishkin <alexander.shishkin@linux.intel.com>,
	Jiri Olsa <jolsa@kernel.org>, Namhyung Kim <namhyung@kernel.org>,
	Ian Rogers <irogers@google.com>,
	Adrian Hunter <adrian.hunter@intel.com>,
	linux-perf-users@vger.kernel.org, Mark Brown <broonie@kernel.org>,
	linux-toolchains@vger.kernel.org, Jordan Rome <jordalgo@meta.com>,
	Sam James <sam@gentoo.org>,
	linux-trace-kernel@vger.kernel.org,
	Andrii Nakryiko <andrii.nakryiko@gmail.com>,
	Jens Remus <jremus@linux.ibm.com>,
	Mathieu Desnoyers <mathieu.desnoyers@efficios.com>,
	Florian Weimer <fweimer@redhat.com>,
	Andy Lutomirski <luto@kernel.org>,
	Masami Hiramatsu <mhiramat@kernel.org>,
	Weinan Liu <wnliu@google.com>
Subject: [PATCH v4 35/39] perf: Support deferred user callchains
Date: Tue, 21 Jan 2025 18:31:27 -0800	[thread overview]
Message-ID: <2e54e6f1c914b219b889fbb47bc33d4749c3ad87.1737511963.git.jpoimboe@kernel.org> (raw)
In-Reply-To: <cover.1737511963.git.jpoimboe@kernel.org>

Use the new unwind_deferred_request() interface (if available) to defer
user space unwinds to task context, shortly before returning to user
space.  This allows the use of .sframe (if available) and also avoids
duplicate unwinds of the same user stack when multiple samples arrive
before the task returns to user space.
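
A minimal user-space sketch of the intended stitching, assuming the
PERF_RECORD_CALLCHAIN_DEFERRED layout documented below (the helper name
and buffer handling are hypothetical, not part of this patch):

  #include <stdint.h>
  #include <string.h>
  #include <linux/perf_event.h>

  /*
   * Replace the trailing PERF_CONTEXT_USER_DEFERRED marker in a sample's
   * callchain with the user frames from a later deferred record for the
   * same task.  user_ips[0] is the PERF_CONTEXT_USER marker emitted by
   * the kernel, so it can be copied through as-is.
   */
  static int stitch_callchain(uint64_t *ips, uint64_t *nr, uint64_t max,
                              const uint64_t *user_ips, uint64_t user_nr)
  {
          if (!*nr || ips[*nr - 1] != PERF_CONTEXT_USER_DEFERRED)
                  return 0;                       /* nothing to stitch */
          if (*nr - 1 + user_nr > max)
                  return -1;                      /* not enough room */
          memcpy(&ips[*nr - 1], user_ips, user_nr * sizeof(*user_ips));
          *nr += user_nr - 1;
          return 0;
  }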

Suggested-by: Steven Rostedt <rostedt@goodmis.org>
Suggested-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Josh Poimboeuf <jpoimboe@kernel.org>
---
 arch/Kconfig                          |   3 +
 include/linux/perf_event.h            |  13 +++-
 include/uapi/linux/perf_event.h       |  19 ++++-
 kernel/bpf/stackmap.c                 |   6 +-
 kernel/events/callchain.c             |  11 ++-
 kernel/events/core.c                  | 103 +++++++++++++++++++++++++-
 tools/include/uapi/linux/perf_event.h |  19 ++++-
 7 files changed, 166 insertions(+), 8 deletions(-)

diff --git a/arch/Kconfig b/arch/Kconfig
index b3676605bab6..83ab94af46ca 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -472,6 +472,9 @@ config SFRAME_VALIDATION
 
 	  If unsure, say N.
 
+config HAVE_PERF_CALLCHAIN_DEFERRED
+	bool
+
 config HAVE_PERF_REGS
 	bool
 	help
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 1563dc2cd979..7fd54e4d2084 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -62,6 +62,7 @@ struct perf_guest_info_callbacks {
 #include <linux/security.h>
 #include <linux/static_call.h>
 #include <linux/lockdep.h>
+#include <linux/unwind_deferred.h>
 #include <asm/local.h>
 
 struct perf_callchain_entry {
@@ -833,6 +834,10 @@ struct perf_event {
 	unsigned int			pending_work;
 	struct rcuwait			pending_work_wait;
 
+	struct unwind_work		pending_unwind_work;
+	struct rcuwait			pending_unwind_wait;
+	unsigned int			pending_unwind_callback;
+
 	atomic_t			event_limit;
 
 	/* address range filters */
@@ -1590,12 +1595,18 @@ extern void perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct p
 extern void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs);
 extern struct perf_callchain_entry *
 get_perf_callchain(struct pt_regs *regs, bool kernel, bool user,
-		   u32 max_stack, bool add_mark);
+		   u32 max_stack, bool add_mark, bool defer_user);
 extern int get_callchain_buffers(int max_stack);
 extern void put_callchain_buffers(void);
 extern struct perf_callchain_entry *get_callchain_entry(int *rctx);
 extern void put_callchain_entry(int rctx);
 
+#ifdef CONFIG_HAVE_PERF_CALLCHAIN_DEFERRED
+extern void perf_callchain_user_deferred(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs);
+#else
+static inline void perf_callchain_user_deferred(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs) {}
+#endif
+
 extern int sysctl_perf_event_max_stack;
 extern int sysctl_perf_event_max_contexts_per_stack;
 
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index 0524d541d4e3..16307be57de9 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -460,7 +460,8 @@ struct perf_event_attr {
 				inherit_thread :  1, /* children only inherit if cloned with CLONE_THREAD */
 				remove_on_exec :  1, /* event is removed from task on exec */
 				sigtrap        :  1, /* send synchronous SIGTRAP on event */
-				__reserved_1   : 26;
+				defer_callchain:  1, /* generate PERF_RECORD_CALLCHAIN_DEFERRED records */
+				__reserved_1   : 25;
 
 	union {
 		__u32		wakeup_events;	  /* wakeup every n events */
@@ -1226,6 +1227,21 @@ enum perf_event_type {
 	 */
 	PERF_RECORD_AUX_OUTPUT_HW_ID		= 21,
 
+	/*
+	 * This user callchain capture was deferred until shortly before
+	 * returning to user space.  The matching samples contain only the
+	 * kernel part of the callchain and need to be stitched with this
+	 * record to form full callchains.
+	 *
+	 * struct {
+	 *	struct perf_event_header	header;
+	 *	u64				nr;
+	 *	u64				ips[nr];
+	 *	struct sample_id		sample_id;
+	 * };
+	 */
+	PERF_RECORD_CALLCHAIN_DEFERRED		= 22,
+
 	PERF_RECORD_MAX,			/* non-ABI */
 };
 
@@ -1256,6 +1272,7 @@ enum perf_callchain_context {
 	PERF_CONTEXT_HV			= (__u64)-32,
 	PERF_CONTEXT_KERNEL		= (__u64)-128,
 	PERF_CONTEXT_USER		= (__u64)-512,
+	PERF_CONTEXT_USER_DEFERRED	= (__u64)-640,
 
 	PERF_CONTEXT_GUEST		= (__u64)-2048,
 	PERF_CONTEXT_GUEST_KERNEL	= (__u64)-2176,
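
For reference, a minimal sketch of opening an event with the new bit
(hypothetical event choice and period, assuming a kernel with this
series applied and updated uapi headers):

  #include <string.h>
  #include <unistd.h>
  #include <sys/syscall.h>
  #include <linux/perf_event.h>

  static int open_deferred_event(pid_t pid)
  {
          struct perf_event_attr attr;

          memset(&attr, 0, sizeof(attr));
          attr.size = sizeof(attr);
          attr.type = PERF_TYPE_HARDWARE;
          attr.config = PERF_COUNT_HW_CPU_CYCLES;
          attr.sample_period = 100003;
          /* TID/TIME allow matching deferred records to earlier samples */
          attr.sample_type = PERF_SAMPLE_CALLCHAIN | PERF_SAMPLE_TID |
                             PERF_SAMPLE_TIME;
          attr.sample_max_stack = 127;
          attr.defer_callchain = 1;   /* request deferred user callchains */

          /* cpu == -1: follow the task across CPUs */
          return syscall(SYS_perf_event_open, &attr, pid, -1, -1, 0);
  }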
diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
index ee9701337912..f073ebaf9c30 100644
--- a/kernel/bpf/stackmap.c
+++ b/kernel/bpf/stackmap.c
@@ -314,8 +314,7 @@ BPF_CALL_3(bpf_get_stackid, struct pt_regs *, regs, struct bpf_map *, map,
 	if (max_depth > sysctl_perf_event_max_stack)
 		max_depth = sysctl_perf_event_max_stack;
 
-	trace = get_perf_callchain(regs, kernel, user, max_depth, false);
-
+	trace = get_perf_callchain(regs, kernel, user, max_depth, false, false);
 	if (unlikely(!trace))
 		/* couldn't fetch the stack trace */
 		return -EFAULT;
@@ -448,7 +447,8 @@ static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task,
 	else if (kernel && task)
 		trace = get_callchain_entry_for_task(task, max_depth);
 	else
-		trace = get_perf_callchain(regs, kernel, user, max_depth,false);
+		trace = get_perf_callchain(regs, kernel, user, max_depth,
+					   false, false);
 
 	if (unlikely(!trace) || trace->nr < skip) {
 		if (may_fault)
diff --git a/kernel/events/callchain.c b/kernel/events/callchain.c
index 2278402b7ac9..eeb15ba0137f 100644
--- a/kernel/events/callchain.c
+++ b/kernel/events/callchain.c
@@ -217,7 +217,7 @@ static void fixup_uretprobe_trampoline_entries(struct perf_callchain_entry *entr
 
 struct perf_callchain_entry *
 get_perf_callchain(struct pt_regs *regs, bool kernel, bool user,
-		   u32 max_stack, bool add_mark)
+		   u32 max_stack, bool add_mark, bool defer_user)
 {
 	struct perf_callchain_entry *entry;
 	struct perf_callchain_entry_ctx ctx;
@@ -246,6 +246,15 @@ get_perf_callchain(struct pt_regs *regs, bool kernel, bool user,
 			regs = task_pt_regs(current);
 		}
 
+		if (defer_user) {
+			/*
+			 * Foretell the coming of PERF_RECORD_CALLCHAIN_DEFERRED
+			 * which can be stitched to this one.
+			 */
+			perf_callchain_store_context(&ctx, PERF_CONTEXT_USER_DEFERRED);
+			goto exit_put;
+		}
+
 		if (add_mark)
 			perf_callchain_store_context(&ctx, PERF_CONTEXT_USER);
 
diff --git a/kernel/events/core.c b/kernel/events/core.c
index a886bb83f4d0..32603bbd797d 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -55,6 +55,7 @@
 #include <linux/pgtable.h>
 #include <linux/buildid.h>
 #include <linux/task_work.h>
+#include <linux/unwind_deferred.h>
 
 #include "internal.h"
 
@@ -5312,11 +5313,37 @@ static void perf_pending_task_sync(struct perf_event *event)
 	rcuwait_wait_event(&event->pending_work_wait, !event->pending_work, TASK_UNINTERRUPTIBLE);
 }
 
+static void perf_pending_unwind_sync(struct perf_event *event)
+{
+	might_sleep();
+
+	if (!event->pending_unwind_callback)
+		return;
+
+	/*
+	 * If the work is queued on the current task, we obviously can't
+	 * wait for it to complete.  Simply cancel it.
+	 */
+	if (unwind_deferred_cancel(current, &event->pending_unwind_work)) {
+		event->pending_unwind_callback = 0;
+		local_dec(&event->ctx->nr_no_switch_fast);
+		return;
+	}
+
+	/*
+	 * All accesses related to the event are within the same RCU section in
+	 * perf_event_callchain_deferred(). The RCU grace period before the
+	 * event is freed will make sure all those accesses are complete by then.
+	 */
+	rcuwait_wait_event(&event->pending_unwind_wait, !event->pending_unwind_callback, TASK_UNINTERRUPTIBLE);
+}
+
 static void _free_event(struct perf_event *event)
 {
 	irq_work_sync(&event->pending_irq);
 	irq_work_sync(&event->pending_disable_irq);
 	perf_pending_task_sync(event);
+	perf_pending_unwind_sync(event);
 
 	unaccount_event(event);
 
@@ -6933,6 +6960,61 @@ static void perf_pending_irq(struct irq_work *entry)
 		perf_swevent_put_recursion_context(rctx);
 }
 
+
+struct perf_callchain_deferred_event {
+	struct perf_event_header	header;
+	u64				nr;
+	u64				ips[];
+};
+
+static void perf_event_callchain_deferred(struct unwind_work *work, struct unwind_stacktrace *trace, u64 cookie)
+{
+	struct perf_event *event = container_of(work, struct perf_event, pending_unwind_work);
+	struct perf_callchain_deferred_event deferred_event;
+	u64 callchain_context = PERF_CONTEXT_USER;
+	struct perf_output_handle handle;
+	struct perf_sample_data data;
+	u64 nr = trace->nr + 1; /* +1 == callchain_context */
+
+	if (WARN_ON_ONCE(!event->pending_unwind_callback))
+		return;
+
+	/*
+	 * All accesses to the event must belong to the same implicit RCU
+	 * read-side critical section as the ->pending_unwind_callback reset.
+	 * See comment in perf_pending_unwind_sync().
+	 */
+	rcu_read_lock();
+
+	if (!current->mm)
+		goto out;
+
+	deferred_event.header.type = PERF_RECORD_CALLCHAIN_DEFERRED;
+	deferred_event.header.misc = PERF_RECORD_MISC_USER;
+	deferred_event.header.size = sizeof(deferred_event) + (nr * sizeof(u64));
+
+	deferred_event.nr = nr;
+
+	perf_event_header__init_id(&deferred_event.header, &data, event);
+
+	if (perf_output_begin(&handle, &data, event, deferred_event.header.size))
+		goto out;
+
+	perf_output_put(&handle, deferred_event);
+	perf_output_put(&handle, callchain_context);
+	perf_output_copy(&handle, trace->entries, trace->nr * sizeof(u64));
+	perf_event__output_id_sample(event, &handle, &data);
+
+	perf_output_end(&handle);
+
+out:
+	event->pending_unwind_callback = 0;
+	local_dec(&event->ctx->nr_no_switch_fast);
+	rcuwait_wake_up(&event->pending_unwind_wait);
+
+	rcu_read_unlock();
+}
+
 static void perf_pending_task(struct callback_head *head)
 {
 	struct perf_event *event = container_of(head, struct perf_event, pending_task);
@@ -7795,6 +7877,8 @@ perf_callchain(struct perf_event *event, struct pt_regs *regs)
 	bool user   = !event->attr.exclude_callchain_user && current->mm;
 	const u32 max_stack = event->attr.sample_max_stack;
 	struct perf_callchain_entry *callchain;
+	bool defer_user = IS_ENABLED(CONFIG_UNWIND_USER) && user &&
+			  event->attr.defer_callchain;
 
 	if (!kernel && !user)
 		return &__empty_callchain;
@@ -7803,7 +7887,21 @@ perf_callchain(struct perf_event *event, struct pt_regs *regs)
 	if (event->ctx->task && event->ctx->task != current)
 		return &__empty_callchain;
 
-	callchain = get_perf_callchain(regs, kernel, user, max_stack, true);
+	if (defer_user && !event->pending_unwind_callback) {
+		u64 cookie;
+
+		if (!unwind_deferred_request(&event->pending_unwind_work, &cookie)) {
+			event->pending_unwind_callback = 1;
+			local_inc(&event->ctx->nr_no_switch_fast);
+		}
+
+		if (!cookie)
+			defer_user = false;
+	}
+
+	callchain = get_perf_callchain(regs, kernel, user, max_stack, true,
+				       defer_user);
+
 	return callchain ?: &__empty_callchain;
 }
 
@@ -12225,6 +12323,9 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
 	init_task_work(&event->pending_task, perf_pending_task);
 	rcuwait_init(&event->pending_work_wait);
 
+	unwind_deferred_init(&event->pending_unwind_work, perf_event_callchain_deferred);
+	rcuwait_init(&event->pending_unwind_wait);
+
 	mutex_init(&event->mmap_mutex);
 	raw_spin_lock_init(&event->addr_filters.lock);
 
diff --git a/tools/include/uapi/linux/perf_event.h b/tools/include/uapi/linux/perf_event.h
index 0524d541d4e3..16307be57de9 100644
--- a/tools/include/uapi/linux/perf_event.h
+++ b/tools/include/uapi/linux/perf_event.h
@@ -460,7 +460,8 @@ struct perf_event_attr {
 				inherit_thread :  1, /* children only inherit if cloned with CLONE_THREAD */
 				remove_on_exec :  1, /* event is removed from task on exec */
 				sigtrap        :  1, /* send synchronous SIGTRAP on event */
-				__reserved_1   : 26;
+				defer_callchain:  1, /* generate PERF_RECORD_CALLCHAIN_DEFERRED records */
+				__reserved_1   : 25;
 
 	union {
 		__u32		wakeup_events;	  /* wakeup every n events */
@@ -1226,6 +1227,21 @@ enum perf_event_type {
 	 */
 	PERF_RECORD_AUX_OUTPUT_HW_ID		= 21,
 
+	/*
+	 * This user callchain capture was deferred until shortly before
+	 * returning to user space.  The matching samples contain only the
+	 * kernel part of the callchain and need to be stitched with this
+	 * record to form full callchains.
+	 *
+	 * struct {
+	 *	struct perf_event_header	header;
+	 *	u64				nr;
+	 *	u64				ips[nr];
+	 *	struct sample_id		sample_id;
+	 * };
+	 */
+	PERF_RECORD_CALLCHAIN_DEFERRED		= 22,
+
 	PERF_RECORD_MAX,			/* non-ABI */
 };
 
@@ -1256,6 +1272,7 @@ enum perf_callchain_context {
 	PERF_CONTEXT_HV			= (__u64)-32,
 	PERF_CONTEXT_KERNEL		= (__u64)-128,
 	PERF_CONTEXT_USER		= (__u64)-512,
+	PERF_CONTEXT_USER_DEFERRED	= (__u64)-640,
 
 	PERF_CONTEXT_GUEST		= (__u64)-2048,
 	PERF_CONTEXT_GUEST_KERNEL	= (__u64)-2176,
-- 
2.48.1

