From: Josh Poimboeuf <jpoimboe@kernel.org>
To: x86@kernel.org
Cc: Peter Zijlstra <peterz@infradead.org>,
	Steven Rostedt <rostedt@goodmis.org>,
	Ingo Molnar <mingo@kernel.org>,
	Arnaldo Carvalho de Melo <acme@kernel.org>,
	linux-kernel@vger.kernel.org,
	Indu Bhagat <indu.bhagat@oracle.com>,
	Mark Rutland <mark.rutland@arm.com>,
	Alexander Shishkin <alexander.shishkin@linux.intel.com>,
	Jiri Olsa <jolsa@kernel.org>, Namhyung Kim <namhyung@kernel.org>,
	Ian Rogers <irogers@google.com>,
	Adrian Hunter <adrian.hunter@intel.com>,
	linux-perf-users@vger.kernel.org, Mark Brown <broonie@kernel.org>,
	linux-toolchains@vger.kernel.org, Jordan Rome <jordalgo@meta.com>,
	Sam James <sam@gentoo.org>
Subject: [PATCH v2 09/11] perf: Introduce deferred user callchains
Date: Sat, 14 Sep 2024 01:02:11 +0200
Message-ID: <5bc834b68fe14daaa1782b925ab54fc414245334.1726268190.git.jpoimboe@kernel.org>
In-Reply-To: <cover.1726268190.git.jpoimboe@kernel.org>

Instead of attempting to unwind user space from the NMI handler, defer
the unwind to task context: send a self-IPI, then schedule the unwind
as task work that runs on IRQ exit, just before returning to user space.
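
In outline, the flow looks like this (a schematic sketch using the
identifiers added below, not a compilable excerpt; recursion protection
and refcounting details are omitted):

  /*
   * NMI: record the kernel callchain, emit a marker instead of
   * touching the user stack, and kick an irq_work (the self-IPI):
   */
  perf_callchain_store_context(&ctx, PERF_CONTEXT_USER_DEFERRED);
  event->pending_callchain = 1;
  irq_work_queue(&event->pending_irq);

  /*
   * IRQ (self-IPI): still no safe context to take page faults, so
   * punt to task work, which runs on the way back to user space:
   */
  task_work_add(current, &event->pending_task, TWA_RESUME);

  /*
   * Task work: page faults are allowed here, so unwind the user
   * stack and emit a PERF_RECORD_CALLCHAIN_DEFERRED record:
   */
  perf_event_callchain_deferred(event);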

This allows the user stack pages to be paged in if needed, avoids
duplicate unwinds for kernel-bound workloads (any number of samples
taken before the next return to user space share the same user stack,
so a single deferred unwind covers them all), and prepares for SFrame
unwinding (so .sframe sections can be paged in on demand).
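
On the tooling side, an event opts in with the new attr.defer_callchain
bit, and a consumer then stitches by replacing the
PERF_CONTEXT_USER_DEFERRED marker in each buffered sample with the
ips[] of the next matching PERF_RECORD_CALLCHAIN_DEFERRED record
(matched via sample_id, e.g. TID). A minimal user space sketch (the
stitch_callchain() helper is hypothetical and not part of this series,
but the constant and record semantics match the UAPI additions below):

  #include <stdint.h>
  #include <string.h>

  #define PERF_CONTEXT_USER_DEFERRED ((uint64_t)-640)

  /*
   * Merge a sample callchain whose user half was deferred with the
   * ips[] of the later PERF_RECORD_CALLCHAIN_DEFERRED record.  The
   * deferred ips[] already begin with PERF_CONTEXT_USER.  'out' must
   * have room for sample_nr - 1 + deferred_nr entries; returns the
   * number of entries written.
   */
  static uint64_t stitch_callchain(const uint64_t *sample_ips,
                                   uint64_t sample_nr,
                                   const uint64_t *deferred_ips,
                                   uint64_t deferred_nr,
                                   uint64_t *out)
  {
          uint64_t n = 0;

          for (uint64_t i = 0; i < sample_nr; i++) {
                  if (sample_ips[i] == PERF_CONTEXT_USER_DEFERRED) {
                          /* Splice in the deferred user callchain. */
                          memcpy(&out[n], deferred_ips,
                                 deferred_nr * sizeof(*out));
                          n += deferred_nr;
                  } else {
                          out[n++] = sample_ips[i];
                  }
          }
          return n;
  }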

Suggested-by: Steven Rostedt <rostedt@goodmis.org>
Suggested-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Josh Poimboeuf <jpoimboe@kernel.org>
---
 arch/Kconfig                    |  3 ++
 include/linux/perf_event.h      |  9 +++-
 include/uapi/linux/perf_event.h | 21 ++++++++-
 kernel/bpf/stackmap.c           |  5 +--
 kernel/events/callchain.c       | 12 +++++-
 kernel/events/core.c            | 76 ++++++++++++++++++++++++++++++++-
 6 files changed, 119 insertions(+), 7 deletions(-)

diff --git a/arch/Kconfig b/arch/Kconfig
index ff5d5bc5f947..0629c1aa2a5c 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -431,6 +431,9 @@ config HAVE_USER_UNWIND
 config HAVE_USER_UNWIND_SFRAME
 	bool
 
+config HAVE_PERF_CALLCHAIN_DEFERRED
+	bool
+
 config HAVE_PERF_REGS
 	bool
 	help
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 64f9efe19553..a617aad2851c 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -787,6 +787,7 @@ struct perf_event {
 	struct callback_head		pending_task;
 	unsigned int			pending_work;
 	struct rcuwait			pending_work_wait;
+	unsigned int			pending_callchain;
 
 	atomic_t			event_limit;
 
@@ -1541,12 +1542,18 @@ extern void perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct p
 extern void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs);
 extern struct perf_callchain_entry *
 get_perf_callchain(struct pt_regs *regs, bool kernel, bool user,
-		   u32 max_stack, bool add_mark);
+		   u32 max_stack, bool add_mark, bool defer_user);
 extern int get_callchain_buffers(int max_stack);
 extern void put_callchain_buffers(void);
 extern struct perf_callchain_entry *get_callchain_entry(int *rctx);
 extern void put_callchain_entry(int rctx);
 
+#ifdef CONFIG_HAVE_PERF_CALLCHAIN_DEFERRED
+extern void perf_callchain_user_deferred(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs);
+#else
+static inline void perf_callchain_user_deferred(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs) {}
+#endif
+
 extern int sysctl_perf_event_max_stack;
 extern int sysctl_perf_event_max_contexts_per_stack;
 
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index 4842c36fdf80..a7f875eb29dd 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -460,7 +460,8 @@ struct perf_event_attr {
 				inherit_thread :  1, /* children only inherit if cloned with CLONE_THREAD */
 				remove_on_exec :  1, /* event is removed from task on exec */
 				sigtrap        :  1, /* send synchronous SIGTRAP on event */
-				__reserved_1   : 26;
+				defer_callchain:  1, /* generate PERF_RECORD_CALLCHAIN_DEFERRED records */
+				__reserved_1   : 25;
 
 	union {
 		__u32		wakeup_events;	  /* wakeup every n events */
@@ -1217,6 +1218,23 @@ enum perf_event_type {
 	 */
 	PERF_RECORD_AUX_OUTPUT_HW_ID		= 21,
 
+	/*
+	 * This user callchain capture was deferred until shortly before
+	 * returning to user space.  Previous samples would have kernel
+	 * callchains only and they need to be stitched with this to make full
+	 * callchains.
+	 *
+	 * TODO: do PERF_SAMPLE_{REGS,STACK}_USER also need deferral?
+	 *
+	 * struct {
+	 *	struct perf_event_header	header;
+	 *	u64				nr;
+	 *	u64				ips[nr];
+	 *	struct sample_id		sample_id;
+	 * };
+	 */
+	PERF_RECORD_CALLCHAIN_DEFERRED		= 22,
+
 	PERF_RECORD_MAX,			/* non-ABI */
 };
 
@@ -1247,6 +1265,7 @@ enum perf_callchain_context {
 	PERF_CONTEXT_HV			= (__u64)-32,
 	PERF_CONTEXT_KERNEL		= (__u64)-128,
 	PERF_CONTEXT_USER		= (__u64)-512,
+	PERF_CONTEXT_USER_DEFERRED	= (__u64)-640,
 
 	PERF_CONTEXT_GUEST		= (__u64)-2048,
 	PERF_CONTEXT_GUEST_KERNEL	= (__u64)-2176,
diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
index ff6f0ef7ba5d..9d58a84e553a 100644
--- a/kernel/bpf/stackmap.c
+++ b/kernel/bpf/stackmap.c
@@ -297,8 +297,7 @@ BPF_CALL_3(bpf_get_stackid, struct pt_regs *, regs, struct bpf_map *, map,
 	if (max_depth > sysctl_perf_event_max_stack)
 		max_depth = sysctl_perf_event_max_stack;
 
-	trace = get_perf_callchain(regs, kernel, user, max_depth, false);
-
+	trace = get_perf_callchain(regs, kernel, user, max_depth, false, false);
 	if (unlikely(!trace))
 		/* couldn't fetch the stack trace */
 		return -EFAULT;
@@ -432,7 +431,7 @@ static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task,
 		trace = get_callchain_entry_for_task(task, max_depth);
 	else
 		trace = get_perf_callchain(regs, kernel, user, max_depth,
-					   false);
+					   false, false);
 	if (unlikely(!trace))
 		goto err_fault;
 
diff --git a/kernel/events/callchain.c b/kernel/events/callchain.c
index 2278402b7ac9..883f0ac9ef3a 100644
--- a/kernel/events/callchain.c
+++ b/kernel/events/callchain.c
@@ -217,7 +217,7 @@ static void fixup_uretprobe_trampoline_entries(struct perf_callchain_entry *entr
 
 struct perf_callchain_entry *
 get_perf_callchain(struct pt_regs *regs, bool kernel, bool user,
-		   u32 max_stack, bool add_mark)
+		   u32 max_stack, bool add_mark, bool defer_user)
 {
 	struct perf_callchain_entry *entry;
 	struct perf_callchain_entry_ctx ctx;
@@ -246,6 +246,16 @@ get_perf_callchain(struct pt_regs *regs, bool kernel, bool user,
 			regs = task_pt_regs(current);
 		}
 
+		if (defer_user) {
+			/*
+			 * Foretell the coming of a
+			 * PERF_RECORD_CALLCHAIN_DEFERRED sample which can be
+			 * stitched to this one.
+			 */
+			perf_callchain_store_context(&ctx, PERF_CONTEXT_USER_DEFERRED);
+			goto exit_put;
+		}
+
 		if (add_mark)
 			perf_callchain_store_context(&ctx, PERF_CONTEXT_USER);
 
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 19fd7bd38ecf..5fc7c5156287 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -6835,6 +6835,12 @@ static void perf_pending_irq(struct irq_work *entry)
 	struct perf_event *event = container_of(entry, struct perf_event, pending_irq);
 	int rctx;
 
+	if (!is_software_event(event)) {
+		if (event->pending_callchain)
+			task_work_add(current, &event->pending_task, TWA_RESUME);
+		return;
+	}
+
 	/*
 	 * If we 'fail' here, that's OK, it means recursion is already disabled
 	 * and we won't recurse 'further'.
@@ -6854,11 +6860,70 @@ static void perf_pending_irq(struct irq_work *entry)
 		perf_swevent_put_recursion_context(rctx);
 }
 
+struct perf_callchain_deferred_event {
+	struct perf_event_header	header;
+	struct perf_callchain_entry	callchain;
+};
+
+#define PERF_CALLCHAIN_DEFERRED_EVENT_SIZE				\
+	sizeof(struct perf_callchain_deferred_event) +			\
+	(sizeof(__u64) * 1) + /* PERF_CONTEXT_USER */			\
+	(sizeof(__u64) * PERF_MAX_STACK_DEPTH)
+
+static void perf_event_callchain_deferred(struct perf_event *event)
+{
+	struct pt_regs *regs = task_pt_regs(current);
+	struct perf_callchain_entry *callchain;
+	struct perf_output_handle handle;
+	struct perf_sample_data data;
+	unsigned char buf[PERF_CALLCHAIN_DEFERRED_EVENT_SIZE];
+	struct perf_callchain_entry_ctx ctx;
+	struct perf_callchain_deferred_event *deferred_event;
+
+	deferred_event = (void *)&buf;
+
+	callchain = &deferred_event->callchain;
+	callchain->nr = 0;
+
+	ctx.entry		= callchain;
+	ctx.max_stack		= MIN(event->attr.sample_max_stack,
+				      PERF_MAX_STACK_DEPTH);
+	ctx.nr			= 0;
+	ctx.contexts		= 0;
+	ctx.contexts_maxed	= false;
+
+	perf_callchain_store_context(&ctx, PERF_CONTEXT_USER);
+	perf_callchain_user_deferred(&ctx, regs);
+
+	deferred_event->header.type = PERF_RECORD_CALLCHAIN_DEFERRED;
+	deferred_event->header.misc = 0;
+	deferred_event->header.size = sizeof(*deferred_event) +
+				      (callchain->nr * sizeof(u64));
+
+	perf_event_header__init_id(&deferred_event->header, &data, event);
+
+	if (perf_output_begin(&handle, &data, event,
+			      deferred_event->header.size))
+		return;
+
+	perf_output_copy(&handle, deferred_event, deferred_event->header.size);
+	perf_event__output_id_sample(event, &handle, &data);
+	perf_output_end(&handle);
+}
+
 static void perf_pending_task(struct callback_head *head)
 {
 	struct perf_event *event = container_of(head, struct perf_event, pending_task);
 	int rctx;
 
+	if (!is_software_event(event)) {
+		if (event->pending_callchain) {
+			perf_event_callchain_deferred(event);
+			event->pending_callchain = 0;
+		}
+		return;
+	}
+
 	/*
 	 * All accesses to the event must belong to the same implicit RCU read-side
 	 * critical section as the ->pending_work reset. See comment in
@@ -7688,6 +7753,8 @@ perf_callchain(struct perf_event *event, struct pt_regs *regs)
 	bool user   = !event->attr.exclude_callchain_user;
 	const u32 max_stack = event->attr.sample_max_stack;
 	struct perf_callchain_entry *callchain;
+	bool defer_user = IS_ENABLED(CONFIG_HAVE_PERF_CALLCHAIN_DEFERRED) &&
+			  event->attr.defer_callchain;
 
 	/* Disallow cross-task user callchains. */
 	user &= !event->ctx->task || event->ctx->task == current;
@@ -7695,7 +7762,14 @@ perf_callchain(struct perf_event *event, struct pt_regs *regs)
 	if (!kernel && !user)
 		return &__empty_callchain;
 
-	callchain = get_perf_callchain(regs, kernel, user, max_stack, true);
+	callchain = get_perf_callchain(regs, kernel, user, max_stack, true,
+				       defer_user);
+
+	if (user && defer_user && !event->pending_callchain) {
+		event->pending_callchain = 1;
+		irq_work_queue(&event->pending_irq);
+	}
+
 	return callchain ?: &__empty_callchain;
 }
 
-- 
2.46.0

