From: Steven Rostedt <rostedt@goodmis.org>
To: linux-kernel@vger.kernel.org, linux-trace-kernel@vger.kernel.org,
	bpf@vger.kernel.org, x86@kernel.org
Cc: Masami Hiramatsu <mhiramat@kernel.org>,
	Mathieu Desnoyers <mathieu.desnoyers@efficios.com>,
	Josh Poimboeuf <jpoimboe@kernel.org>,
	Peter Zijlstra <peterz@infradead.org>,
	Ingo Molnar <mingo@kernel.org>, Jiri Olsa <jolsa@kernel.org>,
	Namhyung Kim <namhyung@kernel.org>
Subject: [PATCH v8 11/18] unwind deferred: Use bitmask to determine which callbacks to call
Date: Fri, 09 May 2025 12:45:35 -0400
Message-ID: <20250509165155.459900954@goodmis.org>
In-Reply-To: <20250509164524.448387100@goodmis.org>

From: Steven Rostedt <rostedt@goodmis.org>

In order to know which registered callbacks requested a stacktrace to be
taken when the task goes back to user space, add a bitmask covering all
registered tracers. The bitmask is the size of a long, which means that a
32 bit machine can have at most 32 registered tracers and a 64 bit machine
at most 64. This should not be an issue, as there should not be more than
10 (unless BPF can abuse this?).
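
To illustrate, registering a tracer just claims the first zero bit of the
global mask (a condensed sketch of the unwind_deferred_init() change
below):

	guard(mutex)(&callback_mutex);

	if (unwind_mask == ~0UL)	/* all BITS_PER_LONG bits taken */
		return -EBUSY;

	work->bit = ffz(unwind_mask);	/* first zero bit in the mask */
	unwind_mask |= 1UL << work->bit;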

When a tracer registers with unwind_deferred_init(), it is assigned a bit
number. When that tracer requests a stacktrace, its bit is set in the
task_struct of the current task. When the task returns to user space, the
callbacks of all registered tracers whose bits are set in the task's mask
are called.
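
For example, a tracer would use the interface roughly as follows
(my_unwind_work and my_unwind_cb are made-up names for illustration):

	static struct unwind_work my_unwind_work;

	/* Runs from task_work just before returning to user space */
	static void my_unwind_cb(struct unwind_work *work,
				 struct unwind_stacktrace *trace, u64 timestamp)
	{
		/* consume the user stacktrace in *trace here */
	}

	/* Tracer start up: assigns a bit to this tracer */
	ret = unwind_deferred_init(&my_unwind_work, my_unwind_cb);

	/* In event context (including NMI): sets this tracer's bit in
	 * current->unwind_mask and queues the task_work if it is not
	 * already pending; returns 1 if this tracer's callback was
	 * already queued for this task. */
	ret = unwind_deferred_request(&my_unwind_work, &timestamp);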

When a tracer is removed by unwind_deferred_cancel(), its bit is cleared
in all current tasks. Otherwise, another tracer registered immediately
afterward could be handed the same bit and have its callback called
unexpectedly.
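
Concretely, the reuse hazard that the clearing prevents looks like this
(tracer_a, tracer_b and tracer_b_cb are hypothetical):

	unwind_deferred_cancel(&tracer_a);		/* frees e.g. bit 0 */
	unwind_deferred_init(&tracer_b, tracer_b_cb);	/* ffz() reassigns bit 0 */

	/* Without clearing bit 0 in every task on cancel, a task that
	 * still had tracer_a's request pending would call tracer_b's
	 * callback on its next return to user space. */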

Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
---
 include/linux/sched.h           |  1 +
 include/linux/unwind_deferred.h |  1 +
 kernel/unwind/deferred.c        | 46 ++++++++++++++++++++++++++++-----
 3 files changed, 41 insertions(+), 7 deletions(-)

diff --git a/include/linux/sched.h b/include/linux/sched.h
index a1e1c07cadfb..d3ee0c5405d6 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1649,6 +1649,7 @@ struct task_struct {
 
 #ifdef CONFIG_UNWIND_USER
 	struct unwind_task_info		unwind_info;
+	unsigned long			unwind_mask;
 #endif
 
 	/* CPU-specific state of this task: */
diff --git a/include/linux/unwind_deferred.h b/include/linux/unwind_deferred.h
index a384eef719a3..1789c3624723 100644
--- a/include/linux/unwind_deferred.h
+++ b/include/linux/unwind_deferred.h
@@ -13,6 +13,7 @@ typedef void (*unwind_callback_t)(struct unwind_work *work, struct unwind_stackt
 struct unwind_work {
 	struct list_head		list;
 	unwind_callback_t		func;
+	int				bit;
 };
 
 #ifdef CONFIG_UNWIND_USER
diff --git a/kernel/unwind/deferred.c b/kernel/unwind/deferred.c
index 238cd97079ec..7ae0bec5b36a 100644
--- a/kernel/unwind/deferred.c
+++ b/kernel/unwind/deferred.c
@@ -16,6 +16,7 @@
 /* Guards adding to and reading the list of callbacks */
 static DEFINE_MUTEX(callback_mutex);
 static LIST_HEAD(callbacks);
+static unsigned long unwind_mask;
 
 /*
  * Read the task context timestamp, if this is the first caller then
@@ -106,6 +107,7 @@ static void unwind_deferred_task_work(struct callback_head *head)
 	struct unwind_stacktrace trace;
 	struct unwind_work *work;
 	u64 timestamp;
+	struct task_struct *task = current;
 
 	if (WARN_ON_ONCE(!info->pending))
 		return;
@@ -133,7 +135,10 @@ static void unwind_deferred_task_work(struct callback_head *head)
 
 	guard(mutex)(&callback_mutex);
 	list_for_each_entry(work, &callbacks, list) {
-		work->func(work, &trace, timestamp);
+		if (task->unwind_mask & (1UL << work->bit)) {
+			work->func(work, &trace, timestamp);
+			clear_bit(work->bit, &current->unwind_mask);
+		}
 	}
 }
 
@@ -159,9 +164,12 @@ static int unwind_deferred_request_nmi(struct unwind_work *work, u64 *timestamp)
 		inited_timestamp = true;
 	}
 
-	if (info->pending)
+	if (current->unwind_mask & (1UL << work->bit))
 		return 1;
 
+	if (info->pending)
+		goto out;
+
 	ret = task_work_add(current, &info->work, TWA_NMI_CURRENT);
 	if (ret) {
 		/*
@@ -175,8 +183,8 @@ static int unwind_deferred_request_nmi(struct unwind_work *work, u64 *timestamp)
 	}
 
 	info->pending = 1;
-
-	return 0;
+out:
+	return test_and_set_bit(work->bit, &current->unwind_mask);
 }
 
 /**
@@ -223,14 +231,18 @@ int unwind_deferred_request(struct unwind_work *work, u64 *timestamp)
 
 	*timestamp = get_timestamp(info);
 
+	/* This is already queued */
+	if (current->unwind_mask & (1UL << work->bit))
+		return 1;
+
 	/* callback already pending? */
 	pending = READ_ONCE(info->pending);
 	if (pending)
-		return 1;
+		goto out;
 
 	/* Claim the work unless an NMI just now swooped in to do so. */
 	if (!try_cmpxchg(&info->pending, &pending, 1))
-		return 1;
+		goto out;
 
 	/* The work has been claimed, now schedule it. */
 	ret = task_work_add(current, &info->work, TWA_RESUME);
@@ -239,16 +251,27 @@ int unwind_deferred_request(struct unwind_work *work, u64 *timestamp)
 		return ret;
 	}
 
-	return 0;
+ out:
+	return test_and_set_bit(work->bit, &current->unwind_mask);
 }
 
 void unwind_deferred_cancel(struct unwind_work *work)
 {
+	struct task_struct *g, *t;
+
 	if (!work)
 		return;
 
 	guard(mutex)(&callback_mutex);
 	list_del(&work->list);
+
+	clear_bit(work->bit, &unwind_mask);
+
+	guard(rcu)();
+	/* Clear this bit from all threads */
+	for_each_process_thread(g, t) {
+		clear_bit(work->bit, &t->unwind_mask);
+	}
 }
 
 int unwind_deferred_init(struct unwind_work *work, unwind_callback_t func)
@@ -256,6 +279,14 @@ int unwind_deferred_init(struct unwind_work *work, unwind_callback_t func)
 	memset(work, 0, sizeof(*work));
 
 	guard(mutex)(&callback_mutex);
+
+	/* See if there's a bit in the mask available */
+	if (unwind_mask == ~0UL)
+		return -EBUSY;
+
+	work->bit = ffz(unwind_mask);
+	unwind_mask |= 1UL << work->bit;
+
 	list_add(&work->list, &callbacks);
 	work->func = func;
 	return 0;
@@ -267,6 +298,7 @@ void unwind_task_init(struct task_struct *task)
 
 	memset(info, 0, sizeof(*info));
 	init_task_work(&info->work, unwind_deferred_task_work);
+	task->unwind_mask = 0;
 }
 
 void unwind_task_free(struct task_struct *task)
-- 
2.47.2



