From: Steven Rostedt <rostedt@goodmis.org>
To: linux-kernel@vger.kernel.org, linux-trace-kernel@vger.kernel.org,
bpf@vger.kernel.org, x86@kernel.org
Cc: Masami Hiramatsu <mhiramat@kernel.org>,
Mathieu Desnoyers <mathieu.desnoyers@efficios.com>,
Josh Poimboeuf <jpoimboe@kernel.org>,
Peter Zijlstra <peterz@infradead.org>,
Ingo Molnar <mingo@kernel.org>, Jiri Olsa <jolsa@kernel.org>,
Namhyung Kim <namhyung@kernel.org>
Subject: [PATCH v8 12/18] unwind deferred: Use SRCU unwind_deferred_task_work()
Date: Fri, 09 May 2025 12:45:36 -0400
Message-ID: <20250509165155.628873521@goodmis.org>
In-Reply-To: <20250509164524.448387100@goodmis.org>
From: Steven Rostedt <rostedt@goodmis.org>
Instead of using the callback_mutex to protect the linked list of callbacks
in unwind_deferred_task_work(), use SRCU. This function is called every time
a task that has to record a requested stack trace exits, which can happen for
many tasks on several CPUs at the same time. A mutex there is a bottleneck
that causes contention and slows things down.

As the callbacks themselves are allowed to sleep, regular RCU cannot be used
to protect the list. Use SRCU instead, as it still allows the callbacks to
sleep while letting the list be read without holding the callback_mutex.
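In short, the pattern this moves to looks roughly like the sketch below
(illustrative only, reusing the same work, callbacks, trace and timestamp
variables as the diff that follows): the reader walks the list inside an
SRCU read-side critical section, where the callbacks may sleep, and the
cancel path unlinks the entry with list_del_rcu() and then waits for all
readers with synchronize_srcu().

	DEFINE_STATIC_SRCU(unwind_srcu);

	/* Reader side (unwind_deferred_task_work); work->func() may sleep */
	idx = srcu_read_lock(&unwind_srcu);
	list_for_each_entry_srcu(work, &callbacks, list,
				 srcu_read_lock_held(&unwind_srcu)) {
		work->func(work, &trace, timestamp);
	}
	srcu_read_unlock(&unwind_srcu, idx);

	/* Writer side (unwind_deferred_cancel); still serialized by callback_mutex */
	guard(mutex)(&callback_mutex);
	list_del_rcu(&work->list);
	synchronize_srcu(&unwind_srcu);	/* wait for readers before the work can go away */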
Link: https://lore.kernel.org/all/ca9bd83a-6c80-4ee0-a83c-224b9d60b755@efficios.com/
Suggested-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
---
kernel/unwind/deferred.c | 33 +++++++++++++++++++++++++--------
1 file changed, 25 insertions(+), 8 deletions(-)
diff --git a/kernel/unwind/deferred.c b/kernel/unwind/deferred.c
index 7ae0bec5b36a..5d6976ee648f 100644
--- a/kernel/unwind/deferred.c
+++ b/kernel/unwind/deferred.c
@@ -13,10 +13,11 @@
#define UNWIND_MAX_ENTRIES 512
-/* Guards adding to and reading the list of callbacks */
+/* Guards adding to or removing from the list of callbacks */
static DEFINE_MUTEX(callback_mutex);
static LIST_HEAD(callbacks);
static unsigned long unwind_mask;
+DEFINE_STATIC_SRCU(unwind_srcu);
/*
* Read the task context timestamp, if this is the first caller then
@@ -108,6 +109,7 @@ static void unwind_deferred_task_work(struct callback_head *head)
struct unwind_work *work;
u64 timestamp;
struct task_struct *task = current;
+ int idx;
if (WARN_ON_ONCE(!info->pending))
return;
@@ -133,13 +135,15 @@ static void unwind_deferred_task_work(struct callback_head *head)
timestamp = info->timestamp;
- guard(mutex)(&callback_mutex);
- list_for_each_entry(work, &callbacks, list) {
+ idx = srcu_read_lock(&unwind_srcu);
+ list_for_each_entry_srcu(work, &callbacks, list,
+ srcu_read_lock_held(&unwind_srcu)) {
if (task->unwind_mask & (1UL << work->bit)) {
work->func(work, &trace, timestamp);
clear_bit(work->bit, &current->unwind_mask);
}
}
+ srcu_read_unlock(&unwind_srcu, idx);
}
static int unwind_deferred_request_nmi(struct unwind_work *work, u64 *timestamp)
@@ -216,6 +220,7 @@ int unwind_deferred_request(struct unwind_work *work, u64 *timestamp)
{
struct unwind_task_info *info = &current->unwind_info;
int pending;
+ int bit;
int ret;
*timestamp = 0;
@@ -227,12 +232,17 @@ int unwind_deferred_request(struct unwind_work *work, u64 *timestamp)
if (in_nmi())
return unwind_deferred_request_nmi(work, timestamp);
+ /* Do not allow cancelled works to request again */
+ bit = READ_ONCE(work->bit);
+ if (WARN_ON_ONCE(bit < 0))
+ return -EINVAL;
+
guard(irqsave)();
*timestamp = get_timestamp(info);
/* This is already queued */
- if (current->unwind_mask & (1UL << work->bit))
+ if (current->unwind_mask & (1UL << bit))
return 1;
/* callback already pending? */
@@ -258,19 +268,26 @@ int unwind_deferred_request(struct unwind_work *work, u64 *timestamp)
void unwind_deferred_cancel(struct unwind_work *work)
{
struct task_struct *g, *t;
+ int bit;
if (!work)
return;
guard(mutex)(&callback_mutex);
- list_del(&work->list);
+ list_del_rcu(&work->list);
+ bit = work->bit;
+
+ /* Do not allow any more requests and prevent callbacks */
+ work->bit = -1;
+
+ clear_bit(bit, &unwind_mask);
- clear_bit(work->bit, &unwind_mask);
+ synchronize_srcu(&unwind_srcu);
guard(rcu)();
/* Clear this bit from all threads */
for_each_process_thread(g, t) {
- clear_bit(work->bit, &t->unwind_mask);
+ clear_bit(bit, &t->unwind_mask);
}
}
@@ -287,7 +304,7 @@ int unwind_deferred_init(struct unwind_work *work, unwind_callback_t func)
work->bit = ffz(unwind_mask);
unwind_mask |= 1UL << work->bit;
- list_add(&work->list, &callbacks);
+ list_add_rcu(&work->list, &callbacks);
work->func = func;
return 0;
}
--
2.47.2