From: Cheng-Yang Chou <yphbchou0911@gmail.com>
To: sched-ext@lists.linux.dev, Tejun Heo <tj@kernel.org>,
David Vernet <void@manifault.com>,
Andrea Righi <arighi@nvidia.com>,
Changwoo Min <changwoo@igalia.com>,
"Paul E . McKenney" <paulmck@kernel.org>,
rcu@vger.kernel.org
Cc: Ching-Chun Huang <jserv@ccns.ncku.edu.tw>,
Chia-Ping Tsai <chia7712@gmail.com>,
yphbchou0911@gmail.com
Subject: [PATCH 2/2] sched_ext: Fix exit_cpu accuracy for lockup paths
Date: Fri, 1 May 2026 21:14:57 +0800 [thread overview]
Message-ID: <20260501131521.161852-3-yphbchou0911@gmail.com> (raw)
In-Reply-To: <20260501131521.161852-1-yphbchou0911@gmail.com>
handle_lockup() uses raw_smp_processor_id() for exit_cpu, which is wrong
for two paths:
- scx_hardlockup_irq_workfn() already has the hung CPU in a local
  variable, but the irq_work may execute on a different CPU than the
  hung one; pass the local CPU explicitly.
- scx_rcu_cpu_stall() takes no CPU argument, so it ends up recording
  the CPU running the stall detector rather than the stalled CPU; add
  a stalled_cpu parameter to its signature and to panic_on_rcu_stall(),
  threading the value through from print_cpu_stall() (the current CPU)
  and print_other_cpu_stall() (the first stalled CPU detected).
Add an exit_cpu parameter to handle_lockup() so callers pass the correct
CPU directly. Remove the now-unused scx_verror() macro.
Signed-off-by: Cheng-Yang Chou <yphbchou0911@gmail.com>
---
include/linux/sched/ext.h | 4 ++--
kernel/rcu/tree_exp.h | 2 +-
kernel/rcu/tree_stall.h | 11 +++++++----
kernel/sched/ext.c | 14 ++++++++------
kernel/sched/ext_internal.h | 2 --
5 files changed, 18 insertions(+), 15 deletions(-)
diff --git a/include/linux/sched/ext.h b/include/linux/sched/ext.h
index d05efcac794d..16bbf24089ca 100644
--- a/include/linux/sched/ext.h
+++ b/include/linux/sched/ext.h
@@ -259,7 +259,7 @@ void sched_ext_dead(struct task_struct *p);
void print_scx_info(const char *log_lvl, struct task_struct *p);
void scx_softlockup(u32 dur_s);
bool scx_hardlockup(int cpu);
-bool scx_rcu_cpu_stall(void);
+bool scx_rcu_cpu_stall(int stalled_cpu);
#else /* !CONFIG_SCHED_CLASS_EXT */
@@ -267,7 +267,7 @@ static inline void sched_ext_dead(struct task_struct *p) {}
static inline void print_scx_info(const char *log_lvl, struct task_struct *p) {}
static inline void scx_softlockup(u32 dur_s) {}
static inline bool scx_hardlockup(int cpu) { return false; }
-static inline bool scx_rcu_cpu_stall(void) { return false; }
+static inline bool scx_rcu_cpu_stall(int stalled_cpu) { return false; }
#endif /* CONFIG_SCHED_CLASS_EXT */
diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h
index 82cada459e5d..2c9a99a9b68e 100644
--- a/kernel/rcu/tree_exp.h
+++ b/kernel/rcu/tree_exp.h
@@ -675,7 +675,7 @@ static void synchronize_rcu_expedited_wait(void)
nbcon_cpu_emergency_exit();
- panic_on_rcu_stall();
+ panic_on_rcu_stall(smp_processor_id());
}
}
diff --git a/kernel/rcu/tree_stall.h b/kernel/rcu/tree_stall.h
index b67532cb8770..172ac08e673d 100644
--- a/kernel/rcu/tree_stall.h
+++ b/kernel/rcu/tree_stall.h
@@ -159,7 +159,7 @@ static int __init check_cpu_stall_init(void)
early_initcall(check_cpu_stall_init);
/* If so specified via sysctl, panic, yielding cleaner stall-warning output. */
-static void panic_on_rcu_stall(void)
+static void panic_on_rcu_stall(int stalled_cpu)
{
static int cpu_stall;
@@ -167,7 +167,7 @@ static void panic_on_rcu_stall(void)
* Attempt to kick out the BPF scheduler if it's installed and defer
* the panic to give the system a chance to recover.
*/
- if (scx_rcu_cpu_stall())
+ if (scx_rcu_cpu_stall(stalled_cpu))
return;
if (++cpu_stall < sysctl_max_rcu_stall_to_panic)
@@ -631,6 +631,7 @@ static void rcu_check_gp_kthread_expired_fqs_timer(void)
static void print_other_cpu_stall(unsigned long gp_seq, unsigned long gps)
{
int cpu;
+ int first_stalled_cpu = -1;
unsigned long flags;
unsigned long gpa;
unsigned long j;
@@ -660,6 +661,8 @@ static void print_other_cpu_stall(unsigned long gp_seq, unsigned long gps)
for_each_leaf_node_possible_cpu(rnp, cpu)
if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu)) {
print_cpu_stall_info(cpu);
+ if (first_stalled_cpu < 0)
+ first_stalled_cpu = cpu;
ndetected++;
}
}
@@ -701,7 +704,7 @@ static void print_other_cpu_stall(unsigned long gp_seq, unsigned long gps)
nbcon_cpu_emergency_exit();
- panic_on_rcu_stall();
+ panic_on_rcu_stall(first_stalled_cpu);
rcu_force_quiescent_state(); /* Kick them all. */
}
@@ -754,7 +757,7 @@ static void print_cpu_stall(unsigned long gp_seq, unsigned long gps)
nbcon_cpu_emergency_exit();
- panic_on_rcu_stall();
+ panic_on_rcu_stall(smp_processor_id());
/*
* Attempt to revive the RCU machinery by forcing a context switch.
diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
index 48c65ac8e230..8a0b1662a75a 100644
--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@ -5069,6 +5069,7 @@ bool scx_allow_ttwu_queue(const struct task_struct *p)
/**
* handle_lockup - sched_ext common lockup handler
+ * @exit_cpu: CPU to record in exit_info; pass the stalled/hung CPU, not current
* @fmt: format string
*
* Called on system stall or lockup condition and initiates abort of sched_ext
@@ -5078,7 +5079,7 @@ bool scx_allow_ttwu_queue(const struct task_struct *p)
* resolve the lockup. %false if sched_ext is not enabled or abort was already
* initiated by someone else.
*/
-static __printf(1, 2) bool handle_lockup(const char *fmt, ...)
+static __printf(2, 3) bool handle_lockup(int exit_cpu, const char *fmt, ...)
{
struct scx_sched *sch;
va_list args;
@@ -5094,7 +5095,7 @@ static __printf(1, 2) bool handle_lockup(const char *fmt, ...)
case SCX_ENABLING:
case SCX_ENABLED:
va_start(args, fmt);
- ret = scx_verror(sch, fmt, args);
+ ret = scx_vexit(sch, SCX_EXIT_ERROR, 0, exit_cpu, fmt, args);
va_end(args);
return ret;
default:
@@ -5114,9 +5115,9 @@ static __printf(1, 2) bool handle_lockup(const char *fmt, ...)
* resolve the reported RCU stall. %false if sched_ext is not enabled or someone
* else already initiated abort.
*/
-bool scx_rcu_cpu_stall(void)
+bool scx_rcu_cpu_stall(int stalled_cpu)
{
- return handle_lockup("RCU CPU stall detected!");
+ return handle_lockup(stalled_cpu, "RCU CPU stall detected!");
}
/**
@@ -5131,7 +5132,8 @@ bool scx_rcu_cpu_stall(void)
*/
void scx_softlockup(u32 dur_s)
{
- if (!handle_lockup("soft lockup - CPU %d stuck for %us", smp_processor_id(), dur_s))
+ if (!handle_lockup(smp_processor_id(), "soft lockup - CPU %d stuck for %us",
+ smp_processor_id(), dur_s))
return;
printk_deferred(KERN_ERR "sched_ext: Soft lockup - CPU %d stuck for %us, disabling BPF scheduler\n",
@@ -5150,7 +5152,7 @@ static void scx_hardlockup_irq_workfn(struct irq_work *work)
{
int cpu = atomic_xchg(&scx_hardlockup_cpu, -1);
- if (cpu >= 0 && handle_lockup("hard lockup - CPU %d", cpu))
+ if (cpu >= 0 && handle_lockup(cpu, "hard lockup - CPU %d", cpu))
printk_deferred(KERN_ERR "sched_ext: Hard lockup - CPU %d, disabling BPF scheduler\n",
cpu);
}
diff --git a/kernel/sched/ext_internal.h b/kernel/sched/ext_internal.h
index b4f5dd28855e..2d04f27cf7c5 100644
--- a/kernel/sched/ext_internal.h
+++ b/kernel/sched/ext_internal.h
@@ -1486,8 +1486,6 @@ __printf(5, 6) bool __scx_exit(struct scx_sched *sch, enum scx_exit_kind kind,
__scx_exit(sch, kind, exit_code, raw_smp_processor_id(), fmt, ##args)
#define scx_error(sch, fmt, args...) \
scx_exit((sch), SCX_EXIT_ERROR, 0, fmt, ##args)
-#define scx_verror(sch, fmt, args) \
- scx_vexit((sch), SCX_EXIT_ERROR, 0, raw_smp_processor_id(), fmt, args)
/*
* Return the rq currently locked from an scx callback, or NULL if no rq is
--
2.48.1
prev parent reply other threads:[~2026-05-01 13:16 UTC|newest]
Thread overview: 3+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-05-01 13:14 [PATCH sched_ext/for-7.2 0/2] sched_ext: Follow-up fixes for exit_cpu accuracy Cheng-Yang Chou
2026-05-01 13:14 ` [PATCH 1/2] sched_ext: Normalize exit dump header to "on CPU N" Cheng-Yang Chou
2026-05-01 13:14 ` Cheng-Yang Chou [this message]
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260501131521.161852-3-yphbchou0911@gmail.com \
--to=yphbchou0911@gmail.com \
--cc=arighi@nvidia.com \
--cc=changwoo@igalia.com \
--cc=chia7712@gmail.com \
--cc=jserv@ccns.ncku.edu.tw \
--cc=paulmck@kernel.org \
--cc=rcu@vger.kernel.org \
--cc=sched-ext@lists.linux.dev \
--cc=tj@kernel.org \
--cc=void@manifault.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox