[PATCH v3 2/4] sched: Simplify ifdeffery around cpu_smt_mask
From: Shrikanth Hegde @ 2026-05-13 13:39 UTC (permalink / raw)
To: mingo, peterz, vincent.guittot, linux-kernel
Cc: sshegde, kprateek.nayak, juri.lelli, vschneid, dietmar.eggemann,
tj, rostedt, mgorman, bsegall, arighi, pauld
Now that cpu_smt_mask is defined as cpumask_of(cpu) for
CONFIG_SCHED_SMT=n, it is possible to get rid of the ifdeffery.

Effectively,
- sched_smt_present is now always defined.
- cpumask_weight(cpumask_of(cpu)) == 1, so sched_smt_present_inc/dec
  will never enable sched_smt_present, which is expected (see the
  sketch after this list).
- Paths that were eliminated at compile time are now guarded at
  runtime by static keys.
- set_idle_cores(), test_idle_cores(), etc. are now defined, which
  could let CONFIG_SCHED_SMT=n systems use the same optimizations
  within the LLC at wakeup.
- This exposes the sched_smt_present symbol for CONFIG_SCHED_SMT=n.
  Likely not a concern.
- There is some code bloat for CONFIG_SCHED_SMT=n (NR_CPUS=2048):
  add/remove: 24/18 grow/shrink: 26/28 up/down: 6396/-3188 (3208)
  Total: Before=30629880, After=30633088, chg +0.01%
- No code bloat for CONFIG_SCHED_SMT=y, which is expected.
- Add a comment around stop_core_cpuslocked() explaining why its
  ifdefs are not removed.
- This leaves the remaining uses of CONFIG_SCHED_SMT mainly in the
  topology-building bits, which involve policy-based decisions.
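
For reference, a minimal sketch of why the static key can never be
flipped on for CONFIG_SCHED_SMT=n. The fallback below is the one
assumed to come from patch 1/4 (its exact hunk is not reproduced
here), combined with the post-patch sched_smt_present_inc():

	/*
	 * Assumed fallback from patch 1/4: with CONFIG_SCHED_SMT=n the
	 * SMT mask degenerates to the single-CPU mask, whose weight is
	 * always 1.
	 */
	#ifndef CONFIG_SCHED_SMT
	static inline const struct cpumask *cpu_smt_mask(int cpu)
	{
		return cpumask_of(cpu);
	}
	#endif

	static inline void sched_smt_present_inc(int cpu)
	{
		/* Weight is 1 on SMT=n, never 2: the key stays disabled. */
		if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
			static_branch_inc_cpuslocked(&sched_smt_present);
	}

So sched_smt_active() keeps returning false on such kernels, matching
the old compile-time behaviour.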
Tested-by: K Prateek Nayak <kprateek.nayak@amd.com>
Reviewed-by: Phil Auld <pauld@redhat.com>
Reviewed-by: Valentin Schneider <vschneid@redhat.com>
Acked-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Shrikanth Hegde <sshegde@linux.ibm.com>
---
include/linux/sched/smt.h | 4 ----
kernel/sched/core.c | 6 ------
kernel/sched/ext_idle.c | 6 ------
kernel/sched/fair.c | 35 -----------------------------------
kernel/sched/sched.h | 6 ------
kernel/sched/topology.c | 2 --
kernel/stop_machine.c | 5 +++++
kernel/workqueue.c | 4 ----
8 files changed, 5 insertions(+), 63 deletions(-)
diff --git a/include/linux/sched/smt.h b/include/linux/sched/smt.h
index 166b19af956f..cde6679c0278 100644
--- a/include/linux/sched/smt.h
+++ b/include/linux/sched/smt.h
@@ -4,16 +4,12 @@
#include <linux/static_key.h>
-#ifdef CONFIG_SCHED_SMT
extern struct static_key_false sched_smt_present;
static __always_inline bool sched_smt_active(void)
{
return static_branch_likely(&sched_smt_present);
}
-#else
-static __always_inline bool sched_smt_active(void) { return false; }
-#endif
void arch_smt_update(void);
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index b905805bbcbe..3ae5f19c1b7e 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -8612,18 +8612,14 @@ static void cpuset_cpu_inactive(unsigned int cpu)
static inline void sched_smt_present_inc(int cpu)
{
-#ifdef CONFIG_SCHED_SMT
if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
static_branch_inc_cpuslocked(&sched_smt_present);
-#endif
}
static inline void sched_smt_present_dec(int cpu)
{
-#ifdef CONFIG_SCHED_SMT
if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
static_branch_dec_cpuslocked(&sched_smt_present);
-#endif
}
int sched_cpu_activate(unsigned int cpu)
@@ -8711,9 +8707,7 @@ int sched_cpu_deactivate(unsigned int cpu)
*/
sched_smt_present_dec(cpu);
-#ifdef CONFIG_SCHED_SMT
sched_core_cpu_deactivate(cpu);
-#endif
if (!sched_smp_initialized)
return 0;
diff --git a/kernel/sched/ext_idle.c b/kernel/sched/ext_idle.c
index 6e1980763270..9f5ad6b071f9 100644
--- a/kernel/sched/ext_idle.c
+++ b/kernel/sched/ext_idle.c
@@ -79,7 +79,6 @@ static bool scx_idle_test_and_clear_cpu(int cpu)
int node = scx_cpu_node_if_enabled(cpu);
struct cpumask *idle_cpus = idle_cpumask(node)->cpu;
-#ifdef CONFIG_SCHED_SMT
/*
* SMT mask should be cleared whether we can claim @cpu or not. The SMT
* cluster is not wholly idle either way. This also prevents
@@ -104,7 +103,6 @@ static bool scx_idle_test_and_clear_cpu(int cpu)
else if (cpumask_test_cpu(cpu, idle_smts))
__cpumask_clear_cpu(cpu, idle_smts);
}
-#endif
return cpumask_test_and_clear_cpu(cpu, idle_cpus);
}
@@ -622,7 +620,6 @@ s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags,
goto out_unlock;
}
-#ifdef CONFIG_SCHED_SMT
/*
* Use @prev_cpu's sibling if it's idle.
*/
@@ -634,7 +631,6 @@ s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags,
goto out_unlock;
}
}
-#endif
/*
* Search for any idle CPU in the same LLC domain.
@@ -714,7 +710,6 @@ static void update_builtin_idle(int cpu, bool idle)
assign_cpu(cpu, idle_cpus, idle);
-#ifdef CONFIG_SCHED_SMT
if (sched_smt_active()) {
const struct cpumask *smt = cpu_smt_mask(cpu);
struct cpumask *idle_smts = idle_cpumask(node)->smt;
@@ -731,7 +726,6 @@ static void update_builtin_idle(int cpu, bool idle)
cpumask_andnot(idle_smts, idle_smts, smt);
}
}
-#endif
}
/*
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 3ebec186f982..353e31ecaadc 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1584,7 +1584,6 @@ update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
static inline bool is_core_idle(int cpu)
{
-#ifdef CONFIG_SCHED_SMT
int sibling;
for_each_cpu(sibling, cpu_smt_mask(cpu)) {
@@ -1594,7 +1593,6 @@ static inline bool is_core_idle(int cpu)
if (!idle_cpu(sibling))
return false;
}
-#endif
return true;
}
@@ -2277,7 +2275,6 @@ numa_type numa_classify(unsigned int imbalance_pct,
return node_fully_busy;
}
-#ifdef CONFIG_SCHED_SMT
/* Forward declarations of select_idle_sibling helpers */
static inline bool test_idle_cores(int cpu);
static inline int numa_idle_core(int idle_core, int cpu)
@@ -2295,12 +2292,6 @@ static inline int numa_idle_core(int idle_core, int cpu)
return idle_core;
}
-#else /* !CONFIG_SCHED_SMT: */
-static inline int numa_idle_core(int idle_core, int cpu)
-{
- return idle_core;
-}
-#endif /* !CONFIG_SCHED_SMT */
/*
* Gather all necessary information to make NUMA balancing placement
@@ -7811,7 +7802,6 @@ static inline int __select_idle_cpu(int cpu, struct task_struct *p)
return -1;
}
-#ifdef CONFIG_SCHED_SMT
DEFINE_STATIC_KEY_FALSE(sched_smt_present);
EXPORT_SYMBOL_GPL(sched_smt_present);
@@ -7921,29 +7911,6 @@ static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int t
return -1;
}
-#else /* !CONFIG_SCHED_SMT: */
-
-static inline void set_idle_cores(int cpu, int val)
-{
-}
-
-static inline bool test_idle_cores(int cpu)
-{
- return false;
-}
-
-static inline int select_idle_core(struct task_struct *p, int core, struct cpumask *cpus, int *idle_cpu)
-{
- return __select_idle_cpu(core, p);
-}
-
-static inline int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target)
-{
- return -1;
-}
-
-#endif /* !CONFIG_SCHED_SMT */
-
/*
* Scan the LLC domain for idle CPUs; this is dynamically regulated by
* comparing the average scan cost (tracked in sd->avg_scan_cost) against the
@@ -12036,9 +12003,7 @@ static int should_we_balance(struct lb_env *env)
* idle has been found, then its not needed to check other
* SMT siblings for idleness:
*/
-#ifdef CONFIG_SCHED_SMT
cpumask_andnot(swb_cpus, swb_cpus, cpu_smt_mask(cpu));
-#endif
continue;
}
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 9f63b15d309d..e476623a0c2a 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1667,7 +1667,6 @@ do { \
flags = _raw_spin_rq_lock_irqsave(rq); \
} while (0)
-#ifdef CONFIG_SCHED_SMT
extern void __update_idle_core(struct rq *rq);
static inline void update_idle_core(struct rq *rq)
@@ -1676,12 +1675,7 @@ static inline void update_idle_core(struct rq *rq)
__update_idle_core(rq);
}
-#else /* !CONFIG_SCHED_SMT: */
-static inline void update_idle_core(struct rq *rq) { }
-#endif /* !CONFIG_SCHED_SMT */
-
#ifdef CONFIG_FAIR_GROUP_SCHED
-
static inline struct task_struct *task_of(struct sched_entity *se)
{
WARN_ON_ONCE(!entity_is_task(se));
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 5847b83d9d55..a1f46e3f4ede 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -1310,9 +1310,7 @@ static void init_sched_groups_capacity(int cpu, struct sched_domain *sd)
cpumask_copy(mask, sched_group_span(sg));
for_each_cpu(cpu, mask) {
cores++;
-#ifdef CONFIG_SCHED_SMT
cpumask_andnot(mask, mask, cpu_smt_mask(cpu));
-#endif
}
sg->cores = cores;
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index 3fe6b0c99f3d..773d8e9ae30c 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -633,6 +633,11 @@ int stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus)
EXPORT_SYMBOL_GPL(stop_machine);
#ifdef CONFIG_SCHED_SMT
+/*
+ * INTEL_IFS is the only user of this API. That selftest can
+ * only be compiled if SMP=y. On x86 it selects SCHED_SMT.
+ * Keep the ifdefs for now.
+ */
int stop_core_cpuslocked(unsigned int cpu, cpu_stop_fn_t fn, void *data)
{
const struct cpumask *smt_mask = cpu_smt_mask(cpu);
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 3d2e3b2ec528..c911fdcb4428 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -8198,11 +8198,7 @@ static bool __init cpus_dont_share(int cpu0, int cpu1)
static bool __init cpus_share_smt(int cpu0, int cpu1)
{
-#ifdef CONFIG_SCHED_SMT
return cpumask_test_cpu(cpu0, cpu_smt_mask(cpu1));
-#else
- return false;
-#endif
}
static bool __init cpus_share_numa(int cpu0, int cpu1)
--
2.47.3
[PATCH v3 4/4] sched: Unify SMT active check via sched_smt_active()
From: Shrikanth Hegde @ 2026-05-13 13:39 UTC (permalink / raw)
To: mingo, peterz, vincent.guittot, linux-kernel
Cc: sshegde, kprateek.nayak, juri.lelli, vschneid, dietmar.eggemann,
tj, rostedt, mgorman, bsegall, arighi, pauld
There are uses of sched_smt_active() alongside explicit uses of
sched_smt_present. Remove the explicit usage for better code
maintenance and readability.

Note that this differs slightly for update_idle_core(): it used to
call static_branch_unlikely() and will now call static_branch_likely().
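
Both forms read the same key and return the same value; the
likely/unlikely variant only hints which side the compiler should lay
out as the straight-line path. A minimal before/after sketch:

	/* Before: SMT assumed to be the out-of-line (unlikely) path. */
	if (static_branch_unlikely(&sched_smt_present))
		__update_idle_core(rq);

	/* After: the wrapper uses static_branch_likely() internally. */
	if (sched_smt_active())
		__update_idle_core(rq);

Since static keys are patched at runtime either way, this only changes
code layout, not behaviour.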
Signed-off-by: Shrikanth Hegde <sshegde@linux.ibm.com>
---
kernel/sched/core_sched.c | 2 +-
kernel/sched/fair.c | 2 +-
kernel/sched/sched.h | 2 +-
3 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/kernel/sched/core_sched.c b/kernel/sched/core_sched.c
index 73b6b2426911..43e0bde3038e 100644
--- a/kernel/sched/core_sched.c
+++ b/kernel/sched/core_sched.c
@@ -136,7 +136,7 @@ int sched_core_share_pid(unsigned int cmd, pid_t pid, enum pid_type type,
struct pid *grp;
int err = 0;
- if (!static_branch_likely(&sched_smt_present))
+ if (!sched_smt_active())
return -ENODEV;
BUILD_BUG_ON(PR_SCHED_CORE_SCOPE_THREAD != PIDTYPE_PID);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 964014a74cf9..29fbb5287cfc 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2276,7 +2276,7 @@ numa_type numa_classify(unsigned int imbalance_pct,
static inline bool test_idle_cores(int cpu);
static inline int numa_idle_core(int idle_core, int cpu)
{
- if (!static_branch_likely(&sched_smt_present) ||
+ if (!sched_smt_active() ||
idle_core >= 0 || !test_idle_cores(cpu))
return idle_core;
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index e476623a0c2a..ffe77b2b6296 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1671,7 +1671,7 @@ extern void __update_idle_core(struct rq *rq);
static inline void update_idle_core(struct rq *rq)
{
- if (static_branch_unlikely(&sched_smt_present))
+ if (sched_smt_active())
__update_idle_core(rq);
}
--
2.47.3