From: Shrikanth Hegde <sshegde@linux.ibm.com>
To: mingo@kernel.org, peterz@infradead.org,
vincent.guittot@linaro.org, linux-kernel@vger.kernel.org
Cc: sshegde@linux.ibm.com, kprateek.nayak@amd.com,
juri.lelli@redhat.com, vschneid@redhat.com,
dietmar.eggemann@arm.com, tj@kernel.org, rostedt@goodmis.org,
tglx@kernel.org, mgorman@suse.de, bsegall@google.com,
arighi@nvidia.com
Subject: [PATCH 2/3] sched: Simplify ifdeffery around cpu_smt_mask
Date: Wed, 6 May 2026 16:30:51 +0530 [thread overview]
Message-ID: <20260506110052.9974-3-sshegde@linux.ibm.com> (raw)
In-Reply-To: <20260506110052.9974-1-sshegde@linux.ibm.com>
Now that cpu_smt_mask is defined as cpumask_of(cpu) for
CONFIG_SCHED_SMT=n, it is possible to get rid of the ifdeffery.
Effectively,
- This makes sched_smt_present always defined.
- cpumask_weight(cpumask_of(cpu)) == 1, so sched_smt_present_inc/dec
will never enable sched_smt_present, which is expected.
- Paths that were compile-time eliminated become runtime guarded
using static keys.
- Defines set_idle_cores, test_idle_cores etc which could likely benefit
the CONFIG_SCHED_SMT=n systems to use the same optimizations within the
LLC at wakeups.
- This will expose the sched_smt_present and stop_core_cpuslocked symbols
for CONFIG_SCHED_SMT=n. Likely not a concern.
- There is some code bloat for CONFIG_SCHED_SMT=n. (NR_CPUS=2048)
add/remove: 25/18 grow/shrink: 26/19 up/down: 6696/-3064 (3632)
Total: Before=30771823, After=30775455, chg +0.01%
- No code bloat for CONFIG_SCHED_SMT=y, which is expected.
Signed-off-by: Shrikanth Hegde <sshegde@linux.ibm.com>
---
include/linux/sched/smt.h | 4 ----
kernel/sched/core.c | 6 ------
kernel/sched/ext_idle.c | 6 ------
kernel/sched/fair.c | 35 -----------------------------------
kernel/sched/sched.h | 6 ------
kernel/sched/topology.c | 2 --
kernel/stop_machine.c | 2 --
kernel/workqueue.c | 4 ----
8 files changed, 65 deletions(-)
diff --git a/include/linux/sched/smt.h b/include/linux/sched/smt.h
index 166b19af956f..cde6679c0278 100644
--- a/include/linux/sched/smt.h
+++ b/include/linux/sched/smt.h
@@ -4,16 +4,12 @@
#include <linux/static_key.h>
-#ifdef CONFIG_SCHED_SMT
extern struct static_key_false sched_smt_present;
static __always_inline bool sched_smt_active(void)
{
return static_branch_likely(&sched_smt_present);
}
-#else
-static __always_inline bool sched_smt_active(void) { return false; }
-#endif
void arch_smt_update(void);
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index b8871449d3c6..055db51c5483 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -8604,18 +8604,14 @@ static void cpuset_cpu_inactive(unsigned int cpu)
static inline void sched_smt_present_inc(int cpu)
{
-#ifdef CONFIG_SCHED_SMT
if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
static_branch_inc_cpuslocked(&sched_smt_present);
-#endif
}
static inline void sched_smt_present_dec(int cpu)
{
-#ifdef CONFIG_SCHED_SMT
if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
static_branch_dec_cpuslocked(&sched_smt_present);
-#endif
}
int sched_cpu_activate(unsigned int cpu)
@@ -8703,9 +8699,7 @@ int sched_cpu_deactivate(unsigned int cpu)
*/
sched_smt_present_dec(cpu);
-#ifdef CONFIG_SCHED_SMT
sched_core_cpu_deactivate(cpu);
-#endif
if (!sched_smp_initialized)
return 0;
diff --git a/kernel/sched/ext_idle.c b/kernel/sched/ext_idle.c
index 7468560a6d80..2bcf58e99c9b 100644
--- a/kernel/sched/ext_idle.c
+++ b/kernel/sched/ext_idle.c
@@ -79,7 +79,6 @@ static bool scx_idle_test_and_clear_cpu(int cpu)
int node = scx_cpu_node_if_enabled(cpu);
struct cpumask *idle_cpus = idle_cpumask(node)->cpu;
-#ifdef CONFIG_SCHED_SMT
/*
* SMT mask should be cleared whether we can claim @cpu or not. The SMT
* cluster is not wholly idle either way. This also prevents
@@ -104,7 +103,6 @@ static bool scx_idle_test_and_clear_cpu(int cpu)
else if (cpumask_test_cpu(cpu, idle_smts))
__cpumask_clear_cpu(cpu, idle_smts);
}
-#endif
return cpumask_test_and_clear_cpu(cpu, idle_cpus);
}
@@ -622,7 +620,6 @@ s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags,
goto out_unlock;
}
-#ifdef CONFIG_SCHED_SMT
/*
* Use @prev_cpu's sibling if it's idle.
*/
@@ -634,7 +631,6 @@ s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags,
goto out_unlock;
}
}
-#endif
/*
* Search for any idle CPU in the same LLC domain.
@@ -714,7 +710,6 @@ static void update_builtin_idle(int cpu, bool idle)
assign_cpu(cpu, idle_cpus, idle);
-#ifdef CONFIG_SCHED_SMT
if (sched_smt_active()) {
const struct cpumask *smt = cpu_smt_mask(cpu);
struct cpumask *idle_smts = idle_cpumask(node)->smt;
@@ -731,7 +726,6 @@ static void update_builtin_idle(int cpu, bool idle)
cpumask_andnot(idle_smts, idle_smts, smt);
}
}
-#endif
}
/*
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 728965851842..d19c416d1b84 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1555,7 +1555,6 @@ update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
static inline bool is_core_idle(int cpu)
{
-#ifdef CONFIG_SCHED_SMT
int sibling;
for_each_cpu(sibling, cpu_smt_mask(cpu)) {
@@ -1565,7 +1564,6 @@ static inline bool is_core_idle(int cpu)
if (!idle_cpu(sibling))
return false;
}
-#endif
return true;
}
@@ -2248,7 +2246,6 @@ numa_type numa_classify(unsigned int imbalance_pct,
return node_fully_busy;
}
-#ifdef CONFIG_SCHED_SMT
/* Forward declarations of select_idle_sibling helpers */
static inline bool test_idle_cores(int cpu);
static inline int numa_idle_core(int idle_core, int cpu)
@@ -2266,12 +2263,6 @@ static inline int numa_idle_core(int idle_core, int cpu)
return idle_core;
}
-#else /* !CONFIG_SCHED_SMT: */
-static inline int numa_idle_core(int idle_core, int cpu)
-{
- return idle_core;
-}
-#endif /* !CONFIG_SCHED_SMT */
/*
* Gather all necessary information to make NUMA balancing placement
@@ -7782,7 +7773,6 @@ static inline int __select_idle_cpu(int cpu, struct task_struct *p)
return -1;
}
-#ifdef CONFIG_SCHED_SMT
DEFINE_STATIC_KEY_FALSE(sched_smt_present);
EXPORT_SYMBOL_GPL(sched_smt_present);
@@ -7892,29 +7882,6 @@ static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int t
return -1;
}
-#else /* !CONFIG_SCHED_SMT: */
-
-static inline void set_idle_cores(int cpu, int val)
-{
-}
-
-static inline bool test_idle_cores(int cpu)
-{
- return false;
-}
-
-static inline int select_idle_core(struct task_struct *p, int core, struct cpumask *cpus, int *idle_cpu)
-{
- return __select_idle_cpu(core, p);
-}
-
-static inline int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target)
-{
- return -1;
-}
-
-#endif /* !CONFIG_SCHED_SMT */
-
/*
* Scan the LLC domain for idle CPUs; this is dynamically regulated by
* comparing the average scan cost (tracked in sd->avg_scan_cost) against the
@@ -12006,9 +11973,7 @@ static int should_we_balance(struct lb_env *env)
* idle has been found, then its not needed to check other
* SMT siblings for idleness:
*/
-#ifdef CONFIG_SCHED_SMT
cpumask_andnot(swb_cpus, swb_cpus, cpu_smt_mask(cpu));
-#endif
continue;
}
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 9f63b15d309d..e476623a0c2a 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1667,7 +1667,6 @@ do { \
flags = _raw_spin_rq_lock_irqsave(rq); \
} while (0)
-#ifdef CONFIG_SCHED_SMT
extern void __update_idle_core(struct rq *rq);
static inline void update_idle_core(struct rq *rq)
@@ -1676,12 +1675,7 @@ static inline void update_idle_core(struct rq *rq)
__update_idle_core(rq);
}
-#else /* !CONFIG_SCHED_SMT: */
-static inline void update_idle_core(struct rq *rq) { }
-#endif /* !CONFIG_SCHED_SMT */
-
#ifdef CONFIG_FAIR_GROUP_SCHED
-
static inline struct task_struct *task_of(struct sched_entity *se)
{
WARN_ON_ONCE(!entity_is_task(se));
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 5847b83d9d55..a1f46e3f4ede 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -1310,9 +1310,7 @@ static void init_sched_groups_capacity(int cpu, struct sched_domain *sd)
cpumask_copy(mask, sched_group_span(sg));
for_each_cpu(cpu, mask) {
cores++;
-#ifdef CONFIG_SCHED_SMT
cpumask_andnot(mask, mask, cpu_smt_mask(cpu));
-#endif
}
sg->cores = cores;
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index 3fe6b0c99f3d..e17afa52893c 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -632,7 +632,6 @@ int stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus)
}
EXPORT_SYMBOL_GPL(stop_machine);
-#ifdef CONFIG_SCHED_SMT
int stop_core_cpuslocked(unsigned int cpu, cpu_stop_fn_t fn, void *data)
{
const struct cpumask *smt_mask = cpu_smt_mask(cpu);
@@ -651,7 +650,6 @@ int stop_core_cpuslocked(unsigned int cpu, cpu_stop_fn_t fn, void *data)
return stop_cpus(smt_mask, multi_cpu_stop, &msdata);
}
EXPORT_SYMBOL_GPL(stop_core_cpuslocked);
-#endif
/**
* stop_machine_from_inactive_cpu - stop_machine() from inactive CPU
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 5f747f241a5f..99ef412f02a6 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -8187,11 +8187,7 @@ static bool __init cpus_dont_share(int cpu0, int cpu1)
static bool __init cpus_share_smt(int cpu0, int cpu1)
{
-#ifdef CONFIG_SCHED_SMT
return cpumask_test_cpu(cpu0, cpu_smt_mask(cpu1));
-#else
- return false;
-#endif
}
static bool __init cpus_share_numa(int cpu0, int cpu1)
--
2.51.0
next prev parent reply other threads:[~2026-05-06 11:01 UTC|newest]
Thread overview: 7+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-05-06 11:00 [PATCH 0/3] sched: Simplify ifdeffery around CONFIG_SCHED_SMT Shrikanth Hegde
2026-05-06 11:00 ` [PATCH 1/3] topology: Introduce cpu_smt_mask for CONFIG_SCHED_SMT=n Shrikanth Hegde
2026-05-06 11:00 ` Shrikanth Hegde [this message]
2026-05-11 12:53 ` [PATCH 2/3] sched: Simplify ifdeffery around cpu_smt_mask Valentin Schneider
2026-05-11 14:37 ` Shrikanth Hegde
2026-05-11 18:46 ` Tejun Heo
2026-05-06 11:00 ` [PATCH 3/3] sched/fair: Add compile time check in fastpaths for CONFIG_SCHED_SMT=n Shrikanth Hegde
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260506110052.9974-3-sshegde@linux.ibm.com \
--to=sshegde@linux.ibm.com \
--cc=arighi@nvidia.com \
--cc=bsegall@google.com \
--cc=dietmar.eggemann@arm.com \
--cc=juri.lelli@redhat.com \
--cc=kprateek.nayak@amd.com \
--cc=linux-kernel@vger.kernel.org \
--cc=mgorman@suse.de \
--cc=mingo@kernel.org \
--cc=peterz@infradead.org \
--cc=rostedt@goodmis.org \
--cc=tglx@kernel.org \
--cc=tj@kernel.org \
--cc=vincent.guittot@linaro.org \
--cc=vschneid@redhat.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox