All of lore.kernel.org
 help / color / mirror / Atom feed
From: Shrikanth Hegde <sshegde@linux.ibm.com>
To: mingo@kernel.org, peterz@infradead.org,
	vincent.guittot@linaro.org, linux-kernel@vger.kernel.org
Cc: sshegde@linux.ibm.com, kprateek.nayak@amd.com,
	juri.lelli@redhat.com, vschneid@redhat.com,
	dietmar.eggemann@arm.com, tj@kernel.org, rostedt@goodmis.org,
	mgorman@suse.de, bsegall@google.com, arighi@nvidia.com
Subject: [PATCH v2 2/3] sched: Simplify ifdeffery around cpu_smt_mask
Date: Tue, 12 May 2026 20:51:24 +0530	[thread overview]
Message-ID: <20260512152125.308280-3-sshegde@linux.ibm.com> (raw)
In-Reply-To: <20260512152125.308280-1-sshegde@linux.ibm.com>

Now that cpu_smt_mask is defined as cpumask_of(cpu) for
CONFIG_SCHED_SMT=n, it is possible to get rid of the ifdeffery.

Effectively,
- This makes sched_smt_present always defined

- cpumask_weight(cpumask_of(cpu)) == 1, so sched_smt_present_inc/dec
  will never enable sched_smt_present, which is expected.

- Paths that were compile-time eliminated become runtime guarded
  using static keys.

- Defines set_idle_cores, test_idle_cores etc which could likely benefit
  the CONFIG_SCHED_SMT=n systems to use the same optimizations within the
  LLC at wakeups.

- This will expose sched_smt_present symbol for CONFIG_SCHED_SMT=n.
  Likely not a concern.

- There is some code bloat for CONFIG_SCHED_SMT=n. (NR_CPUS=2048)
  add/remove: 24/18 grow/shrink: 26/28 up/down: 6396/-3188 (3208)
  Total: Before=30629880, After=30633088, chg +0.01%

- No code bloat for CONFIG_SCHED_SMT=y, which is expected.

- Add comments around stop_core_cpuslocked on why ifdefs are not
  removed.

- This leaves the remaining uses of CONFIG_SCHED_SMT mainly for
  topology building bits which have a policy based decision.

Acked-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Shrikanth Hegde <sshegde@linux.ibm.com>
---
 include/linux/sched/smt.h |  4 ----
 kernel/sched/core.c       |  6 ------
 kernel/sched/ext_idle.c   |  6 ------
 kernel/sched/fair.c       | 35 -----------------------------------
 kernel/sched/sched.h      |  6 ------
 kernel/sched/topology.c   |  2 --
 kernel/stop_machine.c     |  5 +++++
 kernel/workqueue.c        |  4 ----
 8 files changed, 5 insertions(+), 63 deletions(-)

diff --git a/include/linux/sched/smt.h b/include/linux/sched/smt.h
index 166b19af956f..cde6679c0278 100644
--- a/include/linux/sched/smt.h
+++ b/include/linux/sched/smt.h
@@ -4,16 +4,12 @@
 
 #include <linux/static_key.h>
 
-#ifdef CONFIG_SCHED_SMT
 extern struct static_key_false sched_smt_present;
 
 static __always_inline bool sched_smt_active(void)
 {
 	return static_branch_likely(&sched_smt_present);
 }
-#else
-static __always_inline bool sched_smt_active(void) { return false; }
-#endif
 
 void arch_smt_update(void);
 
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index b905805bbcbe..3ae5f19c1b7e 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -8612,18 +8612,14 @@ static void cpuset_cpu_inactive(unsigned int cpu)
 
 static inline void sched_smt_present_inc(int cpu)
 {
-#ifdef CONFIG_SCHED_SMT
 	if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
 		static_branch_inc_cpuslocked(&sched_smt_present);
-#endif
 }
 
 static inline void sched_smt_present_dec(int cpu)
 {
-#ifdef CONFIG_SCHED_SMT
 	if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
 		static_branch_dec_cpuslocked(&sched_smt_present);
-#endif
 }
 
 int sched_cpu_activate(unsigned int cpu)
@@ -8711,9 +8707,7 @@ int sched_cpu_deactivate(unsigned int cpu)
 	 */
 	sched_smt_present_dec(cpu);
 
-#ifdef CONFIG_SCHED_SMT
 	sched_core_cpu_deactivate(cpu);
-#endif
 
 	if (!sched_smp_initialized)
 		return 0;
diff --git a/kernel/sched/ext_idle.c b/kernel/sched/ext_idle.c
index 6e1980763270..9f5ad6b071f9 100644
--- a/kernel/sched/ext_idle.c
+++ b/kernel/sched/ext_idle.c
@@ -79,7 +79,6 @@ static bool scx_idle_test_and_clear_cpu(int cpu)
 	int node = scx_cpu_node_if_enabled(cpu);
 	struct cpumask *idle_cpus = idle_cpumask(node)->cpu;
 
-#ifdef CONFIG_SCHED_SMT
 	/*
 	 * SMT mask should be cleared whether we can claim @cpu or not. The SMT
 	 * cluster is not wholly idle either way. This also prevents
@@ -104,7 +103,6 @@ static bool scx_idle_test_and_clear_cpu(int cpu)
 		else if (cpumask_test_cpu(cpu, idle_smts))
 			__cpumask_clear_cpu(cpu, idle_smts);
 	}
-#endif
 
 	return cpumask_test_and_clear_cpu(cpu, idle_cpus);
 }
@@ -622,7 +620,6 @@ s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags,
 		goto out_unlock;
 	}
 
-#ifdef CONFIG_SCHED_SMT
 	/*
 	 * Use @prev_cpu's sibling if it's idle.
 	 */
@@ -634,7 +631,6 @@ s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags,
 				goto out_unlock;
 		}
 	}
-#endif
 
 	/*
 	 * Search for any idle CPU in the same LLC domain.
@@ -714,7 +710,6 @@ static void update_builtin_idle(int cpu, bool idle)
 
 	assign_cpu(cpu, idle_cpus, idle);
 
-#ifdef CONFIG_SCHED_SMT
 	if (sched_smt_active()) {
 		const struct cpumask *smt = cpu_smt_mask(cpu);
 		struct cpumask *idle_smts = idle_cpumask(node)->smt;
@@ -731,7 +726,6 @@ static void update_builtin_idle(int cpu, bool idle)
 			cpumask_andnot(idle_smts, idle_smts, smt);
 		}
 	}
-#endif
 }
 
 /*
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 3ebec186f982..353e31ecaadc 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1584,7 +1584,6 @@ update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
 
 static inline bool is_core_idle(int cpu)
 {
-#ifdef CONFIG_SCHED_SMT
 	int sibling;
 
 	for_each_cpu(sibling, cpu_smt_mask(cpu)) {
@@ -1594,7 +1593,6 @@ static inline bool is_core_idle(int cpu)
 		if (!idle_cpu(sibling))
 			return false;
 	}
-#endif
 
 	return true;
 }
@@ -2277,7 +2275,6 @@ numa_type numa_classify(unsigned int imbalance_pct,
 	return node_fully_busy;
 }
 
-#ifdef CONFIG_SCHED_SMT
 /* Forward declarations of select_idle_sibling helpers */
 static inline bool test_idle_cores(int cpu);
 static inline int numa_idle_core(int idle_core, int cpu)
@@ -2295,12 +2292,6 @@ static inline int numa_idle_core(int idle_core, int cpu)
 
 	return idle_core;
 }
-#else /* !CONFIG_SCHED_SMT: */
-static inline int numa_idle_core(int idle_core, int cpu)
-{
-	return idle_core;
-}
-#endif /* !CONFIG_SCHED_SMT */
 
 /*
  * Gather all necessary information to make NUMA balancing placement
@@ -7811,7 +7802,6 @@ static inline int __select_idle_cpu(int cpu, struct task_struct *p)
 	return -1;
 }
 
-#ifdef CONFIG_SCHED_SMT
 DEFINE_STATIC_KEY_FALSE(sched_smt_present);
 EXPORT_SYMBOL_GPL(sched_smt_present);
 
@@ -7921,29 +7911,6 @@ static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int t
 	return -1;
 }
 
-#else /* !CONFIG_SCHED_SMT: */
-
-static inline void set_idle_cores(int cpu, int val)
-{
-}
-
-static inline bool test_idle_cores(int cpu)
-{
-	return false;
-}
-
-static inline int select_idle_core(struct task_struct *p, int core, struct cpumask *cpus, int *idle_cpu)
-{
-	return __select_idle_cpu(core, p);
-}
-
-static inline int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target)
-{
-	return -1;
-}
-
-#endif /* !CONFIG_SCHED_SMT */
-
 /*
  * Scan the LLC domain for idle CPUs; this is dynamically regulated by
  * comparing the average scan cost (tracked in sd->avg_scan_cost) against the
@@ -12036,9 +12003,7 @@ static int should_we_balance(struct lb_env *env)
 			 * idle has been found, then its not needed to check other
 			 * SMT siblings for idleness:
 			 */
-#ifdef CONFIG_SCHED_SMT
 			cpumask_andnot(swb_cpus, swb_cpus, cpu_smt_mask(cpu));
-#endif
 			continue;
 		}
 
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 9f63b15d309d..e476623a0c2a 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1667,7 +1667,6 @@ do {						\
 	flags = _raw_spin_rq_lock_irqsave(rq);	\
 } while (0)
 
-#ifdef CONFIG_SCHED_SMT
 extern void __update_idle_core(struct rq *rq);
 
 static inline void update_idle_core(struct rq *rq)
@@ -1676,12 +1675,7 @@ static inline void update_idle_core(struct rq *rq)
 		__update_idle_core(rq);
 }
 
-#else /* !CONFIG_SCHED_SMT: */
-static inline void update_idle_core(struct rq *rq) { }
-#endif /* !CONFIG_SCHED_SMT */
-
 #ifdef CONFIG_FAIR_GROUP_SCHED
-
 static inline struct task_struct *task_of(struct sched_entity *se)
 {
 	WARN_ON_ONCE(!entity_is_task(se));
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 5847b83d9d55..a1f46e3f4ede 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -1310,9 +1310,7 @@ static void init_sched_groups_capacity(int cpu, struct sched_domain *sd)
 		cpumask_copy(mask, sched_group_span(sg));
 		for_each_cpu(cpu, mask) {
 			cores++;
-#ifdef CONFIG_SCHED_SMT
 			cpumask_andnot(mask, mask, cpu_smt_mask(cpu));
-#endif
 		}
 		sg->cores = cores;
 
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index 3fe6b0c99f3d..773d8e9ae30c 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -633,6 +633,11 @@ int stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus)
 EXPORT_SYMBOL_GPL(stop_machine);
 
 #ifdef CONFIG_SCHED_SMT
+/*
+ * INTEL_IFS is the only user of this API. That selftest can
+ * only be compiled if SMP=y. On x86 it selects SCHED_SMT.
+ * Keep the ifdefs for now.
+ */
 int stop_core_cpuslocked(unsigned int cpu, cpu_stop_fn_t fn, void *data)
 {
 	const struct cpumask *smt_mask = cpu_smt_mask(cpu);
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 3d2e3b2ec528..c911fdcb4428 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -8198,11 +8198,7 @@ static bool __init cpus_dont_share(int cpu0, int cpu1)
 
 static bool __init cpus_share_smt(int cpu0, int cpu1)
 {
-#ifdef CONFIG_SCHED_SMT
 	return cpumask_test_cpu(cpu0, cpu_smt_mask(cpu1));
-#else
-	return false;
-#endif
 }
 
 static bool __init cpus_share_numa(int cpu0, int cpu1)
-- 
2.47.3


  parent reply	other threads:[~2026-05-12 15:22 UTC|newest]

Thread overview: 15+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2026-05-12 15:21 [PATCH v2 0/3] sched: Simplify ifdeffery around CONFIG_SCHED_SMT Shrikanth Hegde
2026-05-12 15:21 ` [PATCH v2 1/3] topology: Introduce cpu_smt_mask for CONFIG_SCHED_SMT=n Shrikanth Hegde
2026-05-12 16:56   ` Phil Auld
2026-05-12 15:21 ` Shrikanth Hegde [this message]
2026-05-12 16:57   ` [PATCH v2 2/3] sched: Simplify ifdeffery around cpu_smt_mask Phil Auld
2026-05-12 15:21 ` [PATCH v2 3/3] sched/fair: Add compile time check in fastpaths for CONFIG_SCHED_SMT=n Shrikanth Hegde
2026-05-12 16:59   ` Phil Auld
2026-05-13  4:15     ` Shrikanth Hegde
2026-05-13 11:53       ` Phil Auld
2026-05-13  6:07   ` K Prateek Nayak
2026-05-13  6:16     ` Shrikanth Hegde
2026-05-13  6:39     ` Shrikanth Hegde
2026-05-13  6:57       ` K Prateek Nayak
2026-05-12 17:48 ` [PATCH v2 0/3] sched: Simplify ifdeffery around CONFIG_SCHED_SMT Valentin Schneider
2026-05-13  6:20 ` K Prateek Nayak

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20260512152125.308280-3-sshegde@linux.ibm.com \
    --to=sshegde@linux.ibm.com \
    --cc=arighi@nvidia.com \
    --cc=bsegall@google.com \
    --cc=dietmar.eggemann@arm.com \
    --cc=juri.lelli@redhat.com \
    --cc=kprateek.nayak@amd.com \
    --cc=linux-kernel@vger.kernel.org \
    --cc=mgorman@suse.de \
    --cc=mingo@kernel.org \
    --cc=peterz@infradead.org \
    --cc=rostedt@goodmis.org \
    --cc=tj@kernel.org \
    --cc=vincent.guittot@linaro.org \
    --cc=vschneid@redhat.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.