public inbox for linux-kernel@vger.kernel.org
From: Peter Zijlstra <peterz@infradead.org>
To: Chris Mason <clm@meta.com>,
	Joseph Salisbury <joseph.salisbury@oracle.com>,
	Adam Li <adamli@os.amperecomputing.com>,
	Hazem Mohamed Abuelfotoh <abuehaze@amazon.com>,
	Josh Don <joshdon@google.com>
Cc: mingo@redhat.com, peterz@infradead.org, juri.lelli@redhat.com,
	vincent.guittot@linaro.org, dietmar.eggemann@arm.com,
	rostedt@goodmis.org, bsegall@google.com, mgorman@suse.de,
	vschneid@redhat.com, linux-kernel@vger.kernel.org
Subject: [PATCH 4/4] sched/fair: Proportional newidle balance
Date: Fri, 07 Nov 2025 17:06:49 +0100
Message-ID: <20251107161739.770122091@infradead.org>
In-Reply-To: <20251107160645.929564468@infradead.org>

Add a randomized algorithm that runs newidle balancing with a
probability proportional to its recent success rate.

This improves schbench significantly:

 6.18-rc4:			2.22 Mrps/s
 6.18-rc4+revert:		2.04 Mrps/s
 6.18-rc4+revert+random:	2.18 Mrps/s

Conversely, per Adam Li this affects SPECjbb slightly, reducing it by 1%
relative to the plain revert:

 6.17:			-6%
 6.17+revert:		 0%
 6.17+revert+random:	-1%
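
For illustration only (not part of this patch), here is a minimal
userspace sketch of the accounting and gating; the helper names and the
fixed 10% pull rate are made up. The ratio tracks weighted successes
per ~1024 calls over a decaying window, each call is gated on a 10-bit
random value against that ratio, and a successful pull is credited with
a weight of roughly 1024/ratio so that the skipped attempts do not bias
the estimate:

#include <stdio.h>
#include <stdlib.h>

/* Decaying counters, seeded at a 50% ratio like sd_init() below. */
static unsigned int calls = 512, successes = 256, ratio = 512;

static void account(unsigned int success)
{
	calls++;
	successes += success;
	if (calls >= 1024) {	/* refresh the ratio, halve the window */
		ratio = successes;
		calls /= 2;
		successes /= 2;
	}
}

int main(void)
{
	for (int i = 0; i < 1000000; i++) {
		unsigned int d1k = rand() % 1024;	/* the 1k-sided die */
		unsigned int weight = 1 + ratio;

		if (d1k > weight) {	/* skip this balance attempt */
			account(0);
			continue;
		}
		/* Rounded inverse of the gating probability. */
		weight = (1024 + weight / 2) / weight;

		/* Toy model: an attempted balance pulls a task 10% of the time. */
		account((rand() % 10 == 0) ? weight : 0);
	}
	printf("ratio settled near %u/1024 (~%u%%)\n", ratio, ratio * 100 / 1024);
	return 0;
}

With the 512-call / 256-success seed from sd_init() the first attempts
run at roughly 50% probability, and the halving of the window keeps the
ratio responsive once the observed pull rate diverges from that.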

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
---
 include/linux/sched/topology.h |    3 ++
 kernel/sched/core.c            |    3 ++
 kernel/sched/fair.c            |   43 +++++++++++++++++++++++++++++++++++++----
 kernel/sched/features.h        |    5 ++++
 kernel/sched/sched.h           |    7 ++++++
 kernel/sched/topology.c        |    6 +++++
 6 files changed, 63 insertions(+), 4 deletions(-)

--- a/include/linux/sched/topology.h
+++ b/include/linux/sched/topology.h
@@ -92,6 +92,9 @@ struct sched_domain {
 	unsigned int nr_balance_failed; /* initialise to 0 */
 
 	/* idle_balance() stats */
+	unsigned int newidle_call;
+	unsigned int newidle_success;
+	unsigned int newidle_ratio;
 	u64 max_newidle_lb_cost;
 	unsigned long last_decay_max_lb_cost;
 
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -121,6 +121,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(sched_updat
 EXPORT_TRACEPOINT_SYMBOL_GPL(sched_compute_energy_tp);
 
 DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
+DEFINE_PER_CPU(struct rnd_state, sched_rnd_state);
 
 #ifdef CONFIG_SCHED_PROXY_EXEC
 DEFINE_STATIC_KEY_TRUE(__sched_proxy_exec);
@@ -8589,6 +8590,8 @@ void __init sched_init_smp(void)
 {
 	sched_init_numa(NUMA_NO_NODE);
 
+	prandom_init_once(&sched_rnd_state);
+
 	/*
 	 * There's no userspace yet to cause hotplug operations; hence all the
 	 * CPU masks are stable and all blatant races in the below code cannot
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -12146,11 +12146,26 @@ void update_max_interval(void)
 	max_load_balance_interval = HZ*num_online_cpus()/10;
 }
 
-static inline bool update_newidle_cost(struct sched_domain *sd, u64 cost)
+static inline void update_newidle_stats(struct sched_domain *sd, unsigned int success)
+{
+	sd->newidle_call++;
+	sd->newidle_success += success;
+
+	if (sd->newidle_call >= 1024) {
+		sd->newidle_ratio = sd->newidle_success;
+		sd->newidle_call /= 2;
+		sd->newidle_success /= 2;
+	}
+}
+
+static inline bool
+update_newidle_cost(struct sched_domain *sd, u64 cost, unsigned int success)
 {
 	unsigned long next_decay = sd->last_decay_max_lb_cost + HZ;
 	unsigned long now = jiffies;
 
+	update_newidle_stats(sd, success);
+
 	if (cost > sd->max_newidle_lb_cost) {
 		/*
 		 * Track max cost of a domain to make sure to not delay the
@@ -12198,7 +12213,7 @@ static void sched_balance_domains(struct
 		 * Decay the newidle max times here because this is a regular
 		 * visit to all the domains.
 		 */
-		need_decay = update_newidle_cost(sd, 0);
+		need_decay = update_newidle_cost(sd, 0, 0);
 		max_cost += sd->max_newidle_lb_cost;
 
 		/*
@@ -12843,6 +12858,22 @@ static int sched_balance_newidle(struct
 			break;
 
 		if (sd->flags & SD_BALANCE_NEWIDLE) {
+			unsigned int weight = 1;
+
+			if (sched_feat(NI_RANDOM)) {
+				/*
+				 * Throw a 1k-sided die and only run
+				 * newidle balancing according to the
+				 * success rate.
+				 */
+				u32 d1k = sched_rng() % 1024;
+				weight = 1 + sd->newidle_ratio;
+				if (d1k > weight) {
+					update_newidle_stats(sd, 0);
+					continue;
+				}
+				weight = (1024 + weight/2) / weight;
+			}
 
 			pulled_task = sched_balance_rq(this_cpu, this_rq,
 						   sd, CPU_NEWLY_IDLE,
@@ -12850,10 +12881,14 @@ static int sched_balance_newidle(struct
 
 			t1 = sched_clock_cpu(this_cpu);
 			domain_cost = t1 - t0;
-			update_newidle_cost(sd, domain_cost);
-
 			curr_cost += domain_cost;
 			t0 = t1;
+
+			/*
+			 * Track max cost of a domain to make sure to not delay the
+			 * next wakeup on the CPU.
+			 */
+			update_newidle_cost(sd, domain_cost, weight * !!pulled_task);
 		}
 
 		/*
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
@@ -121,3 +121,8 @@ SCHED_FEAT(WA_BIAS, true)
 SCHED_FEAT(UTIL_EST, true)
 
 SCHED_FEAT(LATENCY_WARN, false)
+
+/*
+ * Do newidle balancing proportional to its success rate using randomization.
+ */
+SCHED_FEAT(NI_RANDOM, true)
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -5,6 +5,7 @@
 #ifndef _KERNEL_SCHED_SCHED_H
 #define _KERNEL_SCHED_SCHED_H
 
+#include <linux/prandom.h>
 #include <linux/sched/affinity.h>
 #include <linux/sched/autogroup.h>
 #include <linux/sched/cpufreq.h>
@@ -1348,6 +1349,12 @@ static inline bool is_migration_disabled
 }
 
 DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
+DECLARE_PER_CPU(struct rnd_state, sched_rnd_state);
+
+static inline u32 sched_rng(void)
+{
+	return prandom_u32_state(this_cpu_ptr(&sched_rnd_state));
+}
 
 #define cpu_rq(cpu)		(&per_cpu(runqueues, (cpu)))
 #define this_rq()		this_cpu_ptr(&runqueues)
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -1662,6 +1662,12 @@ sd_init(struct sched_domain_topology_lev
 
 		.last_balance		= jiffies,
 		.balance_interval	= sd_weight,
+
+		/* 50% success rate */
+		.newidle_call		= 512,
+		.newidle_success	= 256,
+		.newidle_ratio		= 512,
+
 		.max_newidle_lb_cost	= 0,
 		.last_decay_max_lb_cost	= jiffies,
 		.child			= child,




Thread overview: 65+ messages
2025-11-07 16:06 [PATCH 0/4] sched: The newidle balance regression Peter Zijlstra
2025-11-07 16:06 ` [PATCH 1/4] sched/fair: Revert max_newidle_lb_cost bump Peter Zijlstra
2025-11-14 12:19   ` [tip: sched/core] " tip-bot2 for Peter Zijlstra
2025-11-17 16:23   ` tip-bot2 for Peter Zijlstra
2025-11-07 16:06 ` [PATCH 2/4] sched/fair: Small cleanup to sched_balance_newidle() Peter Zijlstra
2025-11-10 13:55   ` Dietmar Eggemann
2025-11-10 14:04     ` Peter Zijlstra
2025-11-12 14:37   ` Shrikanth Hegde
2025-11-12 14:42     ` Peter Zijlstra
2025-11-12 15:08       ` Peter Zijlstra
2025-11-12 15:28         ` Shrikanth Hegde
2025-11-14  9:49           ` Peter Zijlstra
2025-11-14 10:22             ` Vincent Guittot
2025-11-14 11:05               ` Peter Zijlstra
2025-11-14 13:11                 ` Vincent Guittot
2025-11-14 12:19   ` [tip: sched/core] " tip-bot2 for Peter Zijlstra
2025-11-17 16:23   ` tip-bot2 for Peter Zijlstra
2025-11-07 16:06 ` [PATCH 3/4] sched/fair: Small cleanup to update_newidle_cost() Peter Zijlstra
2025-11-14 12:19   ` [tip: sched/core] " tip-bot2 for Peter Zijlstra
2025-11-17 16:23   ` tip-bot2 for Peter Zijlstra
2025-11-07 16:06 ` Peter Zijlstra [this message]
2025-11-10 13:55   ` [PATCH 4/4] sched/fair: Proportional newidle balance Dietmar Eggemann
2025-11-11  9:07   ` Adam Li
2025-11-11  9:20     ` Peter Zijlstra
2025-11-12 12:04       ` Adam Li
2025-11-12 13:41         ` Peter Zijlstra
2025-11-12 15:42   ` Shrikanth Hegde
2025-11-14  9:35     ` Peter Zijlstra
2025-11-14 12:18   ` [tip: sched/core] " tip-bot2 for Peter Zijlstra
2025-11-17 16:23   ` tip-bot2 for Peter Zijlstra
2026-01-18 20:46   ` [PATCH 4/4] " Mario Roy
2026-01-23 10:50     ` Peter Zijlstra
2026-01-23 11:03       ` Peter Zijlstra
2026-01-23 12:24         ` K Prateek Nayak
2026-01-28  4:08           ` K Prateek Nayak
2026-01-27  4:15         ` Mario Roy
2026-01-27 10:40         ` Peter Zijlstra
2026-01-27 15:17           ` Peter Zijlstra
2026-01-30  1:44             ` Mario Roy
2026-01-30  4:14             ` Mario Roy
2026-02-24  9:13             ` [tip: sched/core] sched/fair: More complex proportional " tip-bot2 for Peter Zijlstra
2026-01-25 12:22     ` [PATCH 4/4] sched/fair: Proportional " Mohamed Abuelfotoh, Hazem
2026-01-27  8:44       ` Peter Zijlstra
2026-01-28 15:48         ` Mohamed Abuelfotoh, Hazem
2026-01-29  9:19           ` Peter Zijlstra
2026-01-29  9:24             ` Peter Zijlstra
2026-01-30 16:12               ` Mohamed Abuelfotoh, Hazem
2026-01-30 13:16             ` Mohamed Abuelfotoh, Hazem
2026-02-02 10:51               ` Peter Zijlstra
2026-02-02 11:07                 ` Mohamed Abuelfotoh, Hazem
2026-02-04 12:45                   ` Mohamed Abuelfotoh, Hazem
2026-02-04 13:27                     ` Peter Zijlstra
2026-02-04 13:59                       ` Mohamed Abuelfotoh, Hazem
2026-02-04 14:05                         ` Peter Zijlstra
2026-02-04 22:48                           ` Mohamed Abuelfotoh, Hazem
2026-01-27  8:50       ` Peter Zijlstra
2026-01-27  9:13         ` Peter Zijlstra
2026-01-28 16:24           ` Mohamed Abuelfotoh, Hazem
2026-01-28 16:03         ` Mohamed Abuelfotoh, Hazem
2026-04-29  8:51   ` Qing Wang
2026-05-06  2:44     ` [PATCH] sched/fair: Replace random newidle_balance with Bresenham accumulator Qing Wang
2025-11-10 19:47 ` [PATCH 0/4] sched: The newidle balance regression Chris Mason
2025-11-11 19:08 ` Josh Don
2025-11-12 21:59 ` Chris Mason
2025-11-14  9:37   ` Peter Zijlstra
