From: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
To: stable@vger.kernel.org
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>,
	patches@lists.linux.dev,
	"Peter Zijlstra (Intel)" <peterz@infradead.org>,
	Dietmar Eggemann <dietmar.eggemann@arm.com>,
	Chris Mason <clm@meta.com>, Ajay Kaher <ajay.kaher@broadcom.com>
Subject: [PATCH 6.12 13/16] sched/fair: Proportional newidle balance
Date: Fri,  9 Jan 2026 12:43:54 +0100
Message-ID: <20260109111951.922467193@linuxfoundation.org>
In-Reply-To: <20260109111951.415522519@linuxfoundation.org>

6.12-stable review patch.  If anyone has any objections, please let me know.

------------------

From: Peter Zijlstra <peterz@infradead.org>

commit 33cf66d88306663d16e4759e9d24766b0aaa2e17 upstream.

Add a randomized algorithm that runs newidle balancing with a
probability proportional to its success rate.

This improves schbench significantly:

 6.18-rc4:			2.22 Mrps/s
 6.18-rc4+revert:		2.04 Mrps/s
 6.18-rc4+revert+random:	2.18 Mrps/s

Conversely, per Adam Li, this affects SpecJBB slightly, reducing it by 1%:

 6.17:			-6%
 6.17+revert:		 0%
 6.17+revert+random:	-1%
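
For illustration, the gating scheme amounts to the following stand-alone
sketch (a minimal user-space model with made-up names; the authoritative
change is the kernel/sched/fair.c hunk below):

	/* Illustrative model of the proportional newidle gate; not kernel code. */
	#include <stdlib.h>

	struct ni_stats {
		unsigned int call;	/* decayed number of attempts */
		unsigned int success;	/* decayed number of successful pulls */
		unsigned int ratio;	/* successes per 1024 calls */
	};

	static void ni_account(struct ni_stats *s, unsigned int success)
	{
		s->call++;
		s->success += success;
		if (s->call >= 1024) {	/* window full: publish ratio, halve history */
			s->ratio = s->success;
			s->call /= 2;
			s->success /= 2;
		}
	}

	/* Returns non-zero when balancing should run; *weight compensates skips. */
	static int ni_should_balance(struct ni_stats *s, unsigned int *weight)
	{
		unsigned int w = 1 + s->ratio;

		if (rand() % 1024 > w) {
			ni_account(s, 0);	/* a skipped attempt counts as a failure */
			return 0;
		}
		/*
		 * Balancing runs with probability ~w/1024, so a success is
		 * credited ~1024/w times (rounded) to keep the measured
		 * success rate unbiased.
		 */
		*weight = (1024 + w / 2) / w;
		return 1;
	}

A caller would invoke ni_account(s, *weight) only when a task was actually
pulled, mirroring the weight * !!pulled_task argument in the patch.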

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
Tested-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
Tested-by: Chris Mason <clm@meta.com>
Link: https://lkml.kernel.org/r/6825c50d-7fa7-45d8-9b81-c6e7e25738e2@meta.com
Link: https://patch.msgid.link/20251107161739.770122091@infradead.org
[ Ajay: Modified to apply on v6.12 ]
Signed-off-by: Ajay Kaher <ajay.kaher@broadcom.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
 include/linux/sched/topology.h |    3 ++
 kernel/sched/core.c            |    3 ++
 kernel/sched/fair.c            |   44 +++++++++++++++++++++++++++++++++++++----
 kernel/sched/features.h        |    5 ++++
 kernel/sched/sched.h           |    7 ++++++
 kernel/sched/topology.c        |    6 +++++
 6 files changed, 64 insertions(+), 4 deletions(-)

--- a/include/linux/sched/topology.h
+++ b/include/linux/sched/topology.h
@@ -106,6 +106,9 @@ struct sched_domain {
 	unsigned int nr_balance_failed; /* initialise to 0 */
 
 	/* idle_balance() stats */
+	unsigned int newidle_call;
+	unsigned int newidle_success;
+	unsigned int newidle_ratio;
 	u64 max_newidle_lb_cost;
 	unsigned long last_decay_max_lb_cost;
 
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -118,6 +118,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(sched_updat
 EXPORT_TRACEPOINT_SYMBOL_GPL(sched_compute_energy_tp);
 
 DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
+DEFINE_PER_CPU(struct rnd_state, sched_rnd_state);
 
 #ifdef CONFIG_SCHED_DEBUG
 /*
@@ -8335,6 +8336,8 @@ void __init sched_init_smp(void)
 {
 	sched_init_numa(NUMA_NO_NODE);
 
+	prandom_init_once(&sched_rnd_state);
+
 	/*
 	 * There's no userspace yet to cause hotplug operations; hence all the
 	 * CPU masks are stable and all blatant races in the below code cannot
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -12186,11 +12186,27 @@ void update_max_interval(void)
 	max_load_balance_interval = HZ*num_online_cpus()/10;
 }
 
-static inline bool update_newidle_cost(struct sched_domain *sd, u64 cost)
+static inline void update_newidle_stats(struct sched_domain *sd, unsigned int success)
+{
+	sd->newidle_call++;
+	sd->newidle_success += success;
+
+	if (sd->newidle_call >= 1024) {
+		sd->newidle_ratio = sd->newidle_success;
+		sd->newidle_call /= 2;
+		sd->newidle_success /= 2;
+	}
+}
+
+static inline bool
+update_newidle_cost(struct sched_domain *sd, u64 cost, unsigned int success)
 {
 	unsigned long next_decay = sd->last_decay_max_lb_cost + HZ;
 	unsigned long now = jiffies;
 
+	if (cost)
+		update_newidle_stats(sd, success);
+
 	if (cost > sd->max_newidle_lb_cost) {
 		/*
 		 * Track max cost of a domain to make sure to not delay the
@@ -12238,7 +12254,7 @@ static void sched_balance_domains(struct
 		 * Decay the newidle max times here because this is a regular
 		 * visit to all the domains.
 		 */
-		need_decay = update_newidle_cost(sd, 0);
+		need_decay = update_newidle_cost(sd, 0, 0);
 		max_cost += sd->max_newidle_lb_cost;
 
 		/*
@@ -12896,6 +12912,22 @@ static int sched_balance_newidle(struct
 			break;
 
 		if (sd->flags & SD_BALANCE_NEWIDLE) {
+			unsigned int weight = 1;
+
+			if (sched_feat(NI_RANDOM)) {
+				/*
+				 * Throw a 1k sided dice; and only run
+				 * newidle_balance according to the success
+				 * rate.
+				 */
+				u32 d1k = sched_rng() % 1024;
+				weight = 1 + sd->newidle_ratio;
+				if (d1k > weight) {
+					update_newidle_stats(sd, 0);
+					continue;
+				}
+				weight = (1024 + weight/2) / weight;
+			}
 
 			pulled_task = sched_balance_rq(this_cpu, this_rq,
 						   sd, CPU_NEWLY_IDLE,
@@ -12903,10 +12935,14 @@ static int sched_balance_newidle(struct
 
 			t1 = sched_clock_cpu(this_cpu);
 			domain_cost = t1 - t0;
-			update_newidle_cost(sd, domain_cost);
-
 			curr_cost += domain_cost;
 			t0 = t1;
+
+			/*
+			 * Track max cost of a domain to make sure to not delay the
+			 * next wakeup on the CPU.
+			 */
+			update_newidle_cost(sd, domain_cost, weight * !!pulled_task);
 		}
 
 		/*
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
@@ -122,3 +122,8 @@ SCHED_FEAT(WA_BIAS, true)
 SCHED_FEAT(UTIL_EST, true)
 
 SCHED_FEAT(LATENCY_WARN, false)
+
+/*
+ * Do newidle balancing proportional to its success rate using randomization.
+ */
+SCHED_FEAT(NI_RANDOM, true)
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -5,6 +5,7 @@
 #ifndef _KERNEL_SCHED_SCHED_H
 #define _KERNEL_SCHED_SCHED_H
 
+#include <linux/prandom.h>
 #include <linux/sched/affinity.h>
 #include <linux/sched/autogroup.h>
 #include <linux/sched/cpufreq.h>
@@ -1348,6 +1349,12 @@ static inline bool is_migration_disabled
 }
 
 DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
+DECLARE_PER_CPU(struct rnd_state, sched_rnd_state);
+
+static inline u32 sched_rng(void)
+{
+	return prandom_u32_state(this_cpu_ptr(&sched_rnd_state));
+}
 
 #define cpu_rq(cpu)		(&per_cpu(runqueues, (cpu)))
 #define this_rq()		this_cpu_ptr(&runqueues)
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -1632,6 +1632,12 @@ sd_init(struct sched_domain_topology_lev
 
 		.last_balance		= jiffies,
 		.balance_interval	= sd_weight,
+
+		/* 50% success rate */
+		.newidle_call		= 512,
+		.newidle_success	= 256,
+		.newidle_ratio		= 512,
+
 		.max_newidle_lb_cost	= 0,
 		.last_decay_max_lb_cost	= jiffies,
 		.child			= child,
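
As a worked example of the defaults above (editor's arithmetic, not part of
the patch): sd_init() starts every domain at newidle_ratio = 512, so

	w      = 1 + newidle_ratio    = 513
	P(run) = (w + 1) / 1024       ~= 50%
	weight = (1024 + w/2) / w     = 1280/513 = 2

i.e. newidle balancing initially runs on about half of the opportunities,
and each successful pull is credited twice to compensate for the skipped
half. With CONFIG_SCHED_DEBUG, the behaviour can be disabled at runtime by
writing NO_NI_RANDOM to /sys/kernel/debug/sched/features.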



