public inbox for linux-kernel@vger.kernel.org
 help / color / mirror / Atom feed
From: Peter Zijlstra <a.p.zijlstra@chello.nl>
To: Ingo Molnar <mingo@elte.hu>
Cc: linux-kernel@vger.kernel.org, Gautham R Shenoy <ego@in.ibm.com>,
	Andreas Herrmann <andreas.herrmann3@amd.com>,
	Balbir Singh <balbir@in.ibm.com>,
	Peter Zijlstra <a.p.zijlstra@chello.nl>
Subject: [RFC][PATCH 03/14] sched: update the cpu_power sum during load-balance
Date: Thu, 03 Sep 2009 15:21:48 +0200	[thread overview]
Message-ID: <20090903132212.317982805@chello.nl> (raw)
In-Reply-To: 20090903132145.482814810@chello.nl

[-- Attachment #1: sched-lb-3.patch --]
[-- Type: text/plain, Size: 2677 bytes --]

In order to prepare for a more dynamic cpu_power, update the group sum
while walking the sched domains during load-balance.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
---
 kernel/sched.c |   33 +++++++++++++++++++++++++++++----
 1 file changed, 29 insertions(+), 4 deletions(-)

Index: linux-2.6/kernel/sched.c
===================================================================
--- linux-2.6.orig/kernel/sched.c
+++ linux-2.6/kernel/sched.c
@@ -3699,6 +3699,28 @@ static inline int check_power_save_busie
 }
 #endif /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
 
+static void update_sched_power(struct sched_domain *sd)
+{
+	struct sched_domain *child = sd->child;
+	struct sched_group *group, *sdg = sd->groups;
+	unsigned long power = sdg->__cpu_power;
+
+	if (!child) {
+		/* compute cpu power for this cpu */
+		return;
+	}
+
+	sdg->__cpu_power = 0;
+
+	group = child->groups;
+	do {
+		sdg->__cpu_power += group->__cpu_power;
+		group = group->next;
+	} while (group != child->groups);
+
+	if (power != sdg->__cpu_power)
+		sdg->reciprocal_cpu_power = reciprocal_value(sdg->__cpu_power);
+}
 
 /**
  * update_sg_lb_stats - Update sched_group's statistics for load balancing.
@@ -3712,7 +3734,8 @@ static inline int check_power_save_busie
  * @balance: Should we balance.
  * @sgs: variable to hold the statistics for this group.
  */
-static inline void update_sg_lb_stats(struct sched_group *group, int this_cpu,
+static inline void update_sg_lb_stats(struct sched_domain *sd,
+			struct sched_group *group, int this_cpu,
 			enum cpu_idle_type idle, int load_idx, int *sd_idle,
 			int local_group, const struct cpumask *cpus,
 			int *balance, struct sg_lb_stats *sgs)
@@ -3723,8 +3746,11 @@ static inline void update_sg_lb_stats(st
 	unsigned long sum_avg_load_per_task;
 	unsigned long avg_load_per_task;
 
-	if (local_group)
+	if (local_group) {
 		balance_cpu = group_first_cpu(group);
+		if (balance_cpu == this_cpu)
+			update_sched_power(sd);
+	}
 
 	/* Tally up the load of all CPUs in the group */
 	sum_avg_load_per_task = avg_load_per_task = 0;
@@ -3828,7 +3854,7 @@ static inline void update_sd_lb_stats(st
 		local_group = cpumask_test_cpu(this_cpu,
 					       sched_group_cpus(group));
 		memset(&sgs, 0, sizeof(sgs));
-		update_sg_lb_stats(group, this_cpu, idle, load_idx, sd_idle,
+		update_sg_lb_stats(sd, group, this_cpu, idle, load_idx, sd_idle,
 				local_group, cpus, balance, &sgs);
 
 		if (local_group && balance && !(*balance))
@@ -3863,7 +3889,6 @@ static inline void update_sd_lb_stats(st
 		update_sd_power_savings_stats(group, sds, local_group, &sgs);
 		group = group->next;
 	} while (group != sd->groups);
-
 }
 
 /**

-- 


  parent reply	other threads:[~2009-09-03 13:23 UTC|newest]

Thread overview: 23+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2009-09-03 13:21 [RFC][PATCH 00/14] load-balancing and cpu_power -v3 Peter Zijlstra
2009-09-03 13:21 ` [RFC][PATCH 01/14] sched: restore __cpu_power to a straight sum of power Peter Zijlstra
2009-09-03 13:21 ` [RFC][PATCH 02/14] sched: SD_PREFER_SIBLING Peter Zijlstra
2009-09-03 13:21 ` Peter Zijlstra [this message]
2009-09-03 13:21 ` [RFC][PATCH 04/14] sched: add smt_gain Peter Zijlstra
2009-09-03 13:21 ` [RFC][PATCH 05/14] sched: dynamic cpu_power Peter Zijlstra
2009-09-03 13:21 ` [RFC][PATCH 06/14] sched: scale down cpu_power due to RT tasks Peter Zijlstra
2009-09-03 13:21 ` [RFC][PATCH 07/14] sched: try to deal with low capacity Peter Zijlstra
2009-09-03 13:21 ` [RFC][PATCH 08/14] sched: remove reciprocal for cpu_power Peter Zijlstra
2009-09-03 13:59   ` Peter Zijlstra
2009-09-03 13:21 ` [RFC][PATCH 09/14] x86: move APERF/MPERF into a X86_FEATURE Peter Zijlstra
2009-09-03 13:21 ` [RFC][PATCH 10/14] x86: generic aperf/mperf code Peter Zijlstra
2009-09-04  9:19   ` Thomas Renninger
2009-09-04  9:25     ` Peter Zijlstra
2009-09-04  9:27       ` Peter Zijlstra
2009-09-04  9:34         ` Thomas Renninger
2009-09-04 14:22         ` Dave Jones
2009-09-04 14:42           ` Peter Zijlstra
2009-09-04 17:45             ` H. Peter Anvin
2009-09-03 13:21 ` [RFC][PATCH 11/14] sched: provide arch_scale_freq_power Peter Zijlstra
2009-09-03 13:21 ` [RFC][PATCH 12/14] x86: sched: provide arch implementations using aperf/mperf Peter Zijlstra
2009-09-03 13:21 ` [RFC][PATCH 13/14] sched: cleanup wake_idle power saving Peter Zijlstra
2009-09-03 13:21 ` [RFC][PATCH 14/14] sched: cleanup wake_idle Peter Zijlstra

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20090903132212.317982805@chello.nl \
    --to=a.p.zijlstra@chello.nl \
    --cc=andreas.herrmann3@amd.com \
    --cc=balbir@in.ibm.com \
    --cc=ego@in.ibm.com \
    --cc=linux-kernel@vger.kernel.org \
    --cc=mingo@elte.hu \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox; see mirroring instructions
for how to clone and mirror all data and code used for this inbox