From: Nikhil Rao <ncrao@google.com>
To: Ingo Molnar <mingo@elte.hu>, Peter Zijlstra <peterz@infradead.org>
Cc: Paul Turner <pjt@google.com>, Mike Galbraith <efault@gmx.de>,
linux-kernel@vger.kernel.org, Nikhil Rao <ncrao@google.com>
Subject: [RFC][Patch 13/18] sched: update f_b_g() to use u64 for weights
Date: Wed, 20 Apr 2011 13:51:32 -0700
Message-ID: <1303332697-16426-14-git-send-email-ncrao@google.com>
In-Reply-To: <1303332697-16426-1-git-send-email-ncrao@google.com>

This patch updates find_busiest_group() (f_b_g()) and its helper functions
to use u64 for load weights, to handle the increased sched load resolution.

Signed-off-by: Nikhil Rao <ncrao@google.com>
---
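A note for reviewers on the div_u64() conversions: with the higher load
resolution these weights no longer fit in an unsigned long on 32-bit, and
a plain '/' on a u64 dividend makes gcc emit a call to the libgcc helper
__udivdi3(), which the kernel deliberately does not provide on most
architectures. div_u64() from <linux/math64.h> (u64 dividend, u32 divisor)
is used instead. A minimal sketch of the pattern applied throughout this
patch (the helper and its arguments are made up for illustration):

	#include <linux/math64.h>

	/*
	 * Illustrative only: average a u64 load sum over a task count.
	 * "total / nr" would break the 32-bit build once total is u64;
	 * div_u64() does the division portably. The divisor is
	 * truncated to u32, which is fine for task counts and
	 * cpu_power values.
	 */
	static u64 avg_load_sketch(u64 total, unsigned int nr)
	{
		return div_u64(total, nr);
	}

A worked example of the new load_above_capacity arithmetic follows the
patch.
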
kernel/sched_fair.c | 51 +++++++++++++++++++++++++++------------------------
1 files changed, 27 insertions(+), 24 deletions(-)

diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 12b25b7..8478aac 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -2946,12 +2946,13 @@ static int check_asym_packing(struct sched_domain *sd,
 static inline void fix_small_imbalance(struct sd_lb_stats *sds,
 				int this_cpu, unsigned long *imbalance)
 {
-	unsigned long tmp, pwr_now = 0, pwr_move = 0;
+	u64 tmp, pwr_now = 0, pwr_move = 0;
 	unsigned int imbn = 2;
 	unsigned long scaled_busy_load_per_task;
 
 	if (sds->this_nr_running) {
-		sds->this_load_per_task /= sds->this_nr_running;
+		sds->this_load_per_task = div_u64(sds->this_load_per_task,
+						  sds->this_nr_running);
 		if (sds->busiest_load_per_task >
 				sds->this_load_per_task)
 			imbn = 1;
@@ -2959,9 +2960,9 @@ static inline void fix_small_imbalance(struct sd_lb_stats *sds,
 		sds->this_load_per_task =
 			cpu_avg_load_per_task(this_cpu);
 
-	scaled_busy_load_per_task = sds->busiest_load_per_task
-					 * SCHED_POWER_SCALE;
-	scaled_busy_load_per_task /= sds->busiest->cpu_power;
+	scaled_busy_load_per_task =
+		div_u64(sds->busiest_load_per_task * SCHED_POWER_SCALE,
+			sds->busiest->cpu_power);
 
 	if (sds->max_load - sds->this_load + scaled_busy_load_per_task >=
 			(scaled_busy_load_per_task * imbn)) {
@@ -2979,11 +2980,11 @@ static inline void fix_small_imbalance(struct sd_lb_stats *sds,
 			min(sds->busiest_load_per_task, sds->max_load);
 	pwr_now += sds->this->cpu_power *
 			min(sds->this_load_per_task, sds->this_load);
-	pwr_now /= SCHED_POWER_SCALE;
+	pwr_now = div_u64(pwr_now, SCHED_POWER_SCALE);
 
 	/* Amount of load we'd subtract */
-	tmp = (sds->busiest_load_per_task * SCHED_POWER_SCALE) /
-		sds->busiest->cpu_power;
+	tmp = div_u64(sds->busiest_load_per_task * SCHED_POWER_SCALE,
+		      sds->busiest->cpu_power);
 	if (sds->max_load > tmp)
 		pwr_move += sds->busiest->cpu_power *
 			min(sds->busiest_load_per_task, sds->max_load - tmp);
@@ -2991,14 +2992,15 @@ static inline void fix_small_imbalance(struct sd_lb_stats *sds,
 	/* Amount of load we'd add */
 	if (sds->max_load * sds->busiest->cpu_power <
 		sds->busiest_load_per_task * SCHED_POWER_SCALE)
-		tmp = (sds->max_load * sds->busiest->cpu_power) /
-			sds->this->cpu_power;
+		tmp = div_u64(sds->max_load * sds->busiest->cpu_power,
+			      sds->this->cpu_power);
 	else
-		tmp = (sds->busiest_load_per_task * SCHED_POWER_SCALE) /
-			sds->this->cpu_power;
+		tmp = div_u64(sds->busiest_load_per_task * SCHED_POWER_SCALE,
+			      sds->this->cpu_power);
+
 	pwr_move += sds->this->cpu_power *
 		min(sds->this_load_per_task, sds->this_load + tmp);
-	pwr_move /= SCHED_POWER_SCALE;
+	pwr_move = div_u64(pwr_move, SCHED_POWER_SCALE);
 
 	/* Move if we gain throughput */
 	if (pwr_move > pwr_now)
@@ -3015,9 +3017,10 @@ static inline void fix_small_imbalance(struct sd_lb_stats *sds,
 static inline void calculate_imbalance(struct sd_lb_stats *sds, int this_cpu,
 				unsigned long *imbalance)
 {
-	unsigned long max_pull, load_above_capacity = ~0UL;
+	u64 max_pull, load_above_capacity = ~0ULL;
 
-	sds->busiest_load_per_task /= sds->busiest_nr_running;
+	sds->busiest_load_per_task = div_u64(sds->busiest_load_per_task,
+					     sds->busiest_nr_running);
 	if (sds->group_imb) {
 		sds->busiest_load_per_task =
 			min(sds->busiest_load_per_task, sds->avg_load);
@@ -3034,15 +3037,15 @@ static inline void calculate_imbalance(struct sd_lb_stats *sds, int this_cpu,
 	}
 
 	if (!sds->group_imb) {
+		unsigned long imb_capacity = (sds->busiest_nr_running -
+					      sds->busiest_group_capacity);
 		/*
 		 * Don't want to pull so many tasks that a group would go idle.
 		 */
-		load_above_capacity = (sds->busiest_nr_running -
-						sds->busiest_group_capacity);
-
-		load_above_capacity *= (SCHED_LOAD_SCALE * SCHED_POWER_SCALE);
-
-		load_above_capacity /= sds->busiest->cpu_power;
+		load_above_capacity = NICE_0_LOAD * imb_capacity;
+		load_above_capacity =
+			div_u64(load_above_capacity * SCHED_POWER_SCALE,
+				sds->busiest->cpu_power);
 	}
 
 	/*
@@ -3059,8 +3062,8 @@ static inline void calculate_imbalance(struct sd_lb_stats *sds, int this_cpu,
 
 	/* How much load to actually move to equalise the imbalance */
 	*imbalance = min(max_pull * sds->busiest->cpu_power,
-		(sds->avg_load - sds->this_load) * sds->this->cpu_power)
-			/ SCHED_POWER_SCALE;
+		(sds->avg_load - sds->this_load) * sds->this->cpu_power);
+	*imbalance = div_u64(*imbalance, SCHED_POWER_SCALE);
 
 	/*
 	 * if *imbalance is less than the average load per runnable task
@@ -3129,7 +3132,7 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 	if (!sds.busiest || sds.busiest_nr_running == 0)
 		goto out_balanced;
 
-	sds.avg_load = (SCHED_POWER_SCALE * sds.total_load) / sds.total_pwr;
+	sds.avg_load = div_u64(sds.total_load * SCHED_POWER_SCALE, sds.total_pwr);
 
 	/*
 	 * If the busiest group is imbalanced the below checks don't
--
1.7.3.1
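
The promised worked example of the new load_above_capacity computation
(numbers assume the values this series proposes: NICE_0_LOAD = 1024 << 10
= 1048576 and SCHED_POWER_SCALE = 1024; the group values are made up).
With busiest_nr_running = 3, busiest_group_capacity = 2 and
busiest->cpu_power = 2048:

	imb_capacity        = 3 - 2 = 1
	load_above_capacity = NICE_0_LOAD * 1
	                    = 1048576
	load_above_capacity = div_u64(1048576 * 1024, 2048)
	                    = 524288

That is one nice-0 task's worth of load, halved because the group has
twice the reference cpu_power. The intermediate product is already 2^30,
so a busiest group running only a few tasks above capacity would overflow
a 32-bit unsigned long; hence the u64 arithmetic and div_u64().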