From: Peter Zijlstra <a.p.zijlstra@chello.nl>
To: Dhaval Giani <dhaval@linux.vnet.ibm.com>
Cc: "Zhang, Yanmin" <yanmin_zhang@linux.intel.com>,
Ingo Molnar <mingo@elte.hu>, LKML <linux-kernel@vger.kernel.org>,
Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>,
Aneesh Kumar KV <aneesh.kumar@linux.vnet.ibm.com>,
Balbir Singh <balbir@in.ibm.com>
Subject: Re: VolanoMark regression with 2.6.27-rc1
Date: Mon, 04 Aug 2008 08:26:11 +0200 [thread overview]
Message-ID: <1217831171.9016.42.camel@twins> (raw)
In-Reply-To: <20080804055339.GB5444@linux.vnet.ibm.com>
On Mon, 2008-08-04 at 11:23 +0530, Dhaval Giani wrote:
> Peter, vatsa, any ideas?
---
Revert:
a7be37ac8e1565e00880531f4e2aff421a21c803 sched: revert the revert of: weight calculations
c9c294a630e28eec5f2865f028ecfc58d45c0a5a sched: fix calc_delta_asym()
ced8aa16e1db55c33c507174c1b1f9e107445865 sched: fix calc_delta_asym, #2
---
diff --git a/kernel/sched.c b/kernel/sched.c
index 21f7da9..7afb0fc 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1244,9 +1244,6 @@ static void resched_task(struct task_struct *p)
*/
#define SRR(x, y) (((x) + (1UL << ((y) - 1))) >> (y))
-/*
- * delta *= weight / lw
- */
static unsigned long
calc_delta_mine(unsigned long delta_exec, unsigned long weight,
struct load_weight *lw)
@@ -1274,6 +1271,12 @@ calc_delta_mine(unsigned long delta_exec, unsigned long weight,
return (unsigned long)min(tmp, (u64)(unsigned long)LONG_MAX);
}
+static inline unsigned long
+calc_delta_fair(unsigned long delta_exec, struct load_weight *lw)
+{
+ return calc_delta_mine(delta_exec, NICE_0_LOAD, lw);
+}
+
static inline void update_load_add(struct load_weight *lw, unsigned long inc)
{
lw->weight += inc;
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index cf2cd6c..593af05 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -334,34 +334,6 @@ int sched_nr_latency_handler(struct ctl_table *table, int write,
#endif
/*
- * delta *= w / rw
- */
-static inline unsigned long
-calc_delta_weight(unsigned long delta, struct sched_entity *se)
-{
- for_each_sched_entity(se) {
- delta = calc_delta_mine(delta,
- se->load.weight, &cfs_rq_of(se)->load);
- }
-
- return delta;
-}
-
-/*
- * delta *= rw / w
- */
-static inline unsigned long
-calc_delta_fair(unsigned long delta, struct sched_entity *se)
-{
- for_each_sched_entity(se) {
- delta = calc_delta_mine(delta,
- cfs_rq_of(se)->load.weight, &se->load);
- }
-
- return delta;
-}
-
-/*
* The idea is to set a period in which each task runs once.
*
* When there are too many tasks (sysctl_sched_nr_latency) we have to stretch
@@ -390,80 +362,47 @@ static u64 __sched_period(unsigned long nr_running)
*/
static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
- return calc_delta_weight(__sched_period(cfs_rq->nr_running), se);
+ u64 slice = __sched_period(cfs_rq->nr_running);
+
+ for_each_sched_entity(se) {
+ cfs_rq = cfs_rq_of(se);
+
+ slice *= se->load.weight;
+ do_div(slice, cfs_rq->load.weight);
+ }
+
+
+ return slice;
}
/*
* We calculate the vruntime slice of a to be inserted task
*
- * vs = s*rw/w = p
+ * vs = s/w = p/rw
*/
static u64 sched_vslice_add(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
unsigned long nr_running = cfs_rq->nr_running;
+ unsigned long weight;
+ u64 vslice;
if (!se->on_rq)
nr_running++;
- return __sched_period(nr_running);
-}
-
-/*
- * The goal of calc_delta_asym() is to be asymmetrically around NICE_0_LOAD, in
- * that it favours >=0 over <0.
- *
- * -20 |
- * |
- * 0 --------+-------
- * .'
- * 19 .'
- *
- */
-static unsigned long
-calc_delta_asym(unsigned long delta, struct sched_entity *se)
-{
- struct load_weight lw = {
- .weight = NICE_0_LOAD,
- .inv_weight = 1UL << (WMULT_SHIFT-NICE_0_SHIFT)
- };
+ vslice = __sched_period(nr_running);
for_each_sched_entity(se) {
- struct load_weight *se_lw = &se->load;
- unsigned long rw = cfs_rq_of(se)->load.weight;
-
-#ifdef CONFIG_FAIR_SCHED_GROUP
- struct cfs_rq *cfs_rq = se->my_q;
- struct task_group *tg = NULL
-
- if (cfs_rq)
- tg = cfs_rq->tg;
-
- if (tg && tg->shares < NICE_0_LOAD) {
- /*
- * scale shares to what it would have been had
- * tg->weight been NICE_0_LOAD:
- *
- * weight = 1024 * shares / tg->weight
- */
- lw.weight *= se->load.weight;
- lw.weight /= tg->shares;
-
- lw.inv_weight = 0;
-
- se_lw = &lw;
- rw += lw.weight - se->load.weight;
- } else
-#endif
+ cfs_rq = cfs_rq_of(se);
- if (se->load.weight < NICE_0_LOAD) {
- se_lw = &lw;
- rw += NICE_0_LOAD - se->load.weight;
- }
+ weight = cfs_rq->load.weight;
+ if (!se->on_rq)
+ weight += se->load.weight;
- delta = calc_delta_mine(delta, rw, se_lw);
+ vslice *= NICE_0_LOAD;
+ do_div(vslice, weight);
}
- return delta;
+ return vslice;
}
/*
@@ -480,7 +419,11 @@ __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
curr->sum_exec_runtime += delta_exec;
schedstat_add(cfs_rq, exec_clock, delta_exec);
- delta_exec_weighted = calc_delta_fair(delta_exec, curr);
+ delta_exec_weighted = delta_exec;
+ if (unlikely(curr->load.weight != NICE_0_LOAD)) {
+ delta_exec_weighted = calc_delta_fair(delta_exec_weighted,
+ &curr->load);
+ }
curr->vruntime += delta_exec_weighted;
}
@@ -687,17 +630,8 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
if (!initial) {
/* sleeps upto a single latency don't count. */
- if (sched_feat(NEW_FAIR_SLEEPERS)) {
- unsigned long thresh = sysctl_sched_latency;
-
- /*
- * convert the sleeper threshold into virtual time
- */
- if (sched_feat(NORMALIZED_SLEEPER))
- thresh = calc_delta_fair(thresh, se);
-
- vruntime -= thresh;
- }
+ if (sched_feat(NEW_FAIR_SLEEPERS))
+ vruntime -= sysctl_sched_latency;
/* ensure we never gain time by being placed backwards. */
vruntime = max_vruntime(se->vruntime, vruntime);
@@ -1277,13 +1211,11 @@ static unsigned long wakeup_gran(struct sched_entity *se)
unsigned long gran = sysctl_sched_wakeup_granularity;
/*
- * More easily preempt - nice tasks, while not making it harder for
- * + nice tasks.
+ * More easily preempt - nice tasks, while not making
+ * it harder for + nice tasks.
*/
- if (sched_feat(ASYM_GRAN))
- gran = calc_delta_asym(sysctl_sched_wakeup_granularity, se);
- else
- gran = calc_delta_fair(sysctl_sched_wakeup_granularity, se);
+ if (unlikely(se->load.weight > NICE_0_LOAD))
+ gran = calc_delta_fair(gran, &se->load);
return gran;
}
diff --git a/kernel/sched_features.h b/kernel/sched_features.h
index 862b06b..6cd8734 100644
--- a/kernel/sched_features.h
+++ b/kernel/sched_features.h
@@ -1,5 +1,4 @@
SCHED_FEAT(NEW_FAIR_SLEEPERS, 1)
-SCHED_FEAT(NORMALIZED_SLEEPER, 1)
SCHED_FEAT(WAKEUP_PREEMPT, 1)
SCHED_FEAT(START_DEBIT, 1)
SCHED_FEAT(AFFINE_WAKEUPS, 1)
@@ -7,7 +6,6 @@ SCHED_FEAT(CACHE_HOT_BUDDY, 1)
SCHED_FEAT(SYNC_WAKEUPS, 1)
SCHED_FEAT(HRTICK, 1)
SCHED_FEAT(DOUBLE_TICK, 0)
-SCHED_FEAT(ASYM_GRAN, 1)
SCHED_FEAT(LB_BIAS, 0)
SCHED_FEAT(LB_WAKEUP_UPDATE, 1)
SCHED_FEAT(ASYM_EFF_LOAD, 1)
next prev parent reply other threads:[~2008-08-04 6:26 UTC|newest]
Thread overview: 50+ messages / expand[flat|nested] mbox.gz Atom feed top
2008-07-31 3:20 VolanoMark regression with 2.6.27-rc1 Zhang, Yanmin
2008-07-31 7:31 ` Zhang, Yanmin
2008-07-31 7:39 ` Peter Zijlstra
2008-07-31 7:49 ` Zhang, Yanmin
2008-08-01 0:39 ` Zhang, Yanmin
2008-08-01 2:35 ` Miao Xie
2008-08-01 3:08 ` Zhang, Yanmin
2008-08-01 5:14 ` Dhaval Giani
2008-08-04 5:04 ` Zhang, Yanmin
2008-08-04 5:22 ` Dhaval Giani
2008-08-04 5:37 ` Zhang, Yanmin
2008-08-04 5:53 ` Dhaval Giani
2008-08-04 6:26 ` Peter Zijlstra
2008-08-04 6:26 ` Peter Zijlstra [this message]
2008-08-04 7:05 ` Dhaval Giani
2008-08-04 7:12 ` Peter Zijlstra
2008-08-06  3:26                   ` Zhang, Yanmin
2008-08-08 7:30 ` Peter Zijlstra
[not found] ` <20080811185008.GA29291@linux.vnet.ibm.com>
[not found] ` <1912726331.25608.235.camel@ymzhang>
[not found] ` <20080817115035.GA32223@linux.vnet.ibm.com>
[not found] ` <20080818052155.GA5063@linux.vnet.ibm.com>
2008-08-20 7:24 ` Zhang, Yanmin
2008-08-20 7:41 ` Peter Zijlstra
2008-08-20 10:51 ` Ingo Molnar
2008-08-20 13:32 ` Peter Zijlstra
2008-08-20 13:47 ` Ingo Molnar
2008-08-21 2:25 ` Zhang, Yanmin
2008-08-21 6:16 ` Ingo Molnar
2008-08-21 6:48 ` Zhang, Yanmin
2008-08-29 3:35 ` Zhang, Yanmin
2008-08-29 3:38 ` Zhang, Yanmin
2008-08-20 14:32 ` adobriyan
2008-08-20 14:33 ` Peter Zijlstra
2008-08-20 15:10 ` Nick Piggin
2008-08-20 15:15 ` Peter Zijlstra
2008-08-20 16:29 ` Ray Lee
2008-08-20 16:51 ` Peter Zijlstra
2008-08-20 17:21 ` Peter Zijlstra
2008-08-20 17:55 ` Nick Piggin
2008-08-20 18:15 ` Ray Lee
2008-08-20 20:30 ` Peter Zijlstra
2008-08-20 20:56 ` Peter Zijlstra
2008-08-21 6:11 ` Nick Piggin
2008-08-21 8:17 ` Peter Zijlstra
2008-08-21 6:15 ` Ingo Molnar
2008-08-20 20:58 ` Ray Lee
2008-08-20 21:04 ` Peter Zijlstra
2008-08-21 6:12 ` Ingo Molnar
2008-08-13  8:50           ` Zhang, Yanmin
2008-08-04 6:54 ` Peter Zijlstra
2008-08-15 15:37 ` Ingo Molnar
2008-08-01 12:25 ` Hugh Dickins
2008-08-04 0:54 ` Zhang, Yanmin
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1217831171.9016.42.camel@twins \
--to=a.p.zijlstra@chello.nl \
--cc=aneesh.kumar@linux.vnet.ibm.com \
--cc=balbir@in.ibm.com \
--cc=dhaval@linux.vnet.ibm.com \
--cc=linux-kernel@vger.kernel.org \
--cc=mingo@elte.hu \
--cc=vatsa@linux.vnet.ibm.com \
--cc=yanmin_zhang@linux.intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox