From: Morten Rasmussen <morten.rasmussen@arm.com>
To: linux-kernel@vger.kernel.org, linux-pm@vger.kernel.org,
peterz@infradead.org, mingo@kernel.org
Cc: rjw@rjwysocki.net, vincent.guittot@linaro.org,
daniel.lezcano@linaro.org, preeti@linux.vnet.ibm.com,
Dietmar.Eggemann@arm.com, pjt@google.com
Subject: [RFCv2 PATCH 09/23] sched: Maintain the unweighted load contribution of blocked entities
Date: Thu, 3 Jul 2014 17:25:56 +0100
Message-ID: <1404404770-323-10-git-send-email-morten.rasmussen@arm.com>
In-Reply-To: <1404404770-323-1-git-send-email-morten.rasmussen@arm.com>
From: Dietmar Eggemann <dietmar.eggemann@arm.com>
The unweighted blocked load of a run queue is maintained alongside the
existing (weighted) blocked load: an entity adds its unweighted load
contribution when it is dequeued to sleep, the aggregate is decayed in
lock step with the weighted sum on each decay period, and the (decayed)
contribution is subtracted again when the entity wakes up or is removed
from the blocked set.

This patch is the unweighted counterpart of "sched: Maintain the load
contribution of blocked entities" (commit 9ee474f55664).

Note: The unweighted blocked load is not used for energy-aware
scheduling yet.
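For illustration, below is a minimal user-space model of the lifecycle
both sums share. This is a sketch, not kernel code: the structure and
function names are made up for the example, the clamped subtraction
mirrors subtract_blocked_load_contrib() in the patch below, and the
decay constant assumes PELT's y^32 = 1/2, i.e. a blocked contribution
halves roughly every 32ms. Build with e.g. gcc model.c -lm.

#include <stdio.h>
#include <math.h>

struct cfs_rq_model {
	double blocked_load_avg;	/* weighted by task priority */
	double uw_blocked_load_avg;	/* unweighted */
};

static const double y = 0.978572;	/* decay factor, y^32 ~= 0.5 */

/* entity goes to sleep: both contributions move to the blocked sums */
static void dequeue_sleep(struct cfs_rq_model *rq, double w, double uw)
{
	rq->blocked_load_avg += w;
	rq->uw_blocked_load_avg += uw;
}

/* periodic decay: both sums decay in lock step */
static void decay_periods(struct cfs_rq_model *rq, int n)
{
	double f = pow(y, n);

	rq->blocked_load_avg *= f;
	rq->uw_blocked_load_avg *= f;
}

/*
 * entity wakes up: subtract its contributions with a clamp at zero,
 * as subtract_blocked_load_contrib() does
 */
static void wake_up_entity(struct cfs_rq_model *rq, double w, double uw)
{
	rq->blocked_load_avg =
		(w < rq->blocked_load_avg) ? rq->blocked_load_avg - w : 0;
	rq->uw_blocked_load_avg =
		(uw < rq->uw_blocked_load_avg) ? rq->uw_blocked_load_avg - uw : 0;
}

int main(void)
{
	struct cfs_rq_model rq = { 0.0, 0.0 };

	dequeue_sleep(&rq, 2048.0, 1024.0);	/* high-weight entity sleeps */
	decay_periods(&rq, 32);			/* ~32ms later: both halved */
	printf("blocked=%.0f uw_blocked=%.0f\n",
	       rq.blocked_load_avg, rq.uw_blocked_load_avg);
	wake_up_entity(&rq, 1024.0, 512.0);	/* remove decayed contribs */
	printf("blocked=%.0f uw_blocked=%.0f\n",
	       rq.blocked_load_avg, rq.uw_blocked_load_avg);
	return 0;
}

Decaying both sums in lock step keeps uw_blocked_load_avg consistent
with blocked_load_avg, so later patches in the series can read the
unweighted figure wherever the weighted one is used today.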
Signed-off-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
---
 kernel/sched/debug.c |    2 ++
 kernel/sched/fair.c  |   22 +++++++++++++++++-----
 kernel/sched/sched.h |    2 +-
 3 files changed, 20 insertions(+), 6 deletions(-)
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 78d4151..ffa56a8 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -220,6 +220,8 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 			cfs_rq->uw_runnable_load_avg);
 	SEQ_printf(m, "  .%-30s: %ld\n", "blocked_load_avg",
 			cfs_rq->blocked_load_avg);
+	SEQ_printf(m, "  .%-30s: %ld\n", "uw_blocked_load_avg",
+			cfs_rq->uw_blocked_load_avg);
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	SEQ_printf(m, "  .%-30s: %ld\n", "tg_load_contrib",
 			cfs_rq->tg_load_contrib);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 1ee47b3..c6207f7 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2481,12 +2481,18 @@ static long __update_entity_load_avg_contrib(struct sched_entity *se,
 }
 
 static inline void subtract_blocked_load_contrib(struct cfs_rq *cfs_rq,
-						 long load_contrib)
+						 long load_contrib,
+						 long uw_load_contrib)
 {
 	if (likely(load_contrib < cfs_rq->blocked_load_avg))
 		cfs_rq->blocked_load_avg -= load_contrib;
 	else
 		cfs_rq->blocked_load_avg = 0;
+
+	if (likely(uw_load_contrib < cfs_rq->uw_blocked_load_avg))
+		cfs_rq->uw_blocked_load_avg -= uw_load_contrib;
+	else
+		cfs_rq->uw_blocked_load_avg = 0;
 }
 
 static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq);
@@ -2521,7 +2527,8 @@ static inline void update_entity_load_avg(struct sched_entity *se,
 		cfs_rq->uw_runnable_load_avg += uw_contrib_delta;
 	}
 	else
-		subtract_blocked_load_contrib(cfs_rq, -contrib_delta);
+		subtract_blocked_load_contrib(cfs_rq, -contrib_delta,
+					      -uw_contrib_delta);
 }
 
 /*
@@ -2540,12 +2547,14 @@ static void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq, int force_update)
 	if (atomic_long_read(&cfs_rq->removed_load)) {
 		unsigned long removed_load;
 		removed_load = atomic_long_xchg(&cfs_rq->removed_load, 0);
-		subtract_blocked_load_contrib(cfs_rq, removed_load);
+		subtract_blocked_load_contrib(cfs_rq, removed_load, 0);
 	}
 
 	if (decays) {
 		cfs_rq->blocked_load_avg = decay_load(cfs_rq->blocked_load_avg,
 						      decays);
+		cfs_rq->uw_blocked_load_avg =
+			decay_load(cfs_rq->uw_blocked_load_avg, decays);
 		atomic64_add(decays, &cfs_rq->decay_counter);
 		cfs_rq->last_decay = now;
 	}
@@ -2591,7 +2600,8 @@ static inline void enqueue_entity_load_avg(struct cfs_rq *cfs_rq,
 
 	/* migrated tasks did not contribute to our blocked load */
 	if (wakeup) {
-		subtract_blocked_load_contrib(cfs_rq, se->avg.load_avg_contrib);
+		subtract_blocked_load_contrib(cfs_rq, se->avg.load_avg_contrib,
+					      se->avg.uw_load_avg_contrib);
 		update_entity_load_avg(se, 0);
 	}
 
@@ -2620,6 +2630,7 @@ static inline void dequeue_entity_load_avg(struct cfs_rq *cfs_rq,
 
 	if (sleep) {
 		cfs_rq->blocked_load_avg += se->avg.load_avg_contrib;
+		cfs_rq->uw_blocked_load_avg += se->avg.uw_load_avg_contrib;
 		se->avg.decay_count = atomic64_read(&cfs_rq->decay_counter);
 	} /* migrations, e.g. sleep=0 leave decay_count == 0 */
 }
@@ -7481,7 +7492,8 @@ static void switched_from_fair(struct rq *rq, struct task_struct *p)
 	 */
 	if (se->avg.decay_count) {
 		__synchronize_entity_decay(se);
-		subtract_blocked_load_contrib(cfs_rq, se->avg.load_avg_contrib);
+		subtract_blocked_load_contrib(cfs_rq, se->avg.load_avg_contrib,
+					      se->avg.uw_load_avg_contrib);
 	}
 #endif
 }
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 46cb8bd..3f1eeb3 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -337,7 +337,7 @@ struct cfs_rq {
 	 * the FAIR_GROUP_SCHED case).
 	 */
 	unsigned long runnable_load_avg, blocked_load_avg;
-	unsigned long uw_runnable_load_avg;
+	unsigned long uw_runnable_load_avg, uw_blocked_load_avg;
 	atomic64_t decay_counter;
 	u64 last_decay;
 	atomic_long_t removed_load;
--
1.7.9.5