From: Paul Turner <pjt@google.com>
To: linux-kernel@vger.kernel.org
Cc: Bharata B Rao <bharata@linux.vnet.ibm.com>,
Dhaval Giani <dhaval@linux.vnet.ibm.com>,
Balbir Singh <balbir@linux.vnet.ibm.com>,
Vaidyanathan Srinivasan <svaidy@linux.vnet.ibm.com>,
Gautham R Shenoy <ego@in.ibm.com>,
Srivatsa Vaddagiri <vatsa@in.ibm.com>,
Kamalesh Babulal <kamalesh@linux.vnet.ibm.com>,
Ingo Molnar <mingo@elte.hu>,
Peter Zijlstra <a.p.zijlstra@chello.nl>,
Pavel Emelyanov <xemul@openvz.org>,
Herbert Poetzl <herbert@13thfloor.at>,
Avi Kivity <avi@redhat.com>, Chris Friesen <cfriesen@nortel.com>,
Nikhil Rao <ncrao@google.com>
Subject: [CFS Bandwidth Control v4 4/7] sched: unthrottle cfs_rq(s) who ran out of quota at period refresh
Date: Tue, 15 Feb 2011 19:18:35 -0800
Message-ID: <20110216031841.161743484@google.com>
In-Reply-To: <20110216031831.571628191@google.com>
[-- Attachment #1: sched-bwc-unthrottle_entities.patch --]
[-- Type: text/plain, Size: 5774 bytes --]
At the start of a new period there are several actions we must take:
- Refresh the global bandwidth pool
- Unthrottle entities that ran out of quota, as the refreshed bandwidth
  permits

Unthrottling clears the cfs_rq->throttled flag and re-enqueues the entity
into the cfs entity hierarchy (condensed sketch below).
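In condensed form (a sketch only -- the helper name cfs_period_refresh()
is illustrative, and the per-cpu quota-request and rq locking details are
omitted; the real logic is do_sched_cfs_period_timer() below):

	static int cfs_period_refresh(struct cfs_bandwidth *cfs_b)
	{
		int cpu, idle = 1;

		/* 1) refresh the global bandwidth pool */
		raw_spin_lock(&cfs_b->lock);
		cfs_b->runtime = cfs_b->quota;
		raw_spin_unlock(&cfs_b->lock);

		/* 2) unthrottle throttled cfs_rqs as bandwidth permits */
		for_each_cpu(cpu, sched_bw_period_mask()) {
			struct cfs_rq *cfs_rq = cfs_bandwidth_cfs_rq(cfs_b, cpu);

			if (cfs_rq->nr_running)
				idle = 0;
			if (cfs_rq_throttled(cfs_rq))
				unthrottle_cfs_rq(cfs_rq);
		}

		/* the period timer can stop once everything has gone idle */
		return idle;
	}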
sched_rt_period_mask() is refactored slightly into sched_bw_period_mask()
since it is now shared by both cfs and rt bandwidth period timers.
The !CONFIG_RT_GROUP_SCHED && CONFIG_SMP case has been collapsed to use
rd->span instead of cpu_online_mask since I think that was incorrect before
(we don't want to touch CPUs outside of our root_domain for RT bandwidth).
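As a usage illustration (not part of this patch): the refresh/unthrottle
path can be exercised by giving a group less quota than period via the
interface introduced in patch 1/7 (cpu.cfs_quota_us / cpu.cfs_period_us,
to my understanding). The cgroup mount point and group name below are
hypothetical:

	#include <stdio.h>

	int main(void)
	{
		/* hypothetical cgroup mount point and group */
		FILE *f = fopen("/cgroup/cpu/grp/cpu.cfs_period_us", "w");

		if (!f) { perror("cfs_period_us"); return 1; }
		fprintf(f, "100000\n");	/* 100ms period */
		fclose(f);

		f = fopen("/cgroup/cpu/grp/cpu.cfs_quota_us", "w");
		if (!f) { perror("cfs_quota_us"); return 1; }
		fprintf(f, "50000\n");	/* 50ms of quota per period */
		fclose(f);
		return 0;
	}

A group capped this way throttles mid-period and is re-enqueued at each
period refresh.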
Signed-off-by: Paul Turner <pjt@google.com>
Signed-off-by: Nikhil Rao <ncrao@google.com>
Signed-off-by: Bharata B Rao <bharata@linux.vnet.ibm.com>
---
kernel/sched.c | 16 +++++++++++
kernel/sched_fair.c | 74 +++++++++++++++++++++++++++++++++++++++++++++++++++-
kernel/sched_rt.c | 19 -------------
3 files changed, 90 insertions(+), 19 deletions(-)
Index: tip/kernel/sched.c
===================================================================
--- tip.orig/kernel/sched.c
+++ tip/kernel/sched.c
@@ -1561,6 +1561,8 @@ static int tg_nop(struct task_group *tg,
}
#endif
+static inline const struct cpumask *sched_bw_period_mask(void);
+
#ifdef CONFIG_SMP
/* Used instead of source_load when we know the type == 0 */
static unsigned long weighted_cpuload(const int cpu)
@@ -8503,6 +8505,18 @@ void set_curr_task(int cpu, struct task_
#endif
+#ifdef CONFIG_SMP
+static inline const struct cpumask *sched_bw_period_mask(void)
+{
+ return cpu_rq(smp_processor_id())->rd->span;
+}
+#else
+static inline const struct cpumask *sched_bw_period_mask(void)
+{
+ return cpu_online_mask;
+}
+#endif
+
#ifdef CONFIG_FAIR_GROUP_SCHED
static void free_fair_sched_group(struct task_group *tg)
{
@@ -9240,6 +9254,8 @@ static int tg_set_cfs_bandwidth(struct t
raw_spin_lock_irq(&rq->lock);
init_cfs_rq_quota(cfs_rq);
+ if (cfs_rq_throttled(cfs_rq))
+ unthrottle_cfs_rq(cfs_rq);
raw_spin_unlock_irq(&rq->lock);
}
mutex_unlock(&mutex);
Index: tip/kernel/sched_fair.c
===================================================================
--- tip.orig/kernel/sched_fair.c
+++ tip/kernel/sched_fair.c
@@ -327,6 +327,13 @@ static inline u64 sched_cfs_bandwidth_sl
return (u64)sysctl_sched_cfs_bandwidth_slice * NSEC_PER_USEC;
}
+static inline
+struct cfs_rq *cfs_bandwidth_cfs_rq(struct cfs_bandwidth *cfs_b, int cpu)
+{
+ return container_of(cfs_b, struct task_group,
+ cfs_bandwidth)->cfs_rq[cpu];
+}
+
static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
{
return &tg->cfs_bandwidth;
@@ -1513,6 +1520,33 @@ out_throttled:
update_cfs_rq_load_contribution(cfs_rq, 1);
}
+static void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
+{
+ struct rq *rq = rq_of(cfs_rq);
+ struct sched_entity *se;
+
+ se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))];
+
+ update_rq_clock(rq);
+ /* (Try to) avoid maintaining share statistics for idle time */
+ cfs_rq->load_stamp = cfs_rq->load_last = rq->clock_task;
+
+ cfs_rq->throttled = 0;
+ for_each_sched_entity(se) {
+ if (se->on_rq)
+ break;
+
+ cfs_rq = cfs_rq_of(se);
+ enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP);
+ if (cfs_rq_throttled(cfs_rq))
+ break;
+ }
+
+ /* determine whether we need to wake up a potentially idle cpu */
+ if (rq->curr == rq->idle && rq->cfs.nr_running)
+ resched_task(rq->curr);
+}
+
static void account_cfs_rq_quota(struct cfs_rq *cfs_rq,
unsigned long delta_exec)
{
@@ -1535,8 +1569,46 @@ static void account_cfs_rq_quota(struct
static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
{
- return 1;
+ int i, idle = 1;
+ u64 delta;
+ const struct cpumask *span;
+
+ if (cfs_b->quota == RUNTIME_INF)
+ return 1;
+
+ /* reset group quota */
+ raw_spin_lock(&cfs_b->lock);
+ cfs_b->runtime = cfs_b->quota;
+ raw_spin_unlock(&cfs_b->lock);
+
+ span = sched_bw_period_mask();
+ for_each_cpu(i, span) {
+ struct rq *rq = cpu_rq(i);
+ struct cfs_rq *cfs_rq = cfs_bandwidth_cfs_rq(cfs_b, i);
+
+ if (cfs_rq->nr_running)
+ idle = 0;
+
+ if (!cfs_rq_throttled(cfs_rq))
+ continue;
+
+ delta = tg_request_cfs_quota(cfs_rq->tg);
+
+ if (delta) {
+ raw_spin_lock(&rq->lock);
+ cfs_rq->quota_assigned += delta;
+
+ /* avoid race with tg_set_cfs_bandwidth */
+ if (cfs_rq_throttled(cfs_rq) &&
+ cfs_rq->quota_used < cfs_rq->quota_assigned)
+ unthrottle_cfs_rq(cfs_rq);
+ raw_spin_unlock(&rq->lock);
+ }
+ }
+
+ return idle;
}
+
#endif
#ifdef CONFIG_SMP
Index: tip/kernel/sched_rt.c
===================================================================
--- tip.orig/kernel/sched_rt.c
+++ tip/kernel/sched_rt.c
@@ -252,18 +252,6 @@ static int rt_se_boosted(struct sched_rt
return p->prio != p->normal_prio;
}
-#ifdef CONFIG_SMP
-static inline const struct cpumask *sched_rt_period_mask(void)
-{
- return cpu_rq(smp_processor_id())->rd->span;
-}
-#else
-static inline const struct cpumask *sched_rt_period_mask(void)
-{
- return cpu_online_mask;
-}
-#endif
-
static inline
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
{
@@ -321,11 +309,6 @@ static inline int rt_rq_throttled(struct
return rt_rq->rt_throttled;
}
-static inline const struct cpumask *sched_rt_period_mask(void)
-{
- return cpu_online_mask;
-}
-
static inline
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
{
@@ -543,7 +526,7 @@ static int do_sched_rt_period_timer(stru
if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
return 1;
- span = sched_rt_period_mask();
+ span = sched_bw_period_mask();
for_each_cpu(i, span) {
int enqueue = 0;
struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);