From: Paul Turner <pjt@google.com>
To: linux-kernel@vger.kernel.org
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>,
Bharata B Rao <bharata@linux.vnet.ibm.com>,
Dhaval Giani <dhaval.giani@gmail.com>,
Balbir Singh <balbir@linux.vnet.ibm.com>,
Vaidyanathan Srinivasan <svaidy@linux.vnet.ibm.com>,
Srivatsa Vaddagiri <vatsa@in.ibm.com>,
Kamalesh Babulal <kamalesh@linux.vnet.ibm.com>,
Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>,
Ingo Molnar <mingo@elte.hu>, Pavel Emelyanov <xemul@openvz.org>
Subject: [patch 04/16] sched: validate CFS quota hierarchies
Date: Tue, 21 Jun 2011 00:16:53 -0700 [thread overview]
Message-ID: <20110621071700.092877579@google.com> (raw)
In-Reply-To: 20110621071649.862846205@google.com
[-- Attachment #1: sched-bwc-consistent_quota.patch --]
[-- Type: text/plain, Size: 5310 bytes --]
Add constraints validation for CFS bandwidth hierarchies.
Validate that:
max(child_bandwidth) <= parent_bandwidth
In a quota-limited hierarchy, an unconstrained entity
(e.g. bandwidth == RUNTIME_INF) inherits the bandwidth of its parent.
This constraint is chosen over sum(child_bandwidth) as the notion of over-commit is
valuable within SCHED_OTHER. Some basic code from the RT case is refactored
for reuse.
Signed-off-by: Paul Turner <pjt@google.com>
---
kernel/sched.c | 109 ++++++++++++++++++++++++++++++++++++++++++++++++++-------
1 file changed, 96 insertions(+), 13 deletions(-)
Index: tip/kernel/sched.c
===================================================================
--- tip.orig/kernel/sched.c
+++ tip/kernel/sched.c
@@ -249,6 +249,7 @@ struct cfs_bandwidth {
raw_spinlock_t lock;
ktime_t period;
u64 quota;
+ s64 hierarchal_quota;
#endif
};
@@ -8534,12 +8535,7 @@ unsigned long sched_group_shares(struct
}
#endif
-#ifdef CONFIG_RT_GROUP_SCHED
-/*
- * Ensure that the real time constraints are schedulable.
- */
-static DEFINE_MUTEX(rt_constraints_mutex);
-
+#if defined(CONFIG_RT_GROUP_SCHED) || defined(CONFIG_CFS_BANDWIDTH)
static unsigned long to_ratio(u64 period, u64 runtime)
{
if (runtime == RUNTIME_INF)
@@ -8547,6 +8543,13 @@ static unsigned long to_ratio(u64 period
return div64_u64(runtime << 20, period);
}
+#endif
+
+#ifdef CONFIG_RT_GROUP_SCHED
+/*
+ * Ensure that the real time constraints are schedulable.
+ */
+static DEFINE_MUTEX(rt_constraints_mutex);
/* Must be called with tasklist_lock held */
static inline int tg_has_rt_tasks(struct task_group *tg)
@@ -8567,7 +8570,7 @@ struct rt_schedulable_data {
u64 rt_runtime;
};
-static int tg_schedulable(struct task_group *tg, void *data)
+static int tg_rt_schedulable(struct task_group *tg, void *data)
{
struct rt_schedulable_data *d = data;
struct task_group *child;
@@ -8631,7 +8634,7 @@ static int __rt_schedulable(struct task_
.rt_runtime = runtime,
};
- return walk_tg_tree(tg_schedulable, tg_nop, &data);
+ return walk_tg_tree(tg_rt_schedulable, tg_nop, &data);
}
static int tg_set_rt_bandwidth(struct task_group *tg,
@@ -8890,14 +8893,17 @@ static u64 cpu_shares_read_u64(struct cg
}
#ifdef CONFIG_CFS_BANDWIDTH
+static DEFINE_MUTEX(cfs_constraints_mutex);
+
const u64 max_cfs_quota_period = 1 * NSEC_PER_SEC; /* 1s */
const u64 min_cfs_quota_period = 1 * NSEC_PER_MSEC; /* 1ms */
+static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime);
+
static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota)
{
- int i;
+ int i, ret = 0;
struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg);
- static DEFINE_MUTEX(mutex);
if (tg == &root_task_group)
return -EINVAL;
@@ -8918,7 +8924,11 @@ static int tg_set_cfs_bandwidth(struct t
if (period > max_cfs_quota_period)
return -EINVAL;
- mutex_lock(&mutex);
+ mutex_lock(&cfs_constraints_mutex);
+ ret = __cfs_schedulable(tg, period, quota);
+ if (ret)
+ goto out_unlock;
+
raw_spin_lock_irq(&cfs_b->lock);
cfs_b->period = ns_to_ktime(period);
cfs_b->quota = quota;
@@ -8933,9 +8943,10 @@ static int tg_set_cfs_bandwidth(struct t
cfs_rq->runtime_remaining = 0;
raw_spin_unlock_irq(&rq->lock);
}
- mutex_unlock(&mutex);
+out_unlock:
+ mutex_unlock(&cfs_constraints_mutex);
- return 0;
+ return ret;
}
int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us)
@@ -9009,6 +9020,78 @@ static int cpu_cfs_period_write_u64(stru
return tg_set_cfs_period(cgroup_tg(cgrp), cfs_period_us);
}
+struct cfs_schedulable_data {
+ struct task_group *tg;
+ u64 period, quota;
+};
+
+/*
+ * normalize group quota/period to be quota/max_period
+ * note: units are usecs
+ */
+static u64 normalize_cfs_quota(struct task_group *tg,
+ struct cfs_schedulable_data *d)
+{
+ u64 quota, period;
+
+ if (tg == d->tg) {
+ period = d->period;
+ quota = d->quota;
+ } else {
+ period = tg_get_cfs_period(tg);
+ quota = tg_get_cfs_quota(tg);
+ }
+
+ /* note: these should typically be equivalent */
+ if (quota == RUNTIME_INF || quota == -1)
+ return RUNTIME_INF;
+
+ return to_ratio(period, quota);
+}
+
+static int tg_cfs_schedulable_down(struct task_group *tg, void *data)
+{
+ struct cfs_schedulable_data *d = data;
+ struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg);
+ s64 quota = 0, parent_quota = -1;
+
+ if (!tg->parent) {
+ quota = RUNTIME_INF;
+ } else {
+ struct cfs_bandwidth *parent_b = tg_cfs_bandwidth(tg->parent);
+
+ quota = normalize_cfs_quota(tg, d);
+ parent_quota = parent_b->hierarchal_quota;
+
+ /*
+ * ensure max(child_quota) <= parent_quota, inherit when no
+ * limit is set
+ */
+ if (quota == RUNTIME_INF)
+ quota = parent_quota;
+ else if (parent_quota != RUNTIME_INF && quota > parent_quota)
+ return -EINVAL;
+ }
+ cfs_b->hierarchal_quota = quota;
+
+ return 0;
+}
+
+static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota)
+{
+ struct cfs_schedulable_data data = {
+ .tg = tg,
+ .period = period,
+ .quota = quota,
+ };
+
+ if (quota != RUNTIME_INF) {
+ do_div(data.period, NSEC_PER_USEC);
+ do_div(data.quota, NSEC_PER_USEC);
+ }
+
+ return walk_tg_tree(tg_cfs_schedulable_down, tg_nop, &data);
+}
#endif /* CONFIG_CFS_BANDWIDTH */
#endif /* CONFIG_FAIR_GROUP_SCHED */
next prev parent reply other threads:[~2011-06-21 7:24 UTC|newest]
Thread overview: 59+ messages / expand[flat|nested] mbox.gz Atom feed top
2011-06-21 7:16 [patch 00/16] CFS Bandwidth Control v7 Paul Turner
2011-06-21 7:16 ` [patch 01/16] sched: (fixlet) dont update shares twice on on_rq parent Paul Turner
2011-06-21 7:16 ` [patch 02/16] sched: hierarchical task accounting for SCHED_OTHER Paul Turner
2011-06-21 7:16 ` [patch 03/16] sched: introduce primitives to account for CFS bandwidth tracking Paul Turner
2011-06-22 10:52 ` Peter Zijlstra
2011-07-06 21:38 ` Paul Turner
2011-07-07 11:32 ` Peter Zijlstra
2011-06-21 7:16 ` Paul Turner [this message]
2011-06-22 5:43 ` [patch 04/16] sched: validate CFS quota hierarchies Bharata B Rao
2011-06-22 6:57 ` Paul Turner
2011-06-22 9:38 ` Hidetoshi Seto
2011-06-21 7:16 ` [patch 05/16] sched: accumulate per-cfs_rq cpu usage and charge against bandwidth Paul Turner
2011-06-21 7:16 ` [patch 06/16] sched: add a timer to handle CFS bandwidth refresh Paul Turner
2011-06-22 9:38 ` Hidetoshi Seto
2011-06-21 7:16 ` [patch 07/16] sched: expire invalid runtime Paul Turner
2011-06-22 9:38 ` Hidetoshi Seto
2011-06-22 15:47 ` Peter Zijlstra
2011-06-28 4:42 ` Paul Turner
2011-06-29 2:29 ` Paul Turner
2011-06-21 7:16 ` [patch 08/16] sched: throttle cfs_rq entities which exceed their local runtime Paul Turner
2011-06-22 7:11 ` Bharata B Rao
2011-06-22 16:07 ` Peter Zijlstra
2011-06-22 16:54 ` Paul Turner
2011-06-21 7:16 ` [patch 09/16] sched: unthrottle cfs_rq(s) who ran out of quota at period refresh Paul Turner
2011-06-22 17:29 ` Peter Zijlstra
2011-06-28 4:40 ` Paul Turner
2011-06-28 9:11 ` Peter Zijlstra
2011-06-29 3:37 ` Paul Turner
2011-06-21 7:16 ` [patch 10/16] sched: throttle entities exceeding their allowed bandwidth Paul Turner
2011-06-22 9:39 ` Hidetoshi Seto
2011-06-21 7:17 ` [patch 11/16] sched: allow for positional tg_tree walks Paul Turner
2011-06-21 7:17 ` [patch 12/16] sched: prevent interactions with throttled entities Paul Turner
2011-06-22 21:34 ` Peter Zijlstra
2011-06-28 4:43 ` Paul Turner
2011-06-23 11:49 ` Peter Zijlstra
2011-06-28 4:38 ` Paul Turner
2011-06-21 7:17 ` [patch 13/16] sched: migrate throttled tasks on HOTPLUG Paul Turner
2011-06-21 7:17 ` [patch 14/16] sched: add exports tracking cfs bandwidth control statistics Paul Turner
2011-06-21 7:17 ` [patch 15/16] sched: return unused runtime on voluntary sleep Paul Turner
2011-06-21 7:33 ` Paul Turner
2011-06-22 9:39 ` Hidetoshi Seto
2011-06-23 15:26 ` Peter Zijlstra
2011-06-28 1:42 ` Paul Turner
2011-06-28 10:01 ` Peter Zijlstra
2011-06-28 18:45 ` Paul Turner
2011-06-21 7:17 ` [patch 16/16] sched: add documentation for bandwidth control Paul Turner
2011-06-21 10:30 ` Hidetoshi Seto
2011-06-21 19:46 ` Paul Turner
2011-06-22 10:05 ` [patch 00/16] CFS Bandwidth Control v7 Hidetoshi Seto
2011-06-23 12:06 ` Peter Zijlstra
2011-06-23 12:43 ` Ingo Molnar
2011-06-24 5:11 ` Hidetoshi Seto
2011-06-26 10:35 ` Ingo Molnar
2011-06-29 4:05 ` Hu Tao
2011-07-01 12:28 ` Ingo Molnar
2011-07-05 3:58 ` Hu Tao
2011-07-05 8:50 ` Ingo Molnar
2011-07-05 8:52 ` Ingo Molnar
2011-07-07 3:53 ` Hu Tao
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20110621071700.092877579@google.com \
--to=pjt@google.com \
--cc=a.p.zijlstra@chello.nl \
--cc=balbir@linux.vnet.ibm.com \
--cc=bharata@linux.vnet.ibm.com \
--cc=dhaval.giani@gmail.com \
--cc=kamalesh@linux.vnet.ibm.com \
--cc=linux-kernel@vger.kernel.org \
--cc=mingo@elte.hu \
--cc=seto.hidetoshi@jp.fujitsu.com \
--cc=svaidy@linux.vnet.ibm.com \
--cc=vatsa@in.ibm.com \
--cc=xemul@openvz.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox