From: Paul Turner <pjt@google.com>
To: linux-kernel@vger.kernel.org
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>,
Bharata B Rao <bharata@linux.vnet.ibm.com>,
Dhaval Giani <dhaval.giani@gmail.com>,
Balbir Singh <balbir@linux.vnet.ibm.com>,
Vaidyanathan Srinivasan <svaidy@linux.vnet.ibm.com>,
Srivatsa Vaddagiri <vatsa@in.ibm.com>,
Kamalesh Babulal <kamalesh@linux.vnet.ibm.com>,
Ingo Molnar <mingo@elte.hu>, Pavel Emelyanov <xemul@openvz.org>
Subject: [patch 04/15] sched: validate CFS quota hierarchies
Date: Tue, 03 May 2011 02:28:50 -0700
Message-ID: <20110503092904.806273470@google.com>
In-Reply-To: <20110503092846.022272244@google.com>
[-- Attachment #1: sched-bwc-consistent_quota.patch --]
[-- Type: text/plain, Size: 8005 bytes --]
Add constraint validation for CFS bandwidth hierarchies.

Validate that:
   sum(child bandwidth) <= parent_bandwidth

In a quota-limited hierarchy, an unconstrained entity
(i.e. bandwidth == RUNTIME_INF) inherits the bandwidth of its parent.

Since bandwidth periods may be non-uniform, we normalize to the maximum
allowed period, 1 second.

This behavior may be disabled (allowing child bandwidth to exceed that of
the parent) via:
   kernel.sched_cfs_bandwidth_consistent=0
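
For intuition, here is a minimal userspace sketch of the normalization
(mirroring the kernel's to_ratio() below; the values in main() are
illustrative, not anything the patch mandates):

#include <stdint.h>
#include <stdio.h>

/*
 * Bandwidth as a fixed-point fraction of the period: runtime/period
 * scaled by 2^20, as in the kernel's to_ratio(). Units only need to
 * agree; the validation below converts both to usecs first.
 */
static uint64_t to_ratio(uint64_t period, uint64_t runtime)
{
	return (runtime << 20) / period;
}

int main(void)
{
	/* 250ms per 500ms period == 500ms per 1s period == half bandwidth */
	printf("%llu %llu\n",
	       (unsigned long long)to_ratio(500000, 250000),	/* 524288 */
	       (unsigned long long)to_ratio(1000000, 500000));	/* 524288 */
	return 0;
}

Normalizing every group's quota/period pair to the same maximum period
makes these ratios directly comparable when summing children against
their parent.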
Signed-off-by: Paul Turner <pjt@google.com>
---
include/linux/sched.h | 8 ++
kernel/sched.c | 137 +++++++++++++++++++++++++++++++++++++++++++++-----
kernel/sched_fair.c | 8 ++
kernel/sysctl.c | 11 ++++
4 files changed, 151 insertions(+), 13 deletions(-)
Index: tip/kernel/sched.c
===================================================================
--- tip.orig/kernel/sched.c
+++ tip/kernel/sched.c
@@ -249,6 +249,7 @@ struct cfs_bandwidth {
raw_spinlock_t lock;
ktime_t period;
u64 quota;
+ s64 hierarchal_quota;
#endif
};
@@ -8789,12 +8790,7 @@ unsigned long sched_group_shares(struct
}
#endif
-#ifdef CONFIG_RT_GROUP_SCHED
-/*
- * Ensure that the real time constraints are schedulable.
- */
-static DEFINE_MUTEX(rt_constraints_mutex);
-
+#if defined(CONFIG_RT_GROUP_SCHED) || defined(CONFIG_CFS_BANDWIDTH)
static unsigned long to_ratio(u64 period, u64 runtime)
{
if (runtime == RUNTIME_INF)
@@ -8802,6 +8798,13 @@ static unsigned long to_ratio(u64 period
return div64_u64(runtime << 20, period);
}
+#endif
+
+#ifdef CONFIG_RT_GROUP_SCHED
+/*
+ * Ensure that the real time constraints are schedulable.
+ */
+static DEFINE_MUTEX(rt_constraints_mutex);
/* Must be called with tasklist_lock held */
static inline int tg_has_rt_tasks(struct task_group *tg)
@@ -8822,7 +8825,7 @@ struct rt_schedulable_data {
u64 rt_runtime;
};
-static int tg_schedulable(struct task_group *tg, void *data)
+static int tg_rt_schedulable(struct task_group *tg, void *data)
{
struct rt_schedulable_data *d = data;
struct task_group *child;
@@ -8886,7 +8889,7 @@ static int __rt_schedulable(struct task_
.rt_runtime = runtime,
};
- return walk_tg_tree(tg_schedulable, tg_nop, &data);
+ return walk_tg_tree(tg_rt_schedulable, tg_nop, &data);
}
static int tg_set_rt_bandwidth(struct task_group *tg,
@@ -9177,14 +9180,17 @@ static u64 cpu_shares_read_u64(struct cg
}
#ifdef CONFIG_CFS_BANDWIDTH
+static DEFINE_MUTEX(cfs_constraints_mutex);
+
const u64 max_cfs_quota_period = 1 * NSEC_PER_SEC; /* 1s */
const u64 min_cfs_quota_period = 1 * NSEC_PER_MSEC; /* 1ms */
+static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime);
+
static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota)
{
- int i;
+ int i, ret = 0;
struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg);
- static DEFINE_MUTEX(mutex);
if (tg == &root_task_group)
return -EINVAL;
@@ -9205,7 +9211,13 @@ static int tg_set_cfs_bandwidth(struct t
if (period > max_cfs_quota_period)
return -EINVAL;
- mutex_lock(&mutex);
+ mutex_lock(&cfs_constraints_mutex);
+ if (sysctl_sched_cfs_bandwidth_consistent) {
+ ret = __cfs_schedulable(tg, period, quota);
+ if (ret)
+ goto out_unlock;
+ }
+
raw_spin_lock_irq(&cfs_b->lock);
cfs_b->period = ns_to_ktime(period);
cfs_b->quota = quota;
@@ -9220,9 +9232,10 @@ static int tg_set_cfs_bandwidth(struct t
cfs_rq->runtime_remaining = 0;
raw_spin_unlock_irq(&rq->lock);
}
- mutex_unlock(&mutex);
+out_unlock:
+ mutex_unlock(&cfs_constraints_mutex);
- return 0;
+ return ret;
}
int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us)
@@ -9296,6 +9309,104 @@ static int cpu_cfs_period_write_u64(stru
return tg_set_cfs_period(cgroup_tg(cgrp), cfs_period_us);
}
+
+struct cfs_schedulable_data {
+ struct task_group *tg;
+ u64 period, quota;
+};
+
+/*
+ * normalize group quota/period to be quota/max_period
+ * note: units are usecs
+ */
+static u64 normalize_cfs_quota(struct task_group *tg,
+ struct cfs_schedulable_data *d)
+{
+ u64 quota, period;
+
+ if (tg == d->tg) {
+ period = d->period;
+ quota = d->quota;
+ } else {
+ period = tg_get_cfs_period(tg);
+ quota = tg_get_cfs_quota(tg);
+ }
+
+ if (quota == RUNTIME_INF)
+ return RUNTIME_INF;
+
+ return to_ratio(period, quota);
+}
+
+static int tg_cfs_schedulable_down(struct task_group *tg, void *data)
+{
+ struct cfs_schedulable_data *d = data;
+ struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg);
+ s64 quota = 0, parent_quota = -1;
+
+ quota = normalize_cfs_quota(tg, d);
+ if (!tg->parent) {
+ quota = RUNTIME_INF;
+ } else {
+ struct cfs_bandwidth *parent_b = tg_cfs_bandwidth(tg->parent);
+
+ parent_quota = parent_b->hierarchal_quota;
+ if (parent_quota != RUNTIME_INF) {
+ parent_quota -= quota;
+ /* invalid hierarchy, child bandwidth exceeds parent */
+ if (parent_quota < 0)
+ return -EINVAL;
+ }
+
+ /* if no inherent limit then inherit parent quota */
+ if (quota == RUNTIME_INF)
+ quota = parent_quota;
+ parent_b->hierarchal_quota = parent_quota;
+ }
+ cfs_b->hierarchal_quota = quota;
+
+ return 0;
+}
+
+static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota)
+{
+ struct cfs_schedulable_data data = {
+ .tg = tg,
+ .period = period,
+ .quota = quota,
+ };
+
+ if (!sysctl_sched_cfs_bandwidth_consistent)
+ return 0;
+
+ if (quota != RUNTIME_INF) {
+ do_div(data.period, NSEC_PER_USEC);
+ do_div(data.quota, NSEC_PER_USEC);
+ }
+
+ return walk_tg_tree(tg_cfs_schedulable_down, tg_nop, &data);
+}
+
+int sched_cfs_consistent_handler(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp,
+ loff_t *ppos)
+{
+ int ret;
+
+ mutex_lock(&cfs_constraints_mutex);
+ ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+
+ if (!ret && write && sysctl_sched_cfs_bandwidth_consistent) {
+ ret = __cfs_schedulable(NULL, 0, 0);
+
+ /* must be consistent to enable */
+ if (ret)
+ sysctl_sched_cfs_bandwidth_consistent = 0;
+ }
+ mutex_unlock(&cfs_constraints_mutex);
+
+ return ret;
+}
#endif /* CONFIG_CFS_BANDWIDTH */
#endif /* CONFIG_FAIR_GROUP_SCHED */
Index: tip/kernel/sysctl.c
===================================================================
--- tip.orig/kernel/sysctl.c
+++ tip/kernel/sysctl.c
@@ -367,6 +367,17 @@ static struct ctl_table kern_table[] = {
.mode = 0644,
.proc_handler = sched_rt_handler,
},
+#ifdef CONFIG_CFS_BANDWIDTH
+ {
+ .procname = "sched_cfs_bandwidth_consistent",
+ .data = &sysctl_sched_cfs_bandwidth_consistent,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = sched_cfs_consistent_handler,
+ .extra1 = &zero,
+ .extra2 = &one,
+ },
+#endif
#ifdef CONFIG_SCHED_AUTOGROUP
{
.procname = "sched_autogroup_enabled",
Index: tip/include/linux/sched.h
===================================================================
--- tip.orig/include/linux/sched.h
+++ tip/include/linux/sched.h
@@ -1950,6 +1950,14 @@ int sched_rt_handler(struct ctl_table *t
void __user *buffer, size_t *lenp,
loff_t *ppos);
+#ifdef CONFIG_CFS_BANDWIDTH
+extern unsigned int sysctl_sched_cfs_bandwidth_consistent;
+
+int sched_cfs_consistent_handler(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp,
+ loff_t *ppos);
+#endif
+
#ifdef CONFIG_SCHED_AUTOGROUP
extern unsigned int sysctl_sched_autogroup_enabled;
Index: tip/kernel/sched_fair.c
===================================================================
--- tip.orig/kernel/sched_fair.c
+++ tip/kernel/sched_fair.c
@@ -88,6 +88,14 @@ const_debug unsigned int sysctl_sched_mi
*/
unsigned int __read_mostly sysctl_sched_shares_window = 10000000UL;
+#ifdef CONFIG_CFS_BANDWIDTH
+/*
+ * Whether a CFS bandwidth hierarchy is required to be consistent, that is:
+ * sum(child_bandwidth) <= parent_bandwidth
+ */
+unsigned int sysctl_sched_cfs_bandwidth_consistent = 1;
+#endif
+
static const struct sched_class fair_sched_class;
/**************************************************************
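
For reference, a self-contained userspace model of the top-down walk
performed by tg_cfs_schedulable_down() above. It is a simplified sketch:
it does not charge an unconstrained child against its parent before
inheriting, the fan-out is fixed, and the names and tree in main() are
illustrative rather than the kernel's:

#include <stdint.h>
#include <stdio.h>

#define RUNTIME_INF INT64_MIN	/* sentinel for "no limit" in this model */

struct group {
	const char *name;
	struct group *parent;
	int64_t quota;		/* normalized bandwidth, or RUNTIME_INF */
	int64_t remaining;	/* budget left (hierarchal_quota above) */
	struct group *child[4];	/* small fixed fan-out for the example */
};

/* Pre-order walk: charge each group against its parent's remaining budget. */
static int schedulable_down(struct group *g)
{
	int64_t quota = g->quota;

	if (!g->parent) {
		quota = RUNTIME_INF;	/* the root is never constrained */
	} else {
		int64_t parent_quota = g->parent->remaining;

		if (parent_quota != RUNTIME_INF && quota != RUNTIME_INF) {
			parent_quota -= quota;	/* sum(children) <= parent */
			if (parent_quota < 0) {
				printf("%s: child bandwidth exceeds parent\n",
				       g->name);
				return -1;
			}
		}
		if (quota == RUNTIME_INF)	/* no limit: inherit budget */
			quota = parent_quota;
		g->parent->remaining = parent_quota;
	}
	g->remaining = quota;

	for (int i = 0; i < 4 && g->child[i]; i++)
		if (schedulable_down(g->child[i]))
			return -1;
	return 0;
}

int main(void)
{
	/* root -> A(0.5) -> { B(0.3), C(0.3) }: B+C overcommit A */
	struct group root = { .name = "root" };
	struct group A = { .name = "A", .parent = &root,
			   .quota = (1 << 20) / 2 };
	struct group B = { .name = "B", .parent = &A,
			   .quota = 3 * (1 << 20) / 10 };
	struct group C = { .name = "C", .parent = &A,
			   .quota = 3 * (1 << 20) / 10 };

	root.child[0] = &A;
	A.child[0] = &B;
	A.child[1] = &C;

	return schedulable_down(&root) ? 1 : 0;
}

Running it on this example tree reports the B+C overcommit against A,
mirroring the -EINVAL path in the patch.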