linux-kernel.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
From: Bharata B Rao <bharata@linux.vnet.ibm.com>
To: linux-kernel@vger.kernel.org
Cc: Dhaval Giani <dhaval.giani@gmail.com>,
	Balbir Singh <balbir@linux.vnet.ibm.com>,
	Vaidyanathan Srinivasan <svaidy@linux.vnet.ibm.com>,
	Srivatsa Vaddagiri <vatsa@in.ibm.com>,
	Kamalesh Babulal <kamalesh@linux.vnet.ibm.com>,
	Ingo Molnar <mingo@elte.hu>,
	Peter Zijlstra <a.p.zijlstra@chello.nl>,
	Pavel Emelyanov <xemul@openvz.org>,
	Herbert Poetzl <herbert@13thfloor.at>,
	Avi Kivity <avi@redhat.com>, Chris Friesen <cfriesen@nortel.com>,
	Paul Menage <menage@google.com>,
	Mike Waychison <mikew@google.com>, Paul Turner <pjt@google.com>,
	Nikhil Rao <ncrao@google.com>
Subject: [PATCH v3 2/7] sched: accumulate per-cfs_rq cpu usage
Date: Tue, 12 Oct 2010 13:21:09 +0530	[thread overview]
Message-ID: <20101012075109.GC9893@in.ibm.com> (raw)
In-Reply-To: <20101012074910.GA9893@in.ibm.com>

sched: accumulate per-cfs_rq cpu usage

From: Paul Turner <pjt@google.com>

Introduce account_cfs_rq_quota() to account bandwidth usage at the cfs_rq
level for task_groups to which bandwidth has been assigned.  Whether a
cfs_rq is bandwidth-constrained is tracked by whether its local
cfs_rq->quota_assigned is finite or infinite (RUNTIME_INF).

For cfs_rq's that belong to a bandwidth constrained task_group we introduce
tg_request_cfs_quota() which attempts to allocate quota from the global pool
for use locally.  Updates involving the global pool are currently protected
under cfs_bandwidth->lock, local pools are protected by rq->lock.

This patch only attempts to assign and track quota, no action is taken in the
case that cfs_rq->quota_used exceeds cfs_rq->quota_assigned.

Signed-off-by: Paul Turner <pjt@google.com>
Signed-off-by: Nikhil Rao <ncrao@google.com>
Signed-off-by: Bharata B Rao <bharata@linux.vnet.ibm.com>
---
 include/linux/sched.h |    4 ++++
 kernel/sched.c        |   13 +++++++++++++
 kernel/sched_fair.c   |   50 ++++++++++++++++++++++++++++++++++++++++++++++++++
 kernel/sysctl.c       |   10 ++++++++++
 4 files changed, 77 insertions(+)

--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1898,6 +1898,10 @@ int sched_rt_handler(struct ctl_table *t
 		void __user *buffer, size_t *lenp,
 		loff_t *ppos);
 
+#ifdef CONFIG_CFS_BANDWIDTH
+extern unsigned int sysctl_sched_cfs_bandwidth_slice;
+#endif
+
 extern unsigned int sysctl_sched_compat_yield;
 
 #ifdef CONFIG_RT_MUTEXES
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1929,6 +1929,19 @@ static const struct sched_class rt_sched
  * default: 0.5s
  */
 static u64 sched_cfs_bandwidth_period = 500000000ULL;
+
+/*
+ * default slice of quota to allocate from global tg to local cfs_rq pool on
+ * each refresh
+ * default: 10ms
+ */
+unsigned int sysctl_sched_cfs_bandwidth_slice = 10000UL;
+
+static inline u64 sched_cfs_bandwidth_slice(void)
+{
+	return (u64)sysctl_sched_cfs_bandwidth_slice * NSEC_PER_USEC;
+}
+
 #endif
 
 #define sched_class_highest (&rt_sched_class)
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -267,6 +267,16 @@ find_matching_se(struct sched_entity **s
 
 #endif	/* CONFIG_FAIR_GROUP_SCHED */
 
+#ifdef CONFIG_CFS_BANDWIDTH
+static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
+{
+	return &tg->cfs_bandwidth;
+}
+
+static void account_cfs_rq_quota(struct cfs_rq *cfs_rq,
+		unsigned long delta_exec);
+#endif
+
 
 /**************************************************************
  * Scheduling class tree data structure manipulation methods:
@@ -547,6 +557,9 @@ static void update_curr(struct cfs_rq *c
 		cpuacct_charge(curtask, delta_exec);
 		account_group_exec_runtime(curtask, delta_exec);
 	}
+#ifdef CONFIG_CFS_BANDWIDTH
+	account_cfs_rq_quota(cfs_rq, delta_exec);
+#endif
 }
 
 static inline void
@@ -1130,6 +1143,43 @@ static void yield_task_fair(struct rq *r
 }
 
 #ifdef CONFIG_CFS_BANDWIDTH
+static u64 tg_request_cfs_quota(struct task_group *tg)
+{
+	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg);
+	u64 delta = 0;
+
+	if (cfs_b->runtime > 0 || cfs_b->quota == RUNTIME_INF) {
+		raw_spin_lock(&cfs_b->lock);
+		/*
+		 * it's possible a bandwidth update has changed the global
+		 * pool.
+		 */
+		if (cfs_b->quota == RUNTIME_INF)
+			delta = sched_cfs_bandwidth_slice();
+		else {
+			delta = min(cfs_b->runtime,
+					sched_cfs_bandwidth_slice());
+			cfs_b->runtime -= delta;
+		}
+		raw_spin_unlock(&cfs_b->lock);
+	}
+	return delta;
+}
+
+static void account_cfs_rq_quota(struct cfs_rq *cfs_rq,
+		unsigned long delta_exec)
+{
+	if (cfs_rq->quota_assigned == RUNTIME_INF)
+		return;
+
+	cfs_rq->quota_used += delta_exec;
+
+	if (cfs_rq->quota_used < cfs_rq->quota_assigned)
+		return;
+
+	cfs_rq->quota_assigned += tg_request_cfs_quota(cfs_rq->tg);
+}
+
 static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
 {
 	return 1;
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -384,6 +384,16 @@ static struct ctl_table kern_table[] = {
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec,
 	},
+#ifdef CONFIG_CFS_BANDWIDTH
+	{
+		.procname	= "sched_cfs_bandwidth_slice_us",
+		.data		= &sysctl_sched_cfs_bandwidth_slice,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1		= &one,
+	},
+#endif
 #ifdef CONFIG_PROVE_LOCKING
 	{
 		.procname	= "prove_locking",

  parent reply	other threads:[~2010-10-12  7:51 UTC|newest]

Thread overview: 51+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2010-10-12  7:49 [PATCH v3 0/7] CFS Bandwidth Control Bharata B Rao
2010-10-12  7:50 ` [PATCH v3 1/7] sched: introduce primitives to account for CFS bandwidth tracking Bharata B Rao
2010-10-13 13:00   ` Balbir Singh
2010-10-14  5:14     ` Bharata B Rao
2010-10-14  7:52     ` Peter Zijlstra
2010-10-14 12:38       ` Balbir Singh
2010-10-14 13:24         ` Peter Zijlstra
2010-12-06  9:02         ` Bharata B Rao
2010-10-12  7:51 ` Bharata B Rao [this message]
2010-10-13 13:30   ` [PATCH v3 2/7] sched: accumulate per-cfs_rq cpu usage Balbir Singh
2010-10-13 13:46     ` Nikhil Rao
2010-10-13 13:59       ` Balbir Singh
2010-10-13 14:41         ` Nikhil Rao
2010-10-14  5:39           ` Balbir Singh
2010-10-14  8:57   ` Peter Zijlstra
2010-10-14  9:07     ` Paul Turner
2010-10-14  9:13       ` Peter Zijlstra
2010-10-14  9:01   ` Peter Zijlstra
2010-10-14  9:14     ` Paul Turner
2010-10-14  9:27       ` Peter Zijlstra
2010-10-14  9:53         ` Paul Turner
2010-10-14  9:19   ` Peter Zijlstra
2010-10-14  9:27     ` Paul Turner
2010-10-14  9:40       ` Bharata B Rao
2010-10-12  7:52 ` [PATCH v3 3/7] sched: throttle cfs_rq entities which exceed their local quota Bharata B Rao
2010-10-13  6:34   ` KAMEZAWA Hiroyuki
2010-10-13  6:44     ` Paul Turner
2010-10-13  6:47       ` Bharata B Rao
2010-10-13  6:52         ` Paul Turner
2010-10-13  7:00       ` KAMEZAWA Hiroyuki
2010-10-13  7:13         ` Paul Turner
2010-10-14  9:12     ` Peter Zijlstra
2010-10-14  9:50       ` KAMEZAWA Hiroyuki
2010-10-14  9:59         ` Peter Zijlstra
2010-10-14 10:08           ` KAMEZAWA Hiroyuki
2010-10-14 10:25             ` Paul Turner
2010-10-14 10:41               ` Peter Zijlstra
2010-10-14 23:30                 ` KAMEZAWA Hiroyuki
2010-10-14 10:37             ` Peter Zijlstra
2010-10-14  9:58       ` Paul Turner
2010-10-12  7:52 ` [PATCH v3 4/7] sched: unthrottle cfs_rq(s) who ran out of quota at period refresh Bharata B Rao
2010-10-15  4:45   ` Balbir Singh
2010-12-07 13:13     ` Bharata B Rao
2010-10-12  7:53 ` [PATCH v3 5/7] sched: add exports tracking cfs bandwidth control statistics Bharata B Rao
2010-10-12  7:54 ` [PATCH v3 6/7] sched: hierarchical task accounting for FAIR_GROUP_SCHED Bharata B Rao
2010-10-12  7:55 ` [PATCH v3 7/7] sched: Return/expire slack quota using generation counters Bharata B Rao
2010-10-13  5:14 ` [PATCH v3 0/7] CFS Bandwidth Control KAMEZAWA Hiroyuki
2010-10-13  5:44 ` Herbert Poetzl
2010-10-13  6:26   ` Paul Turner
2010-11-17  8:32 ` Lai Jiangshan
2010-11-19  3:24   ` Bharata B Rao

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20101012075109.GC9893@in.ibm.com \
    --to=bharata@linux.vnet.ibm.com \
    --cc=a.p.zijlstra@chello.nl \
    --cc=avi@redhat.com \
    --cc=balbir@linux.vnet.ibm.com \
    --cc=cfriesen@nortel.com \
    --cc=dhaval.giani@gmail.com \
    --cc=herbert@13thfloor.at \
    --cc=kamalesh@linux.vnet.ibm.com \
    --cc=linux-kernel@vger.kernel.org \
    --cc=menage@google.com \
    --cc=mikew@google.com \
    --cc=mingo@elte.hu \
    --cc=ncrao@google.com \
    --cc=pjt@google.com \
    --cc=svaidy@linux.vnet.ibm.com \
    --cc=vatsa@in.ibm.com \
    --cc=xemul@openvz.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).