From: Paul Turner <pjt@google.com>
To: linux-kernel@vger.kernel.org
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>,
Bharata B Rao <bharata@linux.vnet.ibm.com>,
Dhaval Giani <dhaval.giani@gmail.com>,
Balbir Singh <balbir@linux.vnet.ibm.com>,
Vaidyanathan Srinivasan <svaidy@linux.vnet.ibm.com>,
Srivatsa Vaddagiri <vatsa@in.ibm.com>,
Kamalesh Babulal <kamalesh@linux.vnet.ibm.com>,
Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>,
Ingo Molnar <mingo@elte.hu>, Pavel Emelyanov <xemul@openvz.org>,
Nikhil Rao <ncrao@google.com>
Subject: [patch 09/16] sched: unthrottle cfs_rq(s) who ran out of quota at period refresh
Date: Tue, 21 Jun 2011 00:16:58 -0700 [thread overview]
Message-ID: <20110621071700.599897751@google.com> (raw)
In-Reply-To: <20110621071649.862846205@google.com>
[-- Attachment #1: sched-bwc-unthrottle_entities.patch --]
[-- Type: text/plain, Size: 4868 bytes --]
At the start of a new period we must refresh the global bandwidth pool as well
as unthrottle any cfs_rq entities who previously ran out of bandwidth (as quota
permits).
Unthrottled entities have the cfs_rq->throttled flag cleared and are re-enqueued
into the entity hierarchy.
Signed-off-by: Paul Turner <pjt@google.com>
Signed-off-by: Nikhil Rao <ncrao@google.com>
Signed-off-by: Bharata B Rao <bharata@linux.vnet.ibm.com>
Reviewed-by: Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>
---
kernel/sched.c | 3 +
kernel/sched_fair.c | 125 +++++++++++++++++++++++++++++++++++++++++++++++++---
2 files changed, 121 insertions(+), 7 deletions(-)
Index: tip/kernel/sched.c
===================================================================
--- tip.orig/kernel/sched.c
+++ tip/kernel/sched.c
@@ -9002,6 +9002,9 @@ static int tg_set_cfs_bandwidth(struct t
raw_spin_lock_irq(&rq->lock);
cfs_rq->runtime_enabled = quota != RUNTIME_INF;
cfs_rq->runtime_remaining = 0;
+
+ if (cfs_rq_throttled(cfs_rq))
+ unthrottle_cfs_rq(cfs_rq);
raw_spin_unlock_irq(&rq->lock);
}
out_unlock:
Index: tip/kernel/sched_fair.c
===================================================================
--- tip.orig/kernel/sched_fair.c
+++ tip/kernel/sched_fair.c
@@ -1448,26 +1448,137 @@ static void throttle_cfs_rq(struct cfs_r
raw_spin_unlock(&cfs_b->lock);
}
+static void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
+{
+ struct rq *rq = rq_of(cfs_rq);
+ struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
+ struct sched_entity *se;
+ int enqueue = 1;
+ long task_delta;
+
+ se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))];
+
+ cfs_rq->throttled = 0;
+ raw_spin_lock(&cfs_b->lock);
+ list_del_rcu(&cfs_rq->throttled_list);
+ raw_spin_unlock(&cfs_b->lock);
+
+ if (!cfs_rq->load.weight)
+ return;
+
+ task_delta = cfs_rq->h_nr_running;
+ for_each_sched_entity(se) {
+ if (se->on_rq)
+ enqueue = 0;
+
+ cfs_rq = cfs_rq_of(se);
+ if (enqueue)
+ enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP);
+ cfs_rq->h_nr_running += task_delta;
+
+ if (cfs_rq_throttled(cfs_rq))
+ break;
+ }
+
+ if (!se)
+ rq->nr_running += task_delta;
+
+ /* determine whether we need to wake up potentially idle cpu */
+ if (rq->curr == rq->idle && rq->cfs.nr_running)
+ resched_task(rq->curr);
+}
+
+static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b,
+ u64 remaining, u64 expires)
+{
+ struct cfs_rq *cfs_rq;
+ u64 runtime = remaining;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(cfs_rq, &cfs_b->throttled_cfs_rq,
+ throttled_list) {
+ struct rq *rq = rq_of(cfs_rq);
+
+ raw_spin_lock(&rq->lock);
+ if (!cfs_rq_throttled(cfs_rq))
+ goto next;
+
+ runtime = -cfs_rq->runtime_remaining + 1;
+ if (runtime > remaining)
+ runtime = remaining;
+ remaining -= runtime;
+
+ cfs_rq->runtime_remaining += runtime;
+ cfs_rq->runtime_expires = expires;
+
+ /* we check whether we're throttled above */
+ if (cfs_rq->runtime_remaining > 0)
+ unthrottle_cfs_rq(cfs_rq);
+
+next:
+ raw_spin_unlock(&rq->lock);
+
+ if (!remaining)
+ break;
+ }
+ rcu_read_unlock();
+
+ return remaining;
+}
+
static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
{
- int idle = 1;
+ int idle = 1, throttled = 0;
+ u64 runtime, runtime_expires;
+
raw_spin_lock(&cfs_b->lock);
if (cfs_b->quota != RUNTIME_INF) {
- idle = cfs_b->idle;
- /* If we're going idle then defer handle the refill */
+ /* idle depends on !throttled in the case of a large deficit */
+ throttled = !list_empty(&cfs_b->throttled_cfs_rq);
+ idle = cfs_b->idle && !throttled;
+
+ /* If we're going idle then defer the refill */
if (!idle)
__refill_cfs_bandwidth_runtime(cfs_b);
+ if (throttled) {
+ runtime = cfs_b->runtime;
+ runtime_expires = cfs_b->runtime_expires;
+
+ /* we must first distribute to throttled entities */
+ cfs_b->runtime = 0;
+ }
/*
- * mark this bandwidth pool as idle so that we may deactivate
- * the timer at the next expiration if there is no usage.
+ * conditionally mark this bandwidth pool as idle so that we may
+ * deactivate the timer at the next expiration if there is no
+ * usage.
*/
- cfs_b->idle = 1;
+ cfs_b->idle = !throttled;
}
- if (idle)
+ if (idle) {
cfs_b->timer_active = 0;
+ goto out_unlock;
+ }
+ raw_spin_unlock(&cfs_b->lock);
+
+retry:
+ runtime = distribute_cfs_runtime(cfs_b, runtime, runtime_expires);
+
+ raw_spin_lock(&cfs_b->lock);
+ /* new bandwidth specification may exist */
+ if (unlikely(runtime_expires != cfs_b->runtime_expires))
+ goto out_unlock;
+ /* ensure no-one was throttled while we were unthrottling */
+ if (unlikely(!list_empty(&cfs_b->throttled_cfs_rq)) && runtime > 0) {
+ raw_spin_unlock(&cfs_b->lock);
+ goto retry;
+ }
+
+ /* return remaining runtime */
+ cfs_b->runtime = runtime;
+out_unlock:
raw_spin_unlock(&cfs_b->lock);
return idle;
next prev parent reply other threads:[~2011-06-21 7:21 UTC|newest]
Thread overview: 59+ messages / expand[flat|nested] mbox.gz Atom feed top
2011-06-21 7:16 [patch 00/16] CFS Bandwidth Control v7 Paul Turner
2011-06-21 7:16 ` [patch 01/16] sched: (fixlet) dont update shares twice on on_rq parent Paul Turner
2011-06-21 7:16 ` [patch 02/16] sched: hierarchical task accounting for SCHED_OTHER Paul Turner
2011-06-21 7:16 ` [patch 03/16] sched: introduce primitives to account for CFS bandwidth tracking Paul Turner
2011-06-22 10:52 ` Peter Zijlstra
2011-07-06 21:38 ` Paul Turner
2011-07-07 11:32 ` Peter Zijlstra
2011-06-21 7:16 ` [patch 04/16] sched: validate CFS quota hierarchies Paul Turner
2011-06-22 5:43 ` Bharata B Rao
2011-06-22 6:57 ` Paul Turner
2011-06-22 9:38 ` Hidetoshi Seto
2011-06-21 7:16 ` [patch 05/16] sched: accumulate per-cfs_rq cpu usage and charge against bandwidth Paul Turner
2011-06-21 7:16 ` [patch 06/16] sched: add a timer to handle CFS bandwidth refresh Paul Turner
2011-06-22 9:38 ` Hidetoshi Seto
2011-06-21 7:16 ` [patch 07/16] sched: expire invalid runtime Paul Turner
2011-06-22 9:38 ` Hidetoshi Seto
2011-06-22 15:47 ` Peter Zijlstra
2011-06-28 4:42 ` Paul Turner
2011-06-29 2:29 ` Paul Turner
2011-06-21 7:16 ` [patch 08/16] sched: throttle cfs_rq entities which exceed their local runtime Paul Turner
2011-06-22 7:11 ` Bharata B Rao
2011-06-22 16:07 ` Peter Zijlstra
2011-06-22 16:54 ` Paul Turner
2011-06-21 7:16 ` Paul Turner [this message]
2011-06-22 17:29 ` [patch 09/16] sched: unthrottle cfs_rq(s) who ran out of quota at period refresh Peter Zijlstra
2011-06-28 4:40 ` Paul Turner
2011-06-28 9:11 ` Peter Zijlstra
2011-06-29 3:37 ` Paul Turner
2011-06-21 7:16 ` [patch 10/16] sched: throttle entities exceeding their allowed bandwidth Paul Turner
2011-06-22 9:39 ` Hidetoshi Seto
2011-06-21 7:17 ` [patch 11/16] sched: allow for positional tg_tree walks Paul Turner
2011-06-21 7:17 ` [patch 12/16] sched: prevent interactions with throttled entities Paul Turner
2011-06-22 21:34 ` Peter Zijlstra
2011-06-28 4:43 ` Paul Turner
2011-06-23 11:49 ` Peter Zijlstra
2011-06-28 4:38 ` Paul Turner
2011-06-21 7:17 ` [patch 13/16] sched: migrate throttled tasks on HOTPLUG Paul Turner
2011-06-21 7:17 ` [patch 14/16] sched: add exports tracking cfs bandwidth control statistics Paul Turner
2011-06-21 7:17 ` [patch 15/16] sched: return unused runtime on voluntary sleep Paul Turner
2011-06-21 7:33 ` Paul Turner
2011-06-22 9:39 ` Hidetoshi Seto
2011-06-23 15:26 ` Peter Zijlstra
2011-06-28 1:42 ` Paul Turner
2011-06-28 10:01 ` Peter Zijlstra
2011-06-28 18:45 ` Paul Turner
2011-06-21 7:17 ` [patch 16/16] sched: add documentation for bandwidth control Paul Turner
2011-06-21 10:30 ` Hidetoshi Seto
2011-06-21 19:46 ` Paul Turner
2011-06-22 10:05 ` [patch 00/16] CFS Bandwidth Control v7 Hidetoshi Seto
2011-06-23 12:06 ` Peter Zijlstra
2011-06-23 12:43 ` Ingo Molnar
2011-06-24 5:11 ` Hidetoshi Seto
2011-06-26 10:35 ` Ingo Molnar
2011-06-29 4:05 ` Hu Tao
2011-07-01 12:28 ` Ingo Molnar
2011-07-05 3:58 ` Hu Tao
2011-07-05 8:50 ` Ingo Molnar
2011-07-05 8:52 ` Ingo Molnar
2011-07-07 3:53 ` Hu Tao
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20110621071700.599897751@google.com \
--to=pjt@google.com \
--cc=a.p.zijlstra@chello.nl \
--cc=balbir@linux.vnet.ibm.com \
--cc=bharata@linux.vnet.ibm.com \
--cc=dhaval.giani@gmail.com \
--cc=kamalesh@linux.vnet.ibm.com \
--cc=linux-kernel@vger.kernel.org \
--cc=mingo@elte.hu \
--cc=ncrao@google.com \
--cc=seto.hidetoshi@jp.fujitsu.com \
--cc=svaidy@linux.vnet.ibm.com \
--cc=vatsa@in.ibm.com \
--cc=xemul@openvz.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox