From: Tianyang Chen <tiche@seas.upenn.edu>
To: xen-devel@lists.xen.org
Cc: ian.campbell@citrix.com, dario.faggioli@citrix.com,
Tianyang Chen <tiche@seas.upenn.edu>,
ian.jackson@eu.citrix.com, george.dunlap@citrix.com,
Meng Xu <mengxu@cis.upenn.edu>,
jbeulich@suse.com, Dagaen Golomb <dgolomb@seas.upenn.edu>
Subject: [PATCH V2 1/1] Improved RTDS scheduler
Date: Thu, 31 Dec 2015 05:20:19 -0500
Message-ID: <1451557219-8642-2-git-send-email-tiche@seas.upenn.edu>
In-Reply-To: <1451557219-8642-1-git-send-email-tiche@seas.upenn.edu>
Budget replenishment is now handled by a dedicated timer that fires at
the most imminent release time among all runnable vcpus, instead of
being done inside rt_schedule(), rt_vcpu_wake() and rt_context_saved().
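
To illustrate the approach (this sketch is not part of the patch;
toy_vcpu, replenishment_tick and the numbers in main() are made-up
stand-ins for the real rt_vcpu fields), the handler below replenishes
every vcpu whose release time has passed and returns the earliest
upcoming release time, which is where the single timer gets re-armed:

#include <stdint.h>
#include <stdio.h>

typedef int64_t s_time_t;

struct toy_vcpu {
    s_time_t period;
    s_time_t budget;
    s_time_t cur_budget;
    s_time_t cur_deadline;   /* also the next release time */
};

/* Replenish every vcpu whose release time has passed and return the
 * time at which the replenishment timer should fire next. */
static s_time_t replenishment_tick(struct toy_vcpu *v, int nr, s_time_t now)
{
    s_time_t next = INT64_MAX;

    for ( int i = 0; i < nr; i++ )
    {
        while ( now >= v[i].cur_deadline )       /* release time reached */
        {
            v[i].cur_deadline += v[i].period;    /* next release/deadline */
            v[i].cur_budget = v[i].budget;       /* refill the budget */
        }
        if ( v[i].cur_deadline < next )
            next = v[i].cur_deadline;            /* earliest future release */
    }
    return next;                                 /* re-arm the timer here */
}

int main(void)
{
    struct toy_vcpu vcpus[2] = {
        { .period = 10, .budget = 4, .cur_budget = 4, .cur_deadline = 10 },
        { .period = 15, .budget = 5, .cur_budget = 5, .cur_deadline = 15 },
    };
    s_time_t now = 0, next = 10;

    /* Simulate a few timer firings. */
    for ( int tick = 0; tick < 4; tick++ )
    {
        now = next;
        next = replenishment_tick(vcpus, 2, now);
        printf("fired at %lld, next firing at %lld\n",
               (long long)now, (long long)next);
    }
    return 0;
}

The patch below does the equivalent scan over the runq and the
depletedq, under prv->lock, and re-arms repl_timer with set_timer().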
Changes since V1:
None
Signed-off-by: Tianyang Chen <tiche@seas.upenn.edu>
Signed-off-by: Meng Xu <mengxu@cis.upenn.edu>
Signed-off-by: Dagaen Golomb <dgolomb@seas.upenn.edu>
---
xen/common/sched_rt.c | 159 +++++++++++++++++++++++++++++--------------------
1 file changed, 95 insertions(+), 64 deletions(-)
diff --git a/xen/common/sched_rt.c b/xen/common/sched_rt.c
index 4372486..d522272 100644
--- a/xen/common/sched_rt.c
+++ b/xen/common/sched_rt.c
@@ -16,6 +16,7 @@
#include <xen/delay.h>
#include <xen/event.h>
#include <xen/time.h>
+#include <xen/timer.h>
#include <xen/perfc.h>
#include <xen/sched-if.h>
#include <xen/softirq.h>
@@ -147,6 +148,16 @@ static unsigned int nr_rt_ops;
* Global lock is referenced by schedule_data.schedule_lock from all
* physical cpus. It can be grabbed via vcpu_schedule_lock_irq()
*/
+
+/* dedicated timer for replenishment */
+static struct timer repl_timer;
+
+/* records whether the replenishment timer has been started */
+static int timer_started;
+
+/* handler for the replenishment timer */
+static void repl_handler(void *data);
+
struct rt_private {
spinlock_t lock; /* the global coarse grand lock */
struct list_head sdom; /* list of availalbe domains, used for dump */
@@ -426,6 +437,7 @@ __runq_insert(const struct scheduler *ops, struct rt_vcpu *svc)
static int
rt_init(struct scheduler *ops)
{
+ const int cpu = smp_processor_id();
struct rt_private *prv = xzalloc(struct rt_private);
printk("Initializing RTDS scheduler\n"
@@ -454,6 +466,8 @@ rt_init(struct scheduler *ops)
ops->sched_data = prv;
+    init_timer(&repl_timer, repl_handler, ops, cpu);
+
return 0;
no_mem:
@@ -473,6 +487,9 @@ rt_deinit(const struct scheduler *ops)
xfree(_cpumask_scratch);
_cpumask_scratch = NULL;
}
+
+ kill_timer(&repl_timer);
+
xfree(prv);
}
@@ -635,6 +652,13 @@ rt_vcpu_insert(const struct scheduler *ops, struct vcpu *vc)
/* add rt_vcpu svc to scheduler-specific vcpu list of the dom */
list_add_tail(&svc->sdom_elem, &svc->sdom->vcpu);
+
+    if ( !timer_started )
+    {
+        /* the first vcpu to be inserted starts the replenishment timer */
+        timer_started = 1;
+        set_timer(&repl_timer, svc->cur_deadline);
+    }
}
/*
@@ -792,44 +816,6 @@ __runq_pick(const struct scheduler *ops, const cpumask_t *mask)
}
/*
- * Update vcpu's budget and
- * sort runq by insert the modifed vcpu back to runq
- * lock is grabbed before calling this function
- */
-static void
-__repl_update(const struct scheduler *ops, s_time_t now)
-{
- struct list_head *runq = rt_runq(ops);
- struct list_head *depletedq = rt_depletedq(ops);
- struct list_head *iter;
- struct list_head *tmp;
- struct rt_vcpu *svc = NULL;
-
- list_for_each_safe(iter, tmp, runq)
- {
- svc = __q_elem(iter);
- if ( now < svc->cur_deadline )
- break;
-
- rt_update_deadline(now, svc);
- /* reinsert the vcpu if its deadline is updated */
- __q_remove(svc);
- __runq_insert(ops, svc);
- }
-
- list_for_each_safe(iter, tmp, depletedq)
- {
- svc = __q_elem(iter);
- if ( now >= svc->cur_deadline )
- {
- rt_update_deadline(now, svc);
- __q_remove(svc); /* remove from depleted queue */
- __runq_insert(ops, svc); /* add to runq */
- }
- }
-}
-
-/*
* schedule function for rt scheduler.
* The lock is already grabbed in schedule.c, no need to lock here
*/
@@ -848,7 +834,6 @@ rt_schedule(const struct scheduler *ops, s_time_t now, bool_t tasklet_work_sched
/* burn_budget would return for IDLE VCPU */
burn_budget(ops, scurr, now);
- __repl_update(ops, now);
if ( tasklet_work_scheduled )
{
@@ -889,7 +874,7 @@ rt_schedule(const struct scheduler *ops, s_time_t now, bool_t tasklet_work_sched
}
}
- ret.time = MIN(snext->budget, MAX_SCHEDULE); /* sched quantum */
+    ret.time = snext->budget; /* let snext run until its budget is exhausted */
ret.task = snext->vcpu;
/* TRACE */
@@ -1033,10 +1018,6 @@ rt_vcpu_wake(const struct scheduler *ops, struct vcpu *vc)
{
struct rt_vcpu * const svc = rt_vcpu(vc);
s_time_t now = NOW();
- struct rt_private *prv = rt_priv(ops);
- struct rt_vcpu *snext = NULL; /* highest priority on RunQ */
- struct rt_dom *sdom = NULL;
- cpumask_t *online;
BUG_ON( is_idle_vcpu(vc) );
@@ -1074,14 +1055,7 @@ rt_vcpu_wake(const struct scheduler *ops, struct vcpu *vc)
/* insert svc to runq/depletedq because svc is not in queue now */
__runq_insert(ops, svc);
- __repl_update(ops, now);
-
- ASSERT(!list_empty(&prv->sdom));
- sdom = list_entry(prv->sdom.next, struct rt_dom, sdom_elem);
- online = cpupool_scheduler_cpumask(sdom->dom->cpupool);
- snext = __runq_pick(ops, online); /* pick snext from ALL valid cpus */
-
- runq_tickle(ops, snext);
+ runq_tickle(ops, svc);
return;
}
@@ -1094,10 +1068,6 @@ static void
rt_context_saved(const struct scheduler *ops, struct vcpu *vc)
{
struct rt_vcpu *svc = rt_vcpu(vc);
- struct rt_vcpu *snext = NULL;
- struct rt_dom *sdom = NULL;
- struct rt_private *prv = rt_priv(ops);
- cpumask_t *online;
spinlock_t *lock = vcpu_schedule_lock_irq(vc);
clear_bit(__RTDS_scheduled, &svc->flags);
@@ -1108,15 +1078,8 @@ rt_context_saved(const struct scheduler *ops, struct vcpu *vc)
if ( test_and_clear_bit(__RTDS_delayed_runq_add, &svc->flags) &&
likely(vcpu_runnable(vc)) )
{
+        /* only insert the pre-empted vcpu back */
__runq_insert(ops, svc);
- __repl_update(ops, NOW());
-
- ASSERT(!list_empty(&prv->sdom));
- sdom = list_entry(prv->sdom.next, struct rt_dom, sdom_elem);
- online = cpupool_scheduler_cpumask(sdom->dom->cpupool);
- snext = __runq_pick(ops, online); /* pick snext from ALL cpus */
-
- runq_tickle(ops, snext);
}
out:
vcpu_schedule_unlock_irq(lock, vc);
@@ -1167,6 +1130,74 @@ rt_dom_cntl(
return rc;
}
+static void repl_handler(void *data){
+ unsigned long flags;
+ s_time_t now = NOW();
+    s_time_t min_repl = LONG_MAX; /* max time used in comparison */
+ struct scheduler *ops = data;
+ struct rt_private *prv = rt_priv(ops);
+ struct list_head *runq = rt_runq(ops);
+ struct list_head *depletedq = rt_depletedq(ops);
+ struct list_head *iter;
+ struct list_head *tmp;
+ struct rt_vcpu *svc = NULL;
+
+    spin_lock_irqsave(&prv->lock, flags);
+
+ stop_timer(&repl_timer);
+
+ list_for_each_safe(iter, tmp, runq)
+ {
+ svc = __q_elem(iter);
+ if ( now < svc->cur_deadline )
+ break;
+ rt_update_deadline(now, svc);
+        /* track the earliest release time on the runq;
+         * vcpus here may have missed their deadlines
+         */
+        if ( min_repl > svc->cur_deadline )
+        {
+            min_repl = svc->cur_deadline;
+        }
+ /* reinsert the vcpu if its deadline is updated */
+ __q_remove(svc);
+ __runq_insert(ops, svc);
+ }
+
+ list_for_each_safe(iter, tmp, depletedq)
+ {
+ svc = __q_elem(iter);
+ if ( now >= svc->cur_deadline )
+ {
+ rt_update_deadline(now, svc);
+
+            /* scan the depletedq to find the minimal release time */
+            if ( min_repl > svc->cur_deadline )
+ {
+ min_repl = svc->cur_deadline;
+ }
+ __q_remove(svc);
+ __runq_insert(ops, svc);
+ runq_tickle(ops, svc);
+ }
+ }
+
+    /* if the timer fired but none of the vcpus
+     * needed a replenishment, re-arm the timer
+     * one default period from now
+     */
+    if ( min_repl == LONG_MAX )
+ {
+ set_timer(&repl_timer, now + RTDS_DEFAULT_PERIOD);
+ }
+ else
+ {
+        /* reprogram the timer using the most imminent release time */
+ set_timer(&repl_timer, min_repl);
+ }
+    spin_unlock_irqrestore(&prv->lock, flags);
+}
+
static struct rt_private _rt_priv;
const struct scheduler sched_rtds_def = {
--
1.7.9.5