From: Meng Xu <mengxu@cis.upenn.edu>
To: xen-devel@lists.xen.org
Cc: ian.campbell@citrix.com, stefano.stabellini@eu.citrix.com,
	george.dunlap@eu.citrix.com, dario.faggioli@citrix.com,
	ian.jackson@eu.citrix.com, xumengpanda@gmail.com,
	Meng Xu <mengxu@cis.upenn.edu>,
	JBeulich@suse.com
Subject: [PATCH] xen: sanity check input and serialize vcpu data in sched_rt.c
Date: Fri, 26 Sep 2014 14:29:43 -0400
Message-ID: <1411756183-2255-1-git-send-email-mengxu@cis.upenn.edu>
In-Reply-To: <5422C8BC.3050807@eu.citrix.com>

Sanity check the input parameters in rt_dom_cntl() and return -EINVAL when
the requested period or budget is zero;
Serialize rt_dom_cntl() against the scheduler by holding the global lock
(prv->lock) across both the getinfo and putinfo paths;
Move the call to rt_update_deadline() from rt_alloc_vdata() into
__runq_insert(), and drop the now-redundant calls in __repl_update() and
rt_vcpu_wake();
Take the vcpu schedule lock around __q_remove() in rt_vcpu_remove().
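
With rt_update_deadline() centralized in __runq_insert(), which already
asserts that prv->lock is held, callers no longer need to refresh a vcpu's
deadline before queueing it, and toolstack updates to a domain's period and
budget can no longer race with the scheduler reading those fields.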

Signed-off-by: Meng Xu <mengxu@cis.upenn.edu>
---
 xen/common/sched_rt.c |   27 ++++++++++++++++++++-------
 1 file changed, 20 insertions(+), 7 deletions(-)

diff --git a/xen/common/sched_rt.c b/xen/common/sched_rt.c
index 6c8faeb..6cd2648 100644
--- a/xen/common/sched_rt.c
+++ b/xen/common/sched_rt.c
@@ -369,11 +369,15 @@ __runq_insert(const struct scheduler *ops, struct rt_vcpu *svc)
     struct rt_private *prv = rt_priv(ops);
     struct list_head *runq = rt_runq(ops);
     struct list_head *iter;
+    s_time_t now = NOW();
 
     ASSERT( spin_is_locked(&prv->lock) );
 
     ASSERT( !__vcpu_on_q(svc) );
 
+    if ( now >= svc->cur_deadline )
+        rt_update_deadline(now, svc);
+
     /* add svc to runq if svc still has budget */
     if ( svc->cur_budget > 0 )
     {
@@ -528,8 +532,6 @@ rt_alloc_vdata(const struct scheduler *ops, struct vcpu *vc, void *dd)
 
     ASSERT( now >= svc->cur_deadline );
 
-    rt_update_deadline(now, svc);
-
     return svc;
 }
 
@@ -577,7 +579,12 @@ rt_vcpu_remove(const struct scheduler *ops, struct vcpu *vc)
     BUG_ON( sdom == NULL );
 
     if ( __vcpu_on_q(svc) )
+    {
+        spinlock_t *lock;
+        lock = vcpu_schedule_lock_irq(vc);
         __q_remove(svc);
+        vcpu_schedule_unlock_irq(lock, vc);
+    }
 
     if ( !is_idle_vcpu(vc) )
         list_del_init(&svc->sdom_elem);
@@ -733,7 +740,6 @@ __repl_update(const struct scheduler *ops, s_time_t now)
         if ( now < svc->cur_deadline )
             break;
 
-        rt_update_deadline(now, svc);
         /* reinsert the vcpu if its deadline is updated */
         __q_remove(svc);
         __runq_insert(ops, svc);
@@ -744,7 +750,6 @@ __repl_update(const struct scheduler *ops, s_time_t now)
         svc = __q_elem(iter);
         if ( now >= svc->cur_deadline )
         {
-            rt_update_deadline(now, svc);
             __q_remove(svc); /* remove from depleted queue */
             __runq_insert(ops, svc); /* add to runq */
         }
@@ -979,9 +984,6 @@ rt_vcpu_wake(const struct scheduler *ops, struct vcpu *vc)
         return;
     }
 
-    if ( now >= svc->cur_deadline)
-        rt_update_deadline(now, svc);
-
     /* insert svc to runq/depletedq because svc is not in queue now */
     __runq_insert(ops, svc);
 
@@ -1042,11 +1044,14 @@ rt_dom_cntl(
     struct domain *d,
     struct xen_domctl_scheduler_op *op)
 {
+    struct rt_private *prv = rt_priv(ops);
     struct rt_dom * const sdom = rt_dom(d);
     struct rt_vcpu *svc;
     struct list_head *iter;
+    unsigned long flags;
     int rc = 0;
 
+    spin_lock_irqsave(&prv->lock, flags);
     switch ( op->cmd )
     {
     case XEN_DOMCTL_SCHEDOP_getinfo:
@@ -1055,6 +1060,12 @@ rt_dom_cntl(
         op->u.rtds.budget = svc->budget / MICROSECS(1);
         break;
     case XEN_DOMCTL_SCHEDOP_putinfo:
+        if ( op->u.rtds.period == 0 || op->u.rtds.budget == 0 )
+        {
+            rc = -EINVAL;
+            break;
+        }
+
         list_for_each( iter, &sdom->vcpu )
         {
             struct rt_vcpu * svc = list_entry(iter, struct rt_vcpu, sdom_elem);
@@ -1064,6 +1075,8 @@ rt_dom_cntl(
         break;
     }
 
+    spin_unlock_irqrestore(&prv->lock, flags);
+
     return rc;
 }
 
-- 
1.7.9.5
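
[Editorial note] For readers following along without the full tree, below is
a minimal sketch of rt_dom_cntl() as it reads with this patch applied,
reconstructed from the hunks above. The first parameter (ops), the static
qualifier, and the lines elided between hunks (the getinfo field reads and
the per-vcpu assignments in the putinfo loop) are assumptions, marked as such
in the comments; everything else mirrors the diff.

/* Reconstructed sketch; see the note above for which lines are assumed. */
static int
rt_dom_cntl(
    const struct scheduler *ops,    /* assumed: needed by rt_priv(ops) */
    struct domain *d,
    struct xen_domctl_scheduler_op *op)
{
    struct rt_private *prv = rt_priv(ops);
    struct rt_dom * const sdom = rt_dom(d);
    struct rt_vcpu *svc;
    struct list_head *iter;
    unsigned long flags;
    int rc = 0;

    /* Both the get and put paths now run under the scheduler's global lock. */
    spin_lock_irqsave(&prv->lock, flags);
    switch ( op->cmd )
    {
    case XEN_DOMCTL_SCHEDOP_getinfo:
        /* Assumed (elided between hunks): report the first vcpu's values;
         * the budget line below is a context line shown in the diff. */
        svc = list_entry(sdom->vcpu.next, struct rt_vcpu, sdom_elem);
        op->u.rtds.period = svc->period / MICROSECS(1);
        op->u.rtds.budget = svc->budget / MICROSECS(1);
        break;
    case XEN_DOMCTL_SCHEDOP_putinfo:
        /* New sanity check: a zero period or budget is rejected outright. */
        if ( op->u.rtds.period == 0 || op->u.rtds.budget == 0 )
        {
            rc = -EINVAL;
            break;
        }

        list_for_each( iter, &sdom->vcpu )
        {
            struct rt_vcpu * svc = list_entry(iter, struct rt_vcpu, sdom_elem);
            /* Assumed (elided between hunks): apply the new values. */
            svc->period = MICROSECS(op->u.rtds.period);
            svc->budget = MICROSECS(op->u.rtds.budget);
        }
        break;
    }

    spin_unlock_irqrestore(&prv->lock, flags);

    return rc;
}

The irqsave variant matters here: prv->lock is also taken from scheduling
paths that can run with interrupts disabled, so acquiring it from a domctl
without masking interrupts would risk a self-deadlock if an interrupt on the
same CPU re-entered the scheduler and tried to take the same lock.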

Thread overview: 16+ messages
2014-09-20 22:13 [PATCH for 4.5 v4 1/4] xen: add real time scheduler rtds Meng Xu
2014-09-22 17:13 ` Dario Faggioli
2014-09-23 11:47 ` George Dunlap
2014-09-24 12:09 ` Jan Beulich
2014-09-24 13:14   ` Dario Faggioli
2014-09-24 13:25     ` Meng Xu
2014-09-24 13:33       ` George Dunlap
2014-09-24 13:32     ` Jan Beulich
2014-09-24 13:35       ` George Dunlap
2014-09-24 13:41         ` Dario Faggioli
2014-09-24 13:45           ` George Dunlap
2014-09-26 18:29         ` Meng Xu [this message]
2014-09-26 18:49           ` [PATCH] xen: sanity check input and serialize vcpu data in sched_rt.c Andrew Cooper
2014-09-29  8:09           ` Jan Beulich
2014-09-29 13:26             ` Dario Faggioli
2014-09-30 21:21             ` Meng Xu
