From: Meng Xu
Subject: [PATCH] xen: sanity check input and serialize vcpu data in sched_rt.c
Date: Fri, 26 Sep 2014 14:29:43 -0400
Message-ID: <1411756183-2255-1-git-send-email-mengxu@cis.upenn.edu>
In-Reply-To: <5422C8BC.3050807@eu.citrix.com>
References: <5422C8BC.3050807@eu.citrix.com>
To: xen-devel@lists.xen.org
Cc: ian.campbell@citrix.com, stefano.stabellini@eu.citrix.com,
    george.dunlap@eu.citrix.com, dario.faggioli@citrix.com,
    ian.jackson@eu.citrix.com, xumengpanda@gmail.com, Meng Xu,
    JBeulich@suse.com

Sanity check the input parameters in rt_dom_cntl(), rejecting a zero
period or budget with -EINVAL;

Serialize rt_dom_cntl() against the scheduler-wide private lock, so
that reading or updating a domain's scheduling parameters cannot race
with the scheduler itself;

Move the call to rt_update_deadline() from rt_alloc_vdata() to
__runq_insert(), so that a vcpu's deadline and budget are refreshed in
one place whenever the vcpu is queued, and drop the now-redundant
calls in __repl_update() and rt_vcpu_wake();

Take the per-vcpu schedule lock around __q_remove() in
rt_vcpu_remove().

Signed-off-by: Meng Xu
---
 xen/common/sched_rt.c |   27 ++++++++++++++++++++-------
 1 file changed, 20 insertions(+), 7 deletions(-)
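[Not part of the patch: a minimal, self-contained sketch of the
replenishment step that __runq_insert() now performs before queueing a
vcpu. The toy_rt_vcpu struct and the toy_* names are simplified
illustrations, not the in-tree types; the loop follows the
replenishment rule (advance the deadline by whole periods past 'now',
then refill the budget).]

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef int64_t s_time_t;            /* time value, as in Xen */

    struct toy_rt_vcpu {
        s_time_t period;                 /* replenishment period */
        s_time_t budget;                 /* full budget per period */
        s_time_t cur_deadline;           /* end of current period */
        s_time_t cur_budget;             /* budget left this period */
    };

    /* Advance the deadline by whole periods until it lies beyond
     * 'now', then refill the budget for the new period. */
    static void toy_update_deadline(s_time_t now, struct toy_rt_vcpu *svc)
    {
        assert(svc->period > 0);         /* putinfo now rejects 0 */

        do
            svc->cur_deadline += svc->period;
        while ( svc->cur_deadline <= now );

        svc->cur_budget = svc->budget;
    }

    int main(void)
    {
        struct toy_rt_vcpu v = { .period = 10, .budget = 4,
                                 .cur_deadline = 10, .cur_budget = 0 };

        /* The vcpu slept across several periods; one call at
         * queueing time resynchronises it. */
        toy_update_deadline(35, &v);
        printf("deadline=%lld budget=%lld\n",
               (long long)v.cur_deadline, (long long)v.cur_budget);
        /* prints: deadline=40 budget=4 */
        return 0;
    }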
diff --git a/xen/common/sched_rt.c b/xen/common/sched_rt.c
index 6c8faeb..6cd2648 100644
--- a/xen/common/sched_rt.c
+++ b/xen/common/sched_rt.c
@@ -369,11 +369,15 @@ __runq_insert(const struct scheduler *ops, struct rt_vcpu *svc)
     struct rt_private *prv = rt_priv(ops);
     struct list_head *runq = rt_runq(ops);
     struct list_head *iter;
+    s_time_t now = NOW();
 
     ASSERT( spin_is_locked(&prv->lock) );
 
     ASSERT( !__vcpu_on_q(svc) );
 
+    if ( now >= svc->cur_deadline )
+        rt_update_deadline(now, svc);
+
     /* add svc to runq if svc still has budget */
     if ( svc->cur_budget > 0 )
     {
@@ -528,8 +532,6 @@ rt_alloc_vdata(const struct scheduler *ops, struct vcpu *vc, void *dd)
 
     ASSERT( now >= svc->cur_deadline );
 
-    rt_update_deadline(now, svc);
-
     return svc;
 }
 
@@ -577,7 +579,12 @@ rt_vcpu_remove(const struct scheduler *ops, struct vcpu *vc)
     BUG_ON( sdom == NULL );
 
     if ( __vcpu_on_q(svc) )
+    {
+        spinlock_t *lock;
+        lock = vcpu_schedule_lock_irq(vc);
         __q_remove(svc);
+        vcpu_schedule_unlock_irq(lock, vc);
+    }
 
     if ( !is_idle_vcpu(vc) )
         list_del_init(&svc->sdom_elem);
@@ -733,7 +740,6 @@ __repl_update(const struct scheduler *ops, s_time_t now)
         if ( now < svc->cur_deadline )
             break;
 
-        rt_update_deadline(now, svc);
         /* reinsert the vcpu if its deadline is updated */
         __q_remove(svc);
         __runq_insert(ops, svc);
@@ -744,7 +750,6 @@ __repl_update(const struct scheduler *ops, s_time_t now)
         svc = __q_elem(iter);
         if ( now >= svc->cur_deadline )
         {
-            rt_update_deadline(now, svc);
             __q_remove(svc);  /* remove from depleted queue */
             __runq_insert(ops, svc);  /* add to runq */
         }
@@ -979,9 +984,6 @@ rt_vcpu_wake(const struct scheduler *ops, struct vcpu *vc)
         return;
     }
 
-    if ( now >= svc->cur_deadline)
-        rt_update_deadline(now, svc);
-
     /* insert svc to runq/depletedq because svc is not in queue now */
     __runq_insert(ops, svc);
 
@@ -1042,11 +1044,14 @@ rt_dom_cntl(
     struct domain *d,
     struct xen_domctl_scheduler_op *op)
 {
+    struct rt_private *prv = rt_priv(ops);
     struct rt_dom * const sdom = rt_dom(d);
     struct rt_vcpu *svc;
     struct list_head *iter;
+    unsigned long flags;
     int rc = 0;
 
+    spin_lock_irqsave(&prv->lock, flags);
     switch ( op->cmd )
     {
     case XEN_DOMCTL_SCHEDOP_getinfo:
@@ -1055,6 +1060,12 @@ rt_dom_cntl(
         op->u.rtds.budget = svc->budget / MICROSECS(1);
         break;
     case XEN_DOMCTL_SCHEDOP_putinfo:
+        if ( op->u.rtds.period == 0 || op->u.rtds.budget == 0 )
+        {
+            rc = -EINVAL;
+            break;
+        }
+
         list_for_each( iter, &sdom->vcpu )
         {
             struct rt_vcpu * svc = list_entry(iter, struct rt_vcpu, sdom_elem);
@@ -1064,6 +1075,8 @@ rt_dom_cntl(
         break;
     }
 
+    spin_unlock_irqrestore(&prv->lock, flags);
+
     return rc;
 }
-- 
1.7.9.5
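[Reviewer note, not part of the patch: a hypothetical user-space
analogue of the serialize-and-validate pattern the putinfo path now
follows. pthread_mutex_t stands in for the prv->lock spinlock, and the
toy_* names are made up for illustration; the point is that the
parameters are validated and then read or written only under one
lock, so a concurrent getinfo never observes a half-updated
period/budget pair.]

    #include <errno.h>
    #include <pthread.h>
    #include <stdint.h>
    #include <stdio.h>

    struct toy_params {
        pthread_mutex_t lock;            /* stands in for prv->lock */
        uint64_t period_us;
        uint64_t budget_us;
    };

    static int toy_putinfo(struct toy_params *p,
                           uint64_t period, uint64_t budget)
    {
        int rc = 0;

        pthread_mutex_lock(&p->lock);
        if ( period == 0 || budget == 0 )
            rc = -EINVAL;                /* reject nonsense input */
        else
        {
            p->period_us = period;       /* both fields change under */
            p->budget_us = budget;       /* the same lock            */
        }
        pthread_mutex_unlock(&p->lock);

        return rc;
    }

    static void toy_getinfo(struct toy_params *p,
                            uint64_t *period, uint64_t *budget)
    {
        /* Readers take the same lock, so the pair is consistent. */
        pthread_mutex_lock(&p->lock);
        *period = p->period_us;
        *budget = p->budget_us;
        pthread_mutex_unlock(&p->lock);
    }

    int main(void)
    {
        struct toy_params p = { .lock = PTHREAD_MUTEX_INITIALIZER,
                                .period_us = 10000, .budget_us = 4000 };
        uint64_t per, bud;

        printf("putinfo(0, 0) -> %d\n",
               toy_putinfo(&p, 0, 0));   /* rejected: -EINVAL */
        toy_putinfo(&p, 20000, 8000);
        toy_getinfo(&p, &per, &bud);
        printf("period=%llu budget=%llu\n",
               (unsigned long long)per, (unsigned long long)bud);
        return 0;
    }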