From: Dario Faggioli <dario.faggioli@citrix.com>
To: xen-devel@lists.xenproject.org
Cc: George Dunlap <george.dunlap@eu.citrix.com>,
Meng Xu <mengxu@cis.upenn.edu>
Subject: [PATCH 4/7] xen: sched: get rid of the per domain vCPU list in RTDS
Date: Thu, 08 Oct 2015 14:52:58 +0200
Message-ID: <20151008125258.12522.94232.stgit@Solace.station>
In-Reply-To: <20151008124027.12522.42552.stgit@Solace.station>
Currently, there is no reason to bother having
it and keeping it updated. In fact, it is only
used for dumping and changing vCPUs' parameters,
and both can be achieved just as easily with
for_each_vcpu.
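
For reference, the pattern we are switching to looks
like the minimal sketch below. It is illustrative only:
the wrapper function is hypothetical, while
for_each_vcpu, rt_vcpu() and rt_dump_vcpu() are the
ones already in xen/common/sched_rt.c:

    /*
     * Illustrative only: walk a domain's vCPUs with the
     * generic for_each_vcpu macro, rather than with a
     * scheduler-private list, and act on each vCPU's
     * scheduler-specific data.
     */
    static void rt_walk_domain_vcpus(const struct scheduler *ops,
                                     struct rt_dom *sdom)
    {
        struct vcpu *vc;

        for_each_vcpu( sdom->dom, vc )
        {
            struct rt_vcpu *svc = rt_vcpu(vc);

            rt_dump_vcpu(ops, svc); /* or update svc->period, etc. */
        }
    }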
Signed-off-by: Dario Faggioli <dario.faggioli@citrix.com>
---
Cc: George Dunlap <george.dunlap@eu.citrix.com>
Cc: Meng Xu <mengxu@cis.upenn.edu>
---
xen/common/sched_rt.c | 36 +++++++++++++-----------------------
1 file changed, 13 insertions(+), 23 deletions(-)
diff --git a/xen/common/sched_rt.c b/xen/common/sched_rt.c
index 37a32a4..797adc1 100644
--- a/xen/common/sched_rt.c
+++ b/xen/common/sched_rt.c
@@ -160,7 +160,6 @@ struct rt_private {
*/
struct rt_vcpu {
struct list_head q_elem; /* on the runq/depletedq list */
- struct list_head sdom_elem; /* on the domain VCPU list */
/* Up-pointers */
struct rt_dom *sdom;
@@ -182,7 +181,6 @@ struct rt_vcpu {
* Domain
*/
struct rt_dom {
- struct list_head vcpu; /* link its VCPUs */
struct list_head sdom_elem; /* link list on rt_priv */
struct domain *dom; /* pointer to upper domain */
};
@@ -290,7 +288,7 @@ rt_dump_pcpu(const struct scheduler *ops, int cpu)
static void
rt_dump(const struct scheduler *ops)
{
- struct list_head *iter_sdom, *iter_svc, *runq, *depletedq, *iter;
+ struct list_head *runq, *depletedq, *iter;
struct rt_private *prv = rt_priv(ops);
struct rt_vcpu *svc;
struct rt_dom *sdom;
@@ -319,14 +317,16 @@ rt_dump(const struct scheduler *ops)
}
printk("Domain info:\n");
- list_for_each( iter_sdom, &prv->sdom )
+ list_for_each( iter, &prv->sdom )
{
- sdom = list_entry(iter_sdom, struct rt_dom, sdom_elem);
+ struct vcpu *vc;
+
+ sdom = list_entry(iter, struct rt_dom, sdom_elem);
printk("\tdomain: %d\n", sdom->dom->domain_id);
- list_for_each( iter_svc, &sdom->vcpu )
+ for_each_vcpu( sdom->dom, vc )
{
- svc = list_entry(iter_svc, struct rt_vcpu, sdom_elem);
+ svc = rt_vcpu(vc);
rt_dump_vcpu(ops, svc);
}
}
@@ -527,7 +527,6 @@ rt_alloc_domdata(const struct scheduler *ops, struct domain *dom)
if ( sdom == NULL )
return NULL;
- INIT_LIST_HEAD(&sdom->vcpu);
INIT_LIST_HEAD(&sdom->sdom_elem);
sdom->dom = dom;
@@ -587,7 +586,6 @@ rt_alloc_vdata(const struct scheduler *ops, struct vcpu *vc, void *dd)
return NULL;
INIT_LIST_HEAD(&svc->q_elem);
- INIT_LIST_HEAD(&svc->sdom_elem);
svc->flags = 0U;
svc->sdom = dd;
svc->vcpu = vc;
@@ -614,8 +612,7 @@ rt_free_vdata(const struct scheduler *ops, void *priv)
* This function is called in sched_move_domain() in schedule.c
* when moving a domain to a new cpupool.
* It inserts the vcpus of the moving domain into the scheduler's RunQ in
- * dest. cpupool; and insert rt_vcpu svc to scheduler-specific
- * vcpu list of the dom
+ * dest. cpupool.
*/
static void
rt_vcpu_insert(const struct scheduler *ops, struct vcpu *vc)
@@ -634,15 +631,11 @@ rt_vcpu_insert(const struct scheduler *ops, struct vcpu *vc)
__runq_insert(ops, svc);
vcpu_schedule_unlock_irq(lock, vc);
- /* add rt_vcpu svc to scheduler-specific vcpu list of the dom */
- list_add_tail(&svc->sdom_elem, &svc->sdom->vcpu);
-
SCHED_STAT_CRANK(vcpu_insert);
}
/*
- * Remove rt_vcpu svc from the old scheduler in source cpupool; and
- * Remove rt_vcpu svc from scheduler-specific vcpu list of the dom
+ * Remove rt_vcpu svc from the old scheduler in source cpupool.
*/
static void
rt_vcpu_remove(const struct scheduler *ops, struct vcpu *vc)
@@ -659,9 +652,6 @@ rt_vcpu_remove(const struct scheduler *ops, struct vcpu *vc)
if ( __vcpu_on_q(svc) )
__q_remove(svc);
vcpu_schedule_unlock_irq(lock, vc);
-
- if ( !is_idle_vcpu(vc) )
- list_del_init(&svc->sdom_elem);
}
/*
@@ -1137,7 +1127,7 @@ rt_dom_cntl(
struct rt_private *prv = rt_priv(ops);
struct rt_dom * const sdom = rt_dom(d);
struct rt_vcpu *svc;
- struct list_head *iter;
+ struct vcpu *vc;
unsigned long flags;
int rc = 0;
@@ -1145,7 +1135,7 @@ rt_dom_cntl(
{
case XEN_DOMCTL_SCHEDOP_getinfo:
spin_lock_irqsave(&prv->lock, flags);
- svc = list_entry(sdom->vcpu.next, struct rt_vcpu, sdom_elem);
+ svc = rt_vcpu(sdom->dom->vcpu[0]);
op->u.rtds.period = svc->period / MICROSECS(1); /* transfer to us */
op->u.rtds.budget = svc->budget / MICROSECS(1);
spin_unlock_irqrestore(&prv->lock, flags);
@@ -1157,9 +1147,9 @@ rt_dom_cntl(
break;
}
spin_lock_irqsave(&prv->lock, flags);
- list_for_each( iter, &sdom->vcpu )
+ for_each_vcpu( sdom->dom, vc )
{
- struct rt_vcpu * svc = list_entry(iter, struct rt_vcpu, sdom_elem);
+ svc = rt_vcpu(vc);
svc->period = MICROSECS(op->u.rtds.period); /* transfer to nanosec */
svc->budget = MICROSECS(op->u.rtds.budget);
}
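
A note about the XEN_DOMCTL_SCHEDOP_getinfo hunk above:
with the per domain list gone, the parameters are read
from vcpu[0]. That is fine because putinfo (right below
it) assigns the same period and budget to every vCPU of
the domain, so any vCPU is representative. A minimal
sketch of the invariant being relied upon (illustrative
only, not part of the patch):

    /*
     * Illustrative only: after XEN_DOMCTL_SCHEDOP_putinfo,
     * all vCPUs of a domain carry the same parameters, so
     * getinfo can read them from d->vcpu[0].
     */
    static bool_t rt_dom_params_uniform(const struct domain *d)
    {
        const struct rt_vcpu *first = rt_vcpu(d->vcpu[0]);
        struct vcpu *vc;

        for_each_vcpu( d, vc )
        {
            const struct rt_vcpu *svc = rt_vcpu(vc);

            if ( svc->period != first->period ||
                 svc->budget != first->budget )
                return 0;
        }

        return 1;
    }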