From: George Dunlap <george.dunlap@citrix.com>
To: Dario Faggioli <dario.faggioli@citrix.com>,
xen-devel@lists.xenproject.org
Cc: George Dunlap <george.dunlap@eu.citrix.com>
Subject: Re: [PATCH 5/7] xen: sched: get rid of the per domain vCPU list in Credit2
Date: Thu, 8 Oct 2015 16:40:56 +0100
Message-ID: <56168E88.2080603@citrix.com>
In-Reply-To: <20151008125305.12522.60745.stgit@Solace.station>
On 08/10/15 13:53, Dario Faggioli wrote:
> As there is currently no reason to bother having
> it and keeping it updated.
>
> In fact, it is only used for dumping and changing
> vCPUs' parameters, but that can be achieved easily
> with for_each_vcpu.
>
> While there, improve alignment of comments, and
> add a const qualifier to a pointer, making things
> more consistent with what happens everywhere else
> in the source file.
>
> This also allows us to kill one of the remaining
> FIXMEs in the code, which is always good.
>
> Signed-off-by: Dario Faggioli <dario.faggioli@citrix.com>
Acked-by: George Dunlap <george.dunlap@citrix.com>
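
(As a side note for the archives: the replacement pattern, used in the
csched2_dom_cntl() and csched2_dump() hunks below, is just the generic
domain iterator plus the per-scheduler lookup. A minimal sketch, with
'd' standing for the struct domain in question:

    struct vcpu *vc;

    for_each_vcpu ( d, vc )
    {
        /* Map the generic vcpu to its Credit2 shadow structure. */
        struct csched2_vcpu * const svc = CSCHED2_VCPU(vc);

        /* ... read or update svc here, taking the vcpu's schedule
         * lock first if the field is runqueue-protected ... */
    }

No scheduler-private bookkeeping is needed for the walk, which is
exactly what lets the sdom_elem list linkage go away.)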
> ---
> Cc: George Dunlap <george.dunlap@eu.citrix.com>
> ---
> xen/common/sched_credit2.c | 34 +++++++++++-----------------------
> xen/common/sched_rt.c | 2 +-
> 2 files changed, 12 insertions(+), 24 deletions(-)
>
> diff --git a/xen/common/sched_credit2.c b/xen/common/sched_credit2.c
> index 234f798..178a665 100644
> --- a/xen/common/sched_credit2.c
> +++ b/xen/common/sched_credit2.c
> @@ -234,9 +234,8 @@ struct csched2_private {
> * Virtual CPU
> */
> struct csched2_vcpu {
> - struct list_head rqd_elem; /* On the runqueue data list */
> - struct list_head sdom_elem; /* On the domain vcpu list */
> - struct list_head runq_elem; /* On the runqueue */
> + struct list_head rqd_elem; /* On the runqueue data list */
> + struct list_head runq_elem; /* On the runqueue */
> struct csched2_runqueue_data *rqd; /* Up-pointer to the runqueue */
>
> /* Up-pointers */
> @@ -261,7 +260,6 @@ struct csched2_vcpu {
> * Domain
> */
> struct csched2_dom {
> - struct list_head vcpu;
> struct list_head sdom_elem;
> struct domain *dom;
> uint16_t weight;
> @@ -772,7 +770,6 @@ csched2_alloc_vdata(const struct scheduler *ops, struct vcpu *vc, void *dd)
> return NULL;
>
> INIT_LIST_HEAD(&svc->rqd_elem);
> - INIT_LIST_HEAD(&svc->sdom_elem);
> INIT_LIST_HEAD(&svc->runq_elem);
>
> svc->sdom = dd;
> @@ -876,9 +873,6 @@ csched2_vcpu_insert(const struct scheduler *ops, struct vcpu *vc)
>
> BUG_ON(is_idle_vcpu(vc));
>
> - /* FIXME: Do we need the private lock here? */
> - list_add_tail(&svc->sdom_elem, &svc->sdom->vcpu);
> -
> /* Add vcpu to runqueue of initial processor */
> lock = vcpu_schedule_lock_irq(vc);
>
> @@ -923,10 +917,6 @@ csched2_vcpu_remove(const struct scheduler *ops, struct vcpu *vc)
>
> vcpu_schedule_unlock_irq(lock, vc);
>
> - /* Remove from sdom list. Don't need a lock for this, as it's called
> - * syncronously when nothing else can happen. */
> - list_del_init(&svc->sdom_elem);
> -
> svc->sdom->nr_vcpus--;
> }
> }
> @@ -1443,7 +1433,7 @@ csched2_dom_cntl(
>
> if ( op->u.credit2.weight != 0 )
> {
> - struct list_head *iter;
> + struct vcpu *vc;
> int old_weight;
>
> old_weight = sdom->weight;
> @@ -1451,9 +1441,9 @@ csched2_dom_cntl(
> sdom->weight = op->u.credit2.weight;
>
> /* Update weights for vcpus, and max_weight for runqueues on which they reside */
> - list_for_each ( iter, &sdom->vcpu )
> + for_each_vcpu ( d, vc )
> {
> - struct csched2_vcpu *svc = list_entry(iter, struct csched2_vcpu, sdom_elem);
> + struct csched2_vcpu *svc = CSCHED2_VCPU(vc);
>
> /* NB: Locking order is important here. Because we grab this lock here, we
> * must never lock csched2_priv.lock if we're holding a runqueue lock.
> @@ -1487,7 +1477,6 @@ csched2_alloc_domdata(const struct scheduler *ops, struct domain *dom)
> return NULL;
>
> /* Initialize credit and weight */
> - INIT_LIST_HEAD(&sdom->vcpu);
> INIT_LIST_HEAD(&sdom->sdom_elem);
> sdom->dom = dom;
> sdom->weight = CSCHED2_DEFAULT_WEIGHT;
> @@ -1539,9 +1528,7 @@ csched2_free_domdata(const struct scheduler *ops, void *data)
> static void
> csched2_dom_destroy(const struct scheduler *ops, struct domain *dom)
> {
> - struct csched2_dom *sdom = CSCHED2_DOM(dom);
> -
> - BUG_ON(!list_empty(&sdom->vcpu));
> + BUG_ON(CSCHED2_DOM(dom)->nr_vcpus != 0);
>
> csched2_free_domdata(ops, CSCHED2_DOM(dom));
> }
> @@ -1881,7 +1868,7 @@ csched2_dump_pcpu(const struct scheduler *ops, int cpu)
> static void
> csched2_dump(const struct scheduler *ops)
> {
> - struct list_head *iter_sdom, *iter_svc;
> + struct list_head *iter_sdom;
> struct csched2_private *prv = CSCHED2_PRIV(ops);
> unsigned long flags;
> int i, loop;
> @@ -1926,6 +1913,8 @@ csched2_dump(const struct scheduler *ops)
> list_for_each( iter_sdom, &prv->sdom )
> {
> struct csched2_dom *sdom;
> + struct vcpu *vc;
> +
> sdom = list_entry(iter_sdom, struct csched2_dom, sdom_elem);
>
> printk("\tDomain: %d w %d v %d\n\t",
> @@ -1933,12 +1922,11 @@ csched2_dump(const struct scheduler *ops)
> sdom->weight,
> sdom->nr_vcpus);
>
> - list_for_each( iter_svc, &sdom->vcpu )
> + for_each_vcpu( sdom->dom, vc )
> {
> - struct csched2_vcpu *svc;
> + struct csched2_vcpu * const svc = CSCHED2_VCPU(vc);
> spinlock_t *lock;
>
> - svc = list_entry(iter_svc, struct csched2_vcpu, sdom_elem);
> lock = vcpu_schedule_lock(svc->vcpu);
>
> printk("\t%3d: ", ++loop);
> diff --git a/xen/common/sched_rt.c b/xen/common/sched_rt.c
> index 797adc1..0ca4902 100644
> --- a/xen/common/sched_rt.c
> +++ b/xen/common/sched_rt.c
> @@ -1147,7 +1147,7 @@ rt_dom_cntl(
> break;
> }
> spin_lock_irqsave(&prv->lock, flags);
> - for_each_vcpu( sdom->dom, vc )
> + for_each_vcpu( d, vc )
> {
> svc = rt_vcpu(vc);
> svc->period = MICROSECS(op->u.rtds.period); /* transfer to nanosec */
>
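One more remark, mostly for the archives: in csched2_dom_destroy(), the
list_empty() check on the dropped sdom->vcpu list is traded for an
assertion on the nr_vcpus counter. The invariant this relies on, in
sketch form (the increment side is not visible in the hunks above, so
take that line as an assumption about the surrounding code):

    /* csched2_vcpu_insert():  sdom->nr_vcpus++;   (assumed, not in the hunks) */
    /* csched2_vcpu_remove():  svc->sdom->nr_vcpus--;          (hunk above)    */
    /* csched2_dom_destroy():  BUG_ON(CSCHED2_DOM(dom)->nr_vcpus != 0);        */

As long as insert/remove keep the counter in sync, an empty list and a
zero counter say the same thing.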
Thread overview: 29+ messages
2015-10-08 12:52 [PATCH 0/7] xen: sched: fix locking of {insert, remove}_vcpu() Dario Faggioli
2015-10-08 12:52 ` [PATCH 1/7] xen: sched: fix locking of remove_vcpu() in credit1 Dario Faggioli
2015-10-08 13:16 ` Andrew Cooper
2015-10-08 12:52 ` [PATCH 2/7] xen: sched: fix locking for insert_vcpu() in credit1 and RTDS Dario Faggioli
2015-10-08 13:18 ` Andrew Cooper
2015-10-08 15:16 ` George Dunlap
2015-10-08 15:49 ` Dario Faggioli
2015-10-08 20:12 ` Dario Faggioli
2015-10-08 12:52 ` [PATCH 3/7] xen: sched: better handle (not) inserting idle vCPUs in runqueues Dario Faggioli
2015-10-08 15:27 ` George Dunlap
2015-10-08 15:39 ` Dario Faggioli
2015-10-09 5:31 ` Juergen Gross
2015-10-08 12:52 ` [PATCH 4/7] xen: sched: get rid of the per domain vCPU list in RTDS Dario Faggioli
2015-10-08 13:47 ` Andrew Cooper
2015-10-08 15:31 ` George Dunlap
2015-10-08 12:53 ` [PATCH 5/7] xen: sched: get rid of the per domain vCPU list in Credit2 Dario Faggioli
2015-10-08 13:10 ` Andrew Cooper
2015-10-08 13:17 ` Dario Faggioli
2015-10-08 13:56 ` Andrew Cooper
2015-10-08 15:32 ` Dario Faggioli
2015-10-08 15:39 ` Andrew Cooper
2015-10-08 15:40 ` George Dunlap [this message]
2015-10-08 12:53 ` [PATCH 6/7] xen: sched: fix an 'off by one \t' in credit2 debug dump Dario Faggioli
2015-10-08 15:42 ` George Dunlap
2015-10-08 15:59 ` Dario Faggioli
2015-10-08 12:53 ` [PATCH 7/7] xen: sched / cpupool: dump the actual value of NOW() Dario Faggioli
2015-10-08 13:12 ` Andrew Cooper
2015-10-08 15:37 ` Jan Beulich
2015-10-09 5:09 ` Juergen Gross