From: "Justin T. Weaver" <jtweaver@hawaii.edu>
To: xen-devel@lists.xen.org
Cc: george.dunlap@eu.citrix.com, dario.faggioli@citrix.com,
"Justin T. Weaver" <jtweaver@hawaii.edu>,
henric@hawaii.edu
Subject: [PATCH v3 3/4] sched: credit2: indent code sections to make review of patch 4/4 easier
Date: Wed, 25 Mar 2015 23:48:33 -1000
Message-ID: <1427363314-25430-4-git-send-email-jtweaver@hawaii.edu>
In-Reply-To: <1427363314-25430-1-git-send-email-jtweaver@hawaii.edu>

runq_tickle and choose_cpu each contain a code section that patch 4/4 (v3,
soft affinity) turns into the body of a loop. Indent those sections here so
that the patch 4/4 diff is easier to review. This patch makes no change other
than adding one four-space indent to each affected line (see the illustrative
sketch after the diffstat below).

Signed-off-by: Justin T. Weaver <jtweaver@hawaii.edu>
---
Changes in v3: First introduced in patch series version 3
---
xen/common/sched_credit2.c | 152 ++++++++++++++++++++++----------------------
1 file changed, 76 insertions(+), 76 deletions(-)
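
[Illustration, not part of the patch] To see why the blocks below are
pre-indented, here is a minimal, self-contained sketch of the kind of
two-step (soft, then hard) affinity loop that patch 4/4 is expected to wrap
around them. Everything in it -- pick_cpu(), the BALANCE_* constants, and the
sample masks -- is invented for this sketch and is not code from the series.

#include <stdio.h>

enum { BALANCE_SOFT = 0, BALANCE_HARD = 1, BALANCE_STEPS = 2 };

/* Pick the lowest-numbered set bit of mask, or -1 if the mask is empty. */
static int pick_cpu(unsigned int mask)
{
    for (int cpu = 0; cpu < 32; cpu++)
        if (mask & (1u << cpu))
            return cpu;
    return -1;
}

int main(void)
{
    unsigned int idle = 0x0c;      /* CPUs 2 and 3 are idle          */
    unsigned int tickled = 0x08;   /* CPU 3 already has a tickle     */
    unsigned int hard = 0x0f;      /* vCPU may run on CPUs 0-3       */
    unsigned int soft = 0x03;      /* but prefers CPUs 0-1           */

    for (int step = BALANCE_SOFT; step < BALANCE_STEPS; step++)
    {
        unsigned int allowed = (step == BALANCE_SOFT) ? (hard & soft) : hard;

        /* This is the kind of block patch 3/4 pre-indents: after patch 4/4
         * it runs once per balance step instead of once per call. */
        int cpu = pick_cpu(idle & ~tickled & allowed);
        if (cpu >= 0)
        {
            printf("step %d: tickle cpu %d\n", step, cpu);
            return 0;
        }
    }
    printf("no suitable idle cpu found\n");
    return 0;
}

With the sample masks above, the soft-affinity pass finds nothing and the
hard-affinity pass tickles CPU 2, which is the fallback behaviour the loop is
meant to preserve.
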
diff --git a/xen/common/sched_credit2.c b/xen/common/sched_credit2.c
index af716e4..bbcfbf2 100644
--- a/xen/common/sched_credit2.c
+++ b/xen/common/sched_credit2.c
@@ -534,58 +534,58 @@ runq_tickle(const struct scheduler *ops, unsigned int cpu, struct csched2_vcpu *
goto tickle;
}
- /* Get a mask of idle, but not tickled, that new is allowed to run on. */
- cpumask_andnot(&mask, &rqd->idle, &rqd->tickled);
- cpumask_and(&mask, &mask, new->vcpu->cpu_hard_affinity);
+ /* Get a mask of idle, but not tickled, that new is allowed to run on. */
+ cpumask_andnot(&mask, &rqd->idle, &rqd->tickled);
+ cpumask_and(&mask, &mask, new->vcpu->cpu_hard_affinity);
- /* If it's not empty, choose one */
- i = cpumask_cycle(cpu, &mask);
- if ( i < nr_cpu_ids )
- {
- ipid = i;
- goto tickle;
- }
+ /* If it's not empty, choose one */
+ i = cpumask_cycle(cpu, &mask);
+ if ( i < nr_cpu_ids )
+ {
+ ipid = i;
+ goto tickle;
+ }
/* Otherwise, look for the non-idle cpu with the lowest credit,
* skipping cpus which have been tickled but not scheduled yet,
* that new is allowed to run on. */
- cpumask_andnot(&mask, &rqd->active, &rqd->idle);
- cpumask_andnot(&mask, &mask, &rqd->tickled);
- cpumask_and(&mask, &mask, new->vcpu->cpu_hard_affinity);
+ cpumask_andnot(&mask, &rqd->active, &rqd->idle);
+ cpumask_andnot(&mask, &mask, &rqd->tickled);
+ cpumask_and(&mask, &mask, new->vcpu->cpu_hard_affinity);
- for_each_cpu(i, &mask)
- {
- struct csched2_vcpu * cur;
+ for_each_cpu(i, &mask)
+ {
+ struct csched2_vcpu * cur;
- /* Already looked at this one above */
- if ( i == cpu )
- continue;
+ /* Already looked at this one above */
+ if ( i == cpu )
+ continue;
- cur = CSCHED2_VCPU(curr_on_cpu(i));
+ cur = CSCHED2_VCPU(curr_on_cpu(i));
- BUG_ON(is_idle_vcpu(cur->vcpu));
+ BUG_ON(is_idle_vcpu(cur->vcpu));
- /* Update credits for current to see if we want to preempt */
- burn_credits(rqd, cur, now);
+ /* Update credits for current to see if we want to preempt */
+ burn_credits(rqd, cur, now);
- if ( cur->credit < lowest )
- {
- ipid = i;
- lowest = cur->credit;
- }
+ if ( cur->credit < lowest )
+ {
+ ipid = i;
+ lowest = cur->credit;
+ }
- /* TRACE */ {
- struct {
- unsigned dom:16,vcpu:16;
- unsigned credit;
- } d;
- d.dom = cur->vcpu->domain->domain_id;
- d.vcpu = cur->vcpu->vcpu_id;
- d.credit = cur->credit;
- trace_var(TRC_CSCHED2_TICKLE_CHECK, 1,
- sizeof(d),
- (unsigned char *)&d);
- }
+ /* TRACE */ {
+ struct {
+ unsigned dom:16,vcpu:16;
+ unsigned credit;
+ } d;
+ d.dom = cur->vcpu->domain->domain_id;
+ d.vcpu = cur->vcpu->vcpu_id;
+ d.credit = cur->credit;
+ trace_var(TRC_CSCHED2_TICKLE_CHECK, 1,
+ sizeof(d),
+ (unsigned char *)&d);
+ }
}
/* Only switch to another processor if the credit difference is greater
@@ -1144,45 +1144,45 @@ choose_cpu(const struct scheduler *ops, struct vcpu *vc)
min_avgload = MAX_LOAD;
/* Find the runqueue with the lowest instantaneous load */
- for_each_cpu(i, &prv->active_queues)
- {
- struct csched2_runqueue_data *rqd;
- s_time_t rqd_avgload = MAX_LOAD;
-
- rqd = prv->rqd + i;
-
- /* If checking a different runqueue, grab the lock,
- * check hard affinity, read the avg, and then release the lock.
- *
- * If on our own runqueue, don't grab or release the lock;
- * but subtract our own load from the runqueue load to simulate
- * impartiality.
- *
- * svc's hard affinity may have changed; this function is the
- * credit 2 scheduler's first opportunity to react to the change,
- * so it is possible here that svc does not have hard affinity
- * with any of the pcpus of svc's currently assigned run queue.
- */
- if ( rqd == svc->rqd )
+ for_each_cpu(i, &prv->active_queues)
{
- if ( cpumask_intersects(vc->cpu_hard_affinity, &rqd->active) )
- rqd_avgload = rqd->b_avgload - svc->avgload;
- }
- else if ( spin_trylock(&rqd->lock) )
- {
- if ( cpumask_intersects(vc->cpu_hard_affinity, &rqd->active) )
- rqd_avgload = rqd->b_avgload;
+ struct csched2_runqueue_data *rqd;
+ s_time_t rqd_avgload = MAX_LOAD;
+
+ rqd = prv->rqd + i;
+
+ /* If checking a different runqueue, grab the lock,
+ * check hard affinity, read the avg, and then release the lock.
+ *
+ * If on our own runqueue, don't grab or release the lock;
+ * but subtract our own load from the runqueue load to simulate
+ * impartiality.
+ *
+ * svc's hard affinity may have changed; this function is the
+ * credit 2 scheduler's first opportunity to react to the change,
+ * so it is possible here that svc does not have hard affinity
+ * with any of the pcpus of svc's currently assigned run queue.
+ */
+ if ( rqd == svc->rqd )
+ {
+ if ( cpumask_intersects(vc->cpu_hard_affinity, &rqd->active) )
+ rqd_avgload = rqd->b_avgload - svc->avgload;
+ }
+ else if ( spin_trylock(&rqd->lock) )
+ {
+ if ( cpumask_intersects(vc->cpu_hard_affinity, &rqd->active) )
+ rqd_avgload = rqd->b_avgload;
- spin_unlock(&rqd->lock);
- }
- else
- continue;
+ spin_unlock(&rqd->lock);
+ }
+ else
+ continue;
- if ( rqd_avgload < min_avgload )
- {
- min_avgload = rqd_avgload;
- min_rqi=i;
- }
+ if ( rqd_avgload < min_avgload )
+ {
+ min_avgload = rqd_avgload;
+ min_rqi=i;
+ }
}
/* We didn't find anyone (most likely because of spinlock contention). */
--
1.7.10.4
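[Illustration, not part of the patch] The first hunk re-indents runq_tickle's
CPU-selection logic: prefer an idle, not-yet-tickled CPU inside the vCPU's
hard affinity; failing that, tickle the allowed, not-tickled CPU whose
current vCPU has the lowest credit. A simplified, self-contained model of
that policy (plain C, made-up masks and credits, no "active" mask; not Xen
code) follows.

#include <stdio.h>

#define NR_CPUS 4

/* Pick the lowest-numbered set bit of mask, or -1 if the mask is empty. */
static int first_cpu(unsigned int mask)
{
    for (int cpu = 0; cpu < NR_CPUS; cpu++)
        if (mask & (1u << cpu))
            return cpu;
    return -1;
}

int main(void)
{
    unsigned int idle = 0x00;            /* no CPU is idle                 */
    unsigned int tickled = 0x02;         /* CPU 1 already tickled          */
    unsigned int hard_affinity = 0x0d;   /* new vCPU may run on 0, 2, 3    */
    int credit[NR_CPUS] = { 50, 10, 30, 20 };   /* credit of current vCPUs */

    /* 1) Prefer an idle, not-yet-tickled CPU the vCPU is allowed on. */
    int target = first_cpu(idle & ~tickled & hard_affinity);

    /* 2) Otherwise, pick the allowed, not-tickled CPU whose current vCPU
     *    has the lowest credit, i.e. the cheapest one to preempt. */
    if (target < 0)
    {
        int lowest = 1 << 30;
        for (int cpu = 0; cpu < NR_CPUS; cpu++)
        {
            if (!((hard_affinity & ~tickled) & (1u << cpu)))
                continue;
            if (credit[cpu] < lowest)
            {
                lowest = credit[cpu];
                target = cpu;
            }
        }
    }

    printf("tickle cpu %d\n", target);   /* prints "tickle cpu 3" here */
    return 0;
}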
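[Illustration, not part of the patch] The second hunk's comment describes the
runqueue-selection policy in choose_cpu: only consider runqueues that
intersect the vCPU's hard affinity, skip a foreign runqueue whose lock cannot
be taken, discount the vCPU's own load on its current runqueue, and keep the
minimum. A minimal, self-contained model of that policy (plain C, invented
numbers, a boolean standing in for spin_trylock(); not Xen code) follows.

#include <stdbool.h>
#include <stdio.h>

#define MAX_LOAD (1LL << 60)
#define NR_RQS   3

struct rq {
    long long b_avgload;      /* smoothed runqueue load            */
    unsigned int active;      /* bitmask of pCPUs in this runqueue */
    bool lock_busy;           /* stand-in for spin_trylock failing */
};

int main(void)
{
    struct rq rqs[NR_RQS] = {
        { .b_avgload = 700, .active = 0x03, .lock_busy = false },
        { .b_avgload = 400, .active = 0x0c, .lock_busy = true  },
        { .b_avgload = 500, .active = 0x30, .lock_busy = false },
    };
    int own_rq = 0;                    /* vCPU currently lives on rq 0 */
    long long own_load = 350;          /* its own contribution         */
    unsigned int hard_affinity = 0x33; /* allowed pCPUs: 0, 1, 4, 5    */

    long long min_avgload = MAX_LOAD;
    int min_rqi = -1;

    for (int i = 0; i < NR_RQS; i++)
    {
        long long rqd_avgload = MAX_LOAD;

        if (i == own_rq)
        {
            /* Own runqueue: no locking, but discount our own load. */
            if (rqs[i].active & hard_affinity)
                rqd_avgload = rqs[i].b_avgload - own_load;
        }
        else if (!rqs[i].lock_busy)
        {
            /* Foreign runqueue: only considered if the "lock" is free
             * and it has at least one pCPU in our hard affinity. */
            if (rqs[i].active & hard_affinity)
                rqd_avgload = rqs[i].b_avgload;
        }
        else
            continue;   /* don't wait on a contended runqueue */

        if (rqd_avgload < min_avgload)
        {
            min_avgload = rqd_avgload;
            min_rqi = i;
        }
    }

    printf("picked runqueue %d (avgload %lld)\n", min_rqi, min_avgload);
    return 0;
}

With these numbers, runqueue 1 is skipped because its lock is busy, and the
vCPU's own runqueue wins once its own load is subtracted.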