From: George Dunlap <george.dunlap@eu.citrix.com>
To: Dario Faggioli <dario.faggioli@citrix.com>
Cc: Paul Durrant <Paul.Durrant@citrix.com>,
"Keir (Xen.org)" <keir@xen.org>, Jan Beulich <jbeulich@suse.com>,
"xen-devel@lists.xen.org" <xen-devel@lists.xen.org>
Subject: Re: [PATCH 3 of 6] xen: sched: generalize scheduling related perfcounter macros
Date: Tue, 23 Oct 2012 17:27:46 +0100
Message-ID: <5086C582.8080804@eu.citrix.com>
In-Reply-To: <a6e4517a5a24f7771cf6.1350916836@Solace>
On 22/10/12 15:40, Dario Faggioli wrote:
> Moving some of them from sched_credit.c to the generic scheduler code.
> This also allows the other schedulers to use perf counters equally
> easily.
>
> This change is mainly preparatory work for what is stated above. In
> fact, it mostly does s/CSCHED_STAT/SCHED_STAT/ and, in general, implies
> no functional changes.
>
> Signed-off-by: Dario Faggioli <dario.faggioli@citrix.com>
Acked-by: George Dunlap <george.dunlap@eu.citrix.com>
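
For the record, the practical upshot: any scheduler can now bump a perf
counter through one generic macro, instead of credit's private
CSCHED_STAT_CRANK().  A minimal sketch of what that enables (the
"foo_wakeups" counter and sched_foo.c are hypothetical; a real counter
would first need its own PERFCOUNTER() entry in
xen/include/xen/perfc_defn.h):

    /* In some other scheduler, e.g. a hypothetical sched_foo.c: */
    #include <xen/sched.h>  /* now pulls in perfc.h and SCHED_STAT_CRANK() */

    static void foo_vcpu_wake(const struct scheduler *ops, struct vcpu *vc)
    {
        /* Expands to perfc_incr(foo_wakeups); perfc_incr() is itself a
         * no-op in builds without PERF_COUNTERS. */
        SCHED_STAT_CRANK(foo_wakeups);
    }
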
>
> diff --git a/xen/common/sched_credit.c b/xen/common/sched_credit.c
> --- a/xen/common/sched_credit.c
> +++ b/xen/common/sched_credit.c
> @@ -16,25 +16,12 @@
> #include <xen/delay.h>
> #include <xen/event.h>
> #include <xen/time.h>
> -#include <xen/perfc.h>
> #include <xen/sched-if.h>
> #include <xen/softirq.h>
> #include <asm/atomic.h>
> #include <xen/errno.h>
> #include <xen/keyhandler.h>
>
> -/*
> - * CSCHED_STATS
> - *
> - * Manage very basic per-vCPU counters and stats.
> - *
> - * Useful for debugging live systems. The stats are displayed
> - * with runq dumps ('r' on the Xen console).
> - */
> -#ifdef PERF_COUNTERS
> -#define CSCHED_STATS
> -#endif
> -
>
> /*
> * Basic constants
> @@ -75,29 +62,36 @@
>
>
> /*
> - * Stats
> + * CSCHED_STATS
> + *
> + * Manage very basic per-vCPU counters and stats.
> + *
> + * Useful for debugging live systems. The stats are displayed
> + * with runq dumps ('r' on the Xen console).
> */
> -#define CSCHED_STAT_CRANK(_X) (perfc_incr(_X))
> +#ifdef SCHED_STATS
>
> -#ifdef CSCHED_STATS
> +#define CSCHED_STATS
>
> -#define CSCHED_VCPU_STATS_RESET(_V) \
> +#define SCHED_VCPU_STATS_RESET(_V) \
> do \
> { \
> memset(&(_V)->stats, 0, sizeof((_V)->stats)); \
> } while ( 0 )
>
> -#define CSCHED_VCPU_STAT_CRANK(_V, _X) (((_V)->stats._X)++)
> +#define SCHED_VCPU_STAT_CRANK(_V, _X) (((_V)->stats._X)++)
>
> -#define CSCHED_VCPU_STAT_SET(_V, _X, _Y) (((_V)->stats._X) = (_Y))
> +#define SCHED_VCPU_STAT_SET(_V, _X, _Y) (((_V)->stats._X) = (_Y))
>
> -#else /* CSCHED_STATS */
> +#else /* !SCHED_STATS */
>
> -#define CSCHED_VCPU_STATS_RESET(_V) do {} while ( 0 )
> -#define CSCHED_VCPU_STAT_CRANK(_V, _X) do {} while ( 0 )
> -#define CSCHED_VCPU_STAT_SET(_V, _X, _Y) do {} while ( 0 )
> +#undef CSCHED_STATS
>
> -#endif /* CSCHED_STATS */
> +#define SCHED_VCPU_STATS_RESET(_V) do {} while ( 0 )
> +#define SCHED_VCPU_STAT_CRANK(_V, _X) do {} while ( 0 )
> +#define SCHED_VCPU_STAT_SET(_V, _X, _Y) do {} while ( 0 )
> +
> +#endif /* SCHED_STATS */
>
>
> /*
> @@ -264,13 +258,13 @@ static inline void
> if ( new->pri > cur->pri )
> {
> if ( cur->pri == CSCHED_PRI_IDLE )
> - CSCHED_STAT_CRANK(tickle_local_idler);
> + SCHED_STAT_CRANK(tickle_local_idler);
> else if ( cur->pri == CSCHED_PRI_TS_OVER )
> - CSCHED_STAT_CRANK(tickle_local_over);
> + SCHED_STAT_CRANK(tickle_local_over);
> else if ( cur->pri == CSCHED_PRI_TS_UNDER )
> - CSCHED_STAT_CRANK(tickle_local_under);
> + SCHED_STAT_CRANK(tickle_local_under);
> else
> - CSCHED_STAT_CRANK(tickle_local_other);
> + SCHED_STAT_CRANK(tickle_local_other);
>
> cpumask_set_cpu(cpu, &mask);
> }
> @@ -283,7 +277,7 @@ static inline void
> {
> if ( cpumask_empty(prv->idlers) )
> {
> - CSCHED_STAT_CRANK(tickle_idlers_none);
> + SCHED_STAT_CRANK(tickle_idlers_none);
> }
> else
> {
> @@ -292,7 +286,7 @@ static inline void
> cpumask_and(&idle_mask, prv->idlers, new->vcpu->cpu_affinity);
> if ( !cpumask_empty(&idle_mask) )
> {
> - CSCHED_STAT_CRANK(tickle_idlers_some);
> + SCHED_STAT_CRANK(tickle_idlers_some);
> if ( opt_tickle_one_idle )
> {
> this_cpu(last_tickle_cpu) =
> @@ -404,7 +398,7 @@ static inline void
> BUG_ON( !is_idle_vcpu(vc) );
> }
>
> - CSCHED_STAT_CRANK(vcpu_check);
> + SCHED_STAT_CRANK(vcpu_check);
> }
> #define CSCHED_VCPU_CHECK(_vc) (__csched_vcpu_check(_vc))
> #else
> @@ -437,7 +431,7 @@ static inline int
> ((uint64_t)vcpu_migration_delay * 1000u));
>
> if ( hot )
> - CSCHED_STAT_CRANK(vcpu_hot);
> + SCHED_STAT_CRANK(vcpu_hot);
>
> return hot;
> }
> @@ -559,8 +553,8 @@ static inline void
>
> if ( list_empty(&svc->active_vcpu_elem) )
> {
> - CSCHED_VCPU_STAT_CRANK(svc, state_active);
> - CSCHED_STAT_CRANK(acct_vcpu_active);
> + SCHED_VCPU_STAT_CRANK(svc, state_active);
> + SCHED_STAT_CRANK(acct_vcpu_active);
>
> sdom->active_vcpu_count++;
> list_add(&svc->active_vcpu_elem, &sdom->active_vcpu);
> @@ -583,8 +577,8 @@ static inline void
>
> BUG_ON( list_empty(&svc->active_vcpu_elem) );
>
> - CSCHED_VCPU_STAT_CRANK(svc, state_idle);
> - CSCHED_STAT_CRANK(acct_vcpu_idle);
> + SCHED_VCPU_STAT_CRANK(svc, state_idle);
> + SCHED_STAT_CRANK(acct_vcpu_idle);
>
> BUG_ON( prv->weight < sdom->weight );
> sdom->active_vcpu_count--;
> @@ -633,8 +627,8 @@ csched_vcpu_acct(struct csched_private *
> }
> else if ( _csched_cpu_pick(ops, current, 0) != cpu )
> {
> - CSCHED_VCPU_STAT_CRANK(svc, migrate_r);
> - CSCHED_STAT_CRANK(migrate_running);
> + SCHED_VCPU_STAT_CRANK(svc, migrate_r);
> + SCHED_STAT_CRANK(migrate_running);
> set_bit(_VPF_migrating, &current->pause_flags);
> cpu_raise_softirq(cpu, SCHEDULE_SOFTIRQ);
> }
> @@ -658,8 +652,8 @@ csched_alloc_vdata(const struct schedule
> svc->flags = 0U;
> svc->pri = is_idle_domain(vc->domain) ?
> CSCHED_PRI_IDLE : CSCHED_PRI_TS_UNDER;
> - CSCHED_VCPU_STATS_RESET(svc);
> - CSCHED_STAT_CRANK(vcpu_init);
> + SCHED_VCPU_STATS_RESET(svc);
> + SCHED_STAT_CRANK(vcpu_init);
> return svc;
> }
>
> @@ -690,7 +684,7 @@ csched_vcpu_remove(const struct schedule
> struct csched_dom * const sdom = svc->sdom;
> unsigned long flags;
>
> - CSCHED_STAT_CRANK(vcpu_destroy);
> + SCHED_STAT_CRANK(vcpu_destroy);
>
> if ( __vcpu_on_runq(svc) )
> __runq_remove(svc);
> @@ -711,7 +705,7 @@ csched_vcpu_sleep(const struct scheduler
> {
> struct csched_vcpu * const svc = CSCHED_VCPU(vc);
>
> - CSCHED_STAT_CRANK(vcpu_sleep);
> + SCHED_STAT_CRANK(vcpu_sleep);
>
> BUG_ON( is_idle_vcpu(vc) );
>
> @@ -731,19 +725,19 @@ csched_vcpu_wake(const struct scheduler
>
> if ( unlikely(per_cpu(schedule_data, cpu).curr == vc) )
> {
> - CSCHED_STAT_CRANK(vcpu_wake_running);
> + SCHED_STAT_CRANK(vcpu_wake_running);
> return;
> }
> if ( unlikely(__vcpu_on_runq(svc)) )
> {
> - CSCHED_STAT_CRANK(vcpu_wake_onrunq);
> + SCHED_STAT_CRANK(vcpu_wake_onrunq);
> return;
> }
>
> if ( likely(vcpu_runnable(vc)) )
> - CSCHED_STAT_CRANK(vcpu_wake_runnable);
> + SCHED_STAT_CRANK(vcpu_wake_runnable);
> else
> - CSCHED_STAT_CRANK(vcpu_wake_not_runnable);
> + SCHED_STAT_CRANK(vcpu_wake_not_runnable);
>
> /*
> * We temporarily boost the priority of awaking VCPUs!
> @@ -883,8 +877,6 @@ csched_dom_init(const struct scheduler *
> {
> struct csched_dom *sdom;
>
> - CSCHED_STAT_CRANK(dom_init);
> -
> if ( is_idle_domain(dom) )
> return 0;
>
> @@ -906,7 +898,6 @@ csched_free_domdata(const struct schedul
> static void
> csched_dom_destroy(const struct scheduler *ops, struct domain *dom)
> {
> - CSCHED_STAT_CRANK(dom_destroy);
> csched_free_domdata(ops, CSCHED_DOM(dom));
> }
>
> @@ -989,18 +980,18 @@ csched_acct(void* dummy)
> if ( prv->credit_balance < 0 )
> {
> credit_total -= prv->credit_balance;
> - CSCHED_STAT_CRANK(acct_balance);
> + SCHED_STAT_CRANK(acct_balance);
> }
>
> if ( unlikely(weight_total == 0) )
> {
> prv->credit_balance = 0;
> spin_unlock_irqrestore(&prv->lock, flags);
> - CSCHED_STAT_CRANK(acct_no_work);
> + SCHED_STAT_CRANK(acct_no_work);
> goto out;
> }
>
> - CSCHED_STAT_CRANK(acct_run);
> + SCHED_STAT_CRANK(acct_run);
>
> weight_left = weight_total;
> credit_balance = 0;
> @@ -1075,7 +1066,7 @@ csched_acct(void* dummy)
> * the queue to give others a chance at them in future
> * accounting periods.
> */
> - CSCHED_STAT_CRANK(acct_reorder);
> + SCHED_STAT_CRANK(acct_reorder);
> list_del(&sdom->active_sdom_elem);
> list_add(&sdom->active_sdom_elem, &prv->active_sdom);
> }
> @@ -1110,7 +1101,7 @@ csched_acct(void* dummy)
> credit < -credit_cap &&
> !(svc->flags & CSCHED_FLAG_VCPU_PARKED) )
> {
> - CSCHED_STAT_CRANK(vcpu_park);
> + SCHED_STAT_CRANK(vcpu_park);
> vcpu_pause_nosync(svc->vcpu);
> svc->flags |= CSCHED_FLAG_VCPU_PARKED;
> }
> @@ -1118,7 +1109,7 @@ csched_acct(void* dummy)
> /* Lower bound on credits */
> if ( credit < -prv->credits_per_tslice )
> {
> - CSCHED_STAT_CRANK(acct_min_credit);
> + SCHED_STAT_CRANK(acct_min_credit);
> credit = -prv->credits_per_tslice;
> atomic_set(&svc->credit, credit);
> }
> @@ -1135,7 +1126,7 @@ csched_acct(void* dummy)
> * call to make sure the VCPU's priority is not boosted
> * if it is woken up here.
> */
> - CSCHED_STAT_CRANK(vcpu_unpark);
> + SCHED_STAT_CRANK(vcpu_unpark);
> vcpu_unpause(svc->vcpu);
> svc->flags &= ~CSCHED_FLAG_VCPU_PARKED;
> }
> @@ -1151,8 +1142,8 @@ csched_acct(void* dummy)
> }
> }
>
> - CSCHED_VCPU_STAT_SET(svc, credit_last, credit);
> - CSCHED_VCPU_STAT_SET(svc, credit_incr, credit_fair);
> + SCHED_VCPU_STAT_SET(svc, credit_last, credit);
> + SCHED_VCPU_STAT_SET(svc, credit_incr, credit_fair);
> credit_balance += credit;
> }
> }
> @@ -1229,8 +1220,8 @@ csched_runq_steal(int peer_cpu, int cpu,
> if (__csched_vcpu_is_migrateable(vc, cpu))
> {
> /* We got a candidate. Grab it! */
> - CSCHED_VCPU_STAT_CRANK(speer, migrate_q);
> - CSCHED_STAT_CRANK(migrate_queued);
> + SCHED_VCPU_STAT_CRANK(speer, migrate_q);
> + SCHED_STAT_CRANK(migrate_queued);
> WARN_ON(vc->is_urgent);
> __runq_remove(speer);
> vc->processor = cpu;
> @@ -1239,7 +1230,7 @@ csched_runq_steal(int peer_cpu, int cpu,
> }
> }
>
> - CSCHED_STAT_CRANK(steal_peer_idle);
> + SCHED_STAT_CRANK(steal_peer_idle);
> return NULL;
> }
>
> @@ -1260,11 +1251,11 @@ csched_load_balance(struct csched_privat
> goto out;
>
> if ( snext->pri == CSCHED_PRI_IDLE )
> - CSCHED_STAT_CRANK(load_balance_idle);
> + SCHED_STAT_CRANK(load_balance_idle);
> else if ( snext->pri == CSCHED_PRI_TS_OVER )
> - CSCHED_STAT_CRANK(load_balance_over);
> + SCHED_STAT_CRANK(load_balance_over);
> else
> - CSCHED_STAT_CRANK(load_balance_other);
> + SCHED_STAT_CRANK(load_balance_other);
>
> /*
> * Peek at non-idling CPUs in the system, starting with our
> @@ -1288,7 +1279,7 @@ csched_load_balance(struct csched_privat
> */
> if ( !pcpu_schedule_trylock(peer_cpu) )
> {
> - CSCHED_STAT_CRANK(steal_trylock_failed);
> + SCHED_STAT_CRANK(steal_trylock_failed);
> continue;
> }
>
> @@ -1327,7 +1318,7 @@ csched_schedule(
> struct task_slice ret;
> s_time_t runtime, tslice;
>
> - CSCHED_STAT_CRANK(schedule);
> + SCHED_STAT_CRANK(schedule);
> CSCHED_VCPU_CHECK(current);
>
> runtime = now - current->runstate.state_entry_time;
> diff --git a/xen/common/schedule.c b/xen/common/schedule.c
> --- a/xen/common/schedule.c
> +++ b/xen/common/schedule.c
> @@ -314,11 +314,14 @@ int sched_init_domain(struct domain *d)
> {
> printk("%s: Initializing domain %d\n", __func__, d->domain_id);
>
> + SCHED_STAT_CRANK(dom_init);
> +
> return SCHED_OP(DOM2OP(d), init_domain, d);
> }
>
> void sched_destroy_domain(struct domain *d)
> {
> + SCHED_STAT_CRANK(dom_destroy);
> SCHED_OP(DOM2OP(d), destroy_domain, d);
> }
>
> @@ -1086,7 +1089,7 @@ static void schedule(void)
>
> ASSERT(!in_atomic());
>
> - perfc_incr(sched_run);
> + SCHED_STAT_CRANK(sched_run);
>
> sd = &this_cpu(schedule_data);
>
> @@ -1164,7 +1167,7 @@ static void schedule(void)
>
> pcpu_schedule_unlock_irq(cpu);
>
> - perfc_incr(sched_ctx);
> + SCHED_STAT_CRANK(sched_ctx);
>
> stop_timer(&prev->periodic_timer);
>
> @@ -1198,7 +1201,7 @@ void context_saved(struct vcpu *prev)
> static void s_timer_fn(void *unused)
> {
> raise_softirq(SCHEDULE_SOFTIRQ);
> - perfc_incr(sched_irq);
> + SCHED_STAT_CRANK(sched_irq);
> }
>
> /* Per-VCPU periodic timer function: sends a virtual timer interrupt. */
> diff --git a/xen/include/xen/perfc_defn.h b/xen/include/xen/perfc_defn.h
> --- a/xen/include/xen/perfc_defn.h
> +++ b/xen/include/xen/perfc_defn.h
> @@ -12,13 +12,19 @@ PERFCOUNTER(calls_from_multicall,
> PERFCOUNTER(irqs, "#interrupts")
> PERFCOUNTER(ipis, "#IPIs")
>
> +/* Generic scheduler counters (applicable to all schedulers) */
> PERFCOUNTER(sched_irq, "sched: timer")
> PERFCOUNTER(sched_run, "sched: runs through scheduler")
> PERFCOUNTER(sched_ctx, "sched: context switches")
> +PERFCOUNTER(schedule, "sched: specific scheduler")
> +PERFCOUNTER(dom_init, "sched: dom_init")
> +PERFCOUNTER(dom_destroy, "sched: dom_destroy")
> +PERFCOUNTER(vcpu_init, "sched: vcpu_init")
> +PERFCOUNTER(vcpu_destroy, "sched: vcpu_destroy")
>
> +/* credit specific counters */
> PERFCOUNTER(delay_ms, "csched: delay")
> PERFCOUNTER(vcpu_check, "csched: vcpu_check")
> -PERFCOUNTER(schedule, "csched: schedule")
> PERFCOUNTER(acct_run, "csched: acct_run")
> PERFCOUNTER(acct_no_work, "csched: acct_no_work")
> PERFCOUNTER(acct_balance, "csched: acct_balance")
> @@ -46,10 +52,6 @@ PERFCOUNTER(steal_trylock_failed, "csc
> PERFCOUNTER(steal_peer_idle, "csched: steal_peer_idle")
> PERFCOUNTER(migrate_queued, "csched: migrate_queued")
> PERFCOUNTER(migrate_running, "csched: migrate_running")
> -PERFCOUNTER(dom_init, "csched: dom_init")
> -PERFCOUNTER(dom_destroy, "csched: dom_destroy")
> -PERFCOUNTER(vcpu_init, "csched: vcpu_init")
> -PERFCOUNTER(vcpu_destroy, "csched: vcpu_destroy")
> PERFCOUNTER(vcpu_hot, "csched: vcpu_hot")
>
> PERFCOUNTER(need_flush_tlb_flush, "PG_need_flush tlb flushes")
> diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
> --- a/xen/include/xen/sched.h
> +++ b/xen/include/xen/sched.h
> @@ -16,6 +16,7 @@
> #include <xen/tasklet.h>
> #include <xen/mm.h>
> #include <xen/smp.h>
> +#include <xen/perfc.h>
> #include <asm/atomic.h>
> #include <xen/wait.h>
> #include <public/xen.h>
> @@ -29,6 +30,18 @@
> DEFINE_XEN_GUEST_HANDLE(vcpu_runstate_info_compat_t);
> #endif
>
> +/*
> + * Stats
> + *
> + * Enable and ease the use of scheduling related performance counters.
> + *
> + */
> +#ifdef PERF_COUNTERS
> +#define SCHED_STATS
> +#endif
> +
> +#define SCHED_STAT_CRANK(_X) (perfc_incr(_X))
> +
> /* A global pointer to the initial domain (DOM0). */
> extern struct domain *dom0;
>
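
One note on the per-vCPU half, since it is easy to miss in the diff: the
SCHED_VCPU_STAT_* macros still live in sched_credit.c and operate on
credit's own stats struct, which is only compiled in when CSCHED_STATS
is defined.  Reconstructed from the counters visible above (an abridged
sketch; the exact field types are an assumption):

    /* xen/common/sched_credit.c, abridged sketch */
    struct csched_vcpu {
        /* ... */
    #ifdef CSCHED_STATS
        struct {
            int      credit_last;  /* SCHED_VCPU_STAT_SET(svc, credit_last, credit) */
            uint32_t credit_incr;  /* SCHED_VCPU_STAT_SET(svc, credit_incr, credit_fair) */
            uint32_t state_active; /* SCHED_VCPU_STAT_CRANK(svc, state_active) */
            uint32_t state_idle;   /* SCHED_VCPU_STAT_CRANK(svc, state_idle) */
            uint32_t migrate_q;    /* SCHED_VCPU_STAT_CRANK(speer, migrate_q) */
            uint32_t migrate_r;    /* SCHED_VCPU_STAT_CRANK(svc, migrate_r) */
        } stats;
    #endif
    };

When SCHED_STATS (and hence CSCHED_STATS) is not defined, all of these
macros collapse to empty do {} while ( 0 ) statements, so the absent
stats member is never referenced and the whole thing compiles away.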