From: Colin Cross <ccross@android.com>
To: linux-arm-kernel@lists.infradead.org
Subject: [PATCHv2 5/5] cpuidle: coupled: add trace events
Date: Wed, 14 Mar 2012 11:29:54 -0700
Message-ID: <1331749794-8056-6-git-send-email-ccross@android.com>
In-Reply-To: <1331749794-8056-1-git-send-email-ccross@android.com>

Add trace events to allow debugging of coupled cpuidle.  The events can
be used to verify cpuidle performance, including time spent spinning
and time spent in safe states.  Not intended for merging.

Signed-off-by: Colin Cross <ccross@android.com>
---
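Notes:

The events are defined under TRACE_SYSTEM "cpuidle", so with debugfs
mounted at /sys/kernel/debug they can all be enabled at once by writing
1 to /sys/kernel/debug/tracing/events/cpuidle/enable (or individually
through the per-event enable files) and read back from
/sys/kernel/debug/tracing/trace.
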
 drivers/cpuidle/coupled.c      |   48 +++++++-
 include/trace/events/cpuidle.h |  243 ++++++++++++++++++++++++++++++++++++++++
 2 files changed, 287 insertions(+), 4 deletions(-)
 create mode 100644 include/trace/events/cpuidle.h
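
The twelve TRACE_EVENTs in the new header are identical apart from
their names.  If this were headed for merging they could be collapsed
into a DECLARE_EVENT_CLASS plus one DEFINE_EVENT per name; a rough
sketch (class name "coupled" chosen here for illustration):

	DECLARE_EVENT_CLASS(coupled,

		TP_PROTO(unsigned int cpu),

		TP_ARGS(cpu),

		TP_STRUCT__entry(
			__field(unsigned int, cpu)
			__field(unsigned int, seq)
		),

		TP_fast_assign(
			__entry->cpu = cpu;
			/* global sequence number orders events across cpus */
			__entry->seq = atomic_inc_return(&cpuidle_trace_seq);
		),

		TP_printk("%u %u", __entry->seq, __entry->cpu)
	);

	DEFINE_EVENT(coupled, coupled_enter,
		TP_PROTO(unsigned int cpu),
		TP_ARGS(cpu));

	/* ...and likewise for the remaining eleven event names. */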

diff --git a/drivers/cpuidle/coupled.c b/drivers/cpuidle/coupled.c
index 188a53f..3bc8a02 100644
--- a/drivers/cpuidle/coupled.c
+++ b/drivers/cpuidle/coupled.c
@@ -15,10 +15,12 @@
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  */
+#define DEBUG
 
 #include <linux/kernel.h>
 #include <linux/cpu.h>
 #include <linux/cpuidle.h>
+#include <linux/delay.h>
 #include <linux/mutex.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
@@ -26,6 +28,11 @@
 
 #include "cpuidle.h"
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/cpuidle.h>
+
+atomic_t cpuidle_trace_seq;
+
 /*
  * coupled cpuidle states
  *
@@ -154,20 +161,33 @@ static cpumask_t cpuidle_coupled_poked_mask;
 void cpuidle_coupled_parallel_barrier(struct cpuidle_device *dev, atomic_t *a)
 {
 	int n = atomic_read(&dev->coupled->alive_count);
+#ifdef DEBUG
+	int loops = 0;
+#endif
 
 	smp_mb__before_atomic_inc();
 	atomic_inc(a);
 
-	while (atomic_read(a) < n)
+	while (atomic_read(a) < n) {
+#ifdef DEBUG
+		BUG_ON(loops++ > loops_per_jiffy);
+#else
 		cpu_relax();
+#endif
+	}
 
 	if (atomic_inc_return(a) == n * 2) {
 		atomic_set(a, 0);
 		return;
 	}
 
-	while (atomic_read(a) > n)
+	while (atomic_read(a) > n) {
+#ifdef DEBUG
+		BUG_ON(loops++ > loops_per_jiffy);
+#else
 		cpu_relax();
+#endif
+	}
 }
 
 /**
@@ -232,6 +252,7 @@ static inline int cpuidle_coupled_get_state(struct cpuidle_device *dev,
 static void cpuidle_coupled_poked(void *info)
 {
 	int cpu = (unsigned long)info;
+	trace_coupled_poked(cpu);
 	cpumask_clear_cpu(cpu, &cpuidle_coupled_poked_mask);
 }
 
@@ -251,8 +272,10 @@ static void cpuidle_coupled_poke(int cpu)
 {
 	struct call_single_data *csd = &per_cpu(cpuidle_coupled_poke_cb, cpu);
 
-	if (!cpumask_test_and_set_cpu(cpu, &cpuidle_coupled_poked_mask))
+	if (!cpumask_test_and_set_cpu(cpu, &cpuidle_coupled_poked_mask)) {
+		trace_coupled_poke(cpu);
 		__smp_call_function_single(cpu, csd, 0);
+	}
 }
 
 /**
@@ -361,28 +384,37 @@ int cpuidle_enter_state_coupled(struct cpuidle_device *dev,
 	BUG_ON(atomic_read(&coupled->ready_count));
 	cpuidle_coupled_set_waiting(dev, coupled, next_state);
 
+	trace_coupled_enter(dev->cpu);
+
 retry:
 	/*
 	 * Wait for all coupled cpus to be idle, using the deepest state
 	 * allowed for a single cpu.
 	 */
 	while (!need_resched() && !cpuidle_coupled_cpus_waiting(coupled)) {
+		trace_coupled_safe_enter(dev->cpu);
 		entered_state = cpuidle_enter_state(dev, drv,
 			dev->safe_state_index);
+		trace_coupled_safe_exit(dev->cpu);
 
+		trace_coupled_spin(dev->cpu);
 		local_irq_enable();
 		while (cpumask_test_cpu(dev->cpu, &cpuidle_coupled_poked_mask))
 			cpu_relax();
 		local_irq_disable();
+		trace_coupled_unspin(dev->cpu);
 	}
 
 	/* give a chance to process any remaining pokes */
+	trace_coupled_spin(dev->cpu);
 	local_irq_enable();
 	while (cpumask_test_cpu(dev->cpu, &cpuidle_coupled_poked_mask))
 		cpu_relax();
 	local_irq_disable();
+	trace_coupled_unspin(dev->cpu);
 
 	if (need_resched()) {
+		trace_coupled_abort(dev->cpu);
 		cpuidle_coupled_set_not_waiting(dev, coupled);
 		goto out;
 	}
@@ -401,29 +433,35 @@ retry:
 	smp_mb__after_atomic_inc();
 	/* alive_count can't change while ready_count > 0 */
 	alive = atomic_read(&coupled->alive_count);
+	trace_coupled_spin(dev->cpu);
 	while (atomic_read(&coupled->ready_count) != alive) {
 		/* Check if any other cpus bailed out of idle. */
 		if (!cpuidle_coupled_cpus_waiting(coupled)) {
 			atomic_dec(&coupled->ready_count);
 			smp_mb__after_atomic_dec();
+			trace_coupled_detected_abort(dev->cpu);
 			goto retry;
 		}
 
 		cpu_relax();
 	}
+	trace_coupled_unspin(dev->cpu);
 
 	/* all cpus have acked the coupled state */
 	smp_rmb();
 
 	next_state = cpuidle_coupled_get_state(dev, coupled);
-
+	trace_coupled_idle_enter(dev->cpu);
 	entered_state = cpuidle_enter_state(dev, drv, next_state);
+	trace_coupled_idle_exit(dev->cpu);
 
 	cpuidle_coupled_set_not_waiting(dev, coupled);
 	atomic_dec(&coupled->ready_count);
 	smp_mb__after_atomic_dec();
 
 out:
+	trace_coupled_exit(dev->cpu);
+
 	/*
 	 * Normal cpuidle states are expected to return with irqs enabled.
 	 * That leads to an inefficiency where a cpu receiving an interrupt
@@ -445,8 +483,10 @@ out:
 	 * a cpu exits and re-enters the ready state because this cpu has
 	 * already decremented its waiting_count.
 	 */
+	trace_coupled_spin(dev->cpu);
 	while (atomic_read(&coupled->ready_count) != 0)
 		cpu_relax();
+	trace_coupled_unspin(dev->cpu);
 
 	smp_rmb();
 
diff --git a/include/trace/events/cpuidle.h b/include/trace/events/cpuidle.h
new file mode 100644
index 0000000..9b2cbbb
--- /dev/null
+++ b/include/trace/events/cpuidle.h
@@ -0,0 +1,243 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM cpuidle
+
+#if !defined(_TRACE_CPUIDLE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_CPUIDLE_H
+
+#include <linux/atomic.h>
+#include <linux/tracepoint.h>
+
+extern atomic_t cpuidle_trace_seq;
+
+TRACE_EVENT(coupled_enter,
+
+	TP_PROTO(unsigned int cpu),
+
+	TP_ARGS(cpu),
+
+	TP_STRUCT__entry(
+		__field(unsigned int, cpu)
+		__field(unsigned int, seq)
+	),
+
+	TP_fast_assign(
+		__entry->cpu = cpu;
+		__entry->seq = atomic_inc_return(&cpuidle_trace_seq);
+	),
+
+	TP_printk("%u %u", __entry->seq, __entry->cpu)
+);
+
+TRACE_EVENT(coupled_exit,
+
+	TP_PROTO(unsigned int cpu),
+
+	TP_ARGS(cpu),
+
+	TP_STRUCT__entry(
+		__field(unsigned int, cpu)
+		__field(unsigned int, seq)
+	),
+
+	TP_fast_assign(
+		__entry->cpu = cpu;
+		__entry->seq = atomic_inc_return(&cpuidle_trace_seq);
+	),
+
+	TP_printk("%u %u", __entry->seq, __entry->cpu)
+);
+
+TRACE_EVENT(coupled_spin,
+
+	TP_PROTO(unsigned int cpu),
+
+	TP_ARGS(cpu),
+
+	TP_STRUCT__entry(
+		__field(unsigned int, cpu)
+		__field(unsigned int, seq)
+	),
+
+	TP_fast_assign(
+		__entry->cpu = cpu;
+		__entry->seq = atomic_inc_return(&cpuidle_trace_seq);
+	),
+
+	TP_printk("%u %u", __entry->seq, __entry->cpu)
+);
+
+TRACE_EVENT(coupled_unspin,
+
+	TP_PROTO(unsigned int cpu),
+
+	TP_ARGS(cpu),
+
+	TP_STRUCT__entry(
+		__field(unsigned int, cpu)
+		__field(unsigned int, seq)
+	),
+
+	TP_fast_assign(
+		__entry->cpu = cpu;
+		__entry->seq = atomic_inc_return(&cpuidle_trace_seq);
+	),
+
+	TP_printk("%u %u", __entry->seq, __entry->cpu)
+);
+
+TRACE_EVENT(coupled_safe_enter,
+
+	TP_PROTO(unsigned int cpu),
+
+	TP_ARGS(cpu),
+
+	TP_STRUCT__entry(
+		__field(unsigned int, cpu)
+		__field(unsigned int, seq)
+	),
+
+	TP_fast_assign(
+		__entry->cpu = cpu;
+		__entry->seq = atomic_inc_return(&cpuidle_trace_seq);
+	),
+
+	TP_printk("%u %u", __entry->seq, __entry->cpu)
+);
+
+TRACE_EVENT(coupled_safe_exit,
+
+	TP_PROTO(unsigned int cpu),
+
+	TP_ARGS(cpu),
+
+	TP_STRUCT__entry(
+		__field(unsigned int, cpu)
+		__field(unsigned int, seq)
+	),
+
+	TP_fast_assign(
+		__entry->cpu = cpu;
+		__entry->seq = atomic_inc_return(&cpuidle_trace_seq);
+	),
+
+	TP_printk("%u %u", __entry->seq, __entry->cpu)
+);
+
+TRACE_EVENT(coupled_idle_enter,
+
+	TP_PROTO(unsigned int cpu),
+
+	TP_ARGS(cpu),
+
+	TP_STRUCT__entry(
+		__field(unsigned int, cpu)
+		__field(unsigned int, seq)
+	),
+
+	TP_fast_assign(
+		__entry->cpu = cpu;
+		__entry->seq = atomic_inc_return(&cpuidle_trace_seq);
+	),
+
+	TP_printk("%u %u", __entry->seq, __entry->cpu)
+);
+
+TRACE_EVENT(coupled_idle_exit,
+
+	TP_PROTO(unsigned int cpu),
+
+	TP_ARGS(cpu),
+
+	TP_STRUCT__entry(
+		__field(unsigned int, cpu)
+		__field(unsigned int, seq)
+	),
+
+	TP_fast_assign(
+		__entry->cpu = cpu;
+		__entry->seq = atomic_inc_return(&cpuidle_trace_seq);
+	),
+
+	TP_printk("%u %u", __entry->seq, __entry->cpu)
+);
+
+TRACE_EVENT(coupled_abort,
+
+	TP_PROTO(unsigned int cpu),
+
+	TP_ARGS(cpu),
+
+	TP_STRUCT__entry(
+		__field(unsigned int, cpu)
+		__field(unsigned int, seq)
+	),
+
+	TP_fast_assign(
+		__entry->cpu = cpu;
+		__entry->seq = atomic_inc_return(&cpuidle_trace_seq);
+	),
+
+	TP_printk("%u %u", __entry->seq, __entry->cpu)
+);
+
+TRACE_EVENT(coupled_detected_abort,
+
+	TP_PROTO(unsigned int cpu),
+
+	TP_ARGS(cpu),
+
+	TP_STRUCT__entry(
+		__field(unsigned int, cpu)
+		__field(unsigned int, seq)
+	),
+
+	TP_fast_assign(
+		__entry->cpu = cpu;
+		__entry->seq = atomic_inc_return(&cpuidle_trace_seq);
+	),
+
+	TP_printk("%u %u", __entry->seq, __entry->cpu)
+);
+
+TRACE_EVENT(coupled_poke,
+
+	TP_PROTO(unsigned int cpu),
+
+	TP_ARGS(cpu),
+
+	TP_STRUCT__entry(
+		__field(unsigned int, cpu)
+		__field(unsigned int, seq)
+	),
+
+	TP_fast_assign(
+		__entry->cpu = cpu;
+		__entry->seq = atomic_inc_return(&cpuidle_trace_seq);
+	),
+
+	TP_printk("%u %u", __entry->seq, __entry->cpu)
+);
+
+TRACE_EVENT(coupled_poked,
+
+	TP_PROTO(unsigned int cpu),
+
+	TP_ARGS(cpu),
+
+	TP_STRUCT__entry(
+		__field(unsigned int, cpu)
+		__field(unsigned int, seq)
+	),
+
+	TP_fast_assign(
+		__entry->cpu = cpu;
+		__entry->seq = atomic_inc_return(&cpuidle_trace_seq);
+	),
+
+	TP_printk("%u %u", __entry->seq, __entry->cpu)
+);
+
+#endif /* if !defined(_TRACE_CPUIDLE_H) || defined(TRACE_HEADER_MULTI_READ) */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
-- 
1.7.9.2
