From: Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
To: Linus Torvalds <torvalds@linux-foundation.org>,
akpm@linux-foundation.org, Ingo Molnar <mingo@elte.hu>,
Peter Zijlstra <a.p.zijlstra@chello.nl>,
linux-kernel@vger.kernel.org
Cc: Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>,
Ralf Baechle <ralf@linux-mips.org>
Subject: [RFC patch 17/18] MIPS : Trace clock
Date: Fri, 07 Nov 2008 00:23:53 -0500 [thread overview]
Message-ID: <20081107053351.329744816@polymtl.ca> (raw)
In-Reply-To: 20081107052336.652868737@polymtl.ca
[-- Attachment #1: mips-trace-clock.patch --]
[-- Type: text/plain, Size: 10611 bytes --]
MIPS get_cycles only returns a 32-bit TSC (see timex.h). The assumption there
is that the reschedule is done every 8 seconds or so. Given that tracing needs
to detect delays longer than 8 seconds, we need a full 64-bit TSC, which is
provided by trace-clock-32-to-64.
I leave the "depends on !CPU_R4400_WORKAROUNDS" in Kconfig because the solution
proposed by Ralf to deal with the R4400 bug is racy, so let's just not support
this broken architecture. :(
Note for Peter Zijlstra :
You should probably have a look at lockdep.c raw_spinlock_t lockdep_lock usage.
I suspect it may be used with preemption enabled in graph_lock(). (not sure
though, but it's worth double-checking.)
This patch uses the same cache-line bouncing algorithm used for x86. This is a
best-effort to support architectures lacking synchronized TSC without adding a
lot of complexity too soon. This keeps room for improvement in a second phase.
Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
CC: Ralf Baechle <ralf@linux-mips.org>
CC: Peter Zijlstra <a.p.zijlstra@chello.nl>
---
arch/mips/Kconfig | 3
arch/mips/include/asm/timex.h | 17 +++
arch/mips/include/asm/trace-clock.h | 68 ++++++++++++++
arch/mips/kernel/Makefile | 2
arch/mips/kernel/trace-clock.c | 172 ++++++++++++++++++++++++++++++++++++
5 files changed, 261 insertions(+), 1 deletion(-)
Index: linux.trees.git/arch/mips/Kconfig
===================================================================
--- linux.trees.git.orig/arch/mips/Kconfig 2008-11-07 00:10:10.000000000 -0500
+++ linux.trees.git/arch/mips/Kconfig 2008-11-07 00:16:42.000000000 -0500
@@ -1614,6 +1614,9 @@ config CPU_R4400_WORKAROUNDS
config HAVE_GET_CYCLES_32
def_bool y
depends on !CPU_R4400_WORKAROUNDS
+ select HAVE_TRACE_CLOCK
+ select HAVE_TRACE_CLOCK_32_TO_64
+ select HAVE_UNSYNCHRONIZED_TSC
#
# Use the generic interrupt handling code in kernel/irq/:
Index: linux.trees.git/arch/mips/include/asm/trace-clock.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux.trees.git/arch/mips/include/asm/trace-clock.h 2008-11-07 00:16:42.000000000 -0500
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2005,2008 Mathieu Desnoyers
+ *
+ * Trace clock MIPS definitions.
+ */
+
+#ifndef _ASM_MIPS_TRACE_CLOCK_H
+#define _ASM_MIPS_TRACE_CLOCK_H
+
+#include <linux/timex.h>
+#include <asm/processor.h>
+
+#define TRACE_CLOCK_MIN_PROBE_DURATION 200
+
+extern u64 trace_clock_read_synthetic_tsc(void);
+
+/*
+ * MIPS get_cycles only returns a 32 bits TSC (see timex.h). The assumption
+ * there is that the reschedule is done every 8 seconds or so. Given that
+ * tracing needs to detect delays longer than 8 seconds, we need a full 64-bits
+ * TSC, which is provided by trace-clock-32-to-64.
+*/
+extern u64 trace_clock_async_tsc_read(void);
+
+static inline u32 trace_clock_read32(void)
+{
+ u32 cycles;
+
+ if (likely(tsc_is_sync()))
+ cycles = (u32)get_cycles(); /* only need the 32 LSB */
+ else
+ cycles = (u32)trace_clock_async_tsc_read();
+ return cycles;
+}
+
+static inline u64 trace_clock_read64(void)
+{
+ u64 cycles;
+
+ if (likely(tsc_is_sync()))
+ cycles = trace_clock_read_synthetic_tsc();
+ else
+ cycles = trace_clock_async_tsc_read();
+ return cycles;
+}
+
+static inline void trace_clock_add_timestamp(unsigned long ticks)
+{ }
+
+static inline unsigned int trace_clock_frequency(void)
+{
+ return mips_hpt_frequency;
+}
+
+static inline u32 trace_clock_freq_scale(void)
+{
+ return 1;
+}
+
+extern void get_trace_clock(void);
+extern void put_trace_clock(void);
+extern void get_synthetic_tsc(void);
+extern void put_synthetic_tsc(void);
+
+static inline void set_trace_clock_is_sync(int state)
+{
+}
+#endif /* _ASM_MIPS_TRACE_CLOCK_H */
Index: linux.trees.git/arch/mips/kernel/trace-clock.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux.trees.git/arch/mips/kernel/trace-clock.c 2008-11-07 00:16:42.000000000 -0500
@@ -0,0 +1,172 @@
+/*
+ * arch/mips/kernel/trace-clock.c
+ *
+ * Trace clock for mips.
+ *
+ * Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>, October 2008
+ */
+
+#include <linux/module.h>
+#include <linux/trace-clock.h>
+#include <linux/jiffies.h>
+#include <linux/mutex.h>
+#include <linux/timer.h>
+#include <linux/spinlock.h>
+
+static u64 trace_clock_last_tsc;
+static DEFINE_PER_CPU(struct timer_list, update_timer);
+static DEFINE_MUTEX(async_tsc_mutex);
+static int async_tsc_refcount; /* Number of readers */
+static int async_tsc_enabled; /* Async TSC enabled on all online CPUs */
+
+/*
+ * Support for architectures with non-sync TSCs.
+ * When the local TSC is discovered to lag behind the highest TSC counter, we
+ * increment the TSC count of an amount that should be, ideally, lower than the
+ * execution time of this routine, in cycles : this is the granularity we look
+ * for : we must be able to order the events.
+ */
+
+#if BITS_PER_LONG == 64
+notrace u64 trace_clock_async_tsc_read(void)
+{
+ u64 new_tsc, last_tsc;
+
+ WARN_ON(!async_tsc_refcount || !async_tsc_enabled);
+ new_tsc = trace_clock_read_synthetic_tsc();
+ do {
+ last_tsc = trace_clock_last_tsc;
+ if (new_tsc < last_tsc)
+ new_tsc = last_tsc + TRACE_CLOCK_MIN_PROBE_DURATION;
+ /*
+ * If cmpxchg fails with a value higher than the new_tsc, don't
+ * retry : the value has been incremented and the events
+ * happened almost at the same time.
+ * We must retry if cmpxchg fails with a lower value :
+ * it means that we are the CPU with highest frequency and
+ * therefore MUST update the value.
+ */
+ } while (cmpxchg64(&trace_clock_last_tsc, last_tsc, new_tsc) < new_tsc);
+ return new_tsc;
+}
+EXPORT_SYMBOL_GPL(trace_clock_async_tsc_read);
+#else
+/*
+ * Emulate an atomic 64-bits update with a spinlock.
+ * Note : preempt_disable or irq save must be explicit with raw_spinlock_t.
+ * Given we use a spinlock for this time base, we should never be called from
+ * NMI context.
+ */
+static raw_spinlock_t trace_clock_lock =
+ (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+
+static inline u64 trace_clock_cmpxchg64(u64 *ptr, u64 old, u64 new)
+{
+ u64 val;
+
+ val = *ptr;
+ if (likely(val == old))
+ *ptr = val = new;
+ return val;
+}
+
+notrace u64 trace_clock_async_tsc_read(void)
+{
+ u64 new_tsc, last_tsc;
+ unsigned long flags;
+
+ WARN_ON(!async_tsc_refcount || !async_tsc_enabled);
+ local_irq_save(flags);
+ __raw_spin_lock(&trace_clock_lock);
+ new_tsc = trace_clock_read_synthetic_tsc();
+ do {
+ last_tsc = trace_clock_last_tsc;
+ if (new_tsc < last_tsc)
+ new_tsc = last_tsc + TRACE_CLOCK_MIN_PROBE_DURATION;
+ /*
+ * If cmpxchg fails with a value higher than the new_tsc, don't
+ * retry : the value has been incremented and the events
+ * happened almost at the same time.
+ * We must retry if cmpxchg fails with a lower value :
+ * it means that we are the CPU with highest frequency and
+ * therefore MUST update the value.
+ */
+ } while (trace_clock_cmpxchg64(&trace_clock_last_tsc, last_tsc,
+ new_tsc) < new_tsc);
+ __raw_spin_unlock(&trace_clock_lock);
+ local_irq_restore(flags);
+ return new_tsc;
+}
+EXPORT_SYMBOL_GPL(trace_clock_async_tsc_read);
+#endif
+
+
+static void update_timer_ipi(void *info)
+{
+ (void)trace_clock_async_tsc_read();
+}
+
+/*
+ * update_timer_fct : - Timer function to resync the clocks
+ * @data: unused
+ *
+ * Fires every jiffy.
+ */
+static void update_timer_fct(unsigned long data)
+{
+ (void)trace_clock_async_tsc_read();
+
+ per_cpu(update_timer, smp_processor_id()).expires = jiffies + 1;
+ add_timer_on(&per_cpu(update_timer, smp_processor_id()),
+ smp_processor_id());
+}
+
+static void enable_trace_clock(int cpu)
+{
+ init_timer(&per_cpu(update_timer, cpu));
+ per_cpu(update_timer, cpu).function = update_timer_fct;
+ per_cpu(update_timer, cpu).expires = jiffies + 1;
+ smp_call_function_single(cpu, update_timer_ipi, NULL, 1);
+ add_timer_on(&per_cpu(update_timer, cpu), cpu);
+}
+
+static void disable_trace_clock(int cpu)
+{
+ del_timer_sync(&per_cpu(update_timer, cpu));
+}
+
+void get_trace_clock(void)
+{
+ int cpu;
+
+ mutex_lock(&async_tsc_mutex);
+ if (async_tsc_refcount++ || tsc_is_sync())
+ goto end;
+
+ async_tsc_enabled = 1;
+ for_each_online_cpu(cpu)
+ enable_trace_clock(cpu);
+end:
+ mutex_unlock(&async_tsc_mutex);
+ get_synthetic_tsc();
+}
+EXPORT_SYMBOL_GPL(get_trace_clock);
+
+void put_trace_clock(void)
+{
+ int cpu;
+
+ put_synthetic_tsc();
+ mutex_lock(&async_tsc_mutex);
+ WARN_ON(async_tsc_refcount <= 0);
+ if (async_tsc_refcount != 1 || !async_tsc_enabled)
+ goto end;
+
+ for_each_online_cpu(cpu)
+ disable_trace_clock(cpu);
+ async_tsc_enabled = 0;
+end:
+ async_tsc_refcount--;
+ mutex_unlock(&async_tsc_mutex);
+}
+EXPORT_SYMBOL_GPL(put_trace_clock);
Index: linux.trees.git/arch/mips/include/asm/timex.h
===================================================================
--- linux.trees.git.orig/arch/mips/include/asm/timex.h 2008-11-07 00:16:17.000000000 -0500
+++ linux.trees.git/arch/mips/include/asm/timex.h 2008-11-07 00:16:42.000000000 -0500
@@ -42,7 +42,7 @@
typedef unsigned int cycles_t;
-#ifdef HAVE_GET_CYCLES_32
+#ifdef CONFIG_HAVE_GET_CYCLES_32
static inline cycles_t get_cycles(void)
{
return read_c0_count();
@@ -91,6 +91,21 @@ static inline void write_tsc(u32 val1, u
extern unsigned int mips_hpt_frequency;
+/*
+ * Currently unused, should update internal tsc-related timekeeping sources.
+ */
+static inline void mark_tsc_unstable(char *reason)
+{
+}
+
+/*
+ * Currently simply use the tsc_is_sync value.
+ */
+static inline int unsynchronized_tsc(void)
+{
+ return !tsc_is_sync();
+}
+
#endif /* __KERNEL__ */
#endif /* _ASM_TIMEX_H */
Index: linux.trees.git/arch/mips/kernel/Makefile
===================================================================
--- linux.trees.git.orig/arch/mips/kernel/Makefile 2008-10-30 20:22:50.000000000 -0400
+++ linux.trees.git/arch/mips/kernel/Makefile 2008-11-07 00:16:42.000000000 -0500
@@ -85,6 +85,8 @@ obj-$(CONFIG_GPIO_TXX9) += gpio_txx9.o
obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o
obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
+obj-$(CONFIG_HAVE_GET_CYCLES_32) += trace-clock.o
+
CFLAGS_cpu-bugs64.o = $(shell if $(CC) $(KBUILD_CFLAGS) -Wa,-mdaddi -c -o /dev/null -xc /dev/null >/dev/null 2>&1; then echo "-DHAVE_AS_SET_DADDI"; fi)
obj-$(CONFIG_HAVE_STD_PC_SERIAL_PORT) += 8250-platform.o
--
Mathieu Desnoyers
OpenPGP key fingerprint: 8CD5 52C3 8E3C 4140 715F BA06 3F25 A8FE 3BAE 9A68
next prev parent reply other threads:[~2008-11-07 5:44 UTC|newest]
Thread overview: 118+ messages / expand[flat|nested] mbox.gz Atom feed top
2008-11-07 5:23 [RFC patch 00/18] Trace Clock v2 Mathieu Desnoyers
2008-11-07 5:23 ` [RFC patch 01/18] get_cycles() : kconfig HAVE_GET_CYCLES Mathieu Desnoyers
2008-11-07 5:23 ` [RFC patch 02/18] get_cycles() : x86 HAVE_GET_CYCLES Mathieu Desnoyers
2008-11-07 5:23 ` [RFC patch 03/18] get_cycles() : sparc64 HAVE_GET_CYCLES Mathieu Desnoyers
2008-11-07 5:23 ` [RFC patch 04/18] get_cycles() : powerpc64 HAVE_GET_CYCLES Mathieu Desnoyers
2008-11-07 14:56 ` Josh Boyer
2008-11-07 18:14 ` Mathieu Desnoyers
2008-11-07 5:23 ` [RFC patch 05/18] get_cycles() : MIPS HAVE_GET_CYCLES_32 Mathieu Desnoyers
2008-11-07 5:23 ` [RFC patch 06/18] Trace clock generic Mathieu Desnoyers
2008-11-07 5:23 ` [RFC patch 07/18] Trace clock core Mathieu Desnoyers
2008-11-07 5:52 ` Andrew Morton
2008-11-07 6:16 ` Mathieu Desnoyers
2008-11-07 6:26 ` Andrew Morton
2008-11-07 16:12 ` Mathieu Desnoyers
2008-11-07 16:19 ` Andrew Morton
2008-11-07 18:16 ` Mathieu Desnoyers
2008-11-07 5:23 ` [RFC patch 08/18] cnt32_to_63 should use smp_rmb() Mathieu Desnoyers
2008-11-07 6:05 ` Andrew Morton
2008-11-07 8:12 ` Nicolas Pitre
2008-11-07 8:38 ` Andrew Morton
2008-11-07 11:20 ` David Howells
2008-11-07 15:01 ` Nicolas Pitre
2008-11-07 15:50 ` Andrew Morton
2008-11-07 16:21 ` David Howells
2008-11-07 16:29 ` Andrew Morton
2008-11-07 17:10 ` David Howells
2008-11-07 17:26 ` Andrew Morton
2008-11-07 18:00 ` Mathieu Desnoyers
2008-11-07 18:21 ` Andrew Morton
2008-11-07 18:30 ` Harvey Harrison
2008-11-07 18:42 ` Mathieu Desnoyers
2008-11-07 18:33 ` Mathieu Desnoyers
2008-11-07 18:36 ` Linus Torvalds
2008-11-07 18:45 ` Andrew Morton
2008-11-07 16:47 ` Nicolas Pitre
2008-11-07 16:55 ` David Howells
2008-11-07 17:21 ` Andrew Morton
2008-11-07 20:03 ` Nicolas Pitre
2008-11-07 16:07 ` David Howells
2008-11-07 16:47 ` Mathieu Desnoyers
2008-11-07 17:04 ` David Howells
2008-11-07 17:17 ` Mathieu Desnoyers
2008-11-07 23:27 ` David Howells
2008-11-07 20:11 ` Russell King
2008-11-07 21:36 ` Mathieu Desnoyers
2008-11-07 22:18 ` Russell King
2008-11-07 22:36 ` Mathieu Desnoyers
2008-11-07 23:41 ` David Howells
2008-11-08 0:15 ` Russell King
2008-11-08 0:45 ` David Howells
2008-11-08 15:24 ` Nicolas Pitre
2008-11-08 23:20 ` [PATCH] clarify usage expectations for cnt32_to_63() Nicolas Pitre
2008-11-09 2:25 ` Mathieu Desnoyers
2008-11-09 2:54 ` Nicolas Pitre
2008-11-09 5:06 ` Nicolas Pitre
2008-11-09 5:27 ` [PATCH v2] " Nicolas Pitre
2008-11-09 6:48 ` Mathieu Desnoyers
2008-11-09 13:34 ` Nicolas Pitre
2008-11-09 13:43 ` Russell King
2008-11-09 16:22 ` Mathieu Desnoyers
2008-11-10 4:20 ` Nicolas Pitre
2008-11-10 4:42 ` Andrew Morton
2008-11-10 21:34 ` Nicolas Pitre
2008-11-10 21:58 ` Andrew Morton
2008-11-10 23:15 ` Nicolas Pitre
2008-11-10 23:22 ` Andrew Morton
2008-11-10 23:38 ` Steven Rostedt
2008-11-11 0:26 ` Nicolas Pitre
2008-11-11 18:28 ` [PATCH] convert cnt32_to_63 to inline Mathieu Desnoyers
2008-11-11 19:13 ` Russell King
2008-11-11 20:11 ` Mathieu Desnoyers
2008-11-11 21:51 ` Russell King
2008-11-12 3:48 ` Mathieu Desnoyers
2008-11-11 21:00 ` Nicolas Pitre
2008-11-11 21:13 ` Russell King
2008-11-11 22:31 ` David Howells
2008-11-11 22:37 ` Peter Zijlstra
2008-11-12 1:13 ` Steven Rostedt
2008-11-07 11:03 ` [RFC patch 08/18] cnt32_to_63 should use smp_rmb() David Howells
2008-11-07 16:51 ` Mathieu Desnoyers
2008-11-07 20:18 ` Nicolas Pitre
2008-11-07 23:55 ` David Howells
2008-11-07 10:59 ` David Howells
2008-11-07 10:55 ` David Howells
2008-11-07 17:09 ` Mathieu Desnoyers
2008-11-07 17:33 ` Steven Rostedt
2008-11-07 19:18 ` Mathieu Desnoyers
2008-11-07 19:32 ` Peter Zijlstra
2008-11-07 20:02 ` Mathieu Desnoyers
2008-11-07 20:45 ` Mathieu Desnoyers
2008-11-07 20:54 ` Paul E. McKenney
2008-11-07 21:04 ` Steven Rostedt
2008-11-08 0:34 ` Paul E. McKenney
2008-11-07 21:16 ` Mathieu Desnoyers
2008-11-07 23:50 ` David Howells
2008-11-08 0:55 ` Steven Rostedt
2008-11-09 11:51 ` David Howells
2008-11-09 14:31 ` Steven Rostedt
2008-11-09 16:18 ` Mathieu Desnoyers
2008-11-07 20:08 ` Steven Rostedt
2008-11-07 20:55 ` Paul E. McKenney
2008-11-07 21:27 ` Mathieu Desnoyers
2008-11-07 20:36 ` Nicolas Pitre
2008-11-07 20:55 ` Mathieu Desnoyers
2008-11-07 21:22 ` Nicolas Pitre
2008-11-07 5:23 ` [RFC patch 09/18] Powerpc : Trace clock Mathieu Desnoyers
2008-11-07 5:23 ` [RFC patch 10/18] Sparc64 " Mathieu Desnoyers
2008-11-07 5:45 ` David Miller
2008-11-07 5:23 ` [RFC patch 11/18] LTTng timestamp sh Mathieu Desnoyers
2008-11-07 5:23 ` [RFC patch 12/18] LTTng - TSC synchronicity test Mathieu Desnoyers
2008-11-07 5:23 ` [RFC patch 13/18] x86 : remove arch-specific tsc_sync.c Mathieu Desnoyers
2008-11-07 5:23 ` [RFC patch 14/18] MIPS use tsc_sync.c Mathieu Desnoyers
2008-11-07 5:23 ` [RFC patch 15/18] MIPS : export hpt frequency for trace_clock Mathieu Desnoyers
2008-11-07 5:23 ` [RFC patch 16/18] MIPS create empty sync_core() Mathieu Desnoyers
2008-11-07 5:23 ` Mathieu Desnoyers [this message]
2008-11-07 11:53 ` [RFC patch 17/18] MIPS : Trace clock Peter Zijlstra
2008-11-07 17:44 ` Mathieu Desnoyers
2008-11-07 5:23 ` [RFC patch 18/18] x86 trace clock Mathieu Desnoyers
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20081107053351.329744816@polymtl.ca \
--to=mathieu.desnoyers@polymtl.ca \
--cc=a.p.zijlstra@chello.nl \
--cc=akpm@linux-foundation.org \
--cc=linux-kernel@vger.kernel.org \
--cc=mingo@elte.hu \
--cc=ralf@linux-mips.org \
--cc=torvalds@linux-foundation.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox