linux-mips.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
* [PATCH v1 1/5] mips static function tracer support
       [not found] <cover.1243543471.git.wuzj@lemote.com>
@ 2009-05-28 20:48 ` wuzhangjin
  2009-05-29  1:13   ` Steven Rostedt
  2009-05-28 20:48 ` [PATCH v1 2/5] mips dynamic " wuzhangjin
                   ` (3 subsequent siblings)
  4 siblings, 1 reply; 15+ messages in thread
From: wuzhangjin @ 2009-05-28 20:48 UTC (permalink / raw)
  To: linux-mips, linux-kernel
  Cc: Wu Zhangjin, Steven Rostedt, Ralf Baechle, Ingo Molnar,
	Andrew Morton, Thomas Gleixner, Nicholas Mc Guire

From: Wu Zhangjin <wuzj@lemote.com>

If gcc's -pg option is enabled, a call to _mcount is inserted into each
kernel function, so it becomes possible to trace those functions in
_mcount.

Here is the MIPS-specific implementation of _mcount for the static
function tracer.

The -ffunction-sections option does not work with -pg, so disable it
when FUNCTION_TRACER is enabled.

Signed-off-by: Wu Zhangjin <wuzj@lemote.com>
---
 arch/mips/Kconfig              |    2 +
 arch/mips/Makefile             |    2 +
 arch/mips/include/asm/ftrace.h |   25 ++++++++++-
 arch/mips/kernel/Makefile      |    8 +++
 arch/mips/kernel/mcount.S      |   98 ++++++++++++++++++++++++++++++++++++++++
 arch/mips/kernel/mips_ksyms.c  |    5 ++
 6 files changed, 139 insertions(+), 1 deletions(-)
 create mode 100644 arch/mips/kernel/mcount.S

diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 09b1287..d5c01ca 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -4,6 +4,8 @@ config MIPS
 	select HAVE_IDE
 	select HAVE_OPROFILE
 	select HAVE_ARCH_KGDB
+	select HAVE_FUNCTION_TRACER
+	select HAVE_FUNCTION_TRACE_MCOUNT_TEST
 	# Horrible source of confusion.  Die, die, die ...
 	select EMBEDDED
 	select RTC_LIB
diff --git a/arch/mips/Makefile b/arch/mips/Makefile
index c4cae9e..f86fb15 100644
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -48,7 +48,9 @@ ifneq ($(SUBARCH),$(ARCH))
   endif
 endif
 
+ifndef CONFIG_FUNCTION_TRACER
 cflags-y := -ffunction-sections
+endif
 cflags-y += $(call cc-option, -mno-check-zero-division)
 
 ifdef CONFIG_32BIT
diff --git a/arch/mips/include/asm/ftrace.h b/arch/mips/include/asm/ftrace.h
index 40a8c17..5f8ebcf 100644
--- a/arch/mips/include/asm/ftrace.h
+++ b/arch/mips/include/asm/ftrace.h
@@ -1 +1,24 @@
-/* empty */
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive for
+ * more details.
+ *
+ * Copyright (C) 2009 DSLab, Lanzhou University, China
+ * Author: Wu Zhangjin <wuzj@lemote.com>
+ */
+
+#ifndef _ASM_MIPS_FTRACE_H
+#define _ASM_MIPS_FTRACE_H
+
+#ifdef CONFIG_FUNCTION_TRACER
+
+#define MCOUNT_ADDR ((unsigned long)(_mcount))
+#define MCOUNT_INSN_SIZE 4		/* sizeof mcount call */
+
+#ifndef __ASSEMBLY__
+extern void _mcount(void);
+#define mcount _mcount
+
+#endif /* __ASSEMBLY__ */
+#endif /* CONFIG_FUNCTION_TRACER */
+#endif /* _ASM_MIPS_FTRACE_H */
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index e961221..d167dde 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -8,6 +8,12 @@ obj-y		+= cpu-probe.o branch.o entry.o genex.o irq.o process.o \
 		   ptrace.o reset.o setup.o signal.o syscall.o \
 		   time.o topology.o traps.o unaligned.o watch.o
 
+ifdef CONFIG_FUNCTION_TRACER
+# Do not profile debug and lowlevel utilities
+CFLAGS_REMOVE_mcount.o = -pg
+CFLAGS_REMOVE_early_printk.o = -pg
+endif
+
 obj-$(CONFIG_CEVT_BCM1480)	+= cevt-bcm1480.o
 obj-$(CONFIG_CEVT_R4K_LIB)	+= cevt-r4k.o
 obj-$(CONFIG_MIPS_MT_SMTC)	+= cevt-smtc.o
@@ -24,6 +30,8 @@ obj-$(CONFIG_SYNC_R4K)		+= sync-r4k.o
 obj-$(CONFIG_STACKTRACE)	+= stacktrace.o
 obj-$(CONFIG_MODULES)		+= mips_ksyms.o module.o
 
+obj-$(CONFIG_FUNCTION_TRACER)	+= mcount.o
+
 obj-$(CONFIG_CPU_LOONGSON2)	+= r4k_fpu.o r4k_switch.o
 obj-$(CONFIG_CPU_MIPS32)	+= r4k_fpu.o r4k_switch.o
 obj-$(CONFIG_CPU_MIPS64)	+= r4k_fpu.o r4k_switch.o
diff --git a/arch/mips/kernel/mcount.S b/arch/mips/kernel/mcount.S
new file mode 100644
index 0000000..268724e
--- /dev/null
+++ b/arch/mips/kernel/mcount.S
@@ -0,0 +1,98 @@
+/*
+ * the mips-specific _mcount implementation
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive for
+ * more details.
+ *
+ * Copyright (C) 2009 DSLab, Lanzhou University, China
+ * Author: Wu Zhangjin <wuzj@lemote.com>
+ */
+
+#include <asm/regdef.h>
+#include <asm/stackframe.h>
+#include <asm/ftrace.h>
+
+	.text
+	.set noreorder
+	.set noat
+
+	/* since there is an "addiu sp,sp,-8" before "jal _mcount" in 32bit */
+	.macro RESTORE_SP_FOR_32BIT
+#ifdef CONFIG_32BIT
+	PTR_ADDIU	sp, 8
+#endif
+	.endm
+
+	.macro MCOUNT_SAVE_REGS
+	PTR_SUBU	sp, PT_SIZE
+	PTR_S	ra, PT_R31(sp)
+	PTR_S	$1, PT_R1(sp)
+	PTR_S	a0, PT_R4(sp)
+	PTR_S	a1, PT_R5(sp)
+	PTR_S	a2, PT_R6(sp)
+	PTR_S	a3, PT_R7(sp)
+#ifdef CONFIG_64BIT
+	PTR_S	a4, PT_R8(sp)
+	PTR_S	a5, PT_R9(sp)
+	PTR_S	a6, PT_R10(sp)
+	PTR_S	a7, PT_R11(sp)
+#endif
+	.endm
+
+	.macro MCOUNT_RESTORE_REGS
+	PTR_L	ra, PT_R31(sp)
+	PTR_L	$1, PT_R1(sp)
+	PTR_L	a0, PT_R4(sp)
+	PTR_L	a1, PT_R5(sp)
+	PTR_L	a2, PT_R6(sp)
+	PTR_L	a3, PT_R7(sp)
+#ifdef CONFIG_64BIT
+	PTR_L	a4, PT_R8(sp)
+	PTR_L	a5, PT_R9(sp)
+	PTR_L	a6, PT_R10(sp)
+	PTR_L	a7, PT_R11(sp)
+#endif
+	PTR_ADDIU	sp, PT_SIZE
+.endm
+
+	.macro MCOUNT_SET_ARGS
+	move	a0, ra		/* arg1: next ip, selfaddr */
+	move	a1, $1		/* arg2: the caller's next ip, parent */
+	PTR_SUBU a0, MCOUNT_INSN_SIZE
+	.endm
+
+	.macro RETURN_BACK
+	jr ra
+	move ra, $1
+	.endm
+
+NESTED(_mcount, PT_SIZE, ra)
+	RESTORE_SP_FOR_32BIT
+	PTR_L	t0, function_trace_stop
+	bnez	t0, ftrace_stub
+	nop
+
+	PTR_LA	t0, ftrace_stub
+	PTR_L	t1, ftrace_trace_function /* please don't use t1 later, safe? */
+	bne	t0, t1, static_trace
+	nop
+
+	j	ftrace_stub
+	nop
+
+static_trace:
+	MCOUNT_SAVE_REGS
+
+	MCOUNT_SET_ARGS			/* call *ftrace_trace_function */
+	jalr	t1
+	nop
+
+	MCOUNT_RESTORE_REGS
+	.globl ftrace_stub
+ftrace_stub:
+	RETURN_BACK
+	END(_mcount)
+
+	.set at
+	.set reorder
diff --git a/arch/mips/kernel/mips_ksyms.c b/arch/mips/kernel/mips_ksyms.c
index 225755d..1d04807 100644
--- a/arch/mips/kernel/mips_ksyms.c
+++ b/arch/mips/kernel/mips_ksyms.c
@@ -13,6 +13,7 @@
 #include <asm/checksum.h>
 #include <asm/pgtable.h>
 #include <asm/uaccess.h>
+#include <asm/ftrace.h>
 
 extern void *__bzero(void *__s, size_t __count);
 extern long __strncpy_from_user_nocheck_asm(char *__to,
@@ -51,3 +52,7 @@ EXPORT_SYMBOL(csum_partial_copy_nocheck);
 EXPORT_SYMBOL(__csum_partial_copy_user);
 
 EXPORT_SYMBOL(invalid_pte_table);
+#ifdef CONFIG_FUNCTION_TRACER
+/* _mcount is defined in arch/mips/kernel/mcount.S */
+EXPORT_SYMBOL(_mcount);
+#endif
-- 
1.6.0.4

^ permalink raw reply related	[flat|nested] 15+ messages in thread

* [PATCH v1 2/5] mips dynamic function tracer support
       [not found] <cover.1243543471.git.wuzj@lemote.com>
  2009-05-28 20:48 ` [PATCH v1 1/5] mips static function tracer support wuzhangjin
@ 2009-05-28 20:48 ` wuzhangjin
  2009-05-29  1:24   ` Steven Rostedt
  2009-05-28 20:49 ` [PATCH v1 3/5] mips function graph " wuzhangjin
                   ` (2 subsequent siblings)
  4 siblings, 1 reply; 15+ messages in thread
From: wuzhangjin @ 2009-05-28 20:48 UTC (permalink / raw)
  To: linux-mips, linux-kernel
  Cc: Wu Zhangjin, Steven Rostedt, Ralf Baechle, Ingo Molnar,
	Andrew Morton, Thomas Gleixner, Nicholas Mc Guire

From: Wu Zhangjin <wuzj@lemote.com>

The dynamic function tracer needs to replace "nop" with "jump & link"
instructions and vice versa.

Signed-off-by: Wu Zhangjin <wuzj@lemote.com>
---
 arch/mips/Kconfig              |    3 +
 arch/mips/include/asm/ftrace.h |   10 ++
 arch/mips/kernel/Makefile      |    2 +
 arch/mips/kernel/ftrace.c      |  217 ++++++++++++++++++++++++++++++++++++++++
 arch/mips/kernel/mcount.S      |   31 ++++++
 scripts/Makefile.build         |    1 +
 scripts/recordmcount.pl        |   32 +++++-
 7 files changed, 290 insertions(+), 6 deletions(-)
 create mode 100644 arch/mips/kernel/ftrace.c

diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index d5c01ca..0c00536 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -6,6 +6,9 @@ config MIPS
 	select HAVE_ARCH_KGDB
 	select HAVE_FUNCTION_TRACER
 	select HAVE_FUNCTION_TRACE_MCOUNT_TEST
+	select HAVE_DYNAMIC_FTRACE
+	select HAVE_FTRACE_MCOUNT_RECORD
+	select HAVE_FTRACE_NMI_ENTER if DYNAMIC_FTRACE
 	# Horrible source of confusion.  Die, die, die ...
 	select EMBEDDED
 	select RTC_LIB
diff --git a/arch/mips/include/asm/ftrace.h b/arch/mips/include/asm/ftrace.h
index 5f8ebcf..b4970c9 100644
--- a/arch/mips/include/asm/ftrace.h
+++ b/arch/mips/include/asm/ftrace.h
@@ -19,6 +19,16 @@
 extern void _mcount(void);
 #define mcount _mcount
 
+#ifdef CONFIG_DYNAMIC_FTRACE
+/* relocation of the mcount call site is the same as the address */
+static inline unsigned long ftrace_call_adjust(unsigned long addr)
+{
+	return addr;
+}
+
+struct dyn_arch_ftrace {
+};
+#endif /*  CONFIG_DYNAMIC_FTRACE */
 #endif /* __ASSEMBLY__ */
 #endif /* CONFIG_FUNCTION_TRACER */
 #endif /* _ASM_MIPS_FTRACE_H */
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index d167dde..6b1a8a5 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -11,6 +11,7 @@ obj-y		+= cpu-probe.o branch.o entry.o genex.o irq.o process.o \
 ifdef CONFIG_FUNCTION_TRACER
 # Do not profile debug and lowlevel utilities
 CFLAGS_REMOVE_mcount.o = -pg
+CFLAGS_REMOVE_ftrace.o = -pg
 CFLAGS_REMOVE_early_printk.o = -pg
 endif
 
@@ -31,6 +32,7 @@ obj-$(CONFIG_STACKTRACE)	+= stacktrace.o
 obj-$(CONFIG_MODULES)		+= mips_ksyms.o module.o
 
 obj-$(CONFIG_FUNCTION_TRACER)	+= mcount.o
+obj-$(CONFIG_FUNCTION_TRACER)	+= ftrace.o
 
 obj-$(CONFIG_CPU_LOONGSON2)	+= r4k_fpu.o r4k_switch.o
 obj-$(CONFIG_CPU_MIPS32)	+= r4k_fpu.o r4k_switch.o
diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c
new file mode 100644
index 0000000..827c128
--- /dev/null
+++ b/arch/mips/kernel/ftrace.c
@@ -0,0 +1,217 @@
+/*
+ * Code for replacing ftrace calls with jumps.
+ *
+ * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
+ * Copyright (C) 2009 DSLab, Lanzhou University, China
+ * Author: Wu Zhangjin <wuzj@lemote.com>
+ *
+ * Thanks goes to Steven Rostedt for writing the original x86 version.
+ */
+
+#include <linux/spinlock.h>
+#include <linux/hardirq.h>
+#include <linux/uaccess.h>
+#include <linux/percpu.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/ftrace.h>
+
+#include <asm/cacheflush.h>
+#include <asm/ftrace.h>
+#include <asm/asm.h>
+#include <asm/unistd.h>
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+
+#define JAL 0x0c000000	/* jump & link: ip --> ra, jump to target */
+#define ADDR_MASK 0x03ffffff	/*  op_code|addr : 31...26|25 ....0 */
+
+static unsigned int ftrace_nop = 0x00000000;
+
+static unsigned char *ftrace_call_replace(unsigned long op_code,
+					  unsigned long addr)
+{
+    static unsigned int op;
+
+    op = op_code | ((addr >> 2) & ADDR_MASK);
+
+    return (unsigned char *) &op;
+}
+
+static atomic_t nmi_running = ATOMIC_INIT(0);
+static int mod_code_status;	/* holds return value of text write */
+static int mod_code_write;	/* set when NMI should do the write */
+static void *mod_code_ip;	/* holds the IP to write to */
+static void *mod_code_newcode;	/* holds the text to write to the IP */
+
+static unsigned nmi_wait_count;
+static atomic_t nmi_update_count = ATOMIC_INIT(0);
+
+int ftrace_arch_read_dyn_info(char *buf, int size)
+{
+    int r;
+
+    r = snprintf(buf, size, "%u %u",
+		 nmi_wait_count, atomic_read(&nmi_update_count));
+    return r;
+}
+
+static void ftrace_mod_code(void)
+{
+    /*
+     * Yes, more than one CPU process can be writing to mod_code_status.
+     *    (and the code itself)
+     * But if one were to fail, then they all should, and if one were
+     * to succeed, then they all should.
+     */
+    mod_code_status = probe_kernel_write(mod_code_ip, mod_code_newcode,
+					 MCOUNT_INSN_SIZE);
+
+    /* if we fail, then kill any new writers */
+    if (mod_code_status)
+		mod_code_write = 0;
+}
+
+void ftrace_nmi_enter(void)
+{
+    atomic_inc(&nmi_running);
+    /* Must have nmi_running seen before reading write flag */
+    smp_mb();
+    if (mod_code_write) {
+		ftrace_mod_code();
+		atomic_inc(&nmi_update_count);
+    }
+}
+
+void ftrace_nmi_exit(void)
+{
+    /* Finish all executions before clearing nmi_running */
+    smp_wmb();
+    atomic_dec(&nmi_running);
+}
+
+static void wait_for_nmi(void)
+{
+    int waited = 0;
+
+    while (atomic_read(&nmi_running)) {
+		waited = 1;
+		cpu_relax();
+    }
+
+    if (waited)
+		nmi_wait_count++;
+}
+
+static int do_ftrace_mod_code(unsigned long ip, void *new_code)
+{
+    mod_code_ip = (void *) ip;
+    mod_code_newcode = new_code;
+
+    /* The buffers need to be visible before we let NMIs write them */
+    smp_wmb();
+
+    mod_code_write = 1;
+
+    /* Make sure write bit is visible before we wait on NMIs */
+    smp_mb();
+
+    wait_for_nmi();
+
+    /* Make sure all running NMIs have finished before we write the code */
+    smp_mb();
+
+    ftrace_mod_code();
+
+    /* Make sure the write happens before clearing the bit */
+    smp_wmb();
+
+    mod_code_write = 0;
+
+    /* make sure NMIs see the cleared bit */
+    smp_mb();
+
+    wait_for_nmi();
+
+    return mod_code_status;
+}
+
+static unsigned char *ftrace_nop_replace(void)
+{
+    return (unsigned char *) &ftrace_nop;
+}
+
+static int
+ftrace_modify_code(unsigned long ip, unsigned char *old_code,
+		   unsigned char *new_code)
+{
+    unsigned char replaced[MCOUNT_INSN_SIZE];
+
+    /*
+     * Note: Due to modules and __init, code can
+     *  disappear and change, we need to protect against faulting
+     *  as well as code changing. We do this by using the
+     *  probe_kernel_* functions.
+     *
+     * No real locking needed, this code is run through
+     * kstop_machine, or before SMP starts.
+     */
+
+    /* read the text we want to modify */
+    if (probe_kernel_read(replaced, (void *) ip, MCOUNT_INSN_SIZE))
+		return -EFAULT;
+
+    /* Make sure it is what we expect it to be */
+    if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0)
+		return -EINVAL;
+
+    /* replace the text with the new text */
+    if (do_ftrace_mod_code(ip, new_code))
+		return -EPERM;
+
+    return 0;
+}
+
+int ftrace_make_nop(struct module *mod,
+		    struct dyn_ftrace *rec, unsigned long addr)
+{
+    unsigned char *new, *old;
+
+    old = ftrace_call_replace(JAL, addr);
+    new = ftrace_nop_replace();
+
+    return ftrace_modify_code(rec->ip, old, new);
+}
+
+int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
+{
+    unsigned char *new, *old;
+
+    old = ftrace_nop_replace();
+    new = ftrace_call_replace(JAL, addr);
+
+    return ftrace_modify_code(rec->ip, old, new);
+}
+
+int ftrace_update_ftrace_func(ftrace_func_t func)
+{
+    unsigned long ip = (unsigned long) (&ftrace_call);
+    unsigned char old[MCOUNT_INSN_SIZE], *new;
+    int ret;
+
+    memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
+    new = ftrace_call_replace(JAL, (unsigned long) func);
+    ret = ftrace_modify_code(ip, old, new);
+
+    return ret;
+}
+
+int __init ftrace_dyn_arch_init(void *data)
+{
+    /* The return code is returned via data */
+    *(unsigned long *) data = 0;
+
+    return 0;
+}
+#endif				/* CONFIG_DYNAMIC_FTRACE */
diff --git a/arch/mips/kernel/mcount.S b/arch/mips/kernel/mcount.S
index 268724e..ce8a0ba 100644
--- a/arch/mips/kernel/mcount.S
+++ b/arch/mips/kernel/mcount.S
@@ -67,6 +67,35 @@
 	move ra, $1
 	.endm
 
+#ifdef CONFIG_DYNAMIC_FTRACE
+
+LEAF(_mcount)
+	RESTORE_SP_FOR_32BIT
+ 	RETURN_BACK
+ 	END(_mcount)
+
+NESTED(ftrace_caller, PT_SIZE, ra)
+	RESTORE_SP_FOR_32BIT
+	lw	t0, function_trace_stop
+	bnez	t0, ftrace_stub
+	nop
+
+	MCOUNT_SAVE_REGS
+
+	MCOUNT_SET_ARGS
+	.globl ftrace_call
+ftrace_call:
+	jal	ftrace_stub
+	nop
+
+	MCOUNT_RESTORE_REGS
+	.globl ftrace_stub
+ftrace_stub:
+	RETURN_BACK
+	END(ftrace_caller)
+
+#else	/* ! CONFIG_DYNAMIC_FTRACE */
+
 NESTED(_mcount, PT_SIZE, ra)
 	RESTORE_SP_FOR_32BIT
 	PTR_L	t0, function_trace_stop
@@ -94,5 +123,7 @@ ftrace_stub:
 	RETURN_BACK
 	END(_mcount)
 
+#endif	/* ! CONFIG_DYNAMIC_FTRACE */
+
 	.set at
 	.set reorder
diff --git a/scripts/Makefile.build b/scripts/Makefile.build
index 5c4b7a4..548d575 100644
--- a/scripts/Makefile.build
+++ b/scripts/Makefile.build
@@ -207,6 +207,7 @@ endif
 
 ifdef CONFIG_FTRACE_MCOUNT_RECORD
 cmd_record_mcount = perl $(srctree)/scripts/recordmcount.pl "$(ARCH)" \
+	"$(if $(CONFIG_CPU_BIG_ENDIAN),big,little)" \
 	"$(if $(CONFIG_64BIT),64,32)" \
 	"$(OBJDUMP)" "$(OBJCOPY)" "$(CC)" "$(LD)" "$(NM)" "$(RM)" "$(MV)" \
 	"$(if $(part-of-module),1,0)" "$(@)";
diff --git a/scripts/recordmcount.pl b/scripts/recordmcount.pl
index 409596e..e963948 100755
--- a/scripts/recordmcount.pl
+++ b/scripts/recordmcount.pl
@@ -100,13 +100,13 @@ $P =~ s@.*/@@g;
 
 my $V = '0.1';
 
-if ($#ARGV < 7) {
-	print "usage: $P arch bits objdump objcopy cc ld nm rm mv is_module inputfile\n";
+if ($#ARGV < 8) {
+	print "usage: $P arch endian bits objdump objcopy cc ld nm rm mv is_module inputfile\n";
 	print "version: $V\n";
 	exit(1);
 }
 
-my ($arch, $bits, $objdump, $objcopy, $cc,
+my ($arch, $endian, $bits, $objdump, $objcopy, $cc,
     $ld, $nm, $rm, $mv, $is_module, $inputfile) = @ARGV;
 
 # This file refers to mcount and shouldn't be ftraced, so lets' ignore it
@@ -213,6 +213,26 @@ if ($arch eq "x86_64") {
     if ($is_module eq "0") {
         $cc .= " -mconstant-gp";
     }
+
+} elsif ($arch eq "mips") {
+	$mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\s_mcount\$";
+	$objdump .= " -Melf-trad".$endian."mips ";
+
+	if ($endian eq "big") {
+		$endian = " -EB ";
+		$ld .= " -melf".$bits."btsmip";
+	} else {
+		$endian = " -EL ";
+		$ld .= " -melf".$bits."ltsmip";
+	}
+
+	$cc .= " -mno-abicalls -fno-pic -mabi=" . $bits . $endian;
+    $ld .= $endian;
+
+    if ($bits == 64) {
+		$type = ".dword";
+    }
+
 } else {
     die "Arch $arch is not supported with CONFIG_FTRACE_MCOUNT_RECORD";
 }
@@ -441,12 +461,12 @@ if ($#converts >= 0) {
     #
     # Step 5: set up each local function as a global
     #
-    `$objcopy $globallist $inputfile $globalobj`;
+    `$objcopy $globallist $inputfile $globalobj 2>&1 >/dev/null`;
 
     #
     # Step 6: Link the global version to our list.
     #
-    `$ld -r $globalobj $mcount_o -o $globalmix`;
+    `$ld -r $globalobj $mcount_o -o $globalmix 2>&1 >/dev/null`;
 
     #
     # Step 7: Convert the local functions back into local symbols
@@ -454,7 +474,7 @@ if ($#converts >= 0) {
     `$objcopy $locallist $globalmix $inputfile`;
 
     # Remove the temp files
-    `$rm $globalobj $globalmix`;
+    `$rm $globalobj $globalmix 2>&1 >/dev/null`;
 
 } else {
 
-- 
1.6.0.4

^ permalink raw reply related	[flat|nested] 15+ messages in thread

* [PATCH v1 3/5] mips function graph tracer support
       [not found] <cover.1243543471.git.wuzj@lemote.com>
  2009-05-28 20:48 ` [PATCH v1 1/5] mips static function tracer support wuzhangjin
  2009-05-28 20:48 ` [PATCH v1 2/5] mips dynamic " wuzhangjin
@ 2009-05-28 20:49 ` wuzhangjin
  2009-05-29  2:01   ` Steven Rostedt
  2009-05-28 20:49 ` [PATCH v1 4/5] mips specific clock function to get precise timestamp wuzhangjin
  2009-05-28 20:49 ` [PATCH v1 5/5] mips specific system call tracer wuzhangjin
  4 siblings, 1 reply; 15+ messages in thread
From: wuzhangjin @ 2009-05-28 20:49 UTC (permalink / raw)
  To: linux-mips, linux-kernel
  Cc: Wu Zhangjin, Steven Rostedt, Ralf Baechle, Ingo Molnar,
	Andrew Morton, Thomas Gleixner, Nicholas Mc Guire

From: Wu Zhangjin <wuzj@lemote.com>

This works somewhat like -finstrument-functions does; but instead of using

                void __cyg_profile_func_enter (void *this_fn,
                                                  void *call_site);
                   void __cyg_profile_func_exit  (void *this_fn,
                                                  void *call_site);

-pg uses _mcount, so some tricks were adopted by the author of the
original function graph tracer:

	the _mcount function will call prepare_function_return to save the
	parent_ip, ip and calltime in a tracing array, if success, the
	address of a hooker function named return_to_handler will be
	substitued to the parent_ip, so, after return from _mcount it will
	call the return_to_handler, not back to the parent_ip, but calling
	ftrace_return_to_handler to remember the rettime, and return the
	parent_ip to let return_to_handler go back to the real parent.

Signed-off-by: Wu Zhangjin <wuzj@lemote.com>
---
 arch/mips/Kconfig              |    1 +
 arch/mips/kernel/ftrace.c      |   72 ++++++++++++++++++++++++++++++++++++++++
 arch/mips/kernel/mcount.S      |   58 +++++++++++++++++++++++++++++++-
 arch/mips/kernel/vmlinux.lds.S |    1 +
 4 files changed, 131 insertions(+), 1 deletions(-)

diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 0c00536..ac1437e 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -9,6 +9,7 @@ config MIPS
 	select HAVE_DYNAMIC_FTRACE
 	select HAVE_FTRACE_MCOUNT_RECORD
 	select HAVE_FTRACE_NMI_ENTER if DYNAMIC_FTRACE
+	select HAVE_FUNCTION_GRAPH_TRACER
 	# Horrible source of confusion.  Die, die, die ...
 	select EMBEDDED
 	select RTC_LIB
diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c
index 827c128..e7f15f7 100644
--- a/arch/mips/kernel/ftrace.c
+++ b/arch/mips/kernel/ftrace.c
@@ -215,3 +215,75 @@ int __init ftrace_dyn_arch_init(void *data)
     return 0;
 }
 #endif				/* CONFIG_DYNAMIC_FTRACE */
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+
+#define JMP	0x08000000			/* jump to target directly */
+extern void ftrace_graph_call(void);
+
+int ftrace_enable_ftrace_graph_caller(void)
+{
+    unsigned long ip = (unsigned long) (&ftrace_graph_call);
+    unsigned char old[MCOUNT_INSN_SIZE], *new;
+    int ret;
+
+	/* j ftrace_stub */
+    memcpy(old, (unsigned long *) ip, MCOUNT_INSN_SIZE);
+    new = ftrace_call_replace(JMP, (unsigned long) ftrace_graph_caller);
+
+    ret = ftrace_modify_code(ip, old, new);
+
+    return ret;
+}
+
+int ftrace_disable_ftrace_graph_caller(void)
+{
+    unsigned long ip = (unsigned long) (&ftrace_graph_call);
+    unsigned char old[MCOUNT_INSN_SIZE], *new;
+    int ret;
+
+	/* j ftrace_graph_caller */
+    memcpy(old, (unsigned long *) ip, MCOUNT_INSN_SIZE);
+    new = ftrace_call_replace(JMP, (unsigned long) ftrace_stub);
+
+    ret = ftrace_modify_code(ip, old, new);
+
+    return ret;
+}
+
+#endif				/* !CONFIG_DYNAMIC_FTRACE */
+
+/*
+ * Hook the return address and push it in the stack of return addrs
+ * in current thread info.
+ */
+
+unsigned long prepare_ftrace_return(unsigned long ip,
+				    unsigned long parent_ip)
+{
+    struct ftrace_graph_ent trace;
+
+    /* Nmi's are currently unsupported */
+    if (unlikely(in_nmi()))
+		goto out;
+
+    if (unlikely(atomic_read(&current->tracing_graph_pause)))
+		goto out;
+
+    if (ftrace_push_return_trace(parent_ip, ip, &trace.depth) == -EBUSY)
+		goto out;
+
+    trace.func = ip;
+
+    /* Only trace if the calling function expects to */
+    if (!ftrace_graph_entry(&trace)) {
+		current->curr_ret_stack--;
+		goto out;
+    }
+    return (unsigned long) &return_to_handler;
+out:
+    return parent_ip;
+}
+#endif				/* CONFIG_FUNCTION_GRAPH_TRACER */
diff --git a/arch/mips/kernel/mcount.S b/arch/mips/kernel/mcount.S
index ce8a0ba..bd58f16 100644
--- a/arch/mips/kernel/mcount.S
+++ b/arch/mips/kernel/mcount.S
@@ -28,6 +28,10 @@
 	PTR_SUBU	sp, PT_SIZE
 	PTR_S	ra, PT_R31(sp)
 	PTR_S	$1, PT_R1(sp)
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	PTR_S	v0, PT_R2(sp)
+	PTR_S	v1, PT_R3(sp)
+#endif
 	PTR_S	a0, PT_R4(sp)
 	PTR_S	a1, PT_R5(sp)
 	PTR_S	a2, PT_R6(sp)
@@ -43,6 +47,10 @@
 	.macro MCOUNT_RESTORE_REGS
 	PTR_L	ra, PT_R31(sp)
 	PTR_L	$1, PT_R1(sp)
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	PTR_L	v0, PT_R2(sp)
+	PTR_L	v1, PT_R3(sp)
+#endif
 	PTR_L	a0, PT_R4(sp)
 	PTR_L	a1, PT_R5(sp)
 	PTR_L	a2, PT_R6(sp)
@@ -89,6 +97,14 @@ ftrace_call:
 	nop
 
 	MCOUNT_RESTORE_REGS
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	.globl ftrace_graph_call
+ftrace_graph_call:
+	j	ftrace_stub
+	nop
+#endif
+
 	.globl ftrace_stub
 ftrace_stub:
 	RETURN_BACK
@@ -106,7 +122,15 @@ NESTED(_mcount, PT_SIZE, ra)
 	PTR_L	t1, ftrace_trace_function /* please don't use t1 later, safe? */
 	bne	t0, t1, static_trace
 	nop
-
+#ifdef	CONFIG_FUNCTION_GRAPH_TRACER
+	PTR_L	t2, ftrace_graph_return
+	bne	t0,	t2, ftrace_graph_caller
+	nop
+	PTR_LA	t0, ftrace_graph_entry_stub
+	PTR_L	t2, ftrace_graph_entry
+	bne	t0,	t2, ftrace_graph_caller
+	nop
+#endif
 	j	ftrace_stub
 	nop
 
@@ -125,5 +149,37 @@ ftrace_stub:
 
 #endif	/* ! CONFIG_DYNAMIC_FTRACE */
 
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+
+NESTED(ftrace_graph_caller, PT_SIZE, ra)
+	MCOUNT_SAVE_REGS
+
+	MCOUNT_SET_ARGS
+	jal	prepare_ftrace_return
+	nop
+
+	/* overwrite the parent as &return_to_handler: v0 -> $1(at) */
+	PTR_S	v0, PT_R1(sp)
+
+	MCOUNT_RESTORE_REGS
+	RETURN_BACK
+	END(ftrace_graph_caller)
+
+	.align	2
+	.globl	return_to_handler
+return_to_handler:
+	MCOUNT_SAVE_REGS
+
+	jal	ftrace_return_to_handler
+	nop
+
+	/* restore the real parent address: v0 -> ra */
+	PTR_S	v0, PT_R31(sp)
+
+	MCOUNT_RESTORE_REGS
+	RETURN_BACK
+
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
 	.set at
 	.set reorder
diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S
index 58738c8..67435e5 100644
--- a/arch/mips/kernel/vmlinux.lds.S
+++ b/arch/mips/kernel/vmlinux.lds.S
@@ -36,6 +36,7 @@ SECTIONS
 		SCHED_TEXT
 		LOCK_TEXT
 		KPROBES_TEXT
+		IRQENTRY_TEXT
 		*(.text.*)
 		*(.fixup)
 		*(.gnu.warning)
-- 
1.6.0.4

^ permalink raw reply related	[flat|nested] 15+ messages in thread

* [PATCH v1 4/5] mips specific clock function to get precise timestamp
       [not found] <cover.1243543471.git.wuzj@lemote.com>
                   ` (2 preceding siblings ...)
  2009-05-28 20:49 ` [PATCH v1 3/5] mips function graph " wuzhangjin
@ 2009-05-28 20:49 ` wuzhangjin
  2009-05-29  2:06   ` Steven Rostedt
  2009-05-28 20:49 ` [PATCH v1 5/5] mips specific system call tracer wuzhangjin
  4 siblings, 1 reply; 15+ messages in thread
From: wuzhangjin @ 2009-05-28 20:49 UTC (permalink / raw)
  To: linux-mips, linux-kernel
  Cc: Wu Zhangjin, Steven Rostedt, Ralf Baechle, Ingo Molnar,
	Andrew Morton, Thomas Gleixner, Nicholas Mc Guire

From: Wu Zhangjin <wuzj@lemote.com>

By default, ring_buffer_time_stamp calls the (jiffies-based) sched_clock
function to get a timestamp. On x86 there is a TSC-based (64-bit)
sched_clock, but on MIPS the 'tsc' (clock counter) is only 32 bits long
and easily rolls over; there is no precise sched_clock on MIPS, so we
need to provide one ourselves.

To avoid invasive changes across linux-mips, I do not want to implement a
TSC-based native_sched_clock as x86 does, because handling the rollover
of the only-32-bit MIPS 'tsc' would require extra overhead. In fact, I
tried to implement one (just as the ring_buffer_time_stamp here does),
but it made the kernel hang while booting, and I am not sure why it does
not work.

Herein, I just implement a MIPS-specific ring_buffer_time_stamp in
arch/mips/kernel/ftrace.c, by adding __attribute__((weak)) to
ring_buffer_time_stamp(...) {} in kernel/trace/ring_buffer.c and doing
something like this in arch/mips/kernel/ftrace.c:

u64  ring_buffer_time_stamp \
       __attribute__((alias("native_ring_buffer_time_stamp")));

And, in the same way, there is also a need to implement a MIPS-specific
trace_clock_local based on the above ring_buffer_time_stamp; this clock
function is called by the function graph tracer to get the calltime &
rettime of a function.

And what about the trace_clock and trace_clock_global functions — should
we also implement MIPS-specific ones? I am not sure.

Signed-off-by: Wu Zhangjin <wuzj@lemote.com>
---
 arch/mips/kernel/Makefile       |    2 +
 arch/mips/kernel/csrc-r4k.c     |    2 +-
 arch/mips/kernel/ftrace_clock.c |   77 +++++++++++++++++++++++++++++++++++++++
 kernel/trace/ring_buffer.c      |    3 +-
 kernel/trace/trace_clock.c      |    2 +-
 5 files changed, 83 insertions(+), 3 deletions(-)
 create mode 100644 arch/mips/kernel/ftrace_clock.c

diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index 6b1a8a5..5dec76f 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -12,6 +12,7 @@ ifdef CONFIG_FUNCTION_TRACER
 # Do not profile debug and lowlevel utilities
 CFLAGS_REMOVE_mcount.o = -pg
 CFLAGS_REMOVE_ftrace.o = -pg
+CFLAGS_REMOVE_ftrace_clock.o = -pg
 CFLAGS_REMOVE_early_printk.o = -pg
 endif
 
@@ -33,6 +34,7 @@ obj-$(CONFIG_MODULES)		+= mips_ksyms.o module.o
 
 obj-$(CONFIG_FUNCTION_TRACER)	+= mcount.o
 obj-$(CONFIG_FUNCTION_TRACER)	+= ftrace.o
+obj-$(CONFIG_NOP_TRACER)	+= ftrace_clock.o
 
 obj-$(CONFIG_CPU_LOONGSON2)	+= r4k_fpu.o r4k_switch.o
 obj-$(CONFIG_CPU_MIPS32)	+= r4k_fpu.o r4k_switch.o
diff --git a/arch/mips/kernel/csrc-r4k.c b/arch/mips/kernel/csrc-r4k.c
index e95a3cd..3da1c7a 100644
--- a/arch/mips/kernel/csrc-r4k.c
+++ b/arch/mips/kernel/csrc-r4k.c
@@ -10,7 +10,7 @@
 
 #include <asm/time.h>
 
-static cycle_t c0_hpt_read(struct clocksource *cs)
+static cycle_t notrace c0_hpt_read(struct clocksource *cs)
 {
 	return read_c0_count();
 }
diff --git a/arch/mips/kernel/ftrace_clock.c b/arch/mips/kernel/ftrace_clock.c
new file mode 100644
index 0000000..2f3b05a
--- /dev/null
+++ b/arch/mips/kernel/ftrace_clock.c
@@ -0,0 +1,77 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive for
+ * more details.
+ *
+ * Copyright (C) 2009 DSLab, Lanzhou University, China
+ * Author: Wu Zhangjin <wuzj@lemote.com>
+ */
+
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/jiffies.h>
+#include <linux/clocksource.h>
+#include <linux/ring_buffer.h>
+
+/* Up this if you want to test the TIME_EXTENTS and normalization */
+#ifndef DEBUG_SHIFT
+#define DEBUG_SHIFT 0
+#endif
+
+/* mips-specific ring_buffer_time_stamp implementation,
+ * the original one is defined in kernel/trace/ring_buffer.c
+ */
+
+u64 native_ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
+{
+	u64 current_cycles;
+	static unsigned long old_jiffies;
+	static u64 time, old_cycles;
+
+	preempt_disable_notrace();
+    /* update timestamp to avoid missing the timer interrupt */
+	if (time_before(jiffies, old_jiffies)) {
+		old_jiffies = jiffies;
+		time = sched_clock();
+		old_cycles = clock->cycle_last;
+	}
+	current_cycles = clock->read(clock);
+
+	time = (time + cyc2ns(clock, (current_cycles - old_cycles)
+				& clock->mask)) << DEBUG_SHIFT;
+
+	old_cycles = current_cycles;
+	preempt_enable_no_resched_notrace();
+
+	return time;
+}
+
+u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
+		__attribute__((alias("native_ring_buffer_time_stamp")));
+
+/*
+ * native_trace_clock_local(): the simplest and least coherent tracing clock.
+ *
+ * Useful for tracing that does not cross to other CPUs nor
+ * does it go through idle events.
+ */
+u64 native_trace_clock_local(void)
+{
+	unsigned long flags;
+	u64 clock;
+
+	/*
+	 * sched_clock() is an architecture implemented, fast, scalable,
+	 * lockless clock. It is not guaranteed to be coherent across
+	 * CPUs, nor across CPU idle events.
+	 */
+	raw_local_irq_save(flags);
+	clock = ring_buffer_time_stamp(NULL, raw_smp_processor_id());
+	raw_local_irq_restore(flags);
+
+	return clock;
+}
+
+u64 trace_clock_local(void)
+		__attribute__((alias("native_trace_clock_local")));
+
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 960cbf4..717bd8e 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -420,7 +420,8 @@ struct ring_buffer_iter {
 /* Up this if you want to test the TIME_EXTENTS and normalization */
 #define DEBUG_SHIFT 0
 
-u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
+u64 __attribute__((weak)) ring_buffer_time_stamp(struct ring_buffer *buffer,
+				int cpu)
 {
 	u64 time;
 
diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
index b588fd8..78c98c8 100644
--- a/kernel/trace/trace_clock.c
+++ b/kernel/trace/trace_clock.c
@@ -26,7 +26,7 @@
  * Useful for tracing that does not cross to other CPUs nor
  * does it go through idle events.
  */
-u64 notrace trace_clock_local(void)
+u64 __attribute__((weak)) notrace trace_clock_local(void)
 {
 	unsigned long flags;
 	u64 clock;
-- 
1.6.0.4

^ permalink raw reply related	[flat|nested] 15+ messages in thread

* [PATCH v1 5/5] mips specific system call tracer
       [not found] <cover.1243543471.git.wuzj@lemote.com>
                   ` (3 preceding siblings ...)
  2009-05-28 20:49 ` [PATCH v1 4/5] mips specific clock function to get precise timestamp wuzhangjin
@ 2009-05-28 20:49 ` wuzhangjin
  2009-05-29  2:09   ` Steven Rostedt
  4 siblings, 1 reply; 15+ messages in thread
From: wuzhangjin @ 2009-05-28 20:49 UTC (permalink / raw)
  To: linux-mips, linux-kernel
  Cc: Wu Zhangjin, Steven Rostedt, Ralf Baechle, Ingo Molnar,
	Andrew Morton, Thomas Gleixner, Nicholas Mc Guire

From: Wu Zhangjin <wuzj@lemote.com>

FIXME: there are several different sys_call_table entries in mips64, but
currently, I only use the one in arch/mips/kernel/scall64-o32.S;
so, if people do not use the o32 standard, it will not compile normally.

the system call tracing demo in a debian system on
qemu-system-mipsel/malta:

debian-mips-malta:~# mount -t debugfs nodev /debug
debian-mips-malta:~# echo 20000 > /debug/tracing/buffer_size_kb
debian-mips-malta:~# cat /debug/tracing/available_tracers
syscall nop
debian-mips-malta:~# echo syscall > /debug/tracing/current_tracer
debian-mips-malta:~# echo 1 > /debug/tracing/tracing_enabled
debian-mips-malta:~# sleep 1
debian-mips-malta:~# echo 0 > /debug/tracing/tracing_enabled
debian-mips-malta:~# cat /debug/tracing/trace | head -20
           <...>-533   [000]    60.458291: sys_write(fd: 1, buf: 4fc408, count: 8)
           <...>-533   [000]    64.325614: sys_getrlimit(resource: 3, rlim: 530020)
           <...>-533   [000]    64.327089: sys_read(fd: 2, buf: 4fc008, count: 6)
           <...>-533   [000]    64.969663: sys_exit(error_code: 2)
           <...>-533   [000]    65.608794: sys_exit(error_code: 2)
           <...>-533   [000]    66.231796: sys_read(fd: 2, buf: 4fc008, count: 6)
           <...>-533   [000]    66.913687: sys_open(filename: 1, flags: 0, mode: a)
           <...>-533   [000]    66.914617: sys_exit(error_code: 1)
           <...>-533   [000]    70.797507: sys_exit(error_code: 503be8)
           <...>-536   [000]    70.833108: sys_exit(error_code: 2aac6cfc)
           <...>-536   [000]    70.833897: sys_exit(error_code: 2aac6540)
           <...>-536   [000]    70.835711: sys_exit(error_code: 2aac6cfc)
           <...>-536   [000]    70.840609: sys_lchown(filename: 3, user: 7fb08b38, group: 20)
           <...>-533   [000]    71.877785: sys_open(filename: ffffffff, flags: 7fcf08c8, mode: b)
           <...>-533   [000]    75.531122: sys_open(filename: 1, flags: 0, mode: a)

Signed-off-by: Wu Zhangjin <wuzj@lemote.com>
---
 arch/mips/Kconfig                   |    1 +
 arch/mips/include/asm/ptrace.h      |    2 +
 arch/mips/include/asm/reg.h         |    5 ++
 arch/mips/include/asm/syscall.h     |   84 +++++++++++++++++++++++++++++++++++
 arch/mips/include/asm/thread_info.h |    5 ++-
 arch/mips/kernel/Makefile           |    1 +
 arch/mips/kernel/entry.S            |    2 +-
 arch/mips/kernel/ftrace.c           |   71 +++++++++++++++++++++++++++++
 arch/mips/kernel/ptrace.c           |   14 +++++-
 arch/mips/kernel/scall64-o32.S      |    2 +-
 10 files changed, 182 insertions(+), 5 deletions(-)
 create mode 100644 arch/mips/include/asm/syscall.h

diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index ac1437e..f488027 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -10,6 +10,7 @@ config MIPS
 	select HAVE_FTRACE_MCOUNT_RECORD
 	select HAVE_FTRACE_NMI_ENTER if DYNAMIC_FTRACE
 	select HAVE_FUNCTION_GRAPH_TRACER
+	select HAVE_FTRACE_SYSCALLS
 	# Horrible source of confusion.  Die, die, die ...
 	select EMBEDDED
 	select RTC_LIB
diff --git a/arch/mips/include/asm/ptrace.h b/arch/mips/include/asm/ptrace.h
index ce47118..32e5b62 100644
--- a/arch/mips/include/asm/ptrace.h
+++ b/arch/mips/include/asm/ptrace.h
@@ -45,6 +45,8 @@ struct pt_regs {
 	unsigned long cp0_badvaddr;
 	unsigned long cp0_cause;
 	unsigned long cp0_epc;
+	/* Used for restarting system calls */
+	unsigned long orig_v0;
 #ifdef CONFIG_MIPS_MT_SMTC
 	unsigned long cp0_tcstatus;
 #endif /* CONFIG_MIPS_MT_SMTC */
diff --git a/arch/mips/include/asm/reg.h b/arch/mips/include/asm/reg.h
index 634b55d..93d66bc 100644
--- a/arch/mips/include/asm/reg.h
+++ b/arch/mips/include/asm/reg.h
@@ -65,6 +65,8 @@
 #define EF_CP0_CAUSE		43
 #define EF_UNUSED0		44
 
+#define EF_ORIG_V0		45
+
 #define EF_SIZE			180
 
 #endif
@@ -121,6 +123,9 @@
 #define EF_CP0_STATUS		36
 #define EF_CP0_CAUSE		37
 
+
+#define EF_ORIG_V0			38
+
 #define EF_SIZE			304	/* size in bytes */
 
 #endif /* CONFIG_64BIT */
diff --git a/arch/mips/include/asm/syscall.h b/arch/mips/include/asm/syscall.h
new file mode 100644
index 0000000..b785098
--- /dev/null
+++ b/arch/mips/include/asm/syscall.h
@@ -0,0 +1,84 @@
+/*
+ * Access to user system call parameters and results
+ *
+ * Copyright (C) 2008 Red Hat, Inc.  All rights reserved.
+ * Copyright (C) 2009 DSLab, Lanzhou University, China
+ * Author: Wu Zhangjin <wuzj@lemote.com>
+ *
+ * This copyrighted material is made available to anyone wishing to use,
+ * modify, copy, or redistribute it subject to the terms and conditions
+ * of the GNU General Public License v.2.
+ *
+ * See asm-generic/syscall.h for descriptions of what we must do here.
+ */
+
+#ifndef _ASM_SYSCALL_H
+#define _ASM_SYSCALL_H	1
+
+#include <linux/sched.h>
+
+static inline long syscall_get_nr(struct task_struct *task,
+				  struct pt_regs *regs)
+{
+	/*        syscall   Exc-Code: 0 1000 00     v0 */
+	return ((regs->cp0_cause&0xff) == 0x20)  ? regs->regs[2] : -1L;
+}
+
+static inline void syscall_rollback(struct task_struct *task,
+				    struct pt_regs *regs)
+{
+	regs->regs[2] = regs->orig_v0;
+}
+
+static inline long syscall_get_error(struct task_struct *task,
+				     struct pt_regs *regs)
+{
+	return regs->regs[2] ? -regs->regs[2] : 0;
+}
+
+static inline long syscall_get_return_value(struct task_struct *task,
+					    struct pt_regs *regs)
+{
+	return regs->regs[2];
+}
+
+static inline void syscall_set_return_value(struct task_struct *task,
+					    struct pt_regs *regs,
+					    int error, long val)
+{
+	if (error)
+		regs->regs[2] = -error;
+	else
+		regs->regs[2] = val;
+}
+
+static inline void syscall_get_arguments(struct task_struct *task,
+					 struct pt_regs *regs,
+					 unsigned int i, unsigned int n,
+					 unsigned long *args)
+{
+#ifdef CONFIG_32BIT
+	/* FIXME: only 4 argument registers are available in mips32, so how do
+	 * we handle the others?
+	 */
+	BUG_ON(i + n > 4);
+#else
+	BUG_ON(i + n > 6);
+#endif
+	memcpy(args, &regs->regs[4 + i], n * sizeof(args[0]));
+}
+
+static inline void syscall_set_arguments(struct task_struct *task,
+					 struct pt_regs *regs,
+					 unsigned int i, unsigned int n,
+					 const unsigned long *args)
+{
+#ifdef CONFIG_32BIT
+	BUG_ON(i + n > 4);
+#else
+	BUG_ON(i + n > 6);
+#endif
+	memcpy(&regs->regs[4 + i], args, n * sizeof(args[0]));
+}
+
+#endif	/* _ASM_SYSCALL_H */
diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
index 143a481..1d55dc0 100644
--- a/arch/mips/include/asm/thread_info.h
+++ b/arch/mips/include/asm/thread_info.h
@@ -128,6 +128,7 @@ register struct thread_info *__current_thread_info __asm__("$28");
 #define TIF_32BIT_ADDR		23	/* 32-bit address space (o32/n32) */
 #define TIF_FPUBOUND		24	/* thread bound to FPU-full CPU set */
 #define TIF_LOAD_WATCH		25	/* If set, load watch registers */
+#define TIF_SYSCALL_FTRACE	27	/* for ftrace syscall instrumentation */
 #define TIF_SYSCALL_TRACE	31	/* syscall trace active */
 
 #ifdef CONFIG_MIPS32_O32
@@ -151,11 +152,13 @@ register struct thread_info *__current_thread_info __asm__("$28");
 #define _TIF_32BIT_ADDR		(1<<TIF_32BIT_ADDR)
 #define _TIF_FPUBOUND		(1<<TIF_FPUBOUND)
 #define _TIF_LOAD_WATCH		(1<<TIF_LOAD_WATCH)
+#define _TIF_SYSCALL_FTRACE	(1<<TIF_SYSCALL_FTRACE)
 
 /* work to do on interrupt/exception return */
 #define _TIF_WORK_MASK		(0x0000ffef & ~_TIF_SECCOMP)
 /* work to do on any return to u-space */
-#define _TIF_ALLWORK_MASK	(0x8000ffff & ~_TIF_SECCOMP)
+#define _TIF_ALLWORK_MASK	\
+	((0x8000ffff & ~_TIF_SECCOMP) | _TIF_SYSCALL_FTRACE)
 
 #endif /* __KERNEL__ */
 
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index 5dec76f..8b4fafa 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -34,6 +34,7 @@ obj-$(CONFIG_MODULES)		+= mips_ksyms.o module.o
 
 obj-$(CONFIG_FUNCTION_TRACER)	+= mcount.o
 obj-$(CONFIG_FUNCTION_TRACER)	+= ftrace.o
+obj-$(CONFIG_FTRACE_SYSCALLS)	+= ftrace.o
 obj-$(CONFIG_NOP_TRACER)	+= ftrace_clock.o
 
 obj-$(CONFIG_CPU_LOONGSON2)	+= r4k_fpu.o r4k_switch.o
diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S
index ffa3310..786e4ef 100644
--- a/arch/mips/kernel/entry.S
+++ b/arch/mips/kernel/entry.S
@@ -167,7 +167,7 @@ work_notifysig:				# deal with pending signals and
 FEXPORT(syscall_exit_work_partial)
 	SAVE_STATIC
 syscall_exit_work:
-	li	t0, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
+	li	t0, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_FTRACE
 	and	t0, a2			# a2 is preloaded with TI_FLAGS
 	beqz	t0, work_pending	# trace bit set?
 	local_irq_enable		# could let do_syscall_trace()
diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c
index e7f15f7..5967998 100644
--- a/arch/mips/kernel/ftrace.c
+++ b/arch/mips/kernel/ftrace.c
@@ -22,6 +22,8 @@
 #include <asm/asm.h>
 #include <asm/unistd.h>
 
+#include <trace/syscall.h>
+
 #ifdef CONFIG_DYNAMIC_FTRACE
 
 #define JAL 0x0c000000	/* jump & link: ip --> ra, jump to target */
@@ -287,3 +289,72 @@ out:
     return parent_ip;
 }
 #endif				/* CONFIG_FUNCTION_GRAPH_TRACER */
+
+#ifdef CONFIG_FTRACE_SYSCALLS
+
+extern unsigned long __start_syscalls_metadata[];
+extern unsigned long __stop_syscalls_metadata[];
+
+/* FIXME: in mips64, there are different entries of sys_call_table when using
+ * different standards; on loongson2f based machines (Fuloong & Yeeloong), the
+ * system uses the o32 standard, so here we only use the sys_call_table in
+ * arch/mips/kernel/scall64-o32.S */
+
+extern unsigned long *sys_call_table;
+
+static struct syscall_metadata **syscalls_metadata;
+
+static struct syscall_metadata *find_syscall_meta(unsigned long *syscall)
+{
+	struct syscall_metadata *start;
+	struct syscall_metadata *stop;
+	char str[KSYM_SYMBOL_LEN];
+
+
+	start = (struct syscall_metadata *)__start_syscalls_metadata;
+	stop = (struct syscall_metadata *)__stop_syscalls_metadata;
+	kallsyms_lookup((unsigned long) syscall, NULL, NULL, NULL, str);
+
+	for ( ; start < stop; start++) {
+		if (start->name && !strcmp(start->name, str))
+			return start;
+	}
+	return NULL;
+}
+
+struct syscall_metadata *syscall_nr_to_meta(int nr)
+{
+	if (!syscalls_metadata || nr >= __NR_Linux_syscalls || nr < 0)
+		return NULL;
+
+	return syscalls_metadata[nr];
+}
+
+void arch_init_ftrace_syscalls(void)
+{
+	int i;
+	struct syscall_metadata *meta;
+	unsigned long **psys_syscall_table = &sys_call_table;
+	static atomic_t refs;
+
+	if (atomic_inc_return(&refs) != 1)
+		goto end;
+
+	syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) *
+					__NR_Linux_syscalls, GFP_KERNEL);
+	if (!syscalls_metadata) {
+		WARN_ON(1);
+		return;
+	}
+
+	for (i = 0; i < __NR_Linux_syscalls; i++) {
+		meta = find_syscall_meta(psys_syscall_table[i]);
+		syscalls_metadata[i] = meta;
+	}
+	return;
+
+	/* Paranoid: avoid overflow */
+end:
+	atomic_dec(&refs);
+}
+#endif	/* CONFIG_FTRACE_SYSCALLS */
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index 054861c..fa762dc 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -25,6 +25,7 @@
 #include <linux/security.h>
 #include <linux/audit.h>
 #include <linux/seccomp.h>
+#include <linux/ftrace.h>
 
 #include <asm/byteorder.h>
 #include <asm/cpu.h>
@@ -39,6 +40,7 @@
 #include <asm/bootinfo.h>
 #include <asm/reg.h>
 
+#include <trace/syscall.h>
 /*
  * Called by kernel/ptrace.c when detaching..
  *
@@ -60,7 +62,7 @@ int ptrace_getregs(struct task_struct *child, __s64 __user *data)
 	struct pt_regs *regs;
 	int i;
 
-	if (!access_ok(VERIFY_WRITE, data, 38 * 8))
+	if (!access_ok(VERIFY_WRITE, data, 39 * 8))
 		return -EIO;
 
 	regs = task_pt_regs(child);
@@ -73,6 +75,7 @@ int ptrace_getregs(struct task_struct *child, __s64 __user *data)
 	__put_user((long)regs->cp0_badvaddr, data + EF_CP0_BADVADDR - EF_R0);
 	__put_user((long)regs->cp0_status, data + EF_CP0_STATUS - EF_R0);
 	__put_user((long)regs->cp0_cause, data + EF_CP0_CAUSE - EF_R0);
+	__put_user((long)regs->orig_v0, data + EF_ORIG_V0 - EF_R0);
 
 	return 0;
 }
@@ -87,7 +90,7 @@ int ptrace_setregs(struct task_struct *child, __s64 __user *data)
 	struct pt_regs *regs;
 	int i;
 
-	if (!access_ok(VERIFY_READ, data, 38 * 8))
+	if (!access_ok(VERIFY_READ, data, 39 * 8))
 		return -EIO;
 
 	regs = task_pt_regs(child);
@@ -97,6 +100,7 @@ int ptrace_setregs(struct task_struct *child, __s64 __user *data)
 	__get_user(regs->lo, data + EF_LO - EF_R0);
 	__get_user(regs->hi, data + EF_HI - EF_R0);
 	__get_user(regs->cp0_epc, data + EF_CP0_EPC - EF_R0);
+	__get_user(regs->orig_v0, data + EF_ORIG_V0 - EF_R0);
 
 	/* badvaddr, status, and cause may not be written.  */
 
@@ -575,6 +579,9 @@ asmlinkage void do_syscall_trace(struct pt_regs *regs, int entryexit)
 	if (!(current->ptrace & PT_PTRACED))
 		goto out;
 
+	if (unlikely(test_thread_flag(TIF_SYSCALL_FTRACE)))
+		ftrace_syscall_exit(regs);
+
 	if (!test_thread_flag(TIF_SYSCALL_TRACE))
 		goto out;
 
@@ -594,6 +601,9 @@ asmlinkage void do_syscall_trace(struct pt_regs *regs, int entryexit)
 	}
 
 out:
+	if (unlikely(test_thread_flag(TIF_SYSCALL_FTRACE)))
+		ftrace_syscall_enter(regs);
+
 	if (unlikely(current->audit_context) && !entryexit)
 		audit_syscall_entry(audit_arch(), regs->regs[0],
 				    regs->regs[4], regs->regs[5],
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index a5598b2..dd1f13a 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -202,7 +202,7 @@ einval:	li	v0, -ENOSYS
 
 	.align	3
 	.type	sys_call_table,@object
-sys_call_table:
+EXPORT(sys_call_table)
 	PTR	sys32_syscall			/* 4000 */
 	PTR	sys_exit
 	PTR	sys_fork
-- 
1.6.0.4

^ permalink raw reply related	[flat|nested] 15+ messages in thread

* Re: [PATCH v1 1/5] mips static function tracer support
  2009-05-28 20:48 ` [PATCH v1 1/5] mips static function tracer support wuzhangjin
@ 2009-05-29  1:13   ` Steven Rostedt
  2009-05-29  6:11     ` Wu Zhangjin
  0 siblings, 1 reply; 15+ messages in thread
From: Steven Rostedt @ 2009-05-29  1:13 UTC (permalink / raw)
  To: wuzhangjin
  Cc: linux-mips, linux-kernel, Wu Zhangjin, Ralf Baechle, Ingo Molnar,
	Andrew Morton, Thomas Gleixner, Nicholas Mc Guire



On Fri, 29 May 2009, wuzhangjin@gmail.com wrote:

> From: Wu Zhangjin <wuzj@lemote.com>
> 
> if -pg of gcc is enabled. a calling to _mcount will be inserted to each
> kernel function. so, there is a possibility to trace the functions in
> _mcount.
> 
> here is the implementation of mips specific _mcount for static function
> tracer.
> 
> -ffunction-sections option not works with -pg, so disable it if enables
> FUNCTION_TRACER.
> 
> Signed-off-by: Wu Zhangjin <wuzj@lemote.com>
> ---
>  arch/mips/Kconfig              |    2 +
>  arch/mips/Makefile             |    2 +
>  arch/mips/include/asm/ftrace.h |   25 ++++++++++-
>  arch/mips/kernel/Makefile      |    8 +++
>  arch/mips/kernel/mcount.S      |   98 ++++++++++++++++++++++++++++++++++++++++
>  arch/mips/kernel/mips_ksyms.c  |    5 ++
>  6 files changed, 139 insertions(+), 1 deletions(-)
>  create mode 100644 arch/mips/kernel/mcount.S
> 
> diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
> index 09b1287..d5c01ca 100644
> --- a/arch/mips/Kconfig
> +++ b/arch/mips/Kconfig
> @@ -4,6 +4,8 @@ config MIPS
>  	select HAVE_IDE
>  	select HAVE_OPROFILE
>  	select HAVE_ARCH_KGDB
> +	select HAVE_FUNCTION_TRACER
> +	select HAVE_FUNCTION_TRACE_MCOUNT_TEST
>  	# Horrible source of confusion.  Die, die, die ...
>  	select EMBEDDED
>  	select RTC_LIB
> diff --git a/arch/mips/Makefile b/arch/mips/Makefile
> index c4cae9e..f86fb15 100644
> --- a/arch/mips/Makefile
> +++ b/arch/mips/Makefile
> @@ -48,7 +48,9 @@ ifneq ($(SUBARCH),$(ARCH))
>    endif
>  endif
>  
> +ifndef CONFIG_FUNCTION_TRACER
>  cflags-y := -ffunction-sections
> +endif
>  cflags-y += $(call cc-option, -mno-check-zero-division)
>  
>  ifdef CONFIG_32BIT
> diff --git a/arch/mips/include/asm/ftrace.h b/arch/mips/include/asm/ftrace.h
> index 40a8c17..5f8ebcf 100644
> --- a/arch/mips/include/asm/ftrace.h
> +++ b/arch/mips/include/asm/ftrace.h
> @@ -1 +1,24 @@
> -/* empty */
> +/*
> + * This file is subject to the terms and conditions of the GNU General Public
> + * License.  See the file "COPYING" in the main directory of this archive for
> + * more details.
> + *
> + * Copyright (C) 2009 DSLab, Lanzhou University, China
> + * Author: Wu Zhangjin <wuzj@lemote.com>
> + */
> +
> +#ifndef _ASM_MIPS_FTRACE_H
> +#define _ASM_MIPS_FTRACE_H
> +
> +#ifdef CONFIG_FUNCTION_TRACER
> +
> +#define MCOUNT_ADDR ((unsigned long)(_mcount))
> +#define MCOUNT_INSN_SIZE 4		/* sizeof mcount call */
> +
> +#ifndef __ASSEMBLY__
> +extern void _mcount(void);
> +#define mcount _mcount
> +
> +#endif /* __ASSEMBLY__ */
> +#endif /* CONFIG_FUNCTION_TRACER */
> +#endif /* _ASM_MIPS_FTRACE_H */
> diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
> index e961221..d167dde 100644
> --- a/arch/mips/kernel/Makefile
> +++ b/arch/mips/kernel/Makefile
> @@ -8,6 +8,12 @@ obj-y		+= cpu-probe.o branch.o entry.o genex.o irq.o process.o \
>  		   ptrace.o reset.o setup.o signal.o syscall.o \
>  		   time.o topology.o traps.o unaligned.o watch.o
>  
> +ifdef CONFIG_FUNCTION_TRACER
> +# Do not profile debug and lowlevel utilities
> +CFLAGS_REMOVE_mcount.o = -pg

mcount.S is an assembly file, the above is for C files. So it is not 
needed.

-- Steve

> +CFLAGS_REMOVE_early_printk.o = -pg
> +endif
> +
>  obj-$(CONFIG_CEVT_BCM1480)	+= cevt-bcm1480.o
>  obj-$(CONFIG_CEVT_R4K_LIB)	+= cevt-r4k.o
>  obj-$(CONFIG_MIPS_MT_SMTC)	+= cevt-smtc.o
> @@ -24,6 +30,8 @@ obj-$(CONFIG_SYNC_R4K)		+= sync-r4k.o
>  obj-$(CONFIG_STACKTRACE)	+= stacktrace.o
>  obj-$(CONFIG_MODULES)		+= mips_ksyms.o module.o
>  
> +obj-$(CONFIG_FUNCTION_TRACER)	+= mcount.o
> +
>  obj-$(CONFIG_CPU_LOONGSON2)	+= r4k_fpu.o r4k_switch.o
>  obj-$(CONFIG_CPU_MIPS32)	+= r4k_fpu.o r4k_switch.o
>  obj-$(CONFIG_CPU_MIPS64)	+= r4k_fpu.o r4k_switch.o
> diff --git a/arch/mips/kernel/mcount.S b/arch/mips/kernel/mcount.S
> new file mode 100644
> index 0000000..268724e
> --- /dev/null

^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [PATCH v1 2/5] mips dynamic function tracer support
  2009-05-28 20:48 ` [PATCH v1 2/5] mips dynamic " wuzhangjin
@ 2009-05-29  1:24   ` Steven Rostedt
  2009-05-29  6:36     ` Wu Zhangjin
  0 siblings, 1 reply; 15+ messages in thread
From: Steven Rostedt @ 2009-05-29  1:24 UTC (permalink / raw)
  To: wuzhangjin
  Cc: linux-mips, linux-kernel, Wu Zhangjin, Ralf Baechle, Ingo Molnar,
	Andrew Morton, Thomas Gleixner, Nicholas Mc Guire


On Fri, 29 May 2009, wuzhangjin@gmail.com wrote:

> From: Wu Zhangjin <wuzj@lemote.com>
> 
> the dynamic function tracer needs to replace "nop" with "jump & link"
> instructions and vice versa.
> 
> Signed-off-by: Wu Zhangjin <wuzj@lemote.com>
> ---
>  arch/mips/Kconfig              |    3 +
>  arch/mips/include/asm/ftrace.h |   10 ++
>  arch/mips/kernel/Makefile      |    2 +
>  arch/mips/kernel/ftrace.c      |  217 ++++++++++++++++++++++++++++++++++++++++
>  arch/mips/kernel/mcount.S      |   31 ++++++
>  scripts/Makefile.build         |    1 +
>  scripts/recordmcount.pl        |   32 +++++-
>  7 files changed, 290 insertions(+), 6 deletions(-)
>  create mode 100644 arch/mips/kernel/ftrace.c
> 
> diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
> index d5c01ca..0c00536 100644
> --- a/arch/mips/Kconfig
> +++ b/arch/mips/Kconfig
> @@ -6,6 +6,9 @@ config MIPS
>  	select HAVE_ARCH_KGDB
>  	select HAVE_FUNCTION_TRACER
>  	select HAVE_FUNCTION_TRACE_MCOUNT_TEST
> +	select HAVE_DYNAMIC_FTRACE
> +	select HAVE_FTRACE_MCOUNT_RECORD
> +	select HAVE_FTRACE_NMI_ENTER if DYNAMIC_FTRACE
>  	# Horrible source of confusion.  Die, die, die ...
>  	select EMBEDDED
>  	select RTC_LIB
> diff --git a/arch/mips/include/asm/ftrace.h b/arch/mips/include/asm/ftrace.h
> index 5f8ebcf..b4970c9 100644
> --- a/arch/mips/include/asm/ftrace.h
> +++ b/arch/mips/include/asm/ftrace.h
> @@ -19,6 +19,16 @@
>  extern void _mcount(void);
>  #define mcount _mcount
>  
> +#ifdef CONFIG_DYNAMIC_FTRACE
> +/* relocation of mcount call site is the same as the address */
> +static inline unsigned long ftrace_call_adjust(unsigned long addr)
> +{
> +	return addr;
> +}
> +
> +struct dyn_arch_ftrace {
> +};
> +#endif /*  CONFIG_DYNAMIC_FTRACE */
>  #endif /* __ASSEMBLY__ */
>  #endif /* CONFIG_FUNCTION_TRACER */
>  #endif /* _ASM_MIPS_FTRACE_H */
> diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
> index d167dde..6b1a8a5 100644
> --- a/arch/mips/kernel/Makefile
> +++ b/arch/mips/kernel/Makefile
> @@ -11,6 +11,7 @@ obj-y		+= cpu-probe.o branch.o entry.o genex.o irq.o process.o \
>  ifdef CONFIG_FUNCTION_TRACER
>  # Do not profile debug and lowlevel utilities
>  CFLAGS_REMOVE_mcount.o = -pg
> +CFLAGS_REMOVE_ftrace.o = -pg
>  CFLAGS_REMOVE_early_printk.o = -pg
>  endif
>  
> @@ -31,6 +32,7 @@ obj-$(CONFIG_STACKTRACE)	+= stacktrace.o
>  obj-$(CONFIG_MODULES)		+= mips_ksyms.o module.o
>  
>  obj-$(CONFIG_FUNCTION_TRACER)	+= mcount.o
> +obj-$(CONFIG_FUNCTION_TRACER)	+= ftrace.o
>  
>  obj-$(CONFIG_CPU_LOONGSON2)	+= r4k_fpu.o r4k_switch.o
>  obj-$(CONFIG_CPU_MIPS32)	+= r4k_fpu.o r4k_switch.o
> diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c
> new file mode 100644
> index 0000000..827c128
> --- /dev/null
> +++ b/arch/mips/kernel/ftrace.c
> @@ -0,0 +1,217 @@
> +/*
> + * Code for replacing ftrace calls with jumps.
> + *
> + * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
> + * Copyright (C) 2009 DSLab, Lanzhou University, China
> + * Author: Wu Zhangjin <wuzj@lemote.com>
> + *
> + * Thanks goes to Steven Rostedt for writing the original x86 version.
> + */
> +
> +#include <linux/spinlock.h>
> +#include <linux/hardirq.h>
> +#include <linux/uaccess.h>
> +#include <linux/percpu.h>
> +#include <linux/sched.h>
> +#include <linux/init.h>
> +#include <linux/list.h>
> +#include <linux/ftrace.h>
> +
> +#include <asm/cacheflush.h>
> +#include <asm/ftrace.h>
> +#include <asm/asm.h>
> +#include <asm/unistd.h>
> +
> +#ifdef CONFIG_DYNAMIC_FTRACE
> +
> +#define JAL 0x0c000000	/* jump & link: ip --> ra, jump to target */
> +#define ADDR_MASK 0x03ffffff	/*  op_code|addr : 31...26|25 ....0 */
> +
> +static unsigned int ftrace_nop = 0x00000000;
> +
> +static unsigned char *ftrace_call_replace(unsigned long op_code,
> +					  unsigned long addr)
> +{
> +    static unsigned int op;
> +
> +    op = op_code | ((addr >> 2) & ADDR_MASK);
> +
> +    return (unsigned char *) &op;
> +}
> +
> +static atomic_t nmi_running = ATOMIC_INIT(0);
> +static int mod_code_status;	/* holds return value of text write */
> +static int mod_code_write;	/* set when NMI should do the write */
> +static void *mod_code_ip;	/* holds the IP to write to */
> +static void *mod_code_newcode;	/* holds the text to write to the IP */
> +
> +static unsigned nmi_wait_count;
> +static atomic_t nmi_update_count = ATOMIC_INIT(0);
> +
> +int ftrace_arch_read_dyn_info(char *buf, int size)
> +{
> +    int r;
> +
> +    r = snprintf(buf, size, "%u %u",
> +		 nmi_wait_count, atomic_read(&nmi_update_count));
> +    return r;
> +}
> +
> +static void ftrace_mod_code(void)
> +{
> +    /*
> +     * Yes, more than one CPU process can be writing to mod_code_status.
> +     *    (and the code itself)
> +     * But if one were to fail, then they all should, and if one were
> +     * to succeed, then they all should.
> +     */
> +    mod_code_status = probe_kernel_write(mod_code_ip, mod_code_newcode,
> +					 MCOUNT_INSN_SIZE);
> +
> +    /* if we fail, then kill any new writers */
> +    if (mod_code_status)
> +		mod_code_write = 0;
> +}
> +
> +void ftrace_nmi_enter(void)
> +{
> +    atomic_inc(&nmi_running);
> +    /* Must have nmi_running seen before reading write flag */
> +    smp_mb();
> +    if (mod_code_write) {
> +		ftrace_mod_code();
> +		atomic_inc(&nmi_update_count);
> +    }
> +}
> +
> +void ftrace_nmi_exit(void)
> +{
> +    /* Finish all executions before clearing nmi_running */
> +    smp_wmb();
> +    atomic_dec(&nmi_running);
> +}
> +
> +static void wait_for_nmi(void)
> +{
> +    int waited = 0;
> +
> +    while (atomic_read(&nmi_running)) {
> +		waited = 1;
> +		cpu_relax();
> +    }
> +
> +    if (waited)
> +		nmi_wait_count++;
> +}
> +
> +static int do_ftrace_mod_code(unsigned long ip, void *new_code)
> +{
> +    mod_code_ip = (void *) ip;
> +    mod_code_newcode = new_code;
> +
> +    /* The buffers need to be visible before we let NMIs write them */
> +    smp_wmb();
> +
> +    mod_code_write = 1;
> +
> +    /* Make sure write bit is visible before we wait on NMIs */
> +    smp_mb();
> +
> +    wait_for_nmi();
> +
> +    /* Make sure all running NMIs have finished before we write the code */
> +    smp_mb();
> +
> +    ftrace_mod_code();
> +
> +    /* Make sure the write happens before clearing the bit */
> +    smp_wmb();
> +
> +    mod_code_write = 0;
> +
> +    /* make sure NMIs see the cleared bit */
> +    smp_mb();
> +
> +    wait_for_nmi();
> +
> +    return mod_code_status;
> +}

Hmm, this is basically exactly the same as x86's version. I wounder if we 
should make a helper function in generic code to let archs use it. We can 
put the do_ftrace_mod_code into kernel/trace/ftrace.c and have weak 
functions for the ftrace_mod_code. If the arch needs this to handle NMIs, 
then it can use it. This code was tricky to write, and I would hate to 
have it duplicated in every arch.

> +
> +static unsigned char *ftrace_nop_replace(void)
> +{
> +    return (unsigned char *) &ftrace_nop;
> +}
> +
> +static int
> +ftrace_modify_code(unsigned long ip, unsigned char *old_code,
> +		   unsigned char *new_code)
> +{
> +    unsigned char replaced[MCOUNT_INSN_SIZE];
> +
> +    /*
> +     * Note: Due to modules and __init, code can
> +     *  disappear and change, we need to protect against faulting
> +     *  as well as code changing. We do this by using the
> +     *  probe_kernel_* functions.

hehe, this is an old comment. We don't touch __init sections anymore. I 
need to remove it from the x86 file.

> +     *
> +     * No real locking needed, this code is run through
> +     * kstop_machine, or before SMP starts.
> +     */
> +
> +    /* read the text we want to modify */
> +    if (probe_kernel_read(replaced, (void *) ip, MCOUNT_INSN_SIZE))
> +		return -EFAULT;
> +
> +    /* Make sure it is what we expect it to be */
> +    if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0)
> +		return -EINVAL;
> +
> +    /* replace the text with the new text */
> +    if (do_ftrace_mod_code(ip, new_code))
> +		return -EPERM;
> +
> +    return 0;
> +}
> +
> +int ftrace_make_nop(struct module *mod,
> +		    struct dyn_ftrace *rec, unsigned long addr)
> +{
> +    unsigned char *new, *old;
> +
> +    old = ftrace_call_replace(JAL, addr);
> +    new = ftrace_nop_replace();
> +
> +    return ftrace_modify_code(rec->ip, old, new);
> +}
> +
> +int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
> +{
> +    unsigned char *new, *old;
> +
> +    old = ftrace_nop_replace();
> +    new = ftrace_call_replace(JAL, addr);
> +
> +    return ftrace_modify_code(rec->ip, old, new);
> +}
> +
> +int ftrace_update_ftrace_func(ftrace_func_t func)
> +{
> +    unsigned long ip = (unsigned long) (&ftrace_call);
> +    unsigned char old[MCOUNT_INSN_SIZE], *new;
> +    int ret;
> +
> +    memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
> +    new = ftrace_call_replace(JAL, (unsigned long) func);
> +    ret = ftrace_modify_code(ip, old, new);
> +
> +    return ret;
> +}
> +
> +int __init ftrace_dyn_arch_init(void *data)
> +{
> +    /* The return code is returned via data */
> +    *(unsigned long *) data = 0;

egad, I need to clean that up too. I should return the true error code 
with ret. That is legacy from the first version of the dynamic ftrace 
code.

This review is showing all the flaws of my own work ;-)

> +
> +    return 0;
> +}
> +#endif				/* CONFIG_DYNAMIC_FTRACE */
> diff --git a/arch/mips/kernel/mcount.S b/arch/mips/kernel/mcount.S
> index 268724e..ce8a0ba 100644
> --- a/arch/mips/kernel/mcount.S
> +++ b/arch/mips/kernel/mcount.S
> @@ -67,6 +67,35 @@
>  	move ra, $1
>  	.endm
>  
> +#ifdef CONFIG_DYNAMIC_FTRACE
> +
> +LEAF(_mcount)
> +	RESTORE_SP_FOR_32BIT
> + 	RETURN_BACK
> + 	END(_mcount)
> +
> +NESTED(ftrace_caller, PT_SIZE, ra)
> +	RESTORE_SP_FOR_32BIT
> +	lw	t0, function_trace_stop
> +	bnez	t0, ftrace_stub
> +	nop
> +
> +	MCOUNT_SAVE_REGS
> +
> +	MCOUNT_SET_ARGS
> +	.globl ftrace_call
> +ftrace_call:
> +	jal	ftrace_stub
> +	nop
> +
> +	MCOUNT_RESTORE_REGS
> +	.globl ftrace_stub
> +ftrace_stub:
> +	RETURN_BACK
> +	END(ftrace_caller)
> +
> +#else	/* ! CONFIG_DYNAMIC_FTRACE */
> +
>  NESTED(_mcount, PT_SIZE, ra)
>  	RESTORE_SP_FOR_32BIT
>  	PTR_L	t0, function_trace_stop
> @@ -94,5 +123,7 @@ ftrace_stub:
>  	RETURN_BACK
>  	END(_mcount)
>  
> +#endif	/* ! CONFIG_DYNAMIC_FTRACE */
> +
>  	.set at
>  	.set reorder
> diff --git a/scripts/Makefile.build b/scripts/Makefile.build
> index 5c4b7a4..548d575 100644
> --- a/scripts/Makefile.build
> +++ b/scripts/Makefile.build
> @@ -207,6 +207,7 @@ endif
>  
>  ifdef CONFIG_FTRACE_MCOUNT_RECORD
>  cmd_record_mcount = perl $(srctree)/scripts/recordmcount.pl "$(ARCH)" \
> +	"$(if $(CONFIG_CPU_BIG_ENDIAN),big,little)" \
>  	"$(if $(CONFIG_64BIT),64,32)" \
>  	"$(OBJDUMP)" "$(OBJCOPY)" "$(CC)" "$(LD)" "$(NM)" "$(RM)" "$(MV)" \
>  	"$(if $(part-of-module),1,0)" "$(@)";

This big/little endian addition, I would like in its own patch.

> diff --git a/scripts/recordmcount.pl b/scripts/recordmcount.pl
> index 409596e..e963948 100755
> --- a/scripts/recordmcount.pl
> +++ b/scripts/recordmcount.pl
> @@ -100,13 +100,13 @@ $P =~ s@.*/@@g;
>  
>  my $V = '0.1';
>  
> -if ($#ARGV < 7) {
> -	print "usage: $P arch bits objdump objcopy cc ld nm rm mv is_module inputfile\n";
> +if ($#ARGV < 8) {
> +	print "usage: $P arch endian bits objdump objcopy cc ld nm rm mv is_module inputfile\n";
>  	print "version: $V\n";
>  	exit(1);
>  }
>  
> -my ($arch, $bits, $objdump, $objcopy, $cc,
> +my ($arch, $endian, $bits, $objdump, $objcopy, $cc,
>      $ld, $nm, $rm, $mv, $is_module, $inputfile) = @ARGV;
>  
>  # This file refers to mcount and shouldn't be ftraced, so lets' ignore it
> @@ -213,6 +213,26 @@ if ($arch eq "x86_64") {
>      if ($is_module eq "0") {
>          $cc .= " -mconstant-gp";
>      }
> +
> +} elsif ($arch eq "mips") {
> +	$mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\s_mcount\$";
> +	$objdump .= " -Melf-trad".$endian."mips ";
> +
> +	if ($endian eq "big") {
> +		$endian = " -EB ";
> +		$ld .= " -melf".$bits."btsmip";
> +	} else {
> +		$endian = " -EL ";
> +		$ld .= " -melf".$bits."ltsmip";
> +	}
> +
> +	$cc .= " -mno-abicalls -fno-pic -mabi=" . $bits . $endian;
> +    $ld .= $endian;
> +
> +    if ($bits == 64) {
> +		$type = ".dword";
> +    }

The mips addition to the recordmcount.pl is OK to keep with this patch.

> +
>  } else {
>      die "Arch $arch is not supported with CONFIG_FTRACE_MCOUNT_RECORD";
>  }
> @@ -441,12 +461,12 @@ if ($#converts >= 0) {
>      #
>      # Step 5: set up each local function as a global
>      #
> -    `$objcopy $globallist $inputfile $globalobj`;
> +    `$objcopy $globallist $inputfile $globalobj 2>&1 >/dev/null`;

Are these spitting out errors?

-- Steve

>  
>      #
>      # Step 6: Link the global version to our list.
>      #
> -    `$ld -r $globalobj $mcount_o -o $globalmix`;
> +    `$ld -r $globalobj $mcount_o -o $globalmix 2>&1 >/dev/null`;
>  
>      #
>      # Step 7: Convert the local functions back into local symbols
> @@ -454,7 +474,7 @@ if ($#converts >= 0) {
>      `$objcopy $locallist $globalmix $inputfile`;
>  
>      # Remove the temp files
> -    `$rm $globalobj $globalmix`;
> +    `$rm $globalobj $globalmix 2>&1 >/dev/null`;
>  
>  } else {
>  
> -- 
> 1.6.0.4
> 
> 

^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [PATCH v1 3/5] mips function graph tracer support
  2009-05-28 20:49 ` [PATCH v1 3/5] mips function graph " wuzhangjin
@ 2009-05-29  2:01   ` Steven Rostedt
  2009-05-29  9:07     ` Wu Zhangjin
  0 siblings, 1 reply; 15+ messages in thread
From: Steven Rostedt @ 2009-05-29  2:01 UTC (permalink / raw)
  To: wuzhangjin
  Cc: linux-mips, linux-kernel, Wu Zhangjin, Ralf Baechle, Ingo Molnar,
	Andrew Morton, Thomas Gleixner, Nicholas Mc Guire


On Fri, 29 May 2009, wuzhangjin@gmail.com wrote:
> diff --git a/arch/mips/kernel/mcount.S b/arch/mips/kernel/mcount.S
> index ce8a0ba..bd58f16 100644
> --- a/arch/mips/kernel/mcount.S
> +++ b/arch/mips/kernel/mcount.S
> @@ -28,6 +28,10 @@
>  	PTR_SUBU	sp, PT_SIZE
>  	PTR_S	ra, PT_R31(sp)
>  	PTR_S	$1, PT_R1(sp)
> +#ifdef CONFIG_FUNCTION_GRAPH_TRACER
> +	PTR_S	v0, PT_R2(sp)
> +	PTR_S	v1, PT_R3(sp)
> +#endif
>  	PTR_S	a0, PT_R4(sp)
>  	PTR_S	a1, PT_R5(sp)
>  	PTR_S	a2, PT_R6(sp)
> @@ -43,6 +47,10 @@
>  	.macro MCOUNT_RESTORE_REGS
>  	PTR_L	ra, PT_R31(sp)
>  	PTR_L	$1, PT_R1(sp)
> +#ifdef CONFIG_FUNCTION_GRAPH_TRACER
> +	PTR_L	v0, PT_R2(sp)
> +	PTR_L	v1, PT_R3(sp)
> +#endif
>  	PTR_L	a0, PT_R4(sp)
>  	PTR_L	a1, PT_R5(sp)
>  	PTR_L	a2, PT_R6(sp)
> @@ -89,6 +97,14 @@ ftrace_call:
>  	nop
>  
>  	MCOUNT_RESTORE_REGS
> +
> +#ifdef CONFIG_FUNCTION_GRAPH_TRACER
> +	.globl ftrace_graph_call
> +ftrace_graph_call:
> +	j	ftrace_stub
> +	nop
> +#endif
> +
>  	.globl ftrace_stub
>  ftrace_stub:
>  	RETURN_BACK
> @@ -106,7 +122,15 @@ NESTED(_mcount, PT_SIZE, ra)
>  	PTR_L	t1, ftrace_trace_function /* please don't use t1 later, safe? */
>  	bne	t0, t1, static_trace
>  	nop
> -
> +#ifdef	CONFIG_FUNCTION_GRAPH_TRACER
> +	PTR_L	t2, ftrace_graph_return
> +	bne	t0,	t2, ftrace_graph_caller
> +	nop
> +	PTR_LA	t0, ftrace_graph_entry_stub
> +	PTR_L	t2, ftrace_graph_entry
> +	bne	t0,	t2, ftrace_graph_caller
> +	nop
> +#endif
>  	j	ftrace_stub
>  	nop
>  
> @@ -125,5 +149,37 @@ ftrace_stub:
>  
>  #endif	/* ! CONFIG_DYNAMIC_FTRACE */
>  
> +#ifdef CONFIG_FUNCTION_GRAPH_TRACER
> +
> +NESTED(ftrace_graph_caller, PT_SIZE, ra)
> +	MCOUNT_SAVE_REGS
> +
> +	MCOUNT_SET_ARGS
> +	jal	prepare_ftrace_return
> +	nop
> +
> +	/* overwrite the parent as &return_to_handler: v0 -> $1(at) */
> +	PTR_S	v0, PT_R1(sp)
> +
> +	MCOUNT_RESTORE_REGS
> +	RETURN_BACK
> +	END(ftrace_graph_caller)
> +
> +	.align	2
> +	.globl	return_to_handler
> +return_to_handler:
> +	MCOUNT_SAVE_REGS

I'm not sure which version of function_graph tracer you looked at, but I'm 
pretty sure you can just save the return code registers of the function.

return_to_handler is called on the return of a function. Thus, any callee 
saved registers have already been restored and would also be restored by 
ftrace_return_to_handler.  Any callee-saved registers would have been saved by 
the function you are about to return to.

Thus the only things you need to save are the return code registers.

-- Steve


> +
> +	jal	ftrace_return_to_handler
> +	nop
> +
> +	/* restore the real parent address: v0 -> ra */
> +	PTR_S	v0, PT_R31(sp)
> +
> +	MCOUNT_RESTORE_REGS
> +	RETURN_BACK
> +
> +#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
> +
>  	.set at
>  	.set reorder
> diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S
> index 58738c8..67435e5 100644
> --- a/arch/mips/kernel/vmlinux.lds.S
> +++ b/arch/mips/kernel/vmlinux.lds.S
> @@ -36,6 +36,7 @@ SECTIONS
>  		SCHED_TEXT
>  		LOCK_TEXT
>  		KPROBES_TEXT
> +		IRQENTRY_TEXT
>  		*(.text.*)
>  		*(.fixup)
>  		*(.gnu.warning)
> -- 
> 1.6.0.4
> 
> 

^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [PATCH v1 4/5] mips specific clock function to get precise timestamp
  2009-05-28 20:49 ` [PATCH v1 4/5] mips specific clock function to get precise timestamp wuzhangjin
@ 2009-05-29  2:06   ` Steven Rostedt
  0 siblings, 0 replies; 15+ messages in thread
From: Steven Rostedt @ 2009-05-29  2:06 UTC (permalink / raw)
  To: wuzhangjin
  Cc: linux-mips, linux-kernel, Wu Zhangjin, Ralf Baechle, Ingo Molnar,
	Andrew Morton, Thomas Gleixner, Nicholas Mc Guire


On Fri, 29 May 2009, wuzhangjin@gmail.com wrote:

> From: Wu Zhangjin <wuzj@lemote.com>
> 
> by default, ring_buffer_time_stamp calling sched_clock(jiffies-based)
> function to get timestamp, in x86, there is a tsc(64bit) based
> sched_clock, but in mips, the 'tsc'(clock counter) is only 32bit long,
> which will easily rollover, and there is no precise sched_clock in mips,
> we need to get one ourselves.
> 
> to avoid invading the whole linux-mips, i do not want to implement a
> tsc-based native_sched_clock like x86 does. because, there is a need to
> handling rollover of the only 32-bit long 'tsc' of mips, which will need
> extra overhead. in reality, i have tried to implement one(just like the
> ring_buffer_time_stamp here does), but make the kernel hangs when
> booting, i am not sure why it not work.
> 
> herein, i just implement a mips-specific ring_buffer_time_stamp in
> arch/mips/kernel/ftrace.c via adding  __attribute__((weak)) before
> ring_buffer_time_stamp(...) {} in kernel/trace/ring_buffer.c and do
> something in arch/mips/kernel/ftrace.c like this:
> 
> u64  ring_buffer_time_stamp \
>        __attribute__((alias("native_ring_buffer_time_stamp")));
> 
> and, as the same, there is also a need to implement a mips-specific
> trace_clock_local based on the above ring_buffer_timep_stamp, this clock
> function is called in function graph tracer to get calltime & rettime of
> a function.
> 
> and what about the trace_clock and trace_clock_global function, should
> we also implement a mips-secific one? i am not sure.
> 
> Signed-off-by: Wu Zhangjin <wuzj@lemote.com>
> ---
>  arch/mips/kernel/Makefile       |    2 +
>  arch/mips/kernel/csrc-r4k.c     |    2 +-
>  arch/mips/kernel/ftrace_clock.c |   77 +++++++++++++++++++++++++++++++++++++++
>  kernel/trace/ring_buffer.c      |    3 +-
>  kernel/trace/trace_clock.c      |    2 +-
>  5 files changed, 83 insertions(+), 3 deletions(-)
>  create mode 100644 arch/mips/kernel/ftrace_clock.c
> 
> diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
> index 6b1a8a5..5dec76f 100644
> --- a/arch/mips/kernel/Makefile
> +++ b/arch/mips/kernel/Makefile
> @@ -12,6 +12,7 @@ ifdef CONFIG_FUNCTION_TRACER
>  # Do not profile debug and lowlevel utilities
>  CFLAGS_REMOVE_mcount.o = -pg
>  CFLAGS_REMOVE_ftrace.o = -pg
> +CFLAGS_REMOVE_ftrace_clock.o = -pg
>  CFLAGS_REMOVE_early_printk.o = -pg
>  endif
>  
> @@ -33,6 +34,7 @@ obj-$(CONFIG_MODULES)		+= mips_ksyms.o module.o
>  
>  obj-$(CONFIG_FUNCTION_TRACER)	+= mcount.o
>  obj-$(CONFIG_FUNCTION_TRACER)	+= ftrace.o
> +obj-$(CONFIG_NOP_TRACER)	+= ftrace_clock.o
>  
>  obj-$(CONFIG_CPU_LOONGSON2)	+= r4k_fpu.o r4k_switch.o
>  obj-$(CONFIG_CPU_MIPS32)	+= r4k_fpu.o r4k_switch.o
> diff --git a/arch/mips/kernel/csrc-r4k.c b/arch/mips/kernel/csrc-r4k.c
> index e95a3cd..3da1c7a 100644
> --- a/arch/mips/kernel/csrc-r4k.c
> +++ b/arch/mips/kernel/csrc-r4k.c
> @@ -10,7 +10,7 @@
>  
>  #include <asm/time.h>
>  
> -static cycle_t c0_hpt_read(struct clocksource *cs)
> +static cycle_t notrace c0_hpt_read(struct clocksource *cs)
>  {
>  	return read_c0_count();
>  }
> diff --git a/arch/mips/kernel/ftrace_clock.c b/arch/mips/kernel/ftrace_clock.c
> new file mode 100644
> index 0000000..2f3b05a
> --- /dev/null
> +++ b/arch/mips/kernel/ftrace_clock.c
> @@ -0,0 +1,77 @@
> +/*
> + * This file is subject to the terms and conditions of the GNU General Public
> + * License.  See the file "COPYING" in the main directory of this archive for
> + * more details.
> + *
> + * Copyright (C) 2009 DSLab, Lanzhou University, China
> + * Author: Wu Zhangjin <wuzj@lemote.com>
> + */
> +
> +#include <linux/types.h>
> +#include <linux/sched.h>
> +#include <linux/jiffies.h>
> +#include <linux/clocksource.h>
> +#include <linux/ring_buffer.h>
> +
> +/* Up this if you want to test the TIME_EXTENTS and normalization */
> +#ifndef DEBUG_SHIFT
> +#define DEBUG_SHIFT 0
> +#endif
> +
> +/* mips-specific ring_buffer_time_stamp implementation,
> + * the original one is defined in kernel/trace/ring_buffer.c
> + */
> +
> +u64 native_ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
> +{
> +	u64 current_cycles;
> +	static unsigned long old_jiffies;
> +	static u64 time, old_cycles;
> +
> +	preempt_disable_notrace();
> +    /* update timestamp to avoid missing the timer interrupt */
> +	if (time_before(jiffies, old_jiffies)) {
> +		old_jiffies = jiffies;
> +		time = sched_clock();
> +		old_cycles = clock->cycle_last;
> +	}
> +	current_cycles = clock->read(clock);
> +
> +	time = (time + cyc2ns(clock, (current_cycles - old_cycles)
> +				& clock->mask)) << DEBUG_SHIFT;
> +
> +	old_cycles = current_cycles;
> +	preempt_enable_no_resched_notrace();
> +
> +	return time;
> +}
> +
> +u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
> +		__attribute__((alias("native_ring_buffer_time_stamp")));
> +
> +/*
> + * native_trace_clock_local(): the simplest and least coherent tracing clock.
> + *
> + * Useful for tracing that does not cross to other CPUs nor
> + * does it go through idle events.
> + */
> +u64 native_trace_clock_local(void)
> +{
> +	unsigned long flags;
> +	u64 clock;
> +
> +	/*
> +	 * sched_clock() is an architecture implemented, fast, scalable,
> +	 * lockless clock. It is not guaranteed to be coherent across
> +	 * CPUs, nor across CPU idle events.
> +	 */
> +	raw_local_irq_save(flags);
> +	clock = ring_buffer_time_stamp(NULL, raw_smp_processor_id());
> +	raw_local_irq_restore(flags);
> +
> +	return clock;
> +}
> +
> +u64 trace_clock_local(void)
> +		__attribute__((alias("native_trace_clock_local")));
> +
> diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
> index 960cbf4..717bd8e 100644
> --- a/kernel/trace/ring_buffer.c
> +++ b/kernel/trace/ring_buffer.c
> @@ -420,7 +420,8 @@ struct ring_buffer_iter {
>  /* Up this if you want to test the TIME_EXTENTS and normalization */
>  #define DEBUG_SHIFT 0
>  
> -u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
> +u64 __attribute__((weak)) ring_buffer_time_stamp(struct ring_buffer *buffer,
> +				int cpu)
>  {

Note, you want to look at what is in tip:

http://people.redhat.com/~mingo/tip.git/README

The latest ftrace code is there. I changed the ring_buffer_time_stamp to 
use an individual buffer clock, which defaults to trace_clock_local.
Then only the trace_clock_local needs to be weak.

-- Steve


>  	u64 time;
>  
> diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
> index b588fd8..78c98c8 100644
> --- a/kernel/trace/trace_clock.c
> +++ b/kernel/trace/trace_clock.c
> @@ -26,7 +26,7 @@
>   * Useful for tracing that does not cross to other CPUs nor
>   * does it go through idle events.
>   */
> -u64 notrace trace_clock_local(void)
> +u64 __attribute__((weak)) notrace trace_clock_local(void)
>  {
>  	unsigned long flags;
>  	u64 clock;
> -- 
> 1.6.0.4
> 
> 

^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [PATCH v1 5/5] mips specific system call tracer
  2009-05-28 20:49 ` [PATCH v1 5/5] mips specific system call tracer wuzhangjin
@ 2009-05-29  2:09   ` Steven Rostedt
  0 siblings, 0 replies; 15+ messages in thread
From: Steven Rostedt @ 2009-05-29  2:09 UTC (permalink / raw)
  To: wuzhangjin
  Cc: linux-mips, LKML, Wu Zhangjin, Ralf Baechle, Ingo Molnar,
	Andrew Morton, Thomas Gleixner, Nicholas Mc Guire,
	Frederic Weisbecker


Since Frederic did the x86 version, I just added him to the Cc here.

-- Steve


On Fri, 29 May 2009, wuzhangjin@gmail.com wrote:

> From: Wu Zhangjin <wuzj@lemote.com>
> 
> FIXME: there are several different sys_call_entry in mips64, but
> currently, i only use the the one in arch/mips/kernel/scall64-o32.S
> so,if people not use o32 standard, it will not compiled normally.
> 
> the system call tracing demo in a debian system on
> qemu-system-mipsel/malta:
> 
> debian-mips-malta:~# mount -t debugfs nodev /debug
> debian-mips-malta:~# echo 20000 > /debug/tracing/buffer_size_kb
> debian-mips-malta:~# cat /debug/tracing/available_tracers
> syscall nop
> debian-mips-malta:~# echo syscall > /debug/tracing/current_tracer
> debian-mips-malta:~# echo 1 > /debug/tracing/tracing_enabled
> debian-mips-malta:~# sleep 1
> debian-mips-malta:~# echo 0 > /debug/tracing/tracing_enabled
> debian-mips-malta:~# cat /debug/tracing/trace | head -20
>            <...>-533   [000]    60.458291: sys_write(fd: 1, buf: 4fc408, count: 8)
>            <...>-533   [000]    64.325614: sys_getrlimit(resource: 3, rlim: 530020)
>            <...>-533   [000]    64.327089: sys_read(fd: 2, buf: 4fc008, count: 6)
>            <...>-533   [000]    64.969663: sys_exit(error_code: 2)
>            <...>-533   [000]    65.608794: sys_exit(error_code: 2)
>            <...>-533   [000]    66.231796: sys_read(fd: 2, buf: 4fc008, count: 6)
>            <...>-533   [000]    66.913687: sys_open(filename: 1, flags: 0, mode: a)
>            <...>-533   [000]    66.914617: sys_exit(error_code: 1)
>            <...>-533   [000]    70.797507: sys_exit(error_code: 503be8)
>            <...>-536   [000]    70.833108: sys_exit(error_code: 2aac6cfc)
>            <...>-536   [000]    70.833897: sys_exit(error_code: 2aac6540)
>            <...>-536   [000]    70.835711: sys_exit(error_code: 2aac6cfc)
>            <...>-536   [000]    70.840609: sys_lchown(filename: 3, user: 7fb08b38, group: 20)
>            <...>-533   [000]    71.877785: sys_open(filename: ffffffff, flags: 7fcf08c8, mode: b)
>            <...>-533   [000]    75.531122: sys_open(filename: 1, flags: 0, mode: a)
> 
> Signed-off-by: Wu Zhangjin <wuzj@lemote.com>
> ---
>  arch/mips/Kconfig                   |    1 +
>  arch/mips/include/asm/ptrace.h      |    2 +
>  arch/mips/include/asm/reg.h         |    5 ++
>  arch/mips/include/asm/syscall.h     |   84 +++++++++++++++++++++++++++++++++++
>  arch/mips/include/asm/thread_info.h |    5 ++-
>  arch/mips/kernel/Makefile           |    1 +
>  arch/mips/kernel/entry.S            |    2 +-
>  arch/mips/kernel/ftrace.c           |   71 +++++++++++++++++++++++++++++
>  arch/mips/kernel/ptrace.c           |   14 +++++-
>  arch/mips/kernel/scall64-o32.S      |    2 +-
>  10 files changed, 182 insertions(+), 5 deletions(-)
>  create mode 100644 arch/mips/include/asm/syscall.h
> 
> diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
> index ac1437e..f488027 100644
> --- a/arch/mips/Kconfig
> +++ b/arch/mips/Kconfig
> @@ -10,6 +10,7 @@ config MIPS
>  	select HAVE_FTRACE_MCOUNT_RECORD
>  	select HAVE_FTRACE_NMI_ENTER if DYNAMIC_FTRACE
>  	select HAVE_FUNCTION_GRAPH_TRACER
> +	select HAVE_FTRACE_SYSCALLS
>  	# Horrible source of confusion.  Die, die, die ...
>  	select EMBEDDED
>  	select RTC_LIB
> diff --git a/arch/mips/include/asm/ptrace.h b/arch/mips/include/asm/ptrace.h
> index ce47118..32e5b62 100644
> --- a/arch/mips/include/asm/ptrace.h
> +++ b/arch/mips/include/asm/ptrace.h
> @@ -45,6 +45,8 @@ struct pt_regs {
>  	unsigned long cp0_badvaddr;
>  	unsigned long cp0_cause;
>  	unsigned long cp0_epc;
> +	/* Used for restarting system calls */
> +	unsigned long orig_v0;
>  #ifdef CONFIG_MIPS_MT_SMTC
>  	unsigned long cp0_tcstatus;
>  #endif /* CONFIG_MIPS_MT_SMTC */
> diff --git a/arch/mips/include/asm/reg.h b/arch/mips/include/asm/reg.h
> index 634b55d..93d66bc 100644
> --- a/arch/mips/include/asm/reg.h
> +++ b/arch/mips/include/asm/reg.h
> @@ -65,6 +65,8 @@
>  #define EF_CP0_CAUSE		43
>  #define EF_UNUSED0		44
>  
> +#define EF_ORIG_V0		45
> +
>  #define EF_SIZE			180
>  
>  #endif
> @@ -121,6 +123,9 @@
>  #define EF_CP0_STATUS		36
>  #define EF_CP0_CAUSE		37
>  
> +
> +#define EF_ORIG_V0			38
> +
>  #define EF_SIZE			304	/* size in bytes */
>  
>  #endif /* CONFIG_64BIT */
> diff --git a/arch/mips/include/asm/syscall.h b/arch/mips/include/asm/syscall.h
> new file mode 100644
> index 0000000..b785098
> --- /dev/null
> +++ b/arch/mips/include/asm/syscall.h
> @@ -0,0 +1,84 @@
> +/*
> + * Access to user system call parameters and results
> + *
> + * Copyright (C) 2008 Red Hat, Inc.  All rights reserved.
> + * Copyright (C) 2009 DSLab, Lanzhou University, China
> + * Author: Wu Zhangjin <wuzj@lemote.com>
> + *
> + * This copyrighted material is made available to anyone wishing to use,
> + * modify, copy, or redistribute it subject to the terms and conditions
> + * of the GNU General Public License v.2.
> + *
> + * See asm-generic/syscall.h for descriptions of what we must do here.
> + */
> +
> +#ifndef _ASM_SYSCALL_H
> +#define _ASM_SYSCALL_H	1
> +
> +#include <linux/sched.h>
> +
> +static inline long syscall_get_nr(struct task_struct *task,
> +				  struct pt_regs *regs)
> +{
> +	/*        syscall   Exc-Code: 0 1000 00     v0 */
> +	return ((regs->cp0_cause&0xff) == 0x20)  ? regs->regs[2] : -1L;
> +}
> +
> +static inline void syscall_rollback(struct task_struct *task,
> +				    struct pt_regs *regs)
> +{
> +	regs->regs[2] = regs->orig_v0;
> +}
> +
> +static inline long syscall_get_error(struct task_struct *task,
> +				     struct pt_regs *regs)
> +{
> +	return regs->regs[2] ? -regs->regs[2] : 0;
> +}
> +
> +static inline long syscall_get_return_value(struct task_struct *task,
> +					    struct pt_regs *regs)
> +{
> +	return regs->regs[2];
> +}
> +
> +static inline void syscall_set_return_value(struct task_struct *task,
> +					    struct pt_regs *regs,
> +					    int error, long val)
> +{
> +	if (error)
> +		regs->regs[2] = -error;
> +	else
> +		regs->regs[2] = val;
> +}
> +
> +static inline void syscall_get_arguments(struct task_struct *task,
> +					 struct pt_regs *regs,
> +					 unsigned int i, unsigned int n,
> +					 unsigned long *args)
> +{
> +#ifdef CONFIG_32BIT
> +	/* fixme: only 4 argument register available in mip32, so, how to handle
> +	 * others?
> +	 */
> +	BUG_ON(i + n > 4);
> +#else
> +	BUG_ON(i + n > 6);
> +#endif
> +	memcpy(args, &regs->regs[4 + i], n * sizeof(args[0]));
> +}
> +
> +static inline void syscall_set_arguments(struct task_struct *task,
> +					 struct pt_regs *regs,
> +					 unsigned int i, unsigned int n,
> +					 const unsigned long *args)
> +{
> +#ifdef CONFIG_32BIT
> +	BUG_ON(i + n > 4);
> +#else
> +	BUG_ON(i + n > 6);
> +#endif
> +	memcpy(&regs->regs[4 + i], args, n * sizeof(args[0]));
> +}
> +
> +#endif	/* _ASM_SYSCALL_H */
> diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
> index 143a481..1d55dc0 100644
> --- a/arch/mips/include/asm/thread_info.h
> +++ b/arch/mips/include/asm/thread_info.h
> @@ -128,6 +128,7 @@ register struct thread_info *__current_thread_info __asm__("$28");
>  #define TIF_32BIT_ADDR		23	/* 32-bit address space (o32/n32) */
>  #define TIF_FPUBOUND		24	/* thread bound to FPU-full CPU set */
>  #define TIF_LOAD_WATCH		25	/* If set, load watch registers */
> +#define TIF_SYSCALL_FTRACE	27	/* for ftrace syscall instrumentation */
>  #define TIF_SYSCALL_TRACE	31	/* syscall trace active */
>  
>  #ifdef CONFIG_MIPS32_O32
> @@ -151,11 +152,13 @@ register struct thread_info *__current_thread_info __asm__("$28");
>  #define _TIF_32BIT_ADDR		(1<<TIF_32BIT_ADDR)
>  #define _TIF_FPUBOUND		(1<<TIF_FPUBOUND)
>  #define _TIF_LOAD_WATCH		(1<<TIF_LOAD_WATCH)
> +#define _TIF_SYSCALL_FTRACE	(1<<TIF_SYSCALL_FTRACE)
>  
>  /* work to do on interrupt/exception return */
>  #define _TIF_WORK_MASK		(0x0000ffef & ~_TIF_SECCOMP)
>  /* work to do on any return to u-space */
> -#define _TIF_ALLWORK_MASK	(0x8000ffff & ~_TIF_SECCOMP)
> +#define _TIF_ALLWORK_MASK	\
> +	((0x8000ffff & ~_TIF_SECCOMP) | _TIF_SYSCALL_FTRACE)
>  
>  #endif /* __KERNEL__ */
>  
> diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
> index 5dec76f..8b4fafa 100644
> --- a/arch/mips/kernel/Makefile
> +++ b/arch/mips/kernel/Makefile
> @@ -34,6 +34,7 @@ obj-$(CONFIG_MODULES)		+= mips_ksyms.o module.o
>  
>  obj-$(CONFIG_FUNCTION_TRACER)	+= mcount.o
>  obj-$(CONFIG_FUNCTION_TRACER)	+= ftrace.o
> +obj-$(CONFIG_FTRACE_SYSCALLS)	+= ftrace.o
>  obj-$(CONFIG_NOP_TRACER)	+= ftrace_clock.o
>  
>  obj-$(CONFIG_CPU_LOONGSON2)	+= r4k_fpu.o r4k_switch.o
> diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S
> index ffa3310..786e4ef 100644
> --- a/arch/mips/kernel/entry.S
> +++ b/arch/mips/kernel/entry.S
> @@ -167,7 +167,7 @@ work_notifysig:				# deal with pending signals and
>  FEXPORT(syscall_exit_work_partial)
>  	SAVE_STATIC
>  syscall_exit_work:
> -	li	t0, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
> +	li	t0, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_FTRACE
>  	and	t0, a2			# a2 is preloaded with TI_FLAGS
>  	beqz	t0, work_pending	# trace bit set?
>  	local_irq_enable		# could let do_syscall_trace()
> diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c
> index e7f15f7..5967998 100644
> --- a/arch/mips/kernel/ftrace.c
> +++ b/arch/mips/kernel/ftrace.c
> @@ -22,6 +22,8 @@
>  #include <asm/asm.h>
>  #include <asm/unistd.h>
>  
> +#include <trace/syscall.h>
> +
>  #ifdef CONFIG_DYNAMIC_FTRACE
>  
>  #define JAL 0x0c000000	/* jump & link: ip --> ra, jump to target */
> @@ -287,3 +289,72 @@ out:
>      return parent_ip;
>  }
>  #endif				/* CONFIG_FUNCTION_GRAPH_TRACER */
> +
> +#ifdef CONFIG_FTRACE_SYSCALLS
> +
> +extern unsigned long __start_syscalls_metadata[];
> +extern unsigned long __stop_syscalls_metadata[];
> +
> +/* fixme: in mips64, there are different entries of sys_call_table when using
> + * different standards, in loongson2f based machines: Fuloong & Yeeloong, the
> + * system use o32 standard, so here, we only use the sys_call_table in
> + * arch/mips/kernel/scall64-o32.S */
> +
> +extern unsigned long *sys_call_table;
> +
> +static struct syscall_metadata **syscalls_metadata;
> +
> +static struct syscall_metadata *find_syscall_meta(unsigned long *syscall)
> +{
> +	struct syscall_metadata *start;
> +	struct syscall_metadata *stop;
> +	char str[KSYM_SYMBOL_LEN];
> +
> +
> +	start = (struct syscall_metadata *)__start_syscalls_metadata;
> +	stop = (struct syscall_metadata *)__stop_syscalls_metadata;
> +	kallsyms_lookup((unsigned long) syscall, NULL, NULL, NULL, str);
> +
> +	for ( ; start < stop; start++) {
> +		if (start->name && !strcmp(start->name, str))
> +			return start;
> +	}
> +	return NULL;
> +}
> +
> +struct syscall_metadata *syscall_nr_to_meta(int nr)
> +{
> +	if (!syscalls_metadata || nr >= __NR_Linux_syscalls || nr < 0)
> +		return NULL;
> +
> +	return syscalls_metadata[nr];
> +}
> +
> +void arch_init_ftrace_syscalls(void)
> +{
> +	int i;
> +	struct syscall_metadata *meta;
> +	unsigned long **psys_syscall_table = &sys_call_table;
> +	static atomic_t refs;
> +
> +	if (atomic_inc_return(&refs) != 1)
> +		goto end;
> +
> +	syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) *
> +					__NR_Linux_syscalls, GFP_KERNEL);
> +	if (!syscalls_metadata) {
> +		WARN_ON(1);
> +		return;
> +	}
> +
> +	for (i = 0; i < __NR_Linux_syscalls; i++) {
> +		meta = find_syscall_meta(psys_syscall_table[i]);
> +		syscalls_metadata[i] = meta;
> +	}
> +	return;
> +
> +	/* Paranoid: avoid overflow */
> +end:
> +	atomic_dec(&refs);
> +}
> +#endif	/* CONFIG_FTRACE_SYSCALLS */
> diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
> index 054861c..fa762dc 100644
> --- a/arch/mips/kernel/ptrace.c
> +++ b/arch/mips/kernel/ptrace.c
> @@ -25,6 +25,7 @@
>  #include <linux/security.h>
>  #include <linux/audit.h>
>  #include <linux/seccomp.h>
> +#include <linux/ftrace.h>
>  
>  #include <asm/byteorder.h>
>  #include <asm/cpu.h>
> @@ -39,6 +40,7 @@
>  #include <asm/bootinfo.h>
>  #include <asm/reg.h>
>  
> +#include <trace/syscall.h>
>  /*
>   * Called by kernel/ptrace.c when detaching..
>   *
> @@ -60,7 +62,7 @@ int ptrace_getregs(struct task_struct *child, __s64 __user *data)
>  	struct pt_regs *regs;
>  	int i;
>  
> -	if (!access_ok(VERIFY_WRITE, data, 38 * 8))
> +	if (!access_ok(VERIFY_WRITE, data, 39 * 8))
>  		return -EIO;
>  
>  	regs = task_pt_regs(child);
> @@ -73,6 +75,7 @@ int ptrace_getregs(struct task_struct *child, __s64 __user *data)
>  	__put_user((long)regs->cp0_badvaddr, data + EF_CP0_BADVADDR - EF_R0);
>  	__put_user((long)regs->cp0_status, data + EF_CP0_STATUS - EF_R0);
>  	__put_user((long)regs->cp0_cause, data + EF_CP0_CAUSE - EF_R0);
> +	__put_user((long)regs->orig_v0, data + EF_ORIG_V0 - EF_R0);
>  
>  	return 0;
>  }
> @@ -87,7 +90,7 @@ int ptrace_setregs(struct task_struct *child, __s64 __user *data)
>  	struct pt_regs *regs;
>  	int i;
>  
> -	if (!access_ok(VERIFY_READ, data, 38 * 8))
> +	if (!access_ok(VERIFY_READ, data, 39 * 8))
>  		return -EIO;
>  
>  	regs = task_pt_regs(child);
> @@ -97,6 +100,7 @@ int ptrace_setregs(struct task_struct *child, __s64 __user *data)
>  	__get_user(regs->lo, data + EF_LO - EF_R0);
>  	__get_user(regs->hi, data + EF_HI - EF_R0);
>  	__get_user(regs->cp0_epc, data + EF_CP0_EPC - EF_R0);
> +	__get_user(regs->orig_v0, data + EF_ORIG_V0 - EF_R0);
>  
>  	/* badvaddr, status, and cause may not be written.  */
>  
> @@ -575,6 +579,9 @@ asmlinkage void do_syscall_trace(struct pt_regs *regs, int entryexit)
>  	if (!(current->ptrace & PT_PTRACED))
>  		goto out;
>  
> +	if (unlikely(test_thread_flag(TIF_SYSCALL_FTRACE)))
> +		ftrace_syscall_exit(regs);
> +
>  	if (!test_thread_flag(TIF_SYSCALL_TRACE))
>  		goto out;
>  
> @@ -594,6 +601,9 @@ asmlinkage void do_syscall_trace(struct pt_regs *regs, int entryexit)
>  	}
>  
>  out:
> +	if (unlikely(test_thread_flag(TIF_SYSCALL_FTRACE)))
> +		ftrace_syscall_enter(regs);
> +
>  	if (unlikely(current->audit_context) && !entryexit)
>  		audit_syscall_entry(audit_arch(), regs->regs[0],
>  				    regs->regs[4], regs->regs[5],
> diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
> index a5598b2..dd1f13a 100644
> --- a/arch/mips/kernel/scall64-o32.S
> +++ b/arch/mips/kernel/scall64-o32.S
> @@ -202,7 +202,7 @@ einval:	li	v0, -ENOSYS
>  
>  	.align	3
>  	.type	sys_call_table,@object
> -sys_call_table:
> +EXPORT(sys_call_table)
>  	PTR	sys32_syscall			/* 4000 */
>  	PTR	sys_exit
>  	PTR	sys_fork
> -- 
> 1.6.0.4
> 
> 

^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [PATCH v1 1/5] mips static function tracer support
  2009-05-29  1:13   ` Steven Rostedt
@ 2009-05-29  6:11     ` Wu Zhangjin
  0 siblings, 0 replies; 15+ messages in thread
From: Wu Zhangjin @ 2009-05-29  6:11 UTC (permalink / raw)
  To: Steven Rostedt
  Cc: linux-mips, linux-kernel, Ralf Baechle, Ingo Molnar,
	Andrew Morton, Thomas Gleixner, Nicholas Mc Guire

On Thu, 2009-05-28 at 21:13 -0400, Steven Rostedt wrote:
> 
> On Fri, 29 May 2009, wuzhangjin@gmail.com wrote:
> 
> > From: Wu Zhangjin <wuzj@lemote.com>
> > 
> > if -pg of gcc is enabled. a calling to _mcount will be inserted to each
> > kernel function. so, there is a possibility to trace the functions in
> > _mcount.
> > 
> > here is the implementation of mips specific _mcount for static function
> > tracer.
> > 
> > -ffunction-sections option not works with -pg, so disable it if enables
> > FUNCTION_TRACER.
> > 
> > Signed-off-by: Wu Zhangjin <wuzj@lemote.com>
> > ---
> >  arch/mips/Kconfig              |    2 +
> >  arch/mips/Makefile             |    2 +
> >  arch/mips/include/asm/ftrace.h |   25 ++++++++++-
> >  arch/mips/kernel/Makefile      |    8 +++
> >  arch/mips/kernel/mcount.S      |   98 ++++++++++++++++++++++++++++++++++++++++
> >  arch/mips/kernel/mips_ksyms.c  |    5 ++
> >  6 files changed, 139 insertions(+), 1 deletions(-)
> >  create mode 100644 arch/mips/kernel/mcount.S
> > 
> > diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
> > index 09b1287..d5c01ca 100644
> > --- a/arch/mips/Kconfig
> > +++ b/arch/mips/Kconfig
> > @@ -4,6 +4,8 @@ config MIPS
> >  	select HAVE_IDE
> >  	select HAVE_OPROFILE
> >  	select HAVE_ARCH_KGDB
> > +	select HAVE_FUNCTION_TRACER
> > +	select HAVE_FUNCTION_TRACE_MCOUNT_TEST
> >  	# Horrible source of confusion.  Die, die, die ...
> >  	select EMBEDDED
> >  	select RTC_LIB
> > diff --git a/arch/mips/Makefile b/arch/mips/Makefile
> > index c4cae9e..f86fb15 100644
> > --- a/arch/mips/Makefile
> > +++ b/arch/mips/Makefile
> > @@ -48,7 +48,9 @@ ifneq ($(SUBARCH),$(ARCH))
> >    endif
> >  endif
> >  
> > +ifndef CONFIG_FUNCTION_TRACER
> >  cflags-y := -ffunction-sections
> > +endif
> >  cflags-y += $(call cc-option, -mno-check-zero-division)
> >  
> >  ifdef CONFIG_32BIT
> > diff --git a/arch/mips/include/asm/ftrace.h b/arch/mips/include/asm/ftrace.h
> > index 40a8c17..5f8ebcf 100644
> > --- a/arch/mips/include/asm/ftrace.h
> > +++ b/arch/mips/include/asm/ftrace.h
> > @@ -1 +1,24 @@
> > -/* empty */
> > +/*
> > + * This file is subject to the terms and conditions of the GNU General Public
> > + * License.  See the file "COPYING" in the main directory of this archive for
> > + * more details.
> > + *
> > + * Copyright (C) 2009 DSLab, Lanzhou University, China
> > + * Author: Wu Zhangjin <wuzj@lemote.com>
> > + */
> > +
> > +#ifndef _ASM_MIPS_FTRACE_H
> > +#define _ASM_MIPS_FTRACE_H
> > +
> > +#ifdef CONFIG_FUNCTION_TRACER
> > +
> > +#define MCOUNT_ADDR ((unsigned long)(_mcount))
> > +#define MCOUNT_INSN_SIZE 4		/* sizeof mcount call */
> > +
> > +#ifndef __ASSEMBLY__
> > +extern void _mcount(void);
> > +#define mcount _mcount
> > +
> > +#endif /* __ASSEMBLY__ */
> > +#endif /* CONFIG_FUNCTION_TRACER */
> > +#endif /* _ASM_MIPS_FTRACE_H */
> > diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
> > index e961221..d167dde 100644
> > --- a/arch/mips/kernel/Makefile
> > +++ b/arch/mips/kernel/Makefile
> > @@ -8,6 +8,12 @@ obj-y		+= cpu-probe.o branch.o entry.o genex.o irq.o process.o \
> >  		   ptrace.o reset.o setup.o signal.o syscall.o \
> >  		   time.o topology.o traps.o unaligned.o watch.o
> >  
> > +ifdef CONFIG_FUNCTION_TRACER
> > +# Do not profile debug and lowlevel utilities
> > +CFLAGS_REMOVE_mcount.o = -pg
> 
> mcount.S is an assembly file, the above is for C files. So it is not 
> needed.

Removed, thanks!

> 
> -- Steve
> 
> > +CFLAGS_REMOVE_early_printk.o = -pg
> > +endif
> > +
> >  obj-$(CONFIG_CEVT_BCM1480)	+= cevt-bcm1480.o
> >  obj-$(CONFIG_CEVT_R4K_LIB)	+= cevt-r4k.o
> >  obj-$(CONFIG_MIPS_MT_SMTC)	+= cevt-smtc.o
> > @@ -24,6 +30,8 @@ obj-$(CONFIG_SYNC_R4K)		+= sync-r4k.o
> >  obj-$(CONFIG_STACKTRACE)	+= stacktrace.o
> >  obj-$(CONFIG_MODULES)		+= mips_ksyms.o module.o
> >  
> > +obj-$(CONFIG_FUNCTION_TRACER)	+= mcount.o
> > +
> >  obj-$(CONFIG_CPU_LOONGSON2)	+= r4k_fpu.o r4k_switch.o
> >  obj-$(CONFIG_CPU_MIPS32)	+= r4k_fpu.o r4k_switch.o
> >  obj-$(CONFIG_CPU_MIPS64)	+= r4k_fpu.o r4k_switch.o
> > diff --git a/arch/mips/kernel/mcount.S b/arch/mips/kernel/mcount.S
> > new file mode 100644
> > index 0000000..268724e
> > --- /dev/null

^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [PATCH v1 2/5] mips dynamic function tracer support
  2009-05-29  1:24   ` Steven Rostedt
@ 2009-05-29  6:36     ` Wu Zhangjin
  2009-05-29 15:07       ` Steven Rostedt
  0 siblings, 1 reply; 15+ messages in thread
From: Wu Zhangjin @ 2009-05-29  6:36 UTC (permalink / raw)
  To: Steven Rostedt
  Cc: linux-mips, linux-kernel, Ralf Baechle, Ingo Molnar,
	Andrew Morton, Thomas Gleixner, Nicholas Mc Guire

On Thu, 2009-05-28 at 21:24 -0400, Steven Rostedt wrote:
> On Fri, 29 May 2009, wuzhangjin@gmail.com wrote:
> 
> > From: Wu Zhangjin <wuzj@lemote.com>
> > 
> > dynamic function tracer need to replace "nop" to "jumps & links" and
> > something reversely.
> > 
> > Signed-off-by: Wu Zhangjin <wuzj@lemote.com>
> > ---
> >  arch/mips/Kconfig              |    3 +
> >  arch/mips/include/asm/ftrace.h |   10 ++
> >  arch/mips/kernel/Makefile      |    2 +
> >  arch/mips/kernel/ftrace.c      |  217 ++++++++++++++++++++++++++++++++++++++++
> >  arch/mips/kernel/mcount.S      |   31 ++++++
> >  scripts/Makefile.build         |    1 +
> >  scripts/recordmcount.pl        |   32 +++++-
> >  7 files changed, 290 insertions(+), 6 deletions(-)
> >  create mode 100644 arch/mips/kernel/ftrace.c
> > 
> > diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
> > index d5c01ca..0c00536 100644
> > --- a/arch/mips/Kconfig
> > +++ b/arch/mips/Kconfig
> > @@ -6,6 +6,9 @@ config MIPS
> >  	select HAVE_ARCH_KGDB
> >  	select HAVE_FUNCTION_TRACER
> >  	select HAVE_FUNCTION_TRACE_MCOUNT_TEST
> > +	select HAVE_DYNAMIC_FTRACE
> > +	select HAVE_FTRACE_MCOUNT_RECORD
> > +	select HAVE_FTRACE_NMI_ENTER if DYNAMIC_FTRACE
> >  	# Horrible source of confusion.  Die, die, die ...
> >  	select EMBEDDED
> >  	select RTC_LIB
> > diff --git a/arch/mips/include/asm/ftrace.h b/arch/mips/include/asm/ftrace.h
> > index 5f8ebcf..b4970c9 100644
> > --- a/arch/mips/include/asm/ftrace.h
> > +++ b/arch/mips/include/asm/ftrace.h
> > @@ -19,6 +19,16 @@
> >  extern void _mcount(void);
> >  #define mcount _mcount
> >  
> > +#ifdef CONFIG_DYNAMIC_FTRACE
> > +/* reloction of mcount call site is the same as the address */
> > +static inline unsigned long ftrace_call_adjust(unsigned long addr)
> > +{
> > +	return addr;
> > +}
> > +
> > +struct dyn_arch_ftrace {
> > +};
> > +#endif /*  CONFIG_DYNAMIC_FTRACE */
> >  #endif /* __ASSEMBLY__ */
> >  #endif /* CONFIG_FUNCTION_TRACER */
> >  #endif /* _ASM_MIPS_FTRACE_H */
> > diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
> > index d167dde..6b1a8a5 100644
> > --- a/arch/mips/kernel/Makefile
> > +++ b/arch/mips/kernel/Makefile
> > @@ -11,6 +11,7 @@ obj-y		+= cpu-probe.o branch.o entry.o genex.o irq.o process.o \
> >  ifdef CONFIG_FUNCTION_TRACER
> >  # Do not profile debug and lowlevel utilities
> >  CFLAGS_REMOVE_mcount.o = -pg
> > +CFLAGS_REMOVE_ftrace.o = -pg
> >  CFLAGS_REMOVE_early_printk.o = -pg
> >  endif
> >  
> > @@ -31,6 +32,7 @@ obj-$(CONFIG_STACKTRACE)	+= stacktrace.o
> >  obj-$(CONFIG_MODULES)		+= mips_ksyms.o module.o
> >  
> >  obj-$(CONFIG_FUNCTION_TRACER)	+= mcount.o
> > +obj-$(CONFIG_FUNCTION_TRACER)	+= ftrace.o
> >  
> >  obj-$(CONFIG_CPU_LOONGSON2)	+= r4k_fpu.o r4k_switch.o
> >  obj-$(CONFIG_CPU_MIPS32)	+= r4k_fpu.o r4k_switch.o
> > diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c
> > new file mode 100644
> > index 0000000..827c128
> > --- /dev/null
> > +++ b/arch/mips/kernel/ftrace.c
> > @@ -0,0 +1,217 @@
> > +/*
> > + * Code for replacing ftrace calls with jumps.
> > + *
> > + * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
> > + * Copyright (C) 2009 DSLab, Lanzhou University, China
> > + * Author: Wu Zhangjin <wuzj@lemote.com>
> > + *
> > + * Thanks goes to Steven Rostedt for writing the original x86 version.
> > + */
> > +
> > +#include <linux/spinlock.h>
> > +#include <linux/hardirq.h>
> > +#include <linux/uaccess.h>
> > +#include <linux/percpu.h>
> > +#include <linux/sched.h>
> > +#include <linux/init.h>
> > +#include <linux/list.h>
> > +#include <linux/ftrace.h>
> > +
> > +#include <asm/cacheflush.h>
> > +#include <asm/ftrace.h>
> > +#include <asm/asm.h>
> > +#include <asm/unistd.h>
> > +
> > +#ifdef CONFIG_DYNAMIC_FTRACE
> > +
> > +#define JAL 0x0c000000	/* jump & link: ip --> ra, jump to target */
> > +#define ADDR_MASK 0x03ffffff	/*  op_code|addr : 31...26|25 ....0 */
> > +
> > +static unsigned int ftrace_nop = 0x00000000;
> > +
> > +static unsigned char *ftrace_call_replace(unsigned long op_code,
> > +					  unsigned long addr)
> > +{
> > +    static unsigned int op;
> > +
> > +    op = op_code | ((addr >> 2) & ADDR_MASK);
> > +
> > +    return (unsigned char *) &op;
> > +}
> > +
> > +static atomic_t nmi_running = ATOMIC_INIT(0);
> > +static int mod_code_status;	/* holds return value of text write */
> > +static int mod_code_write;	/* set when NMI should do the write */
> > +static void *mod_code_ip;	/* holds the IP to write to */
> > +static void *mod_code_newcode;	/* holds the text to write to the IP */
> > +
> > +static unsigned nmi_wait_count;
> > +static atomic_t nmi_update_count = ATOMIC_INIT(0);
> > +
> > +int ftrace_arch_read_dyn_info(char *buf, int size)
> > +{
> > +    int r;
> > +
> > +    r = snprintf(buf, size, "%u %u",
> > +		 nmi_wait_count, atomic_read(&nmi_update_count));
> > +    return r;
> > +}
> > +
> > +static void ftrace_mod_code(void)
> > +{
> > +    /*
> > +     * Yes, more than one CPU process can be writing to mod_code_status.
> > +     *    (and the code itself)
> > +     * But if one were to fail, then they all should, and if one were
> > +     * to succeed, then they all should.
> > +     */
> > +    mod_code_status = probe_kernel_write(mod_code_ip, mod_code_newcode,
> > +					 MCOUNT_INSN_SIZE);
> > +
> > +    /* if we fail, then kill any new writers */
> > +    if (mod_code_status)
> > +		mod_code_write = 0;
> > +}
> > +
> > +void ftrace_nmi_enter(void)
> > +{
> > +    atomic_inc(&nmi_running);
> > +    /* Must have nmi_running seen before reading write flag */
> > +    smp_mb();
> > +    if (mod_code_write) {
> > +		ftrace_mod_code();
> > +		atomic_inc(&nmi_update_count);
> > +    }
> > +}
> > +
> > +void ftrace_nmi_exit(void)
> > +{
> > +    /* Finish all executions before clearing nmi_running */
> > +    smp_wmb();
> > +    atomic_dec(&nmi_running);
> > +}
> > +
> > +static void wait_for_nmi(void)
> > +{
> > +    int waited = 0;
> > +
> > +    while (atomic_read(&nmi_running)) {
> > +		waited = 1;
> > +		cpu_relax();
> > +    }
> > +
> > +    if (waited)
> > +		nmi_wait_count++;
> > +}
> > +
> > +static int do_ftrace_mod_code(unsigned long ip, void *new_code)
> > +{
> > +    mod_code_ip = (void *) ip;
> > +    mod_code_newcode = new_code;
> > +
> > +    /* The buffers need to be visible before we let NMIs write them */
> > +    smp_wmb();
> > +
> > +    mod_code_write = 1;
> > +
> > +    /* Make sure write bit is visible before we wait on NMIs */
> > +    smp_mb();
> > +
> > +    wait_for_nmi();
> > +
> > +    /* Make sure all running NMIs have finished before we write the code */
> > +    smp_mb();
> > +
> > +    ftrace_mod_code();
> > +
> > +    /* Make sure the write happens before clearing the bit */
> > +    smp_wmb();
> > +
> > +    mod_code_write = 0;
> > +
> > +    /* make sure NMIs see the cleared bit */
> > +    smp_mb();
> > +
> > +    wait_for_nmi();
> > +
> > +    return mod_code_status;
> > +}
> 
> Hmm, this is basically exactly the same as x86's version. I wounder if we 
> should make a helper function in generic code to let archs use it. We can 
> put the do_ftrace_mod_code into kernel/trace/ftrace.c and have weak 
> functions for the ftrace_mod_code. If the arch needs this to handle NMIs, 
> then it can use it. This code was tricky to write, and I would hate to 
> have it duplicated in every arch.
> 

So, when will you put do_ftrace_mod_code into kernel/trace/ftrace.c?
I just checked the powerpc version; it seems somewhat different, so we
should handle it carefully and tune the relevant arch-dependent parts.


> > +
> > +static unsigned char *ftrace_nop_replace(void)
> > +{
> > +    return (unsigned char *) &ftrace_nop;
> > +}
> > +
> > +static int
> > +ftrace_modify_code(unsigned long ip, unsigned char *old_code,
> > +		   unsigned char *new_code)
> > +{
> > +    unsigned char replaced[MCOUNT_INSN_SIZE];
> > +
> > +    /*
> > +     * Note: Due to modules and __init, code can
> > +     *  disappear and change, we need to protect against faulting
> > +     *  as well as code changing. We do this by using the
> > +     *  probe_kernel_* functions.
> 
> hehe, this is an old comment. We don't touch __init sections anymore. I 
> need to remove it from the x86 file.
> 

Removed; the comment is the same in the powerpc version.

> > +     *
> > +     * No real locking needed, this code is run through
> > +     * kstop_machine, or before SMP starts.
> > +     */
> > +
> > +    /* read the text we want to modify */
> > +    if (probe_kernel_read(replaced, (void *) ip, MCOUNT_INSN_SIZE))
> > +		return -EFAULT;
> > +
> > +    /* Make sure it is what we expect it to be */
> > +    if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0)
> > +		return -EINVAL;
> > +
> > +    /* replace the text with the new text */
> > +    if (do_ftrace_mod_code(ip, new_code))
> > +		return -EPERM;
> > +
> > +    return 0;
> > +}
> > +
> > +int ftrace_make_nop(struct module *mod,
> > +		    struct dyn_ftrace *rec, unsigned long addr)
> > +{
> > +    unsigned char *new, *old;
> > +
> > +    old = ftrace_call_replace(JAL, addr);
> > +    new = ftrace_nop_replace();
> > +
> > +    return ftrace_modify_code(rec->ip, old, new);
> > +}
> > +
> > +int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
> > +{
> > +    unsigned char *new, *old;
> > +
> > +    old = ftrace_nop_replace();
> > +    new = ftrace_call_replace(JAL, addr);
> > +
> > +    return ftrace_modify_code(rec->ip, old, new);
> > +}
> > +
> > +int ftrace_update_ftrace_func(ftrace_func_t func)
> > +{
> > +    unsigned long ip = (unsigned long) (&ftrace_call);
> > +    unsigned char old[MCOUNT_INSN_SIZE], *new;
> > +    int ret;
> > +
> > +    memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
> > +    new = ftrace_call_replace(JAL, (unsigned long) func);
> > +    ret = ftrace_modify_code(ip, old, new);
> > +
> > +    return ret;
> > +}
> > +
> > +int __init ftrace_dyn_arch_init(void *data)
> > +{
> > +    /* The return code is retured via data */
> > +    *(unsigned long *) data = 0;
> 
> egad, I need to clean that up too. I should return the true error code 
> with ret. That is legacy from the first version of the dynamic ftrace 
> code.

> This review is showing all the flaws of my own work ;-)
> 

Yeah, most of it is copied from your original x86 version.

There is really a lot of duplication among the different arch-specific
versions; it needs careful cleanup. Should we write something like a
helper document for people developing arch-specific versions?

> > +
> > +    return 0;
> > +}
> > +#endif				/* CONFIG_DYNAMIC_FTRACE */
> > diff --git a/arch/mips/kernel/mcount.S b/arch/mips/kernel/mcount.S
> > index 268724e..ce8a0ba 100644
> > --- a/arch/mips/kernel/mcount.S
> > +++ b/arch/mips/kernel/mcount.S
> > @@ -67,6 +67,35 @@
> >  	move ra, $1
> >  	.endm
> >  
> > +#ifdef CONFIG_DYNAMIC_FTRACE
> > +
> > +LEAF(_mcount)
> > +	RESTORE_SP_FOR_32BIT
> > + 	RETURN_BACK
> > + 	END(_mcount)
> > +
> > +NESTED(ftrace_caller, PT_SIZE, ra)
> > +	RESTORE_SP_FOR_32BIT
> > +	lw	t0, function_trace_stop
> > +	bnez	t0, ftrace_stub
> > +	nop
> > +
> > +	MCOUNT_SAVE_REGS
> > +
> > +	MCOUNT_SET_ARGS
> > +	.globl ftrace_call
> > +ftrace_call:
> > +	jal	ftrace_stub
> > +	nop
> > +
> > +	MCOUNT_RESTORE_REGS
> > +	.globl ftrace_stub
> > +ftrace_stub:
> > +	RETURN_BACK
> > +	END(ftrace_caller)
> > +
> > +#else	/* ! CONFIG_DYNAMIC_FTRACE */
> > +
> >  NESTED(_mcount, PT_SIZE, ra)
> >  	RESTORE_SP_FOR_32BIT
> >  	PTR_L	t0, function_trace_stop
> > @@ -94,5 +123,7 @@ ftrace_stub:
> >  	RETURN_BACK
> >  	END(_mcount)
> >  
> > +#endif	/* ! CONFIG_DYNAMIC_FTRACE */
> > +
> >  	.set at
> >  	.set reorder
> > diff --git a/scripts/Makefile.build b/scripts/Makefile.build
> > index 5c4b7a4..548d575 100644
> > --- a/scripts/Makefile.build
> > +++ b/scripts/Makefile.build
> > @@ -207,6 +207,7 @@ endif
> >  
> >  ifdef CONFIG_FTRACE_MCOUNT_RECORD
> >  cmd_record_mcount = perl $(srctree)/scripts/recordmcount.pl "$(ARCH)" \
> > +	"$(if $(CONFIG_CPU_BIG_ENDIAN),big,little)" \
> >  	"$(if $(CONFIG_64BIT),64,32)" \
> >  	"$(OBJDUMP)" "$(OBJCOPY)" "$(CC)" "$(LD)" "$(NM)" "$(RM)" "$(MV)" \
> >  	"$(if $(part-of-module),1,0)" "$(@)";
> 
> This big/little endian addition, I would like in its own patch.
> 

okay, will split it out later.

> > diff --git a/scripts/recordmcount.pl b/scripts/recordmcount.pl
> > index 409596e..e963948 100755
> > --- a/scripts/recordmcount.pl
> > +++ b/scripts/recordmcount.pl
> > @@ -100,13 +100,13 @@ $P =~ s@.*/@@g;
> >  
> >  my $V = '0.1';
> >  
> > -if ($#ARGV < 7) {
> > -	print "usage: $P arch bits objdump objcopy cc ld nm rm mv is_module inputfile\n";
> > +if ($#ARGV < 8) {
> > +	print "usage: $P arch endian bits objdump objcopy cc ld nm rm mv is_module inputfile\n";
> >  	print "version: $V\n";
> >  	exit(1);
> >  }
> >  
> > -my ($arch, $bits, $objdump, $objcopy, $cc,
> > +my ($arch, $endian, $bits, $objdump, $objcopy, $cc,
> >      $ld, $nm, $rm, $mv, $is_module, $inputfile) = @ARGV;
> >  
> >  # This file refers to mcount and shouldn't be ftraced, so lets' ignore it
> > @@ -213,6 +213,26 @@ if ($arch eq "x86_64") {
> >      if ($is_module eq "0") {
> >          $cc .= " -mconstant-gp";
> >      }
> > +
> > +} elsif ($arch eq "mips") {
> > +	$mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\s_mcount\$";
> > +	$objdump .= " -Melf-trad".$endian."mips ";
> > +
> > +	if ($endian eq "big") {
> > +		$endian = " -EB ";
> > +		$ld .= " -melf".$bits."btsmip";
> > +	} else {
> > +		$endian = " -EL ";
> > +		$ld .= " -melf".$bits."ltsmip";
> > +	}
> > +
> > +	$cc .= " -mno-abicalls -fno-pic -mabi=" . $bits . $endian;
> > +    $ld .= $endian;
> > +
> > +    if ($bits == 64) {
> > +		$type = ".dword";
> > +    }
> 
> The mips addition to the recordmcount.pl is OK to keep with this patch.
> 
> > +
> >  } else {
> >      die "Arch $arch is not supported with CONFIG_FTRACE_MCOUNT_RECORD";
> >  }
> > @@ -441,12 +461,12 @@ if ($#converts >= 0) {
> >      #
> >      # Step 5: set up each local function as a global
> >      #
> > -    `$objcopy $globallist $inputfile $globalobj`;
> > +    `$objcopy $globallist $inputfile $globalobj 2>&1 >/dev/null`;
> 
> Are these spitting out errors?
> 

No errors, but some warnings.

It seems some files do not have _mcount (ooh, I did this patch about two
months ago, so I do not remember the _real_ reason now), so there will be
some complaints about "No such file ..."; this fix just makes it not
complain again.

> -- Steve
> 
> >  
> >      #
> >      # Step 6: Link the global version to our list.
> >      #
> > -    `$ld -r $globalobj $mcount_o -o $globalmix`;
> > +    `$ld -r $globalobj $mcount_o -o $globalmix 2>&1 >/dev/null`;
> >  
> >      #
> >      # Step 7: Convert the local functions back into local symbols
> > @@ -454,7 +474,7 @@ if ($#converts >= 0) {
> >      `$objcopy $locallist $globalmix $inputfile`;
> >  
> >      # Remove the temp files
> > -    `$rm $globalobj $globalmix`;
> > +    `$rm $globalobj $globalmix 2>&1 >/dev/null`;
> >  
> >  } else {
> >  
> > -- 
> > 1.6.0.4
> > 
> > 

^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [PATCH v1 3/5] mips function graph tracer support
  2009-05-29  2:01   ` Steven Rostedt
@ 2009-05-29  9:07     ` Wu Zhangjin
  2009-05-29 15:16       ` Steven Rostedt
  0 siblings, 1 reply; 15+ messages in thread
From: Wu Zhangjin @ 2009-05-29  9:07 UTC (permalink / raw)
  To: Steven Rostedt
  Cc: linux-mips, linux-kernel, Ralf Baechle, Ingo Molnar,
	Andrew Morton, Thomas Gleixner, Nicholas Mc Guire

On Thu, 2009-05-28 at 22:01 -0400, Steven Rostedt wrote:
> On Fri, 29 May 2009, wuzhangjin@gmail.com wrote:
> > diff --git a/arch/mips/kernel/mcount.S b/arch/mips/kernel/mcount.S
> > index ce8a0ba..bd58f16 100644
> > --- a/arch/mips/kernel/mcount.S
> > +++ b/arch/mips/kernel/mcount.S
> > @@ -28,6 +28,10 @@
> >  	PTR_SUBU	sp, PT_SIZE
> >  	PTR_S	ra, PT_R31(sp)
> >  	PTR_S	$1, PT_R1(sp)
> > +#ifdef CONFIG_FUNCTION_GRAPH_TRACER
> > +	PTR_S	v0, PT_R2(sp)
> > +	PTR_S	v1, PT_R3(sp)
> > +#endif
> >  	PTR_S	a0, PT_R4(sp)
> >  	PTR_S	a1, PT_R5(sp)
> >  	PTR_S	a2, PT_R6(sp)
> > @@ -43,6 +47,10 @@
> >  	.macro MCOUNT_RESTORE_REGS
> >  	PTR_L	ra, PT_R31(sp)
> >  	PTR_L	$1, PT_R1(sp)
> > +#ifdef CONFIG_FUNCTION_GRAPH_TRACER
> > +	PTR_L	v0, PT_R2(sp)
> > +	PTR_L	v1, PT_R3(sp)
> > +#endif
> >  	PTR_L	a0, PT_R4(sp)
> >  	PTR_L	a1, PT_R5(sp)
> >  	PTR_L	a2, PT_R6(sp)
> > @@ -89,6 +97,14 @@ ftrace_call:
> >  	nop
> >  
> >  	MCOUNT_RESTORE_REGS
> > +
> > +#ifdef CONFIG_FUNCTION_GRAPH_TRACER
> > +	.globl ftrace_graph_call
> > +ftrace_graph_call:
> > +	j	ftrace_stub
> > +	nop
> > +#endif
> > +
> >  	.globl ftrace_stub
> >  ftrace_stub:
> >  	RETURN_BACK
> > @@ -106,7 +122,15 @@ NESTED(_mcount, PT_SIZE, ra)
> >  	PTR_L	t1, ftrace_trace_function /* please don't use t1 later, safe? */
> >  	bne	t0, t1, static_trace
> >  	nop
> > -
> > +#ifdef	CONFIG_FUNCTION_GRAPH_TRACER
> > +	PTR_L	t2, ftrace_graph_return
> > +	bne	t0,	t2, ftrace_graph_caller
> > +	nop
> > +	PTR_LA	t0, ftrace_graph_entry_stub
> > +	PTR_L	t2, ftrace_graph_entry
> > +	bne	t0,	t2, ftrace_graph_caller
> > +	nop
> > +#endif
> >  	j	ftrace_stub
> >  	nop
> >  
> > @@ -125,5 +149,37 @@ ftrace_stub:
> >  
> >  #endif	/* ! CONFIG_DYNAMIC_FTRACE */
> >  
> > +#ifdef CONFIG_FUNCTION_GRAPH_TRACER
> > +
> > +NESTED(ftrace_graph_caller, PT_SIZE, ra)
> > +	MCOUNT_SAVE_REGS
> > +
> > +	MCOUNT_SET_ARGS
> > +	jal	prepare_ftrace_return
> > +	nop
> > +
> > +	/* overwrite the parent as &return_to_handler: v0 -> $1(at) */
> > +	PTR_S	v0, PT_R1(sp)
> > +
> > +	MCOUNT_RESTORE_REGS
> > +	RETURN_BACK
> > +	END(ftrace_graph_caller)
> > +
> > +	.align	2
> > +	.globl	return_to_handler
> > +return_to_handler:
> > +	MCOUNT_SAVE_REGS
> 
> I'm not sure which version of function_graph tracer you looked at, 

Currently, I'm using the master branch of the latest linux-mips git
tree, so the function_graph tracer should be the latest version?

BTW: which git branch should I apply these patches to?

> but I'm 
> pretty sure you can just save the return code registers of the function.
> 
> return_to_handler is called on the return of a function. 

Yeap.

> Thus, any callee 
> saved registers have already been restored and would also be restored by 
> ftrace_return_to_handler.  Any callee registers would have been saved by 
> the function you are about to return to.
> 
> Thus the only things you need to save are the return code registers.

I have tried not saving/restoring the argument (a0-a7) registers, and the
kernel hangs:

CPU 0 Unable to handle kernel paging request at virtual address
0000000c, epc == 80101360, ra == 8010b114
Oops[#1]:
Cpu 0
$ 0   : 00000000 8010b114 00000000 00000000
$ 4   : 87830d00 87830d00 8780c380 10002400
$ 8   : 5607ec00 0000000b 00000000 5607ec00
$12   : 5607ec00 9e3779b9 9e3779b9 00000000
$16   : 00000000 10002400 00000000 87884d00
$20   : 000000d0 801857cc 00000000 80483e00
$24   : 00000000 00000000                  
$28   : 87976000 87977cd8 87977cd8 8010b114
Hi    : 0000000b
Lo    : 5607ec00
epc   : 80101360 plat_irq_dispatch+0x70/0x250
    Not tainted
ra    : 8010b114 return_to_handler+0x0/0x4c
Status: 10002402    KERNEL EXL 
Cause : 50808008
BadVA : 0000000c
PrId  : 00019300 (MIPS 24Kc)
Process bash (pid: 533, threadinfo=87976000, task=8796f560,
tls=2aad5100)
Stack : 00000062 0000006f 80455ef8 9e3779b9 00000000 00000020 87977da8
80101f80
        804b0000 804b0000 87977d08 8010b330 00000000 80483e00 00000000
10002401
        00000000 00000000 804b0000 804b3854 00000000 00000004 08042c2b
8010b0a0
        00000000 0000005f 80455fe4 9e3779b9 9e3779b9 00000000 87884d80
00000020
        00000000 87884d00 000000d0 801857cc 00000000 80483e00 00000000
00000000
        ...
Call Trace:
[<80101360>] plat_irq_dispatch+0x70/0x250
[<80101f80>] ret_from_irq+0x0/0x4
[<80183dd4>] ftrace_run_update_code+0x58/0xdc
[<8018410c>] ftrace_startup+0x7c/0x90
[<80185ab8>] register_ftrace_graph+0x40c/0x440
[<801955a0>] graph_trace_init+0x28/0x54
[<80190d00>] tracer_init+0x34/0x50
[<80190f34>] tracing_set_tracer+0x218/0x2c4
[<801910b8>] tracing_set_trace_write+0xd8/0x144


Code: 00000000  0c05cc3c  02002021 <8c43000c> 02002021  0060f809
00402821  0c04cec0  00000000 
Disabling lock debugging due to kernel taint
Kernel panic - not syncing: Fatal exception in interrupt

So, I think there is really a need to use the current
MCOUNT_SAVE/RESTORE_REGS; some argument registers must be saved,
and we can share these common macros :-)

> -- Steve
> 
> 
> > +
> > +	jal	ftrace_return_to_handler
> > +	nop
> > +
> > +	/* restore the real parent address: v0 -> ra */
> > +	PTR_S	v0, PT_R31(sp)
> > +
> > +	MCOUNT_RESTORE_REGS
> > +	RETURN_BACK
> > +
> > +#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
> > +
> >  	.set at
> >  	.set reorder
> > diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S
> > index 58738c8..67435e5 100644
> > --- a/arch/mips/kernel/vmlinux.lds.S
> > +++ b/arch/mips/kernel/vmlinux.lds.S
> > @@ -36,6 +36,7 @@ SECTIONS
> >  		SCHED_TEXT
> >  		LOCK_TEXT
> >  		KPROBES_TEXT
> > +		IRQENTRY_TEXT
> >  		*(.text.*)
> >  		*(.fixup)
> >  		*(.gnu.warning)
> > -- 
> > 1.6.0.4
> > 
> > 

^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [PATCH v1 2/5] mips dynamic function tracer support
  2009-05-29  6:36     ` Wu Zhangjin
@ 2009-05-29 15:07       ` Steven Rostedt
  0 siblings, 0 replies; 15+ messages in thread
From: Steven Rostedt @ 2009-05-29 15:07 UTC (permalink / raw)
  To: Wu Zhangjin
  Cc: linux-mips, linux-kernel, Ralf Baechle, Ingo Molnar,
	Andrew Morton, Thomas Gleixner, Nicholas Mc Guire


On Fri, 29 May 2009, Wu Zhangjin wrote:
> On Thu, 2009-05-28 at 21:24 -0400, Steven Rostedt wrote:
> > On Fri, 29 May 2009, wuzhangjin@gmail.com wrote:
> > > From: Wu Zhangjin <wuzj@lemote.com>
> > 
> > Hmm, this is basically exactly the same as x86's version. I wounder if we 
> > should make a helper function in generic code to let archs use it. We can 
> > put the do_ftrace_mod_code into kernel/trace/ftrace.c and have weak 
> > functions for the ftrace_mod_code. If the arch needs this to handle NMIs, 
> > then it can use it. This code was tricky to write, and I would hate to 
> > have it duplicated in every arch.
> > 
> 
> so, when will you put do_ftrace_mod_code into kernel/trace/ftrace.c? 
> i just checked the powerpc version, seems something different, so, we
> should handle it carefully and tune the relative arch-dependent parts? 

I did not cover NMIs for PowerPC. I probably should. I'll let this go in 
first and then we can think about how to consolidate the archs after we 
see what is similar.

> 
> > > +
> > > +static unsigned char *ftrace_nop_replace(void)
> > > +{
> > > +    return (unsigned char *) &ftrace_nop;
> > > +}
> > > +
> > > +static int
> > > +ftrace_modify_code(unsigned long ip, unsigned char *old_code,
> > > +		   unsigned char *new_code)
> > > +{
> > > +    unsigned char replaced[MCOUNT_INSN_SIZE];
> > > +
> > > +    /*
> > > +     * Note: Due to modules and __init, code can
> > > +     *  disappear and change, we need to protect against faulting
> > > +     *  as well as code changing. We do this by using the
> > > +     *  probe_kernel_* functions.
> > 
> > hehe, this is an old comment. We don't touch __init sections anymore. I 
> > need to remove it from the x86 file.
> > 
> 
> Removed, this is the same in powerpc version.

That's because I wrote the PPC version with the old code as well. I just 
waited to post it. But I never updated the comments there either. I'll 
have to write a clean up patch. But no need to copy an incorrect comment 
again ;-)

> 
> > > +     *
> > > +     * No real locking needed, this code is run through
> > > +     * kstop_machine, or before SMP starts.
> > > +     */
> > > +
> > > +    /* read the text we want to modify */
> > > +    if (probe_kernel_read(replaced, (void *) ip, MCOUNT_INSN_SIZE))
> > > +		return -EFAULT;
> > > +
> > > +    /* Make sure it is what we expect it to be */
> > > +    if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0)
> > > +		return -EINVAL;
> > > +
> > > +    /* replace the text with the new text */
> > > +    if (do_ftrace_mod_code(ip, new_code))
> > > +		return -EPERM;
> > > +
> > > +    return 0;
> > > +}
> > > +
> > > +int ftrace_make_nop(struct module *mod,
> > > +		    struct dyn_ftrace *rec, unsigned long addr)
> > > +{
> > > +    unsigned char *new, *old;
> > > +
> > > +    old = ftrace_call_replace(JAL, addr);
> > > +    new = ftrace_nop_replace();
> > > +
> > > +    return ftrace_modify_code(rec->ip, old, new);
> > > +}
> > > +
> > > +int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
> > > +{
> > > +    unsigned char *new, *old;
> > > +
> > > +    old = ftrace_nop_replace();
> > > +    new = ftrace_call_replace(JAL, addr);
> > > +
> > > +    return ftrace_modify_code(rec->ip, old, new);
> > > +}
> > > +
> > > +int ftrace_update_ftrace_func(ftrace_func_t func)
> > > +{
> > > +    unsigned long ip = (unsigned long) (&ftrace_call);
> > > +    unsigned char old[MCOUNT_INSN_SIZE], *new;
> > > +    int ret;
> > > +
> > > +    memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
> > > +    new = ftrace_call_replace(JAL, (unsigned long) func);
> > > +    ret = ftrace_modify_code(ip, old, new);
> > > +
> > > +    return ret;
> > > +}
> > > +
> > > +int __init ftrace_dyn_arch_init(void *data)
> > > +{
> > > +    /* The return code is retured via data */
> > > +    *(unsigned long *) data = 0;
> > 
> > egad, I need to clean that up too. I should return the true error code 
> > with ret. That is legacy from the first version of the dynamic ftrace 
> > code.
> 
> > This review is showing all the flaws of my own work ;-)
> > 
> 
> Yeap, most of it is copied from your original x86 version.
> 
> there are really lots of duplications among different arch-specific
> versions, need to cleanup carefully, and should we write something like
> a helper document for people developing arch-specific version?

We could, but so far it seems that pretty much every arch that has 
ported ftrace did fine with just looking at the code. I tried to keep the 
comments in x86 good enough for other arch maintainers to understand what 
was going on.

Probably after mips is ported, I'll do some cleanups to consolidate the 
archs and fix some design flaws. I'll need to get an Acked-by from the 
maintainer of each arch I touch.

> 
> > > +
> > > +    return 0;
> > > +}
> > > +#endif				/* CONFIG_DYNAMIC_FTRACE */
> > > diff --git a/arch/mips/kernel/mcount.S b/arch/mips/kernel/mcount.S
> > > index 268724e..ce8a0ba 100644
> > > --- a/arch/mips/kernel/mcount.S
> > > +++ b/arch/mips/kernel/mcount.S
> > > @@ -67,6 +67,35 @@
> > >  	move ra, $1
> > >  	.endm
> > >  
> > > +#ifdef CONFIG_DYNAMIC_FTRACE
> > > +
> > > +LEAF(_mcount)
> > > +	RESTORE_SP_FOR_32BIT
> > > + 	RETURN_BACK
> > > + 	END(_mcount)
> > > +
> > > +NESTED(ftrace_caller, PT_SIZE, ra)
> > > +	RESTORE_SP_FOR_32BIT
> > > +	lw	t0, function_trace_stop
> > > +	bnez	t0, ftrace_stub
> > > +	nop
> > > +
> > > +	MCOUNT_SAVE_REGS
> > > +
> > > +	MCOUNT_SET_ARGS
> > > +	.globl ftrace_call
> > > +ftrace_call:
> > > +	jal	ftrace_stub
> > > +	nop
> > > +
> > > +	MCOUNT_RESTORE_REGS
> > > +	.globl ftrace_stub
> > > +ftrace_stub:
> > > +	RETURN_BACK
> > > +	END(ftrace_caller)
> > > +
> > > +#else	/* ! CONFIG_DYNAMIC_FTRACE */
> > > +
> > >  NESTED(_mcount, PT_SIZE, ra)
> > >  	RESTORE_SP_FOR_32BIT
> > >  	PTR_L	t0, function_trace_stop
> > > @@ -94,5 +123,7 @@ ftrace_stub:
> > >  	RETURN_BACK
> > >  	END(_mcount)
> > >  
> > > +#endif	/* ! CONFIG_DYNAMIC_FTRACE */
> > > +
> > >  	.set at
> > >  	.set reorder
> > > diff --git a/scripts/Makefile.build b/scripts/Makefile.build
> > > index 5c4b7a4..548d575 100644
> > > --- a/scripts/Makefile.build
> > > +++ b/scripts/Makefile.build
> > > @@ -207,6 +207,7 @@ endif
> > >  
> > >  ifdef CONFIG_FTRACE_MCOUNT_RECORD
> > >  cmd_record_mcount = perl $(srctree)/scripts/recordmcount.pl "$(ARCH)" \
> > > +	"$(if $(CONFIG_CPU_BIG_ENDIAN),big,little)" \
> > >  	"$(if $(CONFIG_64BIT),64,32)" \
> > >  	"$(OBJDUMP)" "$(OBJCOPY)" "$(CC)" "$(LD)" "$(NM)" "$(RM)" "$(MV)" \
> > >  	"$(if $(part-of-module),1,0)" "$(@)";
> > 
> > This big/little endian addition, I would like in its own patch.
> > 
> 
> okay, will split it out later.

Thanks.

> 
> > > diff --git a/scripts/recordmcount.pl b/scripts/recordmcount.pl
> > > index 409596e..e963948 100755
> > > --- a/scripts/recordmcount.pl
> > > +++ b/scripts/recordmcount.pl
> > > @@ -100,13 +100,13 @@ $P =~ s@.*/@@g;
> > >  
> > >  my $V = '0.1';
> > >  
> > > -if ($#ARGV < 7) {
> > > -	print "usage: $P arch bits objdump objcopy cc ld nm rm mv is_module inputfile\n";
> > > +if ($#ARGV < 8) {
> > > +	print "usage: $P arch endian bits objdump objcopy cc ld nm rm mv is_module inputfile\n";
> > >  	print "version: $V\n";
> > >  	exit(1);
> > >  }
> > >  
> > > -my ($arch, $bits, $objdump, $objcopy, $cc,
> > > +my ($arch, $endian, $bits, $objdump, $objcopy, $cc,
> > >      $ld, $nm, $rm, $mv, $is_module, $inputfile) = @ARGV;
> > >  
> > >  # This file refers to mcount and shouldn't be ftraced, so lets' ignore it
> > > @@ -213,6 +213,26 @@ if ($arch eq "x86_64") {
> > >      if ($is_module eq "0") {
> > >          $cc .= " -mconstant-gp";
> > >      }
> > > +
> > > +} elsif ($arch eq "mips") {
> > > +	$mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\s_mcount\$";
> > > +	$objdump .= " -Melf-trad".$endian."mips ";
> > > +
> > > +	if ($endian eq "big") {
> > > +		$endian = " -EB ";
> > > +		$ld .= " -melf".$bits."btsmip";
> > > +	} else {
> > > +		$endian = " -EL ";
> > > +		$ld .= " -melf".$bits."ltsmip";
> > > +	}
> > > +
> > > +	$cc .= " -mno-abicalls -fno-pic -mabi=" . $bits . $endian;
> > > +    $ld .= $endian;
> > > +
> > > +    if ($bits == 64) {
> > > +		$type = ".dword";
> > > +    }
> > 
> > The mips addition to the recordmcount.pl is OK to keep with this patch.
> > 
> > > +
> > >  } else {
> > >      die "Arch $arch is not supported with CONFIG_FTRACE_MCOUNT_RECORD";
> > >  }
> > > @@ -441,12 +461,12 @@ if ($#converts >= 0) {
> > >      #
> > >      # Step 5: set up each local function as a global
> > >      #
> > > -    `$objcopy $globallist $inputfile $globalobj`;
> > > +    `$objcopy $globallist $inputfile $globalobj 2>&1 >/dev/null`;
> > 
> > Are these spitting out errors?
> > 
> 
> no errors, but some warnings.
> 
> seems some files not have _mcount(ooh, I did this patch about two months
> ago, so not remember the _real_ reason now), so there will some
> complaint about "No such file ...", this fix just make it not complain
> again.


There's a part in the script that exits early if no mcount callers were 
found. There might be an error here. I prefer not to hide warnings because 
we may be hiding bugs.

-- Steve

^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [PATCH v1 3/5] mips function graph tracer support
  2009-05-29  9:07     ` Wu Zhangjin
@ 2009-05-29 15:16       ` Steven Rostedt
  0 siblings, 0 replies; 15+ messages in thread
From: Steven Rostedt @ 2009-05-29 15:16 UTC (permalink / raw)
  To: Wu Zhangjin
  Cc: linux-mips, linux-kernel, Ralf Baechle, Ingo Molnar,
	Andrew Morton, Thomas Gleixner, Nicholas Mc Guire



On Fri, 29 May 2009, Wu Zhangjin wrote:
> > > +
> > > +NESTED(ftrace_graph_caller, PT_SIZE, ra)
> > > +	MCOUNT_SAVE_REGS
> > > +
> > > +	MCOUNT_SET_ARGS
> > > +	jal	prepare_ftrace_return
> > > +	nop
> > > +
> > > +	/* overwrite the parent as &return_to_handler: v0 -> $1(at) */
> > > +	PTR_S	v0, PT_R1(sp)
> > > +
> > > +	MCOUNT_RESTORE_REGS
> > > +	RETURN_BACK
> > > +	END(ftrace_graph_caller)
> > > +
> > > +	.align	2
> > > +	.globl	return_to_handler
> > > +return_to_handler:
> > > +	MCOUNT_SAVE_REGS
> > 
> > I'm not sure which version of function_graph tracer you looked at, 
> 
> currently, I'm using the master branch of the latest linux-mips git
> tree. so, the function_graph should be the latest version?
> 
> BTW: which git branch should i apply these patches to?

Any changes to the core ftrace we would like to go through tip. But 
testing of an arch is best in the arch specific trees. What we did for 
powerpc, was that we made a branch based off of linus's tree and did only 
the change in the core ftrace code that was needed by PPC. Then that 
branch was pulled into both tip and the ppc git tree. Thus it was the same 
change set with the same SHA1. Then it could go via either ppc or tip into 
linus's tree and it would not be a duplicate. We might need to do 
something similar as well for mips.


> 
> > but I'm 
> > pretty sure you can just save the return code registers of the function.
> > 
> > return_to_handler is called on the return of a function. 
> 
> Yeap.
> 
> > Thus, any callee 
> > saved registers have already been restored and would also be restored by 
> > ftrace_return_to_handler.  Any callee registers would have been saved by 
> > the function you are about to return to.
> > 
> > Thus the only things you need to save are the return code registers.
> 
> I have tried to not save/restore the argument (a0-a7) registers, and the
> kernel will hang:

You might want to look into this. When Frederic first did the code for 
x86_64 he had similar issues. But later I converted it to just the return 
values and everything worked fine. You might be hiding some other kind of 
bug.

But I do not remember the mips binary API. Does the callee own all args 
registers? Or just the ones it uses? You must also make sure you preserve 
all return value registers. Again, I don't remember the mips API (been 8 years 
since I've worked on mips), but it may be more than one reg.

/me searches for his old mips hand book.

> 
> CPU 0 Unable to handle kernel paging request at virtual address
> 0000000c, epc == 80101360, ra == 8010b114
> Oops[#1]:
> Cpu 0
> $ 0   : 00000000 8010b114 00000000 00000000
> $ 4   : 87830d00 87830d00 8780c380 10002400
> $ 8   : 5607ec00 0000000b 00000000 5607ec00
> $12   : 5607ec00 9e3779b9 9e3779b9 00000000
> $16   : 00000000 10002400 00000000 87884d00
> $20   : 000000d0 801857cc 00000000 80483e00
> $24   : 00000000 00000000                  
> $28   : 87976000 87977cd8 87977cd8 8010b114
> Hi    : 0000000b
> Lo    : 5607ec00
> epc   : 80101360 plat_irq_dispatch+0x70/0x250
>     Not tainted
> ra    : 8010b114 return_to_handler+0x0/0x4c
> Status: 10002402    KERNEL EXL 
> Cause : 50808008
> BadVA : 0000000c
> PrId  : 00019300 (MIPS 24Kc)
> Process bash (pid: 533, threadinfo=87976000, task=8796f560,
> tls=2aad5100)
> Stack : 00000062 0000006f 80455ef8 9e3779b9 00000000 00000020 87977da8
> 80101f80
>         804b0000 804b0000 87977d08 8010b330 00000000 80483e00 00000000
> 10002401
>         00000000 00000000 804b0000 804b3854 00000000 00000004 08042c2b
> 8010b0a0
>         00000000 0000005f 80455fe4 9e3779b9 9e3779b9 00000000 87884d80
> 00000020
>         00000000 87884d00 000000d0 801857cc 00000000 80483e00 00000000
> 00000000
>         ...
> Call Trace:
> [<80101360>] plat_irq_dispatch+0x70/0x250
> [<80101f80>] ret_from_irq+0x0/0x4
> [<80183dd4>] ftrace_run_update_code+0x58/0xdc
> [<8018410c>] ftrace_startup+0x7c/0x90
> [<80185ab8>] register_ftrace_graph+0x40c/0x440
> [<801955a0>] graph_trace_init+0x28/0x54
> [<80190d00>] tracer_init+0x34/0x50
> [<80190f34>] tracing_set_tracer+0x218/0x2c4
> [<801910b8>] tracing_set_trace_write+0xd8/0x144
> 
> 
> Code: 00000000  0c05cc3c  02002021 <8c43000c> 02002021  0060f809
> 00402821  0c04cec0  00000000 
> Disabling lock debugging due to kernel taint
> Kernel panic - not syncing: Fatal exception in interrupt
> 
> so, i think there is really a need to use the current
> MCOUNT_SAVE/RESTORE_REGS, some arguments registers should be saved.
> and we can share this common macros :-)

I would still like to understand why it crashes. Perhaps you really need 
to save all regs, but not fully understanding a crash and finding a work 
around does not make me feel too comfortable.

-- Steve

^ permalink raw reply	[flat|nested] 15+ messages in thread

end of thread, other threads:[~2009-05-29 15:16 UTC | newest]

Thread overview: 15+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
     [not found] <cover.1243543471.git.wuzj@lemote.com>
2009-05-28 20:48 ` [PATCH v1 1/5] mips static function tracer support wuzhangjin
2009-05-29  1:13   ` Steven Rostedt
2009-05-29  6:11     ` Wu Zhangjin
2009-05-28 20:48 ` [PATCH v1 2/5] mips dynamic " wuzhangjin
2009-05-29  1:24   ` Steven Rostedt
2009-05-29  6:36     ` Wu Zhangjin
2009-05-29 15:07       ` Steven Rostedt
2009-05-28 20:49 ` [PATCH v1 3/5] mips function graph " wuzhangjin
2009-05-29  2:01   ` Steven Rostedt
2009-05-29  9:07     ` Wu Zhangjin
2009-05-29 15:16       ` Steven Rostedt
2009-05-28 20:49 ` [PATCH v1 4/5] mips specific clock function to get precise timestamp wuzhangjin
2009-05-29  2:06   ` Steven Rostedt
2009-05-28 20:49 ` [PATCH v1 5/5] mips specific system call tracer wuzhangjin
2009-05-29  2:09   ` Steven Rostedt

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).