* [PATCH 1/4] trace: rename unlikely profiler to branch profiler
2008-11-12 21:21 [PATCH 0/4] ftrace: updates for tip Steven Rostedt
@ 2008-11-12 21:21 ` Steven Rostedt
2008-11-12 21:21 ` [PATCH 2/4] ftrace: rename unlikely iter_ctrl to branch Steven Rostedt
` (3 subsequent siblings)
4 siblings, 0 replies; 7+ messages in thread
From: Steven Rostedt @ 2008-11-12 21:21 UTC (permalink / raw)
To: linux-kernel; +Cc: Ingo Molnar, Andrew Morton, Steven Rostedt
[-- Attachment #1: 0001-trace-rename-unlikely-profiler-to-branch-profiler.patch --]
[-- Type: text/plain, Size: 7892 bytes --]
Impact: name change of unlikely tracer and profiler
Ingo Molnar suggested changing the config from UNLIKELY_PROFILE
to BRANCH_PROFILING. I never did like the "unlikely" name so I
went one step further and renamed all the unlikely configurations
to a "BRANCH" variant.
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
---
arch/x86/kernel/vsyscall_64.c | 2 +-
arch/x86/vdso/vclock_gettime.c | 2 +-
include/asm-generic/vmlinux.lds.h | 2 +-
include/linux/compiler.h | 19 ++++++++++---------
kernel/trace/Kconfig | 10 +++++-----
kernel/trace/Makefile | 7 +++----
kernel/trace/trace.c | 2 +-
kernel/trace/trace.h | 6 +++---
kernel/trace/trace_unlikely.c | 4 ++--
9 files changed, 27 insertions(+), 27 deletions(-)
diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
index 9a596ae..44153af 100644
--- a/arch/x86/kernel/vsyscall_64.c
+++ b/arch/x86/kernel/vsyscall_64.c
@@ -18,7 +18,7 @@
*/
/* Disable profiling for userspace code: */
-#define DISABLE_UNLIKELY_PROFILE
+#define DISABLE_BRANCH_PROFILING
#include <linux/time.h>
#include <linux/init.h>
diff --git a/arch/x86/vdso/vclock_gettime.c b/arch/x86/vdso/vclock_gettime.c
index 6e66763..d9d3582 100644
--- a/arch/x86/vdso/vclock_gettime.c
+++ b/arch/x86/vdso/vclock_gettime.c
@@ -10,7 +10,7 @@
*/
/* Disable profiling for userspace code: */
-#define DISABLE_UNLIKELY_PROFILE
+#define DISABLE_BRANCH_PROFILING
#include <linux/kernel.h>
#include <linux/posix-timers.h>
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 2110312..ae7ef0c 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -45,7 +45,7 @@
#define MCOUNT_REC()
#endif
-#ifdef CONFIG_TRACE_UNLIKELY_PROFILE
+#ifdef CONFIG_TRACE_BRANCH_PROFILING
#define LIKELY_PROFILE() VMLINUX_SYMBOL(__start_likely_profile) = .; \
*(_ftrace_likely) \
VMLINUX_SYMBOL(__stop_likely_profile) = .; \
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 63b7d90..c7d804a 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -59,26 +59,27 @@ extern void __chk_io_ptr(const volatile void __iomem *);
* specific implementations come from the above header files
*/
-/*
- * Note: DISABLE_UNLIKELY_PROFILE can be used by special lowlevel code
- * to disable branch tracing on a per file basis.
- */
-#if defined(CONFIG_TRACE_UNLIKELY_PROFILE) && !defined(DISABLE_UNLIKELY_PROFILE)
-struct ftrace_likely_data {
+struct ftrace_branch_data {
const char *func;
const char *file;
unsigned line;
unsigned long correct;
unsigned long incorrect;
};
-void ftrace_likely_update(struct ftrace_likely_data *f, int val, int expect);
+
+/*
+ * Note: DISABLE_BRANCH_PROFILING can be used by special lowlevel code
+ * to disable branch tracing on a per file basis.
+ */
+#if defined(CONFIG_TRACE_BRANCH_PROFILING) && !defined(DISABLE_BRANCH_PROFILING)
+void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
#define likely_notrace(x) __builtin_expect(!!(x), 1)
#define unlikely_notrace(x) __builtin_expect(!!(x), 0)
#define likely_check(x) ({ \
int ______r; \
- static struct ftrace_likely_data \
+ static struct ftrace_branch_data \
__attribute__((__aligned__(4))) \
__attribute__((section("_ftrace_likely"))) \
______f = { \
@@ -93,7 +94,7 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val, int expect);
})
#define unlikely_check(x) ({ \
int ______r; \
- static struct ftrace_likely_data \
+ static struct ftrace_branch_data \
__attribute__((__aligned__(4))) \
__attribute__((section("_ftrace_unlikely"))) \
______f = { \
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 8abcaf8..9c89526 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -159,7 +159,7 @@ config BOOT_TRACER
selected, because the self-tests are an initcall as well and that
would invalidate the boot trace. )
-config TRACE_UNLIKELY_PROFILE
+config TRACE_BRANCH_PROFILING
bool "Trace likely/unlikely profiler"
depends on DEBUG_KERNEL
select TRACING
@@ -175,7 +175,7 @@ config TRACE_UNLIKELY_PROFILE
Say N if unsure.
-config TRACING_UNLIKELY
+config TRACING_BRANCHES
bool
help
Selected by tracers that will trace the likely and unlikely
@@ -183,10 +183,10 @@ config TRACING_UNLIKELY
profiled. Profiling the tracing infrastructure can only happen
when the likelys and unlikelys are not being traced.
-config UNLIKELY_TRACER
+config BRANCH_TRACER
bool "Trace likely/unlikely instances"
- depends on TRACE_UNLIKELY_PROFILE
- select TRACING_UNLIKELY
+ depends on TRACE_BRANCH_PROFILING
+ select TRACING_BRANCHES
help
This traces the events of likely and unlikely condition
calls in the kernel. The difference between this and the
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index c938d03..0087df7 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -11,9 +11,8 @@ obj-y += trace_selftest_dynamic.o
endif
# If unlikely tracing is enabled, do not trace these files
-ifdef CONFIG_TRACING_UNLIKELY
-KBUILD_CFLAGS += '-Dlikely(x)=likely_notrace(x)'
-KBUILD_CFLAGS += '-Dunlikely(x)=unlikely_notrace(x)'
+ifdef CONFIG_TRACING_BRANCHES
+KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
endif
obj-$(CONFIG_FUNCTION_TRACER) += libftrace.o
@@ -31,6 +30,6 @@ obj-$(CONFIG_STACK_TRACER) += trace_stack.o
obj-$(CONFIG_MMIOTRACE) += trace_mmiotrace.o
obj-$(CONFIG_BOOT_TRACER) += trace_boot.o
obj-$(CONFIG_FUNCTION_RET_TRACER) += trace_functions_return.o
-obj-$(CONFIG_TRACE_UNLIKELY_PROFILE) += trace_unlikely.o
+obj-$(CONFIG_TRACE_BRANCH_PROFILING) += trace_unlikely.o
libftrace-y := ftrace.o
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index d842db1..bad59d3 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -258,7 +258,7 @@ static const char *trace_options[] = {
"sched-tree",
"ftrace_printk",
"ftrace_preempt",
-#ifdef CONFIG_UNLIKELY_TRACER
+#ifdef CONFIG_BRANCH_TRACER
"unlikely",
#endif
NULL
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 9635aa2..dccae63 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -468,7 +468,7 @@ enum trace_iterator_flags {
TRACE_ITER_SCHED_TREE = 0x200,
TRACE_ITER_PRINTK = 0x400,
TRACE_ITER_PREEMPTONLY = 0x800,
-#ifdef CONFIG_UNLIKELY_TRACER
+#ifdef CONFIG_BRANCH_TRACER
TRACE_ITER_UNLIKELY = 0x1000,
#endif
};
@@ -530,7 +530,7 @@ static inline void ftrace_preempt_enable(int resched)
preempt_enable_notrace();
}
-#ifdef CONFIG_UNLIKELY_TRACER
+#ifdef CONFIG_BRANCH_TRACER
extern int enable_unlikely_tracing(struct trace_array *tr);
extern void disable_unlikely_tracing(void);
static inline int trace_unlikely_enable(struct trace_array *tr)
@@ -552,6 +552,6 @@ static inline int trace_unlikely_enable(struct trace_array *tr)
static inline void trace_unlikely_disable(void)
{
}
-#endif /* CONFIG_UNLIKELY_TRACER */
+#endif /* CONFIG_BRANCH_TRACER */
#endif /* _LINUX_KERNEL_TRACE_H */
diff --git a/kernel/trace/trace_unlikely.c b/kernel/trace/trace_unlikely.c
index 7290e0e..856eb3b 100644
--- a/kernel/trace/trace_unlikely.c
+++ b/kernel/trace/trace_unlikely.c
@@ -15,7 +15,7 @@
#include <asm/local.h>
#include "trace.h"
-#ifdef CONFIG_UNLIKELY_TRACER
+#ifdef CONFIG_BRANCH_TRACER
static int unlikely_tracing_enabled __read_mostly;
static DEFINE_MUTEX(unlikely_tracing_mutex);
@@ -119,7 +119,7 @@ static inline
void trace_likely_condition(struct ftrace_likely_data *f, int val, int expect)
{
}
-#endif /* CONFIG_UNLIKELY_TRACER */
+#endif /* CONFIG_BRANCH_TRACER */
void ftrace_likely_update(struct ftrace_likely_data *f, int val, int expect)
{
--
1.5.6.5
--
^ permalink raw reply related [flat|nested] 7+ messages in thread* [PATCH 2/4] ftrace: rename unlikely iter_ctrl to branch
2008-11-12 21:21 [PATCH 0/4] ftrace: updates for tip Steven Rostedt
2008-11-12 21:21 ` [PATCH 1/4] trace: rename unlikely profiler to branch profiler Steven Rostedt
@ 2008-11-12 21:21 ` Steven Rostedt
2008-11-12 21:21 ` [PATCH 3/4] ftrace: add tracer called branch Steven Rostedt
` (2 subsequent siblings)
4 siblings, 0 replies; 7+ messages in thread
From: Steven Rostedt @ 2008-11-12 21:21 UTC (permalink / raw)
To: linux-kernel; +Cc: Ingo Molnar, Andrew Morton, Steven Rostedt
[-- Attachment #1: 0002-ftrace-rename-unlikely-iter_ctrl-to-branch.patch --]
[-- Type: text/plain, Size: 8198 bytes --]
Impact: rename of iter_ctrl unlikely to branch
The unlikely name is ugly. This patch converts the iter_ctrl command
"unlikely" and "nounlikely" to "branch" and "nobranch" respectively.
It also renames a lot of internal functions to use "branch" instead
of "unlikely".
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
---
kernel/trace/trace.c | 14 +++++-----
kernel/trace/trace.h | 26 ++++++++++----------
kernel/trace/trace_unlikely.c | 50 ++++++++++++++++++++--------------------
3 files changed, 45 insertions(+), 45 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index bad59d3..4bf070b 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -259,7 +259,7 @@ static const char *trace_options[] = {
"ftrace_printk",
"ftrace_preempt",
#ifdef CONFIG_BRANCH_TRACER
- "unlikely",
+ "branch",
#endif
NULL
};
@@ -1651,8 +1651,8 @@ print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu)
trace_seq_print_cont(s, iter);
break;
}
- case TRACE_UNLIKELY: {
- struct trace_unlikely *field;
+ case TRACE_BRANCH: {
+ struct trace_branch *field;
trace_assign_type(field, entry);
@@ -1802,8 +1802,8 @@ static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
return print_return_function(iter);
break;
}
- case TRACE_UNLIKELY: {
- struct trace_unlikely *field;
+ case TRACE_BRANCH: {
+ struct trace_branch *field;
trace_assign_type(field, entry);
@@ -2619,7 +2619,7 @@ static int tracing_set_tracer(char *buf)
if (t == current_trace)
goto out;
- trace_unlikely_disable();
+ trace_branch_disable();
if (current_trace && current_trace->reset)
current_trace->reset(tr);
@@ -2627,7 +2627,7 @@ static int tracing_set_tracer(char *buf)
if (t->init)
t->init(tr);
- trace_unlikely_enable(tr);
+ trace_branch_enable(tr);
out:
mutex_unlock(&trace_types_lock);
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index dccae63..7fbf37b 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -22,7 +22,7 @@ enum trace_type {
TRACE_SPECIAL,
TRACE_MMIO_RW,
TRACE_MMIO_MAP,
- TRACE_UNLIKELY,
+ TRACE_BRANCH,
TRACE_BOOT_CALL,
TRACE_BOOT_RET,
TRACE_FN_RET,
@@ -137,7 +137,7 @@ struct trace_boot_ret {
#define TRACE_FUNC_SIZE 30
#define TRACE_FILE_SIZE 20
-struct trace_unlikely {
+struct trace_branch {
struct trace_entry ent;
unsigned line;
char func[TRACE_FUNC_SIZE+1];
@@ -247,7 +247,7 @@ extern void __ftrace_bad_type(void);
TRACE_MMIO_MAP); \
IF_ASSIGN(var, ent, struct trace_boot_call, TRACE_BOOT_CALL);\
IF_ASSIGN(var, ent, struct trace_boot_ret, TRACE_BOOT_RET);\
- IF_ASSIGN(var, ent, struct trace_unlikely, TRACE_UNLIKELY); \
+ IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH); \
IF_ASSIGN(var, ent, struct ftrace_ret_entry, TRACE_FN_RET);\
__ftrace_bad_type(); \
} while (0)
@@ -469,7 +469,7 @@ enum trace_iterator_flags {
TRACE_ITER_PRINTK = 0x400,
TRACE_ITER_PREEMPTONLY = 0x800,
#ifdef CONFIG_BRANCH_TRACER
- TRACE_ITER_UNLIKELY = 0x1000,
+ TRACE_ITER_BRANCH = 0x1000,
#endif
};
@@ -531,25 +531,25 @@ static inline void ftrace_preempt_enable(int resched)
}
#ifdef CONFIG_BRANCH_TRACER
-extern int enable_unlikely_tracing(struct trace_array *tr);
-extern void disable_unlikely_tracing(void);
-static inline int trace_unlikely_enable(struct trace_array *tr)
+extern int enable_branch_tracing(struct trace_array *tr);
+extern void disable_branch_tracing(void);
+static inline int trace_branch_enable(struct trace_array *tr)
{
- if (trace_flags & TRACE_ITER_UNLIKELY)
- return enable_unlikely_tracing(tr);
+ if (trace_flags & TRACE_ITER_BRANCH)
+ return enable_branch_tracing(tr);
return 0;
}
-static inline void trace_unlikely_disable(void)
+static inline void trace_branch_disable(void)
{
/* due to races, always disable */
- disable_unlikely_tracing();
+ disable_branch_tracing();
}
#else
-static inline int trace_unlikely_enable(struct trace_array *tr)
+static inline int trace_branch_enable(struct trace_array *tr)
{
return 0;
}
-static inline void trace_unlikely_disable(void)
+static inline void trace_branch_disable(void)
{
}
#endif /* CONFIG_BRANCH_TRACER */
diff --git a/kernel/trace/trace_unlikely.c b/kernel/trace/trace_unlikely.c
index 856eb3b..e5d5969 100644
--- a/kernel/trace/trace_unlikely.c
+++ b/kernel/trace/trace_unlikely.c
@@ -17,16 +17,16 @@
#ifdef CONFIG_BRANCH_TRACER
-static int unlikely_tracing_enabled __read_mostly;
-static DEFINE_MUTEX(unlikely_tracing_mutex);
-static struct trace_array *unlikely_tracer;
+static int branch_tracing_enabled __read_mostly;
+static DEFINE_MUTEX(branch_tracing_mutex);
+static struct trace_array *branch_tracer;
static void
-probe_likely_condition(struct ftrace_likely_data *f, int val, int expect)
+probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
{
- struct trace_array *tr = unlikely_tracer;
+ struct trace_array *tr = branch_tracer;
struct ring_buffer_event *event;
- struct trace_unlikely *entry;
+ struct trace_branch *entry;
unsigned long flags, irq_flags;
int cpu, pc;
const char *p;
@@ -54,7 +54,7 @@ probe_likely_condition(struct ftrace_likely_data *f, int val, int expect)
pc = preempt_count();
entry = ring_buffer_event_data(event);
tracing_generic_entry_update(&entry->ent, flags, pc);
- entry->ent.type = TRACE_UNLIKELY;
+ entry->ent.type = TRACE_BRANCH;
/* Strip off the path, only save the file */
p = f->file + strlen(f->file);
@@ -77,51 +77,51 @@ probe_likely_condition(struct ftrace_likely_data *f, int val, int expect)
}
static inline
-void trace_likely_condition(struct ftrace_likely_data *f, int val, int expect)
+void trace_likely_condition(struct ftrace_branch_data *f, int val, int expect)
{
- if (!unlikely_tracing_enabled)
+ if (!branch_tracing_enabled)
return;
probe_likely_condition(f, val, expect);
}
-int enable_unlikely_tracing(struct trace_array *tr)
+int enable_branch_tracing(struct trace_array *tr)
{
int ret = 0;
- mutex_lock(&unlikely_tracing_mutex);
- unlikely_tracer = tr;
+ mutex_lock(&branch_tracing_mutex);
+ branch_tracer = tr;
/*
* Must be seen before enabling. The reader is a condition
* where we do not need a matching rmb()
*/
smp_wmb();
- unlikely_tracing_enabled++;
- mutex_unlock(&unlikely_tracing_mutex);
+ branch_tracing_enabled++;
+ mutex_unlock(&branch_tracing_mutex);
return ret;
}
-void disable_unlikely_tracing(void)
+void disable_branch_tracing(void)
{
- mutex_lock(&unlikely_tracing_mutex);
+ mutex_lock(&branch_tracing_mutex);
- if (!unlikely_tracing_enabled)
+ if (!branch_tracing_enabled)
goto out_unlock;
- unlikely_tracing_enabled--;
+ branch_tracing_enabled--;
out_unlock:
- mutex_unlock(&unlikely_tracing_mutex);
+ mutex_unlock(&branch_tracing_mutex);
}
#else
static inline
-void trace_likely_condition(struct ftrace_likely_data *f, int val, int expect)
+void trace_likely_condition(struct ftrace_branch_data *f, int val, int expect)
{
}
#endif /* CONFIG_BRANCH_TRACER */
-void ftrace_likely_update(struct ftrace_likely_data *f, int val, int expect)
+void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect)
{
/*
* I would love to have a trace point here instead, but the
@@ -148,7 +148,7 @@ static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
struct ftrace_pointer *f = m->private;
- struct ftrace_likely_data *p = v;
+ struct ftrace_branch_data *p = v;
(*pos)++;
@@ -180,7 +180,7 @@ static void t_stop(struct seq_file *m, void *p)
static int t_show(struct seq_file *m, void *v)
{
- struct ftrace_likely_data *p = v;
+ struct ftrace_branch_data *p = v;
const char *f;
unsigned long percent;
@@ -252,7 +252,7 @@ static struct ftrace_pointer ftrace_unlikely_pos = {
.stop = __stop_unlikely_profile,
};
-static __init int ftrace_unlikely_init(void)
+static __init int ftrace_branch_init(void)
{
struct dentry *d_tracer;
struct dentry *entry;
@@ -275,4 +275,4 @@ static __init int ftrace_unlikely_init(void)
return 0;
}
-device_initcall(ftrace_unlikely_init);
+device_initcall(ftrace_branch_init);
--
1.5.6.5
--
^ permalink raw reply related [flat|nested] 7+ messages in thread* [PATCH 3/4] ftrace: add tracer called branch
2008-11-12 21:21 [PATCH 0/4] ftrace: updates for tip Steven Rostedt
2008-11-12 21:21 ` [PATCH 1/4] trace: rename unlikely profiler to branch profiler Steven Rostedt
2008-11-12 21:21 ` [PATCH 2/4] ftrace: rename unlikely iter_ctrl to branch Steven Rostedt
@ 2008-11-12 21:21 ` Steven Rostedt
2008-11-12 21:21 ` [PATCH 4/4] ftrace: rename trace_unlikely.c file Steven Rostedt
2008-11-12 21:28 ` [PATCH 0/4] ftrace: updates for tip Ingo Molnar
4 siblings, 0 replies; 7+ messages in thread
From: Steven Rostedt @ 2008-11-12 21:21 UTC (permalink / raw)
To: linux-kernel; +Cc: Ingo Molnar, Andrew Morton, Steven Rostedt
[-- Attachment #1: 0003-ftrace-add-tracer-called-branch.patch --]
[-- Type: text/plain, Size: 3340 bytes --]
Impact: added new branch tracer
Currently the tracing of branch profiling (hits of unlikely and likely
branches) is only activated by the iter_ctrl. This patch adds a tracer
called "branch" that will just trace the branch profiling. The advantage
of adding this tracer is that it can be added to the ftrace selftests
on startup.
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
---
kernel/trace/trace.h | 2 +
kernel/trace/trace_selftest.c | 23 ++++++++++++++++++++++
kernel/trace/trace_unlikely.c | 42 +++++++++++++++++++++++++++++++++++++++++
3 files changed, 67 insertions(+), 0 deletions(-)
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 7fbf37b..9e015f5 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -420,6 +420,8 @@ extern int trace_selftest_startup_sched_switch(struct tracer *trace,
struct trace_array *tr);
extern int trace_selftest_startup_sysprof(struct tracer *trace,
struct trace_array *tr);
+extern int trace_selftest_startup_branch(struct tracer *trace,
+ struct trace_array *tr);
#endif /* CONFIG_FTRACE_STARTUP_TEST */
extern void *head_page(struct trace_array_cpu *data);
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index 0728a10..24e6e07 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -13,6 +13,7 @@ static inline int trace_valid_entry(struct trace_entry *entry)
case TRACE_STACK:
case TRACE_PRINT:
case TRACE_SPECIAL:
+ case TRACE_BRANCH:
return 1;
}
return 0;
@@ -544,3 +545,25 @@ trace_selftest_startup_sysprof(struct tracer *trace, struct trace_array *tr)
return ret;
}
#endif /* CONFIG_SYSPROF_TRACER */
+
+#ifdef CONFIG_BRANCH_TRACER
+int
+trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
+{
+ unsigned long count;
+ int ret;
+
+ /* start the tracing */
+ trace->init(tr);
+ /* Sleep for a 1/10 of a second */
+ msleep(100);
+ /* stop the tracing. */
+ tracing_stop();
+ /* check the trace buffer */
+ ret = trace_test_buffer(tr, &count);
+ trace->reset(tr);
+ tracing_start();
+
+ return ret;
+}
+#endif /* CONFIG_BRANCH_TRACER */
diff --git a/kernel/trace/trace_unlikely.c b/kernel/trace/trace_unlikely.c
index e5d5969..8526555 100644
--- a/kernel/trace/trace_unlikely.c
+++ b/kernel/trace/trace_unlikely.c
@@ -114,6 +114,48 @@ void disable_branch_tracing(void)
out_unlock:
mutex_unlock(&branch_tracing_mutex);
}
+
+static void start_branch_trace(struct trace_array *tr)
+{
+ enable_branch_tracing(tr);
+}
+
+static void stop_branch_trace(struct trace_array *tr)
+{
+ disable_branch_tracing();
+}
+
+static void branch_trace_init(struct trace_array *tr)
+{
+ int cpu;
+
+ for_each_online_cpu(cpu)
+ tracing_reset(tr, cpu);
+
+ start_branch_trace(tr);
+}
+
+static void branch_trace_reset(struct trace_array *tr)
+{
+ stop_branch_trace(tr);
+}
+
+struct tracer branch_trace __read_mostly =
+{
+ .name = "branch",
+ .init = branch_trace_init,
+ .reset = branch_trace_reset,
+#ifdef CONFIG_FTRACE_SELFTEST
+ .selftest = trace_selftest_startup_branch,
+#endif
+};
+
+__init static int init_branch_trace(void)
+{
+ return register_tracer(&branch_trace);
+}
+
+device_initcall(init_branch_trace);
#else
static inline
void trace_likely_condition(struct ftrace_branch_data *f, int val, int expect)
--
1.5.6.5
--
^ permalink raw reply related [flat|nested] 7+ messages in thread* [PATCH 4/4] ftrace: rename trace_unlikely.c file
2008-11-12 21:21 [PATCH 0/4] ftrace: updates for tip Steven Rostedt
` (2 preceding siblings ...)
2008-11-12 21:21 ` [PATCH 3/4] ftrace: add tracer called branch Steven Rostedt
@ 2008-11-12 21:21 ` Steven Rostedt
2008-11-12 21:28 ` [PATCH 0/4] ftrace: updates for tip Ingo Molnar
4 siblings, 0 replies; 7+ messages in thread
From: Steven Rostedt @ 2008-11-12 21:21 UTC (permalink / raw)
To: linux-kernel; +Cc: Ingo Molnar, Andrew Morton, Steven Rostedt
[-- Attachment #1: 0004-ftrace-rename-trace_unlikely.c-file.patch --]
[-- Type: text/plain, Size: 15978 bytes --]
Impact: File name change of trace_unlikely.c
The "unlikely" name for the tracer is quite ugly. We renamed all the
parts of it to "branch" and now it is time to rename the file too.
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
---
kernel/trace/Makefile | 2 +-
kernel/trace/trace_branch.c | 320 +++++++++++++++++++++++++++++++++++++++++
kernel/trace/trace_unlikely.c | 320 -----------------------------------------
3 files changed, 321 insertions(+), 321 deletions(-)
create mode 100644 kernel/trace/trace_branch.c
delete mode 100644 kernel/trace/trace_unlikely.c
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index 0087df7..1a8c925 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -30,6 +30,6 @@ obj-$(CONFIG_STACK_TRACER) += trace_stack.o
obj-$(CONFIG_MMIOTRACE) += trace_mmiotrace.o
obj-$(CONFIG_BOOT_TRACER) += trace_boot.o
obj-$(CONFIG_FUNCTION_RET_TRACER) += trace_functions_return.o
-obj-$(CONFIG_TRACE_BRANCH_PROFILING) += trace_unlikely.o
+obj-$(CONFIG_TRACE_BRANCH_PROFILING) += trace_branch.o
libftrace-y := ftrace.o
diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c
new file mode 100644
index 0000000..8526555
--- /dev/null
+++ b/kernel/trace/trace_branch.c
@@ -0,0 +1,320 @@
+/*
+ * unlikely profiler
+ *
+ * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
+ */
+#include <linux/kallsyms.h>
+#include <linux/seq_file.h>
+#include <linux/spinlock.h>
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <linux/ftrace.h>
+#include <linux/hash.h>
+#include <linux/fs.h>
+#include <asm/local.h>
+#include "trace.h"
+
+#ifdef CONFIG_BRANCH_TRACER
+
+static int branch_tracing_enabled __read_mostly;
+static DEFINE_MUTEX(branch_tracing_mutex);
+static struct trace_array *branch_tracer;
+
+static void
+probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
+{
+ struct trace_array *tr = branch_tracer;
+ struct ring_buffer_event *event;
+ struct trace_branch *entry;
+ unsigned long flags, irq_flags;
+ int cpu, pc;
+ const char *p;
+
+ /*
+ * I would love to save just the ftrace_likely_data pointer, but
+ * this code can also be used by modules. Ugly things can happen
+ * if the module is unloaded, and then we go and read the
+ * pointer. This is slower, but much safer.
+ */
+
+ if (unlikely(!tr))
+ return;
+
+ local_irq_save(flags);
+ cpu = raw_smp_processor_id();
+ if (atomic_inc_return(&tr->data[cpu]->disabled) != 1)
+ goto out;
+
+ event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
+ &irq_flags);
+ if (!event)
+ goto out;
+
+ pc = preempt_count();
+ entry = ring_buffer_event_data(event);
+ tracing_generic_entry_update(&entry->ent, flags, pc);
+ entry->ent.type = TRACE_BRANCH;
+
+ /* Strip off the path, only save the file */
+ p = f->file + strlen(f->file);
+ while (p >= f->file && *p != '/')
+ p--;
+ p++;
+
+ strncpy(entry->func, f->func, TRACE_FUNC_SIZE);
+ strncpy(entry->file, p, TRACE_FILE_SIZE);
+ entry->func[TRACE_FUNC_SIZE] = 0;
+ entry->file[TRACE_FILE_SIZE] = 0;
+ entry->line = f->line;
+ entry->correct = val == expect;
+
+ ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
+
+ out:
+ atomic_dec(&tr->data[cpu]->disabled);
+ local_irq_restore(flags);
+}
+
+static inline
+void trace_likely_condition(struct ftrace_branch_data *f, int val, int expect)
+{
+ if (!branch_tracing_enabled)
+ return;
+
+ probe_likely_condition(f, val, expect);
+}
+
+int enable_branch_tracing(struct trace_array *tr)
+{
+ int ret = 0;
+
+ mutex_lock(&branch_tracing_mutex);
+ branch_tracer = tr;
+ /*
+ * Must be seen before enabling. The reader is a condition
+ * where we do not need a matching rmb()
+ */
+ smp_wmb();
+ branch_tracing_enabled++;
+ mutex_unlock(&branch_tracing_mutex);
+
+ return ret;
+}
+
+void disable_branch_tracing(void)
+{
+ mutex_lock(&branch_tracing_mutex);
+
+ if (!branch_tracing_enabled)
+ goto out_unlock;
+
+ branch_tracing_enabled--;
+
+ out_unlock:
+ mutex_unlock(&branch_tracing_mutex);
+}
+
+static void start_branch_trace(struct trace_array *tr)
+{
+ enable_branch_tracing(tr);
+}
+
+static void stop_branch_trace(struct trace_array *tr)
+{
+ disable_branch_tracing();
+}
+
+static void branch_trace_init(struct trace_array *tr)
+{
+ int cpu;
+
+ for_each_online_cpu(cpu)
+ tracing_reset(tr, cpu);
+
+ start_branch_trace(tr);
+}
+
+static void branch_trace_reset(struct trace_array *tr)
+{
+ stop_branch_trace(tr);
+}
+
+struct tracer branch_trace __read_mostly =
+{
+ .name = "branch",
+ .init = branch_trace_init,
+ .reset = branch_trace_reset,
+#ifdef CONFIG_FTRACE_SELFTEST
+ .selftest = trace_selftest_startup_branch,
+#endif
+};
+
+__init static int init_branch_trace(void)
+{
+ return register_tracer(&branch_trace);
+}
+
+device_initcall(init_branch_trace);
+#else
+static inline
+void trace_likely_condition(struct ftrace_branch_data *f, int val, int expect)
+{
+}
+#endif /* CONFIG_BRANCH_TRACER */
+
+void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect)
+{
+ /*
+ * I would love to have a trace point here instead, but the
+ * trace point code is so inundated with unlikely and likely
+ * conditions that the recursive nightmare that exists is too
+ * much to try to get working. At least for now.
+ */
+ trace_likely_condition(f, val, expect);
+
+ /* FIXME: Make this atomic! */
+ if (val == expect)
+ f->correct++;
+ else
+ f->incorrect++;
+}
+EXPORT_SYMBOL(ftrace_likely_update);
+
+struct ftrace_pointer {
+ void *start;
+ void *stop;
+};
+
+static void *
+t_next(struct seq_file *m, void *v, loff_t *pos)
+{
+ struct ftrace_pointer *f = m->private;
+ struct ftrace_branch_data *p = v;
+
+ (*pos)++;
+
+ if (v == (void *)1)
+ return f->start;
+
+ ++p;
+
+ if ((void *)p >= (void *)f->stop)
+ return NULL;
+
+ return p;
+}
+
+static void *t_start(struct seq_file *m, loff_t *pos)
+{
+ void *t = (void *)1;
+ loff_t l = 0;
+
+ for (; t && l < *pos; t = t_next(m, t, &l))
+ ;
+
+ return t;
+}
+
+static void t_stop(struct seq_file *m, void *p)
+{
+}
+
+static int t_show(struct seq_file *m, void *v)
+{
+ struct ftrace_branch_data *p = v;
+ const char *f;
+ unsigned long percent;
+
+ if (v == (void *)1) {
+ seq_printf(m, " correct incorrect %% "
+ " Function "
+ " File Line\n"
+ " ------- --------- - "
+ " -------- "
+ " ---- ----\n");
+ return 0;
+ }
+
+ /* Only print the file, not the path */
+ f = p->file + strlen(p->file);
+ while (f >= p->file && *f != '/')
+ f--;
+ f++;
+
+ if (p->correct) {
+ percent = p->incorrect * 100;
+ percent /= p->correct + p->incorrect;
+ } else
+ percent = p->incorrect ? 100 : 0;
+
+ seq_printf(m, "%8lu %8lu %3lu ", p->correct, p->incorrect, percent);
+ seq_printf(m, "%-30.30s %-20.20s %d\n", p->func, f, p->line);
+ return 0;
+}
+
+static struct seq_operations tracing_likely_seq_ops = {
+ .start = t_start,
+ .next = t_next,
+ .stop = t_stop,
+ .show = t_show,
+};
+
+static int tracing_likely_open(struct inode *inode, struct file *file)
+{
+ int ret;
+
+ ret = seq_open(file, &tracing_likely_seq_ops);
+ if (!ret) {
+ struct seq_file *m = file->private_data;
+ m->private = (void *)inode->i_private;
+ }
+
+ return ret;
+}
+
+static struct file_operations tracing_likely_fops = {
+ .open = tracing_likely_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+};
+
+extern unsigned long __start_likely_profile[];
+extern unsigned long __stop_likely_profile[];
+extern unsigned long __start_unlikely_profile[];
+extern unsigned long __stop_unlikely_profile[];
+
+static struct ftrace_pointer ftrace_likely_pos = {
+ .start = __start_likely_profile,
+ .stop = __stop_likely_profile,
+};
+
+static struct ftrace_pointer ftrace_unlikely_pos = {
+ .start = __start_unlikely_profile,
+ .stop = __stop_unlikely_profile,
+};
+
+static __init int ftrace_branch_init(void)
+{
+ struct dentry *d_tracer;
+ struct dentry *entry;
+
+ d_tracer = tracing_init_dentry();
+
+ entry = debugfs_create_file("profile_likely", 0444, d_tracer,
+ &ftrace_likely_pos,
+ &tracing_likely_fops);
+ if (!entry)
+ pr_warning("Could not create debugfs 'profile_likely' entry\n");
+
+ entry = debugfs_create_file("profile_unlikely", 0444, d_tracer,
+ &ftrace_unlikely_pos,
+ &tracing_likely_fops);
+ if (!entry)
+ pr_warning("Could not create debugfs"
+ " 'profile_unlikely' entry\n");
+
+ return 0;
+}
+
+device_initcall(ftrace_branch_init);
diff --git a/kernel/trace/trace_unlikely.c b/kernel/trace/trace_unlikely.c
deleted file mode 100644
index 8526555..0000000
--- a/kernel/trace/trace_unlikely.c
+++ /dev/null
@@ -1,320 +0,0 @@
-/*
- * unlikely profiler
- *
- * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
- */
-#include <linux/kallsyms.h>
-#include <linux/seq_file.h>
-#include <linux/spinlock.h>
-#include <linux/debugfs.h>
-#include <linux/uaccess.h>
-#include <linux/module.h>
-#include <linux/ftrace.h>
-#include <linux/hash.h>
-#include <linux/fs.h>
-#include <asm/local.h>
-#include "trace.h"
-
-#ifdef CONFIG_BRANCH_TRACER
-
-static int branch_tracing_enabled __read_mostly;
-static DEFINE_MUTEX(branch_tracing_mutex);
-static struct trace_array *branch_tracer;
-
-static void
-probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
-{
- struct trace_array *tr = branch_tracer;
- struct ring_buffer_event *event;
- struct trace_branch *entry;
- unsigned long flags, irq_flags;
- int cpu, pc;
- const char *p;
-
- /*
- * I would love to save just the ftrace_likely_data pointer, but
- * this code can also be used by modules. Ugly things can happen
- * if the module is unloaded, and then we go and read the
- * pointer. This is slower, but much safer.
- */
-
- if (unlikely(!tr))
- return;
-
- local_irq_save(flags);
- cpu = raw_smp_processor_id();
- if (atomic_inc_return(&tr->data[cpu]->disabled) != 1)
- goto out;
-
- event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
- &irq_flags);
- if (!event)
- goto out;
-
- pc = preempt_count();
- entry = ring_buffer_event_data(event);
- tracing_generic_entry_update(&entry->ent, flags, pc);
- entry->ent.type = TRACE_BRANCH;
-
- /* Strip off the path, only save the file */
- p = f->file + strlen(f->file);
- while (p >= f->file && *p != '/')
- p--;
- p++;
-
- strncpy(entry->func, f->func, TRACE_FUNC_SIZE);
- strncpy(entry->file, p, TRACE_FILE_SIZE);
- entry->func[TRACE_FUNC_SIZE] = 0;
- entry->file[TRACE_FILE_SIZE] = 0;
- entry->line = f->line;
- entry->correct = val == expect;
-
- ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
-
- out:
- atomic_dec(&tr->data[cpu]->disabled);
- local_irq_restore(flags);
-}
-
-static inline
-void trace_likely_condition(struct ftrace_branch_data *f, int val, int expect)
-{
- if (!branch_tracing_enabled)
- return;
-
- probe_likely_condition(f, val, expect);
-}
-
-int enable_branch_tracing(struct trace_array *tr)
-{
- int ret = 0;
-
- mutex_lock(&branch_tracing_mutex);
- branch_tracer = tr;
- /*
- * Must be seen before enabling. The reader is a condition
- * where we do not need a matching rmb()
- */
- smp_wmb();
- branch_tracing_enabled++;
- mutex_unlock(&branch_tracing_mutex);
-
- return ret;
-}
-
-void disable_branch_tracing(void)
-{
- mutex_lock(&branch_tracing_mutex);
-
- if (!branch_tracing_enabled)
- goto out_unlock;
-
- branch_tracing_enabled--;
-
- out_unlock:
- mutex_unlock(&branch_tracing_mutex);
-}
-
-static void start_branch_trace(struct trace_array *tr)
-{
- enable_branch_tracing(tr);
-}
-
-static void stop_branch_trace(struct trace_array *tr)
-{
- disable_branch_tracing();
-}
-
-static void branch_trace_init(struct trace_array *tr)
-{
- int cpu;
-
- for_each_online_cpu(cpu)
- tracing_reset(tr, cpu);
-
- start_branch_trace(tr);
-}
-
-static void branch_trace_reset(struct trace_array *tr)
-{
- stop_branch_trace(tr);
-}
-
-struct tracer branch_trace __read_mostly =
-{
- .name = "branch",
- .init = branch_trace_init,
- .reset = branch_trace_reset,
-#ifdef CONFIG_FTRACE_SELFTEST
- .selftest = trace_selftest_startup_branch,
-#endif
-};
-
-__init static int init_branch_trace(void)
-{
- return register_tracer(&branch_trace);
-}
-
-device_initcall(init_branch_trace);
-#else
-static inline
-void trace_likely_condition(struct ftrace_branch_data *f, int val, int expect)
-{
-}
-#endif /* CONFIG_BRANCH_TRACER */
-
-void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect)
-{
- /*
- * I would love to have a trace point here instead, but the
- * trace point code is so inundated with unlikely and likely
- * conditions that the recursive nightmare that exists is too
- * much to try to get working. At least for now.
- */
- trace_likely_condition(f, val, expect);
-
- /* FIXME: Make this atomic! */
- if (val == expect)
- f->correct++;
- else
- f->incorrect++;
-}
-EXPORT_SYMBOL(ftrace_likely_update);
-
-struct ftrace_pointer {
- void *start;
- void *stop;
-};
-
-static void *
-t_next(struct seq_file *m, void *v, loff_t *pos)
-{
- struct ftrace_pointer *f = m->private;
- struct ftrace_branch_data *p = v;
-
- (*pos)++;
-
- if (v == (void *)1)
- return f->start;
-
- ++p;
-
- if ((void *)p >= (void *)f->stop)
- return NULL;
-
- return p;
-}
-
-static void *t_start(struct seq_file *m, loff_t *pos)
-{
- void *t = (void *)1;
- loff_t l = 0;
-
- for (; t && l < *pos; t = t_next(m, t, &l))
- ;
-
- return t;
-}
-
-static void t_stop(struct seq_file *m, void *p)
-{
-}
-
-static int t_show(struct seq_file *m, void *v)
-{
- struct ftrace_branch_data *p = v;
- const char *f;
- unsigned long percent;
-
- if (v == (void *)1) {
- seq_printf(m, " correct incorrect %% "
- " Function "
- " File Line\n"
- " ------- --------- - "
- " -------- "
- " ---- ----\n");
- return 0;
- }
-
- /* Only print the file, not the path */
- f = p->file + strlen(p->file);
- while (f >= p->file && *f != '/')
- f--;
- f++;
-
- if (p->correct) {
- percent = p->incorrect * 100;
- percent /= p->correct + p->incorrect;
- } else
- percent = p->incorrect ? 100 : 0;
-
- seq_printf(m, "%8lu %8lu %3lu ", p->correct, p->incorrect, percent);
- seq_printf(m, "%-30.30s %-20.20s %d\n", p->func, f, p->line);
- return 0;
-}
-
-static struct seq_operations tracing_likely_seq_ops = {
- .start = t_start,
- .next = t_next,
- .stop = t_stop,
- .show = t_show,
-};
-
-static int tracing_likely_open(struct inode *inode, struct file *file)
-{
- int ret;
-
- ret = seq_open(file, &tracing_likely_seq_ops);
- if (!ret) {
- struct seq_file *m = file->private_data;
- m->private = (void *)inode->i_private;
- }
-
- return ret;
-}
-
-static struct file_operations tracing_likely_fops = {
- .open = tracing_likely_open,
- .read = seq_read,
- .llseek = seq_lseek,
-};
-
-extern unsigned long __start_likely_profile[];
-extern unsigned long __stop_likely_profile[];
-extern unsigned long __start_unlikely_profile[];
-extern unsigned long __stop_unlikely_profile[];
-
-static struct ftrace_pointer ftrace_likely_pos = {
- .start = __start_likely_profile,
- .stop = __stop_likely_profile,
-};
-
-static struct ftrace_pointer ftrace_unlikely_pos = {
- .start = __start_unlikely_profile,
- .stop = __stop_unlikely_profile,
-};
-
-static __init int ftrace_branch_init(void)
-{
- struct dentry *d_tracer;
- struct dentry *entry;
-
- d_tracer = tracing_init_dentry();
-
- entry = debugfs_create_file("profile_likely", 0444, d_tracer,
- &ftrace_likely_pos,
- &tracing_likely_fops);
- if (!entry)
- pr_warning("Could not create debugfs 'profile_likely' entry\n");
-
- entry = debugfs_create_file("profile_unlikely", 0444, d_tracer,
- &ftrace_unlikely_pos,
- &tracing_likely_fops);
- if (!entry)
- pr_warning("Could not create debugfs"
- " 'profile_unlikely' entry\n");
-
- return 0;
-}
-
-device_initcall(ftrace_branch_init);
--
1.5.6.5
--
^ permalink raw reply related [flat|nested] 7+ messages in thread* Re: [PATCH 0/4] ftrace: updates for tip
2008-11-12 21:21 [PATCH 0/4] ftrace: updates for tip Steven Rostedt
` (3 preceding siblings ...)
2008-11-12 21:21 ` [PATCH 4/4] ftrace: rename trace_unlikely.c file Steven Rostedt
@ 2008-11-12 21:28 ` Ingo Molnar
4 siblings, 0 replies; 7+ messages in thread
From: Ingo Molnar @ 2008-11-12 21:28 UTC (permalink / raw)
To: Steven Rostedt; +Cc: linux-kernel, Andrew Morton
* Steven Rostedt <rostedt@goodmis.org> wrote:
>
> Ingo,
>
> The following patches are in:
>
> git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-2.6-trace.git
>
> branch: tip/devel
>
>
> Steven Rostedt (4):
> trace: rename unlikely profiler to branch profiler
> ftrace: rename unlikely iter_ctrl to branch
> ftrace: add tracer called branch
> ftrace: rename trace_unlikely.c file
>
> ----
> arch/x86/kernel/vsyscall_64.c | 2 +-
> arch/x86/vdso/vclock_gettime.c | 2 +-
> include/asm-generic/vmlinux.lds.h | 2 +-
> include/linux/compiler.h | 19 ++-
> kernel/trace/Kconfig | 10 +-
> kernel/trace/Makefile | 7 +-
> kernel/trace/trace.c | 16 +-
> kernel/trace/trace.h | 34 ++--
> kernel/trace/trace_branch.c | 320 +++++++++++++++++++++++++++++++++++++
> kernel/trace/trace_selftest.c | 23 +++
> kernel/trace/trace_unlikely.c | 278 --------------------------------
> 11 files changed, 390 insertions(+), 323 deletions(-)
pulled, thanks Steve!
Ingo
^ permalink raw reply [flat|nested] 7+ messages in thread