* [PATCHv3 1/2] tracing: graph output support for irqsoff tracer
2010-03-26 12:32 [PATCHv3 0/2] tracing: function graph output for preempt/irqs-off tracers Jiri Olsa
@ 2010-03-26 12:32 ` Jiri Olsa
2010-03-26 12:32 ` [PATCHv3 2/2] tracing: graph output support for preemptirqsoff/preemptoff tracers Jiri Olsa
2010-03-26 14:53 ` [PATCHv3 0/2] tracing: function graph output for preempt/irqs-off tracers Steven Rostedt
2 siblings, 0 replies; 11+ messages in thread
From: Jiri Olsa @ 2010-03-26 12:32 UTC (permalink / raw)
To: rostedt; +Cc: linux-kernel, Jiri Olsa
hi,
when I was using the irqsoff tracer, I thought it could be of some use
to see the disabled-IRQs flow output the same way as the function graph output.
The graph output is enabled by setting new 'display-graph' trace option
of irqsoff tracer.
wbr,
jirka
Signed-off-by: Jiri Olsa <jolsa@redhat.com>
---
kernel/trace/trace.c | 31 ++++--
kernel/trace/trace.h | 10 ++
kernel/trace/trace_functions_graph.c | 15 ++-
kernel/trace/trace_irqsoff.c | 184 ++++++++++++++++++++++++++++++++--
4 files changed, 217 insertions(+), 23 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 3ec2ee6..2229f5a 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -2058,6 +2058,23 @@ static enum print_line_t print_trace_line(struct trace_iterator *iter)
return print_trace_fmt(iter);
}
+void trace_default_header(struct seq_file *m)
+{
+ struct trace_iterator *iter = m->private;
+
+ if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
+ /* print nothing if the buffers are empty */
+ if (trace_empty(iter))
+ return;
+ print_trace_header(m, iter);
+ if (!(trace_flags & TRACE_ITER_VERBOSE))
+ print_lat_help_header(m);
+ } else {
+ if (!(trace_flags & TRACE_ITER_VERBOSE))
+ print_func_help_header(m);
+ }
+}
+
static int s_show(struct seq_file *m, void *v)
{
struct trace_iterator *iter = v;
@@ -2070,17 +2087,9 @@ static int s_show(struct seq_file *m, void *v)
}
if (iter->trace && iter->trace->print_header)
iter->trace->print_header(m);
- else if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
- /* print nothing if the buffers are empty */
- if (trace_empty(iter))
- return 0;
- print_trace_header(m, iter);
- if (!(trace_flags & TRACE_ITER_VERBOSE))
- print_lat_help_header(m);
- } else {
- if (!(trace_flags & TRACE_ITER_VERBOSE))
- print_func_help_header(m);
- }
+ else
+ trace_default_header(m);
+
} else if (iter->leftover) {
/*
* If we filled the seq_file buffer earlier, we
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 2825ef2..42ad603 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -378,6 +378,7 @@ void trace_function(struct trace_array *tr,
unsigned long ip,
unsigned long parent_ip,
unsigned long flags, int pc);
+void trace_default_header(struct seq_file *m);
void trace_graph_return(struct ftrace_graph_ret *trace);
int trace_graph_entry(struct ftrace_graph_ent *trace);
@@ -492,8 +493,17 @@ extern int trace_clock_id;
/* Standard output formatting function used for function return traces */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
extern enum print_line_t print_graph_function(struct trace_iterator *iter);
+extern void print_graph_headers(struct seq_file *s);
extern enum print_line_t
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s);
+extern void graph_trace_open(struct trace_iterator *iter);
+extern void graph_trace_close(struct trace_iterator *iter);
+extern int __trace_graph_entry(struct trace_array *tr,
+ struct ftrace_graph_ent *trace,
+ unsigned long flags, int pc);
+extern void __trace_graph_return(struct trace_array *tr,
+ struct ftrace_graph_ret *trace,
+ unsigned long flags, int pc);
#ifdef CONFIG_DYNAMIC_FTRACE
/* TODO: make this variable */
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index e6989d9..d1b5d38 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -178,7 +178,7 @@ unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
return ret;
}
-static int __trace_graph_entry(struct trace_array *tr,
+int __trace_graph_entry(struct trace_array *tr,
struct ftrace_graph_ent *trace,
unsigned long flags,
int pc)
@@ -245,7 +245,7 @@ int trace_graph_thresh_entry(struct ftrace_graph_ent *trace)
return trace_graph_entry(trace);
}
-static void __trace_graph_return(struct trace_array *tr,
+void __trace_graph_return(struct trace_array *tr,
struct ftrace_graph_ret *trace,
unsigned long flags,
int pc)
@@ -1087,6 +1087,11 @@ print_graph_function(struct trace_iterator *iter)
trace_assign_type(field, entry);
return print_graph_return(&field->ret, s, entry, iter);
}
+ case TRACE_STACK:
+ case TRACE_FN:
+ /* dont trace stack and functions as comments */
+ return TRACE_TYPE_UNHANDLED;
+
default:
return print_graph_comment(s, entry, iter);
}
@@ -1116,7 +1121,7 @@ static void print_lat_header(struct seq_file *s)
seq_printf(s, "#%.*s|||| / \n", size, spaces);
}
-static void print_graph_headers(struct seq_file *s)
+void print_graph_headers(struct seq_file *s)
{
int lat = trace_flags & TRACE_ITER_LATENCY_FMT;
@@ -1152,7 +1157,7 @@ static void print_graph_headers(struct seq_file *s)
seq_printf(s, " | | | |\n");
}
-static void graph_trace_open(struct trace_iterator *iter)
+void graph_trace_open(struct trace_iterator *iter)
{
/* pid and depth on the last trace processed */
struct fgraph_data *data;
@@ -1187,7 +1192,7 @@ static void graph_trace_open(struct trace_iterator *iter)
pr_warning("function graph tracer: not enough memory\n");
}
-static void graph_trace_close(struct trace_iterator *iter)
+void graph_trace_close(struct trace_iterator *iter)
{
struct fgraph_data *data = iter->private;
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index 2974bc7..2b67e35 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -34,6 +34,9 @@ static int trace_type __read_mostly;
static int save_lat_flag;
+static void stop_irqsoff_tracer(struct trace_array *tr, int graph);
+static int start_irqsoff_tracer(struct trace_array *tr, int graph);
+
#ifdef CONFIG_PREEMPT_TRACER
static inline int
preempt_trace(void)
@@ -55,6 +58,23 @@ irq_trace(void)
# define irq_trace() (0)
#endif
+#define TRACE_DISPLAY_GRAPH 1
+
+static struct tracer_opt trace_opts[] = {
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ /* display latency trace as call graph */
+ { TRACER_OPT(display-graph, TRACE_DISPLAY_GRAPH) },
+#endif
+ { } /* Empty entry */
+};
+
+static struct tracer_flags tracer_flags = {
+ .val = 0,
+ .opts = trace_opts,
+};
+
+#define is_graph() (tracer_flags.val & TRACE_DISPLAY_GRAPH)
+
/*
* Sequence count - we record it when starting a measurement and
* skip the latency if the sequence has changed - some other section
@@ -108,6 +128,129 @@ static struct ftrace_ops trace_ops __read_mostly =
};
#endif /* CONFIG_FUNCTION_TRACER */
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+static int irqsoff_set_flag(u32 old_flags, u32 bit, int set)
+{
+ int cpu;
+
+ if (!(bit & TRACE_DISPLAY_GRAPH))
+ return -EINVAL;
+
+ if (!(is_graph() ^ set))
+ return 0;
+
+ stop_irqsoff_tracer(irqsoff_trace, !set);
+
+ for_each_possible_cpu(cpu)
+ per_cpu(tracing_cpu, cpu) = 0;
+
+ tracing_max_latency = 0;
+ tracing_reset_online_cpus(irqsoff_trace);
+
+ return start_irqsoff_tracer(irqsoff_trace, set);
+}
+
+static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
+{
+ struct trace_array *tr = irqsoff_trace;
+ struct trace_array_cpu *data;
+ unsigned long flags;
+ long disabled;
+ int ret;
+ int cpu;
+ int pc;
+
+ cpu = raw_smp_processor_id();
+ if (likely(!per_cpu(tracing_cpu, cpu)))
+ return 0;
+
+ local_save_flags(flags);
+ /* slight chance to get a false positive on tracing_cpu */
+ if (!irqs_disabled_flags(flags))
+ return 0;
+
+ data = tr->data[cpu];
+ disabled = atomic_inc_return(&data->disabled);
+
+ if (likely(disabled == 1)) {
+ pc = preempt_count();
+ ret = __trace_graph_entry(tr, trace, flags, pc);
+ } else
+ ret = 0;
+
+ atomic_dec(&data->disabled);
+ return ret;
+}
+
+static void irqsoff_graph_return(struct ftrace_graph_ret *trace)
+{
+ struct trace_array *tr = irqsoff_trace;
+ struct trace_array_cpu *data;
+ unsigned long flags;
+ long disabled;
+ int cpu;
+ int pc;
+
+ cpu = raw_smp_processor_id();
+ if (likely(!per_cpu(tracing_cpu, cpu)))
+ return;
+
+ local_save_flags(flags);
+ /* slight chance to get a false positive on tracing_cpu */
+ if (!irqs_disabled_flags(flags))
+ return;
+
+ data = tr->data[cpu];
+ disabled = atomic_inc_return(&data->disabled);
+
+ if (likely(disabled == 1)) {
+ pc = preempt_count();
+ __trace_graph_return(tr, trace, flags, pc);
+ }
+
+ atomic_dec(&data->disabled);
+}
+
+static void irqsoff_trace_open(struct trace_iterator *iter)
+{
+ if (is_graph())
+ graph_trace_open(iter);
+
+}
+
+static void irqsoff_trace_close(struct trace_iterator *iter)
+{
+ if (iter->private)
+ graph_trace_close(iter);
+}
+
+static enum print_line_t
+irqsoff_print_line(struct trace_iterator *iter)
+{
+ /*
+ * In graph mode call the graph tracer output function,
+ * otherwise go with the TRACE_FN event handler
+ */
+ if (is_graph())
+ return print_graph_function(iter);
+
+ return TRACE_TYPE_UNHANDLED;
+}
+
+void irqsoff_print_header(struct seq_file *s)
+{
+ if (is_graph())
+ print_graph_headers(s);
+ else
+ trace_default_header(s);
+}
+#else
+static int irqsoff_set_flag(u32 old_flags, u32 bit, int set)
+{
+ return -EINVAL;
+}
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
/*
* Should this new latency be reported/recorded?
*/
@@ -347,19 +490,36 @@ void trace_preempt_off(unsigned long a0, unsigned long a1)
}
#endif /* CONFIG_PREEMPT_TRACER */
-static void start_irqsoff_tracer(struct trace_array *tr)
+static int start_irqsoff_tracer(struct trace_array *tr, int graph)
{
- register_ftrace_function(&trace_ops);
- if (tracing_is_enabled())
+ int ret = 0;
+
+ if (!graph)
+ ret = register_ftrace_function(&trace_ops);
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ else
+ ret = register_ftrace_graph(&irqsoff_graph_return,
+ &irqsoff_graph_entry);
+#endif
+
+ if (!ret && tracing_is_enabled())
tracer_enabled = 1;
else
tracer_enabled = 0;
+
+ return ret;
}
-static void stop_irqsoff_tracer(struct trace_array *tr)
+static void stop_irqsoff_tracer(struct trace_array *tr, int graph)
{
tracer_enabled = 0;
- unregister_ftrace_function(&trace_ops);
+
+ if (!graph)
+ unregister_ftrace_function(&trace_ops);
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ else
+ unregister_ftrace_graph();
+#endif
}
static void __irqsoff_tracer_init(struct trace_array *tr)
@@ -372,12 +532,14 @@ static void __irqsoff_tracer_init(struct trace_array *tr)
/* make sure that the tracer is visible */
smp_wmb();
tracing_reset_online_cpus(tr);
- start_irqsoff_tracer(tr);
+
+ if (start_irqsoff_tracer(tr, is_graph()))
+ printk(KERN_ERR "failed to start irqsoff tracer\n");
}
static void irqsoff_tracer_reset(struct trace_array *tr)
{
- stop_irqsoff_tracer(tr);
+ stop_irqsoff_tracer(tr, is_graph());
if (!save_lat_flag)
trace_flags &= ~TRACE_ITER_LATENCY_FMT;
@@ -409,9 +571,17 @@ static struct tracer irqsoff_tracer __read_mostly =
.start = irqsoff_tracer_start,
.stop = irqsoff_tracer_stop,
.print_max = 1,
+ .print_header = irqsoff_print_header,
+ .print_line = irqsoff_print_line,
+ .flags = &tracer_flags,
+ .set_flag = irqsoff_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
.selftest = trace_selftest_startup_irqsoff,
#endif
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ .open = irqsoff_trace_open,
+ .close = irqsoff_trace_close,
+#endif
};
# define register_irqsoff(trace) register_tracer(&trace)
#else
--
1.6.6.1
^ permalink raw reply related [flat|nested] 11+ messages in thread* [PATCHv3 2/2] tracing: graph output support for preemptirqsoff/preemptoff tracers
2010-03-26 12:32 [PATCHv3 0/2] tracing: function graph output for preempt/irqs-off tracers Jiri Olsa
2010-03-26 12:32 ` [PATCHv3 1/2] tracing: graph output support for irqsoff tracer Jiri Olsa
@ 2010-03-26 12:32 ` Jiri Olsa
2010-03-26 14:53 ` [PATCHv3 0/2] tracing: function graph output for preempt/irqs-off tracers Steven Rostedt
2 siblings, 0 replies; 11+ messages in thread
From: Jiri Olsa @ 2010-03-26 12:32 UTC (permalink / raw)
To: rostedt; +Cc: linux-kernel, Jiri Olsa
hi,
adding support for graph output for preemptirqsoff/preemptoff tracers.
wbr,
jirka
Signed-off-by: Jiri Olsa <jolsa@redhat.com>
---
kernel/trace/trace_irqsoff.c | 16 ++++++++++++++++
1 files changed, 16 insertions(+), 0 deletions(-)
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index 2b67e35..d036b8d 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -605,9 +605,17 @@ static struct tracer preemptoff_tracer __read_mostly =
.start = irqsoff_tracer_start,
.stop = irqsoff_tracer_stop,
.print_max = 1,
+ .print_header = irqsoff_print_header,
+ .print_line = irqsoff_print_line,
+ .flags = &tracer_flags,
+ .set_flag = irqsoff_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
.selftest = trace_selftest_startup_preemptoff,
#endif
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ .open = irqsoff_trace_open,
+ .close = irqsoff_trace_close,
+#endif
};
# define register_preemptoff(trace) register_tracer(&trace)
#else
@@ -633,9 +641,17 @@ static struct tracer preemptirqsoff_tracer __read_mostly =
.start = irqsoff_tracer_start,
.stop = irqsoff_tracer_stop,
.print_max = 1,
+ .print_header = irqsoff_print_header,
+ .print_line = irqsoff_print_line,
+ .flags = &tracer_flags,
+ .set_flag = irqsoff_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
.selftest = trace_selftest_startup_preemptirqsoff,
#endif
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ .open = irqsoff_trace_open,
+ .close = irqsoff_trace_close,
+#endif
};
# define register_preemptirqsoff(trace) register_tracer(&trace)
--
1.6.6.1
^ permalink raw reply related [flat|nested] 11+ messages in thread
* Re: [PATCHv3 0/2] tracing: function graph output for preempt/irqs-off tracers
2010-03-26 12:32 [PATCHv3 0/2] tracing: function graph output for preempt/irqs-off tracers Jiri Olsa
2010-03-26 12:32 ` [PATCHv3 1/2] tracing: graph output support for irqsoff tracer Jiri Olsa
2010-03-26 12:32 ` [PATCHv3 2/2] tracing: graph output support for preemptirqsoff/preemptoff tracers Jiri Olsa
@ 2010-03-26 14:53 ` Steven Rostedt
2010-03-29 11:17 ` Jiri Olsa
2 siblings, 1 reply; 11+ messages in thread
From: Steven Rostedt @ 2010-03-26 14:53 UTC (permalink / raw)
To: Jiri Olsa; +Cc: linux-kernel, Frederic Weisbecker, Ingo Molnar
On Fri, 2010-03-26 at 13:32 +0100, Jiri Olsa wrote:
> hi,
>
> I reworked it based on your comments:
>
> - the function_graph tracer should have the same output
> as current version even with events enabled,
> unless I missed something again ;)
>
> - I gave up the idea of function_graph events,
> since it was unsuitable for outputing other
> events as comments "/* */"
>
> - I'm not including the raw function_graph for now
>
Getting better, but I still have some issues ;-)
Without graph, header looks like this: (old way)
# tracer: irqsoff
#
# irqsoff latency trace v1.1.5 on 2.6.34-rc2
# --------------------------------------------------------------------
# latency: 326 us, #73/73, CPU#1 | (M:preempt VP:0, KP:0, SP:0 HP:0
#P:4)
# -----------------
# | task: swapper-0 (uid:0 nice:0 policy:0 rt_prio:0)
# -----------------
# => started at: save_args
# => ended at: __do_softirq
#
#
# _------=> CPU#
# / _-----=> irqs-off
# | / _----=> need-resched
# || / _---=> hardirq/softirq
# ||| / _--=> preempt-depth
# |||| /_--=> lock-depth
# |||||/ delay
# cmd pid |||||| time | caller
# \ / |||||| \ | /
<idle>-0 1d..1. 1us+: trace_hardirqs_off_thunk <-save_args
Lots of good info.
But if I enable the graph:
# trace-cmd start -p irqsoff -O display-graph
# cat /debug/tracing/trace
# tracer: irqsoff
#
# _-----=> irqs-off
# / _----=> need-resched
# | / _---=> hardirq/softirq
# || / _--=> preempt-depth
# ||| / _-=> lock-depth
# |||| /
# CPU||||| DURATION FUNCTION CALLS
# | ||||| | | | | | |
trace-cm-3661 0d.s4. 1us+: trace_hardirqs_off_thunk <-save_args
0) ==========> |
0) d.s4. | smp_apic_timer_interrupt() {
0) d.s4. 5.740 us | native_apic_mem_write();
0) d.s4. 6.458 us | exit_idle();
0) d.s4. | irq_enter() {
0) d.s4. 5.391 us | rcu_irq_enter();
I see you are using the function graph output, but you just lost some
important information that was with the original tracer.
Another thing is. If I disable the option after I record, I get this:
# echo 0 > /debug/tracing/tracing_enabled
# echo 0 > /debug/tracing/option/display-graph
# cat /debug/tracing/trace
# tracer: irqsoff
#
# irqsoff latency trace v1.1.5 on 2.6.34-rc2
# --------------------------------------------------------------------
# latency: 1084 us, #210/210, CPU#0 | (M:preempt VP:0, KP:0, SP:0 HP:0
#P:4)
# -----------------
# | task: irqbalance-2672 (uid:0 nice:0 policy:0 rt_prio:0)
# -----------------
# => started at: save_args
# => ended at: __do_softirq
#
#
# _------=> CPU#
# / _-----=> irqs-off
# | / _----=> need-resched
# || / _---=> hardirq/softirq
# ||| / _--=> preempt-depth
# |||| /_--=> lock-depth
# |||||/ delay
# cmd pid |||||| time | caller
# \ / |||||| \ | /
irqbalan-2672 0d..2. 2us+: trace_hardirqs_off_thunk <-save_args
irqbalan-2672 0d..2. 10us+: Unknown type 14
irqbalan-2672 0d..2. 16us+: Unknown type 14
irqbalan-2672 0d..2. 20us+: Unknown type 13
irqbalan-2672 0d..2. 25us+: Unknown type 14
irqbalan-2672 0d..2. 29us+: Unknown type 13
irqbalan-2672 0d..2. 33us+: Unknown type 14
irqbalan-2672 0d..2. 37us+: Unknown type 14
irqbalan-2672 0d..2. 42us+: Unknown type 13
irqbalan-2672 0d..2. 49us+: Unknown type 14
irqbalan-2672 0d..2. 55us+: Unknown type 13
irqbalan-2672 0d.h2. 62us+: Unknown type 13
I think you can still do the "event" part, without affecting the way the
function graph outputs normally. I would not have given up on that
method. You don't need to worry about it processing other events,
because when you register it to write as an event, it will only be
called when a function graph event was found. It will not be processing
other events. Only when the tracer itself overrides the default writing
will it do so.
-- Steve
>
> attached patches:
> - 1/2 graph output support for irqsoff tracer
> - 2/2 graph output support for preemptirqsoff/preemptoff tracers
>
> plz let me know what you think
>
> wbr,
> jirka
> ---
> kernel/trace/trace.c | 31 ++++--
> kernel/trace/trace.h | 10 ++
> kernel/trace/trace_functions_graph.c | 15 ++-
> kernel/trace/trace_irqsoff.c | 200 ++++++++++++++++++++++++++++++++-
> 4 files changed, 233 insertions(+), 23 deletions(-)
^ permalink raw reply [flat|nested] 11+ messages in thread