linux-kernel.vger.kernel.org archive mirror
* [PATCH cleanup RFC] ftrace: kill unused and puzzling sample code in ftrace.h
@ 2012-11-03  4:38 Shan Wei
  2012-11-12 13:00 ` Shan Wei
                   ` (2 more replies)
  0 siblings, 3 replies; 4+ messages in thread
From: Shan Wei @ 2012-11-03  4:38 UTC (permalink / raw)
  To: rostedt, fweisbec, mingo, Shan Wei, Kernel-Maillist

From: Shan Wei <davidshan@tencent.com>

While doing per-cpu helper optimization work, I found this code quite puzzling.
1. It is marked as comment text, perhaps meant as a sample function for
   guidance or as a todo item.
2. However, the sample code is stale: struct perf_trace_buf no longer exists.
   Commit ce71b9 deleted the struct perf_trace_buf definition.

   Author: Frederic Weisbecker <fweisbec@gmail.com>
   Date:   Sun Nov 22 05:26:55 2009 +0100

   tracing: Use the perf recursion protection from trace event

Is it necessary to keep it there?
Compile tested only.
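
For reference, here is a rough sketch of the perf callback that the header
actually generates these days (heavily simplified, reusing the <call>,
<tstruct> and <assign> placeholders from the old sample; the real expansion
and exact argument lists differ, so treat the helper calls below purely as
illustration). The per-cpu buffer lookup and recursion protection that the
dead comment open-codes are now hidden behind perf_trace_buf_prepare() and
perf_trace_buf_submit():

static notrace void perf_trace_<call>(void *__data, proto)
{
	struct ftrace_event_call *event_call = __data;
	struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
	struct ftrace_raw_<call> *entry;
	struct pt_regs __regs;
	u64 __addr = 0, __count = 1;
	struct hlist_head *head;
	int __entry_size;
	int __data_size;
	int rctx;

	perf_fetch_caller_regs(&__regs);

	__data_size = ftrace_get_offsets_<call>(&__data_offsets, args);

	/*
	 * Same trick as the old sample: round up to a u64 boundary, then
	 * drop the u32 size field that perf stores in front of the record.
	 */
	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),
			     sizeof(u64));
	__entry_size -= sizeof(u32);

	/* per-cpu buffer lookup and recursion protection happen in here */
	entry = perf_trace_buf_prepare(__entry_size,
				       event_call->event.type, &__regs, &rctx);
	if (!entry)
		return;

	<tstruct>	/* compute dynamic array offsets */

	<assign>	/* fill in the event fields */

	head = this_cpu_ptr(event_call->perf_events);
	perf_trace_buf_submit(entry, __entry_size, rctx, __addr, __count,
			      &__regs, head);
}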



Signed-off-by: Shan Wei <davidshan@tencent.com>
---
 include/trace/ftrace.h |   73 ------------------------------------------------
 1 files changed, 0 insertions(+), 73 deletions(-)

diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index a763888..4f993c2 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -620,79 +620,6 @@ __attribute__((section("_ftrace_events"))) *__event_##call = &event_##call
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
-/*
- * Define the insertion callback to perf events
- *
- * The job is very similar to ftrace_raw_event_<call> except that we don't
- * insert in the ring buffer but in a perf counter.
- *
- * static void ftrace_perf_<call>(proto)
- * {
- *	struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
- *	struct ftrace_event_call *event_call = &event_<call>;
- *	extern void perf_tp_event(int, u64, u64, void *, int);
- *	struct ftrace_raw_##call *entry;
- *	struct perf_trace_buf *trace_buf;
- *	u64 __addr = 0, __count = 1;
- *	unsigned long irq_flags;
- *	struct trace_entry *ent;
- *	int __entry_size;
- *	int __data_size;
- *	int __cpu
- *	int pc;
- *
- *	pc = preempt_count();
- *
- *	__data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
- *
- *	// Below we want to get the aligned size by taking into account
- *	// the u32 field that will later store the buffer size
- *	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),
- *			     sizeof(u64));
- *	__entry_size -= sizeof(u32);
- *
- *	// Protect the non nmi buffer
- *	// This also protects the rcu read side
- *	local_irq_save(irq_flags);
- *	__cpu = smp_processor_id();
- *
- *	if (in_nmi())
- *		trace_buf = rcu_dereference_sched(perf_trace_buf_nmi);
- *	else
- *		trace_buf = rcu_dereference_sched(perf_trace_buf);
- *
- *	if (!trace_buf)
- *		goto end;
- *
- *	trace_buf = per_cpu_ptr(trace_buf, __cpu);
- *
- * 	// Avoid recursion from perf that could mess up the buffer
- * 	if (trace_buf->recursion++)
- *		goto end_recursion;
- *
- * 	raw_data = trace_buf->buf;
- *
- *	// Make recursion update visible before entering perf_tp_event
- *	// so that we protect from perf recursions.
- *
- *	barrier();
- *
- *	//zero dead bytes from alignment to avoid stack leak to userspace:
- *	*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;
- *	entry = (struct ftrace_raw_<call> *)raw_data;
- *	ent = &entry->ent;
- *	tracing_generic_entry_update(ent, irq_flags, pc);
- *	ent->type = event_call->id;
- *
- *	<tstruct> <- do some jobs with dynamic arrays
- *
- *	<assign>  <- affect our values
- *
- *	perf_tp_event(event_call->id, __addr, __count, entry,
- *		     __entry_size);  <- submit them to perf counter
- *
- * }
- */
 
 #ifdef CONFIG_PERF_EVENTS
 
-- 
1.7.1



* Re: [PATCH cleanup RFC] ftrace: kill unused and puzzling sample code in ftrace.h
  2012-11-03  4:38 [PATCH cleanup RFC] ftrace: kill unused and puzzling sample code in ftrace.h Shan Wei
@ 2012-11-12 13:00 ` Shan Wei
  2012-11-13 20:14 ` Steven Rostedt
  2012-12-09 11:28 ` [tip:perf/core] tracing: Kill " tip-bot for Shan Wei
  2 siblings, 0 replies; 4+ messages in thread
From: Shan Wei @ 2012-11-12 13:00 UTC (permalink / raw)
  To: rostedt, fweisbec, mingo, Shan Wei, Kernel-Maillist


ping...................

Shan Wei said, at 2012/11/3 12:38:
> From: Shan Wei <davidshan@tencent.com>
> 
> While doing per-cpu helper optimization work, I found this code quite puzzling.
> 1. It is marked as comment text, perhaps meant as a sample function for
>    guidance or as a todo item.
> 2. However, the sample code is stale: struct perf_trace_buf no longer exists.
>    Commit ce71b9 deleted the struct perf_trace_buf definition.
> 
>    Author: Frederic Weisbecker <fweisbec@gmail.com>
>    Date:   Sun Nov 22 05:26:55 2009 +0100
> 
>    tracing: Use the perf recursion protection from trace event
> 
> Is it necessary to keep it there?
> Compile tested only.

> 
> 
> 
> Signed-off-by: Shan Wei <davidshan@tencent.com>
> ---
>  include/trace/ftrace.h |   73 ------------------------------------------------
>  1 files changed, 0 insertions(+), 73 deletions(-)
> 
> diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
> index a763888..4f993c2 100644
> --- a/include/trace/ftrace.h
> +++ b/include/trace/ftrace.h
> @@ -620,79 +620,6 @@ __attribute__((section("_ftrace_events"))) *__event_##call = &event_##call
>  
>  #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
>  
> -/*
> - * Define the insertion callback to perf events
> - *
> - * The job is very similar to ftrace_raw_event_<call> except that we don't
> - * insert in the ring buffer but in a perf counter.
> - *
> - * static void ftrace_perf_<call>(proto)
> - * {
> - *	struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
> - *	struct ftrace_event_call *event_call = &event_<call>;
> - *	extern void perf_tp_event(int, u64, u64, void *, int);
> - *	struct ftrace_raw_##call *entry;
> - *	struct perf_trace_buf *trace_buf;
> - *	u64 __addr = 0, __count = 1;
> - *	unsigned long irq_flags;
> - *	struct trace_entry *ent;
> - *	int __entry_size;
> - *	int __data_size;
> - *	int __cpu
> - *	int pc;
> - *
> - *	pc = preempt_count();
> - *
> - *	__data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
> - *
> - *	// Below we want to get the aligned size by taking into account
> - *	// the u32 field that will later store the buffer size
> - *	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),
> - *			     sizeof(u64));
> - *	__entry_size -= sizeof(u32);
> - *
> - *	// Protect the non nmi buffer
> - *	// This also protects the rcu read side
> - *	local_irq_save(irq_flags);
> - *	__cpu = smp_processor_id();
> - *
> - *	if (in_nmi())
> - *		trace_buf = rcu_dereference_sched(perf_trace_buf_nmi);
> - *	else
> - *		trace_buf = rcu_dereference_sched(perf_trace_buf);
> - *
> - *	if (!trace_buf)
> - *		goto end;
> - *
> - *	trace_buf = per_cpu_ptr(trace_buf, __cpu);
> - *
> - * 	// Avoid recursion from perf that could mess up the buffer
> - * 	if (trace_buf->recursion++)
> - *		goto end_recursion;
> - *
> - * 	raw_data = trace_buf->buf;
> - *
> - *	// Make recursion update visible before entering perf_tp_event
> - *	// so that we protect from perf recursions.
> - *
> - *	barrier();
> - *
> - *	//zero dead bytes from alignment to avoid stack leak to userspace:
> - *	*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;
> - *	entry = (struct ftrace_raw_<call> *)raw_data;
> - *	ent = &entry->ent;
> - *	tracing_generic_entry_update(ent, irq_flags, pc);
> - *	ent->type = event_call->id;
> - *
> - *	<tstruct> <- do some jobs with dynamic arrays
> - *
> - *	<assign>  <- affect our values
> - *
> - *	perf_tp_event(event_call->id, __addr, __count, entry,
> - *		     __entry_size);  <- submit them to perf counter
> - *
> - * }
> - */
>  
>  #ifdef CONFIG_PERF_EVENTS
>  
> 



* Re: [PATCH cleanup RFC] ftrace: kill unused and puzzling sample code in ftrace.h
  2012-11-03  4:38 [PATCH cleanup RFC] ftrace: kill unused and puzzling sample code in ftrace.h Shan Wei
  2012-11-12 13:00 ` Shan Wei
@ 2012-11-13 20:14 ` Steven Rostedt
  2012-12-09 11:28 ` [tip:perf/core] tracing: Kill " tip-bot for Shan Wei
  2 siblings, 0 replies; 4+ messages in thread
From: Steven Rostedt @ 2012-11-13 20:14 UTC (permalink / raw)
  To: Shan Wei; +Cc: fweisbec, mingo, Shan Wei, Kernel-Maillist

On Sat, 2012-11-03 at 12:38 +0800, Shan Wei wrote:
> From: Shan Wei <davidshan@tencent.com>
> 
> While doing per-cpu helper optimization work, I found this code quite puzzling.
> 1. It is marked as comment text, perhaps meant as a sample function for
>    guidance or as a todo item.
> 2. However, the sample code is stale: struct perf_trace_buf no longer exists.
>    Commit ce71b9 deleted the struct perf_trace_buf definition.
> 
>    Author: Frederic Weisbecker <fweisbec@gmail.com>
>    Date:   Sun Nov 22 05:26:55 2009 +0100
> 
>    tracing: Use the perf recursion protection from trace event
> 
> Is it necessary to keep it there?
> Compile tested only.
> 

I have no problem with this. I can take a look later. If it's just
dead comments then I'll apply it.

Thanks,

-- Steve

> 
> 
> Signed-off-by: Shan Wei <davidshan@tencent.com>
> ---
>  include/trace/ftrace.h |   73 ------------------------------------------------
>  1 files changed, 0 insertions(+), 73 deletions(-)
> 
> diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
> index a763888..4f993c2 100644
> --- a/include/trace/ftrace.h
> +++ b/include/trace/ftrace.h
> @@ -620,79 +620,6 @@ __attribute__((section("_ftrace_events"))) *__event_##call = &event_##call
>  
>  #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
>  
> -/*
> - * Define the insertion callback to perf events
> - *
> - * The job is very similar to ftrace_raw_event_<call> except that we don't
> - * insert in the ring buffer but in a perf counter.
> - *
> - * static void ftrace_perf_<call>(proto)
> - * {
> - *	struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
> - *	struct ftrace_event_call *event_call = &event_<call>;
> - *	extern void perf_tp_event(int, u64, u64, void *, int);
> - *	struct ftrace_raw_##call *entry;
> - *	struct perf_trace_buf *trace_buf;
> - *	u64 __addr = 0, __count = 1;
> - *	unsigned long irq_flags;
> - *	struct trace_entry *ent;
> - *	int __entry_size;
> - *	int __data_size;
> - *	int __cpu
> - *	int pc;
> - *
> - *	pc = preempt_count();
> - *
> - *	__data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
> - *
> - *	// Below we want to get the aligned size by taking into account
> - *	// the u32 field that will later store the buffer size
> - *	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),
> - *			     sizeof(u64));
> - *	__entry_size -= sizeof(u32);
> - *
> - *	// Protect the non nmi buffer
> - *	// This also protects the rcu read side
> - *	local_irq_save(irq_flags);
> - *	__cpu = smp_processor_id();
> - *
> - *	if (in_nmi())
> - *		trace_buf = rcu_dereference_sched(perf_trace_buf_nmi);
> - *	else
> - *		trace_buf = rcu_dereference_sched(perf_trace_buf);
> - *
> - *	if (!trace_buf)
> - *		goto end;
> - *
> - *	trace_buf = per_cpu_ptr(trace_buf, __cpu);
> - *
> - * 	// Avoid recursion from perf that could mess up the buffer
> - * 	if (trace_buf->recursion++)
> - *		goto end_recursion;
> - *
> - * 	raw_data = trace_buf->buf;
> - *
> - *	// Make recursion update visible before entering perf_tp_event
> - *	// so that we protect from perf recursions.
> - *
> - *	barrier();
> - *
> - *	//zero dead bytes from alignment to avoid stack leak to userspace:
> - *	*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;
> - *	entry = (struct ftrace_raw_<call> *)raw_data;
> - *	ent = &entry->ent;
> - *	tracing_generic_entry_update(ent, irq_flags, pc);
> - *	ent->type = event_call->id;
> - *
> - *	<tstruct> <- do some jobs with dynamic arrays
> - *
> - *	<assign>  <- affect our values
> - *
> - *	perf_tp_event(event_call->id, __addr, __count, entry,
> - *		     __entry_size);  <- submit them to perf counter
> - *
> - * }
> - */
>  
>  #ifdef CONFIG_PERF_EVENTS
>  




* [tip:perf/core] tracing: Kill unused and puzzling sample code in ftrace.h
  2012-11-03  4:38 [PATCH cleanup RFC] ftrace: kill unused and puzzling sample code in ftrace.h Shan Wei
  2012-11-12 13:00 ` Shan Wei
  2012-11-13 20:14 ` Steven Rostedt
@ 2012-12-09 11:28 ` tip-bot for Shan Wei
  2 siblings, 0 replies; 4+ messages in thread
From: tip-bot for Shan Wei @ 2012-12-09 11:28 UTC (permalink / raw)
  To: linux-tip-commits; +Cc: linux-kernel, hpa, mingo, davidshan, rostedt, tglx

Commit-ID:  1c7d66732458dc187008e3f5b2f71e019e320fc2
Gitweb:     http://git.kernel.org/tip/1c7d66732458dc187008e3f5b2f71e019e320fc2
Author:     Shan Wei <davidshan@tencent.com>
AuthorDate: Sat, 3 Nov 2012 12:38:33 +0800
Committer:  Steven Rostedt <rostedt@goodmis.org>
CommitDate: Tue, 13 Nov 2012 15:51:21 -0500

tracing: Kill unused and puzzling sample code in ftrace.h

While doing per-cpu helper optimization work, I found this code quite puzzling.
1. It is marked as comment text, perhaps meant as a sample function for
   guidance or as a todo item.
2. However, the sample code is stale: struct perf_trace_buf no longer exists.
   Commit ce71b9 deleted the struct perf_trace_buf definition.

   Author: Frederic Weisbecker <fweisbec@gmail.com>
   Date:   Sun Nov 22 05:26:55 2009 +0100

   tracing: Use the perf recursion protection from trace event

Is it necessary to keep it there?
Compile tested only.

Link: http://lkml.kernel.org/r/50949FC9.6050202@gmail.com

Signed-off-by: Shan Wei <davidshan@tencent.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
---
 include/trace/ftrace.h | 73 --------------------------------------------------
 1 file changed, 73 deletions(-)

diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index 698f2a8..40dc5e8 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -619,79 +619,6 @@ __attribute__((section("_ftrace_events"))) *__event_##call = &event_##call
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
-/*
- * Define the insertion callback to perf events
- *
- * The job is very similar to ftrace_raw_event_<call> except that we don't
- * insert in the ring buffer but in a perf counter.
- *
- * static void ftrace_perf_<call>(proto)
- * {
- *	struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
- *	struct ftrace_event_call *event_call = &event_<call>;
- *	extern void perf_tp_event(int, u64, u64, void *, int);
- *	struct ftrace_raw_##call *entry;
- *	struct perf_trace_buf *trace_buf;
- *	u64 __addr = 0, __count = 1;
- *	unsigned long irq_flags;
- *	struct trace_entry *ent;
- *	int __entry_size;
- *	int __data_size;
- *	int __cpu
- *	int pc;
- *
- *	pc = preempt_count();
- *
- *	__data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
- *
- *	// Below we want to get the aligned size by taking into account
- *	// the u32 field that will later store the buffer size
- *	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),
- *			     sizeof(u64));
- *	__entry_size -= sizeof(u32);
- *
- *	// Protect the non nmi buffer
- *	// This also protects the rcu read side
- *	local_irq_save(irq_flags);
- *	__cpu = smp_processor_id();
- *
- *	if (in_nmi())
- *		trace_buf = rcu_dereference_sched(perf_trace_buf_nmi);
- *	else
- *		trace_buf = rcu_dereference_sched(perf_trace_buf);
- *
- *	if (!trace_buf)
- *		goto end;
- *
- *	trace_buf = per_cpu_ptr(trace_buf, __cpu);
- *
- * 	// Avoid recursion from perf that could mess up the buffer
- * 	if (trace_buf->recursion++)
- *		goto end_recursion;
- *
- * 	raw_data = trace_buf->buf;
- *
- *	// Make recursion update visible before entering perf_tp_event
- *	// so that we protect from perf recursions.
- *
- *	barrier();
- *
- *	//zero dead bytes from alignment to avoid stack leak to userspace:
- *	*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;
- *	entry = (struct ftrace_raw_<call> *)raw_data;
- *	ent = &entry->ent;
- *	tracing_generic_entry_update(ent, irq_flags, pc);
- *	ent->type = event_call->id;
- *
- *	<tstruct> <- do some jobs with dynamic arrays
- *
- *	<assign>  <- affect our values
- *
- *	perf_tp_event(event_call->id, __addr, __count, entry,
- *		     __entry_size);  <- submit them to perf counter
- *
- * }
- */
 
 #ifdef CONFIG_PERF_EVENTS
 

