public inbox for linux-kernel@vger.kernel.org
* [RFC][BUG] Revert "sched, perf: Use a single callback into the scheduler"
@ 2012-05-21  8:06 Jiri Olsa
  2012-05-22 11:01 ` Peter Zijlstra
  0 siblings, 1 reply; 5+ messages in thread
From: Jiri Olsa @ 2012-05-21  8:06 UTC (permalink / raw)
  To: acme, a.p.zijlstra, mingo, paulus, cjashfor, fweisbec
  Cc: linux-kernel, eranian

hi,
Arnaldo and I were hunting down why test__PERF_RECORD stopped working recently.
It seems that the following commit is the culprit:

sched, perf: Use a single callback into the scheduler
commit cb04ff9ac424d0e689d9b612e9f73cb443ab4b7e
Author: Peter Zijlstra <a.p.zijlstra@chello.nl>
Date:   Tue May 8 18:56:04 2012 +0200

the reason seems to be the following:

  - before this change was introduced, the process switch worked
    like this (wrt perf event scheduling):

    schedule (prev, next)
      - schedule out all perf events for prev
      - switch to next
      - schedule in all perf events for current (next)

  - after the commit, the process switch looks like:

    schedule (prev, next)
      - schedule out all perf events for prev
      - schedule in all perf events for (next)
      - switch to next

The problem is that after we schedule the perf events in, the pmu is
enabled and we can receive events even before we make the switch
to next - so "current" is still the prev process (event SAMPLE data
are filled in based on the value of "current").

That's exactly what we see with the test__PERF_RECORD test. We receive
SAMPLEs with the PID of the process that our tracee is scheduled from.
Most of the time it's the parent, sometimes the idle task (0).
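
To make the ordering concrete, here is a simplified sketch of the call
chain after the commit - illustrative pseudo-code only, not the exact
scheduler code, and the last line is only roughly what
__perf_event_header__init_id() ends up doing:

    context_switch(rq, prev, next)
      prepare_task_switch(rq, prev, next)
        perf_event_task_sched(prev, next)
          __perf_event_task_sched_out(prev, next)  /* pmu disabled for prev  */
          __perf_event_task_sched_in(prev, next)   /* pmu enabled again here */
      switch_to(prev, next, prev)                  /* "current" becomes next */
      finish_task_switch(rq, prev)

    /* a PMU interrupt between the sched_in and switch_to fills the
       sample from "current", which at that point is still prev: */
    data->tid_entry.pid = perf_event_tid(event, current);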

I tried to keep the current code and add a new hook in finish_task_switch
to enable the related PMUs for the task, but I'm getting the following warning:

WARNING: at arch/x86/kernel/cpu/perf_event.c:1054 x86_pmu_start+0x133/0x140()

which I haven't figured out yet... also I'm not sure if that's even
the right thing to do ;)

thoughts? thanks,
jirka


---
This reverts commit cb04ff9ac424d0e689d9b612e9f73cb443ab4b7e.
---
 include/linux/perf_event.h |   24 ++++++++++++++++++------
 kernel/events/core.c       |   14 ++++----------
 kernel/sched/core.c        |    9 ++++++++-
 3 files changed, 30 insertions(+), 17 deletions(-)

diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 8adf70e..f325786 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -1084,8 +1084,10 @@ extern void perf_pmu_unregister(struct pmu *pmu);
 
 extern int perf_num_counters(void);
 extern const char *perf_pmu_name(void);
-extern void __perf_event_task_sched(struct task_struct *prev,
-				    struct task_struct *next);
+extern void __perf_event_task_sched_in(struct task_struct *prev,
+				       struct task_struct *task);
+extern void __perf_event_task_sched_out(struct task_struct *prev,
+					struct task_struct *next);
 extern int perf_event_init_task(struct task_struct *child);
 extern void perf_event_exit_task(struct task_struct *child);
 extern void perf_event_free_task(struct task_struct *task);
@@ -1205,13 +1207,20 @@ perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
 
 extern struct static_key_deferred perf_sched_events;
 
-static inline void perf_event_task_sched(struct task_struct *prev,
+static inline void perf_event_task_sched_in(struct task_struct *prev,
 					    struct task_struct *task)
 {
+	if (static_key_false(&perf_sched_events.key))
+		__perf_event_task_sched_in(prev, task);
+}
+
+static inline void perf_event_task_sched_out(struct task_struct *prev,
+					     struct task_struct *next)
+{
 	perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, NULL, 0);
 
 	if (static_key_false(&perf_sched_events.key))
-		__perf_event_task_sched(prev, task);
+		__perf_event_task_sched_out(prev, next);
 }
 
 extern void perf_event_mmap(struct vm_area_struct *vma);
@@ -1286,8 +1295,11 @@ extern void perf_event_disable(struct perf_event *event);
 extern void perf_event_task_tick(void);
 #else
 static inline void
-perf_event_task_sched(struct task_struct *prev,
-		      struct task_struct *task)				{ }
+perf_event_task_sched_in(struct task_struct *prev,
+			 struct task_struct *task)			{ }
+static inline void
+perf_event_task_sched_out(struct task_struct *prev,
+			  struct task_struct *next)			{ }
 static inline int perf_event_init_task(struct task_struct *child)	{ return 0; }
 static inline void perf_event_exit_task(struct task_struct *child)	{ }
 static inline void perf_event_free_task(struct task_struct *task)	{ }
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 91a4459..5b06cbb 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -2039,8 +2039,8 @@ static void perf_event_context_sched_out(struct task_struct *task, int ctxn,
  * accessing the event control register. If a NMI hits, then it will
  * not restart the event.
  */
-static void __perf_event_task_sched_out(struct task_struct *task,
-					struct task_struct *next)
+void __perf_event_task_sched_out(struct task_struct *task,
+				 struct task_struct *next)
 {
 	int ctxn;
 
@@ -2279,8 +2279,8 @@ static void perf_branch_stack_sched_in(struct task_struct *prev,
  * accessing the event control register. If a NMI hits, then it will
  * keep the event running.
  */
-static void __perf_event_task_sched_in(struct task_struct *prev,
-				       struct task_struct *task)
+void __perf_event_task_sched_in(struct task_struct *prev,
+				struct task_struct *task)
 {
 	struct perf_event_context *ctx;
 	int ctxn;
@@ -2305,12 +2305,6 @@ static void __perf_event_task_sched_in(struct task_struct *prev,
 		perf_branch_stack_sched_in(prev, task);
 }
 
-void __perf_event_task_sched(struct task_struct *prev, struct task_struct *next)
-{
-	__perf_event_task_sched_out(prev, next);
-	__perf_event_task_sched_in(prev, next);
-}
-
 static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
 {
 	u64 frequency = event->attr.sample_freq;
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 18eed17..90c2427 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1913,7 +1913,7 @@ prepare_task_switch(struct rq *rq, struct task_struct *prev,
 		    struct task_struct *next)
 {
 	sched_info_switch(prev, next);
-	perf_event_task_sched(prev, next);
+	perf_event_task_sched_out(prev, next);
 	fire_sched_out_preempt_notifiers(prev, next);
 	prepare_lock_switch(rq, next);
 	prepare_arch_switch(next);
@@ -1956,6 +1956,13 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
 	 */
 	prev_state = prev->state;
 	finish_arch_switch(prev);
+#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
+	local_irq_disable();
+#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
+	perf_event_task_sched_in(prev, current);
+#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
+	local_irq_enable();
+#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
 	finish_lock_switch(rq, prev);
 	finish_arch_post_lock_switch();
 
-- 
1.7.7.6



* Re: [RFC][BUG] Revert "sched, perf: Use a single callback into the scheduler"
  2012-05-21  8:06 [RFC][BUG] Revert "sched, perf: Use a single callback into the scheduler" Jiri Olsa
@ 2012-05-22 11:01 ` Peter Zijlstra
  2012-05-23 11:13   ` [PATCH] " Jiri Olsa
  0 siblings, 1 reply; 5+ messages in thread
From: Peter Zijlstra @ 2012-05-22 11:01 UTC (permalink / raw)
  To: Jiri Olsa; +Cc: acme, mingo, paulus, cjashfor, fweisbec, linux-kernel, eranian

On Mon, 2012-05-21 at 10:06 +0200, Jiri Olsa wrote:
> hi,
> Arnaldo and I were hunting down why test__PERF_RECORD stopped working recently.
> It seems that the following commit is the culprit:
> 
> sched, perf: Use a single callback into the scheduler
> commit cb04ff9ac424d0e689d9b612e9f73cb443ab4b7e
> Author: Peter Zijlstra <a.p.zijlstra@chello.nl>
> Date:   Tue May 8 18:56:04 2012 +0200
> 
> the reason seems to be the following:
> 
>   - before this change was introduced, the process switch worked
>     like this (wrt perf event scheduling):
> 
>     schedule (prev, next)
>       - schedule out all perf events for prev
>       - switch to next
>       - schedule in all perf events for current (next)
> 
>   - after the commit, the process switch looks like:
> 
>     schedule (prev, next)
>       - schedule out all perf events for prev
>       - schedule in all perf events for (next)
>       - switch to next
> 
> The problem is that after we schedule the perf events in, the pmu is
> enabled and we can receive events even before we make the switch
> to next - so "current" is still the prev process (event SAMPLE data
> are filled in based on the value of "current").
> 
> That's exactly what we see with the test__PERF_RECORD test. We receive
> SAMPLEs with the PID of the process that our tracee is scheduled from.
> Most of the time it's the parent, sometimes the idle task (0).
> 
> I tried to keep the current code and add a new hook in finish_task_switch
> to enable the related PMUs for the task, but I'm getting the following warning:
> 
> WARNING: at arch/x86/kernel/cpu/perf_event.c:1054 x86_pmu_start+0x133/0x140()
> 
> which I haven't figured out yet... also I'm not sure if that's even
> the right thing to do ;)
> 
> thoughts? 

Bah!, yeah I guess reverting is the right thing for now. Sad though.

So by having the two hooks we have a black-spot between them where we
receive no events at all; this black-spot covers the hand-over of
current and we thus don't receive the 'wrong' events.

I rather liked that we could both do away with that black-spot and clean
up the code a little, but apparently people rely on it.
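
Roughly, the restored two-hook flow looks like this (a simplified
sketch, with the hook placement as in the revert; not the exact code):

    context_switch(rq, prev, next)
      prepare_task_switch(rq, prev, next)
        perf_event_task_sched_out(prev, next)   /* pmu off for prev            */
      switch_to(prev, next, prev)               /* black-spot: no events while */
      finish_task_switch(rq, prev)              /* current is handed over      */
        perf_event_task_sched_in(prev, current) /* pmu on again, current==next */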




* [PATCH] Revert "sched, perf: Use a single callback into the scheduler"
  2012-05-22 11:01 ` Peter Zijlstra
@ 2012-05-23 11:13   ` Jiri Olsa
  2012-05-23 16:09     ` [tip:perf/core] " tip-bot for Jiri Olsa
  0 siblings, 1 reply; 5+ messages in thread
From: Jiri Olsa @ 2012-05-23 11:13 UTC (permalink / raw)
  To: Peter Zijlstra, mingo
  Cc: acme, paulus, cjashfor, fweisbec, linux-kernel, eranian

On Tue, May 22, 2012 at 01:01:22PM +0200, Peter Zijlstra wrote:
> On Mon, 2012-05-21 at 10:06 +0200, Jiri Olsa wrote:
> > hi,

SNIP

> 
> Bah!, yeah I guess reverting is the right thing for now. Sad though.
> 
> So by having the two hooks we have a black-spot between them where we
> receive no events at all; this black-spot covers the hand-over of
> current and we thus don't receive the 'wrong' events.
> 
> I rather liked that we could both do away with that black-spot and clean
> up the code a little, but apparently people rely on it.
> 
> 

ok, resending as a patch with an updated changelog

thanks,
jirka


---
This reverts commit cb04ff9ac424d0e689d9b612e9f73cb443ab4b7e.
  sched, perf: Use a single callback into the scheduler
  Author: Peter Zijlstra <a.p.zijlstra@chello.nl>
  Date:   Tue May 8 18:56:04 2012 +0200

Before this change was introduced, the process switch worked
like this (wrt perf event scheduling):

     schedule (prev, next)
       - schedule out all perf events for prev
       - switch to next
       - schedule in all perf events for current (next)

After the commit, the process switch looks like:

     schedule (prev, next)
       - schedule out all perf events for prev
       - schedule in all perf events for (next)
       - switch to next

The problem is that after we schedule the perf events in, the pmu is
enabled and we can receive events even before we make the switch
to next - so "current" is still the prev process (event SAMPLE data
are filled in based on the value of "current").

That's exactly what we see with the test__PERF_RECORD test. We receive
SAMPLEs with the PID of the process that our tracee is scheduled from.

Discussed with Peter Zijlstra:
> Bah!, yeah I guess reverting is the right thing for now. Sad though.
>
> So by having the two hooks we have a black-spot between them where we
> receive no events at all; this black-spot covers the hand-over of
> current and we thus don't receive the 'wrong' events.
>
> I rather liked that we could both do away with that black-spot and clean
> up the code a little, but apparently people rely on it.

Signed-off-by: Jiri Olsa <jolsa@redhat.com>
---
 include/linux/perf_event.h |   24 ++++++++++++++++++------
 kernel/events/core.c       |   14 ++++----------
 kernel/sched/core.c        |    9 ++++++++-
 3 files changed, 30 insertions(+), 17 deletions(-)

diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 8adf70e..f325786 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -1084,8 +1084,10 @@ extern void perf_pmu_unregister(struct pmu *pmu);
 
 extern int perf_num_counters(void);
 extern const char *perf_pmu_name(void);
-extern void __perf_event_task_sched(struct task_struct *prev,
-				    struct task_struct *next);
+extern void __perf_event_task_sched_in(struct task_struct *prev,
+				       struct task_struct *task);
+extern void __perf_event_task_sched_out(struct task_struct *prev,
+					struct task_struct *next);
 extern int perf_event_init_task(struct task_struct *child);
 extern void perf_event_exit_task(struct task_struct *child);
 extern void perf_event_free_task(struct task_struct *task);
@@ -1205,13 +1207,20 @@ perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
 
 extern struct static_key_deferred perf_sched_events;
 
-static inline void perf_event_task_sched(struct task_struct *prev,
+static inline void perf_event_task_sched_in(struct task_struct *prev,
 					    struct task_struct *task)
 {
+	if (static_key_false(&perf_sched_events.key))
+		__perf_event_task_sched_in(prev, task);
+}
+
+static inline void perf_event_task_sched_out(struct task_struct *prev,
+					     struct task_struct *next)
+{
 	perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, NULL, 0);
 
 	if (static_key_false(&perf_sched_events.key))
-		__perf_event_task_sched(prev, task);
+		__perf_event_task_sched_out(prev, next);
 }
 
 extern void perf_event_mmap(struct vm_area_struct *vma);
@@ -1286,8 +1295,11 @@ extern void perf_event_disable(struct perf_event *event);
 extern void perf_event_task_tick(void);
 #else
 static inline void
-perf_event_task_sched(struct task_struct *prev,
-		      struct task_struct *task)				{ }
+perf_event_task_sched_in(struct task_struct *prev,
+			 struct task_struct *task)			{ }
+static inline void
+perf_event_task_sched_out(struct task_struct *prev,
+			  struct task_struct *next)			{ }
 static inline int perf_event_init_task(struct task_struct *child)	{ return 0; }
 static inline void perf_event_exit_task(struct task_struct *child)	{ }
 static inline void perf_event_free_task(struct task_struct *task)	{ }
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 91a4459..5b06cbb 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -2039,8 +2039,8 @@ static void perf_event_context_sched_out(struct task_struct *task, int ctxn,
  * accessing the event control register. If a NMI hits, then it will
  * not restart the event.
  */
-static void __perf_event_task_sched_out(struct task_struct *task,
-					struct task_struct *next)
+void __perf_event_task_sched_out(struct task_struct *task,
+				 struct task_struct *next)
 {
 	int ctxn;
 
@@ -2279,8 +2279,8 @@ static void perf_branch_stack_sched_in(struct task_struct *prev,
  * accessing the event control register. If a NMI hits, then it will
  * keep the event running.
  */
-static void __perf_event_task_sched_in(struct task_struct *prev,
-				       struct task_struct *task)
+void __perf_event_task_sched_in(struct task_struct *prev,
+				struct task_struct *task)
 {
 	struct perf_event_context *ctx;
 	int ctxn;
@@ -2305,12 +2305,6 @@ static void __perf_event_task_sched_in(struct task_struct *prev,
 		perf_branch_stack_sched_in(prev, task);
 }
 
-void __perf_event_task_sched(struct task_struct *prev, struct task_struct *next)
-{
-	__perf_event_task_sched_out(prev, next);
-	__perf_event_task_sched_in(prev, next);
-}
-
 static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
 {
 	u64 frequency = event->attr.sample_freq;
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 18eed17..90c2427 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1913,7 +1913,7 @@ prepare_task_switch(struct rq *rq, struct task_struct *prev,
 		    struct task_struct *next)
 {
 	sched_info_switch(prev, next);
-	perf_event_task_sched(prev, next);
+	perf_event_task_sched_out(prev, next);
 	fire_sched_out_preempt_notifiers(prev, next);
 	prepare_lock_switch(rq, next);
 	prepare_arch_switch(next);
@@ -1956,6 +1956,13 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
 	 */
 	prev_state = prev->state;
 	finish_arch_switch(prev);
+#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
+	local_irq_disable();
+#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
+	perf_event_task_sched_in(prev, current);
+#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
+	local_irq_enable();
+#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
 	finish_lock_switch(rq, prev);
 	finish_arch_post_lock_switch();
 
-- 
1.7.7.6



* [tip:perf/core] Revert "sched, perf: Use a single callback into the scheduler"
  2012-05-23 11:13   ` [PATCH] " Jiri Olsa
@ 2012-05-23 16:09     ` tip-bot for Jiri Olsa
  2012-05-23 16:33       ` Peter Zijlstra
  0 siblings, 1 reply; 5+ messages in thread
From: tip-bot for Jiri Olsa @ 2012-05-23 16:09 UTC (permalink / raw)
  To: linux-tip-commits; +Cc: linux-kernel, hpa, mingo, a.p.zijlstra, tglx, jolsa

Commit-ID:  ab0cce560ef177bdc7a8f73e9962be9d829a7b2c
Gitweb:     http://git.kernel.org/tip/ab0cce560ef177bdc7a8f73e9962be9d829a7b2c
Author:     Jiri Olsa <jolsa@redhat.com>
AuthorDate: Wed, 23 May 2012 13:13:02 +0200
Committer:  Ingo Molnar <mingo@kernel.org>
CommitDate: Wed, 23 May 2012 17:40:51 +0200

Revert "sched, perf: Use a single callback into the scheduler"

This reverts commit cb04ff9ac424 ("sched, perf: Use a single
callback into the scheduler").

Before this change was introduced, the process switch worked
like this (wrt. perf event scheduling):

     schedule (prev, next)
       - schedule out all perf events for prev
       - switch to next
       - schedule in all perf events for current (next)

After the commit, the process switch looks like:

     schedule (prev, next)
       - schedule out all perf events for prev
       - schedule in all perf events for (next)
       - switch to next

The problem is that after we schedule the perf events in, the
pmu is enabled and we can receive events even before we make the
switch to next - so "current" is still the prev process (event
SAMPLE data are filled in based on the value of "current").

That's exactly what we see with the test__PERF_RECORD test. We
receive SAMPLEs with the PID of the process that our tracee is
scheduled from.

Discussed with Peter Zijlstra:

 > Bah!, yeah I guess reverting is the right thing for now. Sad
 > though.
 >
 > So by having the two hooks we have a black-spot between them
 > where we receive no events at all; this black-spot covers the
 > hand-over of current and we thus don't receive the 'wrong'
 > events.
 >
 > I rather liked that we could both do away with that black-spot
 > and clean up the code a little, but apparently people rely on
 > it.

Signed-off-by: Jiri Olsa <jolsa@redhat.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: acme@redhat.com
Cc: paulus@samba.org
Cc: cjashfor@linux.vnet.ibm.com
Cc: fweisbec@gmail.com
Cc: eranian@google.com
Link: http://lkml.kernel.org/r/20120523111302.GC1638@m.brq.redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
---
 include/linux/perf_event.h |   24 ++++++++++++++++++------
 kernel/events/core.c       |   14 ++++----------
 kernel/sched/core.c        |    9 ++++++++-
 3 files changed, 30 insertions(+), 17 deletions(-)

diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 8adf70e..f325786 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -1084,8 +1084,10 @@ extern void perf_pmu_unregister(struct pmu *pmu);
 
 extern int perf_num_counters(void);
 extern const char *perf_pmu_name(void);
-extern void __perf_event_task_sched(struct task_struct *prev,
-				    struct task_struct *next);
+extern void __perf_event_task_sched_in(struct task_struct *prev,
+				       struct task_struct *task);
+extern void __perf_event_task_sched_out(struct task_struct *prev,
+					struct task_struct *next);
 extern int perf_event_init_task(struct task_struct *child);
 extern void perf_event_exit_task(struct task_struct *child);
 extern void perf_event_free_task(struct task_struct *task);
@@ -1205,13 +1207,20 @@ perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
 
 extern struct static_key_deferred perf_sched_events;
 
-static inline void perf_event_task_sched(struct task_struct *prev,
+static inline void perf_event_task_sched_in(struct task_struct *prev,
 					    struct task_struct *task)
 {
+	if (static_key_false(&perf_sched_events.key))
+		__perf_event_task_sched_in(prev, task);
+}
+
+static inline void perf_event_task_sched_out(struct task_struct *prev,
+					     struct task_struct *next)
+{
 	perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, NULL, 0);
 
 	if (static_key_false(&perf_sched_events.key))
-		__perf_event_task_sched(prev, task);
+		__perf_event_task_sched_out(prev, next);
 }
 
 extern void perf_event_mmap(struct vm_area_struct *vma);
@@ -1286,8 +1295,11 @@ extern void perf_event_disable(struct perf_event *event);
 extern void perf_event_task_tick(void);
 #else
 static inline void
-perf_event_task_sched(struct task_struct *prev,
-		      struct task_struct *task)				{ }
+perf_event_task_sched_in(struct task_struct *prev,
+			 struct task_struct *task)			{ }
+static inline void
+perf_event_task_sched_out(struct task_struct *prev,
+			  struct task_struct *next)			{ }
 static inline int perf_event_init_task(struct task_struct *child)	{ return 0; }
 static inline void perf_event_exit_task(struct task_struct *child)	{ }
 static inline void perf_event_free_task(struct task_struct *task)	{ }
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 91a4459..5b06cbb 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -2039,8 +2039,8 @@ static void perf_event_context_sched_out(struct task_struct *task, int ctxn,
  * accessing the event control register. If a NMI hits, then it will
  * not restart the event.
  */
-static void __perf_event_task_sched_out(struct task_struct *task,
-					struct task_struct *next)
+void __perf_event_task_sched_out(struct task_struct *task,
+				 struct task_struct *next)
 {
 	int ctxn;
 
@@ -2279,8 +2279,8 @@ static void perf_branch_stack_sched_in(struct task_struct *prev,
  * accessing the event control register. If a NMI hits, then it will
  * keep the event running.
  */
-static void __perf_event_task_sched_in(struct task_struct *prev,
-				       struct task_struct *task)
+void __perf_event_task_sched_in(struct task_struct *prev,
+				struct task_struct *task)
 {
 	struct perf_event_context *ctx;
 	int ctxn;
@@ -2305,12 +2305,6 @@ static void __perf_event_task_sched_in(struct task_struct *prev,
 		perf_branch_stack_sched_in(prev, task);
 }
 
-void __perf_event_task_sched(struct task_struct *prev, struct task_struct *next)
-{
-	__perf_event_task_sched_out(prev, next);
-	__perf_event_task_sched_in(prev, next);
-}
-
 static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
 {
 	u64 frequency = event->attr.sample_freq;
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 13c3883..0533a68 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1913,7 +1913,7 @@ prepare_task_switch(struct rq *rq, struct task_struct *prev,
 		    struct task_struct *next)
 {
 	sched_info_switch(prev, next);
-	perf_event_task_sched(prev, next);
+	perf_event_task_sched_out(prev, next);
 	fire_sched_out_preempt_notifiers(prev, next);
 	prepare_lock_switch(rq, next);
 	prepare_arch_switch(next);
@@ -1956,6 +1956,13 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
 	 */
 	prev_state = prev->state;
 	finish_arch_switch(prev);
+#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
+	local_irq_disable();
+#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
+	perf_event_task_sched_in(prev, current);
+#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
+	local_irq_enable();
+#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
 	finish_lock_switch(rq, prev);
 	finish_arch_post_lock_switch();
 


* Re: [tip:perf/core] Revert "sched, perf: Use a single callback into the scheduler"
  2012-05-23 16:09     ` [tip:perf/core] " tip-bot for Jiri Olsa
@ 2012-05-23 16:33       ` Peter Zijlstra
  0 siblings, 0 replies; 5+ messages in thread
From: Peter Zijlstra @ 2012-05-23 16:33 UTC (permalink / raw)
  To: mingo, hpa, linux-kernel, tglx, jolsa; +Cc: linux-tip-commits

On Wed, 2012-05-23 at 09:09 -0700, tip-bot for Jiri Olsa wrote:
>  > I rather liked that we could both do away with that black-spot
>  > and clean up the code a little, but apparently people rely on it.
> 
Ingo noted that inherited counters are already active over the context
switch (due to the lazy switch optimization) and thus profile the
context switch cost.

Regular per-task counters, which do not include this cost, are therefore
not consistent with the inherited ones.

So it would be good to find a way to fix the tracing fallout
differently. Ah well..
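
For reference, the "lazy switch" here is the context swap that
perf_event_context_sched_out() does when prev's and next's contexts are
clones of the same parent (i.e. inherited counters). Heavily simplified
sketch below - helper and field names are from memory and only
approximate, so treat it as illustrative:

    /* kernel/events/core.c, perf_event_context_sched_out(), simplified */
    parent   = rcu_dereference(ctx->parent_ctx);
    next_ctx = next->perf_event_ctxp[ctxn];
    if (parent && next_ctx &&
        rcu_dereference(next_ctx->parent_ctx) == parent &&
        context_equiv(ctx, next_ctx)) {
            /*
             * Both contexts are clones of the same parent: just swap
             * them between the two tasks instead of scheduling the
             * events out and back in.  The counters keep running
             * across the switch and so also count the switch itself.
             */
            task->perf_event_ctxp[ctxn] = next_ctx;
            next->perf_event_ctxp[ctxn] = ctx;
            ctx->task      = next;
            next_ctx->task = task;
            do_switch = 0;
    }
    if (do_switch)
            ctx_sched_out(ctx, cpuctx, EVENT_ALL);  /* non-inherited path */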


