From: Frederic Weisbecker <fweisbec@gmail.com>
To: LKML <linux-kernel@vger.kernel.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>,
	Peter Zijlstra <peterz@infradead.org>,
	Chris Metcalf <cmetcalf@ezchip.com>,
	Thomas Gleixner <tglx@linutronix.de>,
	Luiz Capitulino <lcapitulino@redhat.com>,
	Christoph Lameter <cl@linux.com>, Ingo Molnar <mingo@kernel.org>,
	Viresh Kumar <viresh.kumar@linaro.org>,
	Rik van Riel <riel@redhat.com>
Subject: [PATCH 5/9] perf: Migrate perf to use new tick dependency mask model
Date: Mon, 14 Dec 2015 19:38:28 +0100	[thread overview]
Message-ID: <1450118312-3788-6-git-send-email-fweisbec@gmail.com> (raw)
In-Reply-To: <1450118312-3788-1-git-send-email-fweisbec@gmail.com>

Instead of providing asynchronous checks for the nohz subsystem to verify
the perf event tick dependency, migrate perf to the new dependency mask.

Perf needs the tick for two situations:

1) Freq events. We could set the tick dependency when those are
installed on a CPU context. But setting a global dependency on top of
the global freq events accounting is much easier. If people want this
to be optimized, we can still refine it later at the per-CPU tick
dependency level. This patch doesn't change the current behaviour anyway.

2) Throttled events: this is a per-CPU dependency, set from the event
overflow path and cleared again from perf_event_task_tick() (a short
sketch of both paths follows below).

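For reference, here is a condensed sketch of how both paths end up using
the new dependency API. The names (tick_nohz_set_dep(),
tick_nohz_set_dep_cpu(), tick_nohz_clear_dep_cpu(), TICK_PERF_EVENTS_BIT,
nr_freq_lock) are taken from the diff below; the sketch drops the
CONFIG_NO_HZ_FULL fallbacks and surrounding context, so it illustrates
the model rather than substituting for the actual hunks.

/* Illustrative sketch only -- the real changes are in the diff below. */

/*
 * 1) Freq events: one global dependency. nr_freq_lock serializes the
 *    0 <-> 1 transitions of nr_freq_events against a concurrent
 *    unaccount, so the dependency bit always matches the counter.
 */
static void account_freq_event_nohz(void)
{
	spin_lock(&nr_freq_lock);
	if (atomic_inc_return(&nr_freq_events) == 1)
		tick_nohz_set_dep(TICK_PERF_EVENTS_BIT);
	spin_unlock(&nr_freq_lock);
}

/*
 * 2) Throttled events: a per-CPU dependency, set from
 *    __perf_event_overflow() when an event gets throttled ...
 */
	__this_cpu_inc(perf_throttled_count);
	tick_nohz_set_dep_cpu(TICK_PERF_EVENTS_BIT, smp_processor_id());

/*
 * ... and cleared from perf_event_task_tick() once the throttled
 * counters have been picked up for unthrottling:
 */
	throttled = __this_cpu_xchg(perf_throttled_count, 0);
	tick_nohz_clear_dep_cpu(TICK_PERF_EVENTS_BIT, smp_processor_id());
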
Cc: Christoph Lameter <cl@linux.com>
Cc: Chris Metcalf <cmetcalf@ezchip.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Luiz Capitulino <lcapitulino@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Viresh Kumar <viresh.kumar@linaro.org>
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
---
 include/linux/perf_event.h |  6 -----
 include/linux/tick.h       |  2 --
 kernel/events/core.c       | 65 ++++++++++++++++++++++++++++++++++------------
 kernel/time/tick-sched.c   |  8 +-----
 4 files changed, 49 insertions(+), 32 deletions(-)

diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index d841d33..56ff20f 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -1109,12 +1109,6 @@ static inline void perf_event_task_tick(void)				{ }
 static inline int perf_event_release_kernel(struct perf_event *event)	{ return 0; }
 #endif
 
-#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_NO_HZ_FULL)
-extern bool perf_event_can_stop_tick(void);
-#else
-static inline bool perf_event_can_stop_tick(void)			{ return true; }
-#endif
-
 #if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL)
 extern void perf_restore_debug_store(void);
 #else
diff --git a/include/linux/tick.h b/include/linux/tick.h
index 6d09dc1..eab61d0 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -178,7 +178,6 @@ extern void tick_nohz_set_dep_signal(struct signal_struct *signal,
 extern void tick_nohz_clear_dep_signal(struct signal_struct *signal,
 				       enum tick_dependency_bit bit);
 
-extern void tick_nohz_full_kick(void);
 extern void tick_nohz_full_kick_cpu(int cpu);
 extern void tick_nohz_full_kick_all(void);
 extern void __tick_nohz_task_switch(void);
@@ -205,7 +204,6 @@ static inline void tick_nohz_clear_dep_signal(enum tick_dependency_bit bit,
 					      struct task_struct *signal) { }
 
 static inline void tick_nohz_full_kick_cpu(int cpu) { }
-static inline void tick_nohz_full_kick(void) { }
 static inline void tick_nohz_full_kick_all(void) { }
 static inline void __tick_nohz_task_switch(void) { }
 #endif
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 36babfd..0802499 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -3100,17 +3100,6 @@ done:
 	return rotate;
 }
 
-#ifdef CONFIG_NO_HZ_FULL
-bool perf_event_can_stop_tick(void)
-{
-	if (atomic_read(&nr_freq_events) ||
-	    __this_cpu_read(perf_throttled_count))
-		return false;
-	else
-		return true;
-}
-#endif
-
 void perf_event_task_tick(void)
 {
 	struct list_head *head = this_cpu_ptr(&active_ctx_list);
@@ -3121,6 +3110,7 @@ void perf_event_task_tick(void)
 
 	__this_cpu_inc(perf_throttled_seq);
 	throttled = __this_cpu_xchg(perf_throttled_count, 0);
+	tick_nohz_clear_dep_cpu(TICK_PERF_EVENTS_BIT, smp_processor_id());
 
 	list_for_each_entry_safe(ctx, tmp, head, active_ctx_list)
 		perf_adjust_freq_unthr_context(ctx, throttled);
@@ -3573,6 +3563,28 @@ static void unaccount_event_cpu(struct perf_event *event, int cpu)
 		atomic_dec(&per_cpu(perf_cgroup_events, cpu));
 }
 
+#ifdef CONFIG_NO_HZ_FULL
+static DEFINE_SPINLOCK(nr_freq_lock);
+#endif
+
+static void unaccount_freq_event_nohz(void)
+{
+#ifdef CONFIG_NO_HZ_FULL
+	spin_lock(&nr_freq_lock);
+	if (atomic_dec_and_test(&nr_freq_events))
+		tick_nohz_clear_dep(TICK_PERF_EVENTS_BIT);
+	spin_unlock(&nr_freq_lock);
+#endif
+}
+
+static void unaccount_freq_event(void)
+{
+	if (tick_nohz_full_enabled())
+		unaccount_freq_event_nohz();
+	else
+		atomic_dec(&nr_freq_events);
+}
+
 static void unaccount_event(struct perf_event *event)
 {
 	if (event->parent)
@@ -3587,7 +3599,7 @@ static void unaccount_event(struct perf_event *event)
 	if (event->attr.task)
 		atomic_dec(&nr_task_events);
 	if (event->attr.freq)
-		atomic_dec(&nr_freq_events);
+		unaccount_freq_event();
 	if (event->attr.context_switch) {
 		static_key_slow_dec_deferred(&perf_sched_events);
 		atomic_dec(&nr_switch_events);
@@ -6402,9 +6414,9 @@ static int __perf_event_overflow(struct perf_event *event,
 		if (unlikely(throttle
 			     && hwc->interrupts >= max_samples_per_tick)) {
 			__this_cpu_inc(perf_throttled_count);
+			tick_nohz_set_dep_cpu(TICK_PERF_EVENTS_BIT, smp_processor_id());
 			hwc->interrupts = MAX_INTERRUPTS;
 			perf_log_throttle(event, 0);
-			tick_nohz_full_kick();
 			ret = 1;
 		}
 	}
@@ -7804,6 +7816,27 @@ static void account_event_cpu(struct perf_event *event, int cpu)
 		atomic_inc(&per_cpu(perf_cgroup_events, cpu));
 }
 
+/* Freq events need the tick to stay alive (see perf_event_task_tick). */
+static void account_freq_event_nohz(void)
+{
+#ifdef CONFIG_NO_HZ_FULL
+	/* Lock so we don't race with concurrent unaccount */
+	spin_lock(&nr_freq_lock);
+	if (atomic_inc_return(&nr_freq_events) == 1)
+		tick_nohz_set_dep(TICK_PERF_EVENTS_BIT);
+	spin_unlock(&nr_freq_lock);
+#endif
+}
+
+static void account_freq_event(void)
+{
+	if (tick_nohz_full_enabled())
+		account_freq_event_nohz();
+	else
+		atomic_inc(&nr_freq_events);
+}
+
+
 static void account_event(struct perf_event *event)
 {
 	if (event->parent)
@@ -7817,10 +7850,8 @@ static void account_event(struct perf_event *event)
 		atomic_inc(&nr_comm_events);
 	if (event->attr.task)
 		atomic_inc(&nr_task_events);
-	if (event->attr.freq) {
-		if (atomic_inc_return(&nr_freq_events) == 1)
-			tick_nohz_full_kick_all();
-	}
+	if (event->attr.freq)
+		account_freq_event();
 	if (event->attr.context_switch) {
 		atomic_inc(&nr_switch_events);
 		static_key_slow_inc(&perf_sched_events.key);
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index b3bd5c4..00c20db 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -22,7 +22,6 @@
 #include <linux/module.h>
 #include <linux/irq_work.h>
 #include <linux/posix-timers.h>
-#include <linux/perf_event.h>
 #include <linux/context_tracking.h>
 
 #include <asm/irq_regs.h>
@@ -213,11 +212,6 @@ static bool can_stop_full_tick(struct tick_sched *ts)
 		return false;
 	}
 
-	if (!perf_event_can_stop_tick()) {
-		trace_tick_stop(0, TICK_PERF_EVENTS_MASK);
-		return false;
-	}
-
 #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
 	/*
 	 * sched_clock_tick() needs us?
@@ -255,7 +249,7 @@ static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) = {
  * This kick, unlike tick_nohz_full_kick_cpu() and tick_nohz_full_kick_all(),
  * is NMI safe.
  */
-void tick_nohz_full_kick(void)
+static void tick_nohz_full_kick(void)
 {
 	if (!tick_nohz_full_cpu(smp_processor_id()))
 		return;
-- 
2.6.4


Thread overview: 12+ messages
2015-12-14 18:38 [PATCH 0/9] nohz: Tick dependency mask v4 Frederic Weisbecker
2015-12-14 18:38 ` [PATCH 1/9] atomic: Export fetch_or() Frederic Weisbecker
2015-12-16 16:34   ` Chris Metcalf
2015-12-14 18:38 ` [PATCH 2/9] nohz: Implement wide kick on top of irq work Frederic Weisbecker
2015-12-14 18:38 ` [PATCH 3/9] nohz: New tick dependency mask Frederic Weisbecker
2015-12-14 18:38 ` [PATCH 4/9] nohz: Use enum code for tick stop failure tracing message Frederic Weisbecker
2015-12-14 18:38 ` Frederic Weisbecker [this message]
2015-12-14 18:38 ` [PATCH 6/9] sched: Account rr tasks Frederic Weisbecker
2015-12-14 18:38 ` [PATCH 7/9] sched: Migrate sched to use new tick dependency mask model Frederic Weisbecker
2015-12-14 18:38 ` [PATCH 8/9] posix-cpu-timers: Migrate " Frederic Weisbecker
2015-12-14 18:38 ` [PATCH 9/9] sched-clock: " Frederic Weisbecker
