linux-kernel.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
From: Alexander Shishkin <alexander.shishkin@linux.intel.com>
To: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Ingo Molnar <mingo@redhat.com>,
	linux-kernel@vger.kernel.org, vince@deater.net,
	eranian@google.com, johannes@sipsolutions.net,
	Arnaldo Carvalho de Melo <acme@infradead.org>,
	Alexander Shishkin <alexander.shishkin@linux.intel.com>
Subject: [PATCH 2/7] perf: Generalize task_function_call()ers
Date: Thu,  3 Dec 2015 12:32:37 +0200	[thread overview]
Message-ID: <1449138762-15194-3-git-send-email-alexander.shishkin@linux.intel.com> (raw)
In-Reply-To: <1449138762-15194-1-git-send-email-alexander.shishkin@linux.intel.com>

A number of places in perf core (perf_event_enable, perf_event_disable,
perf_install_in_context, perf_remove_from_context) are performing the
same tapdance around task_function_call() to modify an event and/or its
context on its target cpu. The sequence mainly consists of re-trying
task_function_call() until it either succeeds or is no longer necessary
because the event's context has gone inactive, in which case the action
can be performed on the caller's cpu provided the context lock is still
held. The complication that all callers are dealing with is that they
have to release the context lock before calling task_function_call(),
which gives an opportunity for the context in question to get scheduled
out, so then they have to re-acquire the lock, check whether the context
did in fact get scheduled out and, if so, re-try the cross-call.

This patch creates a more generic helper that either executes a callback
on the target cpu or returns with ctx::lock locked, so that the caller
can perform its action on an inactive context. This patch also converts
the existing users of this sequence to use the new helper.

Signed-off-by: Alexander Shishkin <alexander.shishkin@linux.intel.com>
---
 include/linux/perf_event.h |   1 +
 kernel/events/core.c       | 190 +++++++++++++++------------------------------
 2 files changed, 64 insertions(+), 127 deletions(-)

diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index f9828a48f1..55290d609e 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -402,6 +402,7 @@ enum perf_event_active_state {
 	PERF_EVENT_STATE_OFF		= -1,
 	PERF_EVENT_STATE_INACTIVE	=  0,
 	PERF_EVENT_STATE_ACTIVE		=  1,
+	PERF_EVENT_STATE_NONE		=  2,
 };
 
 struct file;
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 5854fcf7f0..0d3296f600 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -126,6 +126,59 @@ static int cpu_function_call(int cpu, remote_function_f func, void *info)
 	return data.ret;
 }
 
+static int
+remote_call_or_ctx_lock(struct perf_event *event, remote_function_f func,
+			void *info, enum perf_event_active_state retry_state)
+{
+	struct perf_event_context *ctx = event->ctx;
+	struct task_struct *task = ctx->task;
+	unsigned int retry = 1;
+
+	if (!task) {
+		/*
+		 * Per cpu events are removed via an smp call. The removal can
+		 * fail if the CPU is currently offline, but in that case we
+		 * already called __perf_remove_from_context from
+		 * perf_event_exit_cpu.
+		 */
+		cpu_function_call(event->cpu, func, info);
+		return 0;
+	}
+
+	raw_spin_lock_irq(&ctx->lock);
+	do {
+		/*
+		 * Reload the task pointer, it might have been changed by
+		 * a concurrent perf_event_context_sched_out().
+		 */
+		task = ctx->task;
+
+		/*
+		 * If the context is inactive, we don't need a cross call to
+		 * fiddle with the event so long as the ctx::lock is held.
+		 */
+		if (!ctx->is_active)
+			break;
+
+		raw_spin_unlock_irq(&ctx->lock);
+
+		if (!task_function_call(task, func, info))
+			return 0;
+
+		raw_spin_lock_irq(&ctx->lock);
+
+		if (retry_state <= PERF_EVENT_STATE_ACTIVE)
+			retry = event->state == retry_state;
+	} while (retry);
+
+	/*
+	 * No luck: leaving with ctx::lock held and interrupts disabled
+	 * so that the caller can safely fiddle with event or context.
+	 */
+
+	return -ENXIO;
+}
+
 #define EVENT_OWNER_KERNEL ((void *) -1)
 
 static bool is_kernel_event(struct perf_event *event)
@@ -1673,7 +1726,6 @@ static int __perf_remove_from_context(void *info)
 static void perf_remove_from_context(struct perf_event *event, bool detach_group)
 {
 	struct perf_event_context *ctx = event->ctx;
-	struct task_struct *task = ctx->task;
 	struct remove_event re = {
 		.event = event,
 		.detach_group = detach_group,
@@ -1681,36 +1733,10 @@ static void perf_remove_from_context(struct perf_event *event, bool detach_group
 
 	lockdep_assert_held(&ctx->mutex);
 
-	if (!task) {
-		/*
-		 * Per cpu events are removed via an smp call. The removal can
-		 * fail if the CPU is currently offline, but in that case we
-		 * already called __perf_remove_from_context from
-		 * perf_event_exit_cpu.
-		 */
-		cpu_function_call(event->cpu, __perf_remove_from_context, &re);
-		return;
-	}
-
-retry:
-	if (!task_function_call(task, __perf_remove_from_context, &re))
+	if (!remote_call_or_ctx_lock(event, __perf_remove_from_context, &re,
+				     PERF_EVENT_STATE_NONE))
 		return;
 
-	raw_spin_lock_irq(&ctx->lock);
-	/*
-	 * If we failed to find a running task, but find the context active now
-	 * that we've acquired the ctx->lock, retry.
-	 */
-	if (ctx->is_active) {
-		raw_spin_unlock_irq(&ctx->lock);
-		/*
-		 * Reload the task pointer, it might have been changed by
-		 * a concurrent perf_event_context_sched_out().
-		 */
-		task = ctx->task;
-		goto retry;
-	}
-
 	/*
 	 * Since the task isn't running, its safe to remove the event, us
 	 * holding the ctx->lock ensures the task won't get scheduled in.
@@ -1778,34 +1804,10 @@ int __perf_event_disable(void *info)
 static void _perf_event_disable(struct perf_event *event)
 {
 	struct perf_event_context *ctx = event->ctx;
-	struct task_struct *task = ctx->task;
-
-	if (!task) {
-		/*
-		 * Disable the event on the cpu that it's on
-		 */
-		cpu_function_call(event->cpu, __perf_event_disable, event);
-		return;
-	}
 
-retry:
-	if (!task_function_call(task, __perf_event_disable, event))
+	if (!remote_call_or_ctx_lock(event, __perf_event_disable, event, PERF_EVENT_STATE_ACTIVE))
 		return;
 
-	raw_spin_lock_irq(&ctx->lock);
-	/*
-	 * If the event is still active, we need to retry the cross-call.
-	 */
-	if (event->state == PERF_EVENT_STATE_ACTIVE) {
-		raw_spin_unlock_irq(&ctx->lock);
-		/*
-		 * Reload the task pointer, it might have been changed by
-		 * a concurrent perf_event_context_sched_out().
-		 */
-		task = ctx->task;
-		goto retry;
-	}
-
 	/*
 	 * Since we have the lock this context can't be scheduled
 	 * in, so we can change the state safely.
@@ -2143,42 +2145,16 @@ perf_install_in_context(struct perf_event_context *ctx,
 			struct perf_event *event,
 			int cpu)
 {
-	struct task_struct *task = ctx->task;
-
 	lockdep_assert_held(&ctx->mutex);
 
 	event->ctx = ctx;
 	if (event->cpu != -1)
 		event->cpu = cpu;
 
-	if (!task) {
-		/*
-		 * Per cpu events are installed via an smp call and
-		 * the install is always successful.
-		 */
-		cpu_function_call(cpu, __perf_install_in_context, event);
-		return;
-	}
-
-retry:
-	if (!task_function_call(task, __perf_install_in_context, event))
+	if (!remote_call_or_ctx_lock(event, __perf_install_in_context, event,
+				     PERF_EVENT_STATE_NONE))
 		return;
 
-	raw_spin_lock_irq(&ctx->lock);
-	/*
-	 * If we failed to find a running task, but find the context active now
-	 * that we've acquired the ctx->lock, retry.
-	 */
-	if (ctx->is_active) {
-		raw_spin_unlock_irq(&ctx->lock);
-		/*
-		 * Reload the task pointer, it might have been changed by
-		 * a concurrent perf_event_context_sched_out().
-		 */
-		task = ctx->task;
-		goto retry;
-	}
-
 	/*
 	 * Since the task isn't running, its safe to add the event, us holding
 	 * the ctx->lock ensures the task won't get scheduled in.
@@ -2299,15 +2275,6 @@ unlock:
 static void _perf_event_enable(struct perf_event *event)
 {
 	struct perf_event_context *ctx = event->ctx;
-	struct task_struct *task = ctx->task;
-
-	if (!task) {
-		/*
-		 * Enable the event on the cpu that it's on
-		 */
-		cpu_function_call(event->cpu, __perf_event_enable, event);
-		return;
-	}
 
 	raw_spin_lock_irq(&ctx->lock);
 	if (event->state >= PERF_EVENT_STATE_INACTIVE)
@@ -2322,32 +2289,13 @@ static void _perf_event_enable(struct perf_event *event)
 	 */
 	if (event->state == PERF_EVENT_STATE_ERROR)
 		event->state = PERF_EVENT_STATE_OFF;
-
-retry:
-	if (!ctx->is_active) {
-		__perf_event_mark_enabled(event);
-		goto out;
-	}
-
 	raw_spin_unlock_irq(&ctx->lock);
 
-	if (!task_function_call(task, __perf_event_enable, event))
+	if (!remote_call_or_ctx_lock(event, __perf_event_enable, event, PERF_EVENT_STATE_OFF))
 		return;
 
-	raw_spin_lock_irq(&ctx->lock);
-
-	/*
-	 * If the context is active and the event is still off,
-	 * we need to retry the cross-call.
-	 */
-	if (ctx->is_active && event->state == PERF_EVENT_STATE_OFF) {
-		/*
-		 * task could have been flipped by a concurrent
-		 * perf_event_context_sched_out()
-		 */
-		task = ctx->task;
-		goto retry;
-	}
+	if (!ctx->is_active)
+		__perf_event_mark_enabled(event);
 
 out:
 	raw_spin_unlock_irq(&ctx->lock);
@@ -4209,22 +4157,10 @@ static int perf_event_period(struct perf_event *event, u64 __user *arg)
 	task = ctx->task;
 	pe.value = value;
 
-	if (!task) {
-		cpu_function_call(event->cpu, __perf_event_period, &pe);
-		return 0;
-	}
-
-retry:
-	if (!task_function_call(task, __perf_event_period, &pe))
+	if (!remote_call_or_ctx_lock(event, __perf_event_period, &pe,
+				     PERF_EVENT_STATE_NONE))
 		return 0;
 
-	raw_spin_lock_irq(&ctx->lock);
-	if (ctx->is_active) {
-		raw_spin_unlock_irq(&ctx->lock);
-		task = ctx->task;
-		goto retry;
-	}
-
 	__perf_event_period(&pe);
 	raw_spin_unlock_irq(&ctx->lock);
 
-- 
2.6.2


  parent reply	other threads:[~2015-12-03 10:36 UTC|newest]

Thread overview: 28+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2015-12-03 10:32 [PATCH 0/7] perf: Untangle aux refcounting Alexander Shishkin
2015-12-03 10:32 ` [PATCH 1/7] perf: Refuse to begin aux transaction after aux_mmap_count drops Alexander Shishkin
2015-12-03 10:32 ` Alexander Shishkin [this message]
2015-12-03 17:34   ` [PATCH 2/7] perf: Generalize task_function_call()ers Peter Zijlstra
2015-12-08 16:42     ` Alexander Shishkin
2015-12-08 16:57       ` Peter Zijlstra
2015-12-17 13:40         ` Peter Zijlstra
2015-12-17 14:25           ` Alexander Shishkin
2015-12-17 15:07             ` Peter Zijlstra
2015-12-18  9:01               ` Peter Zijlstra
2015-12-18 15:07                 ` Alexander Shishkin
2015-12-18 16:47                   ` Peter Zijlstra
2015-12-18 17:41                     ` Alexander Shishkin
2015-12-21 14:39                 ` Alexander Shishkin
2016-01-11 10:44                 ` Alexander Shishkin
2015-12-03 10:32 ` [PATCH 3/7] perf: Add a helper to stop running events Alexander Shishkin
2015-12-03 10:32 ` [PATCH 4/7] perf: Free aux pages in unmap path Alexander Shishkin
2015-12-04 17:02   ` Peter Zijlstra
2015-12-04 22:17     ` Peter Zijlstra
2015-12-07 16:16       ` Peter Zijlstra
2015-12-09  9:57     ` Alexander Shishkin
2015-12-09 10:56       ` Peter Zijlstra
2015-12-10 11:20         ` Alexander Shishkin
2015-12-10 12:58           ` Alexander Shishkin
2015-12-03 10:32 ` [PATCH 5/7] perf: Document aux api usage Alexander Shishkin
2015-12-03 20:36   ` Mathieu Poirier
2015-12-03 10:32 ` [PATCH 6/7] perf/x86/intel/pt: Move transaction start/stop to pmu start/stop callbacks Alexander Shishkin
2015-12-03 10:32 ` [PATCH 7/7] perf/x86/intel/bts: Move transaction start/stop to " Alexander Shishkin

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=1449138762-15194-3-git-send-email-alexander.shishkin@linux.intel.com \
    --to=alexander.shishkin@linux.intel.com \
    --cc=a.p.zijlstra@chello.nl \
    --cc=acme@infradead.org \
    --cc=eranian@google.com \
    --cc=johannes@sipsolutions.net \
    --cc=linux-kernel@vger.kernel.org \
    --cc=mingo@redhat.com \
    --cc=vince@deater.net \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).