public inbox for linux-kernel@vger.kernel.org
 help / color / mirror / Atom feed
* [PATCH] perf: Fix intel shared extra msr allocation
@ 2012-06-01  3:20 Yan, Zheng
  2012-06-01  9:35 ` Stephane Eranian
  0 siblings, 1 reply; 35+ messages in thread
From: Yan, Zheng @ 2012-06-01  3:20 UTC (permalink / raw)
  To: a.p.zijlstra, eranian, linux-kernel

From: "Yan, Zheng" <zheng.z.yan@intel.com>

intel_shared_reg_get/put_constraints() can be indirectly called
by validate_group(). In that case, they should avoid modifying
the perf_event data structure because the event can already be
in an active state. Otherwise the shared extra msr's reference
count will be left in an inconsistent state.

Signed-off-by: Zheng Yan <zheng.z.yan@intel.com>
---
 arch/x86/kernel/cpu/perf_event_intel.c |   31 +++++++++++++++++++++++--------
 1 files changed, 23 insertions(+), 8 deletions(-)

diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 166546e..10840d0 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -1119,11 +1119,21 @@ intel_bts_constraints(struct perf_event *event)
 	return NULL;
 }
 
-static bool intel_try_alt_er(struct perf_event *event, int orig_idx)
+static bool intel_try_alt_er(struct perf_event *event, int *idx,
+			     int orig_idx, bool fake_cpuc)
 {
-	if (!(x86_pmu.er_flags & ERF_HAS_RSP_1))
+	if (!(x86_pmu.er_flags & ERF_HAS_RSP_1) || *idx != orig_idx)
 		return false;
 
+	/* don't modify the event structure if the cpuc is faked */
+	if (fake_cpuc) {
+		if (*idx == EXTRA_REG_RSP_0)
+			*idx = EXTRA_REG_RSP_1;
+		else if (*idx == EXTRA_REG_RSP_1)
+			*idx = EXTRA_REG_RSP_0;
+		return (*idx != orig_idx);
+	}
+
 	if (event->hw.extra_reg.idx == EXTRA_REG_RSP_0) {
 		event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
 		event->hw.config |= 0x01bb;
@@ -1139,6 +1149,7 @@ static bool intel_try_alt_er(struct perf_event *event, int orig_idx)
 	if (event->hw.extra_reg.idx == orig_idx)
 		return false;
 
+	*idx = event->hw.extra_reg.idx;
 	return true;
 }
 
@@ -1155,16 +1166,18 @@ __intel_shared_reg_get_constraints(struct cpu_hw_events *cpuc,
 				   struct hw_perf_event_extra *reg)
 {
 	struct event_constraint *c = &emptyconstraint;
+	struct intel_shared_regs *shared_regs = cpuc->shared_regs;
 	struct er_account *era;
 	unsigned long flags;
 	int orig_idx = reg->idx;
+	int idx = orig_idx;
 
-	/* already allocated shared msr */
-	if (reg->alloc)
+	/* shared msr is already allocated and cpuc is not faked */
+	if (reg->alloc && shared_regs->core_id != -1)
 		return NULL; /* call x86_get_event_constraint() */
 
 again:
-	era = &cpuc->shared_regs->regs[reg->idx];
+	era = &shared_regs->regs[idx];
 	/*
 	 * we use spin_lock_irqsave() to avoid lockdep issues when
 	 * passing a fake cpuc
@@ -1181,14 +1194,16 @@ __intel_shared_reg_get_constraints(struct cpu_hw_events *cpuc,
 		atomic_inc(&era->ref);
 
 		/* no need to reallocate during incremental event scheduling */
-		reg->alloc = 1;
+		if (shared_regs->core_id != -1)
+			reg->alloc = 1;
 
 		/*
 		 * need to call x86_get_event_constraint()
 		 * to check if associated event has constraints
 		 */
 		c = NULL;
-	} else if (intel_try_alt_er(event, orig_idx)) {
+	} else if (intel_try_alt_er(event, &idx, orig_idx,
+				    shared_regs->core_id == -1)) {
 		raw_spin_unlock_irqrestore(&era->lock, flags);
 		goto again;
 	}
@@ -1208,7 +1223,7 @@ __intel_shared_reg_put_constraints(struct cpu_hw_events *cpuc,
 	 * allocated. Also takes care of event which do
 	 * not use an extra shared reg
 	 */
-	if (!reg->alloc)
+	if (!reg->alloc || cpuc->shared_regs->core_id == -1)
 		return;
 
 	era = &cpuc->shared_regs->regs[reg->idx];
-- 
1.7.7.6


^ permalink raw reply related	[flat|nested] 35+ messages in thread
* Re: [PATCH] perf, x86: Fix Intel shared extra MSR allocation
@ 2012-06-05 21:35 Stephane Eranian
  2012-06-06 10:35 ` Stephane Eranian
  0 siblings, 1 reply; 35+ messages in thread
From: Stephane Eranian @ 2012-06-05 21:35 UTC (permalink / raw)
  To: linux-kernel; +Cc: peterz, zheng.z.yan


Zheng Yan reported that event group validation can wreck event state
when Intel extra_reg allocation changes event state.

Validation shouldn't change any persistent state. Cloning events in
validate_{event,group}() isn't really pretty either, so add a few
special cases to avoid modifying the event state.

The code is restructured to minimize the special case impact.

Reported-by: Zheng Yan <zheng.z.yan@linux.intel.com>
Cc: Stephane Eranian <eranian@google.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Stephane Eranian <eranian@google.com>
---

diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index e049d6d..cb60838 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -1496,6 +1496,7 @@ static struct cpu_hw_events *allocate_fake_cpuc(void)
 		if (!cpuc->shared_regs)
 			goto error;
 	}
+	cpuc->is_fake = 1;
 	return cpuc;
 error:
 	free_fake_cpuc(cpuc);
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index 6638aaf..83794d8 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -117,6 +117,7 @@ struct cpu_hw_events {
 	struct perf_event	*event_list[X86_PMC_IDX_MAX]; /* in enabled order */
 
 	unsigned int		group_flag;
+	int			is_fake;
 
 	/*
 	 * Intel DebugStore bits
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 166546e..76a2bd2 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -1119,27 +1119,33 @@ intel_bts_constraints(struct perf_event *event)
 	return NULL;
 }
 
-static bool intel_try_alt_er(struct perf_event *event, int orig_idx)
+static int intel_alt_er(int idx)
 {
 	if (!(x86_pmu.er_flags & ERF_HAS_RSP_1))
-		return false;
+		return idx;
 
-	if (event->hw.extra_reg.idx == EXTRA_REG_RSP_0) {
+	if (idx == EXTRA_REG_RSP_0)
+		return EXTRA_REG_RSP_1;
+
+	if (idx == EXTRA_REG_RSP_1)
+		return EXTRA_REG_RSP_0;
+
+	return idx;
+}
+
+static void intel_fixup_er(struct perf_event *event, int idx)
+{
+	if (idx == EXTRA_REG_RSP_0) {
 		event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
 		event->hw.config |= 0x01bb;
 		event->hw.extra_reg.idx = EXTRA_REG_RSP_1;
 		event->hw.extra_reg.reg = MSR_OFFCORE_RSP_1;
-	} else if (event->hw.extra_reg.idx == EXTRA_REG_RSP_1) {
+	} else if (idx == EXTRA_REG_RSP_1) {
 		event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
 		event->hw.config |= 0x01b7;
 		event->hw.extra_reg.idx = EXTRA_REG_RSP_0;
 		event->hw.extra_reg.reg = MSR_OFFCORE_RSP_0;
 	}
-
-	if (event->hw.extra_reg.idx == orig_idx)
-		return false;
-
-	return true;
 }
 
 /*
@@ -1157,14 +1163,19 @@ __intel_shared_reg_get_constraints(struct cpu_hw_events *cpuc,
 	struct event_constraint *c = &emptyconstraint;
 	struct er_account *era;
 	unsigned long flags;
-	int orig_idx = reg->idx;
+	int idx = reg->idx;
 
-	/* already allocated shared msr */
-	if (reg->alloc)
-		return NULL; /* call x86_get_event_constraint() */
+	/*
+	 * reg->alloc can be set due to existing state, so for fake cpuc
+	 * we need to ignore this, otherwise we might fail to allocate
+	 * proper fake state for this extra reg constraint. Also see
+	 * the comment below.
+	*/
+	if (reg->alloc && !cpuc->is_fake)
+		return NULL; /* call x86_get_event_constraints() */
 
 again:
-	era = &cpuc->shared_regs->regs[reg->idx];
+	era = &cpuc->shared_regs->regs[idx];
 	/*
 	 * we use spin_lock_irqsave() to avoid lockdep issues when
 	 * passing a fake cpuc
@@ -1172,6 +1183,27 @@ again:
 	raw_spin_lock_irqsave(&era->lock, flags);
 
 	if (!atomic_read(&era->ref) || era->config == reg->config) {
+		/*
+		 * If its a fake cpuc -- as per validate_{group,event}() we
+		 * shouldn't touch event state and we can avoid doing so
+		 * since both will only call get_event_constraints() once
+		 * on each event, this avoids the need for reg->alloc.
+		 *
+		 * Not doing the ER fixup will only result in era->reg being
+		 * wrong, but since we won't actually try and program hardware
+		 * this isn't a problem either.
+		 */
+		if (!cpuc->is_fake) {
+			if (idx != reg->idx)
+				intel_fixup_er(event, idx);
+			/*
+			 * x86_schedule_events() calls get_event_constraints()
+			 * multiple times on events in the case of incremental
+			 * scheduling(). reg->alloc ensures we only do the ER
+			 * allocation once.
+			 */
+			reg->alloc = 1;
+		}
 
 		/* lock in msr value */
 		era->config = reg->config;
@@ -1180,18 +1212,19 @@ again:
 		/* one more user */
 		atomic_inc(&era->ref);
 
-		/* no need to reallocate during incremental event scheduling */
-		reg->alloc = 1;
-
 		/*
 		 * need to call x86_get_event_constraint()
 		 * to check if associated event has constraints
 		 */
 		c = NULL;
-	} else if (intel_try_alt_er(event, orig_idx)) {
-		raw_spin_unlock_irqrestore(&era->lock, flags);
-		goto again;
+	} else {
+		idx = intel_alt_er(idx);
+		if (idx != reg->idx) {
+			raw_spin_unlock_irqrestore(&era->lock, flags);
+			goto again;
+		}
 	}
+
 	raw_spin_unlock_irqrestore(&era->lock, flags);
 
 	return c;
@@ -1204,11 +1237,14 @@ __intel_shared_reg_put_constraints(struct cpu_hw_events *cpuc,
 	struct er_account *era;
 
 	/*
-	 * only put constraint if extra reg was actually
-	 * allocated. Also takes care of event which do
-	 * not use an extra shared reg
+	 * Only put constraint if extra reg was actually allocated. Also takes
+	 * care of event which do not use an extra shared reg.
+	 *
+	 * Also, if this is a fake cpuc we shouldn't touch any event state
+	 * (reg->alloc) and we don't care about leaving inconsistent cpuc state
+	 * either since it'll be thrown out.
 	 */
-	if (!reg->alloc)
+	if (!reg->alloc || cpuc->is_fake)
 		return;
 
 	era = &cpuc->shared_regs->regs[reg->idx];

^ permalink raw reply related	[flat|nested] 35+ messages in thread

end of thread, other threads:[~2012-06-07  4:01 UTC | newest]

Thread overview: 35+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2012-06-01  3:20 [PATCH] perf: Fix intel shared extra msr allocation Yan, Zheng
2012-06-01  9:35 ` Stephane Eranian
2012-06-01 14:11   ` Yan, Zheng
2012-06-04 13:12     ` Stephane Eranian
2012-06-05  2:18       ` Yan, Zheng
2012-06-05 10:14     ` Peter Zijlstra
2012-06-05 10:21       ` Stephane Eranian
2012-06-05 10:27         ` Peter Zijlstra
2012-06-05 10:38           ` Stephane Eranian
2012-06-05 12:07             ` Peter Zijlstra
2012-06-05 12:39               ` Peter Zijlstra
2012-06-05 12:51                 ` Stephane Eranian
2012-06-05 13:04                   ` Peter Zijlstra
2012-06-05 13:30                     ` [PATCH] perf, x86: Fix Intel shared extra MSR allocation Peter Zijlstra
2012-06-05 13:56                       ` Peter Zijlstra
2012-06-05 21:26                         ` Stephane Eranian
2012-06-06  1:00                         ` Yan, Zheng
2012-06-06 15:57                       ` [tip:perf/core] perf/x86: " tip-bot for Peter Zijlstra
2012-06-06 16:11                       ` tip-bot for Peter Zijlstra
2012-06-05 13:31                     ` [PATCH] perf: Fix intel shared extra msr allocation Stephane Eranian
2012-06-05 13:32                       ` Peter Zijlstra
2012-06-05 13:38                         ` Stephane Eranian
2012-06-05 13:47                           ` Peter Zijlstra
2012-06-05 13:51                             ` Stephane Eranian
2012-06-06 10:12                               ` Stephane Eranian
2012-06-07  1:25                                 ` Yan, Zheng
2012-06-07  4:01                                 ` Yan, Zheng
  -- strict thread matches above, loose matches on Subject: below --
2012-06-05 21:35 [PATCH] perf, x86: Fix Intel shared extra MSR allocation Stephane Eranian
2012-06-06 10:35 ` Stephane Eranian
2012-06-06 10:36   ` Peter Zijlstra
2012-06-06 10:53     ` Peter Zijlstra
2012-06-06 11:43       ` Stephane Eranian
2012-06-06 11:57       ` Stephane Eranian
2012-06-06 12:06         ` Peter Zijlstra
2012-06-06 12:08           ` Stephane Eranian

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox