From: Stephane Eranian <eranian@google.com>
To: linux-kernel@vger.kernel.org
Cc: peterz@infradead.org, mingo@elte.hu, ak@linux.intel.com,
	jolsa@redhat.com, zheng.z.yan@intel.com,
	maria.n.dimakopoulou@gmail.com
Subject: [PATCH 4/9] perf/x86: add cross-HT counter exclusion infrastructure
Date: Wed,  4 Jun 2014 23:34:13 +0200
Message-ID: <1401917658-26065-5-git-send-email-eranian@google.com>
In-Reply-To: <1401917658-26065-1-git-send-email-eranian@google.com>

From: Maria Dimakopoulou <maria.n.dimakopoulou@gmail.com>

This patch adds a new shared_regs-style structure to the
per-cpu x86 state (cpuc). It is used to coordinate access
to counters which must be used with mutual exclusion across
HyperThreads on Intel processors. The new struct is not
needed on every PMU, thus it is allocated on demand.

Reviewed-by: Stephane Eranian <eranian@google.com>
Signed-off-by: Maria Dimakopoulou <maria.n.dimakopoulou@gmail.com>
---
 arch/x86/kernel/cpu/perf_event.h       |   40 ++++++++++++++++++--
 arch/x86/kernel/cpu/perf_event_intel.c |   63 +++++++++++++++++++++++++++++---
 2 files changed, 94 insertions(+), 9 deletions(-)
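
A note on the allocation scheme, with a compilable sketch below.
intel_pmu_cpu_prepare() gives each CPU its own struct, and
intel_pmu_cpu_starting() makes the second HT sibling of a core discover
the first sibling's copy by core_id, adopt it, and become thread 1, so
both siblings end up sharing one refcounted per-core struct. In the
sketch, NCPUS, the cpus[] array and sibling_of() are hypothetical
stand-ins for the kernel's per-CPU state and topology masks; only the
adopt-or-publish logic follows the patch, which also defers the free
via kfree_on_online rather than freeing in place.

/*
 * Standalone sketch of the per-core sharing scheme, as plain C.
 */
#include <stdio.h>
#include <stdlib.h>

#define NCPUS 4

struct excl_cntrs {
	int core_id;		/* -1 until the first sibling publishes it */
	int refcnt;		/* number of HT threads sharing this struct */
};

struct cpu_state {
	struct excl_cntrs *excl;
	int thread_id;		/* 0 or 1, as in cpuc->excl_thread_id */
} cpus[NCPUS];

/* hypothetical topology: CPUs i and i^1 are HT siblings of core i/2 */
static int sibling_of(int cpu) { return cpu ^ 1; }

static void cpu_prepare(int cpu)
{
	struct excl_cntrs *c = calloc(1, sizeof(*c));

	c->core_id = -1;	/* not yet tied to a core */
	cpus[cpu].excl = c;
}

static void cpu_starting(int cpu)
{
	int core_id = cpu / 2, sib = sibling_of(cpu);
	struct excl_cntrs *c = cpus[sib].excl;

	if (c && c->core_id == core_id) {
		/* sibling got here first: drop ours, adopt, become thread 1
		 * (the patch defers this free via kfree_on_online) */
		free(cpus[cpu].excl);
		cpus[cpu].excl = c;
		cpus[cpu].thread_id = 1;
	}
	cpus[cpu].excl->core_id = core_id;
	cpus[cpu].excl->refcnt++;
}

int main(void)
{
	for (int cpu = 0; cpu < NCPUS; cpu++)
		cpu_prepare(cpu);
	for (int cpu = 0; cpu < NCPUS; cpu++)
		cpu_starting(cpu);

	/* both siblings of core 0 now share one struct with refcnt == 2 */
	printf("core 0 shared: %d, refcnt: %d\n",
	       cpus[0].excl == cpus[1].excl, cpus[0].excl->refcnt);
	return 0;
}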

diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index 413799f..5da0a2b 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -65,10 +65,11 @@ struct event_constraint {
 /*
  * struct hw_perf_event.flags flags
  */
-#define PERF_X86_EVENT_PEBS_LDLAT	0x1 /* ld+ldlat data address sampling */
-#define PERF_X86_EVENT_PEBS_ST		0x2 /* st data address sampling */
-#define PERF_X86_EVENT_PEBS_ST_HSW	0x4 /* haswell style st data sampling */
-#define PERF_X86_EVENT_COMMITTED	0x8 /* event passed commit_txn */
+#define PERF_X86_EVENT_PEBS_LDLAT	0x01 /* ld+ldlat data address sampling */
+#define PERF_X86_EVENT_PEBS_ST		0x02 /* st data address sampling */
+#define PERF_X86_EVENT_PEBS_ST_HSW	0x04 /* haswell style st data sampling */
+#define PERF_X86_EVENT_COMMITTED	0x08 /* event passed commit_txn */
+#define PERF_X86_EVENT_EXCL		0x10 /* HT exclusivity on counter */
 
 struct amd_nb {
 	int nb_id;  /* NorthBridge id */
@@ -119,6 +120,27 @@ struct intel_shared_regs {
 	unsigned                core_id;	/* per-core: core id */
 };
 
+enum intel_excl_state_type {
+	INTEL_EXCL_UNUSED    = 0, /* counter is unused */
+	INTEL_EXCL_SHARED    = 1, /* counter can be used by both threads */
+	INTEL_EXCL_EXCLUSIVE = 2, /* counter can be used by one thread only */
+};
+
+struct intel_excl_states {
+	enum intel_excl_state_type init_state[X86_PMC_IDX_MAX];
+	enum intel_excl_state_type state[X86_PMC_IDX_MAX];
+};
+
+struct intel_excl_cntrs {
+	spinlock_t	lock;
+	unsigned long	lock_flags;
+
+	struct intel_excl_states states[2];
+
+	int		refcnt;		/* per-core: #HT threads */
+	unsigned	core_id;	/* per-core: core id */
+};
+
 #define MAX_LBR_ENTRIES		16
 
 enum {
@@ -181,6 +203,11 @@ struct cpu_hw_events {
 	 * used on Intel NHM/WSM/SNB
 	 */
 	struct intel_shared_regs	*shared_regs;
+	/*
+	 * manage exclusive counter access between hyperthread
+	 */
+	struct intel_excl_cntrs		*excl_cntrs;
+	int excl_thread_id; /* 0 or 1 */
 
 	/*
 	 * AMD specific bits
@@ -204,6 +231,10 @@ struct cpu_hw_events {
 #define EVENT_CONSTRAINT(c, n, m)	\
 	__EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 0, 0)
 
+#define INTEL_EXCLEVT_CONSTRAINT(c, n)	\
+	__EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT, HWEIGHT(n),\
+			   0, PERF_X86_EVENT_EXCL)
+
 /*
  * The overlap flag marks event constraints with overlapping counter
  * masks. This is the case if the counter mask of such an event is not
@@ -499,6 +530,7 @@ do {									\
  */
 #define PMU_FL_NO_HT_SHARING	0x1 /* no hyper-threading resource sharing */
 #define PMU_FL_HAS_RSP_1	0x2 /* has 2 equivalent offcore_rsp regs   */
+#define PMU_FL_EXCL_CNTRS	0x4 /* has exclusive counter requirements  */
 
 #define EVENT_VAR(_id)  event_attr_##_id
 #define EVENT_PTR(_id) &event_attr_##_id.attr.attr
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index e913e46..380fce2 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -1970,16 +1970,46 @@ struct intel_shared_regs *allocate_shared_regs(int cpu)
 	return regs;
 }
 
+struct intel_excl_cntrs *allocate_excl_cntrs(int cpu)
+{
+	struct intel_excl_cntrs *c;
+	int i;
+
+	c = kzalloc_node(sizeof(struct intel_excl_cntrs),
+			 GFP_KERNEL, cpu_to_node(cpu));
+	if (c) {
+		spin_lock_init(&c->lock);
+		for (i = 0; i < X86_PMC_IDX_MAX; i++) {
+			c->states[0].state[i] = INTEL_EXCL_UNUSED;
+			c->states[0].init_state[i] = INTEL_EXCL_UNUSED;
+
+			c->states[1].state[i] = INTEL_EXCL_UNUSED;
+			c->states[1].init_state[i] = INTEL_EXCL_UNUSED;
+		}
+		c->core_id = -1;
+	}
+	return c;
+}
+
 static int intel_pmu_cpu_prepare(int cpu)
 {
 	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
 
-	if (!(x86_pmu.extra_regs || x86_pmu.lbr_sel_map))
-		return NOTIFY_OK;
+	if (x86_pmu.extra_regs || x86_pmu.lbr_sel_map) {
+		cpuc->shared_regs = allocate_shared_regs(cpu);
+		if (!cpuc->shared_regs)
+			return NOTIFY_BAD;
+	}
 
-	cpuc->shared_regs = allocate_shared_regs(cpu);
-	if (!cpuc->shared_regs)
-		return NOTIFY_BAD;
+	if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
+		cpuc->excl_cntrs = allocate_excl_cntrs(cpu);
+		if (!cpuc->excl_cntrs) {
+			if (cpuc->shared_regs)
+				kfree(cpuc->shared_regs);
+			return NOTIFY_BAD;
+		}
+		cpuc->excl_thread_id = 0;
+	}
 
 	return NOTIFY_OK;
 }
@@ -2020,12 +2050,29 @@ static void intel_pmu_cpu_starting(int cpu)
 
 	if (x86_pmu.lbr_sel_map)
 		cpuc->lbr_sel = &cpuc->shared_regs->regs[EXTRA_REG_LBR];
+
+	if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
+		for_each_cpu(i, topology_thread_cpumask(cpu)) {
+			struct intel_excl_cntrs *c;
+
+			c = per_cpu(cpu_hw_events, i).excl_cntrs;
+			if (c && c->core_id == core_id) {
+				cpuc->kfree_on_online[1] = cpuc->excl_cntrs;
+				cpuc->excl_cntrs = c;
+				cpuc->excl_thread_id = 1;
+				break;
+			}
+		}
+		cpuc->excl_cntrs->core_id = core_id;
+		cpuc->excl_cntrs->refcnt++;
+	}
 }
 
 static void intel_pmu_cpu_dying(int cpu)
 {
 	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
 	struct intel_shared_regs *pc;
+	struct intel_excl_cntrs *c;
 
 	pc = cpuc->shared_regs;
 	if (pc) {
@@ -2033,6 +2080,12 @@ static void intel_pmu_cpu_dying(int cpu)
 			kfree(pc);
 		cpuc->shared_regs = NULL;
 	}
+	c = cpuc->excl_cntrs;
+	if (c) {
+		if (c->core_id == -1 || --c->refcnt == 0)
+			kfree(c);
+		cpuc->excl_cntrs = NULL;
+	}
 
 	fini_debug_store_on_cpu(cpu);
 }
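
The scheduling logic that consumes these states only lands in patch 5/9
("perf/x86: implement cross-HT corruption bug workaround"). As a rough,
hypothetical illustration of what the three enum values are for,
building on the definitions added above: the helper name and signature
below are assumptions for illustration, not code from this series.

/*
 * Hypothetical reader of intel_excl_states: may thread 'thread' place
 * an event on counter 'idx', given what its HT sibling published?
 * A counter held EXCLUSIVE by the sibling is off limits regardless of
 * the event; an exclusive event additionally needs the sibling to not
 * use the counter at all.
 */
static bool counter_usable(struct intel_excl_cntrs *c, int thread,
			   int idx, bool event_is_excl)
{
	enum intel_excl_state_type sib = c->states[thread ^ 1].state[idx];

	if (sib == INTEL_EXCL_EXCLUSIVE)
		return false;
	if (event_is_excl)
		return sib == INTEL_EXCL_UNUSED;
	return true;
}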
-- 
1.7.9.5


Thread overview: 59+ messages
2014-06-04 21:34 [PATCH 0/9] perf/x86: implement HT counter corruption workaround Stephane Eranian
2014-06-04 21:34 ` [PATCH 1/9] perf,x86: rename er_flags to flags Stephane Eranian
2014-06-04 21:34 ` [PATCH 2/9] perf/x86: vectorize cpuc->kfree_on_online Stephane Eranian
2014-06-04 21:34 ` [PATCH 3/9] perf/x86: add 3 new scheduling callbacks Stephane Eranian
2014-06-04 21:34 ` Stephane Eranian [this message]
2014-06-05  7:47   ` [PATCH 4/9] perf/x86: add cross-HT counter exclusion infrastructure Peter Zijlstra
2014-06-05 10:51     ` Stephane Eranian
2014-06-05  8:04   ` Peter Zijlstra
2014-06-05 13:36     ` Maria Dimakopoulou
2014-06-05  8:29   ` Peter Zijlstra
2014-06-05 21:33     ` Andi Kleen
2014-06-05 21:38       ` Stephane Eranian
2014-06-10 11:53     ` Stephane Eranian
2014-06-10 12:26       ` Peter Zijlstra
2014-06-04 21:34 ` [PATCH 5/9] perf/x86: implement cross-HT corruption bug workaround Stephane Eranian
2014-06-05 13:38   ` Peter Zijlstra
2014-06-05 13:42   ` Peter Zijlstra
2014-06-05 13:48   ` Peter Zijlstra
2014-06-05 14:01     ` Maria Dimakopoulou
2014-06-05 14:04       ` Borislav Petkov
2014-06-05 14:11       ` Peter Zijlstra
2014-06-05 14:14         ` Peter Zijlstra
2014-06-05 14:24           ` Maria Dimakopoulou
2014-06-05 14:04   ` Peter Zijlstra
2014-06-05 14:15     ` Stephane Eranian
2014-06-05 14:21       ` Peter Zijlstra
2014-06-05 14:26         ` Stephane Eranian
2014-06-05 14:31           ` Peter Zijlstra
2014-06-05 15:31         ` Stephane Eranian
2014-06-04 21:34 ` [PATCH 6/9] perf/x86: enforce HT bug workaround for SNB/IVB/HSW Stephane Eranian
2014-06-04 21:34 ` [PATCH 7/9] perf/x86: enforce HT bug workaround with PEBS " Stephane Eranian
2014-06-04 21:34 ` [PATCH 8/9] perf/x86: fix intel_get_event_constraints() for dynamic constraints Stephane Eranian
2014-06-04 21:34 ` [PATCH 9/9] perf/x86: add syfs entry to disable HT bug workaround Stephane Eranian
2014-06-05  8:32   ` Matt Fleming
2014-06-05  9:29     ` Stephane Eranian
2014-06-05 10:01       ` Matt Fleming
2014-06-05 10:19         ` Stephane Eranian
2014-06-05 11:16           ` Matt Fleming
2014-06-05 12:02             ` Stephane Eranian
2014-06-05 13:27               ` Borislav Petkov
2014-06-05 13:42                 ` Stephane Eranian
2014-06-05 14:03                   ` Borislav Petkov
2014-06-05 14:45                     ` Maria Dimakopoulou
2014-06-05 15:17                       ` Borislav Petkov
2014-06-05 16:39                         ` Maria Dimakopoulou
2014-06-05 16:47                           ` Stephane Eranian
2014-06-05 16:52                           ` Borislav Petkov
2014-06-05 18:00                             ` Maria Dimakopoulou
2014-06-05 23:29                               ` Andi Kleen
2014-06-06  8:28                                 ` Matt Fleming
2014-06-05 12:50             ` Peter Zijlstra
2014-06-05 12:55               ` Stephane Eranian
2014-06-05 12:59                 ` Peter Zijlstra
2014-06-05 13:16                   ` Stephane Eranian
2014-06-05 13:26                     ` Peter Zijlstra
2014-06-05 13:19       ` Peter Zijlstra
2014-06-05 13:26         ` Stephane Eranian
2014-06-04 22:28 ` [PATCH 0/9] perf/x86: implement HT counter corruption workaround Andi Kleen
2014-06-05 12:45   ` Stephane Eranian
