From: Andi Kleen <andi@firstfloor.org>
To: linux-kernel@vger.kernel.org
Cc: acme@redhat.com, peterz@infradead.org, jolsa@redhat.com,
eranian@google.com, mingo@kernel.org,
Andi Kleen <ak@linux.intel.com>
Subject: [PATCH 01/33] perf, x86: Add PEBSv2 record support
Date: Fri, 26 Oct 2012 13:29:43 -0700 [thread overview]
Message-ID: <1351283415-13170-2-git-send-email-andi@firstfloor.org> (raw)
In-Reply-To: <1351283415-13170-1-git-send-email-andi@firstfloor.org>
From: Andi Kleen <ak@linux.intel.com>
Add support for the v2 PEBS format. It has a superset of the v1 PEBS
fields, but has a longer record so we need to adjust the code paths.
The main advantage is the new "EventingRip" support which directly
gives the address of the sampled instruction, not the off-by-one next
instruction. So with precise == 2
we use that directly and don't try to use LBRs and walking basic blocks.
This lowers the overhead significantly.
Some other features are added in later patches.
Signed-off-by: Andi Kleen <ak@linux.intel.com>
---
arch/x86/kernel/cpu/perf_event.c | 2 +-
arch/x86/kernel/cpu/perf_event_intel_ds.c | 101 ++++++++++++++++++++++-------
2 files changed, 79 insertions(+), 24 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 3373f84..81b5e65 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -401,7 +401,7 @@ int x86_pmu_hw_config(struct perf_event *event)
* check that PEBS LBR correction does not conflict with
* whatever the user is asking with attr->branch_sample_type
*/
- if (event->attr.precise_ip > 1) {
+ if (event->attr.precise_ip > 1 && x86_pmu.intel_cap.pebs_format < 2) {
u64 *br_type = &event->attr.branch_sample_type;
if (has_branch_stack(event)) {
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
index 826054a..9d0dae0 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -41,6 +41,12 @@ struct pebs_record_nhm {
u64 status, dla, dse, lat;
};
+struct pebs_record_v2 {
+ struct pebs_record_nhm nhm;
+ u64 eventingrip;
+ u64 tsx_tuning;
+};
+
void init_debug_store_on_cpu(int cpu)
{
struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
@@ -559,8 +565,7 @@ static void __intel_pmu_pebs_event(struct perf_event *event,
{
/*
* We cast to pebs_record_core since that is a subset of
- * both formats and we don't use the other fields in this
- * routine.
+ * both formats.
*/
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
struct pebs_record_core *pebs = __pebs;
@@ -588,7 +593,10 @@ static void __intel_pmu_pebs_event(struct perf_event *event,
regs.bp = pebs->bp;
regs.sp = pebs->sp;
- if (event->attr.precise_ip > 1 && intel_pmu_pebs_fixup_ip(&regs))
+ if (event->attr.precise_ip > 1 && x86_pmu.intel_cap.pebs_format >= 2) {
+ regs.ip = ((struct pebs_record_v2 *)pebs)->eventingrip;
+ regs.flags |= PERF_EFLAGS_EXACT;
+ } else if (event->attr.precise_ip > 1 && intel_pmu_pebs_fixup_ip(&regs))
regs.flags |= PERF_EFLAGS_EXACT;
else
regs.flags &= ~PERF_EFLAGS_EXACT;
@@ -641,35 +649,21 @@ static void intel_pmu_drain_pebs_core(struct pt_regs *iregs)
__intel_pmu_pebs_event(event, iregs, at);
}
-static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
+static void intel_pmu_drain_pebs_common(struct pt_regs *iregs, void *at,
+ void *top)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
struct debug_store *ds = cpuc->ds;
- struct pebs_record_nhm *at, *top;
struct perf_event *event = NULL;
u64 status = 0;
- int bit, n;
-
- if (!x86_pmu.pebs_active)
- return;
-
- at = (struct pebs_record_nhm *)(unsigned long)ds->pebs_buffer_base;
- top = (struct pebs_record_nhm *)(unsigned long)ds->pebs_index;
+ int bit;
ds->pebs_index = ds->pebs_buffer_base;
- n = top - at;
- if (n <= 0)
- return;
+ for ( ; at < top; at += x86_pmu.pebs_record_size) {
+ struct pebs_record_nhm *p = at;
- /*
- * Should not happen, we program the threshold at 1 and do not
- * set a reset value.
- */
- WARN_ONCE(n > x86_pmu.max_pebs_events, "Unexpected number of pebs records %d\n", n);
-
- for ( ; at < top; at++) {
- for_each_set_bit(bit, (unsigned long *)&at->status, x86_pmu.max_pebs_events) {
+ for_each_set_bit(bit, (unsigned long *)&p->status, x86_pmu.max_pebs_events) {
event = cpuc->events[bit];
if (!test_bit(bit, cpuc->active_mask))
continue;
@@ -692,6 +686,61 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
}
}
+static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct debug_store *ds = cpuc->ds;
+ struct pebs_record_nhm *at, *top;
+ int n;
+
+ if (!x86_pmu.pebs_active)
+ return;
+
+ at = (struct pebs_record_nhm *)(unsigned long)ds->pebs_buffer_base;
+ top = (struct pebs_record_nhm *)(unsigned long)ds->pebs_index;
+
+ ds->pebs_index = ds->pebs_buffer_base;
+
+ n = top - at;
+ if (n <= 0)
+ return;
+
+ /*
+ * Should not happen, we program the threshold at 1 and do not
+ * set a reset value.
+ */
+ WARN_ONCE(n > x86_pmu.max_pebs_events,
+ "Unexpected number of pebs records %d\n", n);
+
+ return intel_pmu_drain_pebs_common(iregs, at, top);
+}
+
+static void intel_pmu_drain_pebs_v2(struct pt_regs *iregs)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct debug_store *ds = cpuc->ds;
+ struct pebs_record_v2 *at, *top;
+ int n;
+
+ if (!x86_pmu.pebs_active)
+ return;
+
+ at = (struct pebs_record_v2 *)(unsigned long)ds->pebs_buffer_base;
+ top = (struct pebs_record_v2 *)(unsigned long)ds->pebs_index;
+
+ n = top - at;
+ if (n <= 0)
+ return;
+ /*
+ * Should not happen, we program the threshold at 1 and do not
+ * set a reset value.
+ */
+ WARN_ONCE(n > x86_pmu.max_pebs_events,
+ "Unexpected number of pebs records %d\n", n);
+
+ return intel_pmu_drain_pebs_common(iregs, at, top);
+}
+
/*
* BTS, PEBS probe and setup
*/
@@ -723,6 +772,12 @@ void intel_ds_init(void)
x86_pmu.drain_pebs = intel_pmu_drain_pebs_nhm;
break;
+ case 2:
+ printk(KERN_CONT "PEBS fmt2%c, ", pebs_type);
+ x86_pmu.pebs_record_size = sizeof(struct pebs_record_v2);
+ x86_pmu.drain_pebs = intel_pmu_drain_pebs_v2;
+ break;
+
default:
printk(KERN_CONT "no PEBS fmt%d%c, ", format, pebs_type);
x86_pmu.pebs = 0;
--
1.7.7.6
next prev parent reply other threads:[~2012-10-26 20:30 UTC|newest]
Thread overview: 55+ messages / expand[flat|nested] mbox.gz Atom feed top
2012-10-26 20:29 perf PMU support for Haswell v4 Andi Kleen
2012-10-26 20:29 ` Andi Kleen [this message]
2012-10-29 10:08 ` [PATCH 01/33] perf, x86: Add PEBSv2 record support Namhyung Kim
2012-10-29 10:13 ` Andi Kleen
2012-10-29 10:23 ` Peter Zijlstra
2012-10-26 20:29 ` [PATCH 02/33] perf, x86: Basic Haswell PMU support v2 Andi Kleen
2012-10-26 20:29 ` [PATCH 03/33] perf, x86: Basic Haswell PEBS support v3 Andi Kleen
2012-10-26 20:29 ` [PATCH 04/33] perf, x86: Support the TSX intx/intx_cp qualifiers v2 Andi Kleen
2012-10-26 20:29 ` [PATCH 05/33] perf, kvm: Support the intx/intx_cp modifiers in KVM arch perfmon emulation v3 Andi Kleen
2012-10-30 9:25 ` Gleb Natapov
2012-10-26 20:29 ` [PATCH 06/33] perf, x86: Support PERF_SAMPLE_ADDR on Haswell Andi Kleen
2012-10-26 20:29 ` [PATCH 07/33] perf, x86: Support Haswell v4 LBR format Andi Kleen
2012-10-26 20:29 ` [PATCH 08/33] perf, x86: Disable LBR recording for unknown LBR_FMT Andi Kleen
2012-10-26 20:29 ` [PATCH 09/33] perf, x86: Support LBR filtering by INTX/NOTX/ABORT v2 Andi Kleen
2012-10-26 20:29 ` [PATCH 10/33] perf, tools: Add abort,notx,intx branch filter options to perf report -j v2 Andi Kleen
2012-10-29 10:19 ` Namhyung Kim
2012-10-26 20:29 ` [PATCH 11/33] perf, tools: Support sorting by intx, abort branch flags Andi Kleen
2012-10-26 20:29 ` [PATCH 12/33] perf, x86: Support full width counting Andi Kleen
2012-10-26 20:29 ` [PATCH 13/33] perf, x86: Avoid checkpointed counters causing excessive TSX aborts v3 Andi Kleen
2012-10-26 20:29 ` [PATCH 14/33] perf, core: Add a concept of a weightened sample Andi Kleen
2012-10-26 20:29 ` [PATCH 15/33] perf, x86: Support weight samples for PEBS Andi Kleen
2012-10-26 20:29 ` [PATCH 16/33] perf, tools: Add support for weight v2 Andi Kleen
2012-10-29 10:44 ` Namhyung Kim
2012-10-29 11:02 ` Andi Kleen
2012-10-26 20:29 ` [PATCH 17/33] perf, tools: Handle XBEGIN like a jump Andi Kleen
2012-10-26 20:30 ` [PATCH 18/33] perf, x86: Support for printing PMU state on spurious PMIs v3 Andi Kleen
2012-10-26 20:30 ` [PATCH 19/33] perf, core: Add generic transaction flags Andi Kleen
2012-10-26 20:30 ` [PATCH 20/33] perf, x86: Add Haswell specific transaction flag reporting Andi Kleen
2012-10-26 20:30 ` [PATCH 21/33] perf, tools: Add support for record transaction flags Andi Kleen
2012-10-29 10:49 ` Namhyung Kim
2012-10-26 20:30 ` [PATCH 22/33] perf, tools: Point --sort documentation to --help Andi Kleen
2012-10-26 20:30 ` [PATCH 23/33] perf, tools: Add browser support for transaction flags Andi Kleen
2012-10-26 20:30 ` [PATCH 24/33] perf, tools: Move parse_events error printing to parse_events_options Andi Kleen
2012-10-27 19:08 ` Jiri Olsa
2012-10-30 11:58 ` [tip:perf/core] perf " tip-bot for Andi Kleen
2012-10-26 20:30 ` [PATCH 25/33] perf, tools: Support events with - in the name Andi Kleen
2012-10-27 19:32 ` Jiri Olsa
2012-10-26 20:30 ` [PATCH 26/33] perf, x86: Report the arch perfmon events in sysfs Andi Kleen
2012-10-26 20:30 ` [PATCH 27/33] tools, perf: Add a precise event qualifier Andi Kleen
2012-10-27 19:35 ` Jiri Olsa
2012-10-28 19:13 ` Andi Kleen
2012-10-28 19:24 ` Jiri Olsa
2012-10-28 20:06 ` Andi Kleen
2012-10-26 20:30 ` [PATCH 28/33] perf, x86: Add Haswell TSX event aliases Andi Kleen
2012-10-26 20:30 ` [PATCH 29/33] perf, tools: Add perf stat --transaction v2 Andi Kleen
2012-10-26 20:30 ` [PATCH 30/33] perf, x86: Add a Haswell precise instructions event Andi Kleen
2012-10-26 20:30 ` [PATCH 31/33] perf, tools: Support generic events as pmu event names v2 Andi Kleen
2012-10-27 19:42 ` Jiri Olsa
2012-10-28 19:12 ` Andi Kleen
2012-10-29 9:23 ` Peter Zijlstra
2012-10-26 20:30 ` [PATCH 32/33] perf, tools: Default to cpu// for events v2 Andi Kleen
2012-10-27 20:16 ` Jiri Olsa
2012-10-26 20:30 ` [PATCH 33/33] perf, tools: List kernel supplied event aliases in perf list v2 Andi Kleen
2012-10-27 20:20 ` Jiri Olsa
2012-10-28 19:05 ` Andi Kleen
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1351283415-13170-2-git-send-email-andi@firstfloor.org \
--to=andi@firstfloor.org \
--cc=acme@redhat.com \
--cc=ak@linux.intel.com \
--cc=eranian@google.com \
--cc=jolsa@redhat.com \
--cc=linux-kernel@vger.kernel.org \
--cc=mingo@kernel.org \
--cc=peterz@infradead.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox