public inbox for linux-kernel@vger.kernel.org
 help / color / mirror / Atom feed
From: kan.liang@intel.com
To: a.p.zijlstra@chello.nl
Cc: mingo@redhat.com, acme@kernel.org, eranian@google.com,
	ak@linux.intel.com, mark.rutland@arm.com,
	adrian.hunter@intel.com, dsahern@gmail.com, jolsa@kernel.org,
	namhyung@kernel.org, linux-kernel@vger.kernel.org,
	Kan Liang <kan.liang@intel.com>
Subject: [PATCH 2/9] perf/x86: core_misc PMU disable and enable support
Date: Thu, 16 Jul 2015 16:33:44 -0400	[thread overview]
Message-ID: <1437078831-10152-3-git-send-email-kan.liang@intel.com> (raw)
In-Reply-To: <1437078831-10152-1-git-send-email-kan.liang@intel.com>

From: Kan Liang <kan.liang@intel.com>

This patch implements core_misc PMU disable and enable functions.
core_misc PMU counters are free-running counters, so it's impossible to
stop/start them. Here, "disable" simply means that the counters are not read.
With the disable/enable functions, it's possible to "disable" core_misc
events while other PMU events are stopped. For example, we can stop
reading the core_misc counters during the irq handler.

Signed-off-by: Kan Liang <kan.liang@intel.com>
---
 arch/x86/include/asm/perf_event.h                |  2 ++
 arch/x86/kernel/cpu/perf_event.h                 | 10 ++++++
 arch/x86/kernel/cpu/perf_event_intel.c           |  4 +++
 arch/x86/kernel/cpu/perf_event_intel_core_misc.c | 41 ++++++++++++++++++++++++
 4 files changed, 57 insertions(+)

diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index dc0f6ed..2905f4c 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -11,6 +11,8 @@
 
 #define X86_PMC_IDX_MAX					       64
 
+#define X86_CORE_MISC_COUNTER_MAX			       64
+
 #define MSR_ARCH_PERFMON_PERFCTR0			      0xc1
 #define MSR_ARCH_PERFMON_PERFCTR1			      0xc2
 
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index 3e7fd27..fb14f8a 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -239,6 +239,12 @@ struct cpu_hw_events {
 	int excl_thread_id; /* 0 or 1 */
 
 	/*
+	 * Intel core misc
+	 */
+	struct perf_event	*core_misc_events[X86_CORE_MISC_COUNTER_MAX]; /* in counter order */
+	unsigned long		core_misc_active_mask[BITS_TO_LONGS(X86_CORE_MISC_COUNTER_MAX)];
+
+	/*
 	 * AMD specific bits
 	 */
 	struct amd_nb			*amd_nb;
@@ -927,6 +933,10 @@ int p6_pmu_init(void);
 
 int knc_pmu_init(void);
 
+void intel_core_misc_pmu_enable(void);
+
+void intel_core_misc_pmu_disable(void);
+
 ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr,
 			  char *page);
 
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index b9826a9..651a86d 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -1586,6 +1586,8 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
 	if (!x86_pmu.late_ack)
 		apic_write(APIC_LVTPC, APIC_DM_NMI);
 	__intel_pmu_disable_all();
+	if (cpuc->core_misc_active_mask)
+		intel_core_misc_pmu_disable();
 	handled = intel_pmu_drain_bts_buffer();
 	handled += intel_bts_interrupt();
 	status = intel_pmu_get_status();
@@ -1671,6 +1673,8 @@ again:
 
 done:
 	__intel_pmu_enable_all(0, true);
+	if (cpuc->core_misc_active_mask)
+		intel_core_misc_pmu_enable();
 	/*
 	 * Only unmask the NMI after the overflow counters
 	 * have been reset. This avoids spurious NMIs on
diff --git a/arch/x86/kernel/cpu/perf_event_intel_core_misc.c b/arch/x86/kernel/cpu/perf_event_intel_core_misc.c
index c6c82ac..4efe842 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_core_misc.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_core_misc.c
@@ -250,12 +250,19 @@ static void __core_misc_pmu_event_start(struct core_misc_pmu *pmu,
 static void core_misc_pmu_event_start(struct perf_event *event, int mode)
 {
 	struct core_misc_pmu *pmu = get_core_misc_pmu(event);
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+	int idx = event->hw.idx;
 	unsigned long flags;
 
 	if (pmu == NULL)
 		return;
 
 	spin_lock_irqsave(&pmu->lock, flags);
+
+	if (pmu->pmu->type == perf_intel_core_misc_thread) {
+		cpuc->core_misc_events[idx] = event;
+		__set_bit(idx, cpuc->core_misc_active_mask);
+	}
 	__core_misc_pmu_event_start(pmu, event);
 	spin_unlock_irqrestore(&pmu->lock, flags);
 }
@@ -264,6 +271,7 @@ static void core_misc_pmu_event_stop(struct perf_event *event, int mode)
 {
 	struct core_misc_pmu *pmu = get_core_misc_pmu(event);
 	struct hw_perf_event *hwc = &event->hw;
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 	unsigned long flags;
 
 	if (pmu == NULL)
@@ -273,6 +281,8 @@ static void core_misc_pmu_event_stop(struct perf_event *event, int mode)
 
 	/* mark event as deactivated and stopped */
 	if (!(hwc->state & PERF_HES_STOPPED)) {
+		if (__test_and_clear_bit(hwc->idx, cpuc->core_misc_active_mask))
+			cpuc->core_misc_events[hwc->idx] = NULL;
 		WARN_ON_ONCE(pmu->n_active <= 0);
 		pmu->n_active--;
 
@@ -294,6 +304,32 @@ static void core_misc_pmu_event_stop(struct perf_event *event, int mode)
 	spin_unlock_irqrestore(&pmu->lock, flags);
 }
 
+void intel_core_misc_pmu_enable(void)
+{
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+	struct perf_event *event;
+	u64 start;
+	int bit;
+
+	for_each_set_bit(bit, cpuc->core_misc_active_mask,
+			 X86_CORE_MISC_COUNTER_MAX) {
+		event = cpuc->core_misc_events[bit];
+		start = core_misc_pmu_read_counter(event);
+		local64_set(&event->hw.prev_count, start);
+	}
+}
+
+void intel_core_misc_pmu_disable(void)
+{
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+	int bit;
+
+	for_each_set_bit(bit, cpuc->core_misc_active_mask,
+			 X86_CORE_MISC_COUNTER_MAX) {
+		core_misc_pmu_event_update(cpuc->core_misc_events[bit]);
+	}
+}
+
 static void core_misc_pmu_event_del(struct perf_event *event, int mode)
 {
 	core_misc_pmu_event_stop(event, PERF_EF_UPDATE);
@@ -863,6 +899,11 @@ static void __init core_misc_pmus_register(void)
 			.capabilities	= PERF_PMU_CAP_NO_INTERRUPT,
 		};
 
+		if (type->type == perf_intel_core_misc_thread) {
+			type->pmu.pmu_disable = (void *) intel_core_misc_pmu_disable;
+			type->pmu.pmu_enable = (void *) intel_core_misc_pmu_enable;
+		}
+
 		err = perf_pmu_register(&type->pmu, type->name, -1);
 		if (WARN_ON(err))
 			pr_info("Failed to register PMU %s error %d\n",
-- 
1.8.3.1


  parent reply	other threads:[~2015-07-17  3:51 UTC|newest]

Thread overview: 33+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2015-07-16 20:33 [PATCH 0/9] Intel core misc PMUs support kan.liang
2015-07-16 20:33 ` [PATCH 1/9] perf/x86: Add " kan.liang
2015-07-16 20:33 ` kan.liang [this message]
2015-07-17 12:11   ` [PATCH 2/9] perf/x86: core_misc PMU disable and enable support Mark Rutland
2015-07-17 13:46     ` Peter Zijlstra
2015-07-17 13:51       ` Peter Zijlstra
2015-07-17 15:35         ` Liang, Kan
2015-07-17 17:01           ` Andy Lutomirski
2015-07-17 17:52             ` Liang, Kan
2015-07-17 17:58               ` Andy Lutomirski
2015-07-17 18:15                 ` Liang, Kan
2015-07-17 18:56                   ` Andy Lutomirski
2015-07-17 21:11                   ` Peter Zijlstra
2015-07-16 20:33 ` [PATCH 3/9] perf/x86: Add is_hardware_event kan.liang
2015-07-17 10:48   ` Mark Rutland
2015-07-17 15:03     ` Liang, Kan
2015-07-17 15:47       ` Mark Rutland
2015-07-17 16:11         ` Mark Rutland
2015-07-16 20:33 ` [PATCH 4/9] perf/x86: special case per-cpu core misc PMU events kan.liang
2015-07-17 12:21   ` Mark Rutland
2015-07-17 12:55     ` Peter Zijlstra
2015-07-17 18:11       ` Stephane Eranian
2015-07-17 20:17     ` Andi Kleen
2015-07-20 16:12       ` Mark Rutland
2015-07-16 20:33 ` [PATCH 5/9] perf,tools: open event with it's own cpus and threads kan.liang
2015-07-16 20:33 ` [PATCH 6/9] perf,tools: Dump per-sample freq in report -D kan.liang
2015-07-16 20:33 ` [PATCH 7/9] perf,tools: save APERF/MPERF/TSC in struct perf_sample kan.liang
2015-07-16 20:33 ` [PATCH 8/9] perf,tools: caculate and save tsc/avg/bzy freq in he_stat kan.liang
2015-07-17 20:25   ` Andi Kleen
2015-07-17 20:57     ` Liang, Kan
2015-07-17 21:27       ` Andi Kleen
2015-07-16 20:33 ` [PATCH 9/9] perf,tools: Show freq in perf report --stdio kan.liang
2015-07-17 11:39 ` [PATCH 0/9] Intel core misc PMUs support Ingo Molnar

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=1437078831-10152-3-git-send-email-kan.liang@intel.com \
    --to=kan.liang@intel.com \
    --cc=a.p.zijlstra@chello.nl \
    --cc=acme@kernel.org \
    --cc=adrian.hunter@intel.com \
    --cc=ak@linux.intel.com \
    --cc=dsahern@gmail.com \
    --cc=eranian@google.com \
    --cc=jolsa@kernel.org \
    --cc=linux-kernel@vger.kernel.org \
    --cc=mark.rutland@arm.com \
    --cc=mingo@redhat.com \
    --cc=namhyung@kernel.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox