public inbox for linux-kernel@vger.kernel.org
 help / color / mirror / Atom feed
* [PATCH] perf_event: fix Ming's Intel NHM/WSM/SNB uncore PMU patch
@ 2011-08-31 12:41 Stephane Eranian
  2011-09-01 12:38 ` Lin Ming
  0 siblings, 1 reply; 3+ messages in thread
From: Stephane Eranian @ 2011-08-31 12:41 UTC (permalink / raw)
  To: linux-kernel; +Cc: ming.m.lin, peterz, mingo, andi, acme


The following patch sits on top of Lin Ming's patch
which adds support for Intel uncore PMU on NHM/WSM/SNB.
The patch series was posted on LKML in July 2011.

The patch fixes:
  - nhm_uncore_pmu_enable_all() to also enable the fixed uncore counters
  - uncore_pmu_add() to schedule for the actual max number of generic counters
  - fixed the NHM/WSM vs. SNB MSRs for the fixed counter, they are swapped 

The patch adds the following improvement:
  - add an event group validation function, to check if a group can ever be
    scheduled. This way, the behavior is consistent with core PMU event groups.

Signed-off-by: Stephane Eranian <eranian@google.com>
---

diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index e250977..283f292 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -426,8 +426,11 @@
 
 /* Intel Nehalem/Westmere/SandyBridge uncore performance counters */
 #define MSR_UNCORE_PERF_GLOBAL_CTRL	0x00000391
-#define MSR_UNCORE_FIXED_CTR_CTRL	0x00000394
-#define MSR_UNCORE_FIXED_CTR0		0x00000395
+#define MSR_NHM_UNCORE_FIXED_CTR0	0x00000394
+#define MSR_NHM_UNCORE_FIXED_CTR_CTRL	0x00000395
+
+#define MSR_SNB_UNCORE_FIXED_CTR0	0x00000395
+#define MSR_SNB_UNCORE_FIXED_CTR_CTRL	0x00000394
 
 #define MSR_NHM_UNCORE_PMC0		0x000003b0
 #define MSR_NHM_UNCORE_PERFEVTSEL0	0x000003c0
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
index 1100589..70bd28c 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
@@ -23,8 +23,8 @@ static void uncore_fixed_hw_config(struct perf_event *event)
 {
 	struct hw_perf_event *hwc = &event->hw;
 
-	hwc->config_base = MSR_UNCORE_FIXED_CTR_CTRL;
-	hwc->event_base = MSR_UNCORE_FIXED_CTR0;
+	hwc->config_base = intel_uncore_pmu.fixed_config_base;
+	hwc->event_base = intel_uncore_pmu.fixed_event_base;
 }
 
 static void uncore_fixed_disable_event(struct perf_event *event)
@@ -63,7 +63,14 @@ static void uncore_pmu_disable_event(struct perf_event *event)
 
 static void nhm_uncore_pmu_enable_all(void)
 {
-	u64 ctrl = (1 << UNCORE_NUM_COUNTERS) - 1;
+	u64 ctrl, fmask;
+
+	/* generic counters */
+	ctrl = (1 << UNCORE_NUM_GENERIC_COUNTERS) - 1;
+
+	/* fixed counters */
+	fmask = (1 << UNCORE_NUM_FIXED_COUNTERS) - 1;
+	ctrl |= fmask << X86_PMC_IDX_FIXED;
 
 	wrmsrl(MSR_UNCORE_PERF_GLOBAL_CTRL, ctrl);
 }
@@ -96,6 +103,8 @@ static __initconst const struct intel_uncore_pmu nhm_uncore_pmu = {
 	.hw_config		= nhm_uncore_pmu_hw_config,
 	.cntval_bits		= 48,
 	.cntval_bits_fixed	= 48,
+	.fixed_config_base	= MSR_NHM_UNCORE_FIXED_CTR_CTRL,
+	.fixed_event_base	= MSR_NHM_UNCORE_FIXED_CTR0,
 };
 
 /* SandyBridge uncore pmu */
@@ -149,6 +158,8 @@ static __initconst const struct intel_uncore_pmu snb_uncore_pmu = {
 	.hw_config		= snb_uncore_pmu_hw_config,
 	.cntval_bits		= 44,
 	.cntval_bits_fixed	= 48,
+	.fixed_config_base	= MSR_SNB_UNCORE_FIXED_CTR_CTRL,
+	.fixed_event_base	= MSR_SNB_UNCORE_FIXED_CTR0,
 };
 
 static u64 uncore_perf_event_update(struct perf_event *event)
@@ -234,9 +245,43 @@ static void uncore_pmu_init_hrtimer(struct intel_uncore *uncore)
 
 static struct pmu uncore_pmu;
 
+static int uncore_validate_group(struct perf_event *event)
+{
+	struct perf_event *leader = event->group_leader;
+	struct perf_event *e = event->group_leader;
+	int gen = 0, fixed = 0;
+	int ret = 0;
+
+	if (leader->attr.config == UNCORE_FIXED_EVENT)
+		fixed++;
+	else
+		gen++;
+
+	if (event->attr.config == UNCORE_FIXED_EVENT)
+		fixed++;
+	else
+		gen++;
+
+	list_for_each_entry(e, &leader->sibling_list, group_entry) {
+		if (e->attr.config == UNCORE_FIXED_EVENT)
+			fixed++;
+		else
+			gen++;
+	}
+
+	if (fixed > UNCORE_NUM_FIXED_COUNTERS)
+		ret = -ENOSPC;
+
+	if (gen > UNCORE_NUM_GENERIC_COUNTERS)
+		ret = -ENOSPC;
+
+	return ret;
+}
+
 static int uncore_pmu_event_init(struct perf_event *event)
 {
 	struct hw_perf_event *hwc = &event->hw;
+	int ret = 0;
 
 	if (!uncore_pmu_initialized)
 		return -ENOENT;
@@ -256,7 +301,10 @@ static int uncore_pmu_event_init(struct perf_event *event)
 	if (hwc->sample_period)
 		return -EINVAL;
 
-	return 0;
+	if (event->group_leader != event)
+		ret = uncore_validate_group(event);
+
+	return ret;
 }
 
 static void uncore_pmu_start(struct perf_event *event, int flags)
@@ -290,7 +338,7 @@ static int uncore_pmu_add(struct perf_event *event, int flags)
 		goto fixed_event;
 	}
 
-	for (i = 0; i < X86_PMC_IDX_FIXED; i++) {
+	for (i = 0; i < UNCORE_NUM_GENERIC_COUNTERS; i++) {
 fixed_event:
 		if (!uncore->events[i]) {
 			uncore->events[i] = event;
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
index c7392aa..79f3d19 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
@@ -61,4 +61,6 @@ struct intel_uncore_pmu {
 	void		(*hw_config)(struct perf_event *event);
 	int		cntval_bits;
 	int		cntval_bits_fixed;
+	int		fixed_config_base;
+	int		fixed_event_base;
 };

^ permalink raw reply related	[flat|nested] 3+ messages in thread

* Re: [PATCH] perf_event: fix Ming's Intel NHM/WSM/SNB uncore PMU patch
  2011-08-31 12:41 [PATCH] perf_event: fix Ming's Intel NHM/WSM/SNB uncore PMU patch Stephane Eranian
@ 2011-09-01 12:38 ` Lin Ming
  2011-09-01 12:45   ` Stephane Eranian
  0 siblings, 1 reply; 3+ messages in thread
From: Lin Ming @ 2011-09-01 12:38 UTC (permalink / raw)
  To: Stephane Eranian
  Cc: linux-kernel@vger.kernel.org, peterz@infradead.org, mingo@elte.hu,
	andi@firstfloor.org, acme@ghostprotocols.net

On Wed, 2011-08-31 at 20:41 +0800, Stephane Eranian wrote:
> The following patch sits on top of Lin Ming's patch
> which adds support for Intel uncore PMU on NHM/WSM/SNB.
> The patch series was posted on LKML in July 2011.
> 
> The patch fixes:
>   - nhm_uncore_pmu_enable_all() to also enable the fixed uncore counters
>   - uncore_pmu_add() to schedule for the actual max number of generic counters
>   - fixed the NHM/WSM vs. SNB MSRs for the fixed counter, they are swapped 
> 
> The patch adds the following improvement:
>   - add an event group validation function, to check if a group can ever be
>     scheduled. This way, the behavior is consistent with core PMU event groups.
> 
> Signed-off-by: Stephane Eranian <eranian@google.com>

Can I merge this patch into my series with these change logs and your
signed-off-by?

Thanks,
Lin Ming


^ permalink raw reply	[flat|nested] 3+ messages in thread

* Re: [PATCH] perf_event: fix Ming's Intel NHM/WSM/SNB uncore PMU patch
  2011-09-01 12:38 ` Lin Ming
@ 2011-09-01 12:45   ` Stephane Eranian
  0 siblings, 0 replies; 3+ messages in thread
From: Stephane Eranian @ 2011-09-01 12:45 UTC (permalink / raw)
  To: Lin Ming
  Cc: linux-kernel@vger.kernel.org, peterz@infradead.org, mingo@elte.hu,
	andi@firstfloor.org, acme@ghostprotocols.net

On Thu, Sep 1, 2011 at 2:38 PM, Lin Ming <ming.m.lin@intel.com> wrote:
> On Wed, 2011-08-31 at 20:41 +0800, Stephane Eranian wrote:
>> The following patch sits on top of Lin Ming's patch
>> which adds support for Intel uncore PMU on NHM/WSM/SNB.
>> The patch series was posted on LKML in July 2011.
>>
>> The patch fixes:
>>   - nhm_uncore_pmu_enable_all() to also enable the fixed uncore counters
>>   - uncore_pmu_add() to schedule for the actual max number of generic counters
>>   - fixed the NHM/WSM vs. SNB MSRs for the fixed counter, they are swapped
>>
>> The patch adds the following improvement:
>>   - add an event group validation function, to check if a group can ever be
>>     scheduled. This way, the behavior is consistent with core PMU event groups.
>>
>> Signed-off-by: Stephane Eranian <eranian@google.com>
>
> Can I merge this patch into my series with these change logs and your
> signed-off-by?
>
Yes.

> Thanks,
> Lin Ming
>
>

^ permalink raw reply	[flat|nested] 3+ messages in thread

end of thread, other threads:[~2011-09-01 12:45 UTC | newest]

Thread overview: 3+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2011-08-31 12:41 [PATCH] perf_event: fix Ming's Intel NHM/WSM/SNB uncore PMU patch Stephane Eranian
2011-09-01 12:38 ` Lin Ming
2011-09-01 12:45   ` Stephane Eranian

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox