public inbox for linux-arm-kernel@lists.infradead.org
 help / color / mirror / Atom feed
* [PATCH] arm64: perf: add support for percpu pmu interrupt
@ 2013-10-14  6:46 Vinayak Kale
  2013-10-14 12:34 ` Will Deacon
  0 siblings, 1 reply; 6+ messages in thread
From: Vinayak Kale @ 2013-10-14  6:46 UTC (permalink / raw)
  To: linux-arm-kernel

This patch adds support for irq registration when pmu interrupt type is PPI.
The patch also fixes ARMV8_EVTYPE_* macros since evtCount field width is
10bits.

Signed-off-by: Vinayak Kale <vkale@apm.com>
Signed-off-by: Tuan Phan <tphan@apm.com>
---
 arch/arm64/kernel/perf_event.c |  108 +++++++++++++++++++++++++++++-----------
 1 file changed, 78 insertions(+), 30 deletions(-)

diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index cea1594..ba3706d 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -363,22 +363,53 @@ validate_group(struct perf_event *event)
 }
 
 static void
+armpmu_disable_percpu_irq(void *data)
+{
+	struct arm_pmu *armpmu = (struct arm_pmu *)data;
+	struct platform_device *pmu_device = armpmu->plat_device;
+	int irq = platform_get_irq(pmu_device, 0);
+
+	cpumask_test_and_clear_cpu(smp_processor_id(), &armpmu->active_irqs);
+	disable_percpu_irq(irq);
+}
+
+static void
 armpmu_release_hardware(struct arm_pmu *armpmu)
 {
 	int i, irq, irqs;
 	struct platform_device *pmu_device = armpmu->plat_device;
 
-	irqs = min(pmu_device->num_resources, num_possible_cpus());
+	irq = platform_get_irq(pmu_device, 0);
 
-	for (i = 0; i < irqs; ++i) {
-		if (!cpumask_test_and_clear_cpu(i, &armpmu->active_irqs))
-			continue;
-		irq = platform_get_irq(pmu_device, i);
-		if (irq >= 0)
-			free_irq(irq, armpmu);
+	if (irq >= 16 && irq <= 31) {
+		on_each_cpu(armpmu_disable_percpu_irq, (void *)armpmu, 1);
+
+		free_percpu_irq(irq, armpmu);
+	} else {
+		irqs = min(pmu_device->num_resources, num_possible_cpus());
+
+		for (i = 0; i < irqs; ++i) {
+			if (!cpumask_test_and_clear_cpu(i, &armpmu->active_irqs))
+				continue;
+			irq = platform_get_irq(pmu_device, i);
+			if (irq >= 0)
+				free_irq(irq, armpmu);
+		}
 	}
 }
 
+static void
+armpmu_enable_percpu_irq(void *data)
+{
+	struct arm_pmu *armpmu = (struct arm_pmu *)data;
+	struct platform_device *pmu_device = armpmu->plat_device;
+	int irq = platform_get_irq(pmu_device, 0);
+
+	enable_percpu_irq(irq, 0);
+
+	cpumask_set_cpu(smp_processor_id(), &armpmu->active_irqs);
+}
+
 static int
 armpmu_reserve_hardware(struct arm_pmu *armpmu)
 {
@@ -396,36 +427,53 @@ armpmu_reserve_hardware(struct arm_pmu *armpmu)
 		return -ENODEV;
 	}
 
-	for (i = 0; i < irqs; ++i) {
-		err = 0;
-		irq = platform_get_irq(pmu_device, i);
-		if (irq < 0)
-			continue;
+	irq = platform_get_irq(pmu_device, 0);
 
+	if (irq >= 16 && irq <= 31) {
 		/*
-		 * If we have a single PMU interrupt that we can't shift,
-		 * assume that we're running on a uniprocessor machine and
-		 * continue. Otherwise, continue without this interrupt.
+		 * percpu PMU interrupt.
 		 */
-		if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) {
-			pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n",
-				    irq, i);
-			continue;
-		}
-
-		err = request_irq(irq, armpmu->handle_irq,
-				  IRQF_NOBALANCING,
-				  "arm-pmu", armpmu);
+		err = request_percpu_irq(irq, armpmu->handle_irq,
+						"arm-pmu", armpmu);
 		if (err) {
 			pr_err("unable to request IRQ%d for ARM PMU counters\n",
-				irq);
+					irq);
 			armpmu_release_hardware(armpmu);
 			return err;
 		}
 
-		cpumask_set_cpu(i, &armpmu->active_irqs);
+		on_each_cpu(armpmu_enable_percpu_irq, (void *)armpmu, 1);
+	} else {
+		for (i = 0; i < irqs; ++i) {
+			err = 0;
+			irq = platform_get_irq(pmu_device, i);
+			if (irq < 0)
+				continue;
+
+			/*
+			 * If we have a single PMU interrupt that we can't shift,
+			 * assume that we're running on a uniprocessor machine and
+			 * continue. Otherwise, continue without this interrupt.
+			 */
+			if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) {
+				pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n",
+						irq, i);
+				continue;
+			}
+
+			err = request_irq(irq, armpmu->handle_irq,
+					IRQF_NOBALANCING,
+					"arm-pmu", armpmu);
+			if (err) {
+				pr_err("unable to request IRQ%d for ARM PMU counters\n",
+						irq);
+				armpmu_release_hardware(armpmu);
+				return err;
+			}
+
+			cpumask_set_cpu(i, &armpmu->active_irqs);
+		}
 	}
-
 	return 0;
 }
 
@@ -784,8 +832,8 @@ static const unsigned armv8_pmuv3_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 /*
  * PMXEVTYPER: Event selection reg
  */
-#define	ARMV8_EVTYPE_MASK	0xc80000ff	/* Mask for writable bits */
-#define	ARMV8_EVTYPE_EVENT	0xff		/* Mask for EVENT bits */
+#define	ARMV8_EVTYPE_MASK	0xc80003ff	/* Mask for writable bits */
+#define	ARMV8_EVTYPE_EVENT	0x3ff		/* Mask for EVENT bits */
 
 /*
  * Event filters for PMUv3
@@ -1175,7 +1223,7 @@ static void armv8pmu_reset(void *info)
 static int armv8_pmuv3_map_event(struct perf_event *event)
 {
 	return map_cpu_event(event, &armv8_pmuv3_perf_map,
-				&armv8_pmuv3_perf_cache_map, 0xFF);
+				&armv8_pmuv3_perf_cache_map, 0x3FF);
 }
 
 static struct arm_pmu armv8pmu = {
-- 
1.7.9.5

^ permalink raw reply related	[flat|nested] 6+ messages in thread

* [PATCH] arm64: perf: add support for percpu pmu interrupt
  2013-10-14  6:46 [PATCH] arm64: perf: add support for percpu pmu interrupt Vinayak Kale
@ 2013-10-14 12:34 ` Will Deacon
  2013-10-15  6:33   ` Vinayak Kale
  0 siblings, 1 reply; 6+ messages in thread
From: Will Deacon @ 2013-10-14 12:34 UTC (permalink / raw)
  To: linux-arm-kernel

Hi Vinayak,

On Mon, Oct 14, 2013 at 07:46:29AM +0100, Vinayak Kale wrote:
> This patch adds support for irq registration when pmu interrupt type is PPI.
> The patch also fixes ARMV8_EVTYPE_* macros since evtCount field width is
> 10bits.
> 
> Signed-off-by: Vinayak Kale <vkale@apm.com>
> Signed-off-by: Tuan Phan <tphan@apm.com>
> ---
>  arch/arm64/kernel/perf_event.c |  108 +++++++++++++++++++++++++++++-----------
>  1 file changed, 78 insertions(+), 30 deletions(-)
> 
> diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
> index cea1594..ba3706d 100644
> --- a/arch/arm64/kernel/perf_event.c
> +++ b/arch/arm64/kernel/perf_event.c
> @@ -363,22 +363,53 @@ validate_group(struct perf_event *event)
>  }

[...]

>  static int
>  armpmu_reserve_hardware(struct arm_pmu *armpmu)
>  {
> @@ -396,36 +427,53 @@ armpmu_reserve_hardware(struct arm_pmu *armpmu)
>  		return -ENODEV;
>  	}
>  
> -	for (i = 0; i < irqs; ++i) {
> -		err = 0;
> -		irq = platform_get_irq(pmu_device, i);
> -		if (irq < 0)
> -			continue;
> +	irq = platform_get_irq(pmu_device, 0);
>  
> +	if (irq >= 16 && irq <= 31) {

This is horribly GIC specific and will break in the face of IRQ domains
(since this is a virtual interrupt number).

>  		/*
> -		 * If we have a single PMU interrupt that we can't shift,
> -		 * assume that we're running on a uniprocessor machine and
> -		 * continue. Otherwise, continue without this interrupt.
> +		 * percpu PMU interrupt.
>  		 */
> -		if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) {
> -			pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n",
> -				    irq, i);
> -			continue;
> -		}
> -
> -		err = request_irq(irq, armpmu->handle_irq,
> -				  IRQF_NOBALANCING,
> -				  "arm-pmu", armpmu);
> +		err = request_percpu_irq(irq, armpmu->handle_irq,
> +						"arm-pmu", armpmu);

This is broken -- the dev_id *must* be a __percpu variable for percpu irqs.

>  		if (err) {
>  			pr_err("unable to request IRQ%d for ARM PMU counters\n",
> -				irq);
> +					irq);
>  			armpmu_release_hardware(armpmu);
>  			return err;
>  		}
>  
> -		cpumask_set_cpu(i, &armpmu->active_irqs);
> +		on_each_cpu(armpmu_enable_percpu_irq, (void *)armpmu, 1);
> +	} else {
> +		for (i = 0; i < irqs; ++i) {
> +			err = 0;
> +			irq = platform_get_irq(pmu_device, i);
> +			if (irq < 0)
> +				continue;
> +
> +			/*
> +			 * If we have a single PMU interrupt that we can't shift,
> +			 * assume that we're running on a uniprocessor machine and
> +			 * continue. Otherwise, continue without this interrupt.
> +			 */
> +			if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) {
> +				pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n",
> +						irq, i);
> +				continue;
> +			}
> +
> +			err = request_irq(irq, armpmu->handle_irq,
> +					IRQF_NOBALANCING,
> +					"arm-pmu", armpmu);

A better way to do this is to try request_percpu_irq first. If that fails,
then try request_irq. However, the error reporting out of request_percpu_irq
could do with some cleanup (rather than just return -EINVAL) so we can
detect the difference between `this interrupt isn't per-cpu' and `this
per-cpu interrupt is invalid'. This can help us avoid the WARN_ON in
request_irq when it is passed a per-cpu interrupt.

> @@ -784,8 +832,8 @@ static const unsigned armv8_pmuv3_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
>  /*
>   * PMXEVTYPER: Event selection reg
>   */
> -#define	ARMV8_EVTYPE_MASK	0xc80000ff	/* Mask for writable bits */
> -#define	ARMV8_EVTYPE_EVENT	0xff		/* Mask for EVENT bits */
> +#define	ARMV8_EVTYPE_MASK	0xc80003ff	/* Mask for writable bits */
> +#define	ARMV8_EVTYPE_EVENT	0x3ff		/* Mask for EVENT bits */
>  
>  /*
>   * Event filters for PMUv3
> @@ -1175,7 +1223,7 @@ static void armv8pmu_reset(void *info)
>  static int armv8_pmuv3_map_event(struct perf_event *event)
>  {
>  	return map_cpu_event(event, &armv8_pmuv3_perf_map,
> -				&armv8_pmuv3_perf_cache_map, 0xFF);
> +				&armv8_pmuv3_perf_cache_map, 0x3FF);
>  }

What's all this?

Will

^ permalink raw reply	[flat|nested] 6+ messages in thread

* [PATCH] arm64: perf: add support for percpu pmu interrupt
  2013-10-14 12:34 ` Will Deacon
@ 2013-10-15  6:33   ` Vinayak Kale
  2013-10-15  9:21     ` Will Deacon
  0 siblings, 1 reply; 6+ messages in thread
From: Vinayak Kale @ 2013-10-15  6:33 UTC (permalink / raw)
  To: linux-arm-kernel

Hi Will,

Thanks for your comments. Please see inline below.

On Mon, Oct 14, 2013 at 6:04 PM, Will Deacon <will.deacon@arm.com> wrote:
> Hi Vinayak,
>
> On Mon, Oct 14, 2013 at 07:46:29AM +0100, Vinayak Kale wrote:
>> This patch adds support for irq registration when pmu interrupt type is PPI.
>> The patch also fixes ARMV8_EVTYPE_* macros since evtCount field width is
>> 10bits.
>>
>> Signed-off-by: Vinayak Kale <vkale@apm.com>
>> Signed-off-by: Tuan Phan <tphan@apm.com>
>> ---
>>  arch/arm64/kernel/perf_event.c |  108 +++++++++++++++++++++++++++++-----------
>>  1 file changed, 78 insertions(+), 30 deletions(-)
>>
>> diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
>> index cea1594..ba3706d 100644
>> --- a/arch/arm64/kernel/perf_event.c
>> +++ b/arch/arm64/kernel/perf_event.c
>> @@ -363,22 +363,53 @@ validate_group(struct perf_event *event)
>>  }
>
> [...]
>
>>  static int
>>  armpmu_reserve_hardware(struct arm_pmu *armpmu)
>>  {
>> @@ -396,36 +427,53 @@ armpmu_reserve_hardware(struct arm_pmu *armpmu)
>>               return -ENODEV;
>>       }
>>
>> -     for (i = 0; i < irqs; ++i) {
>> -             err = 0;
>> -             irq = platform_get_irq(pmu_device, i);
>> -             if (irq < 0)
>> -                     continue;
>> +     irq = platform_get_irq(pmu_device, 0);
>>
>> +     if (irq >= 16 && irq <= 31) {
>
> This is horribly GIC specific and will break in the face of IRQ domains
> (since this is a virtual interrupt number).
>

hmm, yes. I had not considered irq domain mapping.

>>               /*
>> -              * If we have a single PMU interrupt that we can't shift,
>> -              * assume that we're running on a uniprocessor machine and
>> -              * continue. Otherwise, continue without this interrupt.
>> +              * percpu PMU interrupt.
>>                */
>> -             if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) {
>> -                     pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n",
>> -                                 irq, i);
>> -                     continue;
>> -             }
>> -
>> -             err = request_irq(irq, armpmu->handle_irq,
>> -                               IRQF_NOBALANCING,
>> -                               "arm-pmu", armpmu);
>> +             err = request_percpu_irq(irq, armpmu->handle_irq,
>> +                                             "arm-pmu", armpmu);
>
> This is broken -- the dev_id *must* be a __percpu variable for percpu irqs.
>

Yes, this needs to be fixed.

>>               if (err) {
>>                       pr_err("unable to request IRQ%d for ARM PMU counters\n",
>> -                             irq);
>> +                                     irq);
>>                       armpmu_release_hardware(armpmu);
>>                       return err;
>>               }
>>
>> -             cpumask_set_cpu(i, &armpmu->active_irqs);
>> +             on_each_cpu(armpmu_enable_percpu_irq, (void *)armpmu, 1);
>> +     } else {
>> +             for (i = 0; i < irqs; ++i) {
>> +                     err = 0;
>> +                     irq = platform_get_irq(pmu_device, i);
>> +                     if (irq < 0)
>> +                             continue;
>> +
>> +                     /*
>> +                      * If we have a single PMU interrupt that we can't shift,
>> +                      * assume that we're running on a uniprocessor machine and
>> +                      * continue. Otherwise, continue without this interrupt.
>> +                      */
>> +                     if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) {
>> +                             pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n",
>> +                                             irq, i);
>> +                             continue;
>> +                     }
>> +
>> +                     err = request_irq(irq, armpmu->handle_irq,
>> +                                     IRQF_NOBALANCING,
>> +                                     "arm-pmu", armpmu);
>
> A better way to do this is to try request_percpu_irq first. If that fails,
> then try request_irq. However, the error reporting out of request_percpu_irq
> could do with some cleanup (rather than just return -EINVAL) so we can
> detect the difference between `this interrupt isn't per-cpu' and `this
> per-cpu interrupt is invalid'. This can help us avoid the WARN_ON in
> request_irq when it is passed a per-cpu interrupt.
>

Trying request_percpu_irq first seems better. But if it fails, then we
would straight away assume it's not a per-cpu interrupt and try
request_irq. In this case we may not be able to detect the 'this
per-cpu interrupt is invalid' case.

>> @@ -784,8 +832,8 @@ static const unsigned armv8_pmuv3_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
>>  /*
>>   * PMXEVTYPER: Event selection reg
>>   */
>> -#define      ARMV8_EVTYPE_MASK       0xc80000ff      /* Mask for writable bits */
>> -#define      ARMV8_EVTYPE_EVENT      0xff            /* Mask for EVENT bits */
>> +#define      ARMV8_EVTYPE_MASK       0xc80003ff      /* Mask for writable bits */
>> +#define      ARMV8_EVTYPE_EVENT      0x3ff           /* Mask for EVENT bits */
>>
>>  /*
>>   * Event filters for PMUv3
>> @@ -1175,7 +1223,7 @@ static void armv8pmu_reset(void *info)
>>  static int armv8_pmuv3_map_event(struct perf_event *event)
>>  {
>>       return map_cpu_event(event, &armv8_pmuv3_perf_map,
>> -                             &armv8_pmuv3_perf_cache_map, 0xFF);
>> +                             &armv8_pmuv3_perf_cache_map, 0x3FF);
>>  }
>
> What's all this?
>

The evtCount (event number) field width is 10 bits in the event selection
register. So we need to fix the ARMV8_EVTYPE_* macros and the related mask value.

From the subject of the patch, one may think that the patch is specific
only to percpu irq changes (which is not true).

I had mentioned about fixing ARMV8_EVTYPE_* macros in patch description.

> Will

-Vinayak

^ permalink raw reply	[flat|nested] 6+ messages in thread

* [PATCH] arm64: perf: add support for percpu pmu interrupt
  2013-10-15  6:33   ` Vinayak Kale
@ 2013-10-15  9:21     ` Will Deacon
  2013-10-15 11:54       ` Vinayak Kale
  0 siblings, 1 reply; 6+ messages in thread
From: Will Deacon @ 2013-10-15  9:21 UTC (permalink / raw)
  To: linux-arm-kernel

On Tue, Oct 15, 2013 at 07:33:25AM +0100, Vinayak Kale wrote:
> On Mon, Oct 14, 2013 at 6:04 PM, Will Deacon <will.deacon@arm.com> wrote:
> > On Mon, Oct 14, 2013 at 07:46:29AM +0100, Vinayak Kale wrote:
> >>               if (err) {
> >>                       pr_err("unable to request IRQ%d for ARM PMU counters\n",
> >> -                             irq);
> >> +                                     irq);
> >>                       armpmu_release_hardware(armpmu);
> >>                       return err;
> >>               }
> >>
> >> -             cpumask_set_cpu(i, &armpmu->active_irqs);
> >> +             on_each_cpu(armpmu_enable_percpu_irq, (void *)armpmu, 1);
> >> +     } else {
> >> +             for (i = 0; i < irqs; ++i) {
> >> +                     err = 0;
> >> +                     irq = platform_get_irq(pmu_device, i);
> >> +                     if (irq < 0)
> >> +                             continue;
> >> +
> >> +                     /*
> >> +                      * If we have a single PMU interrupt that we can't shift,
> >> +                      * assume that we're running on a uniprocessor machine and
> >> +                      * continue. Otherwise, continue without this interrupt.
> >> +                      */
> >> +                     if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) {
> >> +                             pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n",
> >> +                                             irq, i);
> >> +                             continue;
> >> +                     }
> >> +
> >> +                     err = request_irq(irq, armpmu->handle_irq,
> >> +                                     IRQF_NOBALANCING,
> >> +                                     "arm-pmu", armpmu);
> >
> > A better way to do this is to try request_percpu_irq first. If that fails,
> > then try request_irq. However, the error reporting out of request_percpu_irq
> > could do with some cleanup (rather than just return -EINVAL) so we can
> > detect the difference between `this interrupt isn't per-cpu' and `this
> > per-cpu interrupt is invalid'. This can help us avoid the WARN_ON in
> > request_irq when it is passed a per-cpu interrupt.
> >
> 
> Trying request_percpu_irq first seems better. But if it fails then we
> would straight away
> assume it's not per-cpu interrupt and try request_irq. In this case we
> may not be able to
> detect 'this per-cpu interrupt is invalid' case.

Right, but you could have a patch to fix the core code as part of this
series, as I hinted at above.

> >> @@ -784,8 +832,8 @@ static const unsigned armv8_pmuv3_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
> >>  /*
> >>   * PMXEVTYPER: Event selection reg
> >>   */
> >> -#define      ARMV8_EVTYPE_MASK       0xc80000ff      /* Mask for writable bits */
> >> -#define      ARMV8_EVTYPE_EVENT      0xff            /* Mask for EVENT bits */
> >> +#define      ARMV8_EVTYPE_MASK       0xc80003ff      /* Mask for writable bits */
> >> +#define      ARMV8_EVTYPE_EVENT      0x3ff           /* Mask for EVENT bits */
> >>
> >>  /*
> >>   * Event filters for PMUv3
> >> @@ -1175,7 +1223,7 @@ static void armv8pmu_reset(void *info)
> >>  static int armv8_pmuv3_map_event(struct perf_event *event)
> >>  {
> >>       return map_cpu_event(event, &armv8_pmuv3_perf_map,
> >> -                             &armv8_pmuv3_perf_cache_map, 0xFF);
> >> +                             &armv8_pmuv3_perf_cache_map, 0x3FF);
> >>  }
> >
> > What's all this?
> >
> 
> The evtCount (event number) field width is 10bits in event selection register.
> So need to fix ARMV8_EVTYPE_* macros and related mask value.
> 
> From the subject of patch, one may think that the patch is specific
> only to percpu irq changes (which is not true).
> 
> I had mentioned about fixing ARMV8_EVTYPE_* macros in patch description.

Ok, please put this change in a separate patch.

Will

^ permalink raw reply	[flat|nested] 6+ messages in thread

* [PATCH] arm64: perf: add support for percpu pmu interrupt
  2013-10-15  9:21     ` Will Deacon
@ 2013-10-15 11:54       ` Vinayak Kale
  2013-10-16 14:15         ` Will Deacon
  0 siblings, 1 reply; 6+ messages in thread
From: Vinayak Kale @ 2013-10-15 11:54 UTC (permalink / raw)
  To: linux-arm-kernel

On Tue, Oct 15, 2013 at 2:51 PM, Will Deacon <will.deacon@arm.com> wrote:
> On Tue, Oct 15, 2013 at 07:33:25AM +0100, Vinayak Kale wrote:
>> On Mon, Oct 14, 2013 at 6:04 PM, Will Deacon <will.deacon@arm.com> wrote:
>> > On Mon, Oct 14, 2013 at 07:46:29AM +0100, Vinayak Kale wrote:
>> >>               if (err) {
>> >>                       pr_err("unable to request IRQ%d for ARM PMU counters\n",
>> >> -                             irq);
>> >> +                                     irq);
>> >>                       armpmu_release_hardware(armpmu);
>> >>                       return err;
>> >>               }
>> >>
>> >> -             cpumask_set_cpu(i, &armpmu->active_irqs);
>> >> +             on_each_cpu(armpmu_enable_percpu_irq, (void *)armpmu, 1);
>> >> +     } else {
>> >> +             for (i = 0; i < irqs; ++i) {
>> >> +                     err = 0;
>> >> +                     irq = platform_get_irq(pmu_device, i);
>> >> +                     if (irq < 0)
>> >> +                             continue;
>> >> +
>> >> +                     /*
>> >> +                      * If we have a single PMU interrupt that we can't shift,
>> >> +                      * assume that we're running on a uniprocessor machine and
>> >> +                      * continue. Otherwise, continue without this interrupt.
>> >> +                      */
>> >> +                     if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) {
>> >> +                             pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n",
>> >> +                                             irq, i);
>> >> +                             continue;
>> >> +                     }
>> >> +
>> >> +                     err = request_irq(irq, armpmu->handle_irq,
>> >> +                                     IRQF_NOBALANCING,
>> >> +                                     "arm-pmu", armpmu);
>> >
>> > A better way to do this is to try request_percpu_irq first. If that fails,
>> > then try request_irq. However, the error reporting out of request_percpu_irq
>> > could do with some cleanup (rather than just return -EINVAL) so we can
>> > detect the difference between `this interrupt isn't per-cpu' and `this
>> > per-cpu interrupt is invalid'. This can help us avoid the WARN_ON in
>> > request_irq when it is passed a per-cpu interrupt.
>> >
>>
>> Trying request_percpu_irq first seems better. But if it fails then we
>> would straight away
>> assume it's not per-cpu interrupt and try request_irq. In this case we
>> may not be able to
>> detect 'this per-cpu interrupt is invalid' case.
>
> Right, but you could have a patch to fix the core code as part of this
> series, as I hinted at above.
>

Modifying core code to change return value of request_percpu_irq seems
risky as other drivers might be checking the error code.

As you said, passing an invalid PPI to request_irq would cause an unwanted
WARN_ON. But this would be a rare case and platform specific. Can we
just live with this WARN_ON in such rare cases?

>> >> @@ -784,8 +832,8 @@ static const unsigned armv8_pmuv3_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
>> >>  /*
>> >>   * PMXEVTYPER: Event selection reg
>> >>   */
>> >> -#define      ARMV8_EVTYPE_MASK       0xc80000ff      /* Mask for writable bits */
>> >> -#define      ARMV8_EVTYPE_EVENT      0xff            /* Mask for EVENT bits */
>> >> +#define      ARMV8_EVTYPE_MASK       0xc80003ff      /* Mask for writable bits */
>> >> +#define      ARMV8_EVTYPE_EVENT      0x3ff           /* Mask for EVENT bits */
>> >>
>> >>  /*
>> >>   * Event filters for PMUv3
>> >> @@ -1175,7 +1223,7 @@ static void armv8pmu_reset(void *info)
>> >>  static int armv8_pmuv3_map_event(struct perf_event *event)
>> >>  {
>> >>       return map_cpu_event(event, &armv8_pmuv3_perf_map,
>> >> -                             &armv8_pmuv3_perf_cache_map, 0xFF);
>> >> +                             &armv8_pmuv3_perf_cache_map, 0x3FF);
>> >>  }
>> >
>> > What's all this?
>> >
>>
>> The evtCount (event number) field width is 10bits in event selection register.
>> So need to fix ARMV8_EVTYPE_* macros and related mask value.
>>
>> From the subject of patch, one may think that the patch is specific
>> only to percpu irq changes (which is not true).
>>
>> I had mentioned about fixing ARMV8_EVTYPE_* macros in patch description.
>
> Ok, please put this change in a separate patch.
>

Okay.

> Will

^ permalink raw reply	[flat|nested] 6+ messages in thread

* [PATCH] arm64: perf: add support for percpu pmu interrupt
  2013-10-15 11:54       ` Vinayak Kale
@ 2013-10-16 14:15         ` Will Deacon
  0 siblings, 0 replies; 6+ messages in thread
From: Will Deacon @ 2013-10-16 14:15 UTC (permalink / raw)
  To: linux-arm-kernel

On Tue, Oct 15, 2013 at 12:54:57PM +0100, Vinayak Kale wrote:
> On Tue, Oct 15, 2013 at 2:51 PM, Will Deacon <will.deacon@arm.com> wrote:
> >> > A better way to do this is to try request_percpu_irq first. If that fails,
> >> > then try request_irq. However, the error reporting out of request_percpu_irq
> >> > could do with some cleanup (rather than just return -EINVAL) so we can
> >> > detect the difference between `this interrupt isn't per-cpu' and `this
> >> > per-cpu interrupt is invalid'. This can help us avoid the WARN_ON in
> >> > request_irq when it is passed a per-cpu interrupt.
> >> >
> >>
> >> Trying request_percpu_irq first seems better. But if it fails then we
> >> would straight away
> >> assume it's not per-cpu interrupt and try request_irq. In this case we
> >> may not be able to
> >> detect 'this per-cpu interrupt is invalid' case.
> >
> > Right, but you could have a patch to fix the core code as part of this
> > series, as I hinted at above.
> >
> 
> Modifying core code to change return value of request_percpu_irq seems
> risky as other drivers might be checking the error code.

Well, grepping for "request_percpu_irq" shows a handful of callers, which
doesn't look too onerous to audit.

> As you said, passing invalid ppi to request_irq would cause unwanted
> WARN_ON. But this would be rare case and platform specific. Can we
> just live with this WARN_ON in such rare cases?

Just fix the fundamental problem rather than paper over it.

Will

^ permalink raw reply	[flat|nested] 6+ messages in thread

end of thread, other threads:[~2013-10-16 14:15 UTC | newest]

Thread overview: 6+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2013-10-14  6:46 [PATCH] arm64: perf: add support for percpu pmu interrupt Vinayak Kale
2013-10-14 12:34 ` Will Deacon
2013-10-15  6:33   ` Vinayak Kale
2013-10-15  9:21     ` Will Deacon
2013-10-15 11:54       ` Vinayak Kale
2013-10-16 14:15         ` Will Deacon

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox