* [RFC PATCH 04/15] ARM: perf: remove active_mask
@ 2011-07-19 8:37 Mark Rutland
0 siblings, 0 replies; 2+ messages in thread
From: Mark Rutland @ 2011-07-19 8:37 UTC (permalink / raw)
To: linux-arm-kernel
Currently, pmu_hw_events::active_mask is used to keep track of which
events are active in hardware. As we can stop counters and their
interrupts, this is unnecessary.
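For illustration, here is a minimal stand-alone sketch (not the kernel code; the struct and counter_has_overflowed() are hypothetical stand-ins) of the handler pattern this change relies on: a stopped counter cannot raise its overflow flag, so the per-counter hardware flag is enough to decide whether an event needs servicing, and the software active_mask becomes redundant. As the ARMv6 hunk below shows, that PMU instead derives the active state from the PMCR interrupt-enable bits.

#define MAX_COUNTERS	8

struct event;				/* stand-in for struct perf_event */

struct hw_events {			/* stand-in for struct cpu_hw_events */
	struct event *events[MAX_COUNTERS];
};

/*
 * Hypothetical stand-in for the per-PMU overflow test, e.g.
 * xscale1_pmnc_counter_has_overflowed(); the bit layout is illustrative.
 */
static int counter_has_overflowed(unsigned long pmnc, int idx)
{
	return !!(pmnc & (1UL << idx));
}

static void handle_overflow_irq(struct hw_events *cpuc, unsigned long pmnc)
{
	int idx;

	for (idx = 0; idx < MAX_COUNTERS; idx++) {
		/* No event installed on this counter: nothing to do. */
		if (!cpuc->events[idx])
			continue;

		/* The hardware overflow flag replaces the old active_mask test. */
		if (!counter_has_overflowed(pmnc, idx))
			continue;

		/* ... read the counter and update the event ... */
	}
}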
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Will Deacon <will.deacon@arm.com>
---
arch/arm/kernel/perf_event.c | 8 --------
arch/arm/kernel/perf_event_v6.c | 19 ++++++++++++++++++-
arch/arm/kernel/perf_event_v7.c | 3 ---
arch/arm/kernel/perf_event_xscale.c | 6 ------
4 files changed, 18 insertions(+), 18 deletions(-)
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index dfde928..438482f 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -57,12 +57,6 @@ struct cpu_hw_events {
* an event. A 0 means that the counter can be used.
*/
unsigned long used_mask[BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)];
-
- /*
- * A 1 bit for an index indicates that the counter is actively being
- * used.
- */
- unsigned long active_mask[BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)];
};
static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
@@ -295,7 +289,6 @@ armpmu_del(struct perf_event *event, int flags)
WARN_ON(idx < 0);
- clear_bit(idx, cpuc->active_mask);
armpmu_stop(event, PERF_EF_UPDATE);
cpuc->events[idx] = NULL;
clear_bit(idx, cpuc->used_mask);
@@ -327,7 +320,6 @@ armpmu_add(struct perf_event *event, int flags)
event->hw.idx = idx;
armpmu->disable(hwc, idx);
cpuc->events[idx] = event;
- set_bit(idx, cpuc->active_mask);
hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
if (flags & PERF_EF_START)
diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c
index 87f29b5..8390128 100644
--- a/arch/arm/kernel/perf_event_v6.c
+++ b/arch/arm/kernel/perf_event_v6.c
@@ -462,6 +462,23 @@ armv6pmu_enable_event(struct hw_perf_event *hwc,
raw_spin_unlock_irqrestore(&pmu_lock, flags);
}
+static int counter_is_active(unsigned long pmcr, int idx)
+{
+ unsigned long mask = 0;
+ if (idx == ARMV6_CYCLE_COUNTER)
+ mask = ARMV6_PMCR_CCOUNT_IEN;
+ else if (idx == ARMV6_COUNTER0)
+ mask = ARMV6_PMCR_COUNT0_IEN;
+ else if (idx == ARMV6_COUNTER1)
+ mask = ARMV6_PMCR_COUNT1_IEN;
+
+ if (mask)
+ return pmcr & mask;
+
+ WARN_ONCE(1, "invalid counter number (%d)\n", idx);
+ return 0;
+}
+
static irqreturn_t
armv6pmu_handle_irq(int irq_num,
void *dev)
@@ -491,7 +508,7 @@ armv6pmu_handle_irq(int irq_num,
struct perf_event *event = cpuc->events[idx];
struct hw_perf_event *hwc;
- if (!test_bit(idx, cpuc->active_mask))
+ if (!counter_is_active(pmcr, idx))
continue;
/*
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index fe6c931..f4170fc 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -1022,9 +1022,6 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
struct perf_event *event = cpuc->events[idx];
struct hw_perf_event *hwc;
- if (!test_bit(idx, cpuc->active_mask))
- continue;
-
/*
* We have a single interrupt for all counters. Check that
* each counter has overflowed before we process it.
diff --git a/arch/arm/kernel/perf_event_xscale.c b/arch/arm/kernel/perf_event_xscale.c
index 54312fc..ca89a06 100644
--- a/arch/arm/kernel/perf_event_xscale.c
+++ b/arch/arm/kernel/perf_event_xscale.c
@@ -253,9 +253,6 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
struct perf_event *event = cpuc->events[idx];
struct hw_perf_event *hwc;
- if (!test_bit(idx, cpuc->active_mask))
- continue;
-
if (!xscale1_pmnc_counter_has_overflowed(pmnc, idx))
continue;
@@ -585,9 +582,6 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
struct perf_event *event = cpuc->events[idx];
struct hw_perf_event *hwc;
- if (!test_bit(idx, cpuc->active_mask))
- continue;
-
if (!xscale2_pmnc_counter_has_overflowed(pmnc, idx))
continue;
--
1.7.0.4
* [RFC PATCH 00/15] ARM: perf: support multiple PMUs
@ 2011-08-15 13:55 Mark Rutland
2011-08-15 13:55 ` [RFC PATCH 04/15] ARM: perf: remove active_mask Mark Rutland
0 siblings, 1 reply; 2+ messages in thread
From: Mark Rutland @ 2011-08-15 13:55 UTC (permalink / raw)
To: linux-arm-kernel
System (AKA nest or uncore) PMUs exist on devices which are not affine
to a single CPU. They usually cannot be directly associated with
individual tasks and are asynchronous with respect to the current
execution. Examples of devices which could have system PMUs include L2
cache controllers, GPUs and memory buses.
The following patch series refactors the ARM PMU backend, enabling
new PMUs to reuse the existing code. This should allow for system PMUs
to be supported in future. Further work will be required to get perf to
fully understand system PMUs, but this provides something usable.
The framework is intended to be used by system PMUs which hang off core
platform components (e.g. L2 cache, AXI bus). If a device is complex
enough or separate enough from core functionality to have its own
driver, it should implement its own PMU handling using the core perf
API directly.
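To make that distinction concrete, a hypothetical skeleton of the second route is sketched below: a driver that bypasses the ARM PMU framework and registers directly with the core perf API via perf_pmu_register(). All names here are invented for illustration; only struct pmu, perf_invalid_context and perf_pmu_register() are the real core interfaces.

#include <linux/module.h>
#include <linux/perf_event.h>

static int example_event_init(struct perf_event *event)
{
	/* Only accept events that were opened against this PMU. */
	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/* ... validate event->attr.config against the hardware ... */
	return 0;
}

/* Empty callbacks: a real driver would program/stop the hardware here. */
static int  example_add(struct perf_event *event, int flags)	{ return 0; }
static void example_del(struct perf_event *event, int flags)	{ }
static void example_start(struct perf_event *event, int flags)	{ }
static void example_stop(struct perf_event *event, int flags)	{ }
static void example_read(struct perf_event *event)		{ }

static struct pmu example_pmu = {
	.task_ctx_nr	= perf_invalid_context,	/* system-wide counters only */
	.event_init	= example_event_init,
	.add		= example_add,
	.del		= example_del,
	.start		= example_start,
	.stop		= example_stop,
	.read		= example_read,
};

static int __init example_pmu_init(void)
{
	/* -1: let the core allocate a dynamic PMU type number. */
	return perf_pmu_register(&example_pmu, "example_pmu", -1);
}
module_init(example_pmu_init);

MODULE_LICENSE("GPL");

Userspace would then open events with perf_event_attr::type set to the dynamically allocated PMU type.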
The first patch ("perf: provide PMU when initing events") is currently
sitting in the tip tree, but as it's required for event initialization
to function (and hence for the PMU to be usable), it's provided here
for convenience.
The series is based on Will Deacon's perf-updates branch at:
git://linux-arm.org/linux-2.6-wd.git perf-updates
An example driver using the framework (supporting the PMU present in
L220/PL310 level 2 cache controllers) can be found at:
git://linux-arm.org/linux-2.6-wd.git perf-l2x0-wip
Any comments would be welcome.
Thanks,
Mark.
Mark Rutland (15):
perf: provide PMU when initing events
ARM: perf: only register a CPU PMU when present
ARM: perf: clean up event group validation
ARM: perf: remove active_mask
ARM: perf: move active_events into struct arm_pmu
ARM: perf: move platform device to struct arm_pmu
ARM: perf: indirect access to cpu_hw_events
ARM: perf: remove unnecessary armpmu->stop
ARM: perf: lock PMU registers per-CPU
ARM: perf: add type field to struct arm_pmu
ARM: perf: refactor event mapping
ARM: perf: add support for multiple PMUs
ARM: perf: remove event limit from pmu_hw_events
ARM: perf: remove cpu-related misnomers
ARM: perf: move arm_pmu into <asm/pmu.h>
arch/arm/include/asm/pmu.h | 64 +++++++
arch/arm/kernel/perf_event.c | 318 +++++++++++++++++------------------
arch/arm/kernel/perf_event_v6.c | 73 ++++++---
arch/arm/kernel/perf_event_v7.c | 74 +++++---
arch/arm/kernel/perf_event_xscale.c | 76 +++++----
kernel/events/core.c | 4 +-
6 files changed, 359 insertions(+), 250 deletions(-)