* [PATCH v3 01/20] ARM64: Move PMU register related defines to asm/pmu.h
2015-09-24 22:31 [PATCH v3 00/20] KVM: ARM64: Add guest PMU support Shannon Zhao
@ 2015-09-24 22:31 ` Shannon Zhao
2015-09-24 22:31 ` [PATCH v3 02/20] KVM: ARM64: Define PMU data structure for each vcpu Shannon Zhao
` (20 subsequent siblings)
21 siblings, 0 replies; 37+ messages in thread
From: Shannon Zhao @ 2015-09-24 22:31 UTC (permalink / raw)
To: linux-arm-kernel
To use the ARMv8 PMU-related register defines from the KVM code, move
the relevant definitions to the asm/pmu.h header file.
Signed-off-by: Anup Patel <anup.patel@linaro.org>
Signed-off-by: Shannon Zhao <shannon.zhao@linaro.org>
---
arch/arm64/include/asm/pmu.h | 45 ++++++++++++++++++++++++++++++++++++++++++
arch/arm64/kernel/perf_event.c | 35 --------------------------------
2 files changed, 45 insertions(+), 35 deletions(-)
diff --git a/arch/arm64/include/asm/pmu.h b/arch/arm64/include/asm/pmu.h
index b7710a5..b9f394a 100644
--- a/arch/arm64/include/asm/pmu.h
+++ b/arch/arm64/include/asm/pmu.h
@@ -19,6 +19,51 @@
#ifndef __ASM_PMU_H
#define __ASM_PMU_H
+#define ARMV8_MAX_COUNTERS 32
+#define ARMV8_COUNTER_MASK (ARMV8_MAX_COUNTERS - 1)
+
+/*
+ * Per-CPU PMCR: config reg
+ */
+#define ARMV8_PMCR_E (1 << 0) /* Enable all counters */
+#define ARMV8_PMCR_P (1 << 1) /* Reset all counters */
+#define ARMV8_PMCR_C (1 << 2) /* Cycle counter reset */
+#define ARMV8_PMCR_D (1 << 3) /* CCNT counts every 64th cpu cycle */
+#define ARMV8_PMCR_X (1 << 4) /* Export to ETM */
+#define ARMV8_PMCR_DP (1 << 5) /* Disable CCNT if non-invasive debug*/
+#define ARMV8_PMCR_N_SHIFT 11 /* Number of counters supported */
+#define ARMV8_PMCR_N_MASK 0x1f
+#define ARMV8_PMCR_MASK 0x3f /* Mask for writable bits */
+
+/*
+ * PMCNTEN: counters enable reg
+ */
+#define ARMV8_CNTEN_MASK 0xffffffff /* Mask for writable bits */
+
+/*
+ * PMINTEN: counters interrupt enable reg
+ */
+#define ARMV8_INTEN_MASK 0xffffffff /* Mask for writable bits */
+
+/*
+ * PMOVSR: counters overflow flag status reg
+ */
+#define ARMV8_OVSR_MASK 0xffffffff /* Mask for writable bits */
+#define ARMV8_OVERFLOWED_MASK ARMV8_OVSR_MASK
+
+/*
+ * PMXEVTYPER: Event selection reg
+ */
+#define ARMV8_EVTYPE_MASK 0xc80003ff /* Mask for writable bits */
+#define ARMV8_EVTYPE_EVENT 0x3ff /* Mask for EVENT bits */
+
+/*
+ * Event filters for PMUv3
+ */
+#define ARMV8_EXCLUDE_EL1 (1 << 31)
+#define ARMV8_EXCLUDE_EL0 (1 << 30)
+#define ARMV8_INCLUDE_EL2 (1 << 27)
+
#ifdef CONFIG_HW_PERF_EVENTS
/* The events for a given PMU register set. */
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index f9a74d4..534e8ad 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -741,9 +741,6 @@ static const unsigned armv8_pmuv3_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
#define ARMV8_IDX_COUNTER0 1
#define ARMV8_IDX_COUNTER_LAST (ARMV8_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1)
-#define ARMV8_MAX_COUNTERS 32
-#define ARMV8_COUNTER_MASK (ARMV8_MAX_COUNTERS - 1)
-
/*
* ARMv8 low level PMU access
*/
@@ -754,38 +751,6 @@ static const unsigned armv8_pmuv3_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
#define ARMV8_IDX_TO_COUNTER(x) \
(((x) - ARMV8_IDX_COUNTER0) & ARMV8_COUNTER_MASK)
-/*
- * Per-CPU PMCR: config reg
- */
-#define ARMV8_PMCR_E (1 << 0) /* Enable all counters */
-#define ARMV8_PMCR_P (1 << 1) /* Reset all counters */
-#define ARMV8_PMCR_C (1 << 2) /* Cycle counter reset */
-#define ARMV8_PMCR_D (1 << 3) /* CCNT counts every 64th cpu cycle */
-#define ARMV8_PMCR_X (1 << 4) /* Export to ETM */
-#define ARMV8_PMCR_DP (1 << 5) /* Disable CCNT if non-invasive debug*/
-#define ARMV8_PMCR_N_SHIFT 11 /* Number of counters supported */
-#define ARMV8_PMCR_N_MASK 0x1f
-#define ARMV8_PMCR_MASK 0x3f /* Mask for writable bits */
-
-/*
- * PMOVSR: counters overflow flag status reg
- */
-#define ARMV8_OVSR_MASK 0xffffffff /* Mask for writable bits */
-#define ARMV8_OVERFLOWED_MASK ARMV8_OVSR_MASK
-
-/*
- * PMXEVTYPER: Event selection reg
- */
-#define ARMV8_EVTYPE_MASK 0xc80003ff /* Mask for writable bits */
-#define ARMV8_EVTYPE_EVENT 0x3ff /* Mask for EVENT bits */
-
-/*
- * Event filters for PMUv3
- */
-#define ARMV8_EXCLUDE_EL1 (1 << 31)
-#define ARMV8_EXCLUDE_EL0 (1 << 30)
-#define ARMV8_INCLUDE_EL2 (1 << 27)
-
static inline u32 armv8pmu_pmcr_read(void)
{
u32 val;
--
2.1.4
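As a quick illustration of how KVM code can consume these defines once they
live in asm/pmu.h (a hypothetical helper, not part of this series): the number
of implemented event counters sits in PMCR_EL0.N and can be pulled out with the
shift/mask pair above.

/* Hypothetical helper: extract PMCR_EL0.N, the number of event counters. */
static inline u32 armv8_pmcr_num_counters(u32 pmcr)
{
	return (pmcr >> ARMV8_PMCR_N_SHIFT) & ARMV8_PMCR_N_MASK;
}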
* [PATCH v3 02/20] KVM: ARM64: Define PMU data structure for each vcpu
2015-09-24 22:31 [PATCH v3 00/20] KVM: ARM64: Add guest PMU support Shannon Zhao
2015-09-24 22:31 ` [PATCH v3 01/20] ARM64: Move PMU register related defines to asm/pmu.h Shannon Zhao
@ 2015-09-24 22:31 ` Shannon Zhao
2015-09-24 22:31 ` [PATCH v3 03/20] KVM: ARM64: Add offset defines for PMU registers Shannon Zhao
` (19 subsequent siblings)
21 siblings, 0 replies; 37+ messages in thread
From: Shannon Zhao @ 2015-09-24 22:31 UTC (permalink / raw)
To: linux-arm-kernel
Here we plan to support a virtual PMU for the guest through full software
emulation, so define some basic structs and functions in preparation for
further steps. Define struct kvm_pmc for a performance monitor counter and
struct kvm_pmu for the per-vcpu performance monitor unit. According to the
ARMv8 spec, the PMU contains at most 32 (ARMV8_MAX_COUNTERS) counters.
Since this only supports ARM64 (i.e. PMUv3), add a separate config symbol
for it.
Signed-off-by: Shannon Zhao <shannon.zhao@linaro.org>
---
arch/arm64/include/asm/kvm_host.h | 2 ++
arch/arm64/kvm/Kconfig | 8 ++++++++
include/kvm/arm_pmu.h | 40 +++++++++++++++++++++++++++++++++++++++
3 files changed, 50 insertions(+)
create mode 100644 include/kvm/arm_pmu.h
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 415938d..6a246a3 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -42,6 +42,7 @@
#include <kvm/arm_vgic.h>
#include <kvm/arm_arch_timer.h>
+#include <kvm/arm_pmu.h>
#define KVM_VCPU_MAX_FEATURES 3
@@ -135,6 +136,7 @@ struct kvm_vcpu_arch {
/* VGIC state */
struct vgic_cpu vgic_cpu;
struct arch_timer_cpu timer_cpu;
+ struct kvm_pmu pmu;
/*
* Anything that is not used directly from assembly code goes
diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig
index bfffe8f..3c7c58d 100644
--- a/arch/arm64/kvm/Kconfig
+++ b/arch/arm64/kvm/Kconfig
@@ -31,6 +31,7 @@ config KVM
select KVM_VFIO
select HAVE_KVM_EVENTFD
select HAVE_KVM_IRQFD
+ select KVM_ARM_PMU
---help---
Support hosting virtualized guest machines.
@@ -52,4 +53,11 @@ config KVM_ARM_MAX_VCPUS
large, so only choose a reasonable number that you expect to
actually use.
+config KVM_ARM_PMU
+ bool
+ depends on KVM_ARM_HOST
+ ---help---
+ Adds support for a virtual Performance Monitoring Unit (PMU) in
+ virtual machines.
+
endif # VIRTUALIZATION
diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h
new file mode 100644
index 0000000..bb0cd21
--- /dev/null
+++ b/include/kvm/arm_pmu.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2015 Linaro Ltd.
+ * Author: Shannon Zhao <shannon.zhao@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ASM_ARM_KVM_PMU_H
+#define __ASM_ARM_KVM_PMU_H
+
+#include <linux/perf_event.h>
+#include <asm/pmu.h>
+
+struct kvm_pmc {
+ u8 idx;/* index into the pmu->pmc array */
+ struct perf_event *perf_event;
+ struct kvm_vcpu *vcpu;
+};
+
+struct kvm_pmu {
+#ifdef CONFIG_KVM_ARM_PMU
+ /* PMU IRQ Number per VCPU */
+ int irq_num;
+ /* IRQ pending flag */
+ bool irq_pending;
+ struct kvm_pmc pmc[ARMV8_MAX_COUNTERS];
+#endif
+};
+
+#endif
--
2.1.4
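A minimal sketch of how later patches can reach per-counter state from a vcpu,
assuming only the pmu field added to kvm_vcpu_arch above (the helper name is
made up for illustration):

/* Illustrative only: map a counter index to its kvm_pmc state. */
static inline struct kvm_pmc *vcpu_to_pmc(struct kvm_vcpu *vcpu, u32 idx)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;

	return &pmu->pmc[idx];	/* idx must be below ARMV8_MAX_COUNTERS */
}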
* [PATCH v3 03/20] KVM: ARM64: Add offset defines for PMU registers
2015-09-24 22:31 [PATCH v3 00/20] KVM: ARM64: Add guest PMU support Shannon Zhao
2015-09-24 22:31 ` [PATCH v3 01/20] ARM64: Move PMU register related defines to asm/pmu.h Shannon Zhao
2015-09-24 22:31 ` [PATCH v3 02/20] KVM: ARM64: Define PMU data structure for each vcpu Shannon Zhao
@ 2015-09-24 22:31 ` Shannon Zhao
2015-10-07 8:25 ` Marc Zyngier
2015-09-24 22:31 ` [PATCH v3 04/20] KVM: ARM64: Add reset and access handlers for PMCR_EL0 register Shannon Zhao
` (18 subsequent siblings)
21 siblings, 1 reply; 37+ messages in thread
From: Shannon Zhao @ 2015-09-24 22:31 UTC (permalink / raw)
To: linux-arm-kernel
We are about to trap and emulate accesses to each PMU register
individually. This adds the context offsets for the AArch64 PMU
registers and their AArch32 counterparts.
Signed-off-by: Shannon Zhao <shannon.zhao@linaro.org>
---
arch/arm64/include/asm/kvm_asm.h | 59 +++++++++++++++++++++++++++++++++++-----
1 file changed, 52 insertions(+), 7 deletions(-)
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index 67fa0de..0a4dfcc 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -48,14 +48,36 @@
#define MDSCR_EL1 22 /* Monitor Debug System Control Register */
#define MDCCINT_EL1 23 /* Monitor Debug Comms Channel Interrupt Enable Reg */
+/* Performance Monitors Registers */
+#define PMCR_EL0 24 /* Control Register */
+#define PMOVSSET_EL0 25 /* Overflow Flag Status Set Register */
+#define PMOVSCLR_EL0 26 /* Overflow Flag Status Clear Register */
+#define PMSELR_EL0 27 /* Event Counter Selection Register */
+#define PMCEID0_EL0 28 /* Common Event Identification Register 0 */
+#define PMCEID1_EL0 29 /* Common Event Identification Register 1 */
+#define PMEVCNTR0_EL0 30 /* Event Counter Register (0-30) */
+#define PMEVCNTR30_EL0 60
+#define PMCCNTR_EL0 61 /* Cycle Counter Register */
+#define PMEVTYPER0_EL0 62 /* Event Type Register (0-30) */
+#define PMEVTYPER30_EL0 92
+#define PMCCFILTR_EL0 93 /* Cycle Count Filter Register */
+#define PMXEVCNTR_EL0 94 /* Selected Event Count Register */
+#define PMXEVTYPER_EL0 95 /* Selected Event Type Register */
+#define PMCNTENSET_EL0 96 /* Count Enable Set Register */
+#define PMCNTENCLR_EL0 97 /* Count Enable Clear Register */
+#define PMINTENSET_EL1 98 /* Interrupt Enable Set Register */
+#define PMINTENCLR_EL1 99 /* Interrupt Enable Clear Register */
+#define PMUSERENR_EL0 100 /* User Enable Register */
+#define PMSWINC_EL0 101 /* Software Increment Register */
+
/* 32bit specific registers. Keep them at the end of the range */
-#define DACR32_EL2 24 /* Domain Access Control Register */
-#define IFSR32_EL2 25 /* Instruction Fault Status Register */
-#define FPEXC32_EL2 26 /* Floating-Point Exception Control Register */
-#define DBGVCR32_EL2 27 /* Debug Vector Catch Register */
-#define TEECR32_EL1 28 /* ThumbEE Configuration Register */
-#define TEEHBR32_EL1 29 /* ThumbEE Handler Base Register */
-#define NR_SYS_REGS 30
+#define DACR32_EL2 102 /* Domain Access Control Register */
+#define IFSR32_EL2 103 /* Instruction Fault Status Register */
+#define FPEXC32_EL2 104 /* Floating-Point Exception Control Register */
+#define DBGVCR32_EL2 105 /* Debug Vector Catch Register */
+#define TEECR32_EL1 106 /* ThumbEE Configuration Register */
+#define TEEHBR32_EL1 107 /* ThumbEE Handler Base Register */
+#define NR_SYS_REGS 108
/* 32bit mapping */
#define c0_MPIDR (MPIDR_EL1 * 2) /* MultiProcessor ID Register */
@@ -77,6 +99,24 @@
#define c6_IFAR (c6_DFAR + 1) /* Instruction Fault Address Register */
#define c7_PAR (PAR_EL1 * 2) /* Physical Address Register */
#define c7_PAR_high (c7_PAR + 1) /* PAR top 32 bits */
+
+/* Performance Monitors*/
+#define c9_PMCR (PMCR_EL0 * 2)
+#define c9_PMOVSSET (PMOVSSET_EL0 * 2)
+#define c9_PMOVSCLR (PMOVSCLR_EL0 * 2)
+#define c9_PMCCNTR (PMCCNTR_EL0 * 2)
+#define c9_PMSELR (PMSELR_EL0 * 2)
+#define c9_PMCEID0 (PMCEID0_EL0 * 2)
+#define c9_PMCEID1 (PMCEID1_EL0 * 2)
+#define c9_PMXEVCNTR (PMXEVCNTR_EL0 * 2)
+#define c9_PMXEVTYPER (PMXEVTYPER_EL0 * 2)
+#define c9_PMCNTENSET (PMCNTENSET_EL0 * 2)
+#define c9_PMCNTENCLR (PMCNTENCLR_EL0 * 2)
+#define c9_PMINTENSET (PMINTENSET_EL1 * 2)
+#define c9_PMINTENCLR (PMINTENCLR_EL1 * 2)
+#define c9_PMUSERENR (PMUSERENR_EL0 * 2)
+#define c9_PMSWINC (PMSWINC_EL0 * 2)
+
#define c10_PRRR (MAIR_EL1 * 2) /* Primary Region Remap Register */
#define c10_NMRR (c10_PRRR + 1) /* Normal Memory Remap Register */
#define c12_VBAR (VBAR_EL1 * 2) /* Vector Base Address Register */
@@ -88,6 +128,11 @@
#define c10_AMAIR1 (c10_AMAIR0 + 1)/* Aux Memory Attr Indirection Reg */
#define c14_CNTKCTL (CNTKCTL_EL1 * 2) /* Timer Control Register (PL1) */
+/* Performance Monitors*/
+#define c14_PMEVCNTR0 (PMEVCNTR0_EL0 * 2)
+#define c14_PMEVTYPER0 (PMEVTYPER0_EL0 * 2)
+#define c14_PMCCFILTR (PMCCFILTR_EL0 * 2)
+
#define cp14_DBGDSCRext (MDSCR_EL1 * 2)
#define cp14_DBGBCR0 (DBGBCR0_EL1 * 2)
#define cp14_DBGBVR0 (DBGBVR0_EL1 * 2)
--
2.1.4
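A quick consistency check on the numbering above: the PMU block occupies
context indices 24 (PMCR_EL0) through 101 (PMSWINC_EL0), i.e. 78 slots
including the two 31-entry banks PMEVCNTR0..30 and PMEVTYPER0..30, so the six
32-bit-only registers shift from 24..29 to 102..107 and NR_SYS_REGS grows from
30 to 108. The c9_/c14_ offsets are simply the AArch64 indices doubled, since
each 64-bit context slot is exposed to the AArch32 view as two 32-bit
coprocessor registers.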
* [PATCH v3 03/20] KVM: ARM64: Add offset defines for PMU registers
2015-09-24 22:31 ` [PATCH v3 03/20] KVM: ARM64: Add offset defines for PMU registers Shannon Zhao
@ 2015-10-07 8:25 ` Marc Zyngier
0 siblings, 0 replies; 37+ messages in thread
From: Marc Zyngier @ 2015-10-07 8:25 UTC (permalink / raw)
To: linux-arm-kernel
On 24/09/15 23:31, Shannon Zhao wrote:
> We are about to trap and emulate accesses to each PMU register
> individually. This adds the context offsets for the AArch64 PMU
> registers and their AArch32 counterparts.
>
> Signed-off-by: Shannon Zhao <shannon.zhao@linaro.org>
> ---
> arch/arm64/include/asm/kvm_asm.h | 59 +++++++++++++++++++++++++++++++++++-----
> 1 file changed, 52 insertions(+), 7 deletions(-)
>
> diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
> index 67fa0de..0a4dfcc 100644
> --- a/arch/arm64/include/asm/kvm_asm.h
> +++ b/arch/arm64/include/asm/kvm_asm.h
> @@ -48,14 +48,36 @@
> #define MDSCR_EL1 22 /* Monitor Debug System Control Register */
> #define MDCCINT_EL1 23 /* Monitor Debug Comms Channel Interrupt Enable Reg */
>
> +/* Performance Monitors Registers */
> +#define PMCR_EL0 24 /* Control Register */
> +#define PMOVSSET_EL0 25 /* Overflow Flag Status Set Register */
> +#define PMOVSCLR_EL0 26 /* Overflow Flag Status Clear Register */
> +#define PMSELR_EL0 27 /* Event Counter Selection Register */
> +#define PMCEID0_EL0 28 /* Common Event Identification Register 0 */
> +#define PMCEID1_EL0 29 /* Common Event Identification Register 1 */
> +#define PMEVCNTR0_EL0 30 /* Event Counter Register (0-30) */
> +#define PMEVCNTR30_EL0 60
> +#define PMCCNTR_EL0 61 /* Cycle Counter Register */
> +#define PMEVTYPER0_EL0 62 /* Event Type Register (0-30) */
> +#define PMEVTYPER30_EL0 92
> +#define PMCCFILTR_EL0 93 /* Cycle Count Filter Register */
> +#define PMXEVCNTR_EL0 94 /* Selected Event Count Register */
> +#define PMXEVTYPER_EL0 95 /* Selected Event Type Register */
> +#define PMCNTENSET_EL0 96 /* Count Enable Set Register */
> +#define PMCNTENCLR_EL0 97 /* Count Enable Clear Register */
> +#define PMINTENSET_EL1 98 /* Interrupt Enable Set Register */
> +#define PMINTENCLR_EL1 99 /* Interrupt Enable Clear Register */
> +#define PMUSERENR_EL0 100 /* User Enable Register */
> +#define PMSWINC_EL0 101 /* Software Increment Register */
> +
> /* 32bit specific registers. Keep them at the end of the range */
> -#define DACR32_EL2 24 /* Domain Access Control Register */
> -#define IFSR32_EL2 25 /* Instruction Fault Status Register */
> -#define FPEXC32_EL2 26 /* Floating-Point Exception Control Register */
> -#define DBGVCR32_EL2 27 /* Debug Vector Catch Register */
> -#define TEECR32_EL1 28 /* ThumbEE Configuration Register */
> -#define TEEHBR32_EL1 29 /* ThumbEE Handler Base Register */
> -#define NR_SYS_REGS 30
> +#define DACR32_EL2 102 /* Domain Access Control Register */
> +#define IFSR32_EL2 103 /* Instruction Fault Status Register */
> +#define FPEXC32_EL2 104 /* Floating-Point Exception Control Register */
> +#define DBGVCR32_EL2 105 /* Debug Vector Catch Register */
> +#define TEECR32_EL1 106 /* ThumbEE Configuration Register */
> +#define TEEHBR32_EL1 107 /* ThumbEE Handler Base Register */
> +#define NR_SYS_REGS 108
This will need some rebasing - some of the registers have already
changed or disappeared. I really need to find a way to make this mess
more manageable...
M.
--
Jazz is not dead. It just smells funny...
* [PATCH v3 04/20] KVM: ARM64: Add reset and access handlers for PMCR_EL0 register
2015-09-24 22:31 [PATCH v3 00/20] KVM: ARM64: Add guest PMU support Shannon Zhao
` (2 preceding siblings ...)
2015-09-24 22:31 ` [PATCH v3 03/20] KVM: ARM64: Add offset defines for PMU registers Shannon Zhao
@ 2015-09-24 22:31 ` Shannon Zhao
2015-10-16 5:35 ` Wei Huang
2015-09-24 22:31 ` [PATCH v3 05/20] KVM: ARM64: Add reset and access handlers for PMSELR register Shannon Zhao
` (17 subsequent siblings)
21 siblings, 1 reply; 37+ messages in thread
From: Shannon Zhao @ 2015-09-24 22:31 UTC (permalink / raw)
To: linux-arm-kernel
Add a reset handler which gets the host value of PMCR_EL0 and makes the
writable bits architecturally UNKNOWN. Add a common access handler for PMU
registers which emulates writing and reading a register, and add emulation
for PMCR.
Signed-off-by: Shannon Zhao <shannon.zhao@linaro.org>
---
arch/arm64/kvm/sys_regs.c | 81 +++++++++++++++++++++++++++++++++++++++++++++--
1 file changed, 79 insertions(+), 2 deletions(-)
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index b41607d..60c0842 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -33,6 +33,7 @@
#include <asm/kvm_emulate.h>
#include <asm/kvm_host.h>
#include <asm/kvm_mmu.h>
+#include <asm/pmu.h>
#include <trace/events/kvm.h>
@@ -446,6 +447,53 @@ static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
vcpu_sys_reg(vcpu, MPIDR_EL1) = (1ULL << 31) | mpidr;
}
+static void vcpu_sysreg_write(struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *r, u64 val)
+{
+ if (!vcpu_mode_is_32bit(vcpu))
+ vcpu_sys_reg(vcpu, r->reg) = val;
+ else
+ vcpu_cp15(vcpu, r->reg) = lower_32_bits(val);
+}
+
+static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
+{
+ u64 pmcr, val;
+
+ asm volatile("mrs %0, pmcr_el0\n" : "=r" (pmcr));
+ /* Writable bits of PMCR_EL0 (ARMV8_PMCR_MASK) is reset to UNKNOWN*/
+ val = (pmcr & ~ARMV8_PMCR_MASK) | (ARMV8_PMCR_MASK & 0xdecafbad);
+ vcpu_sysreg_write(vcpu, r, val);
+}
+
+/* PMU registers accessor. */
+static bool access_pmu_regs(struct kvm_vcpu *vcpu,
+ const struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
+{
+ unsigned long val;
+
+ if (p->is_write) {
+ switch (r->reg) {
+ case PMCR_EL0: {
+ /* Only update writeable bits of PMCR */
+ val = vcpu_sys_reg(vcpu, r->reg);
+ val &= ~ARMV8_PMCR_MASK;
+ val |= *vcpu_reg(vcpu, p->Rt) & ARMV8_PMCR_MASK;
+ vcpu_sys_reg(vcpu, r->reg) = val;
+ break;
+ }
+ default:
+ vcpu_sys_reg(vcpu, r->reg) = *vcpu_reg(vcpu, p->Rt);
+ break;
+ }
+ } else {
+ *vcpu_reg(vcpu, p->Rt) = vcpu_sys_reg(vcpu, r->reg);
+ }
+
+ return true;
+}
+
/* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
#define DBG_BCR_BVR_WCR_WVR_EL1(n) \
/* DBGBVRn_EL1 */ \
@@ -637,7 +685,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
/* PMCR_EL0 */
{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b000),
- trap_raz_wi },
+ access_pmu_regs, reset_pmcr, PMCR_EL0, },
/* PMCNTENSET_EL0 */
{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b001),
trap_raz_wi },
@@ -871,6 +919,34 @@ static const struct sys_reg_desc cp14_64_regs[] = {
{ Op1( 0), CRm( 2), .access = trap_raz_wi },
};
+/* PMU CP15 registers accessor. */
+static bool access_pmu_cp15_regs(struct kvm_vcpu *vcpu,
+ const struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
+{
+ unsigned long val;
+
+ if (p->is_write) {
+ switch (r->reg) {
+ case c9_PMCR: {
+ /* Only update writeable bits of PMCR */
+ val = vcpu_cp15(vcpu, r->reg);
+ val &= ~ARMV8_PMCR_MASK;
+ val |= *vcpu_reg(vcpu, p->Rt) & ARMV8_PMCR_MASK;
+ vcpu_cp15(vcpu, r->reg) = val;
+ break;
+ }
+ default:
+ vcpu_cp15(vcpu, r->reg) = *vcpu_reg(vcpu, p->Rt);
+ break;
+ }
+ } else {
+ *vcpu_reg(vcpu, p->Rt) = vcpu_cp15(vcpu, r->reg);
+ }
+
+ return true;
+}
+
/*
* Trapped cp15 registers. TTBR0/TTBR1 get a double encoding,
* depending on the way they are accessed (as a 32bit or a 64bit
@@ -899,7 +975,8 @@ static const struct sys_reg_desc cp15_regs[] = {
{ Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw },
/* PMU */
- { Op1( 0), CRn( 9), CRm(12), Op2( 0), trap_raz_wi },
+ { Op1( 0), CRn( 9), CRm(12), Op2( 0), access_pmu_cp15_regs,
+ reset_pmcr, c9_PMCR },
{ Op1( 0), CRn( 9), CRm(12), Op2( 1), trap_raz_wi },
{ Op1( 0), CRn( 9), CRm(12), Op2( 2), trap_raz_wi },
{ Op1( 0), CRn( 9), CRm(12), Op2( 3), trap_raz_wi },
--
2.1.4
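As a worked example of the reset value computed in reset_pmcr() (assuming the
0x3f mask from patch 01): 0xdecafbad & 0x3f is 0x2d, so after reset the six
writable control bits hold the arbitrary pattern 0b101101, while the
host-provided read-only fields of PMCR_EL0 (IMP, IDCODE and N) are preserved
by the pmcr & ~ARMV8_PMCR_MASK term.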
* [PATCH v3 04/20] KVM: ARM64: Add reset and access handlers for PMCR_EL0 register
2015-09-24 22:31 ` [PATCH v3 04/20] KVM: ARM64: Add reset and access handlers for PMCR_EL0 register Shannon Zhao
@ 2015-10-16 5:35 ` Wei Huang
2015-10-21 6:27 ` Shannon Zhao
0 siblings, 1 reply; 37+ messages in thread
From: Wei Huang @ 2015-10-16 5:35 UTC (permalink / raw)
To: linux-arm-kernel
On 09/24/2015 05:31 PM, Shannon Zhao wrote:
> Add reset handler which gets host value of PMCR_EL0 and make writable
> bits architecturally UNKNOWN. Add a common access handler for PMU
> registers which emulates writing and reading register and add emulation
> for PMCR.
>
> Signed-off-by: Shannon Zhao <shannon.zhao@linaro.org>
> ---
> arch/arm64/kvm/sys_regs.c | 81 +++++++++++++++++++++++++++++++++++++++++++++--
> 1 file changed, 79 insertions(+), 2 deletions(-)
>
> diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
> index b41607d..60c0842 100644
> --- a/arch/arm64/kvm/sys_regs.c
> +++ b/arch/arm64/kvm/sys_regs.c
> @@ -33,6 +33,7 @@
> #include <asm/kvm_emulate.h>
> #include <asm/kvm_host.h>
> #include <asm/kvm_mmu.h>
> +#include <asm/pmu.h>
>
> #include <trace/events/kvm.h>
>
> @@ -446,6 +447,53 @@ static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
> vcpu_sys_reg(vcpu, MPIDR_EL1) = (1ULL << 31) | mpidr;
> }
>
> +static void vcpu_sysreg_write(struct kvm_vcpu *vcpu,
> + const struct sys_reg_desc *r, u64 val)
> +{
> + if (!vcpu_mode_is_32bit(vcpu))
> + vcpu_sys_reg(vcpu, r->reg) = val;
> + else
> + vcpu_cp15(vcpu, r->reg) = lower_32_bits(val);
> +}
> +
> +static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
> +{
> + u64 pmcr, val;
> +
> + asm volatile("mrs %0, pmcr_el0\n" : "=r" (pmcr));
> + /* Writable bits of PMCR_EL0 (ARMV8_PMCR_MASK) is reset to UNKNOWN*/
> + val = (pmcr & ~ARMV8_PMCR_MASK) | (ARMV8_PMCR_MASK & 0xdecafbad);
Two comments:
(1) In Patch 1, ARMV8_PMCR_MASK is defined as 0x3f. According to ARMv8
spec, PMCR_EL0.LC (bit 6) is also writable. Should ARMV8_PMCR_MASK be 0x7f?
(2) According to spec the PMCR_EL0.E bit reset to 0, not UNKNOWN.
> + vcpu_sysreg_write(vcpu, r, val);
> +}
> +
> +/* PMU registers accessor. */
> +static bool access_pmu_regs(struct kvm_vcpu *vcpu,
> + const struct sys_reg_params *p,
> + const struct sys_reg_desc *r)
> +{
> + unsigned long val;
> +
> + if (p->is_write) {
> + switch (r->reg) {
> + case PMCR_EL0: {
> + /* Only update writeable bits of PMCR */
> + val = vcpu_sys_reg(vcpu, r->reg);
> + val &= ~ARMV8_PMCR_MASK;
> + val |= *vcpu_reg(vcpu, p->Rt) & ARMV8_PMCR_MASK;
> + vcpu_sys_reg(vcpu, r->reg) = val;
> + break;
> + }
> + default:
> + vcpu_sys_reg(vcpu, r->reg) = *vcpu_reg(vcpu, p->Rt);
> + break;
> + }
> + } else {
> + *vcpu_reg(vcpu, p->Rt) = vcpu_sys_reg(vcpu, r->reg);
> + }
> +
> + return true;
> +}
> +
> /* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
> #define DBG_BCR_BVR_WCR_WVR_EL1(n) \
> /* DBGBVRn_EL1 */ \
> @@ -637,7 +685,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
>
> /* PMCR_EL0 */
> { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b000),
> - trap_raz_wi },
> + access_pmu_regs, reset_pmcr, PMCR_EL0, },
> /* PMCNTENSET_EL0 */
> { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b001),
> trap_raz_wi },
> @@ -871,6 +919,34 @@ static const struct sys_reg_desc cp14_64_regs[] = {
> { Op1( 0), CRm( 2), .access = trap_raz_wi },
> };
>
> +/* PMU CP15 registers accessor. */
> +static bool access_pmu_cp15_regs(struct kvm_vcpu *vcpu,
> + const struct sys_reg_params *p,
> + const struct sys_reg_desc *r)
> +{
> + unsigned long val;
> +
> + if (p->is_write) {
> + switch (r->reg) {
> + case c9_PMCR: {
> + /* Only update writeable bits of PMCR */
> + val = vcpu_cp15(vcpu, r->reg);
> + val &= ~ARMV8_PMCR_MASK;
> + val |= *vcpu_reg(vcpu, p->Rt) & ARMV8_PMCR_MASK;
> + vcpu_cp15(vcpu, r->reg) = val;
> + break;
> + }
> + default:
> + vcpu_cp15(vcpu, r->reg) = *vcpu_reg(vcpu, p->Rt);
> + break;
> + }
> + } else {
> + *vcpu_reg(vcpu, p->Rt) = vcpu_cp15(vcpu, r->reg);
> + }
> +
> + return true;
> +}
> +
> /*
> * Trapped cp15 registers. TTBR0/TTBR1 get a double encoding,
> * depending on the way they are accessed (as a 32bit or a 64bit
> @@ -899,7 +975,8 @@ static const struct sys_reg_desc cp15_regs[] = {
> { Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw },
>
> /* PMU */
> - { Op1( 0), CRn( 9), CRm(12), Op2( 0), trap_raz_wi },
> + { Op1( 0), CRn( 9), CRm(12), Op2( 0), access_pmu_cp15_regs,
> + reset_pmcr, c9_PMCR },
> { Op1( 0), CRn( 9), CRm(12), Op2( 1), trap_raz_wi },
> { Op1( 0), CRn( 9), CRm(12), Op2( 2), trap_raz_wi },
> { Op1( 0), CRn( 9), CRm(12), Op2( 3), trap_raz_wi },
>
* [PATCH v3 04/20] KVM: ARM64: Add reset and access handlers for PMCR_EL0 register
2015-10-16 5:35 ` Wei Huang
@ 2015-10-21 6:27 ` Shannon Zhao
0 siblings, 0 replies; 37+ messages in thread
From: Shannon Zhao @ 2015-10-21 6:27 UTC (permalink / raw)
To: linux-arm-kernel
On 2015/10/16 13:35, Wei Huang wrote:
>
> On 09/24/2015 05:31 PM, Shannon Zhao wrote:
>> > Add reset handler which gets host value of PMCR_EL0 and make writable
>> > bits architecturally UNKNOWN. Add a common access handler for PMU
>> > registers which emulates writing and reading register and add emulation
>> > for PMCR.
>> >
>> > Signed-off-by: Shannon Zhao <shannon.zhao@linaro.org>
>> > ---
>> > arch/arm64/kvm/sys_regs.c | 81 +++++++++++++++++++++++++++++++++++++++++++++--
>> > 1 file changed, 79 insertions(+), 2 deletions(-)
>> >
>> > diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
>> > index b41607d..60c0842 100644
>> > --- a/arch/arm64/kvm/sys_regs.c
>> > +++ b/arch/arm64/kvm/sys_regs.c
>> > @@ -33,6 +33,7 @@
>> > #include <asm/kvm_emulate.h>
>> > #include <asm/kvm_host.h>
>> > #include <asm/kvm_mmu.h>
>> > +#include <asm/pmu.h>
>> >
>> > #include <trace/events/kvm.h>
>> >
>> > @@ -446,6 +447,53 @@ static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
>> > vcpu_sys_reg(vcpu, MPIDR_EL1) = (1ULL << 31) | mpidr;
>> > }
>> >
>> > +static void vcpu_sysreg_write(struct kvm_vcpu *vcpu,
>> > + const struct sys_reg_desc *r, u64 val)
>> > +{
>> > + if (!vcpu_mode_is_32bit(vcpu))
>> > + vcpu_sys_reg(vcpu, r->reg) = val;
>> > + else
>> > + vcpu_cp15(vcpu, r->reg) = lower_32_bits(val);
>> > +}
>> > +
>> > +static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
>> > +{
>> > + u64 pmcr, val;
>> > +
>> > + asm volatile("mrs %0, pmcr_el0\n" : "=r" (pmcr));
>> > + /* Writable bits of PMCR_EL0 (ARMV8_PMCR_MASK) is reset to UNKNOWN*/
>> > + val = (pmcr & ~ARMV8_PMCR_MASK) | (ARMV8_PMCR_MASK & 0xdecafbad);
> Two comments:
> (1) In Patch 1, ARMV8_PMCR_MASK is defined as 0x3f. According to ARMv8
> spec, PMCR_EL0.LC (bit 6) is also writable. Should ARMV8_PMCR_MASK be 0x7f?
According to the spec, it should be 0x7f.
> (2) According to spec the PMCR_EL0.E bit reset to 0, not UNKNOWN.
>
Yeah, will fix this.
Thanks,
--
Shannon
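Going by this exchange, the follow-up presumably extends the writable mask to
cover PMCR_EL0.LC, roughly along these lines (illustrative, not a posted
patch; the LC define itself appears in patch 07):

/* Determines which PMCCNTR_EL0 bit generates an overflow */
#define ARMV8_PMCR_LC	(1 << 6)
#define ARMV8_PMCR_MASK	0x7f	/* Mask for writable bits */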
* [PATCH v3 05/20] KVM: ARM64: Add reset and access handlers for PMSELR register
2015-09-24 22:31 [PATCH v3 00/20] KVM: ARM64: Add guest PMU support Shannon Zhao
` (3 preceding siblings ...)
2015-09-24 22:31 ` [PATCH v3 04/20] KVM: ARM64: Add reset and access handlers for PMCR_EL0 register Shannon Zhao
@ 2015-09-24 22:31 ` Shannon Zhao
2015-09-24 22:31 ` [PATCH v3 06/20] KVM: ARM64: Add reset and access handlers for PMCEID0 and PMCEID1 register Shannon Zhao
` (16 subsequent siblings)
21 siblings, 0 replies; 37+ messages in thread
From: Shannon Zhao @ 2015-09-24 22:31 UTC (permalink / raw)
To: linux-arm-kernel
Since the reset value of PMSELR_EL0 is UNKNOWN, use reset_unknown for
its reset handler. As accesses to PMSELR need no special handling, the
default case is used to emulate writing and reading the register.
Also add a helper for resetting CP15 registers to UNKNOWN.
Signed-off-by: Shannon Zhao <shannon.zhao@linaro.org>
---
arch/arm64/kvm/sys_regs.c | 5 +++--
arch/arm64/kvm/sys_regs.h | 8 ++++++++
2 files changed, 11 insertions(+), 2 deletions(-)
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 60c0842..f73aea9 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -700,7 +700,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
trap_raz_wi },
/* PMSELR_EL0 */
{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b101),
- trap_raz_wi },
+ access_pmu_regs, reset_unknown, PMSELR_EL0 },
/* PMCEID0_EL0 */
{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b110),
trap_raz_wi },
@@ -980,7 +980,8 @@ static const struct sys_reg_desc cp15_regs[] = {
{ Op1( 0), CRn( 9), CRm(12), Op2( 1), trap_raz_wi },
{ Op1( 0), CRn( 9), CRm(12), Op2( 2), trap_raz_wi },
{ Op1( 0), CRn( 9), CRm(12), Op2( 3), trap_raz_wi },
- { Op1( 0), CRn( 9), CRm(12), Op2( 5), trap_raz_wi },
+ { Op1( 0), CRn( 9), CRm(12), Op2( 5), access_pmu_cp15_regs,
+ reset_unknown_cp15, c9_PMSELR },
{ Op1( 0), CRn( 9), CRm(12), Op2( 6), trap_raz_wi },
{ Op1( 0), CRn( 9), CRm(12), Op2( 7), trap_raz_wi },
{ Op1( 0), CRn( 9), CRm(13), Op2( 0), trap_raz_wi },
diff --git a/arch/arm64/kvm/sys_regs.h b/arch/arm64/kvm/sys_regs.h
index eaa324e..8afeff7 100644
--- a/arch/arm64/kvm/sys_regs.h
+++ b/arch/arm64/kvm/sys_regs.h
@@ -110,6 +110,14 @@ static inline void reset_unknown(struct kvm_vcpu *vcpu,
vcpu_sys_reg(vcpu, r->reg) = 0x1de7ec7edbadc0deULL;
}
+static inline void reset_unknown_cp15(struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *r)
+{
+ BUG_ON(!r->reg);
+ BUG_ON(r->reg >= NR_COPRO_REGS);
+ vcpu_cp15(vcpu, r->reg) = 0xdecafbad;
+}
+
static inline void reset_val(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
BUG_ON(!r->reg);
--
2.1.4
* [PATCH v3 06/20] KVM: ARM64: Add reset and access handlers for PMCEID0 and PMCEID1 register
2015-09-24 22:31 [PATCH v3 00/20] KVM: ARM64: Add guest PMU support Shannon Zhao
` (4 preceding siblings ...)
2015-09-24 22:31 ` [PATCH v3 05/20] KVM: ARM64: Add reset and access handlers for PMSELR register Shannon Zhao
@ 2015-09-24 22:31 ` Shannon Zhao
2015-09-24 22:31 ` [PATCH v3 07/20] KVM: ARM64: PMU: Add perf event map and introduce perf event creating function Shannon Zhao
` (15 subsequent siblings)
21 siblings, 0 replies; 37+ messages in thread
From: Shannon Zhao @ 2015-09-24 22:31 UTC (permalink / raw)
To: linux-arm-kernel
Add a reset handler which gets the host value of PMCEID0 or PMCEID1. Since
writes to PMCEID0 and PMCEID1 are ignored, add a new case for them.
Signed-off-by: Shannon Zhao <shannon.zhao@linaro.org>
---
arch/arm64/kvm/sys_regs.c | 29 +++++++++++++++++++++++++----
1 file changed, 25 insertions(+), 4 deletions(-)
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index f73aea9..d49657a 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -466,6 +466,19 @@ static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
vcpu_sysreg_write(vcpu, r, val);
}
+static void reset_pmceid(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
+{
+ u64 pmceid;
+
+ if (r->reg == PMCEID0_EL0 || r->reg == c9_PMCEID0)
+ asm volatile("mrs %0, pmceid0_el0\n" : "=r" (pmceid));
+ else
+ /* PMCEID1_EL0 or c9_PMCEID1 */
+ asm volatile("mrs %0, pmceid1_el0\n" : "=r" (pmceid));
+
+ vcpu_sysreg_write(vcpu, r, pmceid);
+}
+
/* PMU registers accessor. */
static bool access_pmu_regs(struct kvm_vcpu *vcpu,
const struct sys_reg_params *p,
@@ -483,6 +496,9 @@ static bool access_pmu_regs(struct kvm_vcpu *vcpu,
vcpu_sys_reg(vcpu, r->reg) = val;
break;
}
+ case PMCEID0_EL0:
+ case PMCEID1_EL0:
+ return ignore_write(vcpu, p);
default:
vcpu_sys_reg(vcpu, r->reg) = *vcpu_reg(vcpu, p->Rt);
break;
@@ -703,10 +719,10 @@ static const struct sys_reg_desc sys_reg_descs[] = {
access_pmu_regs, reset_unknown, PMSELR_EL0 },
/* PMCEID0_EL0 */
{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b110),
- trap_raz_wi },
+ access_pmu_regs, reset_pmceid, PMCEID0_EL0 },
/* PMCEID1_EL0 */
{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b111),
- trap_raz_wi },
+ access_pmu_regs, reset_pmceid, PMCEID1_EL0 },
/* PMCCNTR_EL0 */
{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b000),
trap_raz_wi },
@@ -936,6 +952,9 @@ static bool access_pmu_cp15_regs(struct kvm_vcpu *vcpu,
vcpu_cp15(vcpu, r->reg) = val;
break;
}
+ case c9_PMCEID0:
+ case c9_PMCEID1:
+ return ignore_write(vcpu, p);
default:
vcpu_cp15(vcpu, r->reg) = *vcpu_reg(vcpu, p->Rt);
break;
@@ -982,8 +1001,10 @@ static const struct sys_reg_desc cp15_regs[] = {
{ Op1( 0), CRn( 9), CRm(12), Op2( 3), trap_raz_wi },
{ Op1( 0), CRn( 9), CRm(12), Op2( 5), access_pmu_cp15_regs,
reset_unknown_cp15, c9_PMSELR },
- { Op1( 0), CRn( 9), CRm(12), Op2( 6), trap_raz_wi },
- { Op1( 0), CRn( 9), CRm(12), Op2( 7), trap_raz_wi },
+ { Op1( 0), CRn( 9), CRm(12), Op2( 6), access_pmu_cp15_regs,
+ reset_pmceid, c9_PMCEID0 },
+ { Op1( 0), CRn( 9), CRm(12), Op2( 7), access_pmu_cp15_regs,
+ reset_pmceid, c9_PMCEID1 },
{ Op1( 0), CRn( 9), CRm(13), Op2( 0), trap_raz_wi },
{ Op1( 0), CRn( 9), CRm(13), Op2( 1), trap_raz_wi },
{ Op1( 0), CRn( 9), CRm(13), Op2( 2), trap_raz_wi },
--
2.1.4
* [PATCH v3 07/20] KVM: ARM64: PMU: Add perf event map and introduce perf event creating function
2015-09-24 22:31 [PATCH v3 00/20] KVM: ARM64: Add guest PMU support Shannon Zhao
` (5 preceding siblings ...)
2015-09-24 22:31 ` [PATCH v3 06/20] KVM: ARM64: Add reset and access handlers for PMCEID0 and PMCEID1 register Shannon Zhao
@ 2015-09-24 22:31 ` Shannon Zhao
2015-10-16 6:08 ` Wei Huang
2015-09-24 22:31 ` [PATCH v3 08/20] KVM: ARM64: Add reset and access handlers for PMXEVTYPER register Shannon Zhao
` (14 subsequent siblings)
21 siblings, 1 reply; 37+ messages in thread
From: Shannon Zhao @ 2015-09-24 22:31 UTC (permalink / raw)
To: linux-arm-kernel
When tools like perf are used on the host, perf passes the event type and
the id within that event type category to the kernel, which maps them to a
hardware event number and writes that number to the PMU PMEVTYPER<n>_EL0
register. When KVM receives the event number from the guest, it uses the
raw event type directly to create a perf_event for it.
Signed-off-by: Shannon Zhao <shannon.zhao@linaro.org>
---
arch/arm64/include/asm/pmu.h | 2 +
arch/arm64/kvm/Makefile | 1 +
include/kvm/arm_pmu.h | 13 ++++
virt/kvm/arm/pmu.c | 154 +++++++++++++++++++++++++++++++++++++++++++
4 files changed, 170 insertions(+)
create mode 100644 virt/kvm/arm/pmu.c
diff --git a/arch/arm64/include/asm/pmu.h b/arch/arm64/include/asm/pmu.h
index b9f394a..2c025f2 100644
--- a/arch/arm64/include/asm/pmu.h
+++ b/arch/arm64/include/asm/pmu.h
@@ -31,6 +31,8 @@
#define ARMV8_PMCR_D (1 << 3) /* CCNT counts every 64th cpu cycle */
#define ARMV8_PMCR_X (1 << 4) /* Export to ETM */
#define ARMV8_PMCR_DP (1 << 5) /* Disable CCNT if non-invasive debug*/
+/* Determines which PMCCNTR_EL0 bit generates an overflow */
+#define ARMV8_PMCR_LC (1 << 6)
#define ARMV8_PMCR_N_SHIFT 11 /* Number of counters supported */
#define ARMV8_PMCR_N_MASK 0x1f
#define ARMV8_PMCR_MASK 0x3f /* Mask for writable bits */
diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile
index 1949fe5..18d56d8 100644
--- a/arch/arm64/kvm/Makefile
+++ b/arch/arm64/kvm/Makefile
@@ -27,3 +27,4 @@ kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v3.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v3-emul.o
kvm-$(CONFIG_KVM_ARM_HOST) += vgic-v3-switch.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/arch_timer.o
+kvm-$(CONFIG_KVM_ARM_PMU) += $(KVM)/arm/pmu.o
diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h
index bb0cd21..b48cdc6 100644
--- a/include/kvm/arm_pmu.h
+++ b/include/kvm/arm_pmu.h
@@ -37,4 +37,17 @@ struct kvm_pmu {
#endif
};
+#ifdef CONFIG_KVM_ARM_PMU
+unsigned long kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u32 select_idx);
+void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u32 data,
+ u32 select_idx);
+#else
+unsigned long kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u32 select_idx)
+{
+ return 0;
+}
+void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u32 data,
+ u32 select_idx) {}
+#endif
+
#endif
diff --git a/virt/kvm/arm/pmu.c b/virt/kvm/arm/pmu.c
new file mode 100644
index 0000000..002ec79
--- /dev/null
+++ b/virt/kvm/arm/pmu.c
@@ -0,0 +1,154 @@
+/*
+ * Copyright (C) 2015 Linaro Ltd.
+ * Author: Shannon Zhao <shannon.zhao@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/cpu.h>
+#include <linux/kvm.h>
+#include <linux/kvm_host.h>
+#include <linux/perf_event.h>
+#include <asm/kvm_emulate.h>
+#include <kvm/arm_pmu.h>
+
+static void kvm_pmu_set_evttyper(struct kvm_vcpu *vcpu, u32 idx, u32 val)
+{
+ if (!vcpu_mode_is_32bit(vcpu))
+ vcpu_sys_reg(vcpu, PMEVTYPER0_EL0 + idx) = val;
+ else
+ vcpu_cp15(vcpu, c14_PMEVTYPER0 + idx) = val;
+}
+
+static unsigned long kvm_pmu_get_evttyper(struct kvm_vcpu *vcpu, u32 idx)
+{
+ if (!vcpu_mode_is_32bit(vcpu))
+ return vcpu_sys_reg(vcpu, PMEVTYPER0_EL0 + idx)
+ & ARMV8_EVTYPE_EVENT;
+ else
+ return vcpu_cp15(vcpu, c14_PMEVTYPER0 + idx)
+ & ARMV8_EVTYPE_EVENT;
+}
+
+/**
+ * kvm_pmu_stop_counter - stop PMU counter for the selected counter
+ * @vcpu: The vcpu pointer
+ * @select_idx: The counter index
+ *
+ * If this counter has been configured to monitor some event, disable and
+ * release it.
+ */
+static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, u32 select_idx)
+{
+ struct kvm_pmu *pmu = &vcpu->arch.pmu;
+ struct kvm_pmc *pmc = &pmu->pmc[select_idx];
+
+ if (pmc->perf_event) {
+ perf_event_disable(pmc->perf_event);
+ perf_event_release_kernel(pmc->perf_event);
+ pmc->perf_event = NULL;
+ }
+ kvm_pmu_set_evttyper(vcpu, select_idx, ARMV8_EVTYPE_EVENT);
+}
+
+/**
+ * kvm_pmu_get_counter_value - get PMU counter value
+ * @vcpu: The vcpu pointer
+ * @select_idx: The counter index
+ */
+unsigned long kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u32 select_idx)
+{
+ u64 enabled, running;
+ struct kvm_pmu *pmu = &vcpu->arch.pmu;
+ struct kvm_pmc *pmc = &pmu->pmc[select_idx];
+ u64 counter;
+
+ if (!vcpu_mode_is_32bit(vcpu))
+ counter = vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + select_idx);
+ else
+ counter = vcpu_cp15(vcpu, c14_PMEVCNTR0 + select_idx);
+
+ if (pmc->perf_event) {
+ counter += perf_event_read_value(pmc->perf_event,
+ &enabled, &running);
+ }
+ return counter;
+}
+
+/**
+ * kvm_pmu_set_counter_event_type - set selected counter to monitor some event
+ * @vcpu: The vcpu pointer
+ * @data: The data guest writes to PMXEVTYPER_EL0
+ * @select_idx: The number of selected counter
+ *
+ * When OS accesses PMXEVTYPER_EL0, that means it wants to set a PMC to count an
+ * event with given hardware event number. Here we call perf_event API to
+ * emulate this action and create a kernel perf event for it.
+ */
+void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u32 data,
+ u32 select_idx)
+{
+ struct kvm_pmu *pmu = &vcpu->arch.pmu;
+ struct kvm_pmc *pmc = &pmu->pmc[select_idx];
+ struct perf_event *event;
+ struct perf_event_attr attr;
+ u32 new_eventsel, old_eventsel;
+ u64 counter;
+ int overflow_bit, pmcr_lc;
+
+ old_eventsel = kvm_pmu_get_evttyper(vcpu, select_idx);
+ new_eventsel = data & ARMV8_EVTYPE_EVENT;
+ if (new_eventsel == old_eventsel) {
+ if (pmc->perf_event)
+ local64_set(&pmc->perf_event->count, 0);
+ return;
+ }
+
+ kvm_pmu_stop_counter(vcpu, select_idx);
+ kvm_pmu_set_evttyper(vcpu, select_idx, data);
+
+ memset(&attr, 0, sizeof(struct perf_event_attr));
+ attr.type = PERF_TYPE_RAW;
+ attr.size = sizeof(attr);
+ attr.pinned = 1;
+ attr.disabled = 1;
+ attr.exclude_user = data & ARMV8_EXCLUDE_EL0 ? 1 : 0;
+ attr.exclude_kernel = data & ARMV8_EXCLUDE_EL1 ? 1 : 0;
+ attr.exclude_host = 1; /* Don't count host events */
+ attr.config = new_eventsel;
+
+ overflow_bit = 31; /* Generic counters are 32-bit registers*/
+ if (new_eventsel == 0x11) {
+ /* Cycle counter overflow on increment that changes PMCCNTR[63]
+ * or PMCCNTR[31] from 1 to 0 according to the value of
+ * ARMV8_PMCR_LC
+ */
+ if (!vcpu_mode_is_32bit(vcpu))
+ pmcr_lc = vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMCR_LC;
+ else
+ pmcr_lc = vcpu_cp15(vcpu, c9_PMCR) & ARMV8_PMCR_LC;
+
+ overflow_bit = pmcr_lc ? 63 : 31;
+ }
+ counter = kvm_pmu_get_counter_value(vcpu, select_idx);
+ /* The initial sample period (overflow count) of an event. */
+ attr.sample_period = (-counter) & (((u64)1 << overflow_bit) - 1);
+
+ event = perf_event_create_kernel_counter(&attr, -1, current, NULL, pmc);
+ if (IS_ERR(event)) {
+ printk_once("kvm: pmu event creation failed %ld\n",
+ PTR_ERR(event));
+ return;
+ }
+ pmc->perf_event = event;
+}
--
2.1.4
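A worked example of the sample_period computation above: if the guest has
preloaded a 32-bit event counter with 0xFFFFFFF0 and overflow_bit is 31,
sample_period evaluates to (-0xFFFFFFF0) & 0x7FFFFFFF = 0x10, so the perf
event is configured to fire after 16 more increments, which is exactly when
the emulated 32-bit counter would wrap and raise an overflow.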
* [PATCH v3 07/20] KVM: ARM64: PMU: Add perf event map and introduce perf event creating function
2015-09-24 22:31 ` [PATCH v3 07/20] KVM: ARM64: PMU: Add perf event map and introduce perf event creating function Shannon Zhao
@ 2015-10-16 6:08 ` Wei Huang
2015-10-21 6:32 ` Shannon Zhao
0 siblings, 1 reply; 37+ messages in thread
From: Wei Huang @ 2015-10-16 6:08 UTC (permalink / raw)
To: linux-arm-kernel
On 09/24/2015 05:31 PM, Shannon Zhao wrote:
> When we use tools like perf on host, perf passes the event type and the
> id of this event type category to kernel, then kernel will map them to
> hardware event number and write this number to PMU PMEVTYPER<n>_EL0
> register. When getting the event number in KVM, directly use raw event
> type to create a perf_event for it.
>
> Signed-off-by: Shannon Zhao <shannon.zhao@linaro.org>
> ---
> arch/arm64/include/asm/pmu.h | 2 +
> arch/arm64/kvm/Makefile | 1 +
> include/kvm/arm_pmu.h | 13 ++++
> virt/kvm/arm/pmu.c | 154 +++++++++++++++++++++++++++++++++++++++++++
> 4 files changed, 170 insertions(+)
> create mode 100644 virt/kvm/arm/pmu.c
>
> diff --git a/arch/arm64/include/asm/pmu.h b/arch/arm64/include/asm/pmu.h
> index b9f394a..2c025f2 100644
> --- a/arch/arm64/include/asm/pmu.h
> +++ b/arch/arm64/include/asm/pmu.h
> @@ -31,6 +31,8 @@
> #define ARMV8_PMCR_D (1 << 3) /* CCNT counts every 64th cpu cycle */
> #define ARMV8_PMCR_X (1 << 4) /* Export to ETM */
> #define ARMV8_PMCR_DP (1 << 5) /* Disable CCNT if non-invasive debug*/
> +/* Determines which PMCCNTR_EL0 bit generates an overflow */
> +#define ARMV8_PMCR_LC (1 << 6)
> #define ARMV8_PMCR_N_SHIFT 11 /* Number of counters supported */
> #define ARMV8_PMCR_N_MASK 0x1f
> #define ARMV8_PMCR_MASK 0x3f /* Mask for writable bits */
> diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile
> index 1949fe5..18d56d8 100644
> --- a/arch/arm64/kvm/Makefile
> +++ b/arch/arm64/kvm/Makefile
> @@ -27,3 +27,4 @@ kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v3.o
> kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v3-emul.o
> kvm-$(CONFIG_KVM_ARM_HOST) += vgic-v3-switch.o
> kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/arch_timer.o
> +kvm-$(CONFIG_KVM_ARM_PMU) += $(KVM)/arm/pmu.o
> diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h
> index bb0cd21..b48cdc6 100644
> --- a/include/kvm/arm_pmu.h
> +++ b/include/kvm/arm_pmu.h
> @@ -37,4 +37,17 @@ struct kvm_pmu {
> #endif
> };
>
> +#ifdef CONFIG_KVM_ARM_PMU
> +unsigned long kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u32 select_idx);
> +void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u32 data,
> + u32 select_idx);
> +#else
> +unsigned long kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u32 select_idx)
> +{
> + return 0;
> +}
> +void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u32 data,
> + u32 select_idx) {}
> +#endif
> +
> #endif
> diff --git a/virt/kvm/arm/pmu.c b/virt/kvm/arm/pmu.c
> new file mode 100644
> index 0000000..002ec79
> --- /dev/null
> +++ b/virt/kvm/arm/pmu.c
> @@ -0,0 +1,154 @@
> +/*
> + * Copyright (C) 2015 Linaro Ltd.
> + * Author: Shannon Zhao <shannon.zhao@linaro.org>
> + *
> + * This program is free software; you can redistribute it and/or modify
> + * it under the terms of the GNU General Public License version 2 as
> + * published by the Free Software Foundation.
> + *
> + * This program is distributed in the hope that it will be useful,
> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
> + * GNU General Public License for more details.
> + *
> + * You should have received a copy of the GNU General Public License
> + * along with this program. If not, see <http://www.gnu.org/licenses/>.
> + */
> +
> +#include <linux/cpu.h>
> +#include <linux/kvm.h>
> +#include <linux/kvm_host.h>
> +#include <linux/perf_event.h>
> +#include <asm/kvm_emulate.h>
> +#include <kvm/arm_pmu.h>
> +
> +static void kvm_pmu_set_evttyper(struct kvm_vcpu *vcpu, u32 idx, u32 val)
> +{
> + if (!vcpu_mode_is_32bit(vcpu))
> + vcpu_sys_reg(vcpu, PMEVTYPER0_EL0 + idx) = val;
> + else
> + vcpu_cp15(vcpu, c14_PMEVTYPER0 + idx) = val;
> +}
> +
> +static unsigned long kvm_pmu_get_evttyper(struct kvm_vcpu *vcpu, u32 idx)
> +{
> + if (!vcpu_mode_is_32bit(vcpu))
> + return vcpu_sys_reg(vcpu, PMEVTYPER0_EL0 + idx)
> + & ARMV8_EVTYPE_EVENT;
> + else
> + return vcpu_cp15(vcpu, c14_PMEVTYPER0 + idx)
> + & ARMV8_EVTYPE_EVENT;
> +}
> +
> +/**
> + * kvm_pmu_stop_counter - stop PMU counter for the selected counter
> + * @vcpu: The vcpu pointer
> + * @select_idx: The counter index
> + *
> + * If this counter has been configured to monitor some event, disable and
> + * release it.
> + */
> +static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, u32 select_idx)
> +{
> + struct kvm_pmu *pmu = &vcpu->arch.pmu;
> + struct kvm_pmc *pmc = &pmu->pmc[select_idx];
A small suggestion (optional). It might be cleaner to define a macro and
use it here. Something like in arm_pmu.h :
#define VCPU_TO_PMU(vcpu) (&(vcpu)->arch.pmu)
> +
> + if (pmc->perf_event) {
> + perf_event_disable(pmc->perf_event);
> + perf_event_release_kernel(pmc->perf_event);
> + pmc->perf_event = NULL;
> + }
> + kvm_pmu_set_evttyper(vcpu, select_idx, ARMV8_EVTYPE_EVENT);
> +}
> +
> +/**
> + * kvm_pmu_get_counter_value - get PMU counter value
> + * @vcpu: The vcpu pointer
> + * @select_idx: The counter index
> + */
> +unsigned long kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u32 select_idx)
> +{
> + u64 enabled, running;
> + struct kvm_pmu *pmu = &vcpu->arch.pmu;
> + struct kvm_pmc *pmc = &pmu->pmc[select_idx];
> + u64 counter;
> +
> + if (!vcpu_mode_is_32bit(vcpu))
> + counter = vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + select_idx);
The select_idx is from PMSELR_EL0. According to PMUv3 spec, PMSELR_EL0
is the register that "selects the current event counter PMEVCNTR<x> or
the cycle counter, CCNT". The code here always reads the counter value
from PMEVCNTR. It doesn't read the value from cycle counter when
select_idx=0b11111. We might waste some perf counter resources here.
> + else
> + counter = vcpu_cp15(vcpu, c14_PMEVCNTR0 + select_idx);
> +
> + if (pmc->perf_event) {
> + counter += perf_event_read_value(pmc->perf_event,
> + &enabled, &running);
> + }
> + return counter;
> +}
> +
> +/**
> + * kvm_pmu_set_counter_event_type - set selected counter to monitor some event
> + * @vcpu: The vcpu pointer
> + * @data: The data guest writes to PMXEVTYPER_EL0
> + * @select_idx: The number of selected counter
> + *
> + * When OS accesses PMXEVTYPER_EL0, that means it wants to set a PMC to count an
> + * event with given hardware event number. Here we call perf_event API to
> + * emulate this action and create a kernel perf event for it.
> + */
> +void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u32 data,
> + u32 select_idx)
> +{
> + struct kvm_pmu *pmu = &vcpu->arch.pmu;
> + struct kvm_pmc *pmc = &pmu->pmc[select_idx];
> + struct perf_event *event;
> + struct perf_event_attr attr;
> + u32 new_eventsel, old_eventsel;
> + u64 counter;
> + int overflow_bit, pmcr_lc;
> +
> + old_eventsel = kvm_pmu_get_evttyper(vcpu, select_idx);
> + new_eventsel = data & ARMV8_EVTYPE_EVENT;
> + if (new_eventsel == old_eventsel) {
> + if (pmc->perf_event)
> + local64_set(&pmc->perf_event->count, 0);
> + return;
> + }
> +
> + kvm_pmu_stop_counter(vcpu, select_idx);
> + kvm_pmu_set_evttyper(vcpu, select_idx, data);
> +
> + memset(&attr, 0, sizeof(struct perf_event_attr));
> + attr.type = PERF_TYPE_RAW;
> + attr.size = sizeof(attr);
> + attr.pinned = 1;
> + attr.disabled = 1;
> + attr.exclude_user = data & ARMV8_EXCLUDE_EL0 ? 1 : 0;
> + attr.exclude_kernel = data & ARMV8_EXCLUDE_EL1 ? 1 : 0;
> + attr.exclude_host = 1; /* Don't count host events */
> + attr.config = new_eventsel;
> +
> + overflow_bit = 31; /* Generic counters are 32-bit registers*/
> + if (new_eventsel == 0x11) {
> + /* Cycle counter overflow on increment that changes PMCCNTR[63]
> + * or PMCCNTR[31] from 1 to 0 according to the value of
> + * ARMV8_PMCR_LC
> + */
> + if (!vcpu_mode_is_32bit(vcpu))
> + pmcr_lc = vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMCR_LC;
> + else
> + pmcr_lc = vcpu_cp15(vcpu, c9_PMCR) & ARMV8_PMCR_LC;
> +
> + overflow_bit = pmcr_lc ? 63 : 31;
> + }
> + counter = kvm_pmu_get_counter_value(vcpu, select_idx);
> + /* The initial sample period (overflow count) of an event. */
> + attr.sample_period = (-counter) & (((u64)1 << overflow_bit) - 1);
> +
> + event = perf_event_create_kernel_counter(&attr, -1, current, NULL, pmc);
> + if (IS_ERR(event)) {
> + printk_once("kvm: pmu event creation failed %ld\n",
> + PTR_ERR(event));
> + return;
> + }
> + pmc->perf_event = event;
> +}
>
* [PATCH v3 07/20] KVM: ARM64: PMU: Add perf event map and introduce perf event creating function
2015-10-16 6:08 ` Wei Huang
@ 2015-10-21 6:32 ` Shannon Zhao
0 siblings, 0 replies; 37+ messages in thread
From: Shannon Zhao @ 2015-10-21 6:32 UTC (permalink / raw)
To: linux-arm-kernel
On 2015/10/16 14:08, Wei Huang wrote:
>> +/**
>> > + * kvm_pmu_get_counter_value - get PMU counter value
>> > + * @vcpu: The vcpu pointer
>> > + * @select_idx: The counter index
>> > + */
>> > +unsigned long kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u32 select_idx)
>> > +{
>> > + u64 enabled, running;
>> > + struct kvm_pmu *pmu = &vcpu->arch.pmu;
>> > + struct kvm_pmc *pmc = &pmu->pmc[select_idx];
>> > + u64 counter;
>> > +
>> > + if (!vcpu_mode_is_32bit(vcpu))
>> > + counter = vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + select_idx);
> The select_idx is from PMSELR_EL0. According to PMUv3 spec, PMSELR_EL0
> is the register that "selects the current event counter PMEVCNTR<x> or
> the cycle counter, CCNT". The code here always reads the counter value
> from PMEVCNTR. It doesn't read the value from cycle counter when
> select_idx=0b11111. We might waste some perf counter resources here.
>
No, it does read the value from the cycle counter. When
select_idx=0b11111, PMEVCNTR0_EL0 + select_idx = PMCCNTR_EL0( See patch
03/20).
--
Shannon
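Working through the offsets from patch 03 confirms this: PMEVCNTR0_EL0 is 30
and PMCCNTR_EL0 is 61, so a PMSELR_EL0 value of 0b11111 (31) gives
PMEVCNTR0_EL0 + 31 = 61, i.e. the read lands directly on the cycle counter's
context slot without any special casing.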
* [PATCH v3 08/20] KVM: ARM64: Add reset and access handlers for PMXEVTYPER register
2015-09-24 22:31 [PATCH v3 00/20] KVM: ARM64: Add guest PMU support Shannon Zhao
` (6 preceding siblings ...)
2015-09-24 22:31 ` [PATCH v3 07/20] KVM: ARM64: PMU: Add perf event map and introduce perf event creating function Shannon Zhao
@ 2015-09-24 22:31 ` Shannon Zhao
2015-09-24 22:31 ` [PATCH v3 09/20] KVM: ARM64: Add reset and access handlers for PMXEVCNTR register Shannon Zhao
` (13 subsequent siblings)
21 siblings, 0 replies; 37+ messages in thread
From: Shannon Zhao @ 2015-09-24 22:31 UTC (permalink / raw)
To: linux-arm-kernel
Since the reset value of PMXEVTYPER is UNKNOWN, use reset_unknown or
reset_unknown_cp15 for its reset handler. Add an access handler which
emulates writing and reading the PMXEVTYPER register. When writing to
PMXEVTYPER, call kvm_pmu_set_counter_event_type to create a perf_event
for the selected event type.
Signed-off-by: Shannon Zhao <shannon.zhao@linaro.org>
---
arch/arm64/kvm/sys_regs.c | 19 +++++++++++++++++--
1 file changed, 17 insertions(+), 2 deletions(-)
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index d49657a..605972e 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -488,6 +488,13 @@ static bool access_pmu_regs(struct kvm_vcpu *vcpu,
if (p->is_write) {
switch (r->reg) {
+ case PMXEVTYPER_EL0: {
+ val = vcpu_sys_reg(vcpu, PMSELR_EL0);
+ kvm_pmu_set_counter_event_type(vcpu,
+ *vcpu_reg(vcpu, p->Rt),
+ val);
+ break;
+ }
case PMCR_EL0: {
/* Only update writeable bits of PMCR */
val = vcpu_sys_reg(vcpu, r->reg);
@@ -728,7 +735,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
trap_raz_wi },
/* PMXEVTYPER_EL0 */
{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b001),
- trap_raz_wi },
+ access_pmu_regs, reset_unknown, PMXEVTYPER_EL0 },
/* PMXEVCNTR_EL0 */
{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b010),
trap_raz_wi },
@@ -944,6 +951,13 @@ static bool access_pmu_cp15_regs(struct kvm_vcpu *vcpu,
if (p->is_write) {
switch (r->reg) {
+ case c9_PMXEVTYPER: {
+ val = vcpu_cp15(vcpu, c9_PMSELR);
+ kvm_pmu_set_counter_event_type(vcpu,
+ *vcpu_reg(vcpu, p->Rt),
+ val);
+ break;
+ }
case c9_PMCR: {
/* Only update writeable bits of PMCR */
val = vcpu_cp15(vcpu, r->reg);
@@ -1006,7 +1020,8 @@ static const struct sys_reg_desc cp15_regs[] = {
{ Op1( 0), CRn( 9), CRm(12), Op2( 7), access_pmu_cp15_regs,
reset_pmceid, c9_PMCEID1 },
{ Op1( 0), CRn( 9), CRm(13), Op2( 0), trap_raz_wi },
- { Op1( 0), CRn( 9), CRm(13), Op2( 1), trap_raz_wi },
+ { Op1( 0), CRn( 9), CRm(13), Op2( 1), access_pmu_cp15_regs,
+ reset_unknown_cp15, c9_PMXEVTYPER },
{ Op1( 0), CRn( 9), CRm(13), Op2( 2), trap_raz_wi },
{ Op1( 0), CRn( 9), CRm(14), Op2( 0), trap_raz_wi },
{ Op1( 0), CRn( 9), CRm(14), Op2( 1), trap_raz_wi },
--
2.1.4
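For context, the trap handled above corresponds to the guest programming a
counter through the selector, roughly as follows (guest-side sketch; the
function name and the isb() placement follow common practice rather than
anything in this patch):

static void guest_program_event(u32 counter_idx, u32 evtyper)
{
	/* Select the counter, then set its event type. Both writes trap
	 * to the PMU access handlers added in this series. */
	asm volatile("msr pmselr_el0, %0" : : "r" ((u64)counter_idx));
	asm volatile("isb");
	asm volatile("msr pmxevtyper_el0, %0" : : "r" ((u64)evtyper));
}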
* [PATCH v3 09/20] KVM: ARM64: Add reset and access handlers for PMXEVCNTR register
2015-09-24 22:31 [PATCH v3 00/20] KVM: ARM64: Add guest PMU support Shannon Zhao
` (7 preceding siblings ...)
2015-09-24 22:31 ` [PATCH v3 08/20] KVM: ARM64: Add reset and access handlers for PMXEVTYPER register Shannon Zhao
@ 2015-09-24 22:31 ` Shannon Zhao
2015-09-24 22:31 ` [PATCH v3 10/20] KVM: ARM64: Add reset and access handlers for PMCCNTR register Shannon Zhao
` (12 subsequent siblings)
21 siblings, 0 replies; 37+ messages in thread
From: Shannon Zhao @ 2015-09-24 22:31 UTC (permalink / raw)
To: linux-arm-kernel
Since the reset value of PMXEVCNTR is UNKNOWN, use reset_unknown for
its reset handler. Add an access handler which emulates writing and
reading the PMXEVCNTR register. When reading PMXEVCNTR, call
perf_event_read_value to get the count value of the perf event.
Signed-off-by: Shannon Zhao <shannon.zhao@linaro.org>
---
arch/arm64/kvm/sys_regs.c | 41 +++++++++++++++++++++++++++++++++++++----
1 file changed, 37 insertions(+), 4 deletions(-)
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 605972e..e7f6058 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -488,6 +488,12 @@ static bool access_pmu_regs(struct kvm_vcpu *vcpu,
if (p->is_write) {
switch (r->reg) {
+ case PMXEVCNTR_EL0: {
+ val = PMEVCNTR0_EL0 + vcpu_sys_reg(vcpu, PMSELR_EL0);
+ vcpu_sys_reg(vcpu, val) =
+ *vcpu_reg(vcpu, p->Rt) & 0xffffffffUL;
+ break;
+ }
case PMXEVTYPER_EL0: {
val = vcpu_sys_reg(vcpu, PMSELR_EL0);
kvm_pmu_set_counter_event_type(vcpu,
@@ -511,7 +517,17 @@ static bool access_pmu_regs(struct kvm_vcpu *vcpu,
break;
}
} else {
- *vcpu_reg(vcpu, p->Rt) = vcpu_sys_reg(vcpu, r->reg);
+ switch (r->reg) {
+ case PMXEVCNTR_EL0: {
+ val = kvm_pmu_get_counter_value(vcpu,
+ vcpu_sys_reg(vcpu, PMSELR_EL0));
+ *vcpu_reg(vcpu, p->Rt) = val;
+ break;
+ }
+ default:
+ *vcpu_reg(vcpu, p->Rt) = vcpu_sys_reg(vcpu, r->reg);
+ break;
+ }
}
return true;
@@ -738,7 +754,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
access_pmu_regs, reset_unknown, PMXEVTYPER_EL0 },
/* PMXEVCNTR_EL0 */
{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b010),
- trap_raz_wi },
+ access_pmu_regs, reset_unknown, PMXEVCNTR_EL0 },
/* PMUSERENR_EL0 */
{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b000),
trap_raz_wi },
@@ -951,6 +967,12 @@ static bool access_pmu_cp15_regs(struct kvm_vcpu *vcpu,
if (p->is_write) {
switch (r->reg) {
+ case c9_PMXEVCNTR: {
+ val = c14_PMEVCNTR0 + vcpu_cp15(vcpu, c9_PMSELR);
+ vcpu_cp15(vcpu, val) =
+ *vcpu_reg(vcpu, p->Rt) & 0xffffffffUL;
+ break;
+ }
case c9_PMXEVTYPER: {
val = vcpu_cp15(vcpu, c9_PMSELR);
kvm_pmu_set_counter_event_type(vcpu,
@@ -974,7 +996,17 @@ static bool access_pmu_cp15_regs(struct kvm_vcpu *vcpu,
break;
}
} else {
- *vcpu_reg(vcpu, p->Rt) = vcpu_cp15(vcpu, r->reg);
+ switch (r->reg) {
+ case c9_PMXEVCNTR: {
+ val = kvm_pmu_get_counter_value(vcpu,
+ vcpu_cp15(vcpu, c9_PMSELR));
+ *vcpu_reg(vcpu, p->Rt) = val;
+ break;
+ }
+ default:
+ *vcpu_reg(vcpu, p->Rt) = vcpu_cp15(vcpu, r->reg);
+ break;
+ }
}
return true;
@@ -1022,7 +1054,8 @@ static const struct sys_reg_desc cp15_regs[] = {
{ Op1( 0), CRn( 9), CRm(13), Op2( 0), trap_raz_wi },
{ Op1( 0), CRn( 9), CRm(13), Op2( 1), access_pmu_cp15_regs,
reset_unknown_cp15, c9_PMXEVTYPER },
- { Op1( 0), CRn( 9), CRm(13), Op2( 2), trap_raz_wi },
+ { Op1( 0), CRn( 9), CRm(13), Op2( 2), access_pmu_cp15_regs,
+ reset_unknown_cp15, c9_PMXEVCNTR },
{ Op1( 0), CRn( 9), CRm(14), Op2( 0), trap_raz_wi },
{ Op1( 0), CRn( 9), CRm(14), Op2( 1), trap_raz_wi },
{ Op1( 0), CRn( 9), CRm(14), Op2( 2), trap_raz_wi },
--
2.1.4
^ permalink raw reply related [flat|nested] 37+ messages in thread
* [PATCH v3 10/20] KVM: ARM64: Add reset and access handlers for PMCCNTR register
2015-09-24 22:31 [PATCH v3 00/20] KVM: ARM64: Add guest PMU support Shannon Zhao
` (8 preceding siblings ...)
2015-09-24 22:31 ` [PATCH v3 09/20] KVM: ARM64: Add reset and access handlers for PMXEVCNTR register Shannon Zhao
@ 2015-09-24 22:31 ` Shannon Zhao
2015-10-16 15:06 ` Wei Huang
2015-09-24 22:31 ` [PATCH v3 11/20] KVM: ARM64: Add reset and access handlers for PMCNTENSET and PMCNTENCLR register Shannon Zhao
` (11 subsequent siblings)
21 siblings, 1 reply; 37+ messages in thread
From: Shannon Zhao @ 2015-09-24 22:31 UTC (permalink / raw)
To: linux-arm-kernel
Since the reset value of PMCCNTR is UNKNOWN, use reset_unknown for its
reset handler. Add a new case to emulate reads of the PMCCNTR register.
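For reference, the cycle counter is modelled here as the last counter slot,
after the 31 event counters; a trivial standalone sketch of that index mapping
(assuming ARMV8_MAX_COUNTERS is 32):

#include <stdio.h>

#define ARMV8_MAX_COUNTERS 32

int main(void)
{
    /* PMCCNTR occupies the last slot, after PMEVCNTR0..PMEVCNTR30. */
    int cycle_idx = ARMV8_MAX_COUNTERS - 1;

    /* When PMSELR_EL0.SEL == 31, PMXEVTYPER accesses PMCCFILTR_EL0. */
    printf("cycle counter index = %d\n", cycle_idx);
    return 0;
}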
Signed-off-by: Shannon Zhao <shannon.zhao@linaro.org>
---
arch/arm64/kvm/sys_regs.c | 17 +++++++++++++++--
1 file changed, 15 insertions(+), 2 deletions(-)
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index e7f6058..c38c2de 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -518,6 +518,12 @@ static bool access_pmu_regs(struct kvm_vcpu *vcpu,
}
} else {
switch (r->reg) {
+ case PMCCNTR_EL0: {
+ val = kvm_pmu_get_counter_value(vcpu,
+ ARMV8_MAX_COUNTERS - 1);
+ *vcpu_reg(vcpu, p->Rt) = val;
+ break;
+ }
case PMXEVCNTR_EL0: {
val = kvm_pmu_get_counter_value(vcpu,
vcpu_sys_reg(vcpu, PMSELR_EL0));
@@ -748,7 +754,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
access_pmu_regs, reset_pmceid, PMCEID1_EL0 },
/* PMCCNTR_EL0 */
{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b000),
- trap_raz_wi },
+ access_pmu_regs, reset_unknown, PMCCNTR_EL0 },
/* PMXEVTYPER_EL0 */
{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b001),
access_pmu_regs, reset_unknown, PMXEVTYPER_EL0 },
@@ -997,6 +1003,12 @@ static bool access_pmu_cp15_regs(struct kvm_vcpu *vcpu,
}
} else {
switch (r->reg) {
+ case c9_PMCCNTR: {
+ val = kvm_pmu_get_counter_value(vcpu,
+ ARMV8_MAX_COUNTERS - 1);
+ *vcpu_reg(vcpu, p->Rt) = val;
+ break;
+ }
case c9_PMXEVCNTR: {
val = kvm_pmu_get_counter_value(vcpu,
vcpu_cp15(vcpu, c9_PMSELR));
@@ -1051,7 +1063,8 @@ static const struct sys_reg_desc cp15_regs[] = {
reset_pmceid, c9_PMCEID0 },
{ Op1( 0), CRn( 9), CRm(12), Op2( 7), access_pmu_cp15_regs,
reset_pmceid, c9_PMCEID1 },
- { Op1( 0), CRn( 9), CRm(13), Op2( 0), trap_raz_wi },
+ { Op1( 0), CRn( 9), CRm(13), Op2( 0), access_pmu_cp15_regs,
+ reset_unknown_cp15, c9_PMCCNTR },
{ Op1( 0), CRn( 9), CRm(13), Op2( 1), access_pmu_cp15_regs,
reset_unknown_cp15, c9_PMXEVTYPER },
{ Op1( 0), CRn( 9), CRm(13), Op2( 2), access_pmu_cp15_regs,
--
2.1.4
^ permalink raw reply related [flat|nested] 37+ messages in thread
* [PATCH v3 10/20] KVM: ARM64: Add reset and access handlers for PMCCNTR register
2015-09-24 22:31 ` [PATCH v3 10/20] KVM: ARM64: Add reset and access handlers for PMCCNTR register Shannon Zhao
@ 2015-10-16 15:06 ` Wei Huang
2015-10-21 6:48 ` Shannon Zhao
0 siblings, 1 reply; 37+ messages in thread
From: Wei Huang @ 2015-10-16 15:06 UTC (permalink / raw)
To: linux-arm-kernel
On 09/24/2015 05:31 PM, Shannon Zhao wrote:
> Since the reset value of PMCCNTR is UNKNOWN, use reset_unknown for its
> reset handler. Add a new case to emulate reading to PMCCNTR register.
>
> Signed-off-by: Shannon Zhao <shannon.zhao@linaro.org>
> ---
> arch/arm64/kvm/sys_regs.c | 17 +++++++++++++++--
> 1 file changed, 15 insertions(+), 2 deletions(-)
>
> diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
> index e7f6058..c38c2de 100644
> --- a/arch/arm64/kvm/sys_regs.c
> +++ b/arch/arm64/kvm/sys_regs.c
> @@ -518,6 +518,12 @@ static bool access_pmu_regs(struct kvm_vcpu *vcpu,
> }
> } else {
> switch (r->reg) {
> + case PMCCNTR_EL0: {
> + val = kvm_pmu_get_counter_value(vcpu,
> + ARMV8_MAX_COUNTERS - 1);
> + *vcpu_reg(vcpu, p->Rt) = val;
> + break;
> + }
> case PMXEVCNTR_EL0: {
> val = kvm_pmu_get_counter_value(vcpu,
> vcpu_sys_reg(vcpu, PMSELR_EL0));
> @@ -748,7 +754,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
> access_pmu_regs, reset_pmceid, PMCEID1_EL0 },
> /* PMCCNTR_EL0 */
> { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b000),
> - trap_raz_wi },
> + access_pmu_regs, reset_unknown, PMCCNTR_EL0 },
> /* PMXEVTYPER_EL0 */
> { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b001),
> access_pmu_regs, reset_unknown, PMXEVTYPER_EL0 },
> @@ -997,6 +1003,12 @@ static bool access_pmu_cp15_regs(struct kvm_vcpu *vcpu,
> }
> } else {
> switch (r->reg) {
> + case c9_PMCCNTR: {
> + val = kvm_pmu_get_counter_value(vcpu,
> + ARMV8_MAX_COUNTERS - 1);
PMCCNTR is for the cycle counter. There is a filter register, PMCCFILTR_EL0,
associated with it. When kvm_pmu_set_counter_event_type() is called, I
didn't see this filter config being used in the perf_event_attr when the
perf_event is created.
> + *vcpu_reg(vcpu, p->Rt) = val;
> + break;
> + }
> case c9_PMXEVCNTR: {
> val = kvm_pmu_get_counter_value(vcpu,
> vcpu_cp15(vcpu, c9_PMSELR));
> @@ -1051,7 +1063,8 @@ static const struct sys_reg_desc cp15_regs[] = {
> reset_pmceid, c9_PMCEID0 },
> { Op1( 0), CRn( 9), CRm(12), Op2( 7), access_pmu_cp15_regs,
> reset_pmceid, c9_PMCEID1 },
> - { Op1( 0), CRn( 9), CRm(13), Op2( 0), trap_raz_wi },
> + { Op1( 0), CRn( 9), CRm(13), Op2( 0), access_pmu_cp15_regs,
> + reset_unknown_cp15, c9_PMCCNTR },
> { Op1( 0), CRn( 9), CRm(13), Op2( 1), access_pmu_cp15_regs,
> reset_unknown_cp15, c9_PMXEVTYPER },
> { Op1( 0), CRn( 9), CRm(13), Op2( 2), access_pmu_cp15_regs,
>
^ permalink raw reply [flat|nested] 37+ messages in thread
* [PATCH v3 10/20] KVM: ARM64: Add reset and access handlers for PMCCNTR register
2015-10-16 15:06 ` Wei Huang
@ 2015-10-21 6:48 ` Shannon Zhao
0 siblings, 0 replies; 37+ messages in thread
From: Shannon Zhao @ 2015-10-21 6:48 UTC (permalink / raw)
To: linux-arm-kernel
On 2015/10/16 23:06, Wei Huang wrote:
>
>
> On 09/24/2015 05:31 PM, Shannon Zhao wrote:
>> Since the reset value of PMCCNTR is UNKNOWN, use reset_unknown for its
>> reset handler. Add a new case to emulate reading to PMCCNTR register.
>>
>> Signed-off-by: Shannon Zhao <shannon.zhao@linaro.org>
>> ---
>> arch/arm64/kvm/sys_regs.c | 17 +++++++++++++++--
>> 1 file changed, 15 insertions(+), 2 deletions(-)
>>
>> diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
>> index e7f6058..c38c2de 100644
>> --- a/arch/arm64/kvm/sys_regs.c
>> +++ b/arch/arm64/kvm/sys_regs.c
>> @@ -518,6 +518,12 @@ static bool access_pmu_regs(struct kvm_vcpu *vcpu,
>> }
>> } else {
>> switch (r->reg) {
>> + case PMCCNTR_EL0: {
>> + val = kvm_pmu_get_counter_value(vcpu,
>> + ARMV8_MAX_COUNTERS - 1);
>> + *vcpu_reg(vcpu, p->Rt) = val;
>> + break;
>> + }
>> case PMXEVCNTR_EL0: {
>> val = kvm_pmu_get_counter_value(vcpu,
>> vcpu_sys_reg(vcpu, PMSELR_EL0));
>> @@ -748,7 +754,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
>> access_pmu_regs, reset_pmceid, PMCEID1_EL0 },
>> /* PMCCNTR_EL0 */
>> { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b000),
>> - trap_raz_wi },
>> + access_pmu_regs, reset_unknown, PMCCNTR_EL0 },
>> /* PMXEVTYPER_EL0 */
>> { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b001),
>> access_pmu_regs, reset_unknown, PMXEVTYPER_EL0 },
>> @@ -997,6 +1003,12 @@ static bool access_pmu_cp15_regs(struct kvm_vcpu *vcpu,
>> }
>> } else {
>> switch (r->reg) {
>> + case c9_PMCCNTR: {
>> + val = kvm_pmu_get_counter_value(vcpu,
>> + ARMV8_MAX_COUNTERS - 1);
>
> PMCCNTR is for the cycle counter. There is a filter register, PMCCFILTR_EL0,
> associated with it. When kvm_pmu_set_counter_event_type() is called, I
> didn't see this filter config being used in the perf_event_attr when the
> perf_event is created.
According to the spec, for PMXEVTYPER_EL0 it says "When PMSELR_EL0.SEL
selects the cycle counter, this accesses PMCCFILTR_EL0." So within
kvm_pmu_set_counter_event_type, I configure the perf_event_attr based on
the bits of PMXEVTYPER_EL0 and only handle the P bit (exclude EL1) and
the U bit (exclude EL0), since a KVM guest doesn't see EL2 and EL3.
See patch 07/20 :
+ attr.exclude_user = data & ARMV8_EXCLUDE_EL0 ? 1 : 0;
+ attr.exclude_kernel = data & ARMV8_EXCLUDE_EL1 ? 1 : 0;
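Spelled out, that mapping is (fake_attr stands in for perf_event_attr; only
the two filter bits handled by the series are shown):

#include <stdint.h>
#include <stdio.h>

#define ARMV8_EXCLUDE_EL1 (1U << 31)    /* P bit: do not count at EL1 */
#define ARMV8_EXCLUDE_EL0 (1U << 30)    /* U bit: do not count at EL0 */

struct fake_attr {                      /* stand-in for perf_event_attr */
    int exclude_user;
    int exclude_kernel;
};

static void map_filter(uint32_t evtyper, struct fake_attr *attr)
{
    attr->exclude_user   = (evtyper & ARMV8_EXCLUDE_EL0) ? 1 : 0;
    attr->exclude_kernel = (evtyper & ARMV8_EXCLUDE_EL1) ? 1 : 0;
}

int main(void)
{
    struct fake_attr attr;

    /* P set, U clear: count at EL0 only. */
    map_filter(ARMV8_EXCLUDE_EL1, &attr);
    printf("exclude_user=%d exclude_kernel=%d\n",
           attr.exclude_user, attr.exclude_kernel);
    return 0;
}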
--
Shannon
^ permalink raw reply [flat|nested] 37+ messages in thread
* [PATCH v3 11/20] KVM: ARM64: Add reset and access handlers for PMCNTENSET and PMCNTENCLR register
2015-09-24 22:31 [PATCH v3 00/20] KVM: ARM64: Add guest PMU support Shannon Zhao
` (9 preceding siblings ...)
2015-09-24 22:31 ` [PATCH v3 10/20] KVM: ARM64: Add reset and access handlers for PMCCNTR register Shannon Zhao
@ 2015-09-24 22:31 ` Shannon Zhao
2015-09-24 22:31 ` [PATCH v3 12/20] KVM: ARM64: Add reset and access handlers for PMINTENSET and PMINTENCLR register Shannon Zhao
` (10 subsequent siblings)
21 siblings, 0 replies; 37+ messages in thread
From: Shannon Zhao @ 2015-09-24 22:31 UTC (permalink / raw)
To: linux-arm-kernel
Since the reset values of PMCNTENSET and PMCNTENCLR are UNKNOWN, use
reset_unknown for their reset handlers. Add new cases to emulate writes
to the PMCNTENSET and PMCNTENCLR registers.
When writing to PMCNTENSET, call perf_event_enable to enable the perf
event. When writing to PMCNTENCLR, call perf_event_disable to disable
the perf event.
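For illustration, the SET/CLR pair emulated below amounts to one enable mask
with two write views; a minimal standalone sketch (illustrative only — the
patch itself mirrors the mask into both vcpu register slots):

#include <stdint.h>
#include <stdio.h>

static uint32_t cnten;  /* the single enable mask behind both views */

/* Writing 1 bits to the SET view enables counters (and would enable the
 * backing perf events); writing 1 bits to the CLR view disables them. */
static void write_pmcntenset(uint32_t val) { cnten |= val; }
static void write_pmcntenclr(uint32_t val) { cnten &= ~val; }

int main(void)
{
    write_pmcntenset(0x5);      /* enable counters 0 and 2 */
    write_pmcntenclr(0x1);      /* disable counter 0 */
    printf("enabled mask = 0x%x\n", cnten);     /* 0x4 */
    return 0;
}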
Signed-off-by: Shannon Zhao <shannon.zhao@linaro.org>
---
arch/arm64/kvm/sys_regs.c | 46 +++++++++++++++++++++++++++++++++++++++++----
include/kvm/arm_pmu.h | 4 ++++
virt/kvm/arm/pmu.c | 48 +++++++++++++++++++++++++++++++++++++++++++++++
3 files changed, 94 insertions(+), 4 deletions(-)
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index c38c2de..98da5a2 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -501,6 +501,24 @@ static bool access_pmu_regs(struct kvm_vcpu *vcpu,
val);
break;
}
+ case PMCNTENSET_EL0: {
+ val = *vcpu_reg(vcpu, p->Rt);
+ kvm_pmu_enable_counter(vcpu, val);
+ /* A 1 bit in PMCNTENSET_EL0 and PMCNTENCLR_EL0 means the
+ * corresponding counter is enabled */
+ vcpu_sys_reg(vcpu, r->reg) |= val;
+ vcpu_sys_reg(vcpu, PMCNTENCLR_EL0) |= val;
+ break;
+ }
+ case PMCNTENCLR_EL0: {
+ val = *vcpu_reg(vcpu, p->Rt);
+ kvm_pmu_disable_counter(vcpu, val);
+ /* A 0 bit in PMCNTENSET_EL0 and PMCNTENCLR_EL0 means the
+ * corresponding counter is disabled */
+ vcpu_sys_reg(vcpu, r->reg) &= ~val;
+ vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= ~val;
+ break;
+ }
case PMCR_EL0: {
/* Only update writeable bits of PMCR */
val = vcpu_sys_reg(vcpu, r->reg);
@@ -733,10 +751,10 @@ static const struct sys_reg_desc sys_reg_descs[] = {
access_pmu_regs, reset_pmcr, PMCR_EL0, },
/* PMCNTENSET_EL0 */
{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b001),
- trap_raz_wi },
+ access_pmu_regs, reset_unknown, PMCNTENSET_EL0 },
/* PMCNTENCLR_EL0 */
{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b010),
- trap_raz_wi },
+ access_pmu_regs, reset_unknown, PMCNTENCLR_EL0 },
/* PMOVSCLR_EL0 */
{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b011),
trap_raz_wi },
@@ -986,6 +1004,24 @@ static bool access_pmu_cp15_regs(struct kvm_vcpu *vcpu,
val);
break;
}
+ case c9_PMCNTENSET: {
+ val = *vcpu_reg(vcpu, p->Rt);
+ kvm_pmu_enable_counter(vcpu, val);
+ /* A 1 bit in PMCNTENSET_EL0 and PMCNTENCLR_EL0 means the
+ * corresponding counter is enabled */
+ vcpu_cp15(vcpu, r->reg) |= val;
+ vcpu_cp15(vcpu, c9_PMCNTENCLR) |= val;
+ break;
+ }
+ case c9_PMCNTENCLR: {
+ val = *vcpu_reg(vcpu, p->Rt);
+ kvm_pmu_disable_counter(vcpu, val);
+ /* A 0 bit in PMCNTENSET_EL0 and PMCNTENCLR_EL0 means the
+ * corresponding counter is disabled */
+ vcpu_cp15(vcpu, r->reg) &= ~val;
+ vcpu_cp15(vcpu, c9_PMCNTENSET) &= ~val;
+ break;
+ }
case c9_PMCR: {
/* Only update writeable bits of PMCR */
val = vcpu_cp15(vcpu, r->reg);
@@ -1054,8 +1090,10 @@ static const struct sys_reg_desc cp15_regs[] = {
/* PMU */
{ Op1( 0), CRn( 9), CRm(12), Op2( 0), access_pmu_cp15_regs,
reset_pmcr, c9_PMCR },
- { Op1( 0), CRn( 9), CRm(12), Op2( 1), trap_raz_wi },
- { Op1( 0), CRn( 9), CRm(12), Op2( 2), trap_raz_wi },
+ { Op1( 0), CRn( 9), CRm(12), Op2( 1), access_pmu_cp15_regs,
+ reset_unknown_cp15, c9_PMCNTENSET },
+ { Op1( 0), CRn( 9), CRm(12), Op2( 2), access_pmu_cp15_regs,
+ reset_unknown_cp15, c9_PMCNTENCLR },
{ Op1( 0), CRn( 9), CRm(12), Op2( 3), trap_raz_wi },
{ Op1( 0), CRn( 9), CRm(12), Op2( 5), access_pmu_cp15_regs,
reset_unknown_cp15, c9_PMSELR },
diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h
index b48cdc6..9b4ee5e 100644
--- a/include/kvm/arm_pmu.h
+++ b/include/kvm/arm_pmu.h
@@ -39,6 +39,8 @@ struct kvm_pmu {
#ifdef CONFIG_KVM_ARM_PMU
unsigned long kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u32 select_idx);
+void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u32 val);
+void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u32 val);
void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u32 data,
u32 select_idx);
#else
@@ -46,6 +48,8 @@ unsigned long kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u32 select_idx)
{
return 0;
}
+void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u32 val) {}
+void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u32 val) {}
void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u32 data,
u32 select_idx) {}
#endif
diff --git a/virt/kvm/arm/pmu.c b/virt/kvm/arm/pmu.c
index 002ec79..46145d1 100644
--- a/virt/kvm/arm/pmu.c
+++ b/virt/kvm/arm/pmu.c
@@ -86,6 +86,54 @@ unsigned long kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u32 select_idx)
}
/**
+ * kvm_pmu_enable_counter - enable selected PMU counter
+ * @vcpu: The vcpu pointer
+ * @val: the value guest writes to PMCNTENSET register
+ *
+ * Call perf_event_enable to start counting the perf event
+ */
+void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u32 val)
+{
+ int i;
+ struct kvm_pmu *pmu = &vcpu->arch.pmu;
+ struct kvm_pmc *pmc;
+
+ for (i = 0; i < 32; i++) {
+ if ((val >> i) & 0x1) {
+ pmc = &pmu->pmc[i];
+ if (pmc->perf_event) {
+ perf_event_enable(pmc->perf_event);
+ if (pmc->perf_event->state
+ != PERF_EVENT_STATE_ACTIVE)
+ kvm_debug("fail to enable event\n");
+ }
+ }
+ }
+}
+
+/**
+ * kvm_pmu_disable_counter - disable selected PMU counter
+ * @vcpu: The vcpu pointer
+ * @val: the value guest writes to PMCNTENCLR register
+ *
+ * Call perf_event_disable to stop counting the perf event
+ */
+void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u32 val)
+{
+ int i;
+ struct kvm_pmu *pmu = &vcpu->arch.pmu;
+ struct kvm_pmc *pmc;
+
+ for (i = 0; i < 32; i++) {
+ if ((val >> i) & 0x1) {
+ pmc = &pmu->pmc[i];
+ if (pmc->perf_event)
+ perf_event_disable(pmc->perf_event);
+ }
+ }
+}
+
+/**
* kvm_pmu_set_counter_event_type - set selected counter to monitor some event
* @vcpu: The vcpu pointer
* @data: The data guest writes to PMXEVTYPER_EL0
--
2.1.4
^ permalink raw reply related [flat|nested] 37+ messages in thread
* [PATCH v3 12/20] KVM: ARM64: Add reset and access handlers for PMINTENSET and PMINTENCLR register
2015-09-24 22:31 [PATCH v3 00/20] KVM: ARM64: Add guest PMU support Shannon Zhao
` (10 preceding siblings ...)
2015-09-24 22:31 ` [PATCH v3 11/20] KVM: ARM64: Add reset and access handlers for PMCNTENSET and PMCNTENCLR register Shannon Zhao
@ 2015-09-24 22:31 ` Shannon Zhao
2015-09-24 22:31 ` [PATCH v3 13/20] KVM: ARM64: Add reset and access handlers for PMOVSSET and PMOVSCLR register Shannon Zhao
` (9 subsequent siblings)
21 siblings, 0 replies; 37+ messages in thread
From: Shannon Zhao @ 2015-09-24 22:31 UTC (permalink / raw)
To: linux-arm-kernel
Since the reset values of PMINTENSET and PMINTENCLR are UNKNOWN, use
reset_unknown for their reset handlers. Add new cases to emulate writes
to the PMINTENSET and PMINTENCLR registers.
Signed-off-by: Shannon Zhao <shannon.zhao@linaro.org>
---
arch/arm64/kvm/sys_regs.c | 34 ++++++++++++++++++++++++++++++----
1 file changed, 30 insertions(+), 4 deletions(-)
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 98da5a2..e52325c 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -519,6 +519,18 @@ static bool access_pmu_regs(struct kvm_vcpu *vcpu,
vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= ~val;
break;
}
+ case PMINTENSET_EL1: {
+ val = *vcpu_reg(vcpu, p->Rt);
+ vcpu_sys_reg(vcpu, r->reg) |= val;
+ vcpu_sys_reg(vcpu, PMINTENCLR_EL1) |= val;
+ break;
+ }
+ case PMINTENCLR_EL1: {
+ val = *vcpu_reg(vcpu, p->Rt);
+ vcpu_sys_reg(vcpu, r->reg) &= ~val;
+ vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= ~val;
+ break;
+ }
case PMCR_EL0: {
/* Only update writeable bits of PMCR */
val = vcpu_sys_reg(vcpu, r->reg);
@@ -708,10 +720,10 @@ static const struct sys_reg_desc sys_reg_descs[] = {
/* PMINTENSET_EL1 */
{ Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b001),
- trap_raz_wi },
+ access_pmu_regs, reset_unknown, PMINTENSET_EL1 },
/* PMINTENCLR_EL1 */
{ Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b010),
- trap_raz_wi },
+ access_pmu_regs, reset_unknown, PMINTENCLR_EL1 },
/* MAIR_EL1 */
{ Op0(0b11), Op1(0b000), CRn(0b1010), CRm(0b0010), Op2(0b000),
@@ -1022,6 +1034,18 @@ static bool access_pmu_cp15_regs(struct kvm_vcpu *vcpu,
vcpu_cp15(vcpu, c9_PMCNTENSET) &= ~val;
break;
}
+ case c9_PMINTENSET: {
+ val = *vcpu_reg(vcpu, p->Rt);
+ vcpu_cp15(vcpu, r->reg) |= val;
+ vcpu_cp15(vcpu, c9_PMINTENCLR) |= val;
+ break;
+ }
+ case c9_PMINTENCLR: {
+ val = *vcpu_reg(vcpu, p->Rt);
+ vcpu_cp15(vcpu, r->reg) &= ~val;
+ vcpu_cp15(vcpu, c9_PMINTENSET) &= ~val;
+ break;
+ }
case c9_PMCR: {
/* Only update writeable bits of PMCR */
val = vcpu_cp15(vcpu, r->reg);
@@ -1108,8 +1132,10 @@ static const struct sys_reg_desc cp15_regs[] = {
{ Op1( 0), CRn( 9), CRm(13), Op2( 2), access_pmu_cp15_regs,
reset_unknown_cp15, c9_PMXEVCNTR },
{ Op1( 0), CRn( 9), CRm(14), Op2( 0), trap_raz_wi },
- { Op1( 0), CRn( 9), CRm(14), Op2( 1), trap_raz_wi },
- { Op1( 0), CRn( 9), CRm(14), Op2( 2), trap_raz_wi },
+ { Op1( 0), CRn( 9), CRm(14), Op2( 1), access_pmu_cp15_regs,
+ reset_unknown_cp15, c9_PMINTENSET },
+ { Op1( 0), CRn( 9), CRm(14), Op2( 2), access_pmu_cp15_regs,
+ reset_unknown_cp15, c9_PMINTENCLR },
{ Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, c10_PRRR },
{ Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, c10_NMRR },
--
2.1.4
^ permalink raw reply related [flat|nested] 37+ messages in thread
* [PATCH v3 13/20] KVM: ARM64: Add reset and access handlers for PMOVSSET and PMOVSCLR register
2015-09-24 22:31 [PATCH v3 00/20] KVM: ARM64: Add guest PMU support Shannon Zhao
` (11 preceding siblings ...)
2015-09-24 22:31 ` [PATCH v3 12/20] KVM: ARM64: Add reset and access handlers for PMINTENSET and PMINTENCLR register Shannon Zhao
@ 2015-09-24 22:31 ` Shannon Zhao
2015-09-24 22:31 ` [PATCH v3 14/20] KVM: ARM64: Add reset and access handlers for PMUSERENR register Shannon Zhao
` (8 subsequent siblings)
21 siblings, 0 replies; 37+ messages in thread
From: Shannon Zhao @ 2015-09-24 22:31 UTC (permalink / raw)
To: linux-arm-kernel
Since the reset values of PMOVSSET and PMOVSCLR are UNKNOWN, use
reset_unknown for their reset handlers. Add new cases to emulate writes
to the PMOVSSET and PMOVSCLR registers.
Signed-off-by: Shannon Zhao <shannon.zhao@linaro.org>
---
arch/arm64/kvm/sys_regs.c | 33 ++++++++++++++++++++++++++++++---
1 file changed, 30 insertions(+), 3 deletions(-)
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index e52325c..6f76dc8 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -531,6 +531,18 @@ static bool access_pmu_regs(struct kvm_vcpu *vcpu,
vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= ~val;
break;
}
+ case PMOVSSET_EL0: {
+ val = *vcpu_reg(vcpu, p->Rt);
+ vcpu_sys_reg(vcpu, r->reg) |= val;
+ vcpu_sys_reg(vcpu, PMOVSCLR_EL0) |= val;
+ break;
+ }
+ case PMOVSCLR_EL0: {
+ val = *vcpu_reg(vcpu, p->Rt);
+ vcpu_sys_reg(vcpu, r->reg) &= ~val;
+ vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= ~val;
+ break;
+ }
case PMCR_EL0: {
/* Only update writeable bits of PMCR */
val = vcpu_sys_reg(vcpu, r->reg);
@@ -769,7 +781,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
access_pmu_regs, reset_unknown, PMCNTENCLR_EL0 },
/* PMOVSCLR_EL0 */
{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b011),
- trap_raz_wi },
+ access_pmu_regs, reset_unknown, PMOVSCLR_EL0 },
/* PMSWINC_EL0 */
{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b100),
trap_raz_wi },
@@ -796,7 +808,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
trap_raz_wi },
/* PMOVSSET_EL0 */
{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b011),
- trap_raz_wi },
+ access_pmu_regs, reset_unknown, PMOVSSET_EL0 },
/* TPIDR_EL0 */
{ Op0(0b11), Op1(0b011), CRn(0b1101), CRm(0b0000), Op2(0b010),
@@ -1046,6 +1058,18 @@ static bool access_pmu_cp15_regs(struct kvm_vcpu *vcpu,
vcpu_cp15(vcpu, c9_PMINTENSET) &= ~val;
break;
}
+ case c9_PMOVSSET: {
+ val = *vcpu_reg(vcpu, p->Rt);
+ vcpu_cp15(vcpu, r->reg) |= val;
+ vcpu_cp15(vcpu, c9_PMOVSCLR) |= val;
+ break;
+ }
+ case c9_PMOVSCLR: {
+ val = *vcpu_reg(vcpu, p->Rt);
+ vcpu_cp15(vcpu, r->reg) &= ~val;
+ vcpu_cp15(vcpu, c9_PMOVSSET) &= ~val;
+ break;
+ }
case c9_PMCR: {
/* Only update writeable bits of PMCR */
val = vcpu_cp15(vcpu, r->reg);
@@ -1118,7 +1142,8 @@ static const struct sys_reg_desc cp15_regs[] = {
reset_unknown_cp15, c9_PMCNTENSET },
{ Op1( 0), CRn( 9), CRm(12), Op2( 2), access_pmu_cp15_regs,
reset_unknown_cp15, c9_PMCNTENCLR },
- { Op1( 0), CRn( 9), CRm(12), Op2( 3), trap_raz_wi },
+ { Op1( 0), CRn( 9), CRm(12), Op2( 3), access_pmu_cp15_regs,
+ reset_unknown_cp15, c9_PMOVSCLR },
{ Op1( 0), CRn( 9), CRm(12), Op2( 5), access_pmu_cp15_regs,
reset_unknown_cp15, c9_PMSELR },
{ Op1( 0), CRn( 9), CRm(12), Op2( 6), access_pmu_cp15_regs,
@@ -1136,6 +1161,8 @@ static const struct sys_reg_desc cp15_regs[] = {
reset_unknown_cp15, c9_PMINTENSET },
{ Op1( 0), CRn( 9), CRm(14), Op2( 2), access_pmu_cp15_regs,
reset_unknown_cp15, c9_PMINTENCLR },
+ { Op1( 0), CRn( 9), CRm(14), Op2( 3), access_pmu_cp15_regs,
+ reset_unknown_cp15, c9_PMOVSSET },
{ Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, c10_PRRR },
{ Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, c10_NMRR },
--
2.1.4
^ permalink raw reply related [flat|nested] 37+ messages in thread
* [PATCH v3 14/20] KVM: ARM64: Add reset and access handlers for PMUSERENR register
2015-09-24 22:31 [PATCH v3 00/20] KVM: ARM64: Add guest PMU support Shannon Zhao
` (12 preceding siblings ...)
2015-09-24 22:31 ` [PATCH v3 13/20] KVM: ARM64: Add reset and access handlers for PMOVSSET and PMOVSCLR register Shannon Zhao
@ 2015-09-24 22:31 ` Shannon Zhao
2015-09-24 22:31 ` [PATCH v3 15/20] KVM: ARM64: Add reset and access handlers for PMSWINC register Shannon Zhao
` (7 subsequent siblings)
21 siblings, 0 replies; 37+ messages in thread
From: Shannon Zhao @ 2015-09-24 22:31 UTC (permalink / raw)
To: linux-arm-kernel
The reset value of PMUSERENR_EL0 is UNKNOWN, so use reset_unknown. The
reset value of PMUSERENR, however, is zero, so use reset_val_cp15 with
zero as its reset handler.
Add a helper to reset CP15 registers to a specified value.
Signed-off-by: Shannon Zhao <shannon.zhao@linaro.org>
---
arch/arm64/kvm/sys_regs.c | 5 +++--
arch/arm64/kvm/sys_regs.h | 8 ++++++++
2 files changed, 11 insertions(+), 2 deletions(-)
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 6f76dc8..749e1e2 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -805,7 +805,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
access_pmu_regs, reset_unknown, PMXEVCNTR_EL0 },
/* PMUSERENR_EL0 */
{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b000),
- trap_raz_wi },
+ access_pmu_regs, reset_unknown, PMUSERENR_EL0 },
/* PMOVSSET_EL0 */
{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b011),
access_pmu_regs, reset_unknown, PMOVSSET_EL0 },
@@ -1156,7 +1156,8 @@ static const struct sys_reg_desc cp15_regs[] = {
reset_unknown_cp15, c9_PMXEVTYPER },
{ Op1( 0), CRn( 9), CRm(13), Op2( 2), access_pmu_cp15_regs,
reset_unknown_cp15, c9_PMXEVCNTR },
- { Op1( 0), CRn( 9), CRm(14), Op2( 0), trap_raz_wi },
+ { Op1( 0), CRn( 9), CRm(14), Op2( 0), access_pmu_cp15_regs,
+ reset_val_cp15, c9_PMUSERENR, 0 },
{ Op1( 0), CRn( 9), CRm(14), Op2( 1), access_pmu_cp15_regs,
reset_unknown_cp15, c9_PMINTENSET },
{ Op1( 0), CRn( 9), CRm(14), Op2( 2), access_pmu_cp15_regs,
diff --git a/arch/arm64/kvm/sys_regs.h b/arch/arm64/kvm/sys_regs.h
index 8afeff7..aba997d 100644
--- a/arch/arm64/kvm/sys_regs.h
+++ b/arch/arm64/kvm/sys_regs.h
@@ -125,6 +125,14 @@ static inline void reset_val(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r
vcpu_sys_reg(vcpu, r->reg) = r->val;
}
+static inline void reset_val_cp15(struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *r)
+{
+ BUG_ON(!r->reg);
+ BUG_ON(r->reg >= NR_SYS_REGS);
+ vcpu_cp15(vcpu, r->reg) = r->val;
+}
+
static inline int cmp_sys_reg(const struct sys_reg_desc *i1,
const struct sys_reg_desc *i2)
{
--
2.1.4
^ permalink raw reply related [flat|nested] 37+ messages in thread
* [PATCH v3 15/20] KVM: ARM64: Add reset and access handlers for PMSWINC register
2015-09-24 22:31 [PATCH v3 00/20] KVM: ARM64: Add guest PMU support Shannon Zhao
` (13 preceding siblings ...)
2015-09-24 22:31 ` [PATCH v3 14/20] KVM: ARM64: Add reset and access handlers for PMUSERENR register Shannon Zhao
@ 2015-09-24 22:31 ` Shannon Zhao
2015-10-16 15:25 ` Wei Huang
2015-09-24 22:31 ` [PATCH v3 16/20] KVM: ARM64: Add access handlers for PMEVCNTRn and PMEVTYPERn register Shannon Zhao
` (6 subsequent siblings)
21 siblings, 1 reply; 37+ messages in thread
From: Shannon Zhao @ 2015-09-24 22:31 UTC (permalink / raw)
To: linux-arm-kernel
Add an access handler which emulates writing and reading the PMSWINC
register, and add support for creating the software increment event.
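For illustration, the SW_INCR semantics implemented below can be sketched in
a standalone form (event number 0 is the software increment event; all names
are illustrative):

#include <stdint.h>
#include <stdio.h>

#define NR_EVENT_COUNTERS 31
#define SW_INCR_EVENT     0x0

static uint32_t evtyper[NR_EVENT_COUNTERS];     /* low bits: event number */
static uint32_t evcntr[NR_EVENT_COUNTERS];
static uint32_t cntenset;                       /* enable mask */

/* Writing 1 to bit n of PMSWINC increments counter n, but only if that
 * counter is enabled and programmed with the SW_INCR event. */
static void write_pmswinc(uint32_t val)
{
    for (int i = 0; i < NR_EVENT_COUNTERS; i++) {
        if (!((val >> i) & 1))
            continue;
        if ((evtyper[i] & 0x3ff) == SW_INCR_EVENT && ((cntenset >> i) & 1))
            evcntr[i]++;
    }
}

int main(void)
{
    cntenset = 1;               /* counter 0 enabled, programmed with event 0 */
    write_pmswinc(0x3);         /* try to bump counters 0 and 1 */
    printf("cnt0=%u cnt1=%u\n", evcntr[0], evcntr[1]);  /* 1 0 */
    return 0;
}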
Signed-off-by: Shannon Zhao <shannon.zhao@linaro.org>
---
arch/arm64/kvm/sys_regs.c | 18 +++++++++++++++++-
include/kvm/arm_pmu.h | 2 ++
virt/kvm/arm/pmu.c | 33 +++++++++++++++++++++++++++++++++
3 files changed, 52 insertions(+), 1 deletion(-)
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 749e1e2..dd790c7 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -543,6 +543,11 @@ static bool access_pmu_regs(struct kvm_vcpu *vcpu,
vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= ~val;
break;
}
+ case PMSWINC_EL0: {
+ val = *vcpu_reg(vcpu, p->Rt);
+ kvm_pmu_software_increment(vcpu, val);
+ break;
+ }
case PMCR_EL0: {
/* Only update writeable bits of PMCR */
val = vcpu_sys_reg(vcpu, r->reg);
@@ -572,6 +577,8 @@ static bool access_pmu_regs(struct kvm_vcpu *vcpu,
*vcpu_reg(vcpu, p->Rt) = val;
break;
}
+ case PMSWINC_EL0:
+ return read_zero(vcpu, p);
default:
*vcpu_reg(vcpu, p->Rt) = vcpu_sys_reg(vcpu, r->reg);
break;
@@ -784,7 +791,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
access_pmu_regs, reset_unknown, PMOVSCLR_EL0 },
/* PMSWINC_EL0 */
{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b100),
- trap_raz_wi },
+ access_pmu_regs, reset_unknown, PMSWINC_EL0 },
/* PMSELR_EL0 */
{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b101),
access_pmu_regs, reset_unknown, PMSELR_EL0 },
@@ -1070,6 +1077,11 @@ static bool access_pmu_cp15_regs(struct kvm_vcpu *vcpu,
vcpu_cp15(vcpu, c9_PMOVSSET) &= ~val;
break;
}
+ case c9_PMSWINC: {
+ val = *vcpu_reg(vcpu, p->Rt);
+ kvm_pmu_software_increment(vcpu, val);
+ break;
+ }
case c9_PMCR: {
/* Only update writeable bits of PMCR */
val = vcpu_cp15(vcpu, r->reg);
@@ -1099,6 +1111,8 @@ static bool access_pmu_cp15_regs(struct kvm_vcpu *vcpu,
*vcpu_reg(vcpu, p->Rt) = val;
break;
}
+ case c9_PMSWINC:
+ return read_zero(vcpu, p);
default:
*vcpu_reg(vcpu, p->Rt) = vcpu_cp15(vcpu, r->reg);
break;
@@ -1144,6 +1158,8 @@ static const struct sys_reg_desc cp15_regs[] = {
reset_unknown_cp15, c9_PMCNTENCLR },
{ Op1( 0), CRn( 9), CRm(12), Op2( 3), access_pmu_cp15_regs,
reset_unknown_cp15, c9_PMOVSCLR },
+ { Op1( 0), CRn( 9), CRm(12), Op2( 4), access_pmu_cp15_regs,
+ reset_unknown_cp15, c9_PMSWINC },
{ Op1( 0), CRn( 9), CRm(12), Op2( 5), access_pmu_cp15_regs,
reset_unknown_cp15, c9_PMSELR },
{ Op1( 0), CRn( 9), CRm(12), Op2( 6), access_pmu_cp15_regs,
diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h
index 9b4ee5e..9293133 100644
--- a/include/kvm/arm_pmu.h
+++ b/include/kvm/arm_pmu.h
@@ -41,6 +41,7 @@ struct kvm_pmu {
unsigned long kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u32 select_idx);
void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u32 val);
void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u32 val);
+void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u32 val);
void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u32 data,
u32 select_idx);
#else
@@ -50,6 +51,7 @@ unsigned long kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u32 select_idx)
}
void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u32 val) {}
void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u32 val) {}
+void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u32 val) {}
void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u32 data,
u32 select_idx) {}
#endif
diff --git a/virt/kvm/arm/pmu.c b/virt/kvm/arm/pmu.c
index 46145d1..18637c9 100644
--- a/virt/kvm/arm/pmu.c
+++ b/virt/kvm/arm/pmu.c
@@ -134,6 +134,35 @@ void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u32 val)
}
/**
+ * kvm_pmu_software_increment - do software increment
+ * @vcpu: The vcpu pointer
+ * @val: the value guest writes to PMSWINC register
+ */
+void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u32 val)
+{
+ int i;
+ u32 type, enable;
+
+ for (i = 0; i < 32; i++) {
+ if ((val >> i) & 0x1) {
+ if (!vcpu_mode_is_32bit(vcpu)) {
+ type = vcpu_sys_reg(vcpu, PMEVTYPER0_EL0 + i)
+ & ARMV8_EVTYPE_EVENT;
+ enable = vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
+ if ((type == 0) && ((enable >> i) & 0x1))
+ vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i)++;
+ } else {
+ type = vcpu_cp15(vcpu, c14_PMEVTYPER0 + i)
+ & ARMV8_EVTYPE_EVENT;
+ enable = vcpu_cp15(vcpu, c9_PMCNTENSET);
+ if ((type == 0) && ((enable >> i) & 0x1))
+ vcpu_cp15(vcpu, c14_PMEVCNTR0 + i)++;
+ }
+ }
+ }
+}
+
+/**
* kvm_pmu_set_counter_event_type - set selected counter to monitor some event
* @vcpu: The vcpu pointer
* @data: The data guest writes to PMXEVTYPER_EL0
@@ -165,6 +194,10 @@ void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u32 data,
kvm_pmu_stop_counter(vcpu, select_idx);
kvm_pmu_set_evttyper(vcpu, select_idx, data);
+ /* The software increment event doesn't need a perf event to be created */
+ if (new_eventsel == 0)
+ return;
+
memset(&attr, 0, sizeof(struct perf_event_attr));
attr.type = PERF_TYPE_RAW;
attr.size = sizeof(attr);
--
2.1.4
^ permalink raw reply related [flat|nested] 37+ messages in thread
* [PATCH v3 15/20] KVM: ARM64: Add reset and access handlers for PMSWINC register
2015-09-24 22:31 ` [PATCH v3 15/20] KVM: ARM64: Add reset and access handlers for PMSWINC register Shannon Zhao
@ 2015-10-16 15:25 ` Wei Huang
2015-10-21 7:02 ` Shannon Zhao
0 siblings, 1 reply; 37+ messages in thread
From: Wei Huang @ 2015-10-16 15:25 UTC (permalink / raw)
To: linux-arm-kernel
On 09/24/2015 05:31 PM, Shannon Zhao wrote:
> Add access handler which emulates writing and reading PMSWINC
> register and add support for creating software increment event.
>
> Signed-off-by: Shannon Zhao <shannon.zhao@linaro.org>
> ---
> arch/arm64/kvm/sys_regs.c | 18 +++++++++++++++++-
> include/kvm/arm_pmu.h | 2 ++
> virt/kvm/arm/pmu.c | 33 +++++++++++++++++++++++++++++++++
> 3 files changed, 52 insertions(+), 1 deletion(-)
>
> diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
> index 749e1e2..dd790c7 100644
> --- a/arch/arm64/kvm/sys_regs.c
> +++ b/arch/arm64/kvm/sys_regs.c
> @@ -543,6 +543,11 @@ static bool access_pmu_regs(struct kvm_vcpu *vcpu,
> vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= ~val;
> break;
> }
> + case PMSWINC_EL0: {
> + val = *vcpu_reg(vcpu, p->Rt);
> + kvm_pmu_software_increment(vcpu, val);
> + break;
> + }
> case PMCR_EL0: {
> /* Only update writeable bits of PMCR */
> val = vcpu_sys_reg(vcpu, r->reg);
> @@ -572,6 +577,8 @@ static bool access_pmu_regs(struct kvm_vcpu *vcpu,
> *vcpu_reg(vcpu, p->Rt) = val;
> break;
> }
> + case PMSWINC_EL0:
> + return read_zero(vcpu, p);
> default:
> *vcpu_reg(vcpu, p->Rt) = vcpu_sys_reg(vcpu, r->reg);
> break;
> @@ -784,7 +791,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
> access_pmu_regs, reset_unknown, PMOVSCLR_EL0 },
> /* PMSWINC_EL0 */
> { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b100),
> - trap_raz_wi },
> + access_pmu_regs, reset_unknown, PMSWINC_EL0 },
> /* PMSELR_EL0 */
> { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b101),
> access_pmu_regs, reset_unknown, PMSELR_EL0 },
> @@ -1070,6 +1077,11 @@ static bool access_pmu_cp15_regs(struct kvm_vcpu *vcpu,
> vcpu_cp15(vcpu, c9_PMOVSSET) &= ~val;
> break;
> }
> + case c9_PMSWINC: {
> + val = *vcpu_reg(vcpu, p->Rt);
> + kvm_pmu_software_increment(vcpu, val);
> + break;
> + }
> case c9_PMCR: {
> /* Only update writeable bits of PMCR */
> val = vcpu_cp15(vcpu, r->reg);
> @@ -1099,6 +1111,8 @@ static bool access_pmu_cp15_regs(struct kvm_vcpu *vcpu,
> *vcpu_reg(vcpu, p->Rt) = val;
> break;
> }
> + case c9_PMSWINC:
> + return read_zero(vcpu, p);
> default:
> *vcpu_reg(vcpu, p->Rt) = vcpu_cp15(vcpu, r->reg);
> break;
> @@ -1144,6 +1158,8 @@ static const struct sys_reg_desc cp15_regs[] = {
> reset_unknown_cp15, c9_PMCNTENCLR },
> { Op1( 0), CRn( 9), CRm(12), Op2( 3), access_pmu_cp15_regs,
> reset_unknown_cp15, c9_PMOVSCLR },
> + { Op1( 0), CRn( 9), CRm(12), Op2( 4), access_pmu_cp15_regs,
> + reset_unknown_cp15, c9_PMSWINC },
> { Op1( 0), CRn( 9), CRm(12), Op2( 5), access_pmu_cp15_regs,
> reset_unknown_cp15, c9_PMSELR },
> { Op1( 0), CRn( 9), CRm(12), Op2( 6), access_pmu_cp15_regs,
> diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h
> index 9b4ee5e..9293133 100644
> --- a/include/kvm/arm_pmu.h
> +++ b/include/kvm/arm_pmu.h
> @@ -41,6 +41,7 @@ struct kvm_pmu {
> unsigned long kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u32 select_idx);
> void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u32 val);
> void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u32 val);
> +void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u32 val);
> void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u32 data,
> u32 select_idx);
> #else
> @@ -50,6 +51,7 @@ unsigned long kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u32 select_idx)
> }
> void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u32 val) {}
> void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u32 val) {}
> +void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u32 val) {}
> void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u32 data,
> u32 select_idx) {}
> #endif
> diff --git a/virt/kvm/arm/pmu.c b/virt/kvm/arm/pmu.c
> index 46145d1..18637c9 100644
> --- a/virt/kvm/arm/pmu.c
> +++ b/virt/kvm/arm/pmu.c
> @@ -134,6 +134,35 @@ void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u32 val)
> }
>
> /**
> + * kvm_pmu_software_increment - do software increment
> + * @vcpu: The vcpu pointer
> + * @val: the value guest writes to PMSWINC register
> + */
> +void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u32 val)
> +{
> + int i;
> + u32 type, enable;
> +
> + for (i = 0; i < 32; i++) {
> + if ((val >> i) & 0x1) {
> + if (!vcpu_mode_is_32bit(vcpu)) {
> + type = vcpu_sys_reg(vcpu, PMEVTYPER0_EL0 + i)
> + & ARMV8_EVTYPE_EVENT;
> + enable = vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
> + if ((type == 0) && ((enable >> i) & 0x1))
> + vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i)++;
Most parts make sense here. I just wonder about the case of counter
overflow. Should we trigger an interrupt and set the Overflow Flag
status register when a SW increment overflows? I didn't find anything
about this in the ARM documentation.
> + } else {
> + type = vcpu_cp15(vcpu, c14_PMEVTYPER0 + i)
> + & ARMV8_EVTYPE_EVENT;
> + enable = vcpu_cp15(vcpu, c9_PMCNTENSET);
> + if ((type == 0) && ((enable >> i) & 0x1))
> + vcpu_cp15(vcpu, c14_PMEVCNTR0 + i)++;
> + }
> + }
> + }
> +}
> +
> +/**
> * kvm_pmu_set_counter_event_type - set selected counter to monitor some event
> * @vcpu: The vcpu pointer
> * @data: The data guest writes to PMXEVTYPER_EL0
> @@ -165,6 +194,10 @@ void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u32 data,
> kvm_pmu_stop_counter(vcpu, select_idx);
> kvm_pmu_set_evttyper(vcpu, select_idx, data);
>
> + /* The software increment event doesn't need a perf event to be created */
> + if (new_eventsel == 0)
> + return;
> +
> memset(&attr, 0, sizeof(struct perf_event_attr));
> attr.type = PERF_TYPE_RAW;
> attr.size = sizeof(attr);
>
^ permalink raw reply [flat|nested] 37+ messages in thread
* [PATCH v3 15/20] KVM: ARM64: Add reset and access handlers for PMSWINC register
2015-10-16 15:25 ` Wei Huang
@ 2015-10-21 7:02 ` Shannon Zhao
0 siblings, 0 replies; 37+ messages in thread
From: Shannon Zhao @ 2015-10-21 7:02 UTC (permalink / raw)
To: linux-arm-kernel
On 2015/10/16 23:25, Wei Huang wrote:
>> /**
>> > + * kvm_pmu_software_increment - do software increment
>> > + * @vcpu: The vcpu pointer
>> > + * @val: the value guest writes to PMSWINC register
>> > + */
>> > +void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u32 val)
>> > +{
>> > + int i;
>> > + u32 type, enable;
>> > +
>> > + for (i = 0; i < 32; i++) {
>> > + if ((val >> i) & 0x1) {
>> > + if (!vcpu_mode_is_32bit(vcpu)) {
>> > + type = vcpu_sys_reg(vcpu, PMEVTYPER0_EL0 + i)
>> > + & ARMV8_EVTYPE_EVENT;
>> > + enable = vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
>> > + if ((type == 0) && ((enable >> i) & 0x1))
>> > + vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i)++;
> Most parts make sense here. I just wonder about the case of counter
> overflow. Should we trigger an interrupt and set the Overflow Flag
> status register when a SW increment overflows? I didn't find anything
> about this in the ARM documentation.
>
I didn't find anything either. But since the SW increment event uses
PMEVCNTR<n>_EL0 to count, it should behave the same as other events and
trigger an interrupt and set the Overflow Flag status register.
I will add this in the next version of the patch. Thanks.
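A rough sketch of the kind of wrap check that could be added to the increment
path (purely illustrative; this is not the actual follow-up patch):

#include <stdint.h>
#include <stdio.h>

static uint32_t evcntr[31];     /* 32-bit event counters */
static uint32_t ovsset;         /* latched overflow status bits */
static uint32_t intenset;       /* per-counter interrupt enable bits */

/* Increment counter idx; if it wraps to zero, latch the overflow bit and
 * report whether an interrupt should be raised. */
static int sw_increment(int idx)
{
    if (++evcntr[idx] == 0) {
        ovsset |= 1U << idx;
        if ((intenset >> idx) & 1)
            return 1;   /* caller would set irq_pending and kick the vcpu */
    }
    return 0;
}

int main(void)
{
    int irq;

    evcntr[0] = 0xffffffffU;
    intenset = 1;
    irq = sw_increment(0);
    printf("interrupt? %d, ovsset=0x%x\n", irq, ovsset);
    return 0;
}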
--
Shannon
^ permalink raw reply [flat|nested] 37+ messages in thread
* [PATCH v3 16/20] KVM: ARM64: Add access handlers for PMEVCNTRn and PMEVTYPERn register
2015-09-24 22:31 [PATCH v3 00/20] KVM: ARM64: Add guest PMU support Shannon Zhao
` (14 preceding siblings ...)
2015-09-24 22:31 ` [PATCH v3 15/20] KVM: ARM64: Add reset and access handlers for PMSWINC register Shannon Zhao
@ 2015-09-24 22:31 ` Shannon Zhao
2015-09-24 22:31 ` [PATCH v3 17/20] KVM: ARM64: Add PMU overflow interrupt routing Shannon Zhao
` (5 subsequent siblings)
21 siblings, 0 replies; 37+ messages in thread
From: Shannon Zhao @ 2015-09-24 22:31 UTC (permalink / raw)
To: linux-arm-kernel
Add an access handler which emulates writing and reading the PMEVCNTRn
and PMEVTYPERn registers.
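For reference, the CRm/Op2 packing used by the macros below (CRm = 0b1000 |
n[4:3] for PMEVCNTR<n> and 0b1100 | n[4:3] for PMEVTYPER<n>, Op2 = n[2:0]) can
be checked with a quick standalone computation:

#include <stdio.h>

int main(void)
{
    for (int n = 0; n <= 30; n += 10) {
        int crm_cntr = 0x8 | ((n >> 3) & 0x3);
        int crm_type = 0xc | ((n >> 3) & 0x3);
        int op2 = n & 0x7;

        printf("n=%2d: PMEVCNTR CRm=%2d Op2=%d, PMEVTYPER CRm=%2d Op2=%d\n",
               n, crm_cntr, op2, crm_type, op2);
    }
    return 0;
}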
Signed-off-by: Shannon Zhao <shannon.zhao@linaro.org>
---
arch/arm64/kvm/sys_regs.c | 164 ++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 164 insertions(+)
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index dd790c7..0c785fc 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -603,6 +603,20 @@ static bool access_pmu_regs(struct kvm_vcpu *vcpu,
{ Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b111), \
trap_wcr, reset_wcr, n, 0, get_wcr, set_wcr }
+/* Macro to expand the PMEVCNTRn_EL0 register */
+#define PMU_PMEVCNTR_EL0(n) \
+ /* PMEVCNTRn_EL0 */ \
+ { Op0(0b11), Op1(0b011), CRn(0b1110), \
+ CRm((0b1000 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)), \
+ access_pmu_regs, reset_unknown, (PMEVCNTR0_EL0 + n), }
+
+/* Macro to expand the PMEVTYPERn_EL0 register */
+#define PMU_PMEVTYPER_EL0(n) \
+ /* PMEVTYPERn_EL0 */ \
+ { Op0(0b11), Op1(0b011), CRn(0b1110), \
+ CRm((0b1100 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)), \
+ access_pmu_regs, reset_unknown, (PMEVTYPER0_EL0 + n), }
+
/*
* Architected system registers.
* Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
@@ -824,6 +838,74 @@ static const struct sys_reg_desc sys_reg_descs[] = {
{ Op0(0b11), Op1(0b011), CRn(0b1101), CRm(0b0000), Op2(0b011),
NULL, reset_unknown, TPIDRRO_EL0 },
+ /* PMEVCNTRn_EL0 */
+ PMU_PMEVCNTR_EL0(0),
+ PMU_PMEVCNTR_EL0(1),
+ PMU_PMEVCNTR_EL0(2),
+ PMU_PMEVCNTR_EL0(3),
+ PMU_PMEVCNTR_EL0(4),
+ PMU_PMEVCNTR_EL0(5),
+ PMU_PMEVCNTR_EL0(6),
+ PMU_PMEVCNTR_EL0(7),
+ PMU_PMEVCNTR_EL0(8),
+ PMU_PMEVCNTR_EL0(9),
+ PMU_PMEVCNTR_EL0(10),
+ PMU_PMEVCNTR_EL0(11),
+ PMU_PMEVCNTR_EL0(12),
+ PMU_PMEVCNTR_EL0(13),
+ PMU_PMEVCNTR_EL0(14),
+ PMU_PMEVCNTR_EL0(15),
+ PMU_PMEVCNTR_EL0(16),
+ PMU_PMEVCNTR_EL0(17),
+ PMU_PMEVCNTR_EL0(18),
+ PMU_PMEVCNTR_EL0(19),
+ PMU_PMEVCNTR_EL0(20),
+ PMU_PMEVCNTR_EL0(21),
+ PMU_PMEVCNTR_EL0(22),
+ PMU_PMEVCNTR_EL0(23),
+ PMU_PMEVCNTR_EL0(24),
+ PMU_PMEVCNTR_EL0(25),
+ PMU_PMEVCNTR_EL0(26),
+ PMU_PMEVCNTR_EL0(27),
+ PMU_PMEVCNTR_EL0(28),
+ PMU_PMEVCNTR_EL0(29),
+ PMU_PMEVCNTR_EL0(30),
+ /* PMEVTYPERn_EL0 */
+ PMU_PMEVTYPER_EL0(0),
+ PMU_PMEVTYPER_EL0(1),
+ PMU_PMEVTYPER_EL0(2),
+ PMU_PMEVTYPER_EL0(3),
+ PMU_PMEVTYPER_EL0(4),
+ PMU_PMEVTYPER_EL0(5),
+ PMU_PMEVTYPER_EL0(6),
+ PMU_PMEVTYPER_EL0(7),
+ PMU_PMEVTYPER_EL0(8),
+ PMU_PMEVTYPER_EL0(9),
+ PMU_PMEVTYPER_EL0(10),
+ PMU_PMEVTYPER_EL0(11),
+ PMU_PMEVTYPER_EL0(12),
+ PMU_PMEVTYPER_EL0(13),
+ PMU_PMEVTYPER_EL0(14),
+ PMU_PMEVTYPER_EL0(15),
+ PMU_PMEVTYPER_EL0(16),
+ PMU_PMEVTYPER_EL0(17),
+ PMU_PMEVTYPER_EL0(18),
+ PMU_PMEVTYPER_EL0(19),
+ PMU_PMEVTYPER_EL0(20),
+ PMU_PMEVTYPER_EL0(21),
+ PMU_PMEVTYPER_EL0(22),
+ PMU_PMEVTYPER_EL0(23),
+ PMU_PMEVTYPER_EL0(24),
+ PMU_PMEVTYPER_EL0(25),
+ PMU_PMEVTYPER_EL0(26),
+ PMU_PMEVTYPER_EL0(27),
+ PMU_PMEVTYPER_EL0(28),
+ PMU_PMEVTYPER_EL0(29),
+ PMU_PMEVTYPER_EL0(30),
+ /* PMCCFILTR_EL0 */
+ { Op0(0b11), Op1(0b011), CRn(0b1110), CRm(0b1111), Op2(0b111),
+ access_pmu_regs, reset_unknown, PMCCFILTR_EL0, },
+
/* DACR32_EL2 */
{ Op0(0b11), Op1(0b100), CRn(0b0011), CRm(0b0000), Op2(0b000),
NULL, reset_unknown, DACR32_EL2 },
@@ -1122,6 +1204,20 @@ static bool access_pmu_cp15_regs(struct kvm_vcpu *vcpu,
return true;
}
+/* Macro to expand the PMEVCNTRn register */
+#define PMU_PMEVCNTR(n) \
+ /* PMEVCNTRn */ \
+ { Op1(0), CRn(0b1110), \
+ CRm((0b1000 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)), \
+ access_pmu_cp15_regs, reset_unknown_cp15, (c14_PMEVCNTR0 + n), }
+
+/* Macro to expand the PMEVTYPERn register */
+#define PMU_PMEVTYPER(n) \
+ /* PMEVTYPERn */ \
+ { Op1(0), CRn(0b1110), \
+ CRm((0b1100 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)), \
+ access_pmu_cp15_regs, reset_unknown_cp15, (c14_PMEVTYPER0 + n), }
+
/*
* Trapped cp15 registers. TTBR0/TTBR1 get a double encoding,
* depending on the way they are accessed (as a 32bit or a 64bit
@@ -1190,6 +1286,74 @@ static const struct sys_reg_desc cp15_regs[] = {
{ Op1( 0), CRn(12), CRm(12), Op2( 5), trap_raz_wi },
{ Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, c13_CID },
+
+ /* PMEVCNTRn */
+ PMU_PMEVCNTR(0),
+ PMU_PMEVCNTR(1),
+ PMU_PMEVCNTR(2),
+ PMU_PMEVCNTR(3),
+ PMU_PMEVCNTR(4),
+ PMU_PMEVCNTR(5),
+ PMU_PMEVCNTR(6),
+ PMU_PMEVCNTR(7),
+ PMU_PMEVCNTR(8),
+ PMU_PMEVCNTR(9),
+ PMU_PMEVCNTR(10),
+ PMU_PMEVCNTR(11),
+ PMU_PMEVCNTR(12),
+ PMU_PMEVCNTR(13),
+ PMU_PMEVCNTR(14),
+ PMU_PMEVCNTR(15),
+ PMU_PMEVCNTR(16),
+ PMU_PMEVCNTR(17),
+ PMU_PMEVCNTR(18),
+ PMU_PMEVCNTR(19),
+ PMU_PMEVCNTR(20),
+ PMU_PMEVCNTR(21),
+ PMU_PMEVCNTR(22),
+ PMU_PMEVCNTR(23),
+ PMU_PMEVCNTR(24),
+ PMU_PMEVCNTR(25),
+ PMU_PMEVCNTR(26),
+ PMU_PMEVCNTR(27),
+ PMU_PMEVCNTR(28),
+ PMU_PMEVCNTR(29),
+ PMU_PMEVCNTR(30),
+ /* PMEVTYPERn */
+ PMU_PMEVTYPER(0),
+ PMU_PMEVTYPER(1),
+ PMU_PMEVTYPER(2),
+ PMU_PMEVTYPER(3),
+ PMU_PMEVTYPER(4),
+ PMU_PMEVTYPER(5),
+ PMU_PMEVTYPER(6),
+ PMU_PMEVTYPER(7),
+ PMU_PMEVTYPER(8),
+ PMU_PMEVTYPER(9),
+ PMU_PMEVTYPER(10),
+ PMU_PMEVTYPER(11),
+ PMU_PMEVTYPER(12),
+ PMU_PMEVTYPER(13),
+ PMU_PMEVTYPER(14),
+ PMU_PMEVTYPER(15),
+ PMU_PMEVTYPER(16),
+ PMU_PMEVTYPER(17),
+ PMU_PMEVTYPER(18),
+ PMU_PMEVTYPER(19),
+ PMU_PMEVTYPER(20),
+ PMU_PMEVTYPER(21),
+ PMU_PMEVTYPER(22),
+ PMU_PMEVTYPER(23),
+ PMU_PMEVTYPER(24),
+ PMU_PMEVTYPER(25),
+ PMU_PMEVTYPER(26),
+ PMU_PMEVTYPER(27),
+ PMU_PMEVTYPER(28),
+ PMU_PMEVTYPER(29),
+ PMU_PMEVTYPER(30),
+ /* PMCCFILTR */
+ { Op1(0), CRn(14), CRm(15), Op2(7), access_pmu_cp15_regs,
+ reset_val_cp15, c14_PMCCFILTR, 0 },
};
static const struct sys_reg_desc cp15_64_regs[] = {
--
2.1.4
^ permalink raw reply related [flat|nested] 37+ messages in thread
* [PATCH v3 17/20] KVM: ARM64: Add PMU overflow interrupt routing
2015-09-24 22:31 [PATCH v3 00/20] KVM: ARM64: Add guest PMU support Shannon Zhao
` (15 preceding siblings ...)
2015-09-24 22:31 ` [PATCH v3 16/20] KVM: ARM64: Add access handlers for PMEVCNTRn and PMEVTYPERn register Shannon Zhao
@ 2015-09-24 22:31 ` Shannon Zhao
2015-10-07 8:17 ` Marc Zyngier
2015-09-24 22:31 ` [PATCH v3 18/20] KVM: ARM64: Reset PMU state when resetting vcpu Shannon Zhao
` (4 subsequent siblings)
21 siblings, 1 reply; 37+ messages in thread
From: Shannon Zhao @ 2015-09-24 22:31 UTC (permalink / raw)
To: linux-arm-kernel
When calling perf_event_create_kernel_counter to create the perf_event,
assign an overflow handler. Then, when the perf event overflows, set
irq_pending and call kvm_vcpu_kick() to sync the interrupt.
Signed-off-by: Shannon Zhao <shannon.zhao@linaro.org>
---
arch/arm/kvm/arm.c | 4 ++++
include/kvm/arm_pmu.h | 2 ++
virt/kvm/arm/pmu.c | 54 ++++++++++++++++++++++++++++++++++++++++++++++++++-
3 files changed, 59 insertions(+), 1 deletion(-)
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index ce404a5..3fca263 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -28,6 +28,7 @@
#include <linux/sched.h>
#include <linux/kvm.h>
#include <trace/events/kvm.h>
+#include <kvm/arm_pmu.h>
#define CREATE_TRACE_POINTS
#include "trace.h"
@@ -554,6 +555,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
kvm_vgic_sync_hwstate(vcpu);
preempt_enable();
kvm_timer_sync_hwstate(vcpu);
+ kvm_pmu_sync_hwstate(vcpu);
continue;
}
@@ -604,6 +606,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
kvm_timer_sync_hwstate(vcpu);
+ kvm_pmu_sync_hwstate(vcpu);
+
ret = handle_exit(vcpu, run, ret);
}
diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h
index 9293133..953c400 100644
--- a/include/kvm/arm_pmu.h
+++ b/include/kvm/arm_pmu.h
@@ -38,6 +38,7 @@ struct kvm_pmu {
};
#ifdef CONFIG_KVM_ARM_PMU
+void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu);
unsigned long kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u32 select_idx);
void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u32 val);
void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u32 val);
@@ -45,6 +46,7 @@ void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u32 val);
void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u32 data,
u32 select_idx);
#else
+void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu) {}
unsigned long kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u32 select_idx)
{
return 0;
diff --git a/virt/kvm/arm/pmu.c b/virt/kvm/arm/pmu.c
index 18637c9..ca7e849 100644
--- a/virt/kvm/arm/pmu.c
+++ b/virt/kvm/arm/pmu.c
@@ -21,6 +21,7 @@
#include <linux/perf_event.h>
#include <asm/kvm_emulate.h>
#include <kvm/arm_pmu.h>
+#include <kvm/arm_vgic.h>
static void kvm_pmu_set_evttyper(struct kvm_vcpu *vcpu, u32 idx, u32 val)
{
@@ -62,6 +63,56 @@ static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, u32 select_idx)
}
/**
+ * kvm_pmu_sync_hwstate - sync pmu state for cpu
+ * @vcpu: The vcpu pointer
+ *
+ * Inject virtual PMU IRQ if IRQ is pending for this cpu.
+ */
+void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu)
+{
+ struct kvm_pmu *pmu = &vcpu->arch.pmu;
+
+ if (pmu->irq_pending && (pmu->irq_num != -1)) {
+ kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id, pmu->irq_num, 1);
+ pmu->irq_pending = false;
+ }
+}
+
+/**
+ * When perf event overflows, set irq_pending and call kvm_vcpu_kick() to inject
+ * the interrupt.
+ */
+static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
+ struct perf_sample_data *data,
+ struct pt_regs *regs)
+{
+ struct kvm_pmc *pmc = perf_event->overflow_handler_context;
+ struct kvm_vcpu *vcpu = pmc->vcpu;
+ struct kvm_pmu *pmu = &vcpu->arch.pmu;
+ int idx = pmc->idx;
+
+ if (!vcpu_mode_is_32bit(vcpu)) {
+ if ((vcpu_sys_reg(vcpu, PMINTENSET_EL1) >> idx) & 0x1) {
+ __set_bit(idx,
+ (unsigned long *)&vcpu_sys_reg(vcpu, PMOVSSET_EL0));
+ __set_bit(idx,
+ (unsigned long *)&vcpu_sys_reg(vcpu, PMOVSCLR_EL0));
+ pmu->irq_pending = true;
+ kvm_vcpu_kick(vcpu);
+ }
+ } else {
+ if ((vcpu_cp15(vcpu, c9_PMINTENSET) >> idx) & 0x1) {
+ __set_bit(idx,
+ (unsigned long *)&vcpu_cp15(vcpu, c9_PMOVSSET));
+ __set_bit(idx,
+ (unsigned long *)&vcpu_cp15(vcpu, c9_PMOVSCLR));
+ pmu->irq_pending = true;
+ kvm_vcpu_kick(vcpu);
+ }
+ }
+}
+
+/**
* kvm_pmu_get_counter_value - get PMU counter value
* @vcpu: The vcpu pointer
* @select_idx: The counter index
@@ -225,7 +276,8 @@ void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u32 data,
/* The initial sample period (overflow count) of an event. */
attr.sample_period = (-counter) & (((u64)1 << overflow_bit) - 1);
- event = perf_event_create_kernel_counter(&attr, -1, current, NULL, pmc);
+ event = perf_event_create_kernel_counter(&attr, -1, current,
+ kvm_pmu_perf_overflow, pmc);
if (IS_ERR(event)) {
printk_once("kvm: pmu event creation failed %ld\n",
PTR_ERR(event));
--
2.1.4
^ permalink raw reply related [flat|nested] 37+ messages in thread
* [PATCH v3 17/20] KVM: ARM64: Add PMU overflow interrupt routing
2015-09-24 22:31 ` [PATCH v3 17/20] KVM: ARM64: Add PMU overflow interrupt routing Shannon Zhao
@ 2015-10-07 8:17 ` Marc Zyngier
0 siblings, 0 replies; 37+ messages in thread
From: Marc Zyngier @ 2015-10-07 8:17 UTC (permalink / raw)
To: linux-arm-kernel
On 24/09/15 23:31, Shannon Zhao wrote:
> When calling perf_event_create_kernel_counter to create the perf_event,
> assign an overflow handler. Then, when the perf event overflows, set
> irq_pending and call kvm_vcpu_kick() to sync the interrupt.
>
> Signed-off-by: Shannon Zhao <shannon.zhao@linaro.org>
> ---
> arch/arm/kvm/arm.c | 4 ++++
> include/kvm/arm_pmu.h | 2 ++
> virt/kvm/arm/pmu.c | 54 ++++++++++++++++++++++++++++++++++++++++++++++++++-
> 3 files changed, 59 insertions(+), 1 deletion(-)
>
> diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
> index ce404a5..3fca263 100644
> --- a/arch/arm/kvm/arm.c
> +++ b/arch/arm/kvm/arm.c
> @@ -28,6 +28,7 @@
> #include <linux/sched.h>
> #include <linux/kvm.h>
> #include <trace/events/kvm.h>
> +#include <kvm/arm_pmu.h>
>
> #define CREATE_TRACE_POINTS
> #include "trace.h"
> @@ -554,6 +555,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
> kvm_vgic_sync_hwstate(vcpu);
> preempt_enable();
> kvm_timer_sync_hwstate(vcpu);
> + kvm_pmu_sync_hwstate(vcpu);
> continue;
> }
>
> @@ -604,6 +606,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
>
> kvm_timer_sync_hwstate(vcpu);
>
> + kvm_pmu_sync_hwstate(vcpu);
> +
> ret = handle_exit(vcpu, run, ret);
> }
The code around here is about to change with Christoffer's patches. Most
importantly, virtual devices must signal their changes before we touch
the vgic. I suspect this will have some impact; see below.
>
> diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h
> index 9293133..953c400 100644
> --- a/include/kvm/arm_pmu.h
> +++ b/include/kvm/arm_pmu.h
> @@ -38,6 +38,7 @@ struct kvm_pmu {
> };
>
> #ifdef CONFIG_KVM_ARM_PMU
> +void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu);
> unsigned long kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u32 select_idx);
> void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u32 val);
> void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u32 val);
> @@ -45,6 +46,7 @@ void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u32 val);
> void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u32 data,
> u32 select_idx);
> #else
> +void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu) {}
> unsigned long kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u32 select_idx)
> {
> return 0;
> diff --git a/virt/kvm/arm/pmu.c b/virt/kvm/arm/pmu.c
> index 18637c9..ca7e849 100644
> --- a/virt/kvm/arm/pmu.c
> +++ b/virt/kvm/arm/pmu.c
> @@ -21,6 +21,7 @@
> #include <linux/perf_event.h>
> #include <asm/kvm_emulate.h>
> #include <kvm/arm_pmu.h>
> +#include <kvm/arm_vgic.h>
>
> static void kvm_pmu_set_evttyper(struct kvm_vcpu *vcpu, u32 idx, u32 val)
> {
> @@ -62,6 +63,56 @@ static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, u32 select_idx)
> }
>
> /**
> + * kvm_pmu_sync_hwstate - sync pmu state for cpu
> + * @vcpu: The vcpu pointer
> + *
> + * Inject virtual PMU IRQ if IRQ is pending for this cpu.
> + */
> +void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu)
> +{
> + struct kvm_pmu *pmu = &vcpu->arch.pmu;
> +
> + if (pmu->irq_pending && (pmu->irq_num != -1)) {
How likely is it that pmu->irq_num could be -1? I don't think the interrupt
can be made optional.
> + kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id, pmu->irq_num, 1);
> + pmu->irq_pending = false;
> + }
So you're signalling the interrupt as an edge, not a level. Is that an
accurate modelling of the PMU interrupt?
My hunch is that the interrupt should still be pending until the guest
has cleared the overflow condition (by writing to PMOVSCLR_EL0). You can
probably lift most of that logic from Christoffer's rework of the timer
state.
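A rough sketch of the level-triggered view being suggested (illustrative only;
not Christoffer's timer code):

#include <stdint.h>
#include <stdio.h>

static uint32_t ovsset;         /* PMOVSSET: latched overflow bits */
static uint32_t intenset;       /* PMINTENSET: per-counter interrupt enables */

/* With level semantics the line is a pure function of current state: it
 * stays asserted until the guest clears the overflow via PMOVSCLR. */
static int pmu_irq_level(void)
{
    return (ovsset & intenset) != 0;
}

int main(void)
{
    intenset = 1;
    ovsset |= 1;                                /* counter 0 overflows */
    printf("level=%d\n", pmu_irq_level());      /* 1: still pending */
    ovsset &= ~1U;                              /* guest writes PMOVSCLR */
    printf("level=%d\n", pmu_irq_level());      /* 0: de-asserted */
    return 0;
}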
> +}
> +
> +/**
> + * When perf event overflows, set irq_pending and call kvm_vcpu_kick() to inject
> + * the interrupt.
> + */
> +static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
> + struct perf_sample_data *data,
> + struct pt_regs *regs)
> +{
> + struct kvm_pmc *pmc = perf_event->overflow_handler_context;
> + struct kvm_vcpu *vcpu = pmc->vcpu;
> + struct kvm_pmu *pmu = &vcpu->arch.pmu;
> + int idx = pmc->idx;
> +
> + if (!vcpu_mode_is_32bit(vcpu)) {
> + if ((vcpu_sys_reg(vcpu, PMINTENSET_EL1) >> idx) & 0x1) {
> + __set_bit(idx,
> + (unsigned long *)&vcpu_sys_reg(vcpu, PMOVSSET_EL0));
> + __set_bit(idx,
> + (unsigned long *)&vcpu_sys_reg(vcpu, PMOVSCLR_EL0));
> + pmu->irq_pending = true;
> + kvm_vcpu_kick(vcpu);
> + }
> + } else {
> + if ((vcpu_cp15(vcpu, c9_PMINTENSET) >> idx) & 0x1) {
> + __set_bit(idx,
> + (unsigned long *)&vcpu_cp15(vcpu, c9_PMOVSSET));
> + __set_bit(idx,
> + (unsigned long *)&vcpu_cp15(vcpu, c9_PMOVSCLR));
> + pmu->irq_pending = true;
> + kvm_vcpu_kick(vcpu);
> + }
> + }
> +}
> +
> +/**
> * kvm_pmu_get_counter_value - get PMU counter value
> * @vcpu: The vcpu pointer
> * @select_idx: The counter index
> @@ -225,7 +276,8 @@ void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u32 data,
> /* The initial sample period (overflow count) of an event. */
> attr.sample_period = (-counter) & (((u64)1 << overflow_bit) - 1);
>
> - event = perf_event_create_kernel_counter(&attr, -1, current, NULL, pmc);
> + event = perf_event_create_kernel_counter(&attr, -1, current,
> + kvm_pmu_perf_overflow, pmc);
> if (IS_ERR(event)) {
> printk_once("kvm: pmu event creation failed %ld\n",
> PTR_ERR(event));
>
Thanks,
M.
--
Jazz is not dead. It just smells funny...
* [PATCH v3 18/20] KVM: ARM64: Reset PMU state when resetting vcpu
2015-09-24 22:31 [PATCH v3 00/20] KVM: ARM64: Add guest PMU support Shannon Zhao
` (16 preceding siblings ...)
2015-09-24 22:31 ` [PATCH v3 17/20] KVM: ARM64: Add PMU overflow interrupt routing Shannon Zhao
@ 2015-09-24 22:31 ` Shannon Zhao
2015-10-16 15:28 ` Wei Huang
2015-09-24 22:31 ` [PATCH v3 19/20] KVM: ARM64: Free perf event of PMU when destroying vcpu Shannon Zhao
` (3 subsequent siblings)
21 siblings, 1 reply; 37+ messages in thread
From: Shannon Zhao @ 2015-09-24 22:31 UTC (permalink / raw)
To: linux-arm-kernel
Signed-off-by: Shannon Zhao <shannon.zhao@linaro.org>
---
arch/arm64/kvm/reset.c | 3 +++
include/kvm/arm_pmu.h | 2 ++
virt/kvm/arm/pmu.c | 18 ++++++++++++++++++
3 files changed, 23 insertions(+)
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
index 91cf535..4da7f6c 100644
--- a/arch/arm64/kvm/reset.c
+++ b/arch/arm64/kvm/reset.c
@@ -120,6 +120,9 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
/* Reset system registers */
kvm_reset_sys_regs(vcpu);
+ /* Reset PMU */
+ kvm_pmu_vcpu_reset(vcpu);
+
/* Reset timer */
return kvm_timer_vcpu_reset(vcpu, cpu_vtimer_irq);
}
diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h
index 953c400..8dacfd3 100644
--- a/include/kvm/arm_pmu.h
+++ b/include/kvm/arm_pmu.h
@@ -38,6 +38,7 @@ struct kvm_pmu {
};
#ifdef CONFIG_KVM_ARM_PMU
+void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu);
void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu);
unsigned long kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u32 select_idx);
void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u32 val);
@@ -46,6 +47,7 @@ void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u32 val);
void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u32 data,
u32 select_idx);
#else
+void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu) {}
void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu) {}
unsigned long kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u32 select_idx)
{
diff --git a/virt/kvm/arm/pmu.c b/virt/kvm/arm/pmu.c
index ca7e849..faa2b76 100644
--- a/virt/kvm/arm/pmu.c
+++ b/virt/kvm/arm/pmu.c
@@ -63,6 +63,24 @@ static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, u32 select_idx)
}
/**
+ * kvm_pmu_vcpu_reset - reset pmu state for cpu
+ * @vcpu: The vcpu pointer
+ *
+ */
+void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu)
+{
+ int i;
+ struct kvm_pmu *pmu = &vcpu->arch.pmu;
+
+ for (i = 0; i < ARMV8_MAX_COUNTERS; i++) {
+ kvm_pmu_stop_counter(vcpu, i);
+ pmu->pmc[i].idx = i;
+ pmu->pmc[i].vcpu = vcpu;
+ }
+ pmu->irq_pending = false;
+}
+
+/**
* kvm_pmu_sync_hwstate - sync pmu state for cpu
* @vcpu: The vcpu pointer
*
--
2.1.4
* [PATCH v3 18/20] KVM: ARM64: Reset PMU state when resetting vcpu
2015-09-24 22:31 ` [PATCH v3 18/20] KVM: ARM64: Reset PMU state when resetting vcpu Shannon Zhao
@ 2015-10-16 15:28 ` Wei Huang
0 siblings, 0 replies; 37+ messages in thread
From: Wei Huang @ 2015-10-16 15:28 UTC (permalink / raw)
To: linux-arm-kernel
On 09/24/2015 05:31 PM, Shannon Zhao wrote:
> Signed-off-by: Shannon Zhao <shannon.zhao@linaro.org>
Missing commit message here.
> ---
> arch/arm64/kvm/reset.c | 3 +++
> include/kvm/arm_pmu.h | 2 ++
> virt/kvm/arm/pmu.c | 18 ++++++++++++++++++
> 3 files changed, 23 insertions(+)
>
> diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
> index 91cf535..4da7f6c 100644
> --- a/arch/arm64/kvm/reset.c
> +++ b/arch/arm64/kvm/reset.c
> @@ -120,6 +120,9 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
> /* Reset system registers */
> kvm_reset_sys_regs(vcpu);
>
> + /* Reset PMU */
> + kvm_pmu_vcpu_reset(vcpu);
> +
> /* Reset timer */
> return kvm_timer_vcpu_reset(vcpu, cpu_vtimer_irq);
> }
> diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h
> index 953c400..8dacfd3 100644
> --- a/include/kvm/arm_pmu.h
> +++ b/include/kvm/arm_pmu.h
> @@ -38,6 +38,7 @@ struct kvm_pmu {
> };
>
> #ifdef CONFIG_KVM_ARM_PMU
> +void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu);
> void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu);
> unsigned long kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u32 select_idx);
> void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u32 val);
> @@ -46,6 +47,7 @@ void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u32 val);
> void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u32 data,
> u32 select_idx);
> #else
> +void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu) {}
> void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu) {}
> unsigned long kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u32 select_idx)
> {
> diff --git a/virt/kvm/arm/pmu.c b/virt/kvm/arm/pmu.c
> index ca7e849..faa2b76 100644
> --- a/virt/kvm/arm/pmu.c
> +++ b/virt/kvm/arm/pmu.c
> @@ -63,6 +63,24 @@ static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, u32 select_idx)
> }
>
> /**
> + * kvm_pmu_vcpu_reset - reset pmu state for cpu
> + * @vcpu: The vcpu pointer
> + *
> + */
> +void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu)
> +{
> + int i;
> + struct kvm_pmu *pmu = &vcpu->arch.pmu;
> +
> + for (i = 0; i < ARMV8_MAX_COUNTERS; i++) {
> + kvm_pmu_stop_counter(vcpu, i);
> + pmu->pmc[i].idx = i;
> + pmu->pmc[i].vcpu = vcpu;
> + }
> + pmu->irq_pending = false;
> +}
> +
> +/**
> * kvm_pmu_sync_hwstate - sync pmu state for cpu
> * @vcpu: The vcpu pointer
> *
>
* [PATCH v3 19/20] KVM: ARM64: Free perf event of PMU when destroying vcpu
2015-09-24 22:31 [PATCH v3 00/20] KVM: ARM64: Add guest PMU support Shannon Zhao
` (17 preceding siblings ...)
2015-09-24 22:31 ` [PATCH v3 18/20] KVM: ARM64: Reset PMU state when resetting vcpu Shannon Zhao
@ 2015-09-24 22:31 ` Shannon Zhao
2015-09-24 22:31 ` [PATCH v3 20/20] KVM: ARM64: Add a new kvm ARM PMU device Shannon Zhao
` (2 subsequent siblings)
21 siblings, 0 replies; 37+ messages in thread
From: Shannon Zhao @ 2015-09-24 22:31 UTC (permalink / raw)
To: linux-arm-kernel
When KVM frees a VCPU, it needs to free the perf events of the PMU.
Signed-off-by: Shannon Zhao <shannon.zhao@linaro.org>
---
arch/arm/kvm/arm.c | 1 +
include/kvm/arm_pmu.h | 2 ++
virt/kvm/arm/pmu.c | 21 +++++++++++++++++++++
3 files changed, 24 insertions(+)
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 3fca263..4d24498 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -259,6 +259,7 @@ void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
kvm_mmu_free_memory_caches(vcpu);
kvm_timer_vcpu_terminate(vcpu);
kvm_vgic_vcpu_destroy(vcpu);
+ kvm_pmu_vcpu_destroy(vcpu);
kmem_cache_free(kvm_vcpu_cache, vcpu);
}
diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h
index 8dacfd3..a32e630 100644
--- a/include/kvm/arm_pmu.h
+++ b/include/kvm/arm_pmu.h
@@ -39,6 +39,7 @@ struct kvm_pmu {
#ifdef CONFIG_KVM_ARM_PMU
void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu);
+void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu);
void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu);
unsigned long kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u32 select_idx);
void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u32 val);
@@ -48,6 +49,7 @@ void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u32 data,
u32 select_idx);
#else
void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu) {}
+void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu) {}
void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu) {}
unsigned long kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u32 select_idx)
{
diff --git a/virt/kvm/arm/pmu.c b/virt/kvm/arm/pmu.c
index faa2b76..00ef0b4 100644
--- a/virt/kvm/arm/pmu.c
+++ b/virt/kvm/arm/pmu.c
@@ -81,6 +81,27 @@ void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu)
}
/**
+ * kvm_pmu_vcpu_destroy - free perf event of PMU for cpu
+ * @vcpu: The vcpu pointer
+ *
+ */
+void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu)
+{
+ int i;
+ struct kvm_pmu *pmu = &vcpu->arch.pmu;
+
+ for (i = 0; i < ARMV8_MAX_COUNTERS; i++) {
+ struct kvm_pmc *pmc = &pmu->pmc[i];
+
+ if (pmc->perf_event) {
+ perf_event_disable(pmc->perf_event);
+ perf_event_release_kernel(pmc->perf_event);
+ pmc->perf_event = NULL;
+ }
+ }
+}
+
+/**
* kvm_pmu_sync_hwstate - sync pmu state for cpu
* @vcpu: The vcpu pointer
*
--
2.1.4
* [PATCH v3 20/20] KVM: ARM64: Add a new kvm ARM PMU device
2015-09-24 22:31 [PATCH v3 00/20] KVM: ARM64: Add guest PMU support Shannon Zhao
` (18 preceding siblings ...)
2015-09-24 22:31 ` [PATCH v3 19/20] KVM: ARM64: Free perf event of PMU when destroying vcpu Shannon Zhao
@ 2015-09-24 22:31 ` Shannon Zhao
2015-10-16 4:55 ` [PATCH v3 00/20] KVM: ARM64: Add guest PMU support Wei Huang
2015-10-26 11:33 ` Christoffer Dall
21 siblings, 0 replies; 37+ messages in thread
From: Shannon Zhao @ 2015-09-24 22:31 UTC (permalink / raw)
To: linux-arm-kernel
Add a new kvm device type KVM_DEV_TYPE_ARM_PMU_V3 for ARM PMU. Implement
the kvm_device_ops for it.
Signed-off-by: Shannon Zhao <shannon.zhao@linaro.org>
---
Documentation/virtual/kvm/devices/arm-pmu.txt | 15 +++++
arch/arm64/include/uapi/asm/kvm.h | 3 +
include/linux/kvm_host.h | 1 +
include/uapi/linux/kvm.h | 2 +
virt/kvm/arm/pmu.c | 88 +++++++++++++++++++++++++++
virt/kvm/kvm_main.c | 4 ++
6 files changed, 113 insertions(+)
create mode 100644 Documentation/virtual/kvm/devices/arm-pmu.txt
diff --git a/Documentation/virtual/kvm/devices/arm-pmu.txt b/Documentation/virtual/kvm/devices/arm-pmu.txt
new file mode 100644
index 0000000..49481c4
--- /dev/null
+++ b/Documentation/virtual/kvm/devices/arm-pmu.txt
@@ -0,0 +1,15 @@
+ARM Virtual Performance Monitor Unit (vPMU)
+===========================================
+
+Device types supported:
+ KVM_DEV_TYPE_ARM_PMU_V3 ARM Performance Monitor Unit v3
+
+Instantiate one PMU instance per VCPU through this API.
+
+Groups:
+ KVM_DEV_ARM_PMU_GRP_IRQ
+ Attributes:
+ A value describing the interrupt number of the PMU overflow interrupt.
+
+ Errors:
+ -EINVAL: Value set is out of the expected range
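For illustration, userspace would typically drive this interface roughly as
follows (a sketch against the uapi definitions added by this series; error
handling is trimmed and the helper name is made up):

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Create the vPMU device for a VM and program its overflow interrupt. */
static int create_pmu_and_set_irq(int vm_fd, int irq)
{
        struct kvm_create_device cd = {
                .type = KVM_DEV_TYPE_ARM_PMU_V3,
        };
        struct kvm_device_attr attr = {
                .group = KVM_DEV_ARM_PMU_GRP_IRQ,
                .addr  = (__u64)(unsigned long)&irq,
        };

        if (ioctl(vm_fd, KVM_CREATE_DEVICE, &cd) < 0)
                return -1;

        /* cd.fd now refers to the new device; hand it the IRQ number. */
        return ioctl(cd.fd, KVM_SET_DEVICE_ATTR, &attr);
}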
diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h
index 0cd7b59..1309a93 100644
--- a/arch/arm64/include/uapi/asm/kvm.h
+++ b/arch/arm64/include/uapi/asm/kvm.h
@@ -204,6 +204,9 @@ struct kvm_arch_memory_slot {
#define KVM_DEV_ARM_VGIC_GRP_CTRL 4
#define KVM_DEV_ARM_VGIC_CTRL_INIT 0
+/* Device Control API: ARM PMU */
+#define KVM_DEV_ARM_PMU_GRP_IRQ 0
+
/* KVM_IRQ_LINE irq field index values */
#define KVM_ARM_IRQ_TYPE_SHIFT 24
#define KVM_ARM_IRQ_TYPE_MASK 0xff
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 1bef9e2..f6be696 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -1122,6 +1122,7 @@ extern struct kvm_device_ops kvm_mpic_ops;
extern struct kvm_device_ops kvm_xics_ops;
extern struct kvm_device_ops kvm_arm_vgic_v2_ops;
extern struct kvm_device_ops kvm_arm_vgic_v3_ops;
+extern struct kvm_device_ops kvm_arm_pmu_ops;
#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index a9256f0..f41e6b6 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -1025,6 +1025,8 @@ enum kvm_device_type {
#define KVM_DEV_TYPE_FLIC KVM_DEV_TYPE_FLIC
KVM_DEV_TYPE_ARM_VGIC_V3,
#define KVM_DEV_TYPE_ARM_VGIC_V3 KVM_DEV_TYPE_ARM_VGIC_V3
+ KVM_DEV_TYPE_ARM_PMU_V3,
+#define KVM_DEV_TYPE_ARM_PMU_V3 KVM_DEV_TYPE_ARM_PMU_V3
KVM_DEV_TYPE_MAX,
};
diff --git a/virt/kvm/arm/pmu.c b/virt/kvm/arm/pmu.c
index 00ef0b4..0aa4bc1 100644
--- a/virt/kvm/arm/pmu.c
+++ b/virt/kvm/arm/pmu.c
@@ -19,6 +19,7 @@
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
+#include <linux/uaccess.h>
#include <asm/kvm_emulate.h>
#include <kvm/arm_pmu.h>
#include <kvm/arm_vgic.h>
@@ -324,3 +325,90 @@ void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u32 data,
}
pmc->perf_event = event;
}
+
+static int kvm_arm_pmu_set_irq(struct kvm *kvm, int irq)
+{
+ int j;
+ struct kvm_vcpu *vcpu;
+
+ kvm_for_each_vcpu(j, vcpu, kvm) {
+ struct kvm_pmu *pmu = &vcpu->arch.pmu;
+
+ kvm_debug("Set kvm ARM PMU irq: %d\n", irq);
+ pmu->irq_num = irq;
+ }
+
+ return 0;
+}
+
+static int kvm_arm_pmu_create(struct kvm_device *dev, u32 type)
+{
+ int i, j;
+ struct kvm_vcpu *vcpu;
+ struct kvm *kvm = dev->kvm;
+
+ kvm_for_each_vcpu(j, vcpu, kvm) {
+ struct kvm_pmu *pmu = &vcpu->arch.pmu;
+
+ memset(pmu, 0, sizeof(*pmu));
+ for (i = 0; i < ARMV8_MAX_COUNTERS; i++) {
+ pmu->pmc[i].idx = i;
+ pmu->pmc[i].vcpu = vcpu;
+ }
+ pmu->irq_num = -1;
+ }
+
+ return 0;
+}
+
+static void kvm_arm_pmu_destroy(struct kvm_device *dev)
+{
+ kfree(dev);
+}
+
+static int kvm_arm_pmu_set_attr(struct kvm_device *dev,
+ struct kvm_device_attr *attr)
+{
+ switch (attr->group) {
+ case KVM_DEV_ARM_PMU_GRP_IRQ: {
+ int __user *uaddr = (int __user *)(long)attr->addr;
+ int reg;
+
+ if (get_user(reg, uaddr))
+ return -EFAULT;
+
+ if (reg < VGIC_NR_SGIS || reg > dev->kvm->arch.vgic.nr_irqs)
+ return -EINVAL;
+
+ return kvm_arm_pmu_set_irq(dev->kvm, reg);
+ }
+ }
+
+ return -ENXIO;
+}
+
+static int kvm_arm_pmu_get_attr(struct kvm_device *dev,
+ struct kvm_device_attr *attr)
+{
+ return 0;
+}
+
+static int kvm_arm_pmu_has_attr(struct kvm_device *dev,
+ struct kvm_device_attr *attr)
+{
+ switch (attr->group) {
+ case KVM_DEV_ARM_PMU_GRP_IRQ:
+ return 0;
+ }
+
+ return -ENXIO;
+}
+
+struct kvm_device_ops kvm_arm_pmu_ops = {
+ .name = "kvm-arm-pmu",
+ .create = kvm_arm_pmu_create,
+ .destroy = kvm_arm_pmu_destroy,
+ .set_attr = kvm_arm_pmu_set_attr,
+ .get_attr = kvm_arm_pmu_get_attr,
+ .has_attr = kvm_arm_pmu_has_attr,
+};
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index a25a731..028bd54 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -2639,6 +2639,10 @@ static struct kvm_device_ops *kvm_device_ops_table[KVM_DEV_TYPE_MAX] = {
#ifdef CONFIG_KVM_XICS
[KVM_DEV_TYPE_XICS] = &kvm_xics_ops,
#endif
+
+#ifdef CONFIG_KVM_ARM_PMU
+ [KVM_DEV_TYPE_ARM_PMU_V3] = &kvm_arm_pmu_ops,
+#endif
};
int kvm_register_device_ops(struct kvm_device_ops *ops, u32 type)
--
2.1.4
* [PATCH v3 00/20] KVM: ARM64: Add guest PMU support
2015-09-24 22:31 [PATCH v3 00/20] KVM: ARM64: Add guest PMU support Shannon Zhao
` (19 preceding siblings ...)
2015-09-24 22:31 ` [PATCH v3 20/20] KVM: ARM64: Add a new kvm ARM PMU device Shannon Zhao
@ 2015-10-16 4:55 ` Wei Huang
2015-10-16 17:01 ` Christopher Covington
2015-10-26 11:33 ` Christoffer Dall
21 siblings, 1 reply; 37+ messages in thread
From: Wei Huang @ 2015-10-16 4:55 UTC (permalink / raw)
To: linux-arm-kernel
On 09/24/2015 05:31 PM, Shannon Zhao wrote:
> This patchset adds guest PMU support for KVM on ARM64. It takes
> trap-and-emulate approach. When guest wants to monitor one event, it
> will be trapped by KVM and KVM will call perf_event API to create a perf
> event and call relevant perf_event APIs to get the count value of event.
>
> Use perf to test this patchset in guest. When using "perf list", it
> shows the list of the hardware events and hardware cache events perf
> supports. Then use "perf stat -e EVENT" to monitor some event. For
> example, use "perf stat -e cycles" to count cpu cycles and
> "perf stat -e cache-misses" to count cache misses.
>
> Below are the outputs of "perf stat -r 5 sleep 5" when running in host
> and guest.
>
> Host:
> Performance counter stats for 'sleep 5' (5 runs):
>
> 0.551428 task-clock (msec) # 0.000 CPUs utilized ( +- 0.91% )
> 1 context-switches # 0.002 M/sec
> 0 cpu-migrations # 0.000 K/sec
> 48 page-faults # 0.088 M/sec ( +- 1.05% )
> 1150265 cycles # 2.086 GHz ( +- 0.92% )
> <not supported> stalled-cycles-frontend
> <not supported> stalled-cycles-backend
> 526398 instructions # 0.46 insns per cycle ( +- 0.89% )
> <not supported> branches
> 9485 branch-misses # 17.201 M/sec ( +- 2.35% )
>
> 5.000831616 seconds time elapsed ( +- 0.00% )
>
> Guest:
> Performance counter stats for 'sleep 5' (5 runs):
>
> 0.730868 task-clock (msec) # 0.000 CPUs utilized ( +- 1.13% )
> 1 context-switches # 0.001 M/sec
> 0 cpu-migrations # 0.000 K/sec
> 48 page-faults # 0.065 M/sec ( +- 0.42% )
> 1642982 cycles # 2.248 GHz ( +- 1.04% )
> <not supported> stalled-cycles-frontend
> <not supported> stalled-cycles-backend
> 637964 instructions # 0.39 insns per cycle ( +- 0.65% )
> <not supported> branches
> 10377 branch-misses # 14.198 M/sec ( +- 1.09% )
>
> 5.001289068 seconds time elapsed ( +- 0.00% )
>
Thanks for V3. One suggestion is to run more perf stress tests, such as
"perf test", so that we know the corner cases are covered as much as possible.
> This patchset can be fetched from [1] and the relevant QEMU version for
> test can be fetched from [2].
>
> Thanks,
> Shannon
>
> [1] https://git.linaro.org/people/shannon.zhao/linux-mainline.git KVM_ARM64_PMU_v3
> [2] https://git.linaro.org/people/shannon.zhao/qemu.git PMU_v2
>
> Changes since v2->v3:
> * Directly use perf raw event type to create perf_event in KVM
> * Add a helper vcpu_sysreg_write
> * remove unrelated header file
>
> Changes since v1->v2:
> * Use switch...case for registers access handler instead of adding
> alone handler for each register
> * Try to use the sys_regs to store the register value instead of adding
> new variables in struct kvm_pmc
> * Fix the handle of cp15 regs
> * Create a new kvm device vPMU, then userspace could choose whether to
> create PMU
> * Fix the handle of PMU overflow interrupt
>
> Shannon Zhao (20):
> ARM64: Move PMU register related defines to asm/pmu.h
> KVM: ARM64: Define PMU data structure for each vcpu
> KVM: ARM64: Add offset defines for PMU registers
> KVM: ARM64: Add reset and access handlers for PMCR_EL0 register
> KVM: ARM64: Add reset and access handlers for PMSELR register
> KVM: ARM64: Add reset and access handlers for PMCEID0 and PMCEID1
> register
> KVM: ARM64: PMU: Add perf event map and introduce perf event creating
> function
> KVM: ARM64: Add reset and access handlers for PMXEVTYPER register
> KVM: ARM64: Add reset and access handlers for PMXEVCNTR register
> KVM: ARM64: Add reset and access handlers for PMCCNTR register
> KVM: ARM64: Add reset and access handlers for PMCNTENSET and
> PMCNTENCLR register
> KVM: ARM64: Add reset and access handlers for PMINTENSET and
> PMINTENCLR register
> KVM: ARM64: Add reset and access handlers for PMOVSSET and PMOVSCLR
> register
> KVM: ARM64: Add reset and access handlers for PMUSERENR register
> KVM: ARM64: Add reset and access handlers for PMSWINC register
> KVM: ARM64: Add access handlers for PMEVCNTRn and PMEVTYPERn register
> KVM: ARM64: Add PMU overflow interrupt routing
> KVM: ARM64: Reset PMU state when resetting vcpu
> KVM: ARM64: Free perf event of PMU when destroying vcpu
> KVM: ARM64: Add a new kvm ARM PMU device
>
> Documentation/virtual/kvm/devices/arm-pmu.txt | 15 +
> arch/arm/kvm/arm.c | 5 +
> arch/arm64/include/asm/kvm_asm.h | 59 +++-
> arch/arm64/include/asm/kvm_host.h | 2 +
> arch/arm64/include/asm/pmu.h | 47 +++
> arch/arm64/include/uapi/asm/kvm.h | 3 +
> arch/arm64/kernel/perf_event.c | 35 --
> arch/arm64/kvm/Kconfig | 8 +
> arch/arm64/kvm/Makefile | 1 +
> arch/arm64/kvm/reset.c | 3 +
> arch/arm64/kvm/sys_regs.c | 488 ++++++++++++++++++++++++--
> arch/arm64/kvm/sys_regs.h | 16 +
> include/kvm/arm_pmu.h | 65 ++++
> include/linux/kvm_host.h | 1 +
> include/uapi/linux/kvm.h | 2 +
> virt/kvm/arm/pmu.c | 414 ++++++++++++++++++++++
> virt/kvm/kvm_main.c | 4 +
> 17 files changed, 1098 insertions(+), 70 deletions(-)
> create mode 100644 Documentation/virtual/kvm/devices/arm-pmu.txt
> create mode 100644 include/kvm/arm_pmu.h
> create mode 100644 virt/kvm/arm/pmu.c
>
* [PATCH v3 00/20] KVM: ARM64: Add guest PMU support
2015-10-16 4:55 ` [PATCH v3 00/20] KVM: ARM64: Add guest PMU support Wei Huang
@ 2015-10-16 17:01 ` Christopher Covington
2015-10-21 7:26 ` Shannon Zhao
0 siblings, 1 reply; 37+ messages in thread
From: Christopher Covington @ 2015-10-16 17:01 UTC (permalink / raw)
To: linux-arm-kernel
On 10/16/2015 12:55 AM, Wei Huang wrote:
>
>
> On 09/24/2015 05:31 PM, Shannon Zhao wrote:
>> This patchset adds guest PMU support for KVM on ARM64. It takes
>> trap-and-emulate approach. When guest wants to monitor one event, it
>> will be trapped by KVM and KVM will call perf_event API to create a perf
>> event and call relevant perf_event APIs to get the count value of event.
>>
>> Use perf to test this patchset in guest. When using "perf list", it
>> shows the list of the hardware events and hardware cache events perf
>> supports. Then use "perf stat -e EVENT" to monitor some event. For
>> example, use "perf stat -e cycles" to count cpu cycles and
>> "perf stat -e cache-misses" to count cache misses.
>>
>> Below are the outputs of "perf stat -r 5 sleep 5" when running in host
>> and guest.
>>
>> Host:
>> Performance counter stats for 'sleep 5' (5 runs):
>>
>> 0.551428 task-clock (msec) # 0.000 CPUs utilized ( +- 0.91% )
>> 1 context-switches # 0.002 M/sec
>> 0 cpu-migrations # 0.000 K/sec
>> 48 page-faults # 0.088 M/sec ( +- 1.05% )
>> 1150265 cycles # 2.086 GHz ( +- 0.92% )
>> <not supported> stalled-cycles-frontend
>> <not supported> stalled-cycles-backend
>> 526398 instructions # 0.46 insns per cycle ( +- 0.89% )
>> <not supported> branches
>> 9485 branch-misses # 17.201 M/sec ( +- 2.35% )
>>
>> 5.000831616 seconds time elapsed ( +- 0.00% )
>>
>> Guest:
>> Performance counter stats for 'sleep 5' (5 runs):
>>
>> 0.730868 task-clock (msec) # 0.000 CPUs utilized ( +- 1.13% )
>> 1 context-switches # 0.001 M/sec
>> 0 cpu-migrations # 0.000 K/sec
>> 48 page-faults # 0.065 M/sec ( +- 0.42% )
>> 1642982 cycles # 2.248 GHz ( +- 1.04% )
>> <not supported> stalled-cycles-frontend
>> <not supported> stalled-cycles-backend
>> 637964 instructions # 0.39 insns per cycle ( +- 0.65% )
>> <not supported> branches
>> 10377 branch-misses # 14.198 M/sec ( +- 1.09% )
>>
>> 5.001289068 seconds time elapsed ( +- 0.00% )
>>
>
> Thanks for V3. One suggestion is to run more perf stress tests, such as
> "perf test", so that we know the corner cases are covered as much as possible.
I'd also recommend Vince Weaver's perf_event_tests. It tests things like
signal-on-counter-overflow that I've never seen anywhere else (other than some
of my own code).
https://github.com/deater/perf_event_tests
Christopher Covington
--
Qualcomm Innovation Center, Inc.
The Qualcomm Innovation Center, Inc. is a member of the Code Aurora Forum,
a Linux Foundation Collaborative Project
* [PATCH v3 00/20] KVM: ARM64: Add guest PMU support
2015-10-16 17:01 ` Christopher Covington
@ 2015-10-21 7:26 ` Shannon Zhao
0 siblings, 0 replies; 37+ messages in thread
From: Shannon Zhao @ 2015-10-21 7:26 UTC (permalink / raw)
To: linux-arm-kernel
On 2015/10/17 1:01, Christopher Covington wrote:
> On 10/16/2015 12:55 AM, Wei Huang wrote:
>> >
>> >
>> > On 09/24/2015 05:31 PM, Shannon Zhao wrote:
>>> >> This patchset adds guest PMU support for KVM on ARM64. It takes
>>> >> trap-and-emulate approach. When guest wants to monitor one event, it
>>> >> will be trapped by KVM and KVM will call perf_event API to create a perf
>>> >> event and call relevant perf_event APIs to get the count value of event.
>>> >>
>>> >> Use perf to test this patchset in guest. When using "perf list", it
>>> >> shows the list of the hardware events and hardware cache events perf
>>> >> supports. Then use "perf stat -e EVENT" to monitor some event. For
>>> >> example, use "perf stat -e cycles" to count cpu cycles and
>>> >> "perf stat -e cache-misses" to count cache misses.
>>> >>
>>> >> Below are the outputs of "perf stat -r 5 sleep 5" when running in host
>>> >> and guest.
>>> >>
>>> >> Host:
>>> >> Performance counter stats for 'sleep 5' (5 runs):
>>> >>
>>> >> 0.551428 task-clock (msec) # 0.000 CPUs utilized ( +- 0.91% )
>>> >> 1 context-switches # 0.002 M/sec
>>> >> 0 cpu-migrations # 0.000 K/sec
>>> >> 48 page-faults # 0.088 M/sec ( +- 1.05% )
>>> >> 1150265 cycles # 2.086 GHz ( +- 0.92% )
>>> >> <not supported> stalled-cycles-frontend
>>> >> <not supported> stalled-cycles-backend
>>> >> 526398 instructions # 0.46 insns per cycle ( +- 0.89% )
>>> >> <not supported> branches
>>> >> 9485 branch-misses # 17.201 M/sec ( +- 2.35% )
>>> >>
>>> >> 5.000831616 seconds time elapsed ( +- 0.00% )
>>> >>
>>> >> Guest:
>>> >> Performance counter stats for 'sleep 5' (5 runs):
>>> >>
>>> >> 0.730868 task-clock (msec) # 0.000 CPUs utilized ( +- 1.13% )
>>> >> 1 context-switches # 0.001 M/sec
>>> >> 0 cpu-migrations # 0.000 K/sec
>>> >> 48 page-faults # 0.065 M/sec ( +- 0.42% )
>>> >> 1642982 cycles # 2.248 GHz ( +- 1.04% )
>>> >> <not supported> stalled-cycles-frontend
>>> >> <not supported> stalled-cycles-backend
>>> >> 637964 instructions # 0.39 insns per cycle ( +- 0.65% )
>>> >> <not supported> branches
>>> >> 10377 branch-misses # 14.198 M/sec ( +- 1.09% )
>>> >>
>>> >> 5.001289068 seconds time elapsed ( +- 0.00% )
>>> >>
>> >
>> > Thanks for V3. One suggestion is to run more perf stress tests, such as
>> > "perf test", so that we know the corner cases are covered as much as possible.
> I'd also recommend Vince Weaver's perf_event_tests. It tests things like
> signal-on-counter-overflow that I've never seen anywhere else (other than some
> of my own code).
>
> https://github.com/deater/perf_event_tests
Ok. Thanks for your suggestion.
--
Shannon
* [PATCH v3 00/20] KVM: ARM64: Add guest PMU support
2015-09-24 22:31 [PATCH v3 00/20] KVM: ARM64: Add guest PMU support Shannon Zhao
` (20 preceding siblings ...)
2015-10-16 4:55 ` [PATCH v3 00/20] KVM: ARM64: Add guest PMU support Wei Huang
@ 2015-10-26 11:33 ` Christoffer Dall
2015-10-27 1:15 ` Shannon Zhao
21 siblings, 1 reply; 37+ messages in thread
From: Christoffer Dall @ 2015-10-26 11:33 UTC (permalink / raw)
To: linux-arm-kernel
On Thu, Sep 24, 2015 at 03:31:05PM -0700, Shannon Zhao wrote:
> This patchset adds guest PMU support for KVM on ARM64. It takes
> trap-and-emulate approach. When guest wants to monitor one event, it
> will be trapped by KVM and KVM will call perf_event API to create a perf
> event and call relevant perf_event APIs to get the count value of event.
>
> Use perf to test this patchset in guest. When using "perf list", it
> shows the list of the hardware events and hardware cache events perf
> supports. Then use "perf stat -e EVENT" to monitor some event. For
> example, use "perf stat -e cycles" to count cpu cycles and
> "perf stat -e cache-misses" to count cache misses.
>
> Below are the outputs of "perf stat -r 5 sleep 5" when running in host
> and guest.
>
> Host:
> Performance counter stats for 'sleep 5' (5 runs):
>
> 0.551428 task-clock (msec) # 0.000 CPUs utilized ( +- 0.91% )
> 1 context-switches # 0.002 M/sec
> 0 cpu-migrations # 0.000 K/sec
> 48 page-faults # 0.088 M/sec ( +- 1.05% )
> 1150265 cycles # 2.086 GHz ( +- 0.92% )
> <not supported> stalled-cycles-frontend
> <not supported> stalled-cycles-backend
> 526398 instructions # 0.46 insns per cycle ( +- 0.89% )
> <not supported> branches
> 9485 branch-misses # 17.201 M/sec ( +- 2.35% )
>
> 5.000831616 seconds time elapsed ( +- 0.00% )
>
> Guest:
> Performance counter stats for 'sleep 5' (5 runs):
>
> 0.730868 task-clock (msec) # 0.000 CPUs utilized ( +- 1.13% )
> 1 context-switches # 0.001 M/sec
> 0 cpu-migrations # 0.000 K/sec
> 48 page-faults # 0.065 M/sec ( +- 0.42% )
> 1642982 cycles # 2.248 GHz ( +- 1.04% )
> <not supported> stalled-cycles-frontend
> <not supported> stalled-cycles-backend
> 637964 instructions # 0.39 insns per cycle ( +- 0.65% )
> <not supported> branches
> 10377 branch-misses # 14.198 M/sec ( +- 1.09% )
>
> 5.001289068 seconds time elapsed ( +- 0.00% )
This looks pretty cool!
I'll review your next patch set version in more detail.
Have you tried running a no-op cycle counter read test in the guest and
in the host?
Basically something like:
static void nop(void *junk)
{
}
static void test_nop(void)
{
unsigned long before,after;
before = read_cycles();
isb();
nop(NULL);
isb();
after = read_cycles();
}
I would be very curious to see if we get a ~6000 cycles overhead in the
guest compared to bare-metal, which I expect.
If we do, we should consider a hot-path in the EL2 assembly code to
read the cycle counter to reduce the overhead to something more precise.
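For what it's worth, a self-contained version of that microbenchmark could
look like the sketch below (assuming PMCCNTR_EL0 is readable at the exception
level the test runs at; read_cycles() is just an MRS of the cycle counter and
is not an existing API):

#include <stdio.h>

static inline unsigned long read_cycles(void)
{
        unsigned long val;

        /* Read the PMU cycle counter; in a guest this access may trap to KVM. */
        asm volatile("mrs %0, pmccntr_el0" : "=r" (val));
        return val;
}

static void nop(void *junk)
{
}

static void test_nop(void)
{
        unsigned long before, after;

        before = read_cycles();
        asm volatile("isb" ::: "memory");
        nop(NULL);
        asm volatile("isb" ::: "memory");
        after = read_cycles();

        printf("nop plus counter reads: %lu cycles\n", after - before);
}

int main(void)
{
        test_nop();
        return 0;
}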
Thanks,
-Christoffer
>
> This patchset can be fetched from [1] and the relevant QEMU version for
> test can be fetched from [2].
>
> Thanks,
> Shannon
>
> [1] https://git.linaro.org/people/shannon.zhao/linux-mainline.git KVM_ARM64_PMU_v3
> [2] https://git.linaro.org/people/shannon.zhao/qemu.git PMU_v2
>
> Changes since v2->v3:
> * Directly use perf raw event type to create perf_event in KVM
> * Add a helper vcpu_sysreg_write
> * remove unrelated header file
>
> Changes since v1->v2:
> * Use switch...case for registers access handler instead of adding
> alone handler for each register
> * Try to use the sys_regs to store the register value instead of adding
> new variables in struct kvm_pmc
> * Fix the handle of cp15 regs
> * Create a new kvm device vPMU, then userspace could choose whether to
> create PMU
> * Fix the handle of PMU overflow interrupt
>
> Shannon Zhao (20):
> ARM64: Move PMU register related defines to asm/pmu.h
> KVM: ARM64: Define PMU data structure for each vcpu
> KVM: ARM64: Add offset defines for PMU registers
> KVM: ARM64: Add reset and access handlers for PMCR_EL0 register
> KVM: ARM64: Add reset and access handlers for PMSELR register
> KVM: ARM64: Add reset and access handlers for PMCEID0 and PMCEID1
> register
> KVM: ARM64: PMU: Add perf event map and introduce perf event creating
> function
> KVM: ARM64: Add reset and access handlers for PMXEVTYPER register
> KVM: ARM64: Add reset and access handlers for PMXEVCNTR register
> KVM: ARM64: Add reset and access handlers for PMCCNTR register
> KVM: ARM64: Add reset and access handlers for PMCNTENSET and
> PMCNTENCLR register
> KVM: ARM64: Add reset and access handlers for PMINTENSET and
> PMINTENCLR register
> KVM: ARM64: Add reset and access handlers for PMOVSSET and PMOVSCLR
> register
> KVM: ARM64: Add reset and access handlers for PMUSERENR register
> KVM: ARM64: Add reset and access handlers for PMSWINC register
> KVM: ARM64: Add access handlers for PMEVCNTRn and PMEVTYPERn register
> KVM: ARM64: Add PMU overflow interrupt routing
> KVM: ARM64: Reset PMU state when resetting vcpu
> KVM: ARM64: Free perf event of PMU when destroying vcpu
> KVM: ARM64: Add a new kvm ARM PMU device
>
> Documentation/virtual/kvm/devices/arm-pmu.txt | 15 +
> arch/arm/kvm/arm.c | 5 +
> arch/arm64/include/asm/kvm_asm.h | 59 +++-
> arch/arm64/include/asm/kvm_host.h | 2 +
> arch/arm64/include/asm/pmu.h | 47 +++
> arch/arm64/include/uapi/asm/kvm.h | 3 +
> arch/arm64/kernel/perf_event.c | 35 --
> arch/arm64/kvm/Kconfig | 8 +
> arch/arm64/kvm/Makefile | 1 +
> arch/arm64/kvm/reset.c | 3 +
> arch/arm64/kvm/sys_regs.c | 488 ++++++++++++++++++++++++--
> arch/arm64/kvm/sys_regs.h | 16 +
> include/kvm/arm_pmu.h | 65 ++++
> include/linux/kvm_host.h | 1 +
> include/uapi/linux/kvm.h | 2 +
> virt/kvm/arm/pmu.c | 414 ++++++++++++++++++++++
> virt/kvm/kvm_main.c | 4 +
> 17 files changed, 1098 insertions(+), 70 deletions(-)
> create mode 100644 Documentation/virtual/kvm/devices/arm-pmu.txt
> create mode 100644 include/kvm/arm_pmu.h
> create mode 100644 virt/kvm/arm/pmu.c
>
> --
> 2.1.4
>
* [PATCH v3 00/20] KVM: ARM64: Add guest PMU support
2015-10-26 11:33 ` Christoffer Dall
@ 2015-10-27 1:15 ` Shannon Zhao
0 siblings, 0 replies; 37+ messages in thread
From: Shannon Zhao @ 2015-10-27 1:15 UTC (permalink / raw)
To: linux-arm-kernel
On 2015/10/26 19:33, Christoffer Dall wrote:
> On Thu, Sep 24, 2015 at 03:31:05PM -0700, Shannon Zhao wrote:
>> This patchset adds guest PMU support for KVM on ARM64. It takes
>> trap-and-emulate approach. When guest wants to monitor one event, it
>> will be trapped by KVM and KVM will call perf_event API to create a perf
>> event and call relevant perf_event APIs to get the count value of event.
>>
>> Use perf to test this patchset in guest. When using "perf list", it
>> shows the list of the hardware events and hardware cache events perf
>> supports. Then use "perf stat -e EVENT" to monitor some event. For
>> example, use "perf stat -e cycles" to count cpu cycles and
>> "perf stat -e cache-misses" to count cache misses.
>>
>> Below are the outputs of "perf stat -r 5 sleep 5" when running in host
>> and guest.
>>
>> Host:
>> Performance counter stats for 'sleep 5' (5 runs):
>>
>> 0.551428 task-clock (msec) # 0.000 CPUs utilized ( +- 0.91% )
>> 1 context-switches # 0.002 M/sec
>> 0 cpu-migrations # 0.000 K/sec
>> 48 page-faults # 0.088 M/sec ( +- 1.05% )
>> 1150265 cycles # 2.086 GHz ( +- 0.92% )
>> <not supported> stalled-cycles-frontend
>> <not supported> stalled-cycles-backend
>> 526398 instructions # 0.46 insns per cycle ( +- 0.89% )
>> <not supported> branches
>> 9485 branch-misses # 17.201 M/sec ( +- 2.35% )
>>
>> 5.000831616 seconds time elapsed ( +- 0.00% )
>>
>> Guest:
>> Performance counter stats for 'sleep 5' (5 runs):
>>
>> 0.730868 task-clock (msec) # 0.000 CPUs utilized ( +- 1.13% )
>> 1 context-switches # 0.001 M/sec
>> 0 cpu-migrations # 0.000 K/sec
>> 48 page-faults # 0.065 M/sec ( +- 0.42% )
>> 1642982 cycles # 2.248 GHz ( +- 1.04% )
>> <not supported> stalled-cycles-frontend
>> <not supported> stalled-cycles-backend
>> 637964 instructions # 0.39 insns per cycle ( +- 0.65% )
>> <not supported> branches
>> 10377 branch-misses # 14.198 M/sec ( +- 1.09% )
>>
>> 5.001289068 seconds time elapsed ( +- 0.00% )
>
> This looks pretty cool!
>
> I'll review your next patch set version in more detail.
>
> Have you tried running a no-op cycle counter read test in the guest and
> in the host?
>
> Basically something like:
>
> static void nop(void *junk)
> {
> }
>
> static void test_nop(void)
> {
> unsigned long before,after;
> before = read_cycles();
> isb();
> nop(NULL);
> isb();
> after = read_cycles();
> }
>
> I would be very curious to see if we get a ~6000 cycles overhead in the
> guest compared to bare-metal, which I expect.
>
Ok, I'll try this while I'm doing more tests on v4.
> If we do, we should consider a hot-path in the EL2 assembly code to
> read the cycle counter to reduce the overhead to something more precise.
>
--
Shannon