* [PATCH v4 01/18] KVM: arm64: Extend masking facility to arbitrary registers
2024-10-25 18:23 [PATCH v4 00/18] KVM: arm64: nv: Support for EL2 PMU controls Oliver Upton
@ 2024-10-25 18:23 ` Oliver Upton
2024-10-25 18:23 ` [PATCH v4 02/18] arm64: sysreg: Describe ID_AA64DFR2_EL1 fields Oliver Upton
` (18 subsequent siblings)
19 siblings, 0 replies; 27+ messages in thread
From: Oliver Upton @ 2024-10-25 18:23 UTC (permalink / raw)
To: kvmarm
Cc: Marc Zyngier, Joey Gouly, Suzuki K Poulose, Zenghui Yu,
Catalin Marinas, Will Deacon, Anshuman Khandual, linux-arm-kernel,
linux-kernel, Oliver Upton
From: Marc Zyngier <maz@kernel.org>
We currently only use the masking (RES0/RES1) facility for VNCR
registers, as they are memory-based and thus easy to sanitise.
But we could apply the same thing to other registers if we:
- split the sanitisation from __VNCR_START__
- apply the sanitisation when reading from a HW register
This involves a new "marker" in the vcpu_sysreg enum, which
defines the point at which the sanitisation applies (the VNCR
registers being of course after this marker).
While we are at it, rename kvm_vcpu_sanitise_vncr_reg() to
kvm_vcpu_apply_reg_masks(), which is vaguely more explicit,
and harden set_sysreg_masks() against setting masks for
random registers...
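As an aside, here is a minimal user-space sketch (illustrative only, not
kernel code) of what the MARKER() trick does: the marker takes the next
enum value, and the hidden __after_ member winds the counter back so the
following register aliases the marker instead of burning a slot.

  #include <stdio.h>

  #define MARKER(m)  m, __after_##m = m - 1

  enum demo {
      REG_A,                          /* 0 */
      MARKER(__SANITISED_START__),    /* 1, then rewinds the counter */
      REG_B,                          /* also 1: first sanitisable reg */
      REG_C,                          /* 2 */
      NR_REGS                         /* 3: the marker cost no slot */
  };

  int main(void)
  {
      printf("%d %d %d %d\n", __SANITISED_START__, REG_B, REG_C, NR_REGS);
      return 0;                       /* prints: 1 1 2 3 */
  }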
Signed-off-by: Marc Zyngier <maz@kernel.org>
Reviewed-by: Joey Gouly <joey.gouly@arm.com>
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
---
arch/arm64/include/asm/kvm_host.h | 19 +++++++++++++------
arch/arm64/kvm/nested.c | 12 ++++++++----
arch/arm64/kvm/sys_regs.c | 3 +++
3 files changed, 24 insertions(+), 10 deletions(-)
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 94cff508874b..07c7d2b7b3a3 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -367,7 +367,7 @@ struct kvm_arch {
u64 ctr_el0;
- /* Masks for VNCR-baked sysregs */
+ /* Masks for VNCR-backed and general EL2 sysregs */
struct kvm_sysreg_masks *sysreg_masks;
/*
@@ -401,6 +401,9 @@ struct kvm_vcpu_fault_info {
r = __VNCR_START__ + ((VNCR_ ## r) / 8), \
__after_##r = __MAX__(__before_##r - 1, r)
+#define MARKER(m) \
+ m, __after_##m = m - 1
+
enum vcpu_sysreg {
__INVALID_SYSREG__, /* 0 is reserved as an invalid value */
MPIDR_EL1, /* MultiProcessor Affinity Register */
@@ -487,7 +490,11 @@ enum vcpu_sysreg {
CNTHV_CTL_EL2,
CNTHV_CVAL_EL2,
- __VNCR_START__, /* Any VNCR-capable reg goes after this point */
+ /* Anything from this can be RES0/RES1 sanitised */
+ MARKER(__SANITISED_REG_START__),
+
+ /* Any VNCR-capable reg goes after this point */
+ MARKER(__VNCR_START__),
VNCR(SCTLR_EL1),/* System Control Register */
VNCR(ACTLR_EL1),/* Auxiliary Control Register */
@@ -547,7 +554,7 @@ struct kvm_sysreg_masks {
struct {
u64 res0;
u64 res1;
- } mask[NR_SYS_REGS - __VNCR_START__];
+ } mask[NR_SYS_REGS - __SANITISED_REG_START__];
};
struct kvm_cpu_context {
@@ -995,13 +1002,13 @@ static inline u64 *___ctxt_sys_reg(const struct kvm_cpu_context *ctxt, int r)
#define ctxt_sys_reg(c,r) (*__ctxt_sys_reg(c,r))
-u64 kvm_vcpu_sanitise_vncr_reg(const struct kvm_vcpu *, enum vcpu_sysreg);
+u64 kvm_vcpu_apply_reg_masks(const struct kvm_vcpu *, enum vcpu_sysreg, u64);
#define __vcpu_sys_reg(v,r) \
(*({ \
const struct kvm_cpu_context *ctxt = &(v)->arch.ctxt; \
u64 *__r = __ctxt_sys_reg(ctxt, (r)); \
- if (vcpu_has_nv((v)) && (r) >= __VNCR_START__) \
- *__r = kvm_vcpu_sanitise_vncr_reg((v), (r)); \
+ if (vcpu_has_nv((v)) && (r) >= __SANITISED_REG_START__) \
+ *__r = kvm_vcpu_apply_reg_masks((v), (r), *__r);\
__r; \
}))
diff --git a/arch/arm64/kvm/nested.c b/arch/arm64/kvm/nested.c
index f9e30dd34c7a..b20b3bfb9cae 100644
--- a/arch/arm64/kvm/nested.c
+++ b/arch/arm64/kvm/nested.c
@@ -908,15 +908,15 @@ static void limit_nv_id_regs(struct kvm *kvm)
kvm_set_vm_id_reg(kvm, SYS_ID_AA64DFR0_EL1, val);
}
-u64 kvm_vcpu_sanitise_vncr_reg(const struct kvm_vcpu *vcpu, enum vcpu_sysreg sr)
+u64 kvm_vcpu_apply_reg_masks(const struct kvm_vcpu *vcpu,
+ enum vcpu_sysreg sr, u64 v)
{
- u64 v = ctxt_sys_reg(&vcpu->arch.ctxt, sr);
struct kvm_sysreg_masks *masks;
masks = vcpu->kvm->arch.sysreg_masks;
if (masks) {
- sr -= __VNCR_START__;
+ sr -= __SANITISED_REG_START__;
v &= ~masks->mask[sr].res0;
v |= masks->mask[sr].res1;
@@ -927,7 +927,11 @@ u64 kvm_vcpu_sanitise_vncr_reg(const struct kvm_vcpu *vcpu, enum vcpu_sysreg sr)
static void set_sysreg_masks(struct kvm *kvm, int sr, u64 res0, u64 res1)
{
- int i = sr - __VNCR_START__;
+ int i = sr - __SANITISED_REG_START__;
+
+ BUILD_BUG_ON(!__builtin_constant_p(sr));
+ BUILD_BUG_ON(sr < __SANITISED_REG_START__);
+ BUILD_BUG_ON(sr >= NR_SYS_REGS);
kvm->arch.sysreg_masks->mask[i].res0 = res0;
kvm->arch.sysreg_masks->mask[i].res1 = res1;
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index dad88e31f953..30fe940cd5bd 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -165,6 +165,9 @@ u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg)
/* Get the current version of the EL1 counterpart. */
WARN_ON(!__vcpu_read_sys_reg_from_cpu(el1r, &val));
+ if (reg >= __SANITISED_REG_START__)
+ val = kvm_vcpu_apply_reg_masks(vcpu, reg, val);
+
return val;
}
--
2.47.0.163.g1226f6d8fa-goog
* [PATCH v4 02/18] arm64: sysreg: Describe ID_AA64DFR2_EL1 fields
2024-10-25 18:23 [PATCH v4 00/18] KVM: arm64: nv: Support for EL2 PMU controls Oliver Upton
2024-10-25 18:23 ` [PATCH v4 01/18] KVM: arm64: Extend masking facility to arbitrary registers Oliver Upton
@ 2024-10-25 18:23 ` Oliver Upton
2024-10-25 18:23 ` [PATCH v4 03/18] arm64: sysreg: Migrate MDCR_EL2 definition to table Oliver Upton
` (17 subsequent siblings)
19 siblings, 0 replies; 27+ messages in thread
From: Oliver Upton @ 2024-10-25 18:23 UTC (permalink / raw)
To: kvmarm
Cc: Marc Zyngier, Joey Gouly, Suzuki K Poulose, Zenghui Yu,
Catalin Marinas, Will Deacon, Anshuman Khandual, linux-arm-kernel,
linux-kernel, Oliver Upton
Describe the new ID register in line with DDI0601 2024-09.
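As a rough sketch of why this matters for the rest of the series: the
sysreg tooling (gen-sysreg.awk) turns each field and enum value below
into C definitions, roughly of the following shape (names assumed from
the usual generator pattern, not copied from its output), which is what
lets a later patch test kvm_has_feat(kvm, ID_AA64DFR2_EL1, STEP, IMP):

  #define ID_AA64DFR2_EL1_STEP_SHIFT  0
  #define ID_AA64DFR2_EL1_STEP_MASK   GENMASK(3, 0)
  #define ID_AA64DFR2_EL1_STEP_NI     UL(0b0000)
  #define ID_AA64DFR2_EL1_STEP_IMP    UL(0b0001)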
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
---
arch/arm64/tools/sysreg | 26 ++++++++++++++++++++++++++
1 file changed, 26 insertions(+)
diff --git a/arch/arm64/tools/sysreg b/arch/arm64/tools/sysreg
index 8d637ac4b7c6..5670e51a6f90 100644
--- a/arch/arm64/tools/sysreg
+++ b/arch/arm64/tools/sysreg
@@ -1287,6 +1287,32 @@ Field 15:8 BRPs
Field 7:0 SYSPMUID
EndSysreg
+Sysreg ID_AA64DFR2_EL1 3 0 0 5 2
+Res0 63:28
+UnsignedEnum 27:24 TRBE_EXC
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+UnsignedEnum 23:20 SPE_nVM
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+UnsignedEnum 19:16 SPE_EXC
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+Res0 15:8
+UnsignedEnum 7:4 BWE
+ 0b0000 NI
+ 0b0001 FEAT_BWE
+ 0b0010 FEAT_BWE2
+EndEnum
+UnsignedEnum 3:0 STEP
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
+EndSysreg
+
Sysreg ID_AA64AFR0_EL1 3 0 0 5 4
Res0 63:32
Field 31:28 IMPDEF7
--
2.47.0.163.g1226f6d8fa-goog
* [PATCH v4 03/18] arm64: sysreg: Migrate MDCR_EL2 definition to table
2024-10-25 18:23 [PATCH v4 00/18] KVM: arm64: nv: Support for EL2 PMU controls Oliver Upton
2024-10-25 18:23 ` [PATCH v4 01/18] KVM: arm64: Extend masking facility to arbitrary registers Oliver Upton
2024-10-25 18:23 ` [PATCH v4 02/18] arm64: sysreg: Describe ID_AA64DFR2_EL1 fields Oliver Upton
@ 2024-10-25 18:23 ` Oliver Upton
2024-10-25 18:23 ` [PATCH v4 04/18] arm64: sysreg: Add new definitions for ID_AA64DFR0_EL1 Oliver Upton
` (16 subsequent siblings)
19 siblings, 0 replies; 27+ messages in thread
From: Oliver Upton @ 2024-10-25 18:23 UTC (permalink / raw)
To: kvmarm
Cc: Marc Zyngier, Joey Gouly, Suzuki K Poulose, Zenghui Yu,
Catalin Marinas, Will Deacon, Anshuman Khandual, linux-arm-kernel,
linux-kernel, Oliver Upton
Migrate MDCR_EL2 over to the sysreg table and align definitions with
DDI0601 2024-09.
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
---
arch/arm64/include/asm/kvm_arm.h | 29 --------------------------
arch/arm64/tools/sysreg | 35 ++++++++++++++++++++++++++++++++
2 files changed, 35 insertions(+), 29 deletions(-)
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index 109a85ee6910..fb8d15f299a4 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -311,35 +311,6 @@
GENMASK(19, 18) | \
GENMASK(15, 0))
-/* Hyp Debug Configuration Register bits */
-#define MDCR_EL2_E2TB_MASK (UL(0x3))
-#define MDCR_EL2_E2TB_SHIFT (UL(24))
-#define MDCR_EL2_HPMFZS (UL(1) << 36)
-#define MDCR_EL2_HPMFZO (UL(1) << 29)
-#define MDCR_EL2_MTPME (UL(1) << 28)
-#define MDCR_EL2_TDCC (UL(1) << 27)
-#define MDCR_EL2_HLP (UL(1) << 26)
-#define MDCR_EL2_HCCD (UL(1) << 23)
-#define MDCR_EL2_TTRF (UL(1) << 19)
-#define MDCR_EL2_HPMD (UL(1) << 17)
-#define MDCR_EL2_TPMS (UL(1) << 14)
-#define MDCR_EL2_E2PB_MASK (UL(0x3))
-#define MDCR_EL2_E2PB_SHIFT (UL(12))
-#define MDCR_EL2_TDRA (UL(1) << 11)
-#define MDCR_EL2_TDOSA (UL(1) << 10)
-#define MDCR_EL2_TDA (UL(1) << 9)
-#define MDCR_EL2_TDE (UL(1) << 8)
-#define MDCR_EL2_HPME (UL(1) << 7)
-#define MDCR_EL2_TPM (UL(1) << 6)
-#define MDCR_EL2_TPMCR (UL(1) << 5)
-#define MDCR_EL2_HPMN_MASK (UL(0x1F))
-#define MDCR_EL2_RES0 (GENMASK(63, 37) | \
- GENMASK(35, 30) | \
- GENMASK(25, 24) | \
- GENMASK(22, 20) | \
- BIT(18) | \
- GENMASK(16, 15))
-
/*
* FGT register definitions
*
diff --git a/arch/arm64/tools/sysreg b/arch/arm64/tools/sysreg
index 5670e51a6f90..7061cf2d8444 100644
--- a/arch/arm64/tools/sysreg
+++ b/arch/arm64/tools/sysreg
@@ -2414,6 +2414,41 @@ Field 1 AFSR1_EL1
Field 0 AFSR0_EL1
EndSysregFields
+Sysreg MDCR_EL2 3 4 1 1 1
+Res0 63:51
+Field 50 EnSTEPOP
+Res0 49:44
+Field 43 EBWE
+Res0 42
+Field 41:40 PMEE
+Res0 39:37
+Field 36 HPMFZS
+Res0 35:32
+Field 31:30 PMSSE
+Field 29 HPMFZO
+Field 28 MTPME
+Field 27 TDCC
+Field 26 HLP
+Field 25:24 E2TB
+Field 23 HCCD
+Res0 22:20
+Field 19 TTRF
+Res0 18
+Field 17 HPMD
+Res0 16
+Field 15 EnSPM
+Field 14 TPMS
+Field 13:12 E2PB
+Field 11 TDRA
+Field 10 TDOSA
+Field 9 TDA
+Field 8 TDE
+Field 7 HPME
+Field 6 TPM
+Field 5 TPMCR
+Field 4:0 HPMN
+EndSysreg
+
Sysreg HFGRTR_EL2 3 4 1 1 4
Fields HFGxTR_EL2
EndSysreg
--
2.47.0.163.g1226f6d8fa-goog
* [PATCH v4 04/18] arm64: sysreg: Add new definitions for ID_AA64DFR0_EL1
2024-10-25 18:23 [PATCH v4 00/18] KVM: arm64: nv: Support for EL2 PMU controls Oliver Upton
` (2 preceding siblings ...)
2024-10-25 18:23 ` [PATCH v4 03/18] arm64: sysreg: Migrate MDCR_EL2 definition to table Oliver Upton
@ 2024-10-25 18:23 ` Oliver Upton
2024-10-25 18:23 ` [PATCH v4 05/18] KVM: arm64: Describe RES0/RES1 bits of MDCR_EL2 Oliver Upton
` (15 subsequent siblings)
19 siblings, 0 replies; 27+ messages in thread
From: Oliver Upton @ 2024-10-25 18:23 UTC (permalink / raw)
To: kvmarm
Cc: Marc Zyngier, Joey Gouly, Suzuki K Poulose, Zenghui Yu,
Catalin Marinas, Will Deacon, Anshuman Khandual, linux-arm-kernel,
linux-kernel, Oliver Upton
Align the field definitions w/ DDI0601 2024-09 and opportunistically
declare MTPMU as a signed field.
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
---
arch/arm64/tools/sysreg | 15 ++++++++++++---
1 file changed, 12 insertions(+), 3 deletions(-)
diff --git a/arch/arm64/tools/sysreg b/arch/arm64/tools/sysreg
index 7061cf2d8444..e5d2c25fe463 100644
--- a/arch/arm64/tools/sysreg
+++ b/arch/arm64/tools/sysreg
@@ -1200,7 +1200,7 @@ UnsignedEnum 55:52 BRBE
0b0001 IMP
0b0010 BRBE_V1P1
EndEnum
-Enum 51:48 MTPMU
+SignedEnum 51:48 MTPMU
0b0000 NI_IMPDEF
0b0001 IMP
0b1111 NI
@@ -1208,6 +1208,7 @@ EndEnum
UnsignedEnum 47:44 TraceBuffer
0b0000 NI
0b0001 IMP
+ 0b0010 TRBE_V1P1
EndEnum
UnsignedEnum 43:40 TraceFilt
0b0000 NI
@@ -1224,11 +1225,18 @@ UnsignedEnum 35:32 PMSVer
0b0011 V1P2
0b0100 V1P3
0b0101 V1P4
+ 0b0110 V1P5
EndEnum
Field 31:28 CTX_CMPs
-Res0 27:24
+UnsignedEnum 27:24 SEBEP
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
Field 23:20 WRPs
-Res0 19:16
+UnsignedEnum 19:16 PMSS
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
Field 15:12 BRPs
UnsignedEnum 11:8 PMUVer
0b0000 NI
@@ -1238,6 +1246,7 @@ UnsignedEnum 11:8 PMUVer
0b0110 V3P5
0b0111 V3P7
0b1000 V3P8
+ 0b1001 V3P9
0b1111 IMP_DEF
EndEnum
UnsignedEnum 7:4 TraceVer
--
2.47.0.163.g1226f6d8fa-goog
* [PATCH v4 05/18] KVM: arm64: Describe RES0/RES1 bits of MDCR_EL2
2024-10-25 18:23 [PATCH v4 00/18] KVM: arm64: nv: Support for EL2 PMU controls Oliver Upton
` (3 preceding siblings ...)
2024-10-25 18:23 ` [PATCH v4 04/18] arm64: sysreg: Add new definitions for ID_AA64DFR0_EL1 Oliver Upton
@ 2024-10-25 18:23 ` Oliver Upton
2024-10-25 18:23 ` [PATCH v4 06/18] KVM: arm64: nv: Allow coarse-grained trap combos to use complex traps Oliver Upton
` (14 subsequent siblings)
19 siblings, 0 replies; 27+ messages in thread
From: Oliver Upton @ 2024-10-25 18:23 UTC (permalink / raw)
To: kvmarm
Cc: Marc Zyngier, Joey Gouly, Suzuki K Poulose, Zenghui Yu,
Catalin Marinas, Will Deacon, Anshuman Khandual, linux-arm-kernel,
linux-kernel, Oliver Upton
Add support for sanitising MDCR_EL2 and describe the RES0/RES1 bits
according to the feature set exposed to the VM.
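As a concrete (illustrative) example of the effect: for a VM without
FEAT_SPE, E2PB and TPMS become RES0, so a TPMS write by the guest
hypervisor reads back as zero. A standalone sketch of the mask
application, mirroring kvm_vcpu_apply_reg_masks() from patch 01 (bit
positions taken from the table added in patch 03):

  #include <stdint.h>
  #include <stdio.h>

  #define MDCR_EL2_TPMS  (UINT64_C(1) << 14)  /* Field 14 TPMS */
  #define MDCR_EL2_E2PB  (UINT64_C(3) << 12)  /* Field 13:12 E2PB */

  int main(void)
  {
      uint64_t res0 = MDCR_EL2_E2PB | MDCR_EL2_TPMS;  /* no FEAT_SPE */
      uint64_t res1 = 0;
      uint64_t val = MDCR_EL2_TPMS;   /* the guest hypervisor's write */

      val = (val & ~res0) | res1;     /* what __vcpu_sys_reg() yields */
      printf("%#llx\n", (unsigned long long)val);     /* prints 0 */
      return 0;
  }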
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
---
arch/arm64/include/asm/kvm_host.h | 2 +-
arch/arm64/kvm/nested.c | 37 +++++++++++++++++++++++++++++++
2 files changed, 38 insertions(+), 1 deletion(-)
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 07c7d2b7b3a3..ca8fe74920d4 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -464,7 +464,6 @@ enum vcpu_sysreg {
/* EL2 registers */
SCTLR_EL2, /* System Control Register (EL2) */
ACTLR_EL2, /* Auxiliary Control Register (EL2) */
- MDCR_EL2, /* Monitor Debug Configuration Register (EL2) */
CPTR_EL2, /* Architectural Feature Trap Register (EL2) */
HACR_EL2, /* Hypervisor Auxiliary Control Register */
ZCR_EL2, /* SVE Control Register (EL2) */
@@ -492,6 +491,7 @@ enum vcpu_sysreg {
/* Anything from this can be RES0/RES1 sanitised */
MARKER(__SANITISED_REG_START__),
+ MDCR_EL2, /* Monitor Debug Configuration Register (EL2) */
/* Any VNCR-capable reg goes after this point */
MARKER(__VNCR_START__),
diff --git a/arch/arm64/kvm/nested.c b/arch/arm64/kvm/nested.c
index b20b3bfb9cae..d256a495a6ba 100644
--- a/arch/arm64/kvm/nested.c
+++ b/arch/arm64/kvm/nested.c
@@ -1186,5 +1186,42 @@ int kvm_init_nv_sysregs(struct kvm *kvm)
res0 |= SCTLR_EL1_EPAN;
set_sysreg_masks(kvm, SCTLR_EL1, res0, res1);
+ /* MDCR_EL2 */
+ res0 = MDCR_EL2_RES0;
+ res1 = MDCR_EL2_RES1;
+ if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, PMUVer, IMP))
+ res0 |= (MDCR_EL2_HPMN | MDCR_EL2_TPMCR |
+ MDCR_EL2_TPM | MDCR_EL2_HPME);
+ if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, PMSVer, IMP))
+ res0 |= MDCR_EL2_E2PB | MDCR_EL2_TPMS;
+ if (!kvm_has_feat(kvm, ID_AA64DFR1_EL1, SPMU, IMP))
+ res0 |= MDCR_EL2_EnSPM;
+ if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, PMUVer, V3P1))
+ res0 |= MDCR_EL2_HPMD;
+ if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, TraceFilt, IMP))
+ res0 |= MDCR_EL2_TTRF;
+ if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, PMUVer, V3P5))
+ res0 |= MDCR_EL2_HCCD | MDCR_EL2_HLP;
+ if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, TraceBuffer, IMP))
+ res0 |= MDCR_EL2_E2TB;
+ if (!kvm_has_feat(kvm, ID_AA64MMFR0_EL1, FGT, IMP))
+ res0 |= MDCR_EL2_TDCC;
+ if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, MTPMU, IMP) ||
+ kvm_has_feat(kvm, ID_AA64PFR0_EL1, EL3, IMP))
+ res0 |= MDCR_EL2_MTPME;
+ if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, PMUVer, V3P7))
+ res0 |= MDCR_EL2_HPMFZO;
+ if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, PMSS, IMP))
+ res0 |= MDCR_EL2_PMSSE;
+ if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, PMSVer, V1P2))
+ res0 |= MDCR_EL2_HPMFZS;
+ if (!kvm_has_feat(kvm, ID_AA64DFR1_EL1, EBEP, IMP))
+ res0 |= MDCR_EL2_PMEE;
+ if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, DebugVer, V8P9))
+ res0 |= MDCR_EL2_EBWE;
+ if (!kvm_has_feat(kvm, ID_AA64DFR2_EL1, STEP, IMP))
+ res0 |= MDCR_EL2_EnSTEPOP;
+ set_sysreg_masks(kvm, MDCR_EL2, res0, res1);
+
return 0;
}
--
2.47.0.163.g1226f6d8fa-goog
* [PATCH v4 06/18] KVM: arm64: nv: Allow coarse-grained trap combos to use complex traps
2024-10-25 18:23 [PATCH v4 00/18] KVM: arm64: nv: Support for EL2 PMU controls Oliver Upton
` (4 preceding siblings ...)
2024-10-25 18:23 ` [PATCH v4 05/18] KVM: arm64: Describe RES0/RES1 bits of MDCR_EL2 Oliver Upton
@ 2024-10-25 18:23 ` Oliver Upton
2024-10-25 18:23 ` [PATCH v4 07/18] KVM: arm64: nv: Rename BEHAVE_FORWARD_ANY Oliver Upton
` (13 subsequent siblings)
19 siblings, 0 replies; 27+ messages in thread
From: Oliver Upton @ 2024-10-25 18:23 UTC (permalink / raw)
To: kvmarm
Cc: Marc Zyngier, Joey Gouly, Suzuki K Poulose, Zenghui Yu,
Catalin Marinas, Will Deacon, Anshuman Khandual, linux-arm-kernel,
linux-kernel, Oliver Upton
KVM uses a sanity check to avoid infinite recursion in trap combinations
that could potentially depend on themselves. Narrow the scope of this
sanity check to the exact CGT IDs that correspond w/ trap combos, opening
the door to using 'complex' traps as part of a combination.
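In other words, the cgt_group_id space is assumed to be laid out as
follows (inferred from the range test in the diff, not stated anywhere
else):

  /*
   * [0, __MULTIPLE_CONTROL_BITS__)                       simple trap bits
   * [__MULTIPLE_CONTROL_BITS__, __COMPLEX_CONDITIONS__)  combos; recursion
   *                                                      here is rejected
   * [__COMPLEX_CONDITIONS__, ...)                        complex checks;
   *                                                      now legal in combos
   */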
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
---
arch/arm64/kvm/emulate-nested.c | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/arch/arm64/kvm/emulate-nested.c b/arch/arm64/kvm/emulate-nested.c
index 05b6435d02a9..da7ab14e036d 100644
--- a/arch/arm64/kvm/emulate-nested.c
+++ b/arch/arm64/kvm/emulate-nested.c
@@ -2021,7 +2021,8 @@ int __init populate_nv_trap_config(void)
cgids = coarse_control_combo[id - __MULTIPLE_CONTROL_BITS__];
for (int i = 0; cgids[i] != __RESERVED__; i++) {
- if (cgids[i] >= __MULTIPLE_CONTROL_BITS__) {
+ if (cgids[i] >= __MULTIPLE_CONTROL_BITS__ &&
+ cgids[i] < __COMPLEX_CONDITIONS__) {
kvm_err("Recursive MCB %d/%d\n", id, cgids[i]);
ret = -EINVAL;
}
--
2.47.0.163.g1226f6d8fa-goog
* [PATCH v4 07/18] KVM: arm64: nv: Rename BEHAVE_FORWARD_ANY
2024-10-25 18:23 [PATCH v4 00/18] KVM: arm64: nv: Support for EL2 PMU controls Oliver Upton
` (5 preceding siblings ...)
2024-10-25 18:23 ` [PATCH v4 06/18] KVM: arm64: nv: Allow coarse-grained trap combos to use complex traps Oliver Upton
@ 2024-10-25 18:23 ` Oliver Upton
2024-10-25 18:23 ` [PATCH v4 08/18] KVM: arm64: nv: Reinject traps that take effect in Host EL0 Oliver Upton
` (12 subsequent siblings)
19 siblings, 0 replies; 27+ messages in thread
From: Oliver Upton @ 2024-10-25 18:23 UTC (permalink / raw)
To: kvmarm
Cc: Marc Zyngier, Joey Gouly, Suzuki K Poulose, Zenghui Yu,
Catalin Marinas, Will Deacon, Anshuman Khandual, linux-arm-kernel,
linux-kernel, Oliver Upton
BEHAVE_FORWARD_ANY is slightly ambiguous, especially since we're about
to cram some more information into the enum. Rephrase it.
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
---
arch/arm64/kvm/emulate-nested.c | 93 +++++++++++++++++----------------
1 file changed, 47 insertions(+), 46 deletions(-)
diff --git a/arch/arm64/kvm/emulate-nested.c b/arch/arm64/kvm/emulate-nested.c
index da7ab14e036d..e1a30d1bcd06 100644
--- a/arch/arm64/kvm/emulate-nested.c
+++ b/arch/arm64/kvm/emulate-nested.c
@@ -16,9 +16,10 @@
enum trap_behaviour {
BEHAVE_HANDLE_LOCALLY = 0,
+
BEHAVE_FORWARD_READ = BIT(0),
BEHAVE_FORWARD_WRITE = BIT(1),
- BEHAVE_FORWARD_ANY = BEHAVE_FORWARD_READ | BEHAVE_FORWARD_WRITE,
+ BEHAVE_FORWARD_RW = BEHAVE_FORWARD_READ | BEHAVE_FORWARD_WRITE,
};
struct trap_bits {
@@ -138,7 +139,7 @@ static const struct trap_bits coarse_trap_bits[] = {
.index = HCR_EL2,
.value = HCR_TID2,
.mask = HCR_TID2,
- .behaviour = BEHAVE_FORWARD_ANY,
+ .behaviour = BEHAVE_FORWARD_RW,
},
[CGT_HCR_TID3] = {
.index = HCR_EL2,
@@ -162,37 +163,37 @@ static const struct trap_bits coarse_trap_bits[] = {
.index = HCR_EL2,
.value = HCR_TIDCP,
.mask = HCR_TIDCP,
- .behaviour = BEHAVE_FORWARD_ANY,
+ .behaviour = BEHAVE_FORWARD_RW,
},
[CGT_HCR_TACR] = {
.index = HCR_EL2,
.value = HCR_TACR,
.mask = HCR_TACR,
- .behaviour = BEHAVE_FORWARD_ANY,
+ .behaviour = BEHAVE_FORWARD_RW,
},
[CGT_HCR_TSW] = {
.index = HCR_EL2,
.value = HCR_TSW,
.mask = HCR_TSW,
- .behaviour = BEHAVE_FORWARD_ANY,
+ .behaviour = BEHAVE_FORWARD_RW,
},
[CGT_HCR_TPC] = { /* Also called TCPC when FEAT_DPB is implemented */
.index = HCR_EL2,
.value = HCR_TPC,
.mask = HCR_TPC,
- .behaviour = BEHAVE_FORWARD_ANY,
+ .behaviour = BEHAVE_FORWARD_RW,
},
[CGT_HCR_TPU] = {
.index = HCR_EL2,
.value = HCR_TPU,
.mask = HCR_TPU,
- .behaviour = BEHAVE_FORWARD_ANY,
+ .behaviour = BEHAVE_FORWARD_RW,
},
[CGT_HCR_TTLB] = {
.index = HCR_EL2,
.value = HCR_TTLB,
.mask = HCR_TTLB,
- .behaviour = BEHAVE_FORWARD_ANY,
+ .behaviour = BEHAVE_FORWARD_RW,
},
[CGT_HCR_TVM] = {
.index = HCR_EL2,
@@ -204,7 +205,7 @@ static const struct trap_bits coarse_trap_bits[] = {
.index = HCR_EL2,
.value = HCR_TDZ,
.mask = HCR_TDZ,
- .behaviour = BEHAVE_FORWARD_ANY,
+ .behaviour = BEHAVE_FORWARD_RW,
},
[CGT_HCR_TRVM] = {
.index = HCR_EL2,
@@ -216,205 +217,205 @@ static const struct trap_bits coarse_trap_bits[] = {
.index = HCR_EL2,
.value = HCR_TLOR,
.mask = HCR_TLOR,
- .behaviour = BEHAVE_FORWARD_ANY,
+ .behaviour = BEHAVE_FORWARD_RW,
},
[CGT_HCR_TERR] = {
.index = HCR_EL2,
.value = HCR_TERR,
.mask = HCR_TERR,
- .behaviour = BEHAVE_FORWARD_ANY,
+ .behaviour = BEHAVE_FORWARD_RW,
},
[CGT_HCR_APK] = {
.index = HCR_EL2,
.value = 0,
.mask = HCR_APK,
- .behaviour = BEHAVE_FORWARD_ANY,
+ .behaviour = BEHAVE_FORWARD_RW,
},
[CGT_HCR_NV] = {
.index = HCR_EL2,
.value = HCR_NV,
.mask = HCR_NV,
- .behaviour = BEHAVE_FORWARD_ANY,
+ .behaviour = BEHAVE_FORWARD_RW,
},
[CGT_HCR_NV_nNV2] = {
.index = HCR_EL2,
.value = HCR_NV,
.mask = HCR_NV | HCR_NV2,
- .behaviour = BEHAVE_FORWARD_ANY,
+ .behaviour = BEHAVE_FORWARD_RW,
},
[CGT_HCR_NV1_nNV2] = {
.index = HCR_EL2,
.value = HCR_NV | HCR_NV1,
.mask = HCR_NV | HCR_NV1 | HCR_NV2,
- .behaviour = BEHAVE_FORWARD_ANY,
+ .behaviour = BEHAVE_FORWARD_RW,
},
[CGT_HCR_AT] = {
.index = HCR_EL2,
.value = HCR_AT,
.mask = HCR_AT,
- .behaviour = BEHAVE_FORWARD_ANY,
+ .behaviour = BEHAVE_FORWARD_RW,
},
[CGT_HCR_nFIEN] = {
.index = HCR_EL2,
.value = 0,
.mask = HCR_FIEN,
- .behaviour = BEHAVE_FORWARD_ANY,
+ .behaviour = BEHAVE_FORWARD_RW,
},
[CGT_HCR_TID4] = {
.index = HCR_EL2,
.value = HCR_TID4,
.mask = HCR_TID4,
- .behaviour = BEHAVE_FORWARD_ANY,
+ .behaviour = BEHAVE_FORWARD_RW,
},
[CGT_HCR_TICAB] = {
.index = HCR_EL2,
.value = HCR_TICAB,
.mask = HCR_TICAB,
- .behaviour = BEHAVE_FORWARD_ANY,
+ .behaviour = BEHAVE_FORWARD_RW,
},
[CGT_HCR_TOCU] = {
.index = HCR_EL2,
.value = HCR_TOCU,
.mask = HCR_TOCU,
- .behaviour = BEHAVE_FORWARD_ANY,
+ .behaviour = BEHAVE_FORWARD_RW,
},
[CGT_HCR_ENSCXT] = {
.index = HCR_EL2,
.value = 0,
.mask = HCR_ENSCXT,
- .behaviour = BEHAVE_FORWARD_ANY,
+ .behaviour = BEHAVE_FORWARD_RW,
},
[CGT_HCR_TTLBIS] = {
.index = HCR_EL2,
.value = HCR_TTLBIS,
.mask = HCR_TTLBIS,
- .behaviour = BEHAVE_FORWARD_ANY,
+ .behaviour = BEHAVE_FORWARD_RW,
},
[CGT_HCR_TTLBOS] = {
.index = HCR_EL2,
.value = HCR_TTLBOS,
.mask = HCR_TTLBOS,
- .behaviour = BEHAVE_FORWARD_ANY,
+ .behaviour = BEHAVE_FORWARD_RW,
},
[CGT_MDCR_TPMCR] = {
.index = MDCR_EL2,
.value = MDCR_EL2_TPMCR,
.mask = MDCR_EL2_TPMCR,
- .behaviour = BEHAVE_FORWARD_ANY,
+ .behaviour = BEHAVE_FORWARD_RW,
},
[CGT_MDCR_TPM] = {
.index = MDCR_EL2,
.value = MDCR_EL2_TPM,
.mask = MDCR_EL2_TPM,
- .behaviour = BEHAVE_FORWARD_ANY,
+ .behaviour = BEHAVE_FORWARD_RW,
},
[CGT_MDCR_TDE] = {
.index = MDCR_EL2,
.value = MDCR_EL2_TDE,
.mask = MDCR_EL2_TDE,
- .behaviour = BEHAVE_FORWARD_ANY,
+ .behaviour = BEHAVE_FORWARD_RW,
},
[CGT_MDCR_TDA] = {
.index = MDCR_EL2,
.value = MDCR_EL2_TDA,
.mask = MDCR_EL2_TDA,
- .behaviour = BEHAVE_FORWARD_ANY,
+ .behaviour = BEHAVE_FORWARD_RW,
},
[CGT_MDCR_TDOSA] = {
.index = MDCR_EL2,
.value = MDCR_EL2_TDOSA,
.mask = MDCR_EL2_TDOSA,
- .behaviour = BEHAVE_FORWARD_ANY,
+ .behaviour = BEHAVE_FORWARD_RW,
},
[CGT_MDCR_TDRA] = {
.index = MDCR_EL2,
.value = MDCR_EL2_TDRA,
.mask = MDCR_EL2_TDRA,
- .behaviour = BEHAVE_FORWARD_ANY,
+ .behaviour = BEHAVE_FORWARD_RW,
},
[CGT_MDCR_E2PB] = {
.index = MDCR_EL2,
.value = 0,
.mask = BIT(MDCR_EL2_E2PB_SHIFT),
- .behaviour = BEHAVE_FORWARD_ANY,
+ .behaviour = BEHAVE_FORWARD_RW,
},
[CGT_MDCR_TPMS] = {
.index = MDCR_EL2,
.value = MDCR_EL2_TPMS,
.mask = MDCR_EL2_TPMS,
- .behaviour = BEHAVE_FORWARD_ANY,
+ .behaviour = BEHAVE_FORWARD_RW,
},
[CGT_MDCR_TTRF] = {
.index = MDCR_EL2,
.value = MDCR_EL2_TTRF,
.mask = MDCR_EL2_TTRF,
- .behaviour = BEHAVE_FORWARD_ANY,
+ .behaviour = BEHAVE_FORWARD_RW,
},
[CGT_MDCR_E2TB] = {
.index = MDCR_EL2,
.value = 0,
.mask = BIT(MDCR_EL2_E2TB_SHIFT),
- .behaviour = BEHAVE_FORWARD_ANY,
+ .behaviour = BEHAVE_FORWARD_RW,
},
[CGT_MDCR_TDCC] = {
.index = MDCR_EL2,
.value = MDCR_EL2_TDCC,
.mask = MDCR_EL2_TDCC,
- .behaviour = BEHAVE_FORWARD_ANY,
+ .behaviour = BEHAVE_FORWARD_RW,
},
[CGT_CPACR_E0POE] = {
.index = CPTR_EL2,
.value = CPACR_ELx_E0POE,
.mask = CPACR_ELx_E0POE,
- .behaviour = BEHAVE_FORWARD_ANY,
+ .behaviour = BEHAVE_FORWARD_RW,
},
[CGT_CPTR_TAM] = {
.index = CPTR_EL2,
.value = CPTR_EL2_TAM,
.mask = CPTR_EL2_TAM,
- .behaviour = BEHAVE_FORWARD_ANY,
+ .behaviour = BEHAVE_FORWARD_RW,
},
[CGT_CPTR_TCPAC] = {
.index = CPTR_EL2,
.value = CPTR_EL2_TCPAC,
.mask = CPTR_EL2_TCPAC,
- .behaviour = BEHAVE_FORWARD_ANY,
+ .behaviour = BEHAVE_FORWARD_RW,
},
[CGT_HCRX_EnFPM] = {
.index = HCRX_EL2,
.value = 0,
.mask = HCRX_EL2_EnFPM,
- .behaviour = BEHAVE_FORWARD_ANY,
+ .behaviour = BEHAVE_FORWARD_RW,
},
[CGT_HCRX_TCR2En] = {
.index = HCRX_EL2,
.value = 0,
.mask = HCRX_EL2_TCR2En,
- .behaviour = BEHAVE_FORWARD_ANY,
+ .behaviour = BEHAVE_FORWARD_RW,
},
[CGT_ICH_HCR_TC] = {
.index = ICH_HCR_EL2,
.value = ICH_HCR_TC,
.mask = ICH_HCR_TC,
- .behaviour = BEHAVE_FORWARD_ANY,
+ .behaviour = BEHAVE_FORWARD_RW,
},
[CGT_ICH_HCR_TALL0] = {
.index = ICH_HCR_EL2,
.value = ICH_HCR_TALL0,
.mask = ICH_HCR_TALL0,
- .behaviour = BEHAVE_FORWARD_ANY,
+ .behaviour = BEHAVE_FORWARD_RW,
},
[CGT_ICH_HCR_TALL1] = {
.index = ICH_HCR_EL2,
.value = ICH_HCR_TALL1,
.mask = ICH_HCR_TALL1,
- .behaviour = BEHAVE_FORWARD_ANY,
+ .behaviour = BEHAVE_FORWARD_RW,
},
[CGT_ICH_HCR_TDIR] = {
.index = ICH_HCR_EL2,
.value = ICH_HCR_TDIR,
.mask = ICH_HCR_TDIR,
- .behaviour = BEHAVE_FORWARD_ANY,
+ .behaviour = BEHAVE_FORWARD_RW,
},
};
@@ -474,7 +475,7 @@ static enum trap_behaviour check_cnthctl_el1pcten(struct kvm_vcpu *vcpu)
if (get_sanitized_cnthctl(vcpu) & (CNTHCTL_EL1PCTEN << 10))
return BEHAVE_HANDLE_LOCALLY;
- return BEHAVE_FORWARD_ANY;
+ return BEHAVE_FORWARD_RW;
}
static enum trap_behaviour check_cnthctl_el1pten(struct kvm_vcpu *vcpu)
@@ -482,7 +483,7 @@ static enum trap_behaviour check_cnthctl_el1pten(struct kvm_vcpu *vcpu)
if (get_sanitized_cnthctl(vcpu) & (CNTHCTL_EL1PCEN << 10))
return BEHAVE_HANDLE_LOCALLY;
- return BEHAVE_FORWARD_ANY;
+ return BEHAVE_FORWARD_RW;
}
static enum trap_behaviour check_cptr_tta(struct kvm_vcpu *vcpu)
@@ -493,7 +494,7 @@ static enum trap_behaviour check_cptr_tta(struct kvm_vcpu *vcpu)
val = translate_cptr_el2_to_cpacr_el1(val);
if (val & CPACR_ELx_TTA)
- return BEHAVE_FORWARD_ANY;
+ return BEHAVE_FORWARD_RW;
return BEHAVE_HANDLE_LOCALLY;
}
--
2.47.0.163.g1226f6d8fa-goog
* [PATCH v4 08/18] KVM: arm64: nv: Reinject traps that take effect in Host EL0
2024-10-25 18:23 [PATCH v4 00/18] KVM: arm64: nv: Support for EL2 PMU controls Oliver Upton
` (6 preceding siblings ...)
2024-10-25 18:23 ` [PATCH v4 07/18] KVM: arm64: nv: Rename BEHAVE_FORWARD_ANY Oliver Upton
@ 2024-10-25 18:23 ` Oliver Upton
2024-10-26 8:13 ` Marc Zyngier
2024-10-25 18:23 ` [PATCH v4 09/18] KVM: arm64: nv: Honor MDCR_EL2.{TPM, TPMCR} " Oliver Upton
` (11 subsequent siblings)
19 siblings, 1 reply; 27+ messages in thread
From: Oliver Upton @ 2024-10-25 18:23 UTC (permalink / raw)
To: kvmarm
Cc: Marc Zyngier, Joey Gouly, Suzuki K Poulose, Zenghui Yu,
Catalin Marinas, Will Deacon, Anshuman Khandual, linux-arm-kernel,
linux-kernel, Oliver Upton
Wire up the other end of traps that affect host EL0 by actually
injecting them into the guest hypervisor. Skip over FGT entirely, as a
cursory glance suggests no FGT is effective in host EL0.
Note that kvm_inject_nested() is already equipped for handling
exceptions while the VM is already in a host context.
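A sketch of what the new helper distinguishes (derived from the
definitions in the diff below):

  /*
   * is_hyp_ctxt()  vcpu_is_el2()  vcpu_is_host_el0()
   * false          -              false   guest (L2) context
   * true           true           false   virtual EL2 (the L1 hypervisor)
   * true           false          true    the L1 hypervisor's own EL0
   */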
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
---
arch/arm64/include/asm/kvm_emulate.h | 5 +++++
arch/arm64/kvm/emulate-nested.c | 29 ++++++++++++++++++++++++----
2 files changed, 30 insertions(+), 4 deletions(-)
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index a601a9305b10..bf0c48403f59 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -225,6 +225,11 @@ static inline bool is_hyp_ctxt(const struct kvm_vcpu *vcpu)
return vcpu_has_nv(vcpu) && __is_hyp_ctxt(&vcpu->arch.ctxt);
}
+static inline bool vcpu_is_host_el0(const struct kvm_vcpu *vcpu)
+{
+ return is_hyp_ctxt(vcpu) && !vcpu_is_el2(vcpu);
+}
+
/*
* The layout of SPSR for an AArch32 state is different when observed from an
* AArch64 SPSR_ELx or an AArch32 SPSR_*. This function generates the AArch32
diff --git a/arch/arm64/kvm/emulate-nested.c b/arch/arm64/kvm/emulate-nested.c
index e1a30d1bcd06..db3149379a4d 100644
--- a/arch/arm64/kvm/emulate-nested.c
+++ b/arch/arm64/kvm/emulate-nested.c
@@ -20,6 +20,9 @@ enum trap_behaviour {
BEHAVE_FORWARD_READ = BIT(0),
BEHAVE_FORWARD_WRITE = BIT(1),
BEHAVE_FORWARD_RW = BEHAVE_FORWARD_READ | BEHAVE_FORWARD_WRITE,
+
+ /* Traps that take effect in Host EL0, this is rare! */
+ BEHAVE_IN_HOST_EL0 = BIT(2),
};
struct trap_bits {
@@ -2128,11 +2131,19 @@ static u64 kvm_get_sysreg_res0(struct kvm *kvm, enum vcpu_sysreg sr)
return masks->mask[sr - __VNCR_START__].res0;
}
-static bool check_fgt_bit(struct kvm *kvm, bool is_read,
+static bool check_fgt_bit(struct kvm_vcpu *vcpu, bool is_read,
u64 val, const union trap_config tc)
{
+ struct kvm *kvm = vcpu->kvm;
enum vcpu_sysreg sr;
+ /*
+ * KVM doesn't know about any FGTs that apply to the host, and hopefully
+ * that'll remain the case.
+ */
+ if (is_hyp_ctxt(vcpu))
+ return false;
+
if (tc.pol)
return (val & BIT(tc.bit));
@@ -2209,7 +2220,15 @@ bool triage_sysreg_trap(struct kvm_vcpu *vcpu, int *sr_index)
* If we're not nesting, immediately return to the caller, with the
* sysreg index, should we have it.
*/
- if (!vcpu_has_nv(vcpu) || is_hyp_ctxt(vcpu))
+ if (!vcpu_has_nv(vcpu))
+ goto local;
+
+ /*
+ * There are a few traps that take effect InHost, but are constrained
+ * to EL0. Don't bother with computing the trap behaviour if the vCPU
+ * isn't in EL0.
+ */
+ if (is_hyp_ctxt(vcpu) && !vcpu_is_host_el0(vcpu))
goto local;
switch ((enum fgt_group_id)tc.fgt) {
@@ -2255,12 +2274,14 @@ bool triage_sysreg_trap(struct kvm_vcpu *vcpu, int *sr_index)
goto local;
}
- if (tc.fgt != __NO_FGT_GROUP__ && check_fgt_bit(vcpu->kvm, is_read,
- val, tc))
+ if (tc.fgt != __NO_FGT_GROUP__ && check_fgt_bit(vcpu, is_read, val, tc))
goto inject;
b = compute_trap_behaviour(vcpu, tc);
+ if (!(b & BEHAVE_IN_HOST_EL0) && vcpu_is_host_el0(vcpu))
+ goto local;
+
if (((b & BEHAVE_FORWARD_READ) && is_read) ||
((b & BEHAVE_FORWARD_WRITE) && !is_read))
goto inject;
--
2.47.0.163.g1226f6d8fa-goog
* Re: [PATCH v4 08/18] KVM: arm64: nv: Reinject traps that take effect in Host EL0
2024-10-25 18:23 ` [PATCH v4 08/18] KVM: arm64: nv: Reinject traps that take effect in Host EL0 Oliver Upton
@ 2024-10-26 8:13 ` Marc Zyngier
2024-10-26 14:35 ` Oliver Upton
0 siblings, 1 reply; 27+ messages in thread
From: Marc Zyngier @ 2024-10-26 8:13 UTC (permalink / raw)
To: Oliver Upton
Cc: kvmarm, Joey Gouly, Suzuki K Poulose, Zenghui Yu, Catalin Marinas,
Will Deacon, Anshuman Khandual, linux-arm-kernel, linux-kernel
On Fri, 25 Oct 2024 19:23:43 +0100,
Oliver Upton <oliver.upton@linux.dev> wrote:
>
> Wire up the other end of traps that affect host EL0 by actually
> injecting them into the guest hypervisor. Skip over FGT entirely, as a
> cursory glance suggests no FGT is effective in host EL0.
Yes, and this (thankfully) is by design! :-)
>
> Note that kvm_inject_nested() is already equipped for handling
> exceptions while the VM is already in a host context.
>
> Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
> ---
> arch/arm64/include/asm/kvm_emulate.h | 5 +++++
> arch/arm64/kvm/emulate-nested.c | 29 ++++++++++++++++++++++++----
> 2 files changed, 30 insertions(+), 4 deletions(-)
>
> diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
> index a601a9305b10..bf0c48403f59 100644
> --- a/arch/arm64/include/asm/kvm_emulate.h
> +++ b/arch/arm64/include/asm/kvm_emulate.h
> @@ -225,6 +225,11 @@ static inline bool is_hyp_ctxt(const struct kvm_vcpu *vcpu)
> return vcpu_has_nv(vcpu) && __is_hyp_ctxt(&vcpu->arch.ctxt);
> }
>
> +static inline bool vcpu_is_host_el0(const struct kvm_vcpu *vcpu)
> +{
> + return is_hyp_ctxt(vcpu) && !vcpu_is_el2(vcpu);
> +}
> +
> /*
> * The layout of SPSR for an AArch32 state is different when observed from an
> * AArch64 SPSR_ELx or an AArch32 SPSR_*. This function generates the AArch32
> diff --git a/arch/arm64/kvm/emulate-nested.c b/arch/arm64/kvm/emulate-nested.c
> index e1a30d1bcd06..db3149379a4d 100644
> --- a/arch/arm64/kvm/emulate-nested.c
> +++ b/arch/arm64/kvm/emulate-nested.c
> @@ -20,6 +20,9 @@ enum trap_behaviour {
> BEHAVE_FORWARD_READ = BIT(0),
> BEHAVE_FORWARD_WRITE = BIT(1),
> BEHAVE_FORWARD_RW = BEHAVE_FORWARD_READ | BEHAVE_FORWARD_WRITE,
> +
> + /* Traps that take effect in Host EL0, this is rare! */
> + BEHAVE_IN_HOST_EL0 = BIT(2),
nit: BEHAVE_IN_HOST_EL0 lacks an action verb (forward?).
Thanks,
M.
--
Without deviation from the norm, progress is not possible.
* Re: [PATCH v4 08/18] KVM: arm64: nv: Reinject traps that take effect in Host EL0
2024-10-26 8:13 ` Marc Zyngier
@ 2024-10-26 14:35 ` Oliver Upton
2024-10-29 9:45 ` Anshuman Khandual
0 siblings, 1 reply; 27+ messages in thread
From: Oliver Upton @ 2024-10-26 14:35 UTC (permalink / raw)
To: Marc Zyngier
Cc: kvmarm, Joey Gouly, Suzuki K Poulose, Zenghui Yu, Catalin Marinas,
Will Deacon, Anshuman Khandual, linux-arm-kernel, linux-kernel
Hey,
On Sat, Oct 26, 2024 at 09:13:17AM +0100, Marc Zyngier wrote:
> On Fri, 25 Oct 2024 19:23:43 +0100,
> Oliver Upton <oliver.upton@linux.dev> wrote:
> >
> > Wire up the other end of traps that affect host EL0 by actually
> > injecting them into the guest hypervisor. Skip over FGT entirely, as a
> > cursory glance suggests no FGT is effective in host EL0.
>
> Yes, and this (thankfully) is by design! :-)
>
> >
> > Note that kvm_inject_nested() is already equipped for handling
> > exceptions while the VM is already in a host context.
> >
> > Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
> > ---
> > arch/arm64/include/asm/kvm_emulate.h | 5 +++++
> > arch/arm64/kvm/emulate-nested.c | 29 ++++++++++++++++++++++++----
> > 2 files changed, 30 insertions(+), 4 deletions(-)
> >
> > diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
> > index a601a9305b10..bf0c48403f59 100644
> > --- a/arch/arm64/include/asm/kvm_emulate.h
> > +++ b/arch/arm64/include/asm/kvm_emulate.h
> > @@ -225,6 +225,11 @@ static inline bool is_hyp_ctxt(const struct kvm_vcpu *vcpu)
> > return vcpu_has_nv(vcpu) && __is_hyp_ctxt(&vcpu->arch.ctxt);
> > }
> >
> > +static inline bool vcpu_is_host_el0(const struct kvm_vcpu *vcpu)
> > +{
> > + return is_hyp_ctxt(vcpu) && !vcpu_is_el2(vcpu);
> > +}
> > +
> > /*
> > * The layout of SPSR for an AArch32 state is different when observed from an
> > * AArch64 SPSR_ELx or an AArch32 SPSR_*. This function generates the AArch32
> > diff --git a/arch/arm64/kvm/emulate-nested.c b/arch/arm64/kvm/emulate-nested.c
> > index e1a30d1bcd06..db3149379a4d 100644
> > --- a/arch/arm64/kvm/emulate-nested.c
> > +++ b/arch/arm64/kvm/emulate-nested.c
> > @@ -20,6 +20,9 @@ enum trap_behaviour {
> > BEHAVE_FORWARD_READ = BIT(0),
> > BEHAVE_FORWARD_WRITE = BIT(1),
> > BEHAVE_FORWARD_RW = BEHAVE_FORWARD_READ | BEHAVE_FORWARD_WRITE,
> > +
> > + /* Traps that take effect in Host EL0, this is rare! */
> > + BEHAVE_IN_HOST_EL0 = BIT(2),
>
> nit: BEHAVE_IN_HOST_EL0 lacks an action verb (forward?).
Thinking I'll squash this in (plus renaming in later patches):
diff --git a/arch/arm64/kvm/emulate-nested.c b/arch/arm64/kvm/emulate-nested.c
index db3149379a4d..b072098ee44e 100644
--- a/arch/arm64/kvm/emulate-nested.c
+++ b/arch/arm64/kvm/emulate-nested.c
@@ -22,7 +22,7 @@ enum trap_behaviour {
BEHAVE_FORWARD_RW = BEHAVE_FORWARD_READ | BEHAVE_FORWARD_WRITE,
/* Traps that take effect in Host EL0, this is rare! */
- BEHAVE_IN_HOST_EL0 = BIT(2),
+ BEHAVE_FORWARD_IN_HOST_EL0 = BIT(2),
};
struct trap_bits {
@@ -2279,7 +2279,7 @@ bool triage_sysreg_trap(struct kvm_vcpu *vcpu, int *sr_index)
b = compute_trap_behaviour(vcpu, tc);
- if (!(b & BEHAVE_IN_HOST_EL0) && vcpu_is_host_el0(vcpu))
+ if (!(b & BEHAVE_FORWARD_IN_HOST_EL0) && vcpu_is_host_el0(vcpu))
goto local;
if (((b & BEHAVE_FORWARD_READ) && is_read) ||
--
Thanks,
Oliver
* Re: [PATCH v4 08/18] KVM: arm64: nv: Reinject traps that take effect in Host EL0
2024-10-26 14:35 ` Oliver Upton
@ 2024-10-29 9:45 ` Anshuman Khandual
0 siblings, 0 replies; 27+ messages in thread
From: Anshuman Khandual @ 2024-10-29 9:45 UTC (permalink / raw)
To: Oliver Upton, Marc Zyngier
Cc: kvmarm, Joey Gouly, Suzuki K Poulose, Zenghui Yu, Catalin Marinas,
Will Deacon, linux-arm-kernel, linux-kernel
On 10/26/24 20:05, Oliver Upton wrote:
> Hey,
>
> On Sat, Oct 26, 2024 at 09:13:17AM +0100, Marc Zyngier wrote:
>> On Fri, 25 Oct 2024 19:23:43 +0100,
>> Oliver Upton <oliver.upton@linux.dev> wrote:
>>>
>>> Wire up the other end of traps that affect host EL0 by actually
>>> injecting them into the guest hypervisor. Skip over FGT entirely, as a
>>> cursory glance suggests no FGT is effective in host EL0.
>>
>> Yes, and this (thankfully) is by design! :-)
>>
>>>
>>> Note that kvm_inject_nested() is already equipped for handling
>>> exceptions while the VM is already in a host context.
>>>
>>> Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
>>> ---
>>> arch/arm64/include/asm/kvm_emulate.h | 5 +++++
>>> arch/arm64/kvm/emulate-nested.c | 29 ++++++++++++++++++++++++----
>>> 2 files changed, 30 insertions(+), 4 deletions(-)
>>>
>>> diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
>>> index a601a9305b10..bf0c48403f59 100644
>>> --- a/arch/arm64/include/asm/kvm_emulate.h
>>> +++ b/arch/arm64/include/asm/kvm_emulate.h
>>> @@ -225,6 +225,11 @@ static inline bool is_hyp_ctxt(const struct kvm_vcpu *vcpu)
>>> return vcpu_has_nv(vcpu) && __is_hyp_ctxt(&vcpu->arch.ctxt);
>>> }
>>>
>>> +static inline bool vcpu_is_host_el0(const struct kvm_vcpu *vcpu)
>>> +{
>>> + return is_hyp_ctxt(vcpu) && !vcpu_is_el2(vcpu);
>>> +}
>>> +
>>> /*
>>> * The layout of SPSR for an AArch32 state is different when observed from an
>>> * AArch64 SPSR_ELx or an AArch32 SPSR_*. This function generates the AArch32
>>> diff --git a/arch/arm64/kvm/emulate-nested.c b/arch/arm64/kvm/emulate-nested.c
>>> index e1a30d1bcd06..db3149379a4d 100644
>>> --- a/arch/arm64/kvm/emulate-nested.c
>>> +++ b/arch/arm64/kvm/emulate-nested.c
>>> @@ -20,6 +20,9 @@ enum trap_behaviour {
>>> BEHAVE_FORWARD_READ = BIT(0),
>>> BEHAVE_FORWARD_WRITE = BIT(1),
>>> BEHAVE_FORWARD_RW = BEHAVE_FORWARD_READ | BEHAVE_FORWARD_WRITE,
>>> +
>>> + /* Traps that take effect in Host EL0, this is rare! */
>>> + BEHAVE_IN_HOST_EL0 = BIT(2),
>>
>> nit: BEHAVE_IN_HOST_EL0 lacks an action verb (forward?).
>
> Thinking I'll squash this in (plus renaming in later patches):
Right, the following additional replacements are required for the build.
@@ -307,14 +307,14 @@ static const struct trap_bits coarse_trap_bits[] = {
.value = MDCR_EL2_TPMCR,
.mask = MDCR_EL2_TPMCR,
.behaviour = BEHAVE_FORWARD_RW |
- BEHAVE_IN_HOST_EL0,
+ BEHAVE_FORWARD_IN_HOST_EL0,
},
[CGT_MDCR_TPM] = {
.index = MDCR_EL2,
.value = MDCR_EL2_TPM,
.mask = MDCR_EL2_TPM,
.behaviour = BEHAVE_FORWARD_RW |
- BEHAVE_IN_HOST_EL0,
+ BEHAVE_FORWARD_IN_HOST_EL0,
},
[CGT_MDCR_TDE] = {
.index = MDCR_EL2,
@@ -530,7 +530,7 @@ static enum trap_behaviour check_mdcr_hpmn(struct kvm_vcpu *vcpu)
}
if (kvm_pmu_counter_is_hyp(vcpu, idx))
- return BEHAVE_FORWARD_RW | BEHAVE_IN_HOST_EL0;
+ return BEHAVE_FORWARD_RW | BEHAVE_FORWARD_IN_HOST_EL0;
return BEHAVE_HANDLE_LOCALLY;
}
>
> diff --git a/arch/arm64/kvm/emulate-nested.c b/arch/arm64/kvm/emulate-nested.c
> index db3149379a4d..b072098ee44e 100644
> --- a/arch/arm64/kvm/emulate-nested.c
> +++ b/arch/arm64/kvm/emulate-nested.c
> @@ -22,7 +22,7 @@ enum trap_behaviour {
> BEHAVE_FORWARD_RW = BEHAVE_FORWARD_READ | BEHAVE_FORWARD_WRITE,
>
> /* Traps that take effect in Host EL0, this is rare! */
> - BEHAVE_IN_HOST_EL0 = BIT(2),
> + BEHAVE_FORWARD_IN_HOST_EL0 = BIT(2),
> };
>
> struct trap_bits {
> @@ -2279,7 +2279,7 @@ bool triage_sysreg_trap(struct kvm_vcpu *vcpu, int *sr_index)
>
> b = compute_trap_behaviour(vcpu, tc);
>
> - if (!(b & BEHAVE_IN_HOST_EL0) && vcpu_is_host_el0(vcpu))
> + if (!(b & BEHAVE_FORWARD_IN_HOST_EL0) && vcpu_is_host_el0(vcpu))
> goto local;
>
> if (((b & BEHAVE_FORWARD_READ) && is_read) ||
>
* [PATCH v4 09/18] KVM: arm64: nv: Honor MDCR_EL2.{TPM, TPMCR} in Host EL0
2024-10-25 18:23 [PATCH v4 00/18] KVM: arm64: nv: Support for EL2 PMU controls Oliver Upton
` (7 preceding siblings ...)
2024-10-25 18:23 ` [PATCH v4 08/18] KVM: arm64: nv: Reinject traps that take effect in Host EL0 Oliver Upton
@ 2024-10-25 18:23 ` Oliver Upton
2024-10-25 18:23 ` [PATCH v4 10/18] KVM: arm64: nv: Describe trap behaviour of MDCR_EL2.HPMN Oliver Upton
` (10 subsequent siblings)
19 siblings, 0 replies; 27+ messages in thread
From: Oliver Upton @ 2024-10-25 18:23 UTC (permalink / raw)
To: kvmarm
Cc: Marc Zyngier, Joey Gouly, Suzuki K Poulose, Zenghui Yu,
Catalin Marinas, Will Deacon, Anshuman Khandual, linux-arm-kernel,
linux-kernel, Oliver Upton
TPM and TPMCR trap bits also affect Host EL0. How fun.
Mark these two trap bits as such and take advantage of the new
infrastructure for dealing w/ EL0 traps.
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
---
arch/arm64/kvm/emulate-nested.c | 6 ++++--
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/arch/arm64/kvm/emulate-nested.c b/arch/arm64/kvm/emulate-nested.c
index db3149379a4d..f9594296d69c 100644
--- a/arch/arm64/kvm/emulate-nested.c
+++ b/arch/arm64/kvm/emulate-nested.c
@@ -304,13 +304,15 @@ static const struct trap_bits coarse_trap_bits[] = {
.index = MDCR_EL2,
.value = MDCR_EL2_TPMCR,
.mask = MDCR_EL2_TPMCR,
- .behaviour = BEHAVE_FORWARD_RW,
+ .behaviour = BEHAVE_FORWARD_RW |
+ BEHAVE_IN_HOST_EL0,
},
[CGT_MDCR_TPM] = {
.index = MDCR_EL2,
.value = MDCR_EL2_TPM,
.mask = MDCR_EL2_TPM,
- .behaviour = BEHAVE_FORWARD_RW,
+ .behaviour = BEHAVE_FORWARD_RW |
+ BEHAVE_IN_HOST_EL0,
},
[CGT_MDCR_TDE] = {
.index = MDCR_EL2,
--
2.47.0.163.g1226f6d8fa-goog
* [PATCH v4 10/18] KVM: arm64: nv: Describe trap behaviour of MDCR_EL2.HPMN
2024-10-25 18:23 [PATCH v4 00/18] KVM: arm64: nv: Support for EL2 PMU controls Oliver Upton
` (8 preceding siblings ...)
2024-10-25 18:23 ` [PATCH v4 09/18] KVM: arm64: nv: Honor MDCR_EL2.{TPM, TPMCR} " Oliver Upton
@ 2024-10-25 18:23 ` Oliver Upton
2024-10-26 10:21 ` kernel test robot
` (2 more replies)
2024-10-25 18:23 ` [PATCH v4 11/18] KVM: arm64: nv: Advertise support for FEAT_HPMN0 Oliver Upton
` (9 subsequent siblings)
19 siblings, 3 replies; 27+ messages in thread
From: Oliver Upton @ 2024-10-25 18:23 UTC (permalink / raw)
To: kvmarm
Cc: Marc Zyngier, Joey Gouly, Suzuki K Poulose, Zenghui Yu,
Catalin Marinas, Will Deacon, Anshuman Khandual, linux-arm-kernel,
linux-kernel, Oliver Upton
MDCR_EL2.HPMN splits the PMU event counters into two ranges: the first
range is accessible from all ELs, and the second range is accessible
only to EL2/3. Supposing the guest hypervisor allows direct access to
the PMU counters from the L2, KVM needs to locally handle those
accesses.
Add a new complex trap configuration for HPMN that checks if the counter
index is accessible to the current context. As written, the architecture
suggests HPMN only causes PMEVCNTR<n>_EL0 to trap, though intuition (and
the pseudocode) suggests that the trap applies to PMEVTYPER<n>_EL0 as
well.
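For reference, a standalone sketch of the counter-index decode used by
the new trap check, assuming the standard PMEV{CNTR,TYPER}<n>_EL0
encoding where n is split across CRm[1:0] and Op2[2:0]:

  #include <assert.h>

  /* mirrors the decode in check_mdcr_hpmn(): n = CRm[1:0]:Op2[2:0] */
  static unsigned int pmev_idx(unsigned int crm, unsigned int op2)
  {
      return ((crm & 0x3) << 3) | op2;
  }

  int main(void)
  {
      /* PMEVCNTR12_EL0 encodes CRm=0b1001, Op2=0b100 */
      assert(pmev_idx(0x9, 0x4) == 12);
      return 0;
  }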
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
---
arch/arm64/kvm/emulate-nested.c | 160 +++++++++++++++++++-------------
arch/arm64/kvm/pmu-emul.c | 18 ++++
include/kvm/arm_pmu.h | 6 ++
3 files changed, 120 insertions(+), 64 deletions(-)
diff --git a/arch/arm64/kvm/emulate-nested.c b/arch/arm64/kvm/emulate-nested.c
index f9594296d69c..162bddbfbe79 100644
--- a/arch/arm64/kvm/emulate-nested.c
+++ b/arch/arm64/kvm/emulate-nested.c
@@ -110,6 +110,7 @@ enum cgt_group_id {
CGT_HCR_TPU_TOCU,
CGT_HCR_NV1_nNV2_ENSCXT,
CGT_MDCR_TPM_TPMCR,
+ CGT_MDCR_TPM_HPMN,
CGT_MDCR_TDE_TDA,
CGT_MDCR_TDE_TDOSA,
CGT_MDCR_TDE_TDRA,
@@ -126,6 +127,7 @@ enum cgt_group_id {
CGT_CNTHCTL_EL1PTEN,
CGT_CPTR_TTA,
+ CGT_MDCR_HPMN,
/* Must be last */
__NR_CGT_GROUP_IDS__
@@ -441,6 +443,7 @@ static const enum cgt_group_id *coarse_control_combo[] = {
MCB(CGT_HCR_TPU_TOCU, CGT_HCR_TPU, CGT_HCR_TOCU),
MCB(CGT_HCR_NV1_nNV2_ENSCXT, CGT_HCR_NV1_nNV2, CGT_HCR_ENSCXT),
MCB(CGT_MDCR_TPM_TPMCR, CGT_MDCR_TPM, CGT_MDCR_TPMCR),
+ MCB(CGT_MDCR_TPM_HPMN, CGT_MDCR_TPM, CGT_MDCR_HPMN),
MCB(CGT_MDCR_TDE_TDA, CGT_MDCR_TDE, CGT_MDCR_TDA),
MCB(CGT_MDCR_TDE_TDOSA, CGT_MDCR_TDE, CGT_MDCR_TDOSA),
MCB(CGT_MDCR_TDE_TDRA, CGT_MDCR_TDE, CGT_MDCR_TDRA),
@@ -504,6 +507,34 @@ static enum trap_behaviour check_cptr_tta(struct kvm_vcpu *vcpu)
return BEHAVE_HANDLE_LOCALLY;
}
+static enum trap_behaviour check_mdcr_hpmn(struct kvm_vcpu *vcpu)
+{
+ u32 sysreg = esr_sys64_to_sysreg(kvm_vcpu_get_esr(vcpu));
+ unsigned int idx;
+
+
+ switch (sysreg) {
+ case SYS_PMEVTYPERn_EL0(0) ... SYS_PMEVTYPERn_EL0(30):
+ case SYS_PMEVCNTRn_EL0(0) ... SYS_PMEVCNTRn_EL0(30):
+ idx = (sys_reg_CRm(sysreg) & 0x3) << 3 | sys_reg_Op2(sysreg);
+ break;
+ case SYS_PMXEVTYPER_EL0:
+ case SYS_PMXEVCNTR_EL0:
+ idx = SYS_FIELD_GET(PMSELR_EL0, SEL,
+ __vcpu_sys_reg(vcpu, PMSELR_EL0));
+ break;
+ default:
+ /* Someone used this trap helper for something else... */
+ KVM_BUG_ON(1, vcpu->kvm);
+ return BEHAVE_HANDLE_LOCALLY;
+ }
+
+ if (kvm_pmu_counter_is_hyp(vcpu, idx))
+ return BEHAVE_FORWARD_RW | BEHAVE_IN_HOST_EL0;
+
+ return BEHAVE_HANDLE_LOCALLY;
+}
+
#define CCC(id, fn) \
[id - __COMPLEX_CONDITIONS__] = fn
@@ -511,6 +542,7 @@ static const complex_condition_check ccc[] = {
CCC(CGT_CNTHCTL_EL1PCTEN, check_cnthctl_el1pcten),
CCC(CGT_CNTHCTL_EL1PTEN, check_cnthctl_el1pten),
CCC(CGT_CPTR_TTA, check_cptr_tta),
+ CCC(CGT_MDCR_HPMN, check_mdcr_hpmn),
};
/*
@@ -925,77 +957,77 @@ static const struct encoding_to_trap_config encoding_to_cgt[] __initconst = {
SR_TRAP(SYS_PMOVSCLR_EL0, CGT_MDCR_TPM),
SR_TRAP(SYS_PMCEID0_EL0, CGT_MDCR_TPM),
SR_TRAP(SYS_PMCEID1_EL0, CGT_MDCR_TPM),
- SR_TRAP(SYS_PMXEVTYPER_EL0, CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMXEVTYPER_EL0, CGT_MDCR_TPM_HPMN),
SR_TRAP(SYS_PMSWINC_EL0, CGT_MDCR_TPM),
SR_TRAP(SYS_PMSELR_EL0, CGT_MDCR_TPM),
- SR_TRAP(SYS_PMXEVCNTR_EL0, CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMXEVCNTR_EL0, CGT_MDCR_TPM_HPMN),
SR_TRAP(SYS_PMCCNTR_EL0, CGT_MDCR_TPM),
SR_TRAP(SYS_PMUSERENR_EL0, CGT_MDCR_TPM),
SR_TRAP(SYS_PMINTENSET_EL1, CGT_MDCR_TPM),
SR_TRAP(SYS_PMINTENCLR_EL1, CGT_MDCR_TPM),
SR_TRAP(SYS_PMMIR_EL1, CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVCNTRn_EL0(0), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVCNTRn_EL0(1), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVCNTRn_EL0(2), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVCNTRn_EL0(3), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVCNTRn_EL0(4), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVCNTRn_EL0(5), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVCNTRn_EL0(6), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVCNTRn_EL0(7), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVCNTRn_EL0(8), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVCNTRn_EL0(9), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVCNTRn_EL0(10), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVCNTRn_EL0(11), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVCNTRn_EL0(12), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVCNTRn_EL0(13), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVCNTRn_EL0(14), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVCNTRn_EL0(15), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVCNTRn_EL0(16), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVCNTRn_EL0(17), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVCNTRn_EL0(18), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVCNTRn_EL0(19), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVCNTRn_EL0(20), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVCNTRn_EL0(21), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVCNTRn_EL0(22), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVCNTRn_EL0(23), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVCNTRn_EL0(24), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVCNTRn_EL0(25), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVCNTRn_EL0(26), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVCNTRn_EL0(27), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVCNTRn_EL0(28), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVCNTRn_EL0(29), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVCNTRn_EL0(30), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVTYPERn_EL0(0), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVTYPERn_EL0(1), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVTYPERn_EL0(2), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVTYPERn_EL0(3), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVTYPERn_EL0(4), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVTYPERn_EL0(5), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVTYPERn_EL0(6), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVTYPERn_EL0(7), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVTYPERn_EL0(8), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVTYPERn_EL0(9), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVTYPERn_EL0(10), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVTYPERn_EL0(11), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVTYPERn_EL0(12), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVTYPERn_EL0(13), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVTYPERn_EL0(14), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVTYPERn_EL0(15), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVTYPERn_EL0(16), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVTYPERn_EL0(17), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVTYPERn_EL0(18), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVTYPERn_EL0(19), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVTYPERn_EL0(20), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVTYPERn_EL0(21), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVTYPERn_EL0(22), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVTYPERn_EL0(23), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVTYPERn_EL0(24), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVTYPERn_EL0(25), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVTYPERn_EL0(26), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVTYPERn_EL0(27), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVTYPERn_EL0(28), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVTYPERn_EL0(29), CGT_MDCR_TPM),
- SR_TRAP(SYS_PMEVTYPERn_EL0(30), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(0), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(1), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(2), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(3), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(4), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(5), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(6), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(7), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(8), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(9), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(10), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(11), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(12), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(13), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(14), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(15), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(16), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(17), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(18), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(19), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(20), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(21), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(22), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(23), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(24), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(25), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(26), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(27), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(28), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(29), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(30), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(0), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(1), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(2), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(3), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(4), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(5), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(6), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(7), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(8), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(9), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(10), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(11), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(12), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(13), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(14), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(15), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(16), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(17), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(18), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(19), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(20), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(21), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(22), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(23), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(24), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(25), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(26), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(27), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(28), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(29), CGT_MDCR_TPM_HPMN),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(30), CGT_MDCR_TPM_HPMN),
SR_TRAP(SYS_PMCCFILTR_EL0, CGT_MDCR_TPM),
SR_TRAP(SYS_MDCCSR_EL0, CGT_MDCR_TDCC_TDE_TDA),
SR_TRAP(SYS_MDCCINT_EL1, CGT_MDCR_TDCC_TDE_TDA),
diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c
index ac36c438b8c1..28f938f145ac 100644
--- a/arch/arm64/kvm/pmu-emul.c
+++ b/arch/arm64/kvm/pmu-emul.c
@@ -265,6 +265,24 @@ void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu)
irq_work_sync(&vcpu->arch.pmu.overflow_work);
}
+bool kvm_pmu_counter_is_hyp(struct kvm_vcpu *vcpu, unsigned int idx)
+{
+ unsigned int hpmn;
+
+ if (!vcpu_has_nv(vcpu) || idx == ARMV8_PMU_CYCLE_IDX)
+ return false;
+
+ /*
+ * Programming HPMN=0 is CONSTRAINED UNPREDICTABLE if FEAT_HPMN0 isn't
+ * implemented. Since KVM's ability to emulate HPMN=0 does not directly
+ * depend on hardware (all PMU registers are trapped), make the
+ * implementation choice that all counters are included in the second
+ * range reserved for EL2/EL3.
+ */
+ hpmn = SYS_FIELD_GET(MDCR_EL2, HPMN, __vcpu_sys_reg(vcpu, MDCR_EL2));
+ return idx >= hpmn;
+}
+
u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
{
u64 val = FIELD_GET(ARMV8_PMU_PMCR_N, kvm_vcpu_read_pmcr(vcpu));
diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h
index e08aeec5d936..e6103df9ef5d 100644
--- a/include/kvm/arm_pmu.h
+++ b/include/kvm/arm_pmu.h
@@ -96,6 +96,7 @@ int kvm_arm_set_default_pmu(struct kvm *kvm);
u8 kvm_arm_pmu_get_max_counters(struct kvm *kvm);
u64 kvm_vcpu_read_pmcr(struct kvm_vcpu *vcpu);
+bool kvm_pmu_counter_is_hyp(struct kvm_vcpu *vcpu, unsigned int idx);
#else
struct kvm_pmu {
};
@@ -187,6 +188,11 @@ static inline u64 kvm_vcpu_read_pmcr(struct kvm_vcpu *vcpu)
return 0;
}
+static inline bool kvm_pmu_counter_is_hyp(struct kvm_vcpu *vcpu)
+{
+ return false;
+}
+
#endif
#endif
--
2.47.0.163.g1226f6d8fa-goog
* Re: [PATCH v4 10/18] KVM: arm64: nv: Describe trap behaviour of MDCR_EL2.HPMN
2024-10-25 18:23 ` [PATCH v4 10/18] KVM: arm64: nv: Describe trap behaviour of MDCR_EL2.HPMN Oliver Upton
@ 2024-10-26 10:21 ` kernel test robot
2024-10-26 10:42 ` kernel test robot
2024-10-26 14:32 ` Oliver Upton
2 siblings, 0 replies; 27+ messages in thread
From: kernel test robot @ 2024-10-26 10:21 UTC (permalink / raw)
To: Oliver Upton, kvmarm
Cc: llvm, oe-kbuild-all, Marc Zyngier, Joey Gouly, Suzuki K Poulose,
Zenghui Yu, Catalin Marinas, Will Deacon, Anshuman Khandual,
linux-arm-kernel, linux-kernel, Oliver Upton
Hi Oliver,
kernel test robot noticed the following build errors:
[auto build test ERROR on 8e929cb546ee42c9a61d24fae60605e9e3192354]
url: https://github.com/intel-lab-lkp/linux/commits/Oliver-Upton/KVM-arm64-Extend-masking-facility-to-arbitrary-registers/20241026-023055
base: 8e929cb546ee42c9a61d24fae60605e9e3192354
patch link: https://lore.kernel.org/r/20241025182354.3364124-11-oliver.upton%40linux.dev
patch subject: [PATCH v4 10/18] KVM: arm64: nv: Describe trap behaviour of MDCR_EL2.HPMN
config: arm64-randconfig-003-20241026 (https://download.01.org/0day-ci/archive/20241026/202410261855.fvLOtls9-lkp@intel.com/config)
compiler: clang version 20.0.0git (https://github.com/llvm/llvm-project 5886454669c3c9026f7f27eab13509dd0241f2d6)
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20241026/202410261855.fvLOtls9-lkp@intel.com/reproduce)
If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202410261855.fvLOtls9-lkp@intel.com/
All errors (new ones prefixed by >>):
In file included from arch/arm64/kvm/emulate-nested.c:8:
In file included from include/linux/kvm_host.h:16:
In file included from include/linux/mm.h:2213:
include/linux/vmstat.h:518:36: warning: arithmetic between different enumeration types ('enum node_stat_item' and 'enum lru_list') [-Wenum-enum-conversion]
518 | return node_stat_name(NR_LRU_BASE + lru) + 3; // skip "nr_"
| ~~~~~~~~~~~ ^ ~~~
>> arch/arm64/kvm/emulate-nested.c:532:35: error: too many arguments to function call, expected single argument 'vcpu', have 2 arguments
532 | if (kvm_pmu_counter_is_hyp(vcpu, idx))
| ~~~~~~~~~~~~~~~~~~~~~~ ^~~
include/kvm/arm_pmu.h:191:20: note: 'kvm_pmu_counter_is_hyp' declared here
191 | static inline bool kvm_pmu_counter_is_hyp(struct kvm_vcpu *vcpu)
| ^ ~~~~~~~~~~~~~~~~~~~~~
1 warning and 1 error generated.
vim +/vcpu +532 arch/arm64/kvm/emulate-nested.c
509
510 static enum trap_behaviour check_mdcr_hpmn(struct kvm_vcpu *vcpu)
511 {
512 u32 sysreg = esr_sys64_to_sysreg(kvm_vcpu_get_esr(vcpu));
513 unsigned int idx;
514
515
516 switch (sysreg) {
517 case SYS_PMEVTYPERn_EL0(0) ... SYS_PMEVTYPERn_EL0(30):
518 case SYS_PMEVCNTRn_EL0(0) ... SYS_PMEVCNTRn_EL0(30):
519 idx = (sys_reg_CRm(sysreg) & 0x3) << 3 | sys_reg_Op2(sysreg);
520 break;
521 case SYS_PMXEVTYPER_EL0:
522 case SYS_PMXEVCNTR_EL0:
523 idx = SYS_FIELD_GET(PMSELR_EL0, SEL,
524 __vcpu_sys_reg(vcpu, PMSELR_EL0));
525 break;
526 default:
527 /* Someone used this trap helper for something else... */
528 KVM_BUG_ON(1, vcpu->kvm);
529 return BEHAVE_HANDLE_LOCALLY;
530 }
531
> 532 if (kvm_pmu_counter_is_hyp(vcpu, idx))
533 return BEHAVE_FORWARD_RW | BEHAVE_IN_HOST_EL0;
534
535 return BEHAVE_HANDLE_LOCALLY;
536 }
537
--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki
^ permalink raw reply [flat|nested] 27+ messages in thread
* Re: [PATCH v4 10/18] KVM: arm64: nv: Describe trap behaviour of MDCR_EL2.HPMN
2024-10-25 18:23 ` [PATCH v4 10/18] KVM: arm64: nv: Describe trap behaviour of MDCR_EL2.HPMN Oliver Upton
2024-10-26 10:21 ` kernel test robot
@ 2024-10-26 10:42 ` kernel test robot
2024-10-26 14:32 ` Oliver Upton
2 siblings, 0 replies; 27+ messages in thread
From: kernel test robot @ 2024-10-26 10:42 UTC (permalink / raw)
To: Oliver Upton, kvmarm
Cc: oe-kbuild-all, Marc Zyngier, Joey Gouly, Suzuki K Poulose,
Zenghui Yu, Catalin Marinas, Will Deacon, Anshuman Khandual,
linux-arm-kernel, linux-kernel, Oliver Upton
Hi Oliver,
kernel test robot noticed the following build errors:
[auto build test ERROR on 8e929cb546ee42c9a61d24fae60605e9e3192354]
url: https://github.com/intel-lab-lkp/linux/commits/Oliver-Upton/KVM-arm64-Extend-masking-facility-to-arbitrary-registers/20241026-023055
base: 8e929cb546ee42c9a61d24fae60605e9e3192354
patch link: https://lore.kernel.org/r/20241025182354.3364124-11-oliver.upton%40linux.dev
patch subject: [PATCH v4 10/18] KVM: arm64: nv: Describe trap behaviour of MDCR_EL2.HPMN
config: arm64-randconfig-004-20241026 (https://download.01.org/0day-ci/archive/20241026/202410261839.enJ68VEv-lkp@intel.com/config)
compiler: aarch64-linux-gcc (GCC) 14.1.0
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20241026/202410261839.enJ68VEv-lkp@intel.com/reproduce)
If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202410261839.enJ68VEv-lkp@intel.com/
All errors (new ones prefixed by >>):
arch/arm64/kvm/emulate-nested.c: In function 'check_mdcr_hpmn':
>> arch/arm64/kvm/emulate-nested.c:532:13: error: too many arguments to function 'kvm_pmu_counter_is_hyp'
532 | if (kvm_pmu_counter_is_hyp(vcpu, idx))
| ^~~~~~~~~~~~~~~~~~~~~~
In file included from arch/arm64/include/asm/kvm_host.h:38,
from include/linux/kvm_host.h:45,
from arch/arm64/kvm/emulate-nested.c:8:
include/kvm/arm_pmu.h:191:20: note: declared here
191 | static inline bool kvm_pmu_counter_is_hyp(struct kvm_vcpu *vcpu)
| ^~~~~~~~~~~~~~~~~~~~~~
vim +/kvm_pmu_counter_is_hyp +532 arch/arm64/kvm/emulate-nested.c
509
510 static enum trap_behaviour check_mdcr_hpmn(struct kvm_vcpu *vcpu)
511 {
512 u32 sysreg = esr_sys64_to_sysreg(kvm_vcpu_get_esr(vcpu));
513 unsigned int idx;
514
515
516 switch (sysreg) {
517 case SYS_PMEVTYPERn_EL0(0) ... SYS_PMEVTYPERn_EL0(30):
518 case SYS_PMEVCNTRn_EL0(0) ... SYS_PMEVCNTRn_EL0(30):
519 idx = (sys_reg_CRm(sysreg) & 0x3) << 3 | sys_reg_Op2(sysreg);
520 break;
521 case SYS_PMXEVTYPER_EL0:
522 case SYS_PMXEVCNTR_EL0:
523 idx = SYS_FIELD_GET(PMSELR_EL0, SEL,
524 __vcpu_sys_reg(vcpu, PMSELR_EL0));
525 break;
526 default:
527 /* Someone used this trap helper for something else... */
528 KVM_BUG_ON(1, vcpu->kvm);
529 return BEHAVE_HANDLE_LOCALLY;
530 }
531
> 532 if (kvm_pmu_counter_is_hyp(vcpu, idx))
533 return BEHAVE_FORWARD_RW | BEHAVE_IN_HOST_EL0;
534
535 return BEHAVE_HANDLE_LOCALLY;
536 }
537
--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki
^ permalink raw reply [flat|nested] 27+ messages in thread
* Re: [PATCH v4 10/18] KVM: arm64: nv: Describe trap behaviour of MDCR_EL2.HPMN
2024-10-25 18:23 ` [PATCH v4 10/18] KVM: arm64: nv: Describe trap behaviour of MDCR_EL2.HPMN Oliver Upton
2024-10-26 10:21 ` kernel test robot
2024-10-26 10:42 ` kernel test robot
@ 2024-10-26 14:32 ` Oliver Upton
2 siblings, 0 replies; 27+ messages in thread
From: Oliver Upton @ 2024-10-26 14:32 UTC (permalink / raw)
To: kvmarm
Cc: Marc Zyngier, Joey Gouly, Suzuki K Poulose, Zenghui Yu,
Catalin Marinas, Will Deacon, Anshuman Khandual, linux-arm-kernel,
linux-kernel
On Fri, Oct 25, 2024 at 06:23:45PM +0000, Oliver Upton wrote:
> MDCR_EL2.HPMN splits the PMU event counters into two ranges: the first
> range is accessible from all ELs, and the second range is accessible
> only to EL2/3. Supposing the guest hypervisor allows direct access to
> the PMU counters from the L2, KVM needs to locally handle those
> accesses.
>
> Add a new complex trap configuration for HPMN that checks if the counter
> index is accessible to the current context. As written, the architecture
> suggests HPMN only causes PMEVCNTR<n>_EL0 to trap, though intuition (and
> the pseudocode) suggests that the trap applies to PMEVTYPER<n>_EL0 as
> well.
>
> Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
> ---
> arch/arm64/kvm/emulate-nested.c | 160 +++++++++++++++++++-------------
> arch/arm64/kvm/pmu-emul.c | 18 ++++
> include/kvm/arm_pmu.h | 6 ++
> 3 files changed, 120 insertions(+), 64 deletions(-)
Gonna squash in the following to fix !CONFIG_HW_PERF_EVENTS builds.
diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h
index e6103df9ef5d..feb5d1d35f0f 100644
--- a/include/kvm/arm_pmu.h
+++ b/include/kvm/arm_pmu.h
@@ -188,7 +188,7 @@ static inline u64 kvm_vcpu_read_pmcr(struct kvm_vcpu *vcpu)
return 0;
}
-static inline bool kvm_pmu_counter_is_hyp(struct kvm_vcpu *vcpu)
+static inline bool kvm_pmu_counter_is_hyp(struct kvm_vcpu *vcpu, unsigned int idx)
{
return false;
}
--
Thanks,
Oliver
^ permalink raw reply related [flat|nested] 27+ messages in thread
* [PATCH v4 11/18] KVM: arm64: nv: Advertise support for FEAT_HPMN0
2024-10-25 18:23 [PATCH v4 00/18] KVM: arm64: nv: Support for EL2 PMU controls Oliver Upton
` (9 preceding siblings ...)
2024-10-25 18:23 ` [PATCH v4 10/18] KVM: arm64: nv: Describe trap behaviour of MDCR_EL2.HPMN Oliver Upton
@ 2024-10-25 18:23 ` Oliver Upton
2024-10-25 18:23 ` [PATCH v4 12/18] KVM: arm64: Rename kvm_pmu_valid_counter_mask() Oliver Upton
` (8 subsequent siblings)
19 siblings, 0 replies; 27+ messages in thread
From: Oliver Upton @ 2024-10-25 18:23 UTC (permalink / raw)
To: kvmarm
Cc: Marc Zyngier, Joey Gouly, Suzuki K Poulose, Zenghui Yu,
Catalin Marinas, Will Deacon, Anshuman Khandual, linux-arm-kernel,
linux-kernel, Oliver Upton
Everything is in place now for KVM to actually handle MDCR_EL2.HPMN. Not
only that, the emulation is capable of doing FEAT_HPMN0. Advertise
support for the feature in the VM's ID registers. It is possible to
emulate FEAT_HPMN0 on hardware that doesn't support it since KVM
currently traps all PMU registers. Having said that, let's only
advertise the feature on supporting hardware in case KVM ever provides
'direct' PMU support to VMs w/o involving host perf.
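For readers following along, a tiny standalone sketch (hypothetical helper
name, not part of this patch) of where the feature is reported: FEAT_HPMN0
lives in ID_AA64DFR0_EL1.HPMN0, bits [63:60], and a nonzero value means
MDCR_EL2.HPMN may legally be programmed to zero.
#include <stdbool.h>
#include <stdint.h>
/*
 * Illustrative only: ID_AA64DFR0_EL1.HPMN0 occupies bits [63:60]
 * (DDI0601); 0b0001 means writing 0 to MDCR_EL2.HPMN is permitted.
 */
bool cpu_has_hpmn0(uint64_t id_aa64dfr0)
{
	return ((id_aa64dfr0 >> 60) & 0xf) != 0;
}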
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
---
arch/arm64/kvm/nested.c | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/arch/arm64/kvm/nested.c b/arch/arm64/kvm/nested.c
index d256a495a6ba..762f3ea8126d 100644
--- a/arch/arm64/kvm/nested.c
+++ b/arch/arm64/kvm/nested.c
@@ -892,12 +892,13 @@ static void limit_nv_id_regs(struct kvm *kvm)
ID_AA64MMFR4_EL1_E2H0_NI_NV1);
kvm_set_vm_id_reg(kvm, SYS_ID_AA64MMFR4_EL1, val);
- /* Only limited support for PMU, Debug, BPs and WPs */
+ /* Only limited support for PMU, Debug, BPs, WPs, and HPMN0 */
val = kvm_read_vm_id_reg(kvm, SYS_ID_AA64DFR0_EL1);
val &= (NV_FTR(DFR0, PMUVer) |
NV_FTR(DFR0, WRPs) |
NV_FTR(DFR0, BRPs) |
- NV_FTR(DFR0, DebugVer));
+ NV_FTR(DFR0, DebugVer) |
+ NV_FTR(DFR0, HPMN0));
/* Cap Debug to ARMv8.1 */
tmp = FIELD_GET(NV_FTR(DFR0, DebugVer), val);
--
2.47.0.163.g1226f6d8fa-goog
^ permalink raw reply related [flat|nested] 27+ messages in thread
* [PATCH v4 12/18] KVM: arm64: Rename kvm_pmu_valid_counter_mask()
2024-10-25 18:23 [PATCH v4 00/18] KVM: arm64: nv: Support for EL2 PMU controls Oliver Upton
` (10 preceding siblings ...)
2024-10-25 18:23 ` [PATCH v4 11/18] KVM: arm64: nv: Advertise support for FEAT_HPMN0 Oliver Upton
@ 2024-10-25 18:23 ` Oliver Upton
2024-10-25 18:23 ` [PATCH v4 13/18] KVM: arm64: nv: Adjust range of accessible PMCs according to HPMN Oliver Upton
` (7 subsequent siblings)
19 siblings, 0 replies; 27+ messages in thread
From: Oliver Upton @ 2024-10-25 18:23 UTC (permalink / raw)
To: kvmarm
Cc: Marc Zyngier, Joey Gouly, Suzuki K Poulose, Zenghui Yu,
Catalin Marinas, Will Deacon, Anshuman Khandual, linux-arm-kernel,
linux-kernel, Oliver Upton
Nested PMU support requires dynamically changing the visible range of
PMU counters based on the exception level and value of MDCR_EL2.HPMN. At
the same time, the PMU emulation code needs to know the absolute number
of implemented counters, regardless of context.
Rename the existing helper to make it obvious that it returns the number
of implemented counters and not anything else.
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
---
arch/arm64/kvm/pmu-emul.c | 8 ++++----
arch/arm64/kvm/sys_regs.c | 12 ++++++------
include/kvm/arm_pmu.h | 4 ++--
3 files changed, 12 insertions(+), 12 deletions(-)
diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c
index 28f938f145ac..fd08c4b53be3 100644
--- a/arch/arm64/kvm/pmu-emul.c
+++ b/arch/arm64/kvm/pmu-emul.c
@@ -244,7 +244,7 @@ void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu)
*/
void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu)
{
- unsigned long mask = kvm_pmu_valid_counter_mask(vcpu);
+ unsigned long mask = kvm_pmu_implemented_counter_mask(vcpu);
int i;
for_each_set_bit(i, &mask, 32)
@@ -283,7 +283,7 @@ bool kvm_pmu_counter_is_hyp(struct kvm_vcpu *vcpu, unsigned int idx)
return idx >= hpmn;
}
-u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
+u64 kvm_pmu_implemented_counter_mask(struct kvm_vcpu *vcpu)
{
u64 val = FIELD_GET(ARMV8_PMU_PMCR_N, kvm_vcpu_read_pmcr(vcpu));
@@ -592,7 +592,7 @@ void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
kvm_pmu_set_counter_value(vcpu, ARMV8_PMU_CYCLE_IDX, 0);
if (val & ARMV8_PMU_PMCR_P) {
- unsigned long mask = kvm_pmu_valid_counter_mask(vcpu);
+ unsigned long mask = kvm_pmu_implemented_counter_mask(vcpu);
mask &= ~BIT(ARMV8_PMU_CYCLE_IDX);
for_each_set_bit(i, &mask, 32)
kvm_pmu_set_pmc_value(kvm_vcpu_idx_to_pmc(vcpu, i), 0, true);
@@ -822,7 +822,7 @@ u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
void kvm_vcpu_reload_pmu(struct kvm_vcpu *vcpu)
{
- u64 mask = kvm_pmu_valid_counter_mask(vcpu);
+ u64 mask = kvm_pmu_implemented_counter_mask(vcpu);
kvm_pmu_handle_pmcr(vcpu, kvm_vcpu_read_pmcr(vcpu));
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 30fe940cd5bd..bd0c116f041b 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -1131,7 +1131,7 @@ static int set_pmreg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r, u64 va
{
bool set;
- val &= kvm_pmu_valid_counter_mask(vcpu);
+ val &= kvm_pmu_implemented_counter_mask(vcpu);
switch (r->reg) {
case PMOVSSET_EL0:
@@ -1154,7 +1154,7 @@ static int set_pmreg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r, u64 va
static int get_pmreg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r, u64 *val)
{
- u64 mask = kvm_pmu_valid_counter_mask(vcpu);
+ u64 mask = kvm_pmu_implemented_counter_mask(vcpu);
*val = __vcpu_sys_reg(vcpu, r->reg) & mask;
return 0;
@@ -1168,7 +1168,7 @@ static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
if (pmu_access_el0_disabled(vcpu))
return false;
- mask = kvm_pmu_valid_counter_mask(vcpu);
+ mask = kvm_pmu_implemented_counter_mask(vcpu);
if (p->is_write) {
val = p->regval & mask;
if (r->Op2 & 0x1) {
@@ -1191,7 +1191,7 @@ static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
const struct sys_reg_desc *r)
{
- u64 mask = kvm_pmu_valid_counter_mask(vcpu);
+ u64 mask = kvm_pmu_implemented_counter_mask(vcpu);
if (check_pmu_access_disabled(vcpu, 0))
return false;
@@ -1215,7 +1215,7 @@ static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
static bool access_pmovs(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
const struct sys_reg_desc *r)
{
- u64 mask = kvm_pmu_valid_counter_mask(vcpu);
+ u64 mask = kvm_pmu_implemented_counter_mask(vcpu);
if (pmu_access_el0_disabled(vcpu))
return false;
@@ -1245,7 +1245,7 @@ static bool access_pmswinc(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
if (pmu_write_swinc_el0_disabled(vcpu))
return false;
- mask = kvm_pmu_valid_counter_mask(vcpu);
+ mask = kvm_pmu_implemented_counter_mask(vcpu);
kvm_pmu_software_increment(vcpu, p->regval & mask);
return true;
}
diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h
index e6103df9ef5d..b175b10491f0 100644
--- a/include/kvm/arm_pmu.h
+++ b/include/kvm/arm_pmu.h
@@ -47,7 +47,7 @@ static __always_inline bool kvm_arm_support_pmu_v3(void)
#define kvm_arm_pmu_irq_initialized(v) ((v)->arch.pmu.irq_num >= VGIC_NR_SGIS)
u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx);
void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val);
-u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu);
+u64 kvm_pmu_implemented_counter_mask(struct kvm_vcpu *vcpu);
u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1);
void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu);
@@ -114,7 +114,7 @@ static inline u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu,
}
static inline void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu,
u64 select_idx, u64 val) {}
-static inline u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
+static inline u64 kvm_pmu_implemented_counter_mask(struct kvm_vcpu *vcpu)
{
return 0;
}
--
2.47.0.163.g1226f6d8fa-goog
^ permalink raw reply related [flat|nested] 27+ messages in thread
* [PATCH v4 13/18] KVM: arm64: nv: Adjust range of accessible PMCs according to HPMN
2024-10-25 18:23 [PATCH v4 00/18] KVM: arm64: nv: Support for EL2 PMU controls Oliver Upton
` (11 preceding siblings ...)
2024-10-25 18:23 ` [PATCH v4 12/18] KVM: arm64: Rename kvm_pmu_valid_counter_mask() Oliver Upton
@ 2024-10-25 18:23 ` Oliver Upton
2024-10-25 18:23 ` [PATCH v4 14/18] KVM: arm64: Add helpers to determine if PMC counts at a given EL Oliver Upton
` (6 subsequent siblings)
19 siblings, 0 replies; 27+ messages in thread
From: Oliver Upton @ 2024-10-25 18:23 UTC (permalink / raw)
To: kvmarm
Cc: Marc Zyngier, Joey Gouly, Suzuki K Poulose, Zenghui Yu,
Catalin Marinas, Will Deacon, Anshuman Khandual, linux-arm-kernel,
linux-kernel, Oliver Upton
The value of MDCR_EL2.HPMN controls the number of event counters made
visible to EL0 and EL1. This means it is possible for the guest
hypervisor to grant the L2 direct access to a subset of the event
counters.
Rework KVM's PMU register emulation to take the effects of HPMN into
account when handling a trap. For bitmask-style registers, writes only
affect the counters accessible in the current context.
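To make the arithmetic concrete, here is a self-contained sketch
(hypothetical values; the GENMASK/BIT macros below stand in for the
kernel's include/linux/bits.h). With 8 implemented event counters and
MDCR_EL2.HPMN = 4, counters 0-3 plus the cycle counter remain accessible
outside of EL2, while counters 4-7 belong to the guest hypervisor:
#include <stdint.h>
#include <stdio.h>
#define GENMASK(h, l)	((~0u << (l)) & (~0u >> (31 - (h))))
#define BIT(n)		(1u << (n))
#define CYCLE_IDX	31	/* PMCCNTR_EL0 lives at bit 31 */
int main(void)
{
	unsigned int pmcr_n = 8, hpmn = 4;
	uint32_t implemented = GENMASK(pmcr_n - 1, 0) | BIT(CYCLE_IDX);
	uint32_t accessible  = implemented & ~GENMASK(pmcr_n - 1, hpmn);

	printf("implemented: %#x\n", implemented);	/* 0x800000ff */
	printf("accessible:  %#x\n", accessible);	/* 0x8000000f */
	return 0;
}
Note that the cycle counter (bit 31) deliberately stays in the accessible
set, matching kvm_pmu_counter_is_hyp() treating ARMV8_PMU_CYCLE_IDX as
never hypervisor-owned.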
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
---
arch/arm64/kvm/pmu-emul.c | 14 +++++++++++++-
arch/arm64/kvm/sys_regs.c | 12 ++++++------
include/kvm/arm_pmu.h | 5 +++++
3 files changed, 24 insertions(+), 7 deletions(-)
diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c
index fd08c4b53be3..0d669fb84485 100644
--- a/arch/arm64/kvm/pmu-emul.c
+++ b/arch/arm64/kvm/pmu-emul.c
@@ -283,6 +283,18 @@ bool kvm_pmu_counter_is_hyp(struct kvm_vcpu *vcpu, unsigned int idx)
return idx >= hpmn;
}
+u64 kvm_pmu_accessible_counter_mask(struct kvm_vcpu *vcpu)
+{
+ u64 mask = kvm_pmu_implemented_counter_mask(vcpu);
+ u64 hpmn;
+
+ if (!vcpu_has_nv(vcpu) || vcpu_is_el2(vcpu))
+ return mask;
+
+ hpmn = SYS_FIELD_GET(MDCR_EL2, HPMN, __vcpu_sys_reg(vcpu, MDCR_EL2));
+ return mask & ~GENMASK(vcpu->kvm->arch.pmcr_n - 1, hpmn);
+}
+
u64 kvm_pmu_implemented_counter_mask(struct kvm_vcpu *vcpu)
{
u64 val = FIELD_GET(ARMV8_PMU_PMCR_N, kvm_vcpu_read_pmcr(vcpu));
@@ -592,7 +604,7 @@ void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
kvm_pmu_set_counter_value(vcpu, ARMV8_PMU_CYCLE_IDX, 0);
if (val & ARMV8_PMU_PMCR_P) {
- unsigned long mask = kvm_pmu_implemented_counter_mask(vcpu);
+ unsigned long mask = kvm_pmu_accessible_counter_mask(vcpu);
mask &= ~BIT(ARMV8_PMU_CYCLE_IDX);
for_each_set_bit(i, &mask, 32)
kvm_pmu_set_pmc_value(kvm_vcpu_idx_to_pmc(vcpu, i), 0, true);
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index bd0c116f041b..8c226ec8bc25 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -1131,7 +1131,7 @@ static int set_pmreg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r, u64 va
{
bool set;
- val &= kvm_pmu_implemented_counter_mask(vcpu);
+ val &= kvm_pmu_accessible_counter_mask(vcpu);
switch (r->reg) {
case PMOVSSET_EL0:
@@ -1154,7 +1154,7 @@ static int set_pmreg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r, u64 va
static int get_pmreg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r, u64 *val)
{
- u64 mask = kvm_pmu_implemented_counter_mask(vcpu);
+ u64 mask = kvm_pmu_accessible_counter_mask(vcpu);
*val = __vcpu_sys_reg(vcpu, r->reg) & mask;
return 0;
@@ -1168,7 +1168,7 @@ static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
if (pmu_access_el0_disabled(vcpu))
return false;
- mask = kvm_pmu_implemented_counter_mask(vcpu);
+ mask = kvm_pmu_accessible_counter_mask(vcpu);
if (p->is_write) {
val = p->regval & mask;
if (r->Op2 & 0x1) {
@@ -1191,7 +1191,7 @@ static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
const struct sys_reg_desc *r)
{
- u64 mask = kvm_pmu_implemented_counter_mask(vcpu);
+ u64 mask = kvm_pmu_accessible_counter_mask(vcpu);
if (check_pmu_access_disabled(vcpu, 0))
return false;
@@ -1215,7 +1215,7 @@ static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
static bool access_pmovs(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
const struct sys_reg_desc *r)
{
- u64 mask = kvm_pmu_implemented_counter_mask(vcpu);
+ u64 mask = kvm_pmu_accessible_counter_mask(vcpu);
if (pmu_access_el0_disabled(vcpu))
return false;
@@ -1245,7 +1245,7 @@ static bool access_pmswinc(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
if (pmu_write_swinc_el0_disabled(vcpu))
return false;
- mask = kvm_pmu_implemented_counter_mask(vcpu);
+ mask = kvm_pmu_accessible_counter_mask(vcpu);
kvm_pmu_software_increment(vcpu, p->regval & mask);
return true;
}
diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h
index b175b10491f0..b738ffb39bb0 100644
--- a/include/kvm/arm_pmu.h
+++ b/include/kvm/arm_pmu.h
@@ -48,6 +48,7 @@ static __always_inline bool kvm_arm_support_pmu_v3(void)
u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx);
void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val);
u64 kvm_pmu_implemented_counter_mask(struct kvm_vcpu *vcpu);
+u64 kvm_pmu_accessible_counter_mask(struct kvm_vcpu *vcpu);
u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1);
void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu);
@@ -118,6 +119,10 @@ static inline u64 kvm_pmu_implemented_counter_mask(struct kvm_vcpu *vcpu)
{
return 0;
}
+static inline u64 kvm_pmu_accessible_counter_mask(struct kvm_vcpu *vcpu)
+{
+ return 0;
+}
static inline void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu) {}
--
2.47.0.163.g1226f6d8fa-goog
^ permalink raw reply related [flat|nested] 27+ messages in thread
* [PATCH v4 14/18] KVM: arm64: Add helpers to determine if PMC counts at a given EL
2024-10-25 18:23 [PATCH v4 00/18] KVM: arm64: nv: Support for EL2 PMU controls Oliver Upton
` (12 preceding siblings ...)
2024-10-25 18:23 ` [PATCH v4 13/18] KVM: arm64: nv: Adjust range of accessible PMCs according to HPMN Oliver Upton
@ 2024-10-25 18:23 ` Oliver Upton
2024-10-25 18:23 ` [PATCH v4 15/18] KVM: arm64: nv: Honor MDCR_EL2.HPME Oliver Upton
` (5 subsequent siblings)
19 siblings, 0 replies; 27+ messages in thread
From: Oliver Upton @ 2024-10-25 18:23 UTC (permalink / raw)
To: kvmarm
Cc: Marc Zyngier, Joey Gouly, Suzuki K Poulose, Zenghui Yu,
Catalin Marinas, Will Deacon, Anshuman Khandual, linux-arm-kernel,
linux-kernel, Oliver Upton
Checking the exception level filters for a PMC is a minor annoyance to
open code. Add helpers to check if an event counts at EL0 and EL1, which
will prove useful in a subsequent change.
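As background for the helpers below, a standalone sketch (bit positions
mirror the PMEVTYPER<n>_EL0 layout; the function is illustrative, not this
patch's code): the architecture defines the Non-secure filter bits as
equality checks against their Secure counterparts, so "counts at EL0"
reduces to U == NSU, and likewise P == NSK for EL1.
#include <stdbool.h>
#include <stdint.h>
#define EXCLUDE_EL0	(1ull << 30)	/* U */
#define EXCLUDE_NS_EL0	(1ull << 28)	/* NSU */
/*
 * Events count at Non-secure EL0 when NSU equals U:
 * U=0/NSU=0 and U=1/NSU=1 count; the mixed cases do not.
 */
bool counts_at_el0(uint64_t evtreg)
{
	bool u   = evtreg & EXCLUDE_EL0;
	bool nsu = evtreg & EXCLUDE_NS_EL0;

	return u == nsu;
}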
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
---
arch/arm64/kvm/pmu-emul.c | 40 +++++++++++++++++++++++++++------------
1 file changed, 28 insertions(+), 12 deletions(-)
diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c
index 0d669fb84485..03cd1ad7a55a 100644
--- a/arch/arm64/kvm/pmu-emul.c
+++ b/arch/arm64/kvm/pmu-emul.c
@@ -111,6 +111,11 @@ static u32 counter_index_to_evtreg(u64 idx)
return (idx == ARMV8_PMU_CYCLE_IDX) ? PMCCFILTR_EL0 : PMEVTYPER0_EL0 + idx;
}
+static u64 kvm_pmc_read_evtreg(const struct kvm_pmc *pmc)
+{
+ return __vcpu_sys_reg(kvm_pmc_to_vcpu(pmc), counter_index_to_evtreg(pmc->idx));
+}
+
static u64 kvm_pmu_get_pmc_value(struct kvm_pmc *pmc)
{
struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
@@ -619,6 +624,24 @@ static bool kvm_pmu_counter_is_enabled(struct kvm_pmc *pmc)
(__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & BIT(pmc->idx));
}
+static bool kvm_pmc_counts_at_el0(struct kvm_pmc *pmc)
+{
+ u64 evtreg = kvm_pmc_read_evtreg(pmc);
+ bool nsu = evtreg & ARMV8_PMU_EXCLUDE_NS_EL0;
+ bool u = evtreg & ARMV8_PMU_EXCLUDE_EL0;
+
+ return u == nsu;
+}
+
+static bool kvm_pmc_counts_at_el1(struct kvm_pmc *pmc)
+{
+ u64 evtreg = kvm_pmc_read_evtreg(pmc);
+ bool nsk = evtreg & ARMV8_PMU_EXCLUDE_NS_EL1;
+ bool p = evtreg & ARMV8_PMU_EXCLUDE_EL1;
+
+ return p == nsk;
+}
+
/**
* kvm_pmu_create_perf_event - create a perf event for a counter
* @pmc: Counter context
@@ -629,17 +652,15 @@ static void kvm_pmu_create_perf_event(struct kvm_pmc *pmc)
struct arm_pmu *arm_pmu = vcpu->kvm->arch.arm_pmu;
struct perf_event *event;
struct perf_event_attr attr;
- u64 eventsel, reg, data;
- bool p, u, nsk, nsu;
+ u64 eventsel, evtreg;
- reg = counter_index_to_evtreg(pmc->idx);
- data = __vcpu_sys_reg(vcpu, reg);
+ evtreg = kvm_pmc_read_evtreg(pmc);
kvm_pmu_stop_counter(pmc);
if (pmc->idx == ARMV8_PMU_CYCLE_IDX)
eventsel = ARMV8_PMUV3_PERFCTR_CPU_CYCLES;
else
- eventsel = data & kvm_pmu_event_mask(vcpu->kvm);
+ eventsel = evtreg & kvm_pmu_event_mask(vcpu->kvm);
/*
* Neither SW increment nor chained events need to be backed
@@ -657,18 +678,13 @@ static void kvm_pmu_create_perf_event(struct kvm_pmc *pmc)
!test_bit(eventsel, vcpu->kvm->arch.pmu_filter))
return;
- p = data & ARMV8_PMU_EXCLUDE_EL1;
- u = data & ARMV8_PMU_EXCLUDE_EL0;
- nsk = data & ARMV8_PMU_EXCLUDE_NS_EL1;
- nsu = data & ARMV8_PMU_EXCLUDE_NS_EL0;
-
memset(&attr, 0, sizeof(struct perf_event_attr));
attr.type = arm_pmu->pmu.type;
attr.size = sizeof(attr);
attr.pinned = 1;
attr.disabled = !kvm_pmu_counter_is_enabled(pmc);
- attr.exclude_user = (u != nsu);
- attr.exclude_kernel = (p != nsk);
+ attr.exclude_user = !kvm_pmc_counts_at_el0(pmc);
+ attr.exclude_kernel = !kvm_pmc_counts_at_el1(pmc);
attr.exclude_hv = 1; /* Don't count EL2 events */
attr.exclude_host = 1; /* Don't count host events */
attr.config = eventsel;
--
2.47.0.163.g1226f6d8fa-goog
^ permalink raw reply related [flat|nested] 27+ messages in thread
* [PATCH v4 15/18] KVM: arm64: nv: Honor MDCR_EL2.HPME
2024-10-25 18:23 [PATCH v4 00/18] KVM: arm64: nv: Support for EL2 PMU controls Oliver Upton
` (13 preceding siblings ...)
2024-10-25 18:23 ` [PATCH v4 14/18] KVM: arm64: Add helpers to determine if PMC counts at a given EL Oliver Upton
@ 2024-10-25 18:23 ` Oliver Upton
2024-10-25 18:23 ` [PATCH v4 16/18] KVM: arm64: nv: Honor MDCR_EL2.HLP Oliver Upton
` (4 subsequent siblings)
19 siblings, 0 replies; 27+ messages in thread
From: Oliver Upton @ 2024-10-25 18:23 UTC (permalink / raw)
To: kvmarm
Cc: Marc Zyngier, Joey Gouly, Suzuki K Poulose, Zenghui Yu,
Catalin Marinas, Will Deacon, Anshuman Khandual, linux-arm-kernel,
linux-kernel, Oliver Upton
When the PMU is configured with split counter ranges, HPME becomes the
enable bit for the counters reserved for EL2.
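Reduced to plain booleans (a sketch of the decision this patch implements,
not the kernel code itself), the enable logic for a counter becomes:
#include <stdbool.h>
#include <stdint.h>
bool counter_enabled(uint32_t pmcntenset, unsigned int idx,
		     bool counter_is_hyp, bool mdcr_hpme, bool pmcr_e)
{
	/* The per-counter enable bit always applies... */
	if (!(pmcntenset & (1u << idx)))
		return false;

	/* ...but the global enable depends on who owns the counter. */
	return counter_is_hyp ? mdcr_hpme : pmcr_e;
}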
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
---
arch/arm64/kvm/pmu-emul.c | 11 +++++++++--
1 file changed, 9 insertions(+), 2 deletions(-)
diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c
index 03cd1ad7a55a..349886f03fd5 100644
--- a/arch/arm64/kvm/pmu-emul.c
+++ b/arch/arm64/kvm/pmu-emul.c
@@ -620,8 +620,15 @@ void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
static bool kvm_pmu_counter_is_enabled(struct kvm_pmc *pmc)
{
struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
- return (kvm_vcpu_read_pmcr(vcpu) & ARMV8_PMU_PMCR_E) &&
- (__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & BIT(pmc->idx));
+ unsigned int mdcr = __vcpu_sys_reg(vcpu, MDCR_EL2);
+
+ if (!(__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & BIT(pmc->idx)))
+ return false;
+
+ if (kvm_pmu_counter_is_hyp(vcpu, pmc->idx))
+ return mdcr & MDCR_EL2_HPME;
+
+ return kvm_vcpu_read_pmcr(vcpu) & ARMV8_PMU_PMCR_E;
}
static bool kvm_pmc_counts_at_el0(struct kvm_pmc *pmc)
--
2.47.0.163.g1226f6d8fa-goog
^ permalink raw reply related [flat|nested] 27+ messages in thread
* [PATCH v4 16/18] KVM: arm64: nv: Honor MDCR_EL2.HLP
2024-10-25 18:23 [PATCH v4 00/18] KVM: arm64: nv: Support for EL2 PMU controls Oliver Upton
` (14 preceding siblings ...)
2024-10-25 18:23 ` [PATCH v4 15/18] KVM: arm64: nv: Honor MDCR_EL2.HPME Oliver Upton
@ 2024-10-25 18:23 ` Oliver Upton
2024-10-25 18:23 ` [PATCH v4 17/18] KVM: arm64: nv: Apply EL2 event filtering when in hyp context Oliver Upton
` (3 subsequent siblings)
19 siblings, 0 replies; 27+ messages in thread
From: Oliver Upton @ 2024-10-25 18:23 UTC (permalink / raw)
To: kvmarm
Cc: Marc Zyngier, Joey Gouly, Suzuki K Poulose, Zenghui Yu,
Catalin Marinas, Will Deacon, Anshuman Khandual, linux-arm-kernel,
linux-kernel, Oliver Upton
Counters that fall in the hypervisor range (i.e. a counter index >=
MDCR_EL2.HPMN) have a separate control bit, MDCR_EL2.HLP, for enabling
64-bit overflow. Take it into account.
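In sketch form (inputs flattened to booleans; mirrors the order of checks
in the patch), the overflow-width selection is:
#include <stdbool.h>
bool has_64bit_overflow(bool counter_is_hyp, bool mdcr_hlp,
			bool is_cycle, bool pmcr_lc, bool pmcr_lp)
{
	/* EL2-owned counters answer to MDCR_EL2.HLP alone... */
	if (counter_is_hyp)
		return mdcr_hlp;

	/* ...everything else follows PMCR_EL0.LC (cycle) or .LP. */
	return is_cycle ? pmcr_lc : pmcr_lp;
}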
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
---
arch/arm64/kvm/pmu-emul.c | 6 +++++-
1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c
index 349886f03fd5..1e9cdbc235a8 100644
--- a/arch/arm64/kvm/pmu-emul.c
+++ b/arch/arm64/kvm/pmu-emul.c
@@ -89,7 +89,11 @@ static bool kvm_pmc_is_64bit(struct kvm_pmc *pmc)
static bool kvm_pmc_has_64bit_overflow(struct kvm_pmc *pmc)
{
- u64 val = kvm_vcpu_read_pmcr(kvm_pmc_to_vcpu(pmc));
+ struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
+ u64 val = kvm_vcpu_read_pmcr(vcpu);
+
+ if (kvm_pmu_counter_is_hyp(vcpu, pmc->idx))
+ return __vcpu_sys_reg(vcpu, MDCR_EL2) & MDCR_EL2_HLP;
return (pmc->idx < ARMV8_PMU_CYCLE_IDX && (val & ARMV8_PMU_PMCR_LP)) ||
(pmc->idx == ARMV8_PMU_CYCLE_IDX && (val & ARMV8_PMU_PMCR_LC));
--
2.47.0.163.g1226f6d8fa-goog
^ permalink raw reply related [flat|nested] 27+ messages in thread
* [PATCH v4 17/18] KVM: arm64: nv: Apply EL2 event filtering when in hyp context
2024-10-25 18:23 [PATCH v4 00/18] KVM: arm64: nv: Support for EL2 PMU controls Oliver Upton
` (15 preceding siblings ...)
2024-10-25 18:23 ` [PATCH v4 16/18] KVM: arm64: nv: Honor MDCR_EL2.HLP Oliver Upton
@ 2024-10-25 18:23 ` Oliver Upton
2024-10-25 18:25 ` [PATCH v4 18/18] KVM: arm64: nv: Reprogram PMU events affected by nested transition Oliver Upton
` (2 subsequent siblings)
19 siblings, 0 replies; 27+ messages in thread
From: Oliver Upton @ 2024-10-25 18:23 UTC (permalink / raw)
To: kvmarm
Cc: Marc Zyngier, Joey Gouly, Suzuki K Poulose, Zenghui Yu,
Catalin Marinas, Will Deacon, Anshuman Khandual, linux-arm-kernel,
linux-kernel, Oliver Upton
It hopefully comes as no surprise when I say that vEL2 actually runs at
EL1. So, the guest hypervisor's EL2 event filter (NSH) needs to actually
be applied to EL1 in the perf event. In addition to this, the disable
bit for the guest counter range (HPMD) needs to have the effect of
stopping the affected counters.
Do exactly that by stuffing ::exclude_kernel with the combined effect of
these controls. This isn't quite enough yet, as the backing perf events
need to be reprogrammed upon nested ERET/exception entry to remap the
effective filter onto ::exclude_kernel.
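Boiled down to a sketch (plain booleans, not the patch itself): since vEL2
physically executes at EL1, the kernel-level perf filter tracks the guest's
EL2 filter whenever the vCPU is in a hyp context, and the EL1 filter
otherwise.
#include <stdbool.h>
/*
 * counts_at_el2 is assumed to already fold in MDCR_EL2.HPMD, as the
 * patch's kvm_pmc_counts_at_el2() does.
 */
bool exclude_kernel(bool in_hyp_ctxt, bool counts_at_el1,
		    bool counts_at_el2)
{
	return in_hyp_ctxt ? !counts_at_el2 : !counts_at_el1;
}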
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
---
arch/arm64/kvm/pmu-emul.c | 21 ++++++++++++++++++++-
1 file changed, 20 insertions(+), 1 deletion(-)
diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c
index 1e9cdbc235a8..e2eb2ba903b6 100644
--- a/arch/arm64/kvm/pmu-emul.c
+++ b/arch/arm64/kvm/pmu-emul.c
@@ -653,6 +653,17 @@ static bool kvm_pmc_counts_at_el1(struct kvm_pmc *pmc)
return p == nsk;
}
+static bool kvm_pmc_counts_at_el2(struct kvm_pmc *pmc)
+{
+ struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
+ u64 mdcr = __vcpu_sys_reg(vcpu, MDCR_EL2);
+
+ if (!kvm_pmu_counter_is_hyp(vcpu, pmc->idx) && (mdcr & MDCR_EL2_HPMD))
+ return false;
+
+ return kvm_pmc_read_evtreg(pmc) & ARMV8_PMU_INCLUDE_EL2;
+}
+
/**
* kvm_pmu_create_perf_event - create a perf event for a counter
* @pmc: Counter context
@@ -695,11 +706,19 @@ static void kvm_pmu_create_perf_event(struct kvm_pmc *pmc)
attr.pinned = 1;
attr.disabled = !kvm_pmu_counter_is_enabled(pmc);
attr.exclude_user = !kvm_pmc_counts_at_el0(pmc);
- attr.exclude_kernel = !kvm_pmc_counts_at_el1(pmc);
attr.exclude_hv = 1; /* Don't count EL2 events */
attr.exclude_host = 1; /* Don't count host events */
attr.config = eventsel;
+ /*
+ * Filter events at EL1 (i.e. vEL2) when in a hyp context based on the
+ * guest's EL2 filter.
+ */
+ if (unlikely(is_hyp_ctxt(vcpu)))
+ attr.exclude_kernel = !kvm_pmc_counts_at_el2(pmc);
+ else
+ attr.exclude_kernel = !kvm_pmc_counts_at_el1(pmc);
+
/*
* If counting with a 64bit counter, advertise it to the perf
* code, carefully dealing with the initial sample period
--
2.47.0.163.g1226f6d8fa-goog
^ permalink raw reply related [flat|nested] 27+ messages in thread
* [PATCH v4 18/18] KVM: arm64: nv: Reprogram PMU events affected by nested transition
2024-10-25 18:23 [PATCH v4 00/18] KVM: arm64: nv: Support for EL2 PMU controls Oliver Upton
` (16 preceding siblings ...)
2024-10-25 18:23 ` [PATCH v4 17/18] KVM: arm64: nv: Apply EL2 event filtering when in hyp context Oliver Upton
@ 2024-10-25 18:25 ` Oliver Upton
2024-10-30 8:45 ` [PATCH v4 00/18] KVM: arm64: nv: Support for EL2 PMU controls Marc Zyngier
2024-10-31 19:34 ` Oliver Upton
19 siblings, 0 replies; 27+ messages in thread
From: Oliver Upton @ 2024-10-25 18:25 UTC (permalink / raw)
To: kvmarm
Cc: Marc Zyngier, Joey Gouly, Suzuki K Poulose, Zenghui Yu,
Catalin Marinas, Will Deacon, Anshuman Khandual, linux-arm-kernel,
linux-kernel, Oliver Upton
Start reprogramming PMU events at nested boundaries now that everything
is in place to handle the EL2 event filter. As a slight optimization,
only repaint events whose filter actually differs between EL1 and EL2.
The PMU now 'works' for nested VMs, albeit slowly.
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
---
arch/arm64/kvm/emulate-nested.c | 4 ++++
arch/arm64/kvm/pmu-emul.c | 29 +++++++++++++++++++++++++++++
include/kvm/arm_pmu.h | 3 +++
3 files changed, 36 insertions(+)
diff --git a/arch/arm64/kvm/emulate-nested.c b/arch/arm64/kvm/emulate-nested.c
index 162bddbfbe79..13f0be0911e8 100644
--- a/arch/arm64/kvm/emulate-nested.c
+++ b/arch/arm64/kvm/emulate-nested.c
@@ -2450,6 +2450,8 @@ void kvm_emulate_nested_eret(struct kvm_vcpu *vcpu)
kvm_arch_vcpu_load(vcpu, smp_processor_id());
preempt_enable();
+
+ kvm_pmu_nested_transition(vcpu);
}
static void kvm_inject_el2_exception(struct kvm_vcpu *vcpu, u64 esr_el2,
@@ -2532,6 +2534,8 @@ static int kvm_inject_nested(struct kvm_vcpu *vcpu, u64 esr_el2,
kvm_arch_vcpu_load(vcpu, smp_processor_id());
preempt_enable();
+ kvm_pmu_nested_transition(vcpu);
+
return 1;
}
diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c
index e2eb2ba903b6..8ad62284fa23 100644
--- a/arch/arm64/kvm/pmu-emul.c
+++ b/arch/arm64/kvm/pmu-emul.c
@@ -1215,3 +1215,32 @@ u64 kvm_vcpu_read_pmcr(struct kvm_vcpu *vcpu)
return u64_replace_bits(pmcr, vcpu->kvm->arch.pmcr_n, ARMV8_PMU_PMCR_N);
}
+
+void kvm_pmu_nested_transition(struct kvm_vcpu *vcpu)
+{
+ bool reprogrammed = false;
+ unsigned long mask;
+ int i;
+
+ if (!kvm_vcpu_has_pmu(vcpu))
+ return;
+
+ mask = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
+ for_each_set_bit(i, &mask, 32) {
+ struct kvm_pmc *pmc = kvm_vcpu_idx_to_pmc(vcpu, i);
+
+ /*
+ * We only need to reconfigure events where the filter is
+ * different at EL1 vs. EL2, as we're multiplexing the true EL1
+ * event filter bit for nested.
+ */
+ if (kvm_pmc_counts_at_el1(pmc) == kvm_pmc_counts_at_el2(pmc))
+ continue;
+
+ kvm_pmu_create_perf_event(pmc);
+ reprogrammed = true;
+ }
+
+ if (reprogrammed)
+ kvm_vcpu_pmu_restore_guest(vcpu);
+}
diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h
index b738ffb39bb0..3493e9d9f58e 100644
--- a/include/kvm/arm_pmu.h
+++ b/include/kvm/arm_pmu.h
@@ -98,6 +98,7 @@ u8 kvm_arm_pmu_get_max_counters(struct kvm *kvm);
u64 kvm_vcpu_read_pmcr(struct kvm_vcpu *vcpu);
bool kvm_pmu_counter_is_hyp(struct kvm_vcpu *vcpu, unsigned int idx);
+void kvm_pmu_nested_transition(struct kvm_vcpu *vcpu);
#else
struct kvm_pmu {
};
@@ -198,6 +199,8 @@ static inline bool kvm_pmu_counter_is_hyp(struct kvm_vcpu *vcpu)
return false;
}
+static inline void kvm_pmu_nested_transition(struct kvm_vcpu *vcpu) {}
+
#endif
#endif
--
2.47.0.163.g1226f6d8fa-goog
^ permalink raw reply related [flat|nested] 27+ messages in thread
* Re: [PATCH v4 00/18] KVM: arm64: nv: Support for EL2 PMU controls
2024-10-25 18:23 [PATCH v4 00/18] KVM: arm64: nv: Support for EL2 PMU controls Oliver Upton
` (17 preceding siblings ...)
2024-10-25 18:25 ` [PATCH v4 18/18] KVM: arm64: nv: Reprogram PMU events affected by nested transition Oliver Upton
@ 2024-10-30 8:45 ` Marc Zyngier
2024-10-31 19:34 ` Oliver Upton
19 siblings, 0 replies; 27+ messages in thread
From: Marc Zyngier @ 2024-10-30 8:45 UTC (permalink / raw)
To: Oliver Upton
Cc: kvmarm, Joey Gouly, Suzuki K Poulose, Zenghui Yu, Catalin Marinas,
Will Deacon, Anshuman Khandual, linux-arm-kernel, linux-kernel
On Fri, 25 Oct 2024 19:23:35 +0100,
Oliver Upton <oliver.upton@linux.dev> wrote:
>
> v3 -> v4:
> - Align sysreg definitions with DDI0601 2024-09
> - Fix 'accessible' counter mask construction
> - Fix MDCR_EL2.MTPME RES0 logic
>
> v3: https://lore.kernel.org/kvmarm/20241007174559.1830205-1-oliver.upton@linux.dev/
>
> Marc Zyngier (1):
> KVM: arm64: Extend masking facility to arbitrary registers
>
> Oliver Upton (17):
> arm64: sysreg: Describe ID_AA64DFR2_EL1 fields
> arm64: sysreg: Migrate MDCR_EL2 definition to table
> arm64: sysreg: Add new definitions for ID_AA64DFR0_EL1
> KVM: arm64: Describe RES0/RES1 bits of MDCR_EL2
> KVM: arm64: nv: Allow coarse-grained trap combos to use complex traps
> KVM: arm64: nv: Rename BEHAVE_FORWARD_ANY
> KVM: arm64: nv: Reinject traps that take effect in Host EL0
> KVM: arm64: nv: Honor MDCR_EL2.{TPM, TPMCR} in Host EL0
> KVM: arm64: nv: Describe trap behaviour of MDCR_EL2.HPMN
> KVM: arm64: nv: Advertise support for FEAT_HPMN0
> KVM: arm64: Rename kvm_pmu_valid_counter_mask()
> KVM: arm64: nv: Adjust range of accessible PMCs according to HPMN
> KVM: arm64: Add helpers to determine if PMC counts at a given EL
> KVM: arm64: nv: Honor MDCR_EL2.HPME
> KVM: arm64: nv: Honor MDCR_EL2.HLP
> KVM: arm64: nv: Apply EL2 event filtering when in hyp context
> KVM: arm64: nv: Reprogram PMU events affected by nested transition
>
> arch/arm64/include/asm/kvm_arm.h | 29 ---
> arch/arm64/include/asm/kvm_emulate.h | 5 +
> arch/arm64/include/asm/kvm_host.h | 21 +-
> arch/arm64/kvm/emulate-nested.c | 291 ++++++++++++++++-----------
> arch/arm64/kvm/nested.c | 54 ++++-
> arch/arm64/kvm/pmu-emul.c | 143 +++++++++++--
> arch/arm64/kvm/sys_regs.c | 15 +-
> arch/arm64/tools/sysreg | 76 ++++++-
> include/kvm/arm_pmu.h | 18 +-
> 9 files changed, 465 insertions(+), 187 deletions(-)
>
>
> base-commit: 8e929cb546ee42c9a61d24fae60605e9e3192354
I think this is pretty much good to go, and with the couple of nits
mentioned before fixed:
Reviewed-by: Marc Zyngier <maz@kernel.org>
M.
--
Without deviation from the norm, progress is not possible.
^ permalink raw reply [flat|nested] 27+ messages in thread
* Re: [PATCH v4 00/18] KVM: arm64: nv: Support for EL2 PMU controls
2024-10-25 18:23 [PATCH v4 00/18] KVM: arm64: nv: Support for EL2 PMU controls Oliver Upton
` (18 preceding siblings ...)
2024-10-30 8:45 ` [PATCH v4 00/18] KVM: arm64: nv: Support for EL2 PMU controls Marc Zyngier
@ 2024-10-31 19:34 ` Oliver Upton
19 siblings, 0 replies; 27+ messages in thread
From: Oliver Upton @ 2024-10-31 19:34 UTC (permalink / raw)
To: kvmarm, Oliver Upton
Cc: linux-arm-kernel, Will Deacon, Zenghui Yu, Suzuki K Poulose,
Joey Gouly, linux-kernel, Marc Zyngier, Catalin Marinas,
Anshuman Khandual
On Fri, 25 Oct 2024 18:23:35 +0000, Oliver Upton wrote:
> v3 -> v4:
> - Align sysreg definitions with DDI0601 2024-09
> - Fix 'accessible' counter mask construction
> - Fix MDCR_EL2.MTPME RES0 logic
>
> v3: https://lore.kernel.org/kvmarm/20241007174559.1830205-1-oliver.upton@linux.dev/
>
> [...]
Applied to kvmarm/next, thanks!
[01/18] KVM: arm64: Extend masking facility to arbitrary registers
https://git.kernel.org/kvmarm/kvmarm/c/a0162020095e
[02/18] arm64: sysreg: Describe ID_AA64DFR2_EL1 fields
https://git.kernel.org/kvmarm/kvmarm/c/93d7356e4b30
[03/18] arm64: sysreg: Migrate MDCR_EL2 definition to table
https://git.kernel.org/kvmarm/kvmarm/c/641630313e9c
[04/18] arm64: sysreg: Add new definitions for ID_AA64DFR0_EL1
https://git.kernel.org/kvmarm/kvmarm/c/3ecb1fe3842c
[05/18] KVM: arm64: Describe RES0/RES1 bits of MDCR_EL2
https://git.kernel.org/kvmarm/kvmarm/c/eb609638da55
[06/18] KVM: arm64: nv: Allow coarse-grained trap combos to use complex traps
https://git.kernel.org/kvmarm/kvmarm/c/18aeeeb57b93
[07/18] KVM: arm64: nv: Rename BEHAVE_FORWARD_ANY
https://git.kernel.org/kvmarm/kvmarm/c/a4063b5aa0bd
[08/18] KVM: arm64: nv: Reinject traps that take effect in Host EL0
https://git.kernel.org/kvmarm/kvmarm/c/d97e66fbcba7
[09/18] KVM: arm64: nv: Honor MDCR_EL2.{TPM, TPMCR} in Host EL0
https://git.kernel.org/kvmarm/kvmarm/c/4ee5d5ff4b4d
[10/18] KVM: arm64: nv: Describe trap behaviour of MDCR_EL2.HPMN
https://git.kernel.org/kvmarm/kvmarm/c/336afe0c832d
[11/18] KVM: arm64: nv: Advertise support for FEAT_HPMN0
https://git.kernel.org/kvmarm/kvmarm/c/166b77a2f423
[12/18] KVM: arm64: Rename kvm_pmu_valid_counter_mask()
https://git.kernel.org/kvmarm/kvmarm/c/a3034dab74fc
[13/18] KVM: arm64: nv: Adjust range of accessible PMCs according to HPMN
https://git.kernel.org/kvmarm/kvmarm/c/9a1c58cfefb0
[14/18] KVM: arm64: Add helpers to determine if PMC counts at a given EL
https://git.kernel.org/kvmarm/kvmarm/c/9d15f8290a22
[15/18] KVM: arm64: nv: Honor MDCR_EL2.HPME
https://git.kernel.org/kvmarm/kvmarm/c/fe827f916662
[16/18] KVM: arm64: nv: Honor MDCR_EL2.HLP
https://git.kernel.org/kvmarm/kvmarm/c/16535d55e91f
[17/18] KVM: arm64: nv: Apply EL2 event filtering when in hyp context
https://git.kernel.org/kvmarm/kvmarm/c/8a34979030f6
[18/18] KVM: arm64: nv: Reprogram PMU events affected by nested transition
https://git.kernel.org/kvmarm/kvmarm/c/ae323e035801
--
Best,
Oliver
^ permalink raw reply [flat|nested] 27+ messages in thread