* [PATCH v3 1/4] KVM: arm64: Allow userspace to change MIDR_EL1
2025-02-18 16:34 [PATCH v3 0/4] KVM: arm64: writable MIDR/REVIDR Sebastian Ott
@ 2025-02-18 16:34 ` Sebastian Ott
2025-02-18 16:34 ` [PATCH v3 2/4] KVM: arm64: Allow userspace to change REVIDR_EL1 Sebastian Ott
` (3 subsequent siblings)
4 siblings, 0 replies; 8+ messages in thread
From: Sebastian Ott @ 2025-02-18 16:34 UTC (permalink / raw)
To: Marc Zyngier, Oliver Upton, Joey Gouly, Suzuki K Poulose,
Zenghui Yu, Catalin Marinas, Will Deacon, Shameer Kolothum
Cc: Cornelia Huck, Eric Auger, linux-arm-kernel, kvmarm, linux-kernel
Enable VMMs to write MIDR_EL1 by treating it as a VM ID register.
Since MIDR_EL1 is not handled as a proper arm64_ftr_reg, apply only
a sanity check against the writable mask to ensure that the reserved
bits are 0.
Set up VPIDR_EL2 to hold the MIDR_EL1 value for the guest.
Signed-off-by: Sebastian Ott <sebott@redhat.com>
---
arch/arm64/include/asm/kvm_host.h | 3 ++
arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h | 5 +--
arch/arm64/kvm/hyp/nvhe/sysreg-sr.c | 10 +++++-
arch/arm64/kvm/hyp/vhe/sysreg-sr.c | 28 ++++++---------
arch/arm64/kvm/sys_regs.c | 42 ++++++++++++++++++++--
5 files changed, 64 insertions(+), 24 deletions(-)
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 7cfa024de4e3..3db8c773339e 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -373,6 +373,7 @@ struct kvm_arch {
#define KVM_ARM_ID_REG_NUM (IDREG_IDX(sys_reg(3, 0, 0, 7, 7)) + 1)
u64 id_regs[KVM_ARM_ID_REG_NUM];
+ u64 midr_el1;
u64 ctr_el0;
/* Masks for VNCR-backed and general EL2 sysregs */
@@ -1469,6 +1470,8 @@ static inline u64 *__vm_id_reg(struct kvm_arch *ka, u32 reg)
switch (reg) {
case sys_reg(3, 0, 0, 1, 0) ... sys_reg(3, 0, 0, 7, 7):
return &ka->id_regs[IDREG_IDX(reg)];
+ case SYS_MIDR_EL1:
+ return &ka->midr_el1;
case SYS_CTR_EL0:
return &ka->ctr_el0;
default:
diff --git a/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h b/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h
index 76ff095c6b6e..c8f37e702724 100644
--- a/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h
+++ b/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h
@@ -168,9 +168,10 @@ static inline void __sysreg_restore_user_state(struct kvm_cpu_context *ctxt)
}
static inline void __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt,
- u64 mpidr)
+ u64 midr, u64 mpidr)
{
- write_sysreg(mpidr, vmpidr_el2);
+ write_sysreg(midr, vpidr_el2);
+ write_sysreg(mpidr, vmpidr_el2);
if (has_vhe() ||
!cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
diff --git a/arch/arm64/kvm/hyp/nvhe/sysreg-sr.c b/arch/arm64/kvm/hyp/nvhe/sysreg-sr.c
index dba101565de3..a01be1add5ad 100644
--- a/arch/arm64/kvm/hyp/nvhe/sysreg-sr.c
+++ b/arch/arm64/kvm/hyp/nvhe/sysreg-sr.c
@@ -28,7 +28,15 @@ void __sysreg_save_state_nvhe(struct kvm_cpu_context *ctxt)
void __sysreg_restore_state_nvhe(struct kvm_cpu_context *ctxt)
{
- __sysreg_restore_el1_state(ctxt, ctxt_sys_reg(ctxt, MPIDR_EL1));
+ u64 midr;
+
+ if (ctxt_is_guest(ctxt))
+ midr = kvm_read_vm_id_reg(kern_hyp_va(ctxt_to_vcpu(ctxt)->kvm),
+ SYS_MIDR_EL1);
+ else
+ midr = read_cpuid_id();
+
+ __sysreg_restore_el1_state(ctxt, midr, ctxt_sys_reg(ctxt, MPIDR_EL1));
__sysreg_restore_common_state(ctxt);
__sysreg_restore_user_state(ctxt);
__sysreg_restore_el2_return_state(ctxt);
diff --git a/arch/arm64/kvm/hyp/vhe/sysreg-sr.c b/arch/arm64/kvm/hyp/vhe/sysreg-sr.c
index 90b018e06f2c..a57a771e9be6 100644
--- a/arch/arm64/kvm/hyp/vhe/sysreg-sr.c
+++ b/arch/arm64/kvm/hyp/vhe/sysreg-sr.c
@@ -87,11 +87,12 @@ static void __sysreg_restore_vel2_state(struct kvm_vcpu *vcpu)
write_sysreg(__vcpu_sys_reg(vcpu, PAR_EL1), par_el1);
write_sysreg(__vcpu_sys_reg(vcpu, TPIDR_EL1), tpidr_el1);
- write_sysreg(__vcpu_sys_reg(vcpu, MPIDR_EL1), vmpidr_el2);
- write_sysreg_el1(__vcpu_sys_reg(vcpu, MAIR_EL2), SYS_MAIR);
- write_sysreg_el1(__vcpu_sys_reg(vcpu, VBAR_EL2), SYS_VBAR);
- write_sysreg_el1(__vcpu_sys_reg(vcpu, CONTEXTIDR_EL2), SYS_CONTEXTIDR);
- write_sysreg_el1(__vcpu_sys_reg(vcpu, AMAIR_EL2), SYS_AMAIR);
+ write_sysreg(kvm_read_vm_id_reg(vcpu->kvm, SYS_MIDR_EL1), vpidr_el2);
+ write_sysreg(__vcpu_sys_reg(vcpu, MPIDR_EL1), vmpidr_el2);
+ write_sysreg_el1(__vcpu_sys_reg(vcpu, MAIR_EL2), SYS_MAIR);
+ write_sysreg_el1(__vcpu_sys_reg(vcpu, VBAR_EL2), SYS_VBAR);
+ write_sysreg_el1(__vcpu_sys_reg(vcpu, CONTEXTIDR_EL2), SYS_CONTEXTIDR);
+ write_sysreg_el1(__vcpu_sys_reg(vcpu, AMAIR_EL2), SYS_AMAIR);
if (vcpu_el2_e2h_is_set(vcpu)) {
/*
@@ -191,7 +192,7 @@ void __vcpu_load_switch_sysregs(struct kvm_vcpu *vcpu)
{
struct kvm_cpu_context *guest_ctxt = &vcpu->arch.ctxt;
struct kvm_cpu_context *host_ctxt;
- u64 mpidr;
+ u64 midr, mpidr;
host_ctxt = host_data_ptr(host_ctxt);
__sysreg_save_user_state(host_ctxt);
@@ -220,23 +221,18 @@ void __vcpu_load_switch_sysregs(struct kvm_vcpu *vcpu)
__sysreg_restore_vel2_state(vcpu);
} else {
if (vcpu_has_nv(vcpu)) {
- /*
- * Use the guest hypervisor's VPIDR_EL2 when in a
- * nested state. The hardware value of MIDR_EL1 gets
- * restored on put.
- */
- write_sysreg(ctxt_sys_reg(guest_ctxt, VPIDR_EL2), vpidr_el2);
-
/*
* As we're restoring a nested guest, set the value
* provided by the guest hypervisor.
*/
+ midr = ctxt_sys_reg(guest_ctxt, VPIDR_EL2);
mpidr = ctxt_sys_reg(guest_ctxt, VMPIDR_EL2);
} else {
+ midr = kvm_read_vm_id_reg(vcpu->kvm, SYS_MIDR_EL1);
mpidr = ctxt_sys_reg(guest_ctxt, MPIDR_EL1);
}
- __sysreg_restore_el1_state(guest_ctxt, mpidr);
+ __sysreg_restore_el1_state(guest_ctxt, midr, mpidr);
}
vcpu_set_flag(vcpu, SYSREGS_ON_CPU);
@@ -271,9 +267,5 @@ void __vcpu_put_switch_sysregs(struct kvm_vcpu *vcpu)
/* Restore host user state */
__sysreg_restore_user_state(host_ctxt);
- /* If leaving a nesting guest, restore MIDR_EL1 default view */
- if (vcpu_has_nv(vcpu))
- write_sysreg(read_cpuid_id(), vpidr_el2);
-
vcpu_clear_flag(vcpu, SYSREGS_ON_CPU);
}
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 82430c1e1dd0..3cd4dfdd287a 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -1666,7 +1666,7 @@ static bool is_feature_id_reg(u32 encoding)
*/
static inline bool is_vm_ftr_id_reg(u32 id)
{
- if (id == SYS_CTR_EL0)
+ if (id == SYS_CTR_EL0 || id == SYS_MIDR_EL1)
return true;
return (sys_reg_Op0(id) == 3 && sys_reg_Op1(id) == 0 &&
@@ -1999,6 +1999,33 @@ static int get_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
return 0;
}
+static int set_id_reg_non_ftr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+ u64 val)
+{
+ u32 id = reg_to_encoding(rd);
+
+ guard(mutex)(&vcpu->kvm->arch.config_lock);
+ /*
+ * Once the VM has started the ID registers are immutable. Reject any
+ * write that does not match the final register value.
+ */
+ if (kvm_vm_has_ran_once(vcpu->kvm)) {
+ if (val != read_id_reg(vcpu, rd))
+ return -EBUSY;
+
+ return 0;
+ }
+
+ /*
+ * For non ftr regs do a limited test against the writable mask only.
+ */
+ if ((rd->val & val) != val)
+ return -EINVAL;
+
+ kvm_set_vm_id_reg(vcpu->kvm, id, val);
+ return 0;
+}
+
static int set_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
u64 val)
{
@@ -2493,6 +2520,15 @@ static bool access_mdcr(struct kvm_vcpu *vcpu,
return true;
}
+#define FUNCTION_RESET(reg) \
+ static u64 reset_##reg(struct kvm_vcpu *v, \
+ const struct sys_reg_desc *r) \
+ { \
+ return read_sysreg(reg); \
+ }
+
+FUNCTION_RESET(midr_el1)
+
/*
* Architected system registers.
@@ -2542,6 +2578,8 @@ static const struct sys_reg_desc sys_reg_descs[] = {
{ SYS_DESC(SYS_DBGVCR32_EL2), undef_access, reset_val, DBGVCR32_EL2, 0 },
+ { ID_DESC(MIDR_EL1), .set_user = set_id_reg_non_ftr, .visibility = id_visibility,
+ .reset = reset_midr_el1, .val = GENMASK_ULL(31, 0) },
{ SYS_DESC(SYS_MPIDR_EL1), NULL, reset_mpidr, MPIDR_EL1 },
/*
@@ -4594,13 +4632,11 @@ id_to_sys_reg_desc(struct kvm_vcpu *vcpu, u64 id,
return ((struct sys_reg_desc *)r)->val; \
}
-FUNCTION_INVARIANT(midr_el1)
FUNCTION_INVARIANT(revidr_el1)
FUNCTION_INVARIANT(aidr_el1)
/* ->val is filled in by kvm_sys_reg_table_init() */
static struct sys_reg_desc invariant_sys_regs[] __ro_after_init = {
- { SYS_DESC(SYS_MIDR_EL1), NULL, reset_midr_el1 },
{ SYS_DESC(SYS_REVIDR_EL1), NULL, reset_revidr_el1 },
{ SYS_DESC(SYS_AIDR_EL1), NULL, reset_aidr_el1 },
};
--
2.42.0
^ permalink raw reply related [flat|nested] 8+ messages in thread* [PATCH v3 2/4] KVM: arm64: Allow userspace to change REVIDR_EL1
2025-02-18 16:34 [PATCH v3 0/4] KVM: arm64: writable MIDR/REVIDR Sebastian Ott
2025-02-18 16:34 ` [PATCH v3 1/4] KVM: arm64: Allow userspace to change MIDR_EL1 Sebastian Ott
@ 2025-02-18 16:34 ` Sebastian Ott
2025-02-18 16:34 ` [PATCH v3 3/4] KVM: arm64: Allow userspace to change AIDR_EL1 Sebastian Ott
` (2 subsequent siblings)
4 siblings, 0 replies; 8+ messages in thread
From: Sebastian Ott @ 2025-02-18 16:34 UTC (permalink / raw)
To: Marc Zyngier, Oliver Upton, Joey Gouly, Suzuki K Poulose,
Zenghui Yu, Catalin Marinas, Will Deacon, Shameer Kolothum
Cc: Cornelia Huck, Eric Auger, linux-arm-kernel, kvmarm, linux-kernel
Enable VMMs to write REVIDR_EL1 by treating it as a VM ID register.
Trap guest accesses of REVIDR_EL1 when the VM's value differs from the hardware value.
Signed-off-by: Sebastian Ott <sebott@redhat.com>
---
arch/arm64/include/asm/kvm_host.h | 3 +++
arch/arm64/kvm/sys_regs.c | 11 ++++++++---
2 files changed, 11 insertions(+), 3 deletions(-)
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 3db8c773339e..c8fba4111b77 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -374,6 +374,7 @@ struct kvm_arch {
u64 id_regs[KVM_ARM_ID_REG_NUM];
u64 midr_el1;
+ u64 revidr_el1;
u64 ctr_el0;
/* Masks for VNCR-backed and general EL2 sysregs */
@@ -1472,6 +1473,8 @@ static inline u64 *__vm_id_reg(struct kvm_arch *ka, u32 reg)
return &ka->id_regs[IDREG_IDX(reg)];
case SYS_MIDR_EL1:
return &ka->midr_el1;
+ case SYS_REVIDR_EL1:
+ return &ka->revidr_el1;
case SYS_CTR_EL0:
return &ka->ctr_el0;
default:
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 3cd4dfdd287a..c12fd91574ab 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -1666,7 +1666,8 @@ static bool is_feature_id_reg(u32 encoding)
*/
static inline bool is_vm_ftr_id_reg(u32 id)
{
- if (id == SYS_CTR_EL0 || id == SYS_MIDR_EL1)
+ if (id == SYS_CTR_EL0 || id == SYS_MIDR_EL1 ||
+ id == SYS_REVIDR_EL1)
return true;
return (sys_reg_Op0(id) == 3 && sys_reg_Op1(id) == 0 &&
@@ -2528,6 +2529,7 @@ static bool access_mdcr(struct kvm_vcpu *vcpu,
}
FUNCTION_RESET(midr_el1)
+FUNCTION_RESET(revidr_el1)
/*
@@ -2581,6 +2583,8 @@ static const struct sys_reg_desc sys_reg_descs[] = {
{ ID_DESC(MIDR_EL1), .set_user = set_id_reg_non_ftr, .visibility = id_visibility,
.reset = reset_midr_el1, .val = GENMASK_ULL(31, 0) },
{ SYS_DESC(SYS_MPIDR_EL1), NULL, reset_mpidr, MPIDR_EL1 },
+ { ID_DESC(REVIDR_EL1), .set_user = set_id_reg_non_ftr, .visibility = id_visibility,
+ .reset = reset_revidr_el1, .val = -1ULL },
/*
* ID regs: all ID_SANITISED() entries here must have corresponding
@@ -4632,12 +4636,10 @@ id_to_sys_reg_desc(struct kvm_vcpu *vcpu, u64 id,
return ((struct sys_reg_desc *)r)->val; \
}
-FUNCTION_INVARIANT(revidr_el1)
FUNCTION_INVARIANT(aidr_el1)
/* ->val is filled in by kvm_sys_reg_table_init() */
static struct sys_reg_desc invariant_sys_regs[] __ro_after_init = {
- { SYS_DESC(SYS_REVIDR_EL1), NULL, reset_revidr_el1 },
{ SYS_DESC(SYS_AIDR_EL1), NULL, reset_aidr_el1 },
};
@@ -4985,6 +4987,9 @@ static void vcpu_set_hcr(struct kvm_vcpu *vcpu)
else
vcpu->arch.hcr_el2 |= HCR_TID2;
+ if (kvm_read_vm_id_reg(kvm, SYS_REVIDR_EL1) != read_sysreg(REVIDR_EL1))
+ vcpu->arch.hcr_el2 |= HCR_TID1;
+
if (vcpu_el1_is_32bit(vcpu))
vcpu->arch.hcr_el2 &= ~HCR_RW;
--
2.42.0
^ permalink raw reply related [flat|nested] 8+ messages in thread* [PATCH v3 3/4] KVM: arm64: Allow userspace to change AIDR_EL1
2025-02-18 16:34 [PATCH v3 0/4] KVM: arm64: writable MIDR/REVIDR Sebastian Ott
2025-02-18 16:34 ` [PATCH v3 1/4] KVM: arm64: Allow userspace to change MIDR_EL1 Sebastian Ott
2025-02-18 16:34 ` [PATCH v3 2/4] KVM: arm64: Allow userspace to change REVIDR_EL1 Sebastian Ott
@ 2025-02-18 16:34 ` Sebastian Ott
2025-02-18 16:34 ` [PATCH v3 4/4] KVM: selftests: arm64: Test writes to MIDR,REVIDR,AIDR Sebastian Ott
2025-02-24 22:23 ` [PATCH v3 0/4] KVM: arm64: writable MIDR/REVIDR Oliver Upton
4 siblings, 0 replies; 8+ messages in thread
From: Sebastian Ott @ 2025-02-18 16:34 UTC (permalink / raw)
To: Marc Zyngier, Oliver Upton, Joey Gouly, Suzuki K Poulose,
Zenghui Yu, Catalin Marinas, Will Deacon, Shameer Kolothum
Cc: Cornelia Huck, Eric Auger, linux-arm-kernel, kvmarm, linux-kernel
Enable VMMs to write AIDR_EL1 by treating it as a VM ID register.
Trap guest accesses of AIDR_EL1 when the VM's value differs from the hardware value.
Since this was the last invariant register, remove all the infrastructure
that was needed to handle invariant registers.
Signed-off-by: Sebastian Ott <sebott@redhat.com>
---
arch/arm64/include/asm/kvm_host.h | 3 ++
arch/arm64/kvm/sys_regs.c | 90 +++----------------------------
2 files changed, 10 insertions(+), 83 deletions(-)
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index c8fba4111b77..de735e2ad9ce 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -375,6 +375,7 @@ struct kvm_arch {
u64 midr_el1;
u64 revidr_el1;
+ u64 aidr_el1;
u64 ctr_el0;
/* Masks for VNCR-backed and general EL2 sysregs */
@@ -1475,6 +1476,8 @@ static inline u64 *__vm_id_reg(struct kvm_arch *ka, u32 reg)
return &ka->midr_el1;
case SYS_REVIDR_EL1:
return &ka->revidr_el1;
+ case SYS_AIDR_EL1:
+ return &ka->aidr_el1;
case SYS_CTR_EL0:
return &ka->ctr_el0;
default:
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index c12fd91574ab..a1a683ba6bb9 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -1667,7 +1667,7 @@ static bool is_feature_id_reg(u32 encoding)
static inline bool is_vm_ftr_id_reg(u32 id)
{
if (id == SYS_CTR_EL0 || id == SYS_MIDR_EL1 ||
- id == SYS_REVIDR_EL1)
+ id == SYS_REVIDR_EL1 || id == SYS_AIDR_EL1)
return true;
return (sys_reg_Op0(id) == 3 && sys_reg_Op1(id) == 0 &&
@@ -2530,6 +2530,7 @@ static bool access_mdcr(struct kvm_vcpu *vcpu,
FUNCTION_RESET(midr_el1)
FUNCTION_RESET(revidr_el1)
+FUNCTION_RESET(aidr_el1)
/*
@@ -2856,6 +2857,8 @@ static const struct sys_reg_desc sys_reg_descs[] = {
.set_user = set_clidr, .val = ~CLIDR_EL1_RES0 },
{ SYS_DESC(SYS_CCSIDR2_EL1), undef_access },
{ SYS_DESC(SYS_SMIDR_EL1), undef_access },
+ { ID_DESC(AIDR_EL1), .set_user = set_id_reg_non_ftr, .visibility = id_visibility,
+ .reset = reset_aidr_el1, .val = -1ULL },
{ SYS_DESC(SYS_CSSELR_EL1), access_csselr, reset_unknown, CSSELR_EL1 },
ID_FILTERED(CTR_EL0, ctr_el0,
CTR_EL0_DIC_MASK |
@@ -4620,61 +4623,6 @@ id_to_sys_reg_desc(struct kvm_vcpu *vcpu, u64 id,
return r;
}
-/*
- * These are the invariant sys_reg registers: we let the guest see the
- * host versions of these, so they're part of the guest state.
- *
- * A future CPU may provide a mechanism to present different values to
- * the guest, or a future kvm may trap them.
- */
-
-#define FUNCTION_INVARIANT(reg) \
- static u64 reset_##reg(struct kvm_vcpu *v, \
- const struct sys_reg_desc *r) \
- { \
- ((struct sys_reg_desc *)r)->val = read_sysreg(reg); \
- return ((struct sys_reg_desc *)r)->val; \
- }
-
-FUNCTION_INVARIANT(aidr_el1)
-
-/* ->val is filled in by kvm_sys_reg_table_init() */
-static struct sys_reg_desc invariant_sys_regs[] __ro_after_init = {
- { SYS_DESC(SYS_AIDR_EL1), NULL, reset_aidr_el1 },
-};
-
-static int get_invariant_sys_reg(u64 id, u64 __user *uaddr)
-{
- const struct sys_reg_desc *r;
-
- r = get_reg_by_id(id, invariant_sys_regs,
- ARRAY_SIZE(invariant_sys_regs));
- if (!r)
- return -ENOENT;
-
- return put_user(r->val, uaddr);
-}
-
-static int set_invariant_sys_reg(u64 id, u64 __user *uaddr)
-{
- const struct sys_reg_desc *r;
- u64 val;
-
- r = get_reg_by_id(id, invariant_sys_regs,
- ARRAY_SIZE(invariant_sys_regs));
- if (!r)
- return -ENOENT;
-
- if (get_user(val, uaddr))
- return -EFAULT;
-
- /* This is what we mean by invariant: you can't change it. */
- if (r->val != val)
- return -EINVAL;
-
- return 0;
-}
-
static int demux_c15_get(struct kvm_vcpu *vcpu, u64 id, void __user *uaddr)
{
u32 val;
@@ -4756,15 +4704,10 @@ int kvm_sys_reg_get_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg,
int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
void __user *uaddr = (void __user *)(unsigned long)reg->addr;
- int err;
if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
return demux_c15_get(vcpu, reg->id, uaddr);
- err = get_invariant_sys_reg(reg->id, uaddr);
- if (err != -ENOENT)
- return err;
-
return kvm_sys_reg_get_user(vcpu, reg,
sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
}
@@ -4800,15 +4743,10 @@ int kvm_sys_reg_set_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg,
int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
void __user *uaddr = (void __user *)(unsigned long)reg->addr;
- int err;
if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
return demux_c15_set(vcpu, reg->id, uaddr);
- err = set_invariant_sys_reg(reg->id, uaddr);
- if (err != -ENOENT)
- return err;
-
return kvm_sys_reg_set_user(vcpu, reg,
sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
}
@@ -4897,23 +4835,13 @@ static int walk_sys_regs(struct kvm_vcpu *vcpu, u64 __user *uind)
unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu)
{
- return ARRAY_SIZE(invariant_sys_regs)
- + num_demux_regs()
- + walk_sys_regs(vcpu, (u64 __user *)NULL);
+ return num_demux_regs() + walk_sys_regs(vcpu, (u64 __user *)NULL);
}
int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
- unsigned int i;
int err;
- /* Then give them all the invariant registers' indices. */
- for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++) {
- if (put_user(sys_reg_to_index(&invariant_sys_regs[i]), uindices))
- return -EFAULT;
- uindices++;
- }
-
err = walk_sys_regs(vcpu, uindices);
if (err < 0)
return err;
@@ -4987,7 +4915,8 @@ static void vcpu_set_hcr(struct kvm_vcpu *vcpu)
else
vcpu->arch.hcr_el2 |= HCR_TID2;
- if (kvm_read_vm_id_reg(kvm, SYS_REVIDR_EL1) != read_sysreg(REVIDR_EL1))
+ if ((kvm_read_vm_id_reg(kvm, SYS_REVIDR_EL1) != read_sysreg(REVIDR_EL1)) ||
+ (kvm_read_vm_id_reg(kvm, SYS_AIDR_EL1) != read_sysreg(AIDR_EL1)))
vcpu->arch.hcr_el2 |= HCR_TID1;
if (vcpu_el1_is_32bit(vcpu))
@@ -5142,16 +5071,11 @@ int __init kvm_sys_reg_table_init(void)
valid &= check_sysreg_table(cp14_64_regs, ARRAY_SIZE(cp14_64_regs), true);
valid &= check_sysreg_table(cp15_regs, ARRAY_SIZE(cp15_regs), true);
valid &= check_sysreg_table(cp15_64_regs, ARRAY_SIZE(cp15_64_regs), true);
- valid &= check_sysreg_table(invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs), false);
valid &= check_sysreg_table(sys_insn_descs, ARRAY_SIZE(sys_insn_descs), false);
if (!valid)
return -EINVAL;
- /* We abuse the reset function to overwrite the table itself. */
- for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++)
- invariant_sys_regs[i].reset(NULL, &invariant_sys_regs[i]);
-
ret = populate_nv_trap_config();
for (i = 0; !ret && i < ARRAY_SIZE(sys_reg_descs); i++)
--
2.42.0
^ permalink raw reply related [flat|nested] 8+ messages in thread* [PATCH v3 4/4] KVM: selftests: arm64: Test writes to MIDR,REVIDR,AIDR
2025-02-18 16:34 [PATCH v3 0/4] KVM: arm64: writable MIDR/REVIDR Sebastian Ott
` (2 preceding siblings ...)
2025-02-18 16:34 ` [PATCH v3 3/4] KVM: arm64: Allow userspace to change AIDR_EL1 Sebastian Ott
@ 2025-02-18 16:34 ` Sebastian Ott
2025-02-24 22:23 ` [PATCH v3 0/4] KVM: arm64: writable MIDR/REVIDR Oliver Upton
4 siblings, 0 replies; 8+ messages in thread
From: Sebastian Ott @ 2025-02-18 16:34 UTC (permalink / raw)
To: Marc Zyngier, Oliver Upton, Joey Gouly, Suzuki K Poulose,
Zenghui Yu, Catalin Marinas, Will Deacon, Shameer Kolothum
Cc: Cornelia Huck, Eric Auger, linux-arm-kernel, kvmarm, linux-kernel
Assert that MIDR_EL1, REVIDR_EL1, AIDR_EL1 are writable from userspace,
that the changed values are visible to guests, and that they are
preserved across a vCPU reset.
Signed-off-by: Sebastian Ott <sebott@redhat.com>
---
.../testing/selftests/kvm/arm64/set_id_regs.c | 32 +++++++++++++++----
1 file changed, 26 insertions(+), 6 deletions(-)
diff --git a/tools/testing/selftests/kvm/arm64/set_id_regs.c b/tools/testing/selftests/kvm/arm64/set_id_regs.c
index 217541fe6536..d719c2ab1e31 100644
--- a/tools/testing/selftests/kvm/arm64/set_id_regs.c
+++ b/tools/testing/selftests/kvm/arm64/set_id_regs.c
@@ -230,6 +230,9 @@ static void guest_code(void)
GUEST_REG_SYNC(SYS_ID_AA64MMFR2_EL1);
GUEST_REG_SYNC(SYS_ID_AA64ZFR0_EL1);
GUEST_REG_SYNC(SYS_CTR_EL0);
+ GUEST_REG_SYNC(SYS_MIDR_EL1);
+ GUEST_REG_SYNC(SYS_REVIDR_EL1);
+ GUEST_REG_SYNC(SYS_AIDR_EL1);
GUEST_DONE();
}
@@ -609,18 +612,31 @@ static void test_ctr(struct kvm_vcpu *vcpu)
test_reg_vals[encoding_to_range_idx(SYS_CTR_EL0)] = ctr;
}
-static void test_vcpu_ftr_id_regs(struct kvm_vcpu *vcpu)
+static void test_id_reg(struct kvm_vcpu *vcpu, u32 id)
{
u64 val;
+ val = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(id));
+ val++;
+ vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(id), val);
+ test_reg_vals[encoding_to_range_idx(id)] = val;
+}
+
+static void test_vcpu_ftr_id_regs(struct kvm_vcpu *vcpu)
+{
test_clidr(vcpu);
test_ctr(vcpu);
- val = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_MPIDR_EL1));
- val++;
- vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_MPIDR_EL1), val);
+ test_id_reg(vcpu, SYS_MPIDR_EL1);
+ ksft_test_result_pass("%s\n", __func__);
+}
+
+static void test_vcpu_non_ftr_id_regs(struct kvm_vcpu *vcpu)
+{
+ test_id_reg(vcpu, SYS_MIDR_EL1);
+ test_id_reg(vcpu, SYS_REVIDR_EL1);
+ test_id_reg(vcpu, SYS_AIDR_EL1);
- test_reg_vals[encoding_to_range_idx(SYS_MPIDR_EL1)] = val;
ksft_test_result_pass("%s\n", __func__);
}
@@ -647,6 +663,9 @@ static void test_reset_preserves_id_regs(struct kvm_vcpu *vcpu)
test_assert_id_reg_unchanged(vcpu, SYS_MPIDR_EL1);
test_assert_id_reg_unchanged(vcpu, SYS_CLIDR_EL1);
test_assert_id_reg_unchanged(vcpu, SYS_CTR_EL0);
+ test_assert_id_reg_unchanged(vcpu, SYS_MIDR_EL1);
+ test_assert_id_reg_unchanged(vcpu, SYS_REVIDR_EL1);
+ test_assert_id_reg_unchanged(vcpu, SYS_AIDR_EL1);
ksft_test_result_pass("%s\n", __func__);
}
@@ -675,13 +694,14 @@ int main(void)
ARRAY_SIZE(ftr_id_aa64isar2_el1) + ARRAY_SIZE(ftr_id_aa64pfr0_el1) +
ARRAY_SIZE(ftr_id_aa64pfr1_el1) + ARRAY_SIZE(ftr_id_aa64mmfr0_el1) +
ARRAY_SIZE(ftr_id_aa64mmfr1_el1) + ARRAY_SIZE(ftr_id_aa64mmfr2_el1) +
- ARRAY_SIZE(ftr_id_aa64zfr0_el1) - ARRAY_SIZE(test_regs) + 2 +
+ ARRAY_SIZE(ftr_id_aa64zfr0_el1) - ARRAY_SIZE(test_regs) + 3 +
MPAM_IDREG_TEST;
ksft_set_plan(test_cnt);
test_vm_ftr_id_regs(vcpu, aarch64_only);
test_vcpu_ftr_id_regs(vcpu);
+ test_vcpu_non_ftr_id_regs(vcpu);
test_user_set_mpam_reg(vcpu);
test_guest_reg_read(vcpu);
--
2.42.0
^ permalink raw reply related [flat|nested] 8+ messages in thread* Re: [PATCH v3 0/4] KVM: arm64: writable MIDR/REVIDR
2025-02-18 16:34 [PATCH v3 0/4] KVM: arm64: writable MIDR/REVIDR Sebastian Ott
` (3 preceding siblings ...)
2025-02-18 16:34 ` [PATCH v3 4/4] KVM: selftests: arm64: Test writes to MIDR,REVIDR,AIDR Sebastian Ott
@ 2025-02-24 22:23 ` Oliver Upton
2025-02-26 16:47 ` Sebastian Ott
4 siblings, 1 reply; 8+ messages in thread
From: Oliver Upton @ 2025-02-24 22:23 UTC (permalink / raw)
To: Sebastian Ott
Cc: Marc Zyngier, Joey Gouly, Suzuki K Poulose, Zenghui Yu,
Catalin Marinas, Will Deacon, Shameer Kolothum, Cornelia Huck,
Eric Auger, linux-arm-kernel, kvmarm, linux-kernel
Hi Sebastian,
On Tue, Feb 18, 2025 at 05:34:39PM +0100, Sebastian Ott wrote:
> Based on prior discussion [1] this series allows VMMs to change
> MIDR/REVIDR to enable migration between machines that differ in
> these registers. Since these are used for errata handling the
> errata management series [2] is a prerequisite for this one.
>
> changes for V3:
> * handle VPIDR_EL2 as part of vcpu ctxt - thanks Oliver!
Thanks for respinning. While your changes are looking good, as I got
ready to apply this series I wound up peeling the onion a bit further
and have a few more concerns:
- Current KVM allows guests to read SMIDR_EL1 despite the fact that we
do not support SME (this is part of TID1 traps)
- The "invariant" values that KVM presents to userspace originate from
the boot CPU, not the CPU that resets the ID registers for a VM
- A VMM that wants to present big-little can do so on current KVM by
affining vCPUs, but cannot with this series
All of this is to say, I think your series is going to collide with
the pre-existing pile of crap we have. I'm going to pick up these
changes and rework them so we can send a fix for #1 to stable trees and
(hopefully) avoid breaking the old "invariant" behavior.
I'll post what I have as soon as I test it, hopefully we can get this
shaped up for 6.15.
Thanks,
Oliver
^ permalink raw reply [flat|nested] 8+ messages in thread* Re: [PATCH v3 0/4] KVM: arm64: writable MIDR/REVIDR
2025-02-24 22:23 ` [PATCH v3 0/4] KVM: arm64: writable MIDR/REVIDR Oliver Upton
@ 2025-02-26 16:47 ` Sebastian Ott
2025-02-26 18:56 ` Oliver Upton
0 siblings, 1 reply; 8+ messages in thread
From: Sebastian Ott @ 2025-02-26 16:47 UTC (permalink / raw)
To: Oliver Upton
Cc: Marc Zyngier, Joey Gouly, Suzuki K Poulose, Zenghui Yu,
Catalin Marinas, Will Deacon, Shameer Kolothum, Cornelia Huck,
Eric Auger, linux-arm-kernel, kvmarm, linux-kernel
Hi Oliver,
On Mon, 24 Feb 2025, Oliver Upton wrote:
> On Tue, Feb 18, 2025 at 05:34:39PM +0100, Sebastian Ott wrote:
>> Based on prior discussion [1] this series allows VMMs to change
>> MIDR/REVIDR to enable migration between machines that differ in
>> these registers. Since these are used for errata handling the
>> errata management series [2] is a prerequisite for this one.
>>
>> changes for V3:
>> * handle VPIDR_EL2 as part of vcpu ctxt - thanks Oliver!
>
> Thanks for respinning. While your changes are looking good, as I got
> ready to apply this series I wound up peeling the onion a bit further
> and have a few more concerns:
>
> - Current KVM allows guests to read SMIDR_EL1 despite the fact that we
> do not support SME (this is part of TID1 traps)
>
> - The "invariant" values that KVM presents to userspace originate from
> the boot CPU, not the CPU that resets the ID registers for a VM
>
> - A VMM that wants to present big-little can do so on current KVM by
> affining vCPUs, but cannot with this series
>
> All of this is to say, I think your series is going to collide with
> the pre-existing pile of crap we have. I'm going to pick up these
> changes and rework them so we can send a fix for #1 to stable trees and
> (hopefully) avoid breaking the old "invariant" behavior.
>
> I'll post what I have as soon as I test it, hopefully we can get this
> shaped up for 6.15.
Sorry for the additional work I've caused. I gave what you have in next a
spin and it looks good so far.
Thank you very much!
Sebastian
^ permalink raw reply [flat|nested] 8+ messages in thread
* Re: [PATCH v3 0/4] KVM: arm64: writable MIDR/REVIDR
2025-02-26 16:47 ` Sebastian Ott
@ 2025-02-26 18:56 ` Oliver Upton
0 siblings, 0 replies; 8+ messages in thread
From: Oliver Upton @ 2025-02-26 18:56 UTC (permalink / raw)
To: Sebastian Ott
Cc: Marc Zyngier, Joey Gouly, Suzuki K Poulose, Zenghui Yu,
Catalin Marinas, Will Deacon, Shameer Kolothum, Cornelia Huck,
Eric Auger, linux-arm-kernel, kvmarm, linux-kernel
On Wed, Feb 26, 2025 at 05:47:53PM +0100, Sebastian Ott wrote:
> On Mon, 24 Feb 2025, Oliver Upton wrote:
> > On Tue, Feb 18, 2025 at 05:34:39PM +0100, Sebastian Ott wrote:
> > > Based on prior discussion [1] this series allows VMMs to change
> > > MIDR/REVIDR to enable migration between machines that differ in
> > > these registers. Since these are used for errata handling the
> > > errata management series [2] is a prerequisite for this one.
> > >
> > > changes for V3:
> > > * handle VPIDR_EL2 as part of vcpu ctxt - thanks Oliver!
> >
> > Thanks for respinning. While your changes are looking good, as I got
> > ready to apply this series I wound up peeling the onion a bit further
> > and have a few more concerns:
> >
> > - Current KVM allows guests to read SMIDR_EL1 despite the fact that we
> > do not support SME (this is part of TID1 traps)
> >
> > - The "invariant" values that KVM presents to userspace originate from
> > the boot CPU, not the CPU that resets the ID registers for a VM
> >
> > - A VMM that wants to present big-little can do so on current KVM by
> > affining vCPUs, but cannot with this series
> >
> > All of this is to say, I think your series is going to collide with
> > the pre-existing pile of crap we have. I'm going to pick up these
> > changes and rework them so we can send a fix for #1 to stable trees and
> > (hopefully) avoid breaking the old "invariant" behavior.
> >
> > I'll post what I have as soon as I test it, hopefully we can get this
> > shaped up for 6.15.
>
> Sry, for the additional work I've caused. I gave what you have in next a
> spin and it looks good so far.
> Thank you very much!
Don't apologize -- it isn't your fault what we had already was a mess! :)
Thanks for kicking the tires.
Best,
Oliver
^ permalink raw reply [flat|nested] 8+ messages in thread