From: Fuad Tabba <tabba@google.com>
To: kvmarm@lists.linux.dev, linux-arm-kernel@lists.infradead.org
Cc: maz@kernel.org, oliver.upton@linux.dev, james.clark@linaro.org,
will@kernel.org, joey.gouly@arm.com, suzuki.poulose@arm.com,
yuzenghui@huawei.com, catalin.marinas@arm.com,
broonie@kernel.org, qperret@google.com, tabba@google.com
Subject: [PATCH v2 05/12] KVM: arm64: Initialize feature id registers for protected VMs
Date: Fri, 22 Nov 2024 11:06:15 +0000
Message-ID: <20241122110622.3010118-6-tabba@google.com>
In-Reply-To: <20241122110622.3010118-1-tabba@google.com>
The hypervisor maintains the state of protected VMs. Initialize the
feature ID register values for protected VMs, to be used both when
setting up traps and when advertising features to the guest.
Signed-off-by: Fuad Tabba <tabba@google.com>
---
.../arm64/kvm/hyp/include/nvhe/fixed_config.h | 1 +
arch/arm64/kvm/hyp/nvhe/pkvm.c | 4 ++
arch/arm64/kvm/hyp/nvhe/sys_regs.c | 54 +++++++++++++++++--
3 files changed, 56 insertions(+), 3 deletions(-)
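Note for readers of this archive (not part of the patch): the diff below
caches the computed values in kvm->arch.id_regs[], indexed via IDREG_IDX().
The sketch below illustrates how a feature ID register encoding maps to an
array slot; the bit positions and the IDREG_IDX() formula mirror my reading
of sys_reg() in arch/arm64/include/asm/sysreg.h and IDREG_IDX() in
arch/arm64/include/asm/kvm_host.h at the time of this series, so treat them
as assumptions and consult those headers for the authoritative macros.

/*
 * Illustrative only: mapping from a feature ID register encoding
 * (Op0=3, Op1=0, CRn=0, CRm=4..7, Op2=0..7) to one of 32 slots in
 * kvm->arch.id_regs[]. Field shifts follow the MRS/MSR encoding used
 * by the kernel's sys_reg() macro (Op0/Op1/CRn/CRm/Op2 at bits
 * 19/16/12/8/5).
 */
#include <stdio.h>

#define sys_reg(op0, op1, crn, crm, op2) \
	(((op0) << 19) | ((op1) << 16) | ((crn) << 12) | ((crm) << 8) | ((op2) << 5))

#define sys_reg_CRm(id)		(((id) >> 8) & 0xf)
#define sys_reg_Op2(id)		(((id) >> 5) & 0x7)

/* Slot = (CRm - 4) * 8 + Op2, giving slots 0..31 for CRm 4..7. */
#define IDREG_IDX(id)		(((sys_reg_CRm(id) - 4) << 3) | sys_reg_Op2(id))

#define SYS_ID_AA64PFR0_EL1	sys_reg(3, 0, 0, 4, 0)
#define SYS_ID_AA64MMFR2_EL1	sys_reg(3, 0, 0, 7, 2)

int main(void)
{
	/* ID_AA64PFR0_EL1 (CRm=4, Op2=0) lands in slot 0. */
	printf("ID_AA64PFR0_EL1  -> id_regs[%d]\n", IDREG_IDX(SYS_ID_AA64PFR0_EL1));
	/* ID_AA64MMFR2_EL1 (CRm=7, Op2=2) lands in slot 26. */
	printf("ID_AA64MMFR2_EL1 -> id_regs[%d]\n", IDREG_IDX(SYS_ID_AA64MMFR2_EL1));
	return 0;
}

With that mapping, the encoding range checked in read_id_reg() below,
sys_reg(3, 0, 0, 4, 0) .. sys_reg(3, 0, 0, 7, 7), corresponds to slots
0..31; anything else handled by the ID-register descriptors reads as zero.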
diff --git a/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h b/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h
index d1e59b88ff66..69e26d1a0ebe 100644
--- a/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h
+++ b/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h
@@ -201,6 +201,7 @@
u64 pvm_read_id_reg(const struct kvm_vcpu *vcpu, u32 id);
bool kvm_handle_pvm_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code);
bool kvm_handle_pvm_restricted(struct kvm_vcpu *vcpu, u64 *exit_code);
+void kvm_init_pvm_id_regs(struct kvm_vcpu *vcpu);
int kvm_check_pvm_sysreg_table(void);
#endif /* __ARM64_KVM_FIXED_CONFIG_H__ */
diff --git a/arch/arm64/kvm/hyp/nvhe/pkvm.c b/arch/arm64/kvm/hyp/nvhe/pkvm.c
index 59ff6aac514c..4ef03294b2b4 100644
--- a/arch/arm64/kvm/hyp/nvhe/pkvm.c
+++ b/arch/arm64/kvm/hyp/nvhe/pkvm.c
@@ -381,6 +381,7 @@ static void init_pkvm_hyp_vm(struct kvm *host_kvm, struct pkvm_hyp_vm *hyp_vm,
hyp_vm->kvm.created_vcpus = nr_vcpus;
hyp_vm->kvm.arch.mmu.vtcr = host_mmu.arch.mmu.vtcr;
hyp_vm->kvm.arch.pkvm.enabled = READ_ONCE(host_kvm->arch.pkvm.enabled);
+ hyp_vm->kvm.arch.flags = 0;
pkvm_init_features_from_host(hyp_vm, host_kvm);
}
@@ -419,6 +420,9 @@ static int init_pkvm_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu,
hyp_vcpu->vcpu.arch.cflags = READ_ONCE(host_vcpu->arch.cflags);
hyp_vcpu->vcpu.arch.mp_state.mp_state = KVM_MP_STATE_STOPPED;
+ if (pkvm_hyp_vcpu_is_protected(hyp_vcpu))
+ kvm_init_pvm_id_regs(&hyp_vcpu->vcpu);
+
ret = pkvm_vcpu_init_traps(hyp_vcpu);
if (ret)
goto done;
diff --git a/arch/arm64/kvm/hyp/nvhe/sys_regs.c b/arch/arm64/kvm/hyp/nvhe/sys_regs.c
index 59fb2f056177..7008e9641f41 100644
--- a/arch/arm64/kvm/hyp/nvhe/sys_regs.c
+++ b/arch/arm64/kvm/hyp/nvhe/sys_regs.c
@@ -204,8 +204,7 @@ static u64 get_pvm_id_aa64mmfr2(const struct kvm_vcpu *vcpu)
return id_aa64mmfr2_el1_sys_val & PVM_ID_AA64MMFR2_ALLOW;
}
-/* Read a sanitized cpufeature ID register by its encoding */
-u64 pvm_read_id_reg(const struct kvm_vcpu *vcpu, u32 id)
+static u64 pvm_calc_id_reg(const struct kvm_vcpu *vcpu, u32 id)
{
switch (id) {
case SYS_ID_AA64PFR0_EL1:
@@ -240,10 +239,25 @@ u64 pvm_read_id_reg(const struct kvm_vcpu *vcpu, u32 id)
}
}
+/* Read a sanitized cpufeature ID register by its encoding */
+u64 pvm_read_id_reg(const struct kvm_vcpu *vcpu, u32 id)
+{
+ return pvm_calc_id_reg(vcpu, id);
+}
+
static u64 read_id_reg(const struct kvm_vcpu *vcpu,
struct sys_reg_desc const *r)
{
- return pvm_read_id_reg(vcpu, reg_to_encoding(r));
+ struct kvm *kvm = vcpu->kvm;
+ u32 reg = reg_to_encoding(r);
+
+ if (WARN_ON_ONCE(!test_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags)))
+ return 0;
+
+ if (reg >= sys_reg(3, 0, 0, 4, 0) && reg <= sys_reg(3, 0, 0, 7, 7))
+ return kvm->arch.id_regs[IDREG_IDX(reg)];
+
+ return 0;
}
/* Handler to RAZ/WI sysregs */
@@ -448,6 +462,40 @@ static const struct sys_reg_desc pvm_sys_reg_descs[] = {
/* Performance Monitoring Registers are restricted. */
};
+/*
+ * Initializes feature ID registers for protected VMs.
+ */
+void kvm_init_pvm_id_regs(struct kvm_vcpu *vcpu)
+{
+ const u32 pvm_feat_id_regs[] = {
+ SYS_ID_AA64PFR0_EL1,
+ SYS_ID_AA64PFR1_EL1,
+ SYS_ID_AA64ISAR0_EL1,
+ SYS_ID_AA64ISAR1_EL1,
+ SYS_ID_AA64ISAR2_EL1,
+ SYS_ID_AA64ZFR0_EL1,
+ SYS_ID_AA64MMFR0_EL1,
+ SYS_ID_AA64MMFR1_EL1,
+ SYS_ID_AA64MMFR2_EL1,
+ SYS_ID_AA64MMFR4_EL1,
+ SYS_ID_AA64DFR0_EL1,
+ };
+ struct kvm *kvm = vcpu->kvm;
+ unsigned long i;
+
+ if (WARN_ON_ONCE(test_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags)))
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(pvm_feat_id_regs); i++) {
+ struct kvm_arch *ka = &kvm->arch;
+ u32 reg = pvm_feat_id_regs[i];
+
+ ka->id_regs[IDREG_IDX(reg)] = pvm_calc_id_reg(vcpu, reg);
+ }
+
+ set_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags);
+}
+
/*
* Checks that the sysreg table is unique and in-order.
*
--
2.47.0.371.ga323438b13-goog
Thread overview: 19+ messages
2024-11-22 11:06 [PATCH v2 00/12] KVM: arm64: Rework guest VM fixed feature handling and trapping in pKVM Fuad Tabba
2024-11-22 11:06 ` [PATCH v2 01/12] KVM: arm64: Consolidate allowed and restricted VM feature checks Fuad Tabba
2024-11-22 11:06 ` [PATCH v2 02/12] KVM: arm64: Group setting traps for protected VMs by control register Fuad Tabba
2024-11-22 11:06 ` [PATCH v2 03/12] KVM: arm64: Move checking protected vcpu features to a separate function Fuad Tabba
2024-11-22 11:06 ` [PATCH v2 04/12] KVM: arm64: Use KVM extension checks for allowed protected VM capabilities Fuad Tabba
2024-11-22 11:06 ` Fuad Tabba [this message]
2024-11-24 12:12 ` [PATCH v2 05/12] KVM: arm64: Initialize feature id registers for protected VMs Marc Zyngier
2024-11-25 11:58 ` Fuad Tabba
2024-11-22 11:06 ` [PATCH v2 06/12] KVM: arm64: Set protected VM traps based on its view of feature registers Fuad Tabba
2024-11-22 11:06 ` [PATCH v2 07/12] KVM: arm64: Rework specifying restricted features for protected VMs Fuad Tabba
2024-11-24 12:38 ` Marc Zyngier
2024-11-25 12:00 ` Fuad Tabba
2024-11-26 18:28 ` Kristina Martšenko
2024-11-27 9:06 ` Fuad Tabba
2024-11-22 11:06 ` [PATCH v2 08/12] KVM: arm64: Remove fixed_config.h header Fuad Tabba
2024-11-22 11:06 ` [PATCH v2 09/12] KVM: arm64: Remove redundant setting of HCR_EL2 trap bit Fuad Tabba
2024-11-22 11:06 ` [PATCH v2 10/12] KVM: arm64: Calculate cptr_el2 traps on activating traps Fuad Tabba
2024-11-22 11:06 ` [PATCH v2 11/12] KVM: arm64: Refactor kvm_reset_cptr_el2() Fuad Tabba
2024-11-22 11:06 ` [PATCH v2 12/12] KVM: arm64: Fix the value of the CPTR_EL2 RES1 bitmask for nVHE Fuad Tabba