From: Mark Brown <broonie@kernel.org>
To: Marc Zyngier <maz@kernel.org>,
Oliver Upton <oliver.upton@linux.dev>,
Joey Gouly <joey.gouly@arm.com>,
Catalin Marinas <catalin.marinas@arm.com>,
Suzuki K Poulose <suzuki.poulose@arm.com>,
Will Deacon <will@kernel.org>,
Paolo Bonzini <pbonzini@redhat.com>,
Jonathan Corbet <corbet@lwn.net>, Shuah Khan <shuah@kernel.org>
Cc: Dave Martin <Dave.Martin@arm.com>, Fuad Tabba <tabba@google.com>,
Mark Rutland <mark.rutland@arm.com>,
linux-arm-kernel@lists.infradead.org, kvmarm@lists.linux.dev,
linux-kernel@vger.kernel.org, kvm@vger.kernel.org,
linux-doc@vger.kernel.org, linux-kselftest@vger.kernel.org,
Mark Brown <broonie@kernel.org>
Subject: [PATCH v6 24/28] KVM: arm64: Handle SME exceptions
Date: Wed, 25 Jun 2025 11:48:15 +0100
Message-ID: <20250625-kvm-arm64-sme-v6-24-114cff4ffe04@kernel.org>
In-Reply-To: <20250625-kvm-arm64-sme-v6-0-114cff4ffe04@kernel.org>

Access control for SME follows the same structure as for the base FP
and SVE extensions: it is managed via CPACR_ELx.SMEN and CPTR_EL2.TSM,
mirroring the equivalent FPSIMD and SVE controls in those registers.
Add handling for these controls and the resulting exceptions, following
the existing handling for FPSIMD and SVE.

Signed-off-by: Mark Brown <broonie@kernel.org>
---
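Not part of the patch, for review reference only: a condensed sketch of
the nVHE trap configuration this change mirrors from SVE, using only the
helpers and constants visible in the diff below (vcpu_has_sme(),
guest_owns_fp_regs(), CPTR_EL2_TSM, CPTR_EL2_TZ). The function name is
hypothetical.

	/*
	 * Illustration only: SME traps (TSM) are configured exactly like
	 * the existing SVE traps (TZ) on the nVHE path.
	 */
	static u64 sme_trap_sketch_nvhe(struct kvm_vcpu *vcpu)
	{
		u64 val = CPTR_NVHE_EL2_RES1 | CPTR_EL2_TAM | CPTR_EL2_TTA;

		/* Trap SME unless the guest has it and owns the FP regs... */
		if (!vcpu_has_sme(vcpu) || !guest_owns_fp_regs())
			val |= CPTR_EL2_TSM;

		/* ...mirroring how SVE is already handled. */
		if (!vcpu_has_sve(vcpu) || !guest_owns_fp_regs())
			val |= CPTR_EL2_TZ;

		return val;
	}

On the VHE path the same pattern applies via CPACR_EL1, with SMEN set
alongside FPEN/ZEN when the guest owns the FP state.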
arch/arm64/kvm/handle_exit.c | 14 ++++++++++++++
arch/arm64/kvm/hyp/include/hyp/switch.h | 11 ++++++-----
arch/arm64/kvm/hyp/nvhe/switch.c | 4 +++-
arch/arm64/kvm/hyp/vhe/switch.c | 17 ++++++++++++-----
4 files changed, 35 insertions(+), 11 deletions(-)

diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index 453266c96481..74e1db2e0458 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -232,6 +232,19 @@ static int handle_sve(struct kvm_vcpu *vcpu)
return 1;
}
+/*
+ * Guest access to SME registers should be routed to this handler only
+ * when the system doesn't support SME.
+ */
+static int handle_sme(struct kvm_vcpu *vcpu)
+{
+ if (guest_hyp_sme_traps_enabled(vcpu))
+ return kvm_inject_nested_sync(vcpu, kvm_vcpu_get_esr(vcpu));
+
+ kvm_inject_undefined(vcpu);
+ return 1;
+}
+
/*
* Two possibilities to handle a trapping ptrauth instruction:
*
@@ -391,6 +404,7 @@ static exit_handle_fn arm_exit_handlers[] = {
[ESR_ELx_EC_SVC64] = handle_svc,
[ESR_ELx_EC_SYS64] = kvm_handle_sys_reg,
[ESR_ELx_EC_SVE] = handle_sve,
+ [ESR_ELx_EC_SME] = handle_sme,
[ESR_ELx_EC_ERET] = kvm_handle_eret,
[ESR_ELx_EC_IABT_LOW] = kvm_handle_guest_abort,
[ESR_ELx_EC_DABT_LOW] = kvm_handle_guest_abort,
diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
index 02ff7654fa9d..9f30667e44d4 100644
--- a/arch/arm64/kvm/hyp/include/hyp/switch.h
+++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
@@ -69,11 +69,8 @@ static inline void __activate_cptr_traps_nvhe(struct kvm_vcpu *vcpu)
{
u64 val = CPTR_NVHE_EL2_RES1 | CPTR_EL2_TAM | CPTR_EL2_TTA;
- /*
- * Always trap SME since it's not supported in KVM.
- * TSM is RES1 if SME isn't implemented.
- */
- val |= CPTR_EL2_TSM;
+ if (!vcpu_has_sme(vcpu) || !guest_owns_fp_regs())
+ val |= CPTR_EL2_TSM;
if (!vcpu_has_sve(vcpu) || !guest_owns_fp_regs())
val |= CPTR_EL2_TZ;
@@ -101,6 +98,8 @@ static inline void __activate_cptr_traps_vhe(struct kvm_vcpu *vcpu)
val |= CPACR_EL1_FPEN;
if (vcpu_has_sve(vcpu))
val |= CPACR_EL1_ZEN;
+ if (vcpu_has_sme(vcpu))
+ val |= CPACR_EL1_SMEN;
}
if (!vcpu_has_nv(vcpu))
@@ -142,6 +141,8 @@ static inline void __activate_cptr_traps_vhe(struct kvm_vcpu *vcpu)
val &= ~CPACR_EL1_FPEN;
if (!(SYS_FIELD_GET(CPACR_EL1, ZEN, cptr) & BIT(0)))
val &= ~CPACR_EL1_ZEN;
+ if (!(SYS_FIELD_GET(CPACR_EL1, SMEN, cptr) & BIT(0)))
+ val &= ~CPACR_EL1_SMEN;
if (kvm_has_feat(vcpu->kvm, ID_AA64MMFR3_EL1, S2POE, IMP))
val |= cptr & CPACR_EL1_E0POE;
diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c
index 0e752b515d0f..3de22aff33a4 100644
--- a/arch/arm64/kvm/hyp/nvhe/switch.c
+++ b/arch/arm64/kvm/hyp/nvhe/switch.c
@@ -175,6 +175,7 @@ static const exit_handler_fn hyp_exit_handlers[] = {
[ESR_ELx_EC_CP15_32] = kvm_hyp_handle_cp15_32,
[ESR_ELx_EC_SYS64] = kvm_hyp_handle_sysreg,
[ESR_ELx_EC_SVE] = kvm_hyp_handle_fpsimd,
+ [ESR_ELx_EC_SME] = kvm_hyp_handle_fpsimd,
[ESR_ELx_EC_FP_ASIMD] = kvm_hyp_handle_fpsimd,
[ESR_ELx_EC_IABT_LOW] = kvm_hyp_handle_iabt_low,
[ESR_ELx_EC_DABT_LOW] = kvm_hyp_handle_dabt_low,
@@ -186,7 +187,8 @@ static const exit_handler_fn pvm_exit_handlers[] = {
[0 ... ESR_ELx_EC_MAX] = NULL,
[ESR_ELx_EC_SYS64] = kvm_handle_pvm_sys64,
[ESR_ELx_EC_SVE] = kvm_handle_pvm_restricted,
- [ESR_ELx_EC_FP_ASIMD] = kvm_hyp_handle_fpsimd,
+ [ESR_ELx_EC_SME] = kvm_handle_pvm_restricted,
+ [ESR_ELx_EC_FP_ASIMD] = kvm_handle_pvm_restricted,
[ESR_ELx_EC_IABT_LOW] = kvm_hyp_handle_iabt_low,
[ESR_ELx_EC_DABT_LOW] = kvm_hyp_handle_dabt_low,
[ESR_ELx_EC_WATCHPT_LOW] = kvm_hyp_handle_watchpt_low,
diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c
index 477f1580ffea..cb3062d53a5e 100644
--- a/arch/arm64/kvm/hyp/vhe/switch.c
+++ b/arch/arm64/kvm/hyp/vhe/switch.c
@@ -438,22 +438,28 @@ static bool kvm_hyp_handle_cpacr_el1(struct kvm_vcpu *vcpu, u64 *exit_code)
return true;
}
-static bool kvm_hyp_handle_zcr_el2(struct kvm_vcpu *vcpu, u64 *exit_code)
+static bool kvm_hyp_handle_vec_cr_el2(struct kvm_vcpu *vcpu, u64 *exit_code)
{
u32 sysreg = esr_sys64_to_sysreg(kvm_vcpu_get_esr(vcpu));
if (!vcpu_has_nv(vcpu))
return false;
- if (sysreg != SYS_ZCR_EL2)
+ switch (sysreg) {
+ case SYS_ZCR_EL2:
+ case SYS_SMCR_EL2:
+ break;
+ default:
return false;
+ }
if (guest_owns_fp_regs())
return false;
/*
- * ZCR_EL2 traps are handled in the slow path, with the expectation
- * that the guest's FP context has already been loaded onto the CPU.
+ * ZCR_EL2 and SMCR_EL2 traps are handled in the slow path,
+ * with the expectation that the guest's FP context has
+ * already been loaded onto the CPU.
*
* Load the guest's FP context and unconditionally forward to the
* slow path for handling (i.e. return false).
@@ -473,7 +479,7 @@ static bool kvm_hyp_handle_sysreg_vhe(struct kvm_vcpu *vcpu, u64 *exit_code)
if (kvm_hyp_handle_cpacr_el1(vcpu, exit_code))
return true;
- if (kvm_hyp_handle_zcr_el2(vcpu, exit_code))
+ if (kvm_hyp_handle_vec_cr_el2(vcpu, exit_code))
return true;
return kvm_hyp_handle_sysreg(vcpu, exit_code);
@@ -502,6 +508,7 @@ static const exit_handler_fn hyp_exit_handlers[] = {
[0 ... ESR_ELx_EC_MAX] = NULL,
[ESR_ELx_EC_CP15_32] = kvm_hyp_handle_cp15_32,
[ESR_ELx_EC_SYS64] = kvm_hyp_handle_sysreg_vhe,
+ [ESR_ELx_EC_SME] = kvm_hyp_handle_fpsimd,
[ESR_ELx_EC_SVE] = kvm_hyp_handle_fpsimd,
[ESR_ELx_EC_FP_ASIMD] = kvm_hyp_handle_fpsimd,
[ESR_ELx_EC_IABT_LOW] = kvm_hyp_handle_iabt_low,
--
2.39.5
Thread overview: 32+ messages
2025-06-25 10:47 [PATCH v6 00/28] KVM: arm64: Implement support for SME Mark Brown
2025-06-25 10:47 ` [PATCH v6 01/28] arm64/fpsimd: Update FA64 and ZT0 enables when loading SME state Mark Brown
2025-06-25 10:47 ` [PATCH v6 02/28] arm64/fpsimd: Decide to save ZT0 and streaming mode FFR at bind time Mark Brown
2025-06-25 10:47 ` [PATCH v6 03/28] arm64/fpsimd: Check enable bit for FA64 when saving EFI state Mark Brown
2025-06-25 10:47 ` [PATCH v6 04/28] arm64/fpsimd: Determine maximum virtualisable SME vector length Mark Brown
2025-06-25 10:47 ` [PATCH v6 05/28] KVM: arm64: Introduce non-UNDEF FGT control Mark Brown
2025-06-25 10:47 ` [PATCH v6 06/28] KVM: arm64: Pay attention to FFR parameter in SVE save and load Mark Brown
2025-06-25 10:47 ` [PATCH v6 07/28] KVM: arm64: Pull ctxt_has_ helpers to start of sysreg-sr.h Mark Brown
2025-06-25 10:47 ` [PATCH v6 08/28] KVM: arm64: Move SVE state access macros after feature test macros Mark Brown
2025-06-25 10:48 ` [PATCH v6 09/28] KVM: arm64: Rename SVE finalization constants to be more general Mark Brown
2025-06-25 10:48 ` [PATCH v6 10/28] KVM: arm64: Document the KVM ABI for SME Mark Brown
2025-06-25 10:48 ` [PATCH v6 11/28] KVM: arm64: Define internal features " Mark Brown
2025-06-25 10:48 ` [PATCH v6 12/28] KVM: arm64: Rename sve_state_reg_region Mark Brown
2025-06-25 10:48 ` [PATCH v6 13/28] KVM: arm64: Store vector lengths in an array Mark Brown
2025-06-25 10:48 ` [PATCH v6 14/28] KVM: arm64: Implement SME vector length configuration Mark Brown
2025-06-25 10:48 ` [PATCH v6 15/28] KVM: arm64: Support SME control registers Mark Brown
2025-06-25 10:48 ` [PATCH v6 16/28] KVM: arm64: Support TPIDR2_EL0 Mark Brown
2025-06-25 10:48 ` [PATCH v6 17/28] KVM: arm64: Support SME identification registers for guests Mark Brown
2025-06-29 10:08 ` Marc Zyngier
2025-06-25 10:48 ` [PATCH v6 18/28] KVM: arm64: Support SME priority registers Mark Brown
2025-06-29 9:32 ` Marc Zyngier
2025-07-03 18:03 ` Mark Brown
2025-06-25 10:48 ` [PATCH v6 19/28] KVM: arm64: Provide assembly for SME register access Mark Brown
2025-06-25 10:48 ` [PATCH v6 20/28] KVM: arm64: Support userspace access to streaming mode Z and P registers Mark Brown
2025-06-25 10:48 ` [PATCH v6 21/28] KVM: arm64: Flush register state on writes to SVCR.SM and SVCR.ZA Mark Brown
2025-06-25 10:48 ` [PATCH v6 22/28] KVM: arm64: Expose SME specific state to userspace Mark Brown
2025-06-25 10:48 ` [PATCH v6 23/28] KVM: arm64: Context switch SME state for guests Mark Brown
2025-06-25 10:48 ` Mark Brown [this message]
2025-06-25 10:48 ` [PATCH v6 25/28] KVM: arm64: Expose SME to nested guests Mark Brown
2025-06-25 10:48 ` [PATCH v6 26/28] KVM: arm64: Provide interface for configuring and enabling SME for guests Mark Brown
2025-06-25 10:48 ` [PATCH v6 27/28] KVM: arm64: selftests: Add SME system registers to get-reg-list Mark Brown
2025-06-25 10:48 ` [PATCH v6 28/28] KVM: arm64: selftests: Add SME to set_id_regs test Mark Brown