From: Mark Brown <broonie@kernel.org>
To: Catalin Marinas <catalin.marinas@arm.com>,
Will Deacon <will@kernel.org>, Marc Zyngier <maz@kernel.org>,
Shuah Khan <skhan@linuxfoundation.org>,
Shuah Khan <shuah@kernel.org>
Cc: Basant Kumar Dwivedi <Basant.KumarDwivedi@arm.com>,
Luis Machado <luis.machado@arm.com>,
Szabolcs Nagy <szabolcs.nagy@arm.com>,
Mark Brown <broonie@kernel.org>,
linux-arm-kernel@lists.infradead.org,
linux-kselftest@vger.kernel.org,
Alan Hayward <alan.hayward@arm.com>,
kvmarm@lists.cs.columbia.edu,
Salil Akerkar <Salil.Akerkar@arm.com>
Subject: [PATCH v12 17/40] arm64/sme: Implement ZA context switching
Date: Fri, 25 Feb 2022 16:59:00 +0000 [thread overview]
Message-ID: <20220225165923.1474372-18-broonie@kernel.org> (raw)
In-Reply-To: <20220225165923.1474372-1-broonie@kernel.org>
Allocate space for storing ZA on first access to SME and use that to save
and restore ZA state when context switching. We do this by using the vector
form of the LDR and STR ZA instructions, these do not require streaming
mode and have implementation recommendations that they avoid contention
issues in shared SMCU implementations.
Since ZA is architecturally guaranteed to be zeroed when enabled we do not
need to explicitly zero ZA, either we will be restoring from a saved copy
or trapping on first use of SME so we know that ZA must be disabled.
Signed-off-by: Mark Brown <broonie@kernel.org>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
---
arch/arm64/include/asm/fpsimd.h | 5 ++++-
arch/arm64/include/asm/fpsimdmacros.h | 22 ++++++++++++++++++++++
arch/arm64/include/asm/kvm_host.h | 3 +++
arch/arm64/include/asm/processor.h | 1 +
arch/arm64/kernel/entry-fpsimd.S | 22 ++++++++++++++++++++++
arch/arm64/kernel/fpsimd.c | 20 +++++++++++++-------
arch/arm64/kvm/fpsimd.c | 2 +-
7 files changed, 66 insertions(+), 9 deletions(-)
diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h
index cd94f5c5b516..1a709c03bb6c 100644
--- a/arch/arm64/include/asm/fpsimd.h
+++ b/arch/arm64/include/asm/fpsimd.h
@@ -47,7 +47,8 @@ extern void fpsimd_update_current_state(struct user_fpsimd_state const *state);
extern void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *state,
void *sve_state, unsigned int sve_vl,
- unsigned int sme_vl, u64 *svcr);
+ void *za_state, unsigned int sme_vl,
+ u64 *svcr);
extern void fpsimd_flush_task_state(struct task_struct *target);
extern void fpsimd_save_and_flush_cpu_state(void);
@@ -90,6 +91,8 @@ extern void sve_flush_live(bool flush_ffr, unsigned long vq_minus_1);
extern unsigned int sve_get_vl(void);
extern void sve_set_vq(unsigned long vq_minus_1);
extern void sme_set_vq(unsigned long vq_minus_1);
+extern void za_save_state(void *state);
+extern void za_load_state(void const *state);
struct arm64_cpu_capabilities;
extern void sve_kernel_enable(const struct arm64_cpu_capabilities *__unused);
diff --git a/arch/arm64/include/asm/fpsimdmacros.h b/arch/arm64/include/asm/fpsimdmacros.h
index f6ab36e0cd8d..5e0910cf4832 100644
--- a/arch/arm64/include/asm/fpsimdmacros.h
+++ b/arch/arm64/include/asm/fpsimdmacros.h
@@ -319,3 +319,25 @@
ldr w\nxtmp, [\xpfpsr, #4]
msr fpcr, x\nxtmp
.endm
+
+.macro sme_save_za nxbase, xvl, nw
+ mov w\nw, #0
+
+423:
+ _sme_str_zav \nw, \nxbase
+ add x\nxbase, x\nxbase, \xvl
+ add x\nw, x\nw, #1
+ cmp \xvl, x\nw
+ bne 423b
+.endm
+
+.macro sme_load_za nxbase, xvl, nw
+ mov w\nw, #0
+
+423:
+ _sme_ldr_zav \nw, \nxbase
+ add x\nxbase, x\nxbase, \xvl
+ add x\nw, x\nw, #1
+ cmp \xvl, x\nw
+ bne 423b
+.endm
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 5bc01e62c08a..7dc85d5a6552 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -280,8 +280,11 @@ struct vcpu_reset_state {
struct kvm_vcpu_arch {
struct kvm_cpu_context ctxt;
+
+ /* Guest floating point state */
void *sve_state;
unsigned int sve_max_vl;
+ u64 svcr;
/* Stage 2 paging state used by the hardware on next switch */
struct kvm_s2_mmu *hw_mmu;
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index 6e2af9de153c..5a5c5edd76df 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -153,6 +153,7 @@ struct thread_struct {
unsigned int fpsimd_cpu;
void *sve_state; /* SVE registers, if any */
+ void *za_state; /* ZA register, if any */
unsigned int vl[ARM64_VEC_MAX]; /* vector length */
unsigned int vl_onexec[ARM64_VEC_MAX]; /* vl after next exec */
unsigned long fault_address; /* fault info */
diff --git a/arch/arm64/kernel/entry-fpsimd.S b/arch/arm64/kernel/entry-fpsimd.S
index 6f88c0f86d50..229436f33df5 100644
--- a/arch/arm64/kernel/entry-fpsimd.S
+++ b/arch/arm64/kernel/entry-fpsimd.S
@@ -99,4 +99,26 @@ SYM_FUNC_START(sme_set_vq)
ret
SYM_FUNC_END(sme_set_vq)
+/*
+ * Save the SME state
+ *
+ * x0 - pointer to buffer for state
+ */
+SYM_FUNC_START(za_save_state)
+ _sme_rdsvl 1, 1 // x1 = VL/8
+ sme_save_za 0, x1, 12
+ ret
+SYM_FUNC_END(za_save_state)
+
+/*
+ * Load the SME state
+ *
+ * x0 - pointer to buffer for state
+ */
+SYM_FUNC_START(za_load_state)
+ _sme_rdsvl 1, 1 // x1 = VL/8
+ sme_load_za 0, x1, 12
+ ret
+SYM_FUNC_END(za_load_state)
+
#endif /* CONFIG_ARM64_SME */
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index 12fef62cf07a..c9e8186e69c0 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -121,6 +121,7 @@
struct fpsimd_last_state_struct {
struct user_fpsimd_state *st;
void *sve_state;
+ void *za_state;
u64 *svcr;
unsigned int sve_vl;
unsigned int sme_vl;
@@ -387,11 +388,15 @@ static void task_fpsimd_load(void)
if (system_supports_sme()) {
unsigned long sme_vl = task_get_sme_vl(current);
+ /* Ensure VL is set up for restoring data */
if (test_thread_flag(TIF_SME))
sme_set_vq(sve_vq_from_vl(sme_vl) - 1);
write_sysreg_s(current->thread.svcr, SYS_SVCR_EL0);
+ if (thread_za_enabled(&current->thread))
+ za_load_state(current->thread.za_state);
+
if (thread_sm_enabled(&current->thread)) {
restore_sve_regs = true;
restore_ffr = system_supports_fa64();
@@ -435,11 +440,10 @@ static void fpsimd_save(void)
u64 *svcr = last->svcr;
*svcr = read_sysreg_s(SYS_SVCR_EL0);
- if (thread_za_enabled(&current->thread)) {
- /* ZA state managment is not implemented yet */
- force_signal_inject(SIGKILL, SI_KERNEL, 0, 0);
- return;
- }
+ *svcr = read_sysreg_s(SYS_SVCR_EL0);
+
+ if (*svcr & SYS_SVCR_EL0_ZA_MASK)
+ za_save_state(last->za_state);
/* If we are in streaming mode override regular SVE. */
if (*svcr & SYS_SVCR_EL0_SM_MASK) {
@@ -1477,6 +1481,7 @@ static void fpsimd_bind_task_to_cpu(void)
WARN_ON(!system_supports_fpsimd());
last->st = &current->thread.uw.fpsimd_state;
last->sve_state = current->thread.sve_state;
+ last->za_state = current->thread.za_state;
last->sve_vl = task_get_sve_vl(current);
last->sme_vl = task_get_sme_vl(current);
last->svcr = &current->thread.svcr;
@@ -1494,8 +1499,8 @@ static void fpsimd_bind_task_to_cpu(void)
}
void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st, void *sve_state,
- unsigned int sve_vl, unsigned int sme_vl,
- u64 *svcr)
+ unsigned int sve_vl, void *za_state,
+ unsigned int sme_vl, u64 *svcr)
{
struct fpsimd_last_state_struct *last =
this_cpu_ptr(&fpsimd_last_state);
@@ -1506,6 +1511,7 @@ void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st, void *sve_state,
last->st = st;
last->svcr = svcr;
last->sve_state = sve_state;
+ last->za_state = za_state;
last->sve_vl = sve_vl;
last->sme_vl = sme_vl;
}
diff --git a/arch/arm64/kvm/fpsimd.c b/arch/arm64/kvm/fpsimd.c
index 902c598b7ed2..338733ac63f8 100644
--- a/arch/arm64/kvm/fpsimd.c
+++ b/arch/arm64/kvm/fpsimd.c
@@ -110,7 +110,7 @@ void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu)
fpsimd_bind_state_to_cpu(&vcpu->arch.ctxt.fp_regs,
vcpu->arch.sve_state,
vcpu->arch.sve_max_vl,
- 0);
+ NULL, 0, &vcpu->arch.svcr);
clear_thread_flag(TIF_FOREIGN_FPSTATE);
update_thread_flag(TIF_SVE, vcpu_has_sve(vcpu));
--
2.30.2
_______________________________________________
kvmarm mailing list
kvmarm@lists.cs.columbia.edu
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm
next prev parent reply other threads:[~2022-02-25 17:35 UTC|newest]
Thread overview: 57+ messages / expand[flat|nested] mbox.gz Atom feed top
2022-02-25 16:58 [PATCH v12 00/40] arm64/sme: Initial support for the Scalable Matrix Extension Mark Brown
2022-02-25 16:58 ` [PATCH v12 01/40] arm64: Define CPACR_EL1_FPEN similarly to other floating point controls Mark Brown
2022-02-25 16:58 ` [PATCH v12 02/40] arm64: Always use individual bits in CPACR floating point enables Mark Brown
2022-02-25 16:58 ` [PATCH v12 03/40] arm64: cpufeature: Always specify and use a field width for capabilities Mark Brown
2022-02-25 16:58 ` [PATCH v12 04/40] kselftest/arm64: Remove local ARRAY_SIZE() definitions Mark Brown
2022-02-25 16:58 ` [PATCH v12 05/40] kselftest/arm64: signal: Allow tests to be incompatible with features Mark Brown
2022-02-25 16:58 ` [PATCH v12 06/40] arm64/sme: Provide ABI documentation for SME Mark Brown
2022-03-02 17:23 ` Catalin Marinas
2022-03-11 17:21 ` Szabolcs Nagy
2022-03-11 18:42 ` Mark Brown
2022-03-31 16:05 ` Szabolcs Nagy
2022-04-06 18:50 ` Mark Brown
2022-04-07 15:26 ` Szabolcs Nagy
2022-06-06 10:35 ` Luis Machado
2022-02-25 16:58 ` [PATCH v12 07/40] arm64/sme: System register and exception syndrome definitions Mark Brown
2022-02-25 16:58 ` [PATCH v12 08/40] arm64/sme: Manually encode SME instructions Mark Brown
2022-03-02 14:35 ` Catalin Marinas
2022-02-25 16:58 ` [PATCH v12 09/40] arm64/sme: Early CPU setup for SME Mark Brown
2022-02-25 16:58 ` [PATCH v12 10/40] arm64/sme: Basic enumeration support Mark Brown
2022-03-02 16:29 ` Catalin Marinas
2022-02-25 16:58 ` [PATCH v12 11/40] arm64/sme: Identify supported SME vector lengths at boot Mark Brown
2022-03-02 16:41 ` Catalin Marinas
2022-03-16 21:32 ` Thiago Jung Bauermann
2022-02-25 16:58 ` [PATCH v12 12/40] arm64/sme: Implement sysctl to set the default vector length Mark Brown
2022-02-25 16:58 ` [PATCH v12 13/40] arm64/sme: Implement vector length configuration prctl()s Mark Brown
2022-02-25 16:58 ` [PATCH v12 14/40] arm64/sme: Implement support for TPIDR2 Mark Brown
2022-02-25 16:58 ` [PATCH v12 15/40] arm64/sme: Implement SVCR context switching Mark Brown
2022-02-25 16:58 ` [PATCH v12 16/40] arm64/sme: Implement streaming SVE " Mark Brown
2022-02-25 16:59 ` Mark Brown [this message]
2022-02-25 16:59 ` [PATCH v12 18/40] arm64/sme: Implement traps and syscall handling for SME Mark Brown
2022-03-02 17:07 ` Catalin Marinas
2022-02-25 16:59 ` [PATCH v12 19/40] arm64/sme: Disable ZA and streaming mode when handling signals Mark Brown
2022-02-25 16:59 ` [PATCH v12 20/40] arm64/sme: Implement streaming SVE signal handling Mark Brown
2022-03-02 17:09 ` Catalin Marinas
2022-03-16 22:38 ` Thiago Jung Bauermann
2022-02-25 16:59 ` [PATCH v12 21/40] arm64/sme: Implement ZA " Mark Brown
2022-02-25 16:59 ` [PATCH v12 22/40] arm64/sme: Implement ptrace support for streaming mode SVE registers Mark Brown
2022-03-02 17:11 ` Catalin Marinas
2022-02-25 16:59 ` [PATCH v12 23/40] arm64/sme: Add ptrace support for ZA Mark Brown
2022-03-15 21:51 ` Thiago Jung Bauermann
2022-02-25 16:59 ` [PATCH v12 24/40] arm64/sme: Disable streaming mode and ZA when flushing CPU state Mark Brown
2022-02-25 16:59 ` [PATCH v12 25/40] arm64/sme: Save and restore streaming mode over EFI runtime calls Mark Brown
2022-02-25 16:59 ` [PATCH v12 26/40] KVM: arm64: Hide SME system registers from guests Mark Brown
2022-02-25 16:59 ` [PATCH v12 27/40] KVM: arm64: Trap SME usage in guest Mark Brown
2022-02-25 16:59 ` [PATCH v12 28/40] KVM: arm64: Handle SME host state when running guests Mark Brown
2022-02-25 16:59 ` [PATCH v12 29/40] arm64/sme: Provide Kconfig for SME Mark Brown
2022-02-25 16:59 ` [PATCH v12 30/40] kselftest/arm64: Add manual encodings for SME instructions Mark Brown
2022-02-25 16:59 ` [PATCH v12 31/40] kselftest/arm64: sme: Add SME support to vlset Mark Brown
2022-02-25 16:59 ` [PATCH v12 32/40] kselftest/arm64: Add tests for TPIDR2 Mark Brown
2022-02-25 16:59 ` [PATCH v12 33/40] kselftest/arm64: Extend vector configuration API tests to cover SME Mark Brown
2022-02-25 16:59 ` [PATCH v12 34/40] kselftest/arm64: sme: Provide streaming mode SVE stress test Mark Brown
2022-02-25 16:59 ` [PATCH v12 35/40] kselftest/arm64: signal: Handle ZA signal context in core code Mark Brown
2022-02-25 16:59 ` [PATCH v12 36/40] kselftest/arm64: Add stress test for SME ZA context switching Mark Brown
2022-02-25 16:59 ` [PATCH v12 37/40] kselftest/arm64: signal: Add SME signal handling tests Mark Brown
2022-02-25 16:59 ` [PATCH v12 38/40] kselftest/arm64: Add streaming SVE to SVE ptrace tests Mark Brown
2022-02-25 16:59 ` [PATCH v12 39/40] kselftest/arm64: Add coverage for the ZA ptrace interface Mark Brown
2022-02-25 16:59 ` [PATCH v12 40/40] kselftest/arm64: Add SME support to syscall ABI test Mark Brown
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20220225165923.1474372-18-broonie@kernel.org \
--to=broonie@kernel.org \
--cc=Basant.KumarDwivedi@arm.com \
--cc=Salil.Akerkar@arm.com \
--cc=alan.hayward@arm.com \
--cc=catalin.marinas@arm.com \
--cc=kvmarm@lists.cs.columbia.edu \
--cc=linux-arm-kernel@lists.infradead.org \
--cc=linux-kselftest@vger.kernel.org \
--cc=luis.machado@arm.com \
--cc=maz@kernel.org \
--cc=shuah@kernel.org \
--cc=skhan@linuxfoundation.org \
--cc=szabolcs.nagy@arm.com \
--cc=will@kernel.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox