From: Andrew Scull <ascull@google.com>
To: kvmarm@lists.cs.columbia.edu
Cc: maz@kernel.org, kernel-team@android.com
Subject: [PATCH 08/37] KVM: arm64: nVHE: Introduce a hyp run loop for the host
Date: Wed, 15 Jul 2020 19:44:09 +0100 [thread overview]
Message-ID: <20200715184438.1390996-9-ascull@google.com> (raw)
In-Reply-To: <20200715184438.1390996-1-ascull@google.com>
After installing the page tables and exception vector, the call to
__do_hyp_init no longer directly returns to the host with an eret but,
instead, begins to treat the host as a vCPU and repeatedly __guest_enters
into it.
As a result, hyp is endowed with its very own context for the general
purpose registers. However, at this point in time, the state is stored
in a confusing way:
- hyp gp_regs and ptrauth are stored in the kvm_host_data context
- host gp_regs and ptrauth are stored in kvm_host_vcpu
- other host sysregs are stored in the kvm_host_data context
This is the initial step in the migration but all the host registers
will need to be moved into kvm_host_vcpu for the migration to be
complete.
Signed-off-by: Andrew Scull <ascull@google.com>
---
arch/arm64/include/asm/kvm_host.h | 5 ++
arch/arm64/include/asm/kvm_hyp.h | 3 +
arch/arm64/kernel/image-vars.h | 1 +
arch/arm64/kvm/arm.c | 10 +++
arch/arm64/kvm/hyp/entry.S | 4 +-
arch/arm64/kvm/hyp/hyp-entry.S | 29 +-------
arch/arm64/kvm/hyp/include/hyp/switch.h | 4 +-
arch/arm64/kvm/hyp/nvhe/Makefile | 2 +-
arch/arm64/kvm/hyp/nvhe/hyp-main.c | 90 +++++++++++++++++++++++++
arch/arm64/kvm/hyp/nvhe/hyp-start.S | 39 ++++++++++-
10 files changed, 154 insertions(+), 33 deletions(-)
create mode 100644 arch/arm64/kvm/hyp/nvhe/hyp-main.c
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 67a760d08b6e..183312340d2c 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -413,6 +413,11 @@ struct kvm_vcpu_arch {
#define KVM_ARM64_VCPU_SVE_FINALIZED (1 << 6) /* SVE config completed */
#define KVM_ARM64_GUEST_HAS_PTRAUTH (1 << 7) /* PTRAUTH exposed to guest */
+#define KVM_ARM64_HOST_VCPU_FLAGS KVM_ARM64_DEBUG_DIRTY \
+ | KVM_ARM64_GUEST_HAS_SVE \
+ | KVM_ARM64_VCPU_SVE_FINALIZED \
+ | KVM_ARM64_GUEST_HAS_PTRAUTH
+
#define vcpu_has_sve(vcpu) (system_supports_sve() && \
((vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_SVE))
diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h
index 50a774812761..d6915ab60e1f 100644
--- a/arch/arm64/include/asm/kvm_hyp.h
+++ b/arch/arm64/include/asm/kvm_hyp.h
@@ -13,6 +13,9 @@
#include <asm/sysreg.h>
DECLARE_PER_CPU(struct kvm_vcpu *, kvm_hyp_running_vcpu);
+#ifdef __KVM_NVHE_HYPERVISOR__
+DECLARE_PER_CPU(struct kvm_vcpu, kvm_host_vcpu);
+#endif
#define read_sysreg_elx(r,nvh,vh) \
({ \
diff --git a/arch/arm64/kernel/image-vars.h b/arch/arm64/kernel/image-vars.h
index dfe0f37567f3..5b93da2359d4 100644
--- a/arch/arm64/kernel/image-vars.h
+++ b/arch/arm64/kernel/image-vars.h
@@ -71,6 +71,7 @@ KVM_NVHE_ALIAS(kvm_update_va_mask);
/* Global kernel state accessed by nVHE hyp code. */
KVM_NVHE_ALIAS(arm64_ssbd_callback_required);
KVM_NVHE_ALIAS(kvm_host_data);
+KVM_NVHE_ALIAS(kvm_host_vcpu);
KVM_NVHE_ALIAS(kvm_hyp_running_vcpu);
KVM_NVHE_ALIAS(kvm_vgic_global_state);
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 52be6149fcbf..8bd4630666ca 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -47,6 +47,7 @@ __asm__(".arch_extension virt");
#endif
DEFINE_PER_CPU(kvm_host_data_t, kvm_host_data);
+DEFINE_PER_CPU(struct kvm_vcpu, kvm_host_vcpu);
DEFINE_PER_CPU(struct kvm_vcpu *, kvm_hyp_running_vcpu);
static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
@@ -1544,6 +1545,7 @@ static int init_hyp_mode(void)
for_each_possible_cpu(cpu) {
kvm_host_data_t *cpu_data;
+ struct kvm_vcpu *host_vcpu;
struct kvm_vcpu **running_vcpu;
cpu_data = per_cpu_ptr(&kvm_host_data, cpu);
@@ -1554,6 +1556,14 @@ static int init_hyp_mode(void)
goto out_err;
}
+ host_vcpu = per_cpu_ptr(&kvm_host_vcpu, cpu);
+ err = create_hyp_mappings(host_vcpu, host_vcpu + 1, PAGE_HYP);
+
+ if (err) {
+ kvm_err("Cannot map host vCPU: %d\n", err);
+ goto out_err;
+ }
+
running_vcpu = per_cpu_ptr(&kvm_hyp_running_vcpu, cpu);
err = create_hyp_mappings(running_vcpu, running_vcpu + 1, PAGE_HYP);
diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S
index dc4e3e7e7407..da349c152791 100644
--- a/arch/arm64/kvm/hyp/entry.S
+++ b/arch/arm64/kvm/hyp/entry.S
@@ -72,8 +72,8 @@ SYM_FUNC_START(__guest_enter)
// Save the host's sp_el0
save_sp_el0 x1, x2
- // Now the host state is stored if we have a pending RAS SError it must
- // affect the host. If physical IRQ interrupts are going to be trapped
+ // Now the hyp state is stored if we have a pending RAS SError it must
+ // affect the hyp. If physical IRQ interrupts are going to be trapped
// and there are already asynchronous exceptions pending then we defer
// the entry. The DSB isn't necessary before v8.2 as any SError would
// be fatal.
diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S
index c441aabb8ab0..a45459d1c135 100644
--- a/arch/arm64/kvm/hyp/hyp-entry.S
+++ b/arch/arm64/kvm/hyp/hyp-entry.S
@@ -17,20 +17,6 @@
.text
-.macro do_el2_call
- /*
- * Shuffle the parameters before calling the function
- * pointed to in x0. Assumes parameters in x[1,2,3].
- */
- str lr, [sp, #-16]!
- mov lr, x0
- mov x0, x1
- mov x1, x2
- mov x2, x3
- blr lr
- ldr lr, [sp], #16
-.endm
-
el1_sync: // Guest trapped into EL2
mrs x0, esr_el2
@@ -44,11 +30,12 @@ el1_sync: // Guest trapped into EL2
cbnz x1, el1_hvc_guest // called HVC
/* Here, we're pretty sure the host called HVC. */
- ldp x0, x1, [sp], #16
+ ldp x0, x1, [sp]
/* Check for a stub HVC call */
cmp x0, #HVC_STUB_HCALL_NR
- b.hs 1f
+ b.hs el1_trap
+ add sp, sp, #16
/*
* Compute the idmap address of __kvm_handle_stub_hvc and
@@ -64,16 +51,6 @@ el1_sync: // Guest trapped into EL2
/* x5 = __pa(x5) */
sub x5, x5, x6
br x5
-
-1:
- /*
- * Perform the EL2 call
- */
- kern_hyp_va x0
- do_el2_call
-
- eret
- sb
#endif /* __KVM_NVHE_HYPERVISOR__ */
el1_hvc_guest:
diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
index 14a774d1a35a..248f434c5de6 100644
--- a/arch/arm64/kvm/hyp/include/hyp/switch.h
+++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
@@ -405,8 +405,8 @@ static inline bool __hyp_handle_ptrauth(struct kvm_vcpu *vcpu)
*/
static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
{
- /* Flush guest SErrors. */
- if (ARM_SERROR_PENDING(*exit_code))
+ /* Flush guest SErrors but leave them pending for the host. */
+ if (ARM_SERROR_PENDING(*exit_code) && !vcpu->arch.ctxt.is_host)
__vaxorcize_serror();
if (ARM_EXCEPTION_CODE(*exit_code) != ARM_EXCEPTION_IRQ)
diff --git a/arch/arm64/kvm/hyp/nvhe/Makefile b/arch/arm64/kvm/hyp/nvhe/Makefile
index 1f3a39efaa6e..d60cf9434895 100644
--- a/arch/arm64/kvm/hyp/nvhe/Makefile
+++ b/arch/arm64/kvm/hyp/nvhe/Makefile
@@ -7,7 +7,7 @@ asflags-y := -D__KVM_NVHE_HYPERVISOR__
ccflags-y := -D__KVM_NVHE_HYPERVISOR__
obj-y := timer-sr.o sysreg-sr.o debug-sr.o switch.o tlb.o hyp-init.o \
- hyp-start.o
+ hyp-start.o hyp-main.o
obj-y += ../vgic-v3-sr.o ../aarch32.o ../vgic-v2-cpuif-proxy.o ../entry.o \
../fpsimd.o ../hyp-entry.o
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
new file mode 100644
index 000000000000..9b58d58d6cfa
--- /dev/null
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
@@ -0,0 +1,90 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020 - Google Inc
+ * Author: Andrew Scull <ascull@google.com>
+ */
+
+#include <hyp/switch.h>
+
+#include <asm/kvm_asm.h>
+#include <asm/kvm_emulate.h>
+#include <asm/kvm_hyp.h>
+#include <asm/kvm_mmu.h>
+
+typedef unsigned long (*hypcall_fn_t)
+ (unsigned long, unsigned long, unsigned long);
+
+static void handle_trap(struct kvm_vcpu *host_vcpu) {
+ if (kvm_vcpu_trap_get_class(host_vcpu) == ESR_ELx_EC_HVC64) {
+ hypcall_fn_t func;
+ unsigned long ret;
+
+ /*
+ * __kvm_call_hyp takes a pointer in the host address space and
+ * up to three arguments.
+ */
+ func = (hypcall_fn_t)kern_hyp_va(vcpu_get_reg(host_vcpu, 0));
+ ret = func(vcpu_get_reg(host_vcpu, 1),
+ vcpu_get_reg(host_vcpu, 2),
+ vcpu_get_reg(host_vcpu, 3));
+ vcpu_set_reg(host_vcpu, 0, ret);
+ }
+
+ /* Other traps are ignored. */
+}
+
+void __noreturn kvm_hyp_main(void)
+{
+ /* Set tpidr_el2 for use by HYP */
+ struct kvm_vcpu *host_vcpu;
+ struct kvm_cpu_context *hyp_ctxt;
+
+ host_vcpu = __hyp_this_cpu_ptr(kvm_host_vcpu);
+ hyp_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
+
+ kvm_init_host_cpu_context(&host_vcpu->arch.ctxt);
+
+ host_vcpu->arch.flags = KVM_ARM64_HOST_VCPU_FLAGS;
+ host_vcpu->arch.workaround_flags = VCPU_WORKAROUND_2_FLAG;
+
+ while (true) {
+ u64 exit_code;
+
+ /*
+ * Set the running cpu for the vectors to pass to __guest_exit
+ * so it can get the cpu context.
+ */
+ *__hyp_this_cpu_ptr(kvm_hyp_running_vcpu) = host_vcpu;
+
+ /*
+ * Enter the host now that we feel like we're in charge.
+ *
+ * This should merge with __kvm_vcpu_run as host becomes more
+ * vcpu-like.
+ */
+ do {
+ exit_code = __guest_enter(host_vcpu, hyp_ctxt);
+ } while (fixup_guest_exit(host_vcpu, &exit_code));
+
+ switch (ARM_EXCEPTION_CODE(exit_code)) {
+ case ARM_EXCEPTION_TRAP:
+ handle_trap(host_vcpu);
+ break;
+ case ARM_EXCEPTION_IRQ:
+ case ARM_EXCEPTION_EL1_SERROR:
+ case ARM_EXCEPTION_IL:
+ default:
+ /*
+ * These cases are not expected to be observed for the
+ * host so, in the event that they are seen, take a
+ * best-effort approach to keep things going.
+ *
+ * Ok, our expended effort comes to a grand total of
+ * diddly squat but the internet protocol has gotten
+ * away with the "best-effort" euphemism so we can too.
+ */
+ break;
+ }
+
+ }
+}
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-start.S b/arch/arm64/kvm/hyp/nvhe/hyp-start.S
index 5f7fbcb57fd5..dd955e022963 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-start.S
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-start.S
@@ -6,11 +6,46 @@
#include <linux/linkage.h>
+#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
+#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
+#include <asm/kvm_ptrauth.h>
+
+#define CPU_LR_OFFSET (CPU_USER_PT_REGS + (8 * 30))
+
+/*
+ * Initialize ptrauth in the hyp ctxt by populating it with the keys of the
+ * host, which are the keys currently installed.
+ */
+.macro ptrauth_hyp_ctxt_init hyp_ctxt, reg1, reg2, reg3
+#ifdef CONFIG_ARM64_PTR_AUTH
+alternative_if_not ARM64_HAS_ADDRESS_AUTH
+ b .L__skip_switch\@
+alternative_else_nop_endif
+ add \reg1, \hyp_ctxt, #CPU_APIAKEYLO_EL1
+ ptrauth_save_state \reg1, \reg2, \reg3
+.L__skip_switch\@:
+#endif
+.endm
SYM_CODE_START(__kvm_hyp_start)
- /* Hello, World! */
- eret
+ get_host_ctxt x0, x1
+
+ ptrauth_hyp_ctxt_init x0, x1, x2, x3
+
+ /* Prepare a tail call from __guest_exit to kvm_hyp_main */
+ adr x1, kvm_hyp_main
+ str x1, [x0, #CPU_LR_OFFSET]
+
+ /*
+ * The host's x0 and x1 are expected on the stack but they will be
+ * clobbered so there's no need to load real values.
+ */
+ sub sp, sp, 16
+
+ hyp_adr_this_cpu x1, kvm_host_vcpu, x0
+ mov x0, #ARM_EXCEPTION_TRAP
+ b __guest_exit
SYM_CODE_END(__kvm_hyp_start)
--
2.27.0.389.gc38d7665816-goog
_______________________________________________
kvmarm mailing list
kvmarm@lists.cs.columbia.edu
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm
next prev parent reply other threads:[~2020-07-15 18:45 UTC|newest]
Thread overview: 45+ messages / expand[flat|nested] mbox.gz Atom feed top
2020-07-15 18:44 [PATCH 00/37] Transform the host into a vCPU Andrew Scull
2020-07-15 18:44 ` [PATCH 01/37] smccc: Make constants available to assembly Andrew Scull
2020-07-15 18:44 ` [PATCH 02/37] KVM: arm64: Move clearing of vcpu debug dirty bit Andrew Scull
2020-07-15 18:44 ` [PATCH 03/37] KVM: arm64: Track running vCPU outside of the CPU context Andrew Scull
2020-07-15 18:44 ` [PATCH 04/37] KVM: arm64: nVHE: Pass pointers consistently to hyp-init Andrew Scull
2020-07-15 18:44 ` [PATCH 05/37] KVM: arm64: nVHE: Break out of the hyp-init idmap Andrew Scull
2020-07-15 18:44 ` [PATCH 06/37] KVM: arm64: Only check pending interrupts if it would trap Andrew Scull
2020-07-17 16:21 ` Marc Zyngier
2020-07-15 18:44 ` [PATCH 07/37] KVM: arm64: Separate SError detection from VAXorcism Andrew Scull
2020-07-18 9:00 ` Marc Zyngier
2020-07-20 14:13 ` Andrew Scull
2020-07-20 14:56 ` Marc Zyngier
2020-07-23 0:59 ` FW: " Renters Cancellation Requests
2020-07-20 15:40 ` Andrew Scull
2020-07-20 15:57 ` Marc Zyngier
2020-07-15 18:44 ` Andrew Scull [this message]
2020-07-15 18:44 ` [PATCH 09/37] smccc: Cast arguments to unsigned long Andrew Scull
2020-07-15 18:44 ` [PATCH 10/37] KVM: arm64: nVHE: Migrate hyp interface to SMCCC Andrew Scull
2020-07-15 18:44 ` [PATCH 11/37] KVM: arm64: nVHE: Migrate hyp-init " Andrew Scull
2020-07-15 18:44 ` [PATCH 12/37] KVM: arm64: nVHE: Fix pointers during SMCCC convertion Andrew Scull
2020-07-15 18:44 ` [PATCH 13/37] KVM: arm64: Rename workaround 2 helpers Andrew Scull
2020-07-15 18:44 ` [PATCH 14/37] KVM: arm64: nVHE: Use __kvm_vcpu_run for the host vcpu Andrew Scull
2020-07-15 18:44 ` [PATCH 15/37] KVM: arm64: Share some context save and restore macros Andrew Scull
2020-07-15 18:44 ` [PATCH 16/37] KVM: arm64: nVHE: Handle stub HVCs in the host loop Andrew Scull
2020-07-15 18:44 ` [PATCH 17/37] KVM: arm64: nVHE: Store host sysregs in host vcpu Andrew Scull
2020-07-15 18:44 ` [PATCH 18/37] KVM: arm64: nVHE: Access pmu_events directly in kvm_host_data Andrew Scull
2020-07-15 18:44 ` [PATCH 19/37] KVM: arm64: nVHE: Drop host_ctxt argument for context switching Andrew Scull
2020-07-15 18:44 ` [PATCH 20/37] KVM: arm64: nVHE: Use host vcpu context for host debug state Andrew Scull
2020-07-15 18:44 ` [PATCH 21/37] KVM: arm64: Move host debug state from vcpu to percpu Andrew Scull
2020-07-15 18:44 ` [PATCH 22/37] KVM: arm64: nVHE: Store host's mdcr_el2 and hcr_el2 in its vcpu Andrew Scull
2020-07-15 18:44 ` [PATCH 23/37] KVM: arm64: Skip __hyp_panic and go direct to hyp_panic Andrew Scull
2020-07-15 18:44 ` [PATCH 24/37] KVM: arm64: Break apart kvm_host_data Andrew Scull
2020-07-15 18:44 ` [PATCH 25/37] KVM: arm64: nVHE: Unify sysreg state saving paths Andrew Scull
2020-07-15 18:44 ` [PATCH 26/37] KVM: arm64: nVHE: Unify 32-bit sysreg " Andrew Scull
2020-07-15 18:44 ` [PATCH 27/37] KVM: arm64: nVHE: Unify vgic save and restore Andrew Scull
2020-07-15 18:44 ` [PATCH 28/37] KVM: arm64: nVHE: Unify fpexc32 saving paths Andrew Scull
2020-07-15 18:44 ` [PATCH 29/37] KVM: arm64: nVHE: Separate the save and restore of debug state Andrew Scull
2020-07-15 18:44 ` [PATCH 30/37] KVM: arm64: nVHE: Remove MMU assumption in speculative AT workaround Andrew Scull
2020-07-15 18:44 ` [PATCH 31/37] KVM: arm64: Move speculative AT ISBs into context Andrew Scull
2020-07-15 18:44 ` [PATCH 32/37] KVM: arm64: nVHE: Unify sysreg state restoration paths Andrew Scull
2020-07-15 18:44 ` [PATCH 33/37] KVM: arm64: Remove __activate_vm wrapper Andrew Scull
2020-07-15 18:44 ` [PATCH 34/37] KVM: arm64: nVHE: Unify timer restore paths Andrew Scull
2020-07-15 18:44 ` [PATCH 35/37] KVM: arm64: nVHE: Unify PMU event restoration paths Andrew Scull
2020-07-15 18:44 ` [PATCH 36/37] KVM: arm64: nVHE: Unify GIC PMR " Andrew Scull
2020-07-15 18:44 ` [PATCH 37/37] KVM: arm64: Separate save and restore of vcpu trap state Andrew Scull
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20200715184438.1390996-9-ascull@google.com \
--to=ascull@google.com \
--cc=kernel-team@android.com \
--cc=kvmarm@lists.cs.columbia.edu \
--cc=maz@kernel.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox