From: Lai Jiangshan <jiangshanlai@gmail.com>
To: linux-kernel@vger.kernel.org
Cc: Lai Jiangshan <jiangshan.ljs@antgroup.com>,
Hou Wenlong <houwenlong.hwl@antgroup.com>,
Linus Torvalds <torvalds@linux-foundation.org>,
Peter Zijlstra <peterz@infradead.org>,
Sean Christopherson <seanjc@google.com>,
Thomas Gleixner <tglx@linutronix.de>,
Borislav Petkov <bp@alien8.de>, Ingo Molnar <mingo@redhat.com>,
kvm@vger.kernel.org, Paolo Bonzini <pbonzini@redhat.com>,
x86@kernel.org, Kees Cook <keescook@chromium.org>,
Juergen Gross <jgross@suse.com>,
Dave Hansen <dave.hansen@linux.intel.com>,
"H. Peter Anvin" <hpa@zytor.com>
Subject: [RFC PATCH 21/73] KVM: x86/PVM: Implement vcpu_run() callbacks
Date: Mon, 26 Feb 2024 22:35:38 +0800 [thread overview]
Message-ID: <20240226143630.33643-22-jiangshanlai@gmail.com> (raw)
In-Reply-To: <20240226143630.33643-1-jiangshanlai@gmail.com>
From: Lai Jiangshan <jiangshan.ljs@antgroup.com>
In the vcpu_run() callback, the hypervisor needs to prepare for VM enter
in the switcher and record exit reasons after VM exit. The guest
registers are prepared on the host SP0 stack, and the guest/host
hardware CR3 is saved in the TSS for the switcher before VM enter.
Additionally, the guest xsave state is loaded into hardware before VM
enter. After VM exit, the guest registers are saved from the entry
stack, and the host xsave state is restored.
Signed-off-by: Lai Jiangshan <jiangshan.ljs@antgroup.com>
Signed-off-by: Hou Wenlong <houwenlong.hwl@antgroup.com>
---
arch/x86/kvm/pvm/pvm.c | 163 +++++++++++++++++++++++++++++++++++++++++
arch/x86/kvm/pvm/pvm.h | 5 ++
2 files changed, 168 insertions(+)
diff --git a/arch/x86/kvm/pvm/pvm.c b/arch/x86/kvm/pvm/pvm.c
index 52b3b47ffe42..00a50ed0c118 100644
--- a/arch/x86/kvm/pvm/pvm.c
+++ b/arch/x86/kvm/pvm/pvm.c
@@ -16,8 +16,11 @@
#include <asm/gsseg.h>
#include <asm/io_bitmap.h>
#include <asm/pvm_para.h>
+#include <asm/mmu_context.h>
#include "cpuid.h"
+#include "lapic.h"
+#include "trace.h"
#include "x86.h"
#include "pvm.h"
@@ -204,6 +207,31 @@ static void pvm_switch_to_host(struct vcpu_pvm *pvm)
preempt_enable();
}
+static void pvm_set_host_cr3_for_hypervisor(struct vcpu_pvm *pvm)
+{
+ unsigned long cr3;
+
+ if (static_cpu_has(X86_FEATURE_PCID))
+ cr3 = __get_current_cr3_fast() | X86_CR3_PCID_NOFLUSH;
+ else
+ cr3 = __get_current_cr3_fast();
+ this_cpu_write(cpu_tss_rw.tss_ex.host_cr3, cr3);
+}
+
+// Prepare both switcher CR3 slots in the per-CPU TSS extension:
+//  - tss_ex.host_cr3: host kernel CR3, restored by the switcher on VM exit.
+//  - tss_ex.enter_cr3: guest CR3 (the vCPU's current MMU root HPA), loaded
+//    by the switcher on VM enter.
+static void pvm_set_host_cr3(struct vcpu_pvm *pvm)
+{
+ pvm_set_host_cr3_for_hypervisor(pvm);
+ this_cpu_write(cpu_tss_rw.tss_ex.enter_cr3, pvm->vcpu.arch.mmu->root.hpa);
+}
+
+// kvm_x86_ops::load_mmu_pgd callback: intentionally empty for PVM.
+static void pvm_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa,
+ int root_level)
+{
+ /* Nothing to do. Guest cr3 will be prepared in pvm_set_host_cr3(). */
+}
+
DEFINE_PER_CPU(struct vcpu_pvm *, active_pvm_vcpu);
/*
@@ -262,6 +290,136 @@ static bool cpu_has_pvm_wbinvd_exit(void)
return true;
}
+// kvm_x86_ops::vcpu_pre_run callback.  Returning a positive value tells
+// the common KVM run loop to proceed into vcpu_run(); PVM has no
+// pre-run work to do yet (presumably extended by later patches).
+static int pvm_vcpu_pre_run(struct kvm_vcpu *vcpu)
+{
+ return 1;
+}
+
+// Save guest registers from host sp0 or IST stack.
+// Copies the GPRs and RIP from the pt_regs frame written by the switcher
+// into vcpu->arch.regs, and stashes the guest flags/CS/SS in the vcpu_pvm
+// struct (they are hardware frame fields, not ordinary GPRs).
+static __always_inline void save_regs(struct kvm_vcpu *vcpu, struct pt_regs *guest)
+{
+ struct vcpu_pvm *pvm = to_pvm(vcpu);
+
+ vcpu->arch.regs[VCPU_REGS_RAX] = guest->ax;
+ vcpu->arch.regs[VCPU_REGS_RCX] = guest->cx;
+ vcpu->arch.regs[VCPU_REGS_RDX] = guest->dx;
+ vcpu->arch.regs[VCPU_REGS_RBX] = guest->bx;
+ vcpu->arch.regs[VCPU_REGS_RSP] = guest->sp;
+ vcpu->arch.regs[VCPU_REGS_RBP] = guest->bp;
+ vcpu->arch.regs[VCPU_REGS_RSI] = guest->si;
+ vcpu->arch.regs[VCPU_REGS_RDI] = guest->di;
+ vcpu->arch.regs[VCPU_REGS_R8] = guest->r8;
+ vcpu->arch.regs[VCPU_REGS_R9] = guest->r9;
+ vcpu->arch.regs[VCPU_REGS_R10] = guest->r10;
+ vcpu->arch.regs[VCPU_REGS_R11] = guest->r11;
+ vcpu->arch.regs[VCPU_REGS_R12] = guest->r12;
+ vcpu->arch.regs[VCPU_REGS_R13] = guest->r13;
+ vcpu->arch.regs[VCPU_REGS_R14] = guest->r14;
+ vcpu->arch.regs[VCPU_REGS_R15] = guest->r15;
+ vcpu->arch.regs[VCPU_REGS_RIP] = guest->ip;
+ pvm->rflags = guest->flags;
+ pvm->hw_cs = guest->cs;
+ pvm->hw_ss = guest->ss;
+}
+
+// Load guest registers to the host sp0 stack (inverse of save_regs()):
+// build the pt_regs frame the switcher will IRET/ERETU from into the guest.
+static __always_inline void load_regs(struct kvm_vcpu *vcpu, struct pt_regs *guest)
+{
+ struct vcpu_pvm *pvm = to_pvm(vcpu);
+
+ guest->ss = pvm->hw_ss;
+ guest->sp = vcpu->arch.regs[VCPU_REGS_RSP];
+ // Sanitize guest rflags: keep only the allowed bits and force the
+ // fixed bits, so the guest cannot enter with dangerous flags (e.g. IF
+ // cleared or IOPL raised) on the real hardware frame.
+ guest->flags = (pvm->rflags & SWITCH_ENTER_EFLAGS_ALLOWED) | SWITCH_ENTER_EFLAGS_FIXED;
+ guest->cs = pvm->hw_cs;
+ guest->ip = vcpu->arch.regs[VCPU_REGS_RIP];
+ // -1 marks "not a syscall"; on VM exit the switcher reuses orig_ax to
+ // report the exit vector/error code (see pvm_vcpu_run_noinstr()).
+ guest->orig_ax = -1;
+ guest->di = vcpu->arch.regs[VCPU_REGS_RDI];
+ guest->si = vcpu->arch.regs[VCPU_REGS_RSI];
+ guest->dx = vcpu->arch.regs[VCPU_REGS_RDX];
+ guest->cx = vcpu->arch.regs[VCPU_REGS_RCX];
+ guest->ax = vcpu->arch.regs[VCPU_REGS_RAX];
+ guest->r8 = vcpu->arch.regs[VCPU_REGS_R8];
+ guest->r9 = vcpu->arch.regs[VCPU_REGS_R9];
+ guest->r10 = vcpu->arch.regs[VCPU_REGS_R10];
+ guest->r11 = vcpu->arch.regs[VCPU_REGS_R11];
+ guest->bx = vcpu->arch.regs[VCPU_REGS_RBX];
+ guest->bp = vcpu->arch.regs[VCPU_REGS_RBP];
+ guest->r12 = vcpu->arch.regs[VCPU_REGS_R12];
+ guest->r13 = vcpu->arch.regs[VCPU_REGS_R13];
+ guest->r14 = vcpu->arch.regs[VCPU_REGS_R14];
+ guest->r15 = vcpu->arch.regs[VCPU_REGS_R15];
+}
+
+/*
+ * Non-instrumentable inner run loop: stage guest registers on the host
+ * sp0 stack, call into the switcher to enter the guest, then recover the
+ * guest state and exit reason after VM exit.  Must run with IRQs off.
+ */
+static noinstr void pvm_vcpu_run_noinstr(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_pvm *pvm = to_pvm(vcpu);
+ struct pt_regs *sp0_regs = (struct pt_regs *)this_cpu_read(cpu_tss_rw.x86_tss.sp0) - 1;
+ struct pt_regs *ret_regs;
+
+ guest_state_enter_irqoff();
+
+ // Load guest registers into the host sp0 stack for switcher.
+ load_regs(vcpu, sp0_regs);
+
+ // Call into switcher and enter guest.
+ ret_regs = switcher_enter_guest();
+
+ // Get the guest registers from the host sp0 stack.
+ save_regs(vcpu, ret_regs);
+ // The switcher packs the exit reason into orig_ax: exit vector in the
+ // high 32 bits, hardware error code (if any) in the low 32 bits.
+ pvm->exit_vector = (ret_regs->orig_ax >> 32);
+ pvm->exit_error_code = (u32)ret_regs->orig_ax;
+
+ guest_state_exit_irqoff();
+}
+
+/*
+ * PVM wrappers for kvm_load_{guest|host}_xsave_state().
+ *
+ * Currently PKU is disabled for shadowpaging and to avoid overhead,
+ * host CR4.PKE is unchanged for entering/exiting guest even when
+ * host CR4.PKE is enabled.
+ *
+ * These wrappers fix pkru when host CR4.PKE is enabled.
+ */
+// Load guest xsave state, then force PKRU to 0 for the guest when the
+// host runs with a non-zero PKRU: PKU is disabled for the guest (see the
+// comment block above), so no guest access may be blocked by host
+// protection keys while CR4.PKE stays enabled.
+static inline void pvm_load_guest_xsave_state(struct kvm_vcpu *vcpu)
+{
+ kvm_load_guest_xsave_state(vcpu);
+
+ if (cpu_feature_enabled(X86_FEATURE_PKU)) {
+ if (vcpu->arch.host_pkru)
+ write_pkru(0);
+ }
+}
+
+// Restore host xsave state, then restore the host PKRU value if the
+// current hardware PKRU differs (it was zeroed for the guest in
+// pvm_load_guest_xsave_state()).
+static inline void pvm_load_host_xsave_state(struct kvm_vcpu *vcpu)
+{
+ kvm_load_host_xsave_state(vcpu);
+
+ if (cpu_feature_enabled(X86_FEATURE_PKU)) {
+ if (rdpkru() != vcpu->arch.host_pkru)
+ write_pkru(vcpu->arch.host_pkru);
+ }
+}
+
+/*
+ * kvm_x86_ops::vcpu_run callback: trace entry, load guest xsave state,
+ * wait out any pending lapic timer, program the switcher CR3 slots, run
+ * the guest via the noinstr inner loop, then restore host xsave state.
+ * Always returns EXIT_FASTPATH_NONE (no fastpath handling yet).
+ */
+static fastpath_t pvm_vcpu_run(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_pvm *pvm = to_pvm(vcpu);
+
+ trace_kvm_entry(vcpu);
+
+ pvm_load_guest_xsave_state(vcpu);
+
+ kvm_wait_lapic_expire(vcpu);
+
+ pvm_set_host_cr3(pvm);
+
+ pvm_vcpu_run_noinstr(vcpu);
+
+ pvm_load_host_xsave_state(vcpu);
+
+ return EXIT_FASTPATH_NONE;
+}
+
static void reset_segment(struct kvm_segment *var, int seg)
{
memset(var, 0, sizeof(*var));
@@ -520,6 +678,11 @@ static struct kvm_x86_ops pvm_x86_ops __initdata = {
.vcpu_load = pvm_vcpu_load,
.vcpu_put = pvm_vcpu_put,
+ .load_mmu_pgd = pvm_load_mmu_pgd,
+
+ .vcpu_pre_run = pvm_vcpu_pre_run,
+ .vcpu_run = pvm_vcpu_run,
+
.vcpu_after_set_cpuid = pvm_vcpu_after_set_cpuid,
.sched_in = pvm_sched_in,
diff --git a/arch/x86/kvm/pvm/pvm.h b/arch/x86/kvm/pvm/pvm.h
index 6584314487bc..349f4eac98ec 100644
--- a/arch/x86/kvm/pvm/pvm.h
+++ b/arch/x86/kvm/pvm/pvm.h
@@ -28,10 +28,15 @@ int host_mmu_init(void);
struct vcpu_pvm {
struct kvm_vcpu vcpu;
+ // guest rflags, turned into hw rflags when in switcher
+ unsigned long rflags;
+
unsigned long switch_flags;
u16 host_ds_sel, host_es_sel;
+ u32 exit_vector;
+ u32 exit_error_code;
u32 hw_cs, hw_ss;
int loaded_cpu_state;
--
2.19.1.6.gb485710b
next prev parent reply other threads:[~2024-02-26 14:35 UTC|newest]
Thread overview: 82+ messages / expand[flat|nested] mbox.gz Atom feed top
2024-02-26 14:35 [RFC PATCH 00/73] KVM: x86/PVM: Introduce a new hypervisor Lai Jiangshan
2024-02-26 14:35 ` [RFC PATCH 01/73] KVM: Documentation: Add the specification for PVM Lai Jiangshan
2024-02-26 14:35 ` [RFC PATCH 02/73] x86/ABI/PVM: Add PVM-specific ABI header file Lai Jiangshan
2024-02-26 14:35 ` [RFC PATCH 03/73] x86/entry: Implement switcher for PVM VM enter/exit Lai Jiangshan
2024-02-26 14:35 ` [RFC PATCH 04/73] x86/entry: Implement direct switching for the switcher Lai Jiangshan
2024-02-26 14:35 ` [RFC PATCH 05/73] KVM: x86: Set 'vcpu->arch.exception.injected' as true before vendor callback Lai Jiangshan
2024-02-26 14:35 ` [RFC PATCH 06/73] KVM: x86: Move VMX interrupt/nmi handling into kvm.ko Lai Jiangshan
2024-02-26 14:35 ` [RFC PATCH 07/73] KVM: x86/mmu: Adapt shadow MMU for PVM Lai Jiangshan
2024-02-26 14:35 ` [RFC PATCH 08/73] KVM: x86: Allow hypercall handling to not skip the instruction Lai Jiangshan
2024-02-26 14:35 ` [RFC PATCH 09/73] KVM: x86: Add PVM virtual MSRs into emulated_msrs_all[] Lai Jiangshan
2024-02-26 14:35 ` [RFC PATCH 10/73] KVM: x86: Introduce vendor feature to expose vendor-specific CPUID Lai Jiangshan
2024-02-26 14:35 ` [RFC PATCH 11/73] KVM: x86: Implement gpc refresh for guest usage Lai Jiangshan
2024-02-26 14:35 ` [RFC PATCH 12/73] KVM: x86: Add NR_VCPU_SREG in SREG enum Lai Jiangshan
2024-02-26 14:35 ` [RFC PATCH 13/73] KVM: x86/emulator: Reinject #GP if instruction emulation failed for PVM Lai Jiangshan
2024-02-26 14:35 ` [RFC PATCH 14/73] KVM: x86: Create stubs for PVM module as a new vendor Lai Jiangshan
2024-02-26 14:35 ` [RFC PATCH 15/73] mm/vmalloc: Add a helper to reserve a contiguous and aligned kernel virtual area Lai Jiangshan
2024-02-27 14:56 ` Christoph Hellwig
2024-02-27 17:07 ` Lai Jiangshan
2024-02-26 14:35 ` [RFC PATCH 16/73] KVM: x86/PVM: Implement host mmu initialization Lai Jiangshan
2024-02-26 14:35 ` [RFC PATCH 17/73] KVM: x86/PVM: Implement module initialization related callbacks Lai Jiangshan
2024-02-26 14:35 ` [RFC PATCH 18/73] KVM: x86/PVM: Implement VM/VCPU " Lai Jiangshan
2024-02-26 14:35 ` [RFC PATCH 19/73] x86/entry: Export 32-bit ignore syscall entry and __ia32_enabled variable Lai Jiangshan
2024-02-26 14:35 ` [RFC PATCH 20/73] KVM: x86/PVM: Implement vcpu_load()/vcpu_put() related callbacks Lai Jiangshan
2024-02-26 14:35 ` Lai Jiangshan [this message]
2024-02-26 14:35 ` [RFC PATCH 22/73] KVM: x86/PVM: Handle some VM exits before enable interrupts Lai Jiangshan
2024-02-26 14:35 ` [RFC PATCH 23/73] KVM: x86/PVM: Handle event handling related MSR read/write operation Lai Jiangshan
2024-02-26 14:35 ` [RFC PATCH 24/73] KVM: x86/PVM: Introduce PVM mode switching Lai Jiangshan
2024-02-26 14:35 ` [RFC PATCH 25/73] KVM: x86/PVM: Implement APIC emulation related callbacks Lai Jiangshan
2024-02-26 14:35 ` [RFC PATCH 26/73] KVM: x86/PVM: Implement event delivery flags " Lai Jiangshan
2024-02-26 14:35 ` [RFC PATCH 27/73] KVM: x86/PVM: Implement event injection " Lai Jiangshan
2024-02-26 14:35 ` [RFC PATCH 28/73] KVM: x86/PVM: Handle syscall from user mode Lai Jiangshan
2024-02-26 14:35 ` [RFC PATCH 29/73] KVM: x86/PVM: Implement allowed range checking for #PF Lai Jiangshan
2024-02-26 14:35 ` [RFC PATCH 30/73] KVM: x86/PVM: Implement segment related callbacks Lai Jiangshan
2024-02-26 14:35 ` [RFC PATCH 31/73] KVM: x86/PVM: Implement instruction emulation for #UD and #GP Lai Jiangshan
2024-02-26 14:35 ` [RFC PATCH 32/73] KVM: x86/PVM: Enable guest debugging functions Lai Jiangshan
2024-02-26 14:35 ` [RFC PATCH 33/73] KVM: x86/PVM: Handle VM-exit due to hardware exceptions Lai Jiangshan
2024-02-26 14:35 ` [RFC PATCH 34/73] KVM: x86/PVM: Handle ERETU/ERETS synthetic instruction Lai Jiangshan
2024-02-26 14:35 ` [RFC PATCH 35/73] KVM: x86/PVM: Handle PVM_SYNTHETIC_CPUID " Lai Jiangshan
2024-02-26 14:35 ` [RFC PATCH 36/73] KVM: x86/PVM: Handle KVM hypercall Lai Jiangshan
2024-02-26 14:35 ` [RFC PATCH 37/73] KVM: x86/PVM: Use host PCID to reduce guest TLB flushing Lai Jiangshan
2024-02-26 14:35 ` [RFC PATCH 38/73] KVM: x86/PVM: Handle hypercalls for privilege instruction emulation Lai Jiangshan
2024-02-26 14:35 ` [RFC PATCH 39/73] KVM: x86/PVM: Handle hypercall for CR3 switching Lai Jiangshan
2024-02-26 14:35 ` [RFC PATCH 40/73] KVM: x86/PVM: Handle hypercall for loading GS selector Lai Jiangshan
2024-02-26 14:35 ` [RFC PATCH 41/73] KVM: x86/PVM: Allow to load guest TLS in host GDT Lai Jiangshan
2024-02-26 14:35 ` [RFC PATCH 42/73] KVM: x86/PVM: Support for kvm_exit() tracepoint Lai Jiangshan
2024-02-26 14:36 ` [RFC PATCH 43/73] KVM: x86/PVM: Enable direct switching Lai Jiangshan
2024-02-26 14:36 ` [RFC PATCH 44/73] KVM: x86/PVM: Implement TSC related callbacks Lai Jiangshan
2024-02-26 14:36 ` [RFC PATCH 45/73] KVM: x86/PVM: Add dummy PMU " Lai Jiangshan
2024-02-26 14:36 ` [RFC PATCH 46/73] KVM: x86/PVM: Support for CPUID faulting Lai Jiangshan
2024-02-26 14:36 ` [RFC PATCH 47/73] KVM: x86/PVM: Handle the left supported MSRs in msrs_to_save_base[] Lai Jiangshan
2024-02-26 14:36 ` [RFC PATCH 48/73] KVM: x86/PVM: Implement system registers setting callbacks Lai Jiangshan
2024-02-26 14:36 ` [RFC PATCH 49/73] KVM: x86/PVM: Implement emulation for non-PVM mode Lai Jiangshan
2024-02-26 14:36 ` [RFC PATCH 50/73] x86/tools/relocs: Cleanup cmdline options Lai Jiangshan
2024-02-26 14:36 ` [RFC PATCH 51/73] x86/tools/relocs: Append relocations into input file Lai Jiangshan
2024-02-26 14:36 ` [RFC PATCH 52/73] x86/boot: Allow to do relocation for uncompressed kernel Lai Jiangshan
2024-02-26 14:36 ` [RFC PATCH 53/73] x86/pvm: Add Kconfig option and the CPU feature bit for PVM guest Lai Jiangshan
2024-02-26 14:36 ` [RFC PATCH 54/73] x86/pvm: Detect PVM hypervisor support Lai Jiangshan
2024-02-26 14:36 ` [RFC PATCH 55/73] x86/pvm: Relocate kernel image to specific virtual address range Lai Jiangshan
2024-02-26 14:36 ` [RFC PATCH 56/73] x86/pvm: Relocate kernel image early in PVH entry Lai Jiangshan
2024-02-26 14:36 ` [RFC PATCH 57/73] x86/pvm: Make cpu entry area and vmalloc area variable Lai Jiangshan
2024-02-26 14:36 ` [RFC PATCH 58/73] x86/pvm: Relocate kernel address space layout Lai Jiangshan
2024-02-26 14:36 ` [RFC PATCH 59/73] x86/pti: Force enabling KPTI for PVM guest Lai Jiangshan
2024-02-26 14:36 ` [RFC PATCH 60/73] x86/pvm: Add event entry/exit and dispatch code Lai Jiangshan
2024-02-26 14:36 ` [RFC PATCH 61/73] x86/pvm: Allow to install a system interrupt handler Lai Jiangshan
2024-02-26 14:36 ` [RFC PATCH 62/73] x86/pvm: Add early kernel event entry and dispatch code Lai Jiangshan
2024-02-26 14:36 ` [RFC PATCH 63/73] x86/pvm: Add hypercall support Lai Jiangshan
2024-02-26 14:36 ` [RFC PATCH 64/73] x86/pvm: Enable PVM event delivery Lai Jiangshan
2024-02-26 14:36 ` [RFC PATCH 65/73] x86/kvm: Patch KVM hypercall as PVM hypercall Lai Jiangshan
2024-02-26 14:36 ` [RFC PATCH 66/73] x86/pvm: Use new cpu feature to describe XENPV and PVM Lai Jiangshan
2024-02-26 14:36 ` [RFC PATCH 67/73] x86/pvm: Implement cpu related PVOPS Lai Jiangshan
2024-02-26 14:36 ` [RFC PATCH 68/73] x86/pvm: Implement irq " Lai Jiangshan
2024-02-26 14:36 ` [RFC PATCH 69/73] x86/pvm: Implement mmu " Lai Jiangshan
2024-02-26 14:36 ` [RFC PATCH 70/73] x86/pvm: Don't use SWAPGS for gsbase read/write Lai Jiangshan
2024-02-26 14:36 ` [RFC PATCH 71/73] x86/pvm: Adapt pushf/popf in this_cpu_cmpxchg16b_emu() Lai Jiangshan
2024-02-26 14:36 ` [RFC PATCH 72/73] x86/pvm: Use RDTSCP as default in vdso_read_cpunode() Lai Jiangshan
2024-02-26 14:36 ` [RFC PATCH 73/73] x86/pvm: Disable some unsupported syscalls and features Lai Jiangshan
2024-02-26 14:49 ` [RFC PATCH 00/73] KVM: x86/PVM: Introduce a new hypervisor Paolo Bonzini
2024-02-27 17:27 ` Sean Christopherson
2024-02-29 9:33 ` David Woodhouse
2024-03-01 14:00 ` Lai Jiangshan
2024-02-29 14:55 ` Lai Jiangshan
2024-03-06 11:05 ` Like Xu
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20240226143630.33643-22-jiangshanlai@gmail.com \
--to=jiangshanlai@gmail.com \
--cc=bp@alien8.de \
--cc=dave.hansen@linux.intel.com \
--cc=houwenlong.hwl@antgroup.com \
--cc=hpa@zytor.com \
--cc=jgross@suse.com \
--cc=jiangshan.ljs@antgroup.com \
--cc=keescook@chromium.org \
--cc=kvm@vger.kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=mingo@redhat.com \
--cc=pbonzini@redhat.com \
--cc=peterz@infradead.org \
--cc=seanjc@google.com \
--cc=tglx@linutronix.de \
--cc=torvalds@linux-foundation.org \
--cc=x86@kernel.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox