From: Ladi Prosek <lprosek@redhat.com>
To: kvm@vger.kernel.org
Cc: rkrcmar@redhat.com, pbonzini@redhat.com
Subject: [PATCH v3 1/6] KVM: x86: introduce ISA specific SMM entry/exit callbacks
Date: Mon, 25 Sep 2017 10:08:59 +0200 [thread overview]
Message-ID: <20170925080904.24850-2-lprosek@redhat.com> (raw)
In-Reply-To: <20170925080904.24850-1-lprosek@redhat.com>
Entering and exiting SMM may require ISA-specific handling under certain
circumstances. This commit adds two new callbacks with empty implementations.
Actual functionality will be added in following commits.
* prep_enter_smm() is to be called when injecting an SMI, before any
SMM-related vcpu state has been changed
* post_leave_smm() is to be called when emulating the RSM instruction,
after all SMM-related vcpu state has been restored
Signed-off-by: Ladi Prosek <lprosek@redhat.com>
---
arch/x86/include/asm/kvm_emulate.h | 2 ++
arch/x86/include/asm/kvm_host.h | 3 +++
arch/x86/kvm/svm.c | 15 +++++++++++++++
arch/x86/kvm/vmx.c | 15 +++++++++++++++
arch/x86/kvm/x86.c | 13 ++++++++++++-
5 files changed, 47 insertions(+), 1 deletion(-)
diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
index fa2558e12024..99f83367b92c 100644
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -289,6 +289,7 @@ struct x86_emulate_ctxt {
/* Register state before/after emulation. */
unsigned long eflags;
unsigned long eip; /* eip before instruction emulation */
+ u64 smbase; /* smbase before instruction emulation */
/* Emulated execution mode, represented by an X86EMUL_MODE value. */
enum x86emul_mode mode;
@@ -298,6 +299,7 @@ struct x86_emulate_ctxt {
bool perm_ok; /* do not check permissions if true */
bool ud; /* inject an #UD if host doesn't support insn */
bool tf; /* TF value before instruction (after for syscall/sysret) */
+ bool left_smm; /* post_leave_smm() needs to be called after emulation */
bool have_exception;
struct x86_exception exception;
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index c73e493adf07..596f2e826327 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1061,6 +1061,9 @@ struct kvm_x86_ops {
void (*cancel_hv_timer)(struct kvm_vcpu *vcpu);
void (*setup_mce)(struct kvm_vcpu *vcpu);
+
+ int (*prep_enter_smm)(struct kvm_vcpu *vcpu, char *smstate);
+ int (*post_leave_smm)(struct kvm_vcpu *vcpu, u64 smbase);
};
struct kvm_arch_async_pf {
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 0e68f0b3cbf7..cdbbf9537111 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -5393,6 +5393,18 @@ static void svm_setup_mce(struct kvm_vcpu *vcpu)
vcpu->arch.mcg_cap &= 0x1ff;
}
+static int svm_prep_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
+{
+ /* TODO: Implement */
+ return 0;
+}
+
+static int svm_post_leave_smm(struct kvm_vcpu *vcpu, u64 smbase)
+{
+ /* TODO: Implement */
+ return 0;
+}
+
static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
.cpu_has_kvm_support = has_svm,
.disabled_by_bios = is_disabled,
@@ -5503,6 +5515,9 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
.deliver_posted_interrupt = svm_deliver_avic_intr,
.update_pi_irte = svm_update_pi_irte,
.setup_mce = svm_setup_mce,
+
+ .prep_enter_smm = svm_prep_enter_smm,
+ .post_leave_smm = svm_post_leave_smm,
};
static int __init svm_init(void)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index c83d28b0ab05..10f5526f1069 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -11946,6 +11946,18 @@ static void vmx_setup_mce(struct kvm_vcpu *vcpu)
~FEATURE_CONTROL_LMCE;
}
+static int vmx_prep_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
+{
+ /* TODO: Implement */
+ return 0;
+}
+
+static int vmx_post_leave_smm(struct kvm_vcpu *vcpu, u64 smbase)
+{
+ /* TODO: Implement */
+ return 0;
+}
+
static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
.cpu_has_kvm_support = cpu_has_kvm_support,
.disabled_by_bios = vmx_disabled_by_bios,
@@ -12071,6 +12083,9 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
#endif
.setup_mce = vmx_setup_mce,
+
+ .prep_enter_smm = vmx_prep_enter_smm,
+ .post_leave_smm = vmx_post_leave_smm,
};
static int __init vmx_init(void)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index cd17b7d9a107..8007a6ec2e5b 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5272,7 +5272,11 @@ static unsigned emulator_get_hflags(struct x86_emulate_ctxt *ctxt)
static void emulator_set_hflags(struct x86_emulate_ctxt *ctxt, unsigned emul_flags)
{
- kvm_set_hflags(emul_to_vcpu(ctxt), emul_flags);
+ struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
+
+ if ((vcpu->arch.hflags & HF_SMM_MASK) && !(emul_flags & HF_SMM_MASK))
+ ctxt->left_smm = true;
+ kvm_set_hflags(vcpu, emul_flags);
}
static const struct x86_emulate_ops emulate_ops = {
@@ -5692,6 +5696,8 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu,
ctxt->have_exception = false;
ctxt->exception.vector = -1;
ctxt->perm_ok = false;
+ ctxt->smbase = ctxt->ops->get_smbase(ctxt);
+ ctxt->left_smm = false;
ctxt->ud = emulation_type & EMULTYPE_TRAP_UD;
@@ -5779,6 +5785,8 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu,
if (!ctxt->have_exception ||
exception_type(ctxt->exception.vector) == EXCPT_TRAP)
__kvm_set_rflags(vcpu, ctxt->eflags);
+ if (r == EMULATE_DONE && ctxt->left_smm)
+ kvm_x86_ops->post_leave_smm(vcpu, ctxt->smbase);
/*
* For STI, interrupts are shadowed; so KVM_REQ_EVENT will
@@ -6643,6 +6651,9 @@ static void enter_smm(struct kvm_vcpu *vcpu)
trace_kvm_enter_smm(vcpu->vcpu_id, vcpu->arch.smbase, true);
vcpu->arch.hflags |= HF_SMM_MASK;
memset(buf, 0, 512);
+
+ kvm_x86_ops->prep_enter_smm(vcpu, buf);
+
if (guest_cpuid_has(vcpu, X86_FEATURE_LM))
enter_smm_save_state_64(vcpu, buf);
else
--
2.13.5
next prev parent reply other threads:[~2017-09-25 8:09 UTC|newest]
Thread overview: 11+ messages / expand[flat|nested] mbox.gz Atom feed top
2017-09-25 8:08 [PATCH v3 0/6] KVM: nested virt SMM fixes Ladi Prosek
2017-09-25 8:08 ` Ladi Prosek [this message]
2017-09-25 8:09 ` [PATCH v3 2/6] KVM: x86: introduce ISA specific smi_allowed callback Ladi Prosek
2017-09-25 8:09 ` [PATCH v3 3/6] KVM: nVMX: fix SMI injection in guest mode Ladi Prosek
2017-09-25 8:09 ` [PATCH v3 4/6] KVM: nVMX: treat CR4.VMXE as reserved in SMM Ladi Prosek
2017-09-25 8:09 ` [PATCH v3 5/6] KVM: nSVM: refactor nested_svm_vmrun Ladi Prosek
2017-09-25 8:09 ` [PATCH v3 6/6] KVM: nSVM: fix SMI injection in guest mode Ladi Prosek
2017-10-03 19:53 ` Radim Krčmář
2017-10-04 10:10 ` Ladi Prosek
2017-10-04 14:42 ` Radim Krčmář
2017-10-10 8:03 ` Ladi Prosek
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20170925080904.24850-2-lprosek@redhat.com \
--to=lprosek@redhat.com \
--cc=kvm@vger.kernel.org \
--cc=pbonzini@redhat.com \
--cc=rkrcmar@redhat.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox