public inbox for kvm@vger.kernel.org
From: Maxim Levitsky <mlevitsk@redhat.com>
To: kvm@vger.kernel.org
Cc: Thomas Gleixner <tglx@linutronix.de>,
	Yang Zhong <yang.zhong@intel.com>,
	x86@kernel.org, Jim Mattson <jmattson@google.com>,
	Vitaly Kuznetsov <vkuznets@redhat.com>,
	Paolo Bonzini <pbonzini@redhat.com>,
	Sean Christopherson <seanjc@google.com>,
	Wanpeng Li <wanpengli@tencent.com>, Shuah Khan <shuah@kernel.org>,
	Guang Zeng <guang.zeng@intel.com>, Joerg Roedel <joro@8bytes.org>,
	Maxim Levitsky <mlevitsk@redhat.com>,
	linux-kernel@vger.kernel.org,
	Dave Hansen <dave.hansen@linux.intel.com>,
	Ingo Molnar <mingo@redhat.com>,
	linux-kselftest@vger.kernel.org,
	Kees Cook <keescook@chromium.org>,
	"H. Peter Anvin" <hpa@zytor.com>, Wei Wang <wei.w.wang@intel.com>,
	Borislav Petkov <bp@alien8.de>
Subject: [PATCH RESEND v4 06/23] KVM: x86: compile out vendor-specific code if SMM is disabled
Date: Tue, 25 Oct 2022 15:47:24 +0300
Message-ID: <20221025124741.228045-7-mlevitsk@redhat.com>
In-Reply-To: <20221025124741.228045-1-mlevitsk@redhat.com>

From: Paolo Bonzini <pbonzini@redhat.com>

Vendor-specific code that deals with SMI injection and saving/restoring
SMM state is not needed if CONFIG_KVM_SMM is disabled, so remove the
four callbacks smi_allowed, enter_smm, leave_smm and enable_smi_window.
The users in svm/nested.c and x86.c also have to be compiled out; the
amount of #ifdef'ed code is small and it's not worth moving it to
smm.c.

enter_smm is now used only within #ifdef CONFIG_KVM_SMM, and the stub
can therefore be removed.
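
A minimal, self-contained sketch of the pattern (hypothetical
CONFIG_FEATURE_X and type names, not the actual KVM code): the
optional members are guarded in the ops table, and every caller is
guarded the same way, so a forgotten user becomes a build error
rather than a runtime NULL dereference:

  /* hypothetical vendor-ops table */
  struct vendor_ops {
          void (*setup)(void);            /* always present */
  #ifdef CONFIG_FEATURE_X
          int (*x_allowed)(void);         /* compiled out with the feature */
  #endif
  };

  void handle_event(const struct vendor_ops *ops)
  {
  #ifdef CONFIG_FEATURE_X
          if (ops->x_allowed()) {
                  /* ... deliver the event ... */
          }
  #endif
  }

In KVM specifically, the KVM_X86_OP() entries in kvm-x86-ops.h also
generate the static-call declarations, so guarding them means an
unguarded static_call(kvm_x86_smi_allowed)() call site fails to build
once CONFIG_KVM_SMM=n.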

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 arch/x86/include/asm/kvm-x86-ops.h | 2 ++
 arch/x86/include/asm/kvm_host.h    | 2 ++
 arch/x86/kvm/smm.h                 | 1 -
 arch/x86/kvm/svm/nested.c          | 2 ++
 arch/x86/kvm/svm/svm.c             | 4 ++++
 arch/x86/kvm/vmx/vmx.c             | 4 ++++
 arch/x86/kvm/x86.c                 | 4 ++++
 7 files changed, 18 insertions(+), 1 deletion(-)

diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h
index 82ba4a564e5875..ea58e67e9a6701 100644
--- a/arch/x86/include/asm/kvm-x86-ops.h
+++ b/arch/x86/include/asm/kvm-x86-ops.h
@@ -110,10 +110,12 @@ KVM_X86_OP_OPTIONAL_RET0(dy_apicv_has_pending_interrupt)
 KVM_X86_OP_OPTIONAL(set_hv_timer)
 KVM_X86_OP_OPTIONAL(cancel_hv_timer)
 KVM_X86_OP(setup_mce)
+#ifdef CONFIG_KVM_SMM
 KVM_X86_OP(smi_allowed)
 KVM_X86_OP(enter_smm)
 KVM_X86_OP(leave_smm)
 KVM_X86_OP(enable_smi_window)
+#endif
 KVM_X86_OP_OPTIONAL(mem_enc_ioctl)
 KVM_X86_OP_OPTIONAL(mem_enc_register_region)
 KVM_X86_OP_OPTIONAL(mem_enc_unregister_region)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 4afed04fcc8241..541ed36cbb82f8 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1607,10 +1607,12 @@ struct kvm_x86_ops {
 
 	void (*setup_mce)(struct kvm_vcpu *vcpu);
 
+#ifdef CONFIG_KVM_SMM
 	int (*smi_allowed)(struct kvm_vcpu *vcpu, bool for_injection);
 	int (*enter_smm)(struct kvm_vcpu *vcpu, char *smstate);
 	int (*leave_smm)(struct kvm_vcpu *vcpu, const char *smstate);
 	void (*enable_smi_window)(struct kvm_vcpu *vcpu);
+#endif
 
 	int (*mem_enc_ioctl)(struct kvm *kvm, void __user *argp);
 	int (*mem_enc_register_region)(struct kvm *kvm, struct kvm_enc_region *argp);
diff --git a/arch/x86/kvm/smm.h b/arch/x86/kvm/smm.h
index 4c699fee449296..7ccce6b655cacf 100644
--- a/arch/x86/kvm/smm.h
+++ b/arch/x86/kvm/smm.h
@@ -28,7 +28,6 @@ void process_smi(struct kvm_vcpu *vcpu);
 static inline int kvm_inject_smi(struct kvm_vcpu *vcpu) { return -ENOTTY; }
 static inline bool is_smm(struct kvm_vcpu *vcpu) { return false; }
 static inline void kvm_smm_changed(struct kvm_vcpu *vcpu, bool in_smm) { WARN_ON_ONCE(1); }
-static inline void enter_smm(struct kvm_vcpu *vcpu) { WARN_ON_ONCE(1); }
 static inline void process_smi(struct kvm_vcpu *vcpu) { WARN_ON_ONCE(1); }
 
 /*
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index cc0fd75f7cbab5..b258d6988f5dde 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -1378,6 +1378,7 @@ static int svm_check_nested_events(struct kvm_vcpu *vcpu)
 		return 0;
 	}
 
+#ifdef CONFIG_KVM_SMM
 	if (vcpu->arch.smi_pending && !svm_smi_blocked(vcpu)) {
 		if (block_nested_events)
 			return -EBUSY;
@@ -1386,6 +1387,7 @@ static int svm_check_nested_events(struct kvm_vcpu *vcpu)
 		nested_svm_simple_vmexit(svm, SVM_EXIT_SMI);
 		return 0;
 	}
+#endif
 
 	if (vcpu->arch.nmi_pending && !svm_nmi_blocked(vcpu)) {
 		if (block_nested_events)
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 6f7ceb35d2ff08..2200b8aa727398 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -4408,6 +4408,7 @@ static void svm_setup_mce(struct kvm_vcpu *vcpu)
 	vcpu->arch.mcg_cap &= 0x1ff;
 }
 
+#ifdef CONFIG_KVM_SMM
 bool svm_smi_blocked(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
@@ -4557,6 +4558,7 @@ static void svm_enable_smi_window(struct kvm_vcpu *vcpu)
 		/* We must be in SMM; RSM will cause a vmexit anyway.  */
 	}
 }
+#endif
 
 static bool svm_can_emulate_instruction(struct kvm_vcpu *vcpu, int emul_type,
 					void *insn, int insn_len)
@@ -4832,10 +4834,12 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
 	.pi_update_irte = avic_pi_update_irte,
 	.setup_mce = svm_setup_mce,
 
+#ifdef CONFIG_KVM_SMM
 	.smi_allowed = svm_smi_allowed,
 	.enter_smm = svm_enter_smm,
 	.leave_smm = svm_leave_smm,
 	.enable_smi_window = svm_enable_smi_window,
+#endif
 
 	.mem_enc_ioctl = sev_mem_enc_ioctl,
 	.mem_enc_register_region = sev_mem_enc_register_region,
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index b22330a15adb63..107fc035c91b80 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -7905,6 +7905,7 @@ static void vmx_setup_mce(struct kvm_vcpu *vcpu)
 			~FEAT_CTL_LMCE_ENABLED;
 }
 
+#ifdef CONFIG_KVM_SMM
 static int vmx_smi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
 {
 	/* we need a nested vmexit to enter SMM, postpone if run is pending */
@@ -7959,6 +7960,7 @@ static void vmx_enable_smi_window(struct kvm_vcpu *vcpu)
 {
 	/* RSM will cause a vmexit anyway.  */
 }
+#endif
 
 static bool vmx_apic_init_signal_blocked(struct kvm_vcpu *vcpu)
 {
@@ -8126,10 +8128,12 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
 
 	.setup_mce = vmx_setup_mce,
 
+#ifdef CONFIG_KVM_SMM
 	.smi_allowed = vmx_smi_allowed,
 	.enter_smm = vmx_enter_smm,
 	.leave_smm = vmx_leave_smm,
 	.enable_smi_window = vmx_enable_smi_window,
+#endif
 
 	.can_emulate_instruction = vmx_can_emulate_instruction,
 	.apic_init_signal_blocked = vmx_apic_init_signal_blocked,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 6c81d3a606e257..8394cd62c2854c 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -9876,6 +9876,7 @@ static int kvm_check_and_inject_events(struct kvm_vcpu *vcpu,
 	 * in order to make progress and get back here for another iteration.
 	 * The kvm_x86_ops hooks communicate this by returning -EBUSY.
 	 */
+#ifdef CONFIG_KVM_SMM
 	if (vcpu->arch.smi_pending) {
 		r = can_inject ? static_call(kvm_x86_smi_allowed)(vcpu, true) : -EBUSY;
 		if (r < 0)
@@ -9888,6 +9889,7 @@ static int kvm_check_and_inject_events(struct kvm_vcpu *vcpu,
 		} else
 			static_call(kvm_x86_enable_smi_window)(vcpu);
 	}
+#endif
 
 	if (vcpu->arch.nmi_pending) {
 		r = can_inject ? static_call(kvm_x86_nmi_allowed)(vcpu, true) : -EBUSY;
@@ -12517,10 +12519,12 @@ static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
 	     static_call(kvm_x86_nmi_allowed)(vcpu, false)))
 		return true;
 
+#ifdef CONFIG_KVM_SMM
 	if (kvm_test_request(KVM_REQ_SMI, vcpu) ||
 	    (vcpu->arch.smi_pending &&
 	     static_call(kvm_x86_smi_allowed)(vcpu, false)))
 		return true;
+#endif
 
 	if (kvm_arch_interrupt_allowed(vcpu) &&
 	    (kvm_cpu_has_interrupt(vcpu) ||
-- 
2.34.3


Thread overview: 31+ messages
2022-10-25 12:47 [PATCH RESEND v4 00/23] SMM emulation and interrupt shadow fixes Maxim Levitsky
2022-10-25 12:47 ` [PATCH RESEND v4 01/23] KVM: x86: start moving SMM-related functions to new files Maxim Levitsky
2022-10-25 12:47 ` [PATCH RESEND v4 02/23] KVM: x86: move SMM entry to a new file Maxim Levitsky
2022-10-25 12:47 ` [PATCH RESEND v4 03/23] KVM: x86: move SMM exit " Maxim Levitsky
2022-10-25 12:47 ` [PATCH RESEND v4 04/23] KVM: x86: do not go through ctxt->ops when emulating rsm Maxim Levitsky
2022-10-25 12:47 ` [PATCH RESEND v4 05/23] KVM: allow compiling out SMM support Maxim Levitsky
2022-10-25 12:47 ` Maxim Levitsky [this message]
2022-10-25 12:47 ` [PATCH RESEND v4 07/23] KVM: x86: remove SMRAM address space if SMM is not supported Maxim Levitsky
2022-10-25 12:47 ` [PATCH RESEND v4 08/23] KVM: x86: do not define KVM_REQ_SMI if SMM disabled Maxim Levitsky
2022-10-25 12:47 ` [PATCH RESEND v4 09/23] bug: introduce ASSERT_STRUCT_OFFSET Maxim Levitsky
2022-10-25 12:47 ` [PATCH RESEND v4 10/23] KVM: x86: emulator: em_sysexit should update ctxt->mode Maxim Levitsky
2022-10-25 12:47 ` [PATCH RESEND v4 11/23] KVM: x86: emulator: introduce emulator_recalc_and_set_mode Maxim Levitsky
2022-10-25 12:47 ` [PATCH RESEND v4 12/23] KVM: x86: emulator: update the emulation mode after rsm Maxim Levitsky
2022-10-25 12:47 ` [PATCH RESEND v4 13/23] KVM: x86: emulator: update the emulation mode after CR0 write Maxim Levitsky
2022-10-25 12:47 ` [PATCH RESEND v4 14/23] KVM: x86: smm: number of GPRs in the SMRAM image depends on the image format Maxim Levitsky
2022-10-25 12:47 ` [PATCH RESEND v4 15/23] KVM: x86: smm: check for failures on smm entry Maxim Levitsky
2022-10-25 12:47 ` [PATCH RESEND v4 16/23] KVM: x86: smm: add structs for KVM's smram layout Maxim Levitsky
2022-10-28 13:34   ` Paolo Bonzini
2022-10-25 12:47 ` [PATCH RESEND v4 17/23] KVM: x86: smm: use smram structs in the common code Maxim Levitsky
2022-10-25 12:47 ` [PATCH RESEND v4 18/23] KVM: x86: smm: use smram struct for 32 bit smram load/restore Maxim Levitsky
2022-10-25 12:47 ` [PATCH RESEND v4 19/23] KVM: x86: smm: use smram struct for 64 " Maxim Levitsky
2022-10-25 12:47 ` [PATCH RESEND v4 20/23] KVM: svm: drop explicit return value of kvm_vcpu_map Maxim Levitsky
2022-10-25 12:47 ` [PATCH RESEND v4 21/23] KVM: x86: SVM: use smram structs Maxim Levitsky
2022-10-25 12:47 ` [PATCH RESEND v4 22/23] KVM: x86: SVM: don't save SVM state to SMRAM when VM is not long mode capable Maxim Levitsky
2022-10-25 12:47 ` [PATCH RESEND v4 23/23] KVM: x86: smm: preserve interrupt shadow in SMRAM Maxim Levitsky
2022-10-28 10:35   ` Paolo Bonzini
2022-10-30  8:23     ` Maxim Levitsky
2022-10-27 16:49 ` [PATCH RESEND v4 00/23] SMM emulation and interrupt shadow fixes Paolo Bonzini
2022-10-27 17:06   ` Maxim Levitsky
2022-10-28 10:36     ` Paolo Bonzini
2022-10-28 22:42       ` Sean Christopherson
