From: Binbin Wu <binbin.wu@linux.intel.com>
To: pbonzini@redhat.com, seanjc@google.com, kvm@vger.kernel.org
Cc: rick.p.edgecombe@intel.com, kai.huang@intel.com,
adrian.hunter@intel.com, reinette.chatre@intel.com,
xiaoyao.li@intel.com, tony.lindgren@linux.intel.com,
isaku.yamahata@intel.com, yan.y.zhao@intel.com,
chao.gao@intel.com, linux-kernel@vger.kernel.org,
binbin.wu@linux.intel.com
Subject: [PATCH 14/16] KVM: VMX: Move NMI/exception handler to common helper
Date: Mon, 9 Dec 2024 09:07:28 +0800 [thread overview]
Message-ID: <20241209010734.3543481-15-binbin.wu@linux.intel.com> (raw)
In-Reply-To: <20241209010734.3543481-1-binbin.wu@linux.intel.com>
From: Sean Christopherson <sean.j.christopherson@intel.com>
TDX handles NMI/exception exits mostly the same as the VMX case. The
difference is how the exit qualification is retrieved. To share the code
with TDX, move the NMI/exception handling code to a common header, common.h.
No functional change intended.
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Isaku Yamahata <isaku.yamahata@intel.com>
Co-developed-by: Binbin Wu <binbin.wu@linux.intel.com>
Signed-off-by: Binbin Wu <binbin.wu@linux.intel.com>
---
TDX interrupts breakout:
- Update change log with suggestions. (Binbin)
- Move the NMI handling code to common header and add a helper
__vmx_handle_nmi() for it. (Binbin)
---
arch/x86/kvm/vmx/common.h | 72 ++++++++++++++++++++++++++++++++++++
arch/x86/kvm/vmx/vmx.c | 77 +++++----------------------------------
2 files changed, 82 insertions(+), 67 deletions(-)
diff --git a/arch/x86/kvm/vmx/common.h b/arch/x86/kvm/vmx/common.h
index a46f15ddeda1..809ced4c6cd8 100644
--- a/arch/x86/kvm/vmx/common.h
+++ b/arch/x86/kvm/vmx/common.h
@@ -4,8 +4,70 @@
#include <linux/kvm_host.h>
+#include <asm/traps.h>
+#include <asm/fred.h>
+
#include "posted_intr.h"
#include "mmu.h"
+#include "vmcs.h"
+#include "x86.h"
+
+extern unsigned long vmx_host_idt_base;
+void vmx_do_interrupt_irqoff(unsigned long entry);
+void vmx_do_nmi_irqoff(void);
+
+static inline void vmx_handle_nm_fault_irqoff(struct kvm_vcpu *vcpu)
+{
+ /*
+ * Save xfd_err to guest_fpu before interrupt is enabled, so the
+ * MSR value is not clobbered by the host activity before the guest
+ * has chance to consume it.
+ *
+ * Do not blindly read xfd_err here, since this exception might
+ * be caused by L1 interception on a platform which doesn't
+ * support xfd at all.
+ *
+ * Do it conditionally upon guest_fpu::xfd. xfd_err matters
+ * only when xfd contains a non-zero value.
+ *
+ * Queuing exception is done in vmx_handle_exit. See comment there.
+ */
+ if (vcpu->arch.guest_fpu.fpstate->xfd)
+ rdmsrl(MSR_IA32_XFD_ERR, vcpu->arch.guest_fpu.xfd_err);
+}
+
+static inline void vmx_handle_exception_irqoff(struct kvm_vcpu *vcpu,
+ u32 intr_info)
+{
+ /* if exit due to PF check for async PF */
+ if (is_page_fault(intr_info))
+ vcpu->arch.apf.host_apf_flags = kvm_read_and_reset_apf_flags();
+ /* if exit due to NM, handle before interrupts are enabled */
+ else if (is_nm_fault(intr_info))
+ vmx_handle_nm_fault_irqoff(vcpu);
+ /* Handle machine checks before interrupts are enabled */
+ else if (is_machine_check(intr_info))
+ kvm_machine_check();
+}
+
+static inline void vmx_handle_external_interrupt_irqoff(struct kvm_vcpu *vcpu,
+ u32 intr_info)
+{
+ unsigned int vector = intr_info & INTR_INFO_VECTOR_MASK;
+
+ if (KVM_BUG(!is_external_intr(intr_info), vcpu->kvm,
+ "unexpected VM-Exit interrupt info: 0x%x", intr_info))
+ return;
+
+ kvm_before_interrupt(vcpu, KVM_HANDLING_IRQ);
+ if (cpu_feature_enabled(X86_FEATURE_FRED))
+ fred_entry_from_kvm(EVENT_TYPE_EXTINT, vector);
+ else
+ vmx_do_interrupt_irqoff(gate_offset((gate_desc *)vmx_host_idt_base + vector));
+ kvm_after_interrupt(vcpu);
+
+ vcpu->arch.at_instruction_boundary = true;
+}
static inline bool vt_is_tdx_private_gpa(struct kvm *kvm, gpa_t gpa)
{
@@ -111,4 +173,14 @@ static inline void __vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu,
kvm_vcpu_trigger_posted_interrupt(vcpu, POSTED_INTR_VECTOR);
}
+static __always_inline void __vmx_handle_nmi(struct kvm_vcpu *vcpu)
+{
+ kvm_before_interrupt(vcpu, KVM_HANDLING_NMI);
+ if (cpu_feature_enabled(X86_FEATURE_FRED))
+ fred_entry_from_kvm(EVENT_TYPE_NMI, NMI_VECTOR);
+ else
+ vmx_do_nmi_irqoff();
+ kvm_after_interrupt(vcpu);
+}
+
#endif /* __KVM_X86_VMX_COMMON_H */
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 75432e1c9f7f..d2f926d85c3e 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -527,7 +527,7 @@ static const struct kvm_vmx_segment_field {
};
-static unsigned long host_idt_base;
+unsigned long vmx_host_idt_base;
#if IS_ENABLED(CONFIG_HYPERV)
static bool __read_mostly enlightened_vmcs = true;
@@ -4290,7 +4290,7 @@ void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS); /* 22.2.4 */
vmcs_write16(HOST_TR_SELECTOR, GDT_ENTRY_TSS*8); /* 22.2.4 */
- vmcs_writel(HOST_IDTR_BASE, host_idt_base); /* 22.2.4 */
+ vmcs_writel(HOST_IDTR_BASE, vmx_host_idt_base); /* 22.2.4 */
vmcs_writel(HOST_RIP, (unsigned long)vmx_vmexit); /* 22.2.5 */
@@ -5160,7 +5160,7 @@ static int handle_exception_nmi(struct kvm_vcpu *vcpu)
intr_info = vmx_get_intr_info(vcpu);
/*
- * Machine checks are handled by handle_exception_irqoff(), or by
+ * Machine checks are handled by vmx_handle_exception_irqoff(), or by
* vmx_vcpu_run() if a #MC occurs on VM-Entry. NMIs are handled by
* vmx_vcpu_enter_exit().
*/
@@ -5168,7 +5168,7 @@ static int handle_exception_nmi(struct kvm_vcpu *vcpu)
return 1;
/*
- * Queue the exception here instead of in handle_nm_fault_irqoff().
+ * Queue the exception here instead of in vmx_handle_nm_fault_irqoff().
* This ensures the nested_vmx check is not skipped so vmexit can
* be reflected to L1 (when it intercepts #NM) before reaching this
* point.
@@ -6887,58 +6887,6 @@ void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
void vmx_do_interrupt_irqoff(unsigned long entry);
void vmx_do_nmi_irqoff(void);
-static void handle_nm_fault_irqoff(struct kvm_vcpu *vcpu)
-{
- /*
- * Save xfd_err to guest_fpu before interrupt is enabled, so the
- * MSR value is not clobbered by the host activity before the guest
- * has chance to consume it.
- *
- * Do not blindly read xfd_err here, since this exception might
- * be caused by L1 interception on a platform which doesn't
- * support xfd at all.
- *
- * Do it conditionally upon guest_fpu::xfd. xfd_err matters
- * only when xfd contains a non-zero value.
- *
- * Queuing exception is done in vmx_handle_exit. See comment there.
- */
- if (vcpu->arch.guest_fpu.fpstate->xfd)
- rdmsrl(MSR_IA32_XFD_ERR, vcpu->arch.guest_fpu.xfd_err);
-}
-
-static void handle_exception_irqoff(struct kvm_vcpu *vcpu, u32 intr_info)
-{
- /* if exit due to PF check for async PF */
- if (is_page_fault(intr_info))
- vcpu->arch.apf.host_apf_flags = kvm_read_and_reset_apf_flags();
- /* if exit due to NM, handle before interrupts are enabled */
- else if (is_nm_fault(intr_info))
- handle_nm_fault_irqoff(vcpu);
- /* Handle machine checks before interrupts are enabled */
- else if (is_machine_check(intr_info))
- kvm_machine_check();
-}
-
-static void handle_external_interrupt_irqoff(struct kvm_vcpu *vcpu,
- u32 intr_info)
-{
- unsigned int vector = intr_info & INTR_INFO_VECTOR_MASK;
-
- if (KVM_BUG(!is_external_intr(intr_info), vcpu->kvm,
- "unexpected VM-Exit interrupt info: 0x%x", intr_info))
- return;
-
- kvm_before_interrupt(vcpu, KVM_HANDLING_IRQ);
- if (cpu_feature_enabled(X86_FEATURE_FRED))
- fred_entry_from_kvm(EVENT_TYPE_EXTINT, vector);
- else
- vmx_do_interrupt_irqoff(gate_offset((gate_desc *)host_idt_base + vector));
- kvm_after_interrupt(vcpu);
-
- vcpu->arch.at_instruction_boundary = true;
-}
-
void vmx_handle_exit_irqoff(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -6947,9 +6895,10 @@ void vmx_handle_exit_irqoff(struct kvm_vcpu *vcpu)
return;
if (vmx->exit_reason.basic == EXIT_REASON_EXTERNAL_INTERRUPT)
- handle_external_interrupt_irqoff(vcpu, vmx_get_intr_info(vcpu));
+ vmx_handle_external_interrupt_irqoff(vcpu,
+ vmx_get_intr_info(vcpu));
else if (vmx->exit_reason.basic == EXIT_REASON_EXCEPTION_NMI)
- handle_exception_irqoff(vcpu, vmx_get_intr_info(vcpu));
+ vmx_handle_exception_irqoff(vcpu, vmx_get_intr_info(vcpu));
}
/*
@@ -7238,14 +7187,8 @@ static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,
vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
if ((u16)vmx->exit_reason.basic == EXIT_REASON_EXCEPTION_NMI &&
- is_nmi(vmx_get_intr_info(vcpu))) {
- kvm_before_interrupt(vcpu, KVM_HANDLING_NMI);
- if (cpu_feature_enabled(X86_FEATURE_FRED))
- fred_entry_from_kvm(EVENT_TYPE_NMI, NMI_VECTOR);
- else
- vmx_do_nmi_irqoff();
- kvm_after_interrupt(vcpu);
- }
+ is_nmi(vmx_get_intr_info(vcpu)))
+ __vmx_handle_nmi(vcpu);
out:
guest_state_exit_irqoff();
@@ -8309,7 +8252,7 @@ __init int vmx_hardware_setup(void)
int r;
store_idt(&dt);
- host_idt_base = dt.address;
+ vmx_host_idt_base = dt.address;
vmx_setup_user_return_msrs();
--
2.46.0
next prev parent reply other threads:[~2024-12-09 1:06 UTC|newest]
Thread overview: 56+ messages / expand[flat|nested] mbox.gz Atom feed top
2024-12-09 1:07 [PATCH 00/16] KVM: TDX: TDX interrupts Binbin Wu
2024-12-09 1:07 ` [PATCH 01/16] KVM: TDX: Add support for find pending IRQ in a protected local APIC Binbin Wu
2025-01-09 15:38 ` Nikolay Borisov
2025-01-10 5:36 ` Binbin Wu
2024-12-09 1:07 ` [PATCH 02/16] KVM: VMX: Remove use of struct vcpu_vmx from posted_intr.c Binbin Wu
2024-12-09 1:07 ` [PATCH 03/16] KVM: TDX: Disable PI wakeup for IPIv Binbin Wu
2024-12-09 1:07 ` [PATCH 04/16] KVM: VMX: Move posted interrupt delivery code to common header Binbin Wu
2024-12-09 1:07 ` [PATCH 05/16] KVM: TDX: Implement non-NMI interrupt injection Binbin Wu
2024-12-09 1:07 ` [PATCH 06/16] KVM: x86: Assume timer IRQ was injected if APIC state is protected Binbin Wu
2024-12-09 1:07 ` [PATCH 07/16] KVM: TDX: Wait lapic expire when timer IRQ was injected Binbin Wu
2024-12-09 1:07 ` [PATCH 08/16] KVM: TDX: Implement methods to inject NMI Binbin Wu
2024-12-09 1:07 ` [PATCH 09/16] KVM: TDX: Complete interrupts after TD exit Binbin Wu
2024-12-09 1:07 ` [PATCH 10/16] KVM: TDX: Handle SMI request as !CONFIG_KVM_SMM Binbin Wu
2024-12-09 1:07 ` [PATCH 11/16] KVM: TDX: Always block INIT/SIPI Binbin Wu
2025-01-08 7:21 ` Xiaoyao Li
2025-01-08 7:53 ` Binbin Wu
2025-01-08 14:40 ` Sean Christopherson
2025-01-09 2:09 ` Xiaoyao Li
2025-01-09 2:26 ` Binbin Wu
2025-01-09 2:46 ` Huang, Kai
2025-01-09 3:20 ` Binbin Wu
2025-01-09 4:01 ` Huang, Kai
2025-01-09 2:51 ` Huang, Kai
2024-12-09 1:07 ` [PATCH 12/16] KVM: TDX: Inhibit APICv for TDX guest Binbin Wu
2025-01-03 21:59 ` Vishal Annapurve
2025-01-06 1:46 ` Binbin Wu
2025-01-06 22:49 ` Vishal Annapurve
2025-01-06 23:40 ` Sean Christopherson
2025-01-07 3:24 ` Chao Gao
2025-01-07 8:09 ` Binbin Wu
2025-01-07 21:15 ` Sean Christopherson
2025-01-13 2:03 ` Binbin Wu
2025-01-13 2:09 ` Binbin Wu
2025-01-13 17:16 ` Sean Christopherson
2025-01-14 8:20 ` Binbin Wu
2025-01-14 16:59 ` Sean Christopherson
2025-01-16 11:55 ` Huang, Kai
2025-01-16 14:50 ` Sean Christopherson
2025-01-16 20:16 ` Huang, Kai
2025-01-16 22:37 ` Sean Christopherson
2025-01-17 9:53 ` Huang, Kai
2025-01-17 10:46 ` Huang, Kai
2025-01-17 15:08 ` Sean Christopherson
2025-01-17 0:49 ` Binbin Wu
2024-12-09 1:07 ` [PATCH 13/16] KVM: TDX: Add methods to ignore virtual apic related operation Binbin Wu
2025-01-03 22:04 ` Vishal Annapurve
2025-01-06 2:18 ` Binbin Wu
2025-01-22 11:34 ` Paolo Bonzini
2025-01-22 13:59 ` Binbin Wu
2024-12-09 1:07 ` Binbin Wu [this message]
2024-12-09 1:07 ` [PATCH 15/16] KVM: TDX: Handle EXCEPTION_NMI and EXTERNAL_INTERRUPT Binbin Wu
2024-12-09 1:07 ` [PATCH 16/16] KVM: TDX: Handle EXIT_REASON_OTHER_SMI Binbin Wu
2024-12-10 18:24 ` [PATCH 00/16] KVM: TDX: TDX interrupts Paolo Bonzini
2025-01-06 10:51 ` Xiaoyao Li
2025-01-06 20:08 ` Sean Christopherson
2025-01-09 2:44 ` Binbin Wu
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20241209010734.3543481-15-binbin.wu@linux.intel.com \
--to=binbin.wu@linux.intel.com \
--cc=adrian.hunter@intel.com \
--cc=chao.gao@intel.com \
--cc=isaku.yamahata@intel.com \
--cc=kai.huang@intel.com \
--cc=kvm@vger.kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=pbonzini@redhat.com \
--cc=reinette.chatre@intel.com \
--cc=rick.p.edgecombe@intel.com \
--cc=seanjc@google.com \
--cc=tony.lindgren@linux.intel.com \
--cc=xiaoyao.li@intel.com \
--cc=yan.y.zhao@intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).