From: "Chang S. Bae" <chang.seok.bae@intel.com>
To: pbonzini@redhat.com, seanjc@google.com
Cc: kvm@vger.kernel.org, x86@kernel.org,
linux-kernel@vger.kernel.org, chao.gao@intel.com,
chang.seok.bae@intel.com
Subject: [PATCH v4 01/21] KVM: VMX: Macrofy GPR swapping in __vmx_vcpu_run()
Date: Tue, 12 May 2026 01:14:42 +0000 [thread overview]
Message-ID: <20260512011502.53072-2-chang.seok.bae@intel.com> (raw)
In-Reply-To: <20260512011502.53072-1-chang.seok.bae@intel.com>
Convert the repeated register save/restore sequences into macros to
simplify the VM entry code. This also makes it easier to extend the code
to additional registers.
Reuse the previously deprecated macros in inst.h by moving that header into
KVM, and remove the unused macros from it.
No functional change intended.
Suggested-by: Paolo Bonzini <pbonzini@redhat.com>
Cc: x86@kernel.org
Signed-off-by: Chang S. Bae <chang.seok.bae@intel.com>
Link: https://lore.kernel.org/6e67df0e-e5f0-43f5-aa86-22e8b01b75d2@redhat.com
---
V3 -> V4:
* Move inst.h into KVM (Paolo)
* Generalize macros to cover all GPRs (Paolo)
Dependency: Based on Paolo's SPEC_CTRL rework, currently at
https://git.kernel.org/pub/scm/virt/kvm/kvm.git/log/?h=queue
---
arch/x86/{include/asm => kvm}/inst.h | 38 ++++--------
arch/x86/kvm/svm/vmenter.S | 2 -
arch/x86/kvm/vmenter.h | 43 ++++++++++++++
arch/x86/kvm/vmx/vmenter.S | 89 +++++++---------------------
4 files changed, 74 insertions(+), 98 deletions(-)
rename arch/x86/{include/asm => kvm}/inst.h (67%)
diff --git a/arch/x86/include/asm/inst.h b/arch/x86/kvm/inst.h
similarity index 67%
rename from arch/x86/include/asm/inst.h
rename to arch/x86/kvm/inst.h
index e48a00b3311d..3a878850ea20 100644
--- a/arch/x86/include/asm/inst.h
+++ b/arch/x86/kvm/inst.h
@@ -1,19 +1,15 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Generate .byte code for some instructions not supported by old
- * binutils.
+ * Convert register names to their sizes and the indices used when
+ * encoding instructions.
*/
-#ifndef X86_ASM_INST_H
-#define X86_ASM_INST_H
+#ifndef X86_KVM_INST_H
+#define X86_KVM_INST_H
#ifdef __ASSEMBLER__
#define REG_NUM_INVALID 100
-#define REG_TYPE_R32 0
-#define REG_TYPE_R64 1
-#define REG_TYPE_INVALID 100
-
.macro R32_NUM opd r32
\opd = REG_NUM_INVALID
.ifc \r32,%eax
@@ -122,27 +118,15 @@
#endif
.endm
- .macro REG_TYPE type reg
- R32_NUM reg_type_r32 \reg
- R64_NUM reg_type_r64 \reg
- .if reg_type_r64 <> REG_NUM_INVALID
- \type = REG_TYPE_R64
- .elseif reg_type_r32 <> REG_NUM_INVALID
- \type = REG_TYPE_R32
- .else
- \type = REG_TYPE_INVALID
- .endif
- .endm
+.macro REG_NUM reg_num reg
+#ifdef CONFIG_X86_64
+ R64_NUM \reg_num \reg
+#else
+ R32_NUM \reg_num \reg
+#endif
+.endm
- .macro PFX_REX opd1 opd2 W=0
- .if ((\opd1 | \opd2) & 8) || \W
- .byte 0x40 | ((\opd1 & 8) >> 3) | ((\opd2 & 8) >> 1) | (\W << 3)
- .endif
- .endm
- .macro MODRM mod opd1 opd2
- .byte \mod | (\opd1 & 7) | ((\opd2 & 7) << 3)
- .endm
#endif
#endif
diff --git a/arch/x86/kvm/svm/vmenter.S b/arch/x86/kvm/svm/vmenter.S
index f523d9e49839..0bad9707d219 100644
--- a/arch/x86/kvm/svm/vmenter.S
+++ b/arch/x86/kvm/svm/vmenter.S
@@ -9,8 +9,6 @@
#include "kvm-asm-offsets.h"
#include "vmenter.h"
-#define WORD_SIZE (BITS_PER_LONG / 8)
-
/* Intentionally omit RAX as it's context switched by hardware */
#define VCPU_RCX (SVM_vcpu_arch_regs + __VCPU_REGS_RCX * WORD_SIZE)
#define VCPU_RDX (SVM_vcpu_arch_regs + __VCPU_REGS_RDX * WORD_SIZE)
diff --git a/arch/x86/kvm/vmenter.h b/arch/x86/kvm/vmenter.h
index ba3f71449c62..a68020254a8d 100644
--- a/arch/x86/kvm/vmenter.h
+++ b/arch/x86/kvm/vmenter.h
@@ -2,6 +2,8 @@
#ifndef __KVM_X86_VMENTER_H
#define __KVM_X86_VMENTER_H
+#include "inst.h"
+
#define KVM_ENTER_VMRESUME BIT(0)
#define KVM_ENTER_SAVE_SPEC_CTRL BIT(1)
#define KVM_ENTER_CLEAR_CPU_BUFFERS_FOR_MMIO BIT(2)
@@ -76,5 +78,46 @@
wrmsr
.endm
+#define WORD_SIZE (BITS_PER_LONG / 8)
+
+.macro LOAD_REGS src:req, regs_ofs:req, regs:vararg
+.irp reg, \regs
+ REG_NUM reg_num \reg
+ .if reg_num <> REG_NUM_INVALID
+ mov (\regs_ofs + reg_num * WORD_SIZE)(\src), \reg
+ .else
+ .err invalid register \reg
+ .endif
+.endr
+.endm
+
+.macro STORE_REGS dst:req, regs_ofs:req, regs:vararg
+.irp reg, \regs
+ REG_NUM reg_num \reg
+ .if reg_num <> REG_NUM_INVALID
+ mov \reg, (\regs_ofs + reg_num * WORD_SIZE)(\dst)
+ .else
+ .err invalid register \reg
+ .endif
+.endr
+.endm
+
+.macro POP_REGS dst:req, regs_ofs:req, regs:vararg
+.irp reg, \regs
+ REG_NUM reg_num \reg
+ .if reg_num <> REG_NUM_INVALID
+ pop (\regs_ofs + reg_num * WORD_SIZE)(\dst)
+ .else
+ .err invalid register \reg
+ .endif
+.endr
+.endm
+
+.macro CLEAR_REGS regs:vararg
+.irp reg, \regs
+ xorl \reg, \reg
+.endr
+.endm
+
#endif /* __ASSEMBLER__ */
#endif /* __KVM_X86_VMENTER_H */
diff --git a/arch/x86/kvm/vmx/vmenter.S b/arch/x86/kvm/vmx/vmenter.S
index 7e4dc17fc0b8..4b7aaa7430fb 100644
--- a/arch/x86/kvm/vmx/vmenter.S
+++ b/arch/x86/kvm/vmx/vmenter.S
@@ -2,35 +2,12 @@
#include <linux/linkage.h>
#include <asm/asm.h>
#include <asm/bitsperlong.h>
-#include <asm/kvm_vcpu_regs.h>
#include <asm/nospec-branch.h>
#include <asm/percpu.h>
#include <asm/segment.h>
#include "kvm-asm-offsets.h"
#include "vmenter.h"
-#define WORD_SIZE (BITS_PER_LONG / 8)
-
-#define VCPU_RAX (VMX_vcpu_arch_regs + __VCPU_REGS_RAX * WORD_SIZE)
-#define VCPU_RCX (VMX_vcpu_arch_regs + __VCPU_REGS_RCX * WORD_SIZE)
-#define VCPU_RDX (VMX_vcpu_arch_regs + __VCPU_REGS_RDX * WORD_SIZE)
-#define VCPU_RBX (VMX_vcpu_arch_regs + __VCPU_REGS_RBX * WORD_SIZE)
-/* Intentionally omit RSP as it's context switched by hardware */
-#define VCPU_RBP (VMX_vcpu_arch_regs + __VCPU_REGS_RBP * WORD_SIZE)
-#define VCPU_RSI (VMX_vcpu_arch_regs + __VCPU_REGS_RSI * WORD_SIZE)
-#define VCPU_RDI (VMX_vcpu_arch_regs + __VCPU_REGS_RDI * WORD_SIZE)
-
-#ifdef CONFIG_X86_64
-#define VCPU_R8 (VMX_vcpu_arch_regs + __VCPU_REGS_R8 * WORD_SIZE)
-#define VCPU_R9 (VMX_vcpu_arch_regs + __VCPU_REGS_R9 * WORD_SIZE)
-#define VCPU_R10 (VMX_vcpu_arch_regs + __VCPU_REGS_R10 * WORD_SIZE)
-#define VCPU_R11 (VMX_vcpu_arch_regs + __VCPU_REGS_R11 * WORD_SIZE)
-#define VCPU_R12 (VMX_vcpu_arch_regs + __VCPU_REGS_R12 * WORD_SIZE)
-#define VCPU_R13 (VMX_vcpu_arch_regs + __VCPU_REGS_R13 * WORD_SIZE)
-#define VCPU_R14 (VMX_vcpu_arch_regs + __VCPU_REGS_R14 * WORD_SIZE)
-#define VCPU_R15 (VMX_vcpu_arch_regs + __VCPU_REGS_R15 * WORD_SIZE)
-#endif
-
.macro VMX_DO_EVENT_IRQOFF call_insn call_target
/*
* Unconditionally create a stack frame, getting the correct RSP on the
@@ -114,25 +91,18 @@ SYM_FUNC_START(__vmx_vcpu_run)
* an LFENCE to stop speculation from skipping the wrmsr.
*/
- /* Load guest registers. Don't clobber flags. */
- mov VCPU_RAX(%_ASM_DI), %_ASM_AX
- mov VCPU_RCX(%_ASM_DI), %_ASM_CX
- mov VCPU_RDX(%_ASM_DI), %_ASM_DX
- mov VCPU_RBX(%_ASM_DI), %_ASM_BX
- mov VCPU_RBP(%_ASM_DI), %_ASM_BP
- mov VCPU_RSI(%_ASM_DI), %_ASM_SI
+ /*
+ * Load guest registers. Don't clobber flags. Intentionally omit
+ * %_ASM_SP as it's context switched by hardware
+ */
+ LOAD_REGS %_ASM_DI, VMX_vcpu_arch_regs, \
+ %_ASM_AX, %_ASM_CX, %_ASM_DX, %_ASM_BX, %_ASM_BP, %_ASM_SI
#ifdef CONFIG_X86_64
- mov VCPU_R8 (%_ASM_DI), %r8
- mov VCPU_R9 (%_ASM_DI), %r9
- mov VCPU_R10(%_ASM_DI), %r10
- mov VCPU_R11(%_ASM_DI), %r11
- mov VCPU_R12(%_ASM_DI), %r12
- mov VCPU_R13(%_ASM_DI), %r13
- mov VCPU_R14(%_ASM_DI), %r14
- mov VCPU_R15(%_ASM_DI), %r15
+ LOAD_REGS %_ASM_DI, VMX_vcpu_arch_regs, \
+ %r8, %r9, %r10, %r11, %r12, %r13, %r14, %r15
#endif
/* Load guest RDI. This kills the @vmx pointer! */
- mov VCPU_RDI(%_ASM_DI), %_ASM_DI
+ LOAD_REGS %_ASM_DI, VMX_vcpu_arch_regs, %_ASM_DI
/*
* Note, ALTERNATIVE_2 works in reverse order. If CLEAR_CPU_BUF_VM is
@@ -187,23 +157,16 @@ SYM_INNER_LABEL_ALIGN(vmx_vmexit, SYM_L_GLOBAL)
/* Reload @vmx to RDI. */
mov 2*WORD_SIZE(%_ASM_SP), %_ASM_DI
- /* Save all guest registers, including RDI from the stack */
- mov %_ASM_AX, VCPU_RAX(%_ASM_DI)
- mov %_ASM_CX, VCPU_RCX(%_ASM_DI)
- mov %_ASM_DX, VCPU_RDX(%_ASM_DI)
- mov %_ASM_BX, VCPU_RBX(%_ASM_DI)
- mov %_ASM_BP, VCPU_RBP(%_ASM_DI)
- mov %_ASM_SI, VCPU_RSI(%_ASM_DI)
- pop VCPU_RDI(%_ASM_DI)
+ /*
+ * Save all guest registers, including RDI from the stack. Intentionally
+ * omit %_ASM_SP as it's context switched by hardware
+ */
+ STORE_REGS %_ASM_DI, VMX_vcpu_arch_regs, \
+ %_ASM_AX, %_ASM_CX, %_ASM_DX, %_ASM_BX, %_ASM_BP, %_ASM_SI
+ POP_REGS %_ASM_DI, VMX_vcpu_arch_regs, %_ASM_DI
#ifdef CONFIG_X86_64
- mov %r8, VCPU_R8 (%_ASM_DI)
- mov %r9, VCPU_R9 (%_ASM_DI)
- mov %r10, VCPU_R10(%_ASM_DI)
- mov %r11, VCPU_R11(%_ASM_DI)
- mov %r12, VCPU_R12(%_ASM_DI)
- mov %r13, VCPU_R13(%_ASM_DI)
- mov %r14, VCPU_R14(%_ASM_DI)
- mov %r15, VCPU_R15(%_ASM_DI)
+ STORE_REGS %_ASM_DI, VMX_vcpu_arch_regs, \
+ %r8, %r9, %r10, %r11, %r12, %r13, %r14, %r15
#endif
/* Clear return value to indicate VM-Exit (as opposed to VM-Fail). */
@@ -220,21 +183,9 @@ SYM_INNER_LABEL_ALIGN(vmx_vmexit, SYM_L_GLOBAL)
* VM-Exit and RBX is explicitly loaded with 0 or 1 to hold the return
* value.
*/
- xor %eax, %eax
- xor %ecx, %ecx
- xor %edx, %edx
- xor %ebp, %ebp
- xor %esi, %esi
- xor %edi, %edi
+ CLEAR_REGS %eax, %ecx, %edx, %ebp, %esi, %edi
#ifdef CONFIG_X86_64
- xor %r8d, %r8d
- xor %r9d, %r9d
- xor %r10d, %r10d
- xor %r11d, %r11d
- xor %r12d, %r12d
- xor %r13d, %r13d
- xor %r14d, %r14d
- xor %r15d, %r15d
+ CLEAR_REGS %r8d, %r9d, %r10d, %r11d, %r12d, %r13d, %r14d, %r15d
#endif
/*
--
2.51.0
next prev parent reply other threads:[~2026-05-12 1:40 UTC|newest]
Thread overview: 22+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-05-12 1:14 [PATCH v4 00/21] KVM: x86: Enable APX for guests Chang S. Bae
2026-05-12 1:14 ` Chang S. Bae [this message]
2026-05-12 1:14 ` [PATCH v4 02/21] KVM: SVM: Macrofy GPR swapping in __svm_vcpu_run() Chang S. Bae
2026-05-12 1:14 ` [PATCH v4 03/21] KVM: SEV: Macrofy GPR swapping in __svm_sev_es_vcpu_run() Chang S. Bae
2026-05-12 1:14 ` [PATCH v4 04/21] KVM: x86: Extend VCPU registers for EGPRs Chang S. Bae
2026-05-12 1:14 ` [PATCH v4 05/21] KVM: VMX: Save guest EGPRs in VCPU cache Chang S. Bae
2026-05-12 1:14 ` [PATCH v4 06/21] x86/fpu: Ignore APX when copying from/to guest FPU Chang S. Bae
2026-05-12 1:14 ` [PATCH v4 07/21] KVM: x86: Support APX state for XSAVE ABI Chang S. Bae
2026-05-12 1:14 ` [PATCH v4 08/21] KVM: VMX: Refactor VMX instruction information access Chang S. Bae
2026-05-12 1:14 ` [PATCH v4 09/21] KVM: VMX: Refactor instruction information decoding Chang S. Bae
2026-05-12 1:14 ` [PATCH v4 10/21] KVM: VMX: Refactor register index retrieval from exit qualification Chang S. Bae
2026-05-12 1:14 ` [PATCH v4 11/21] KVM: VMX: Support instruction information extension Chang S. Bae
2026-05-12 1:14 ` [PATCH v4 12/21] KVM: nVMX: Propagate the extended instruction info field Chang S. Bae
2026-05-12 1:14 ` [PATCH v4 13/21] KVM: x86: Support EGPR accessing and tracking for emulator Chang S. Bae
2026-05-12 1:14 ` [PATCH v4 14/21] KVM: x86: Handle EGPR index and REX2-incompatible opcodes Chang S. Bae
2026-05-12 1:14 ` [PATCH v4 15/21] KVM: x86: Support REX2-prefixed opcode decode Chang S. Bae
2026-05-12 1:14 ` [PATCH v4 16/21] KVM: x86: Reject EVEX-prefixed instructions Chang S. Bae
2026-05-12 1:14 ` [PATCH v4 17/21] KVM: x86: Guard valid XCR0.APX settings Chang S. Bae
2026-05-12 1:14 ` [PATCH v4 18/21] KVM: x86: Expose APX foundation feature to guests Chang S. Bae
2026-05-12 1:15 ` [PATCH v4 19/21] KVM: x86: Expose APX sub-features " Chang S. Bae
2026-05-12 1:15 ` [PATCH v4 20/21] KVM: x86: selftests: Add APX state and ABI test Chang S. Bae
2026-05-12 1:15 ` [PATCH v4 21/21] KVM: x86: selftests: Add APX state handling and XCR0 sanity checks Chang S. Bae
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260512011502.53072-2-chang.seok.bae@intel.com \
--to=chang.seok.bae@intel.com \
--cc=chao.gao@intel.com \
--cc=kvm@vger.kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=pbonzini@redhat.com \
--cc=seanjc@google.com \
--cc=x86@kernel.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox