From: "Chang S. Bae" <chang.seok.bae@intel.com>
To: pbonzini@redhat.com, seanjc@google.com
Cc: kvm@vger.kernel.org, x86@kernel.org,
linux-kernel@vger.kernel.org, chao.gao@intel.com,
chang.seok.bae@intel.com
Subject: [PATCH v4 02/21] KVM: SVM: Macrofy GPR swapping in __svm_vcpu_run()
Date: Tue, 12 May 2026 01:14:43 +0000 [thread overview]
Message-ID: <20260512011502.53072-3-chang.seok.bae@intel.com> (raw)
In-Reply-To: <20260512011502.53072-1-chang.seok.bae@intel.com>
Convert the register save/restore sequences in the SVM entry into macros,
following the VMX code. Drop the now-redundant register offset defines.
No functional change intended.
Signed-off-by: Chang S. Bae <chang.seok.bae@intel.com>
---
V3 -> V4: Cover all GPRs (Paolo)
---
arch/x86/kvm/svm/vmenter.S | 83 +++++++++-----------------------------
1 file changed, 19 insertions(+), 64 deletions(-)
diff --git a/arch/x86/kvm/svm/vmenter.S b/arch/x86/kvm/svm/vmenter.S
index 0bad9707d219..4b20aadbb741 100644
--- a/arch/x86/kvm/svm/vmenter.S
+++ b/arch/x86/kvm/svm/vmenter.S
@@ -9,26 +9,6 @@
#include "kvm-asm-offsets.h"
#include "vmenter.h"
-/* Intentionally omit RAX as it's context switched by hardware */
-#define VCPU_RCX (SVM_vcpu_arch_regs + __VCPU_REGS_RCX * WORD_SIZE)
-#define VCPU_RDX (SVM_vcpu_arch_regs + __VCPU_REGS_RDX * WORD_SIZE)
-#define VCPU_RBX (SVM_vcpu_arch_regs + __VCPU_REGS_RBX * WORD_SIZE)
-/* Intentionally omit RSP as it's context switched by hardware */
-#define VCPU_RBP (SVM_vcpu_arch_regs + __VCPU_REGS_RBP * WORD_SIZE)
-#define VCPU_RSI (SVM_vcpu_arch_regs + __VCPU_REGS_RSI * WORD_SIZE)
-#define VCPU_RDI (SVM_vcpu_arch_regs + __VCPU_REGS_RDI * WORD_SIZE)
-
-#ifdef CONFIG_X86_64
-#define VCPU_R8 (SVM_vcpu_arch_regs + __VCPU_REGS_R8 * WORD_SIZE)
-#define VCPU_R9 (SVM_vcpu_arch_regs + __VCPU_REGS_R9 * WORD_SIZE)
-#define VCPU_R10 (SVM_vcpu_arch_regs + __VCPU_REGS_R10 * WORD_SIZE)
-#define VCPU_R11 (SVM_vcpu_arch_regs + __VCPU_REGS_R11 * WORD_SIZE)
-#define VCPU_R12 (SVM_vcpu_arch_regs + __VCPU_REGS_R12 * WORD_SIZE)
-#define VCPU_R13 (SVM_vcpu_arch_regs + __VCPU_REGS_R13 * WORD_SIZE)
-#define VCPU_R14 (SVM_vcpu_arch_regs + __VCPU_REGS_R14 * WORD_SIZE)
-#define VCPU_R15 (SVM_vcpu_arch_regs + __VCPU_REGS_R15 * WORD_SIZE)
-#endif
-
#define SVM_vmcb01_pa (SVM_vmcb01 + KVM_VMCB_pa)
.section .noinstr.text, "ax"
@@ -109,23 +89,17 @@ SYM_FUNC_START(__svm_vcpu_run)
mov SVM_current_vmcb(%_ASM_DI), %_ASM_AX
mov KVM_VMCB_pa(%_ASM_AX), %_ASM_AX
- /* Load guest registers. */
- mov VCPU_RCX(%_ASM_DI), %_ASM_CX
- mov VCPU_RDX(%_ASM_DI), %_ASM_DX
- mov VCPU_RBX(%_ASM_DI), %_ASM_BX
- mov VCPU_RBP(%_ASM_DI), %_ASM_BP
- mov VCPU_RSI(%_ASM_DI), %_ASM_SI
+ /*
+ * Load guest registers. Intentionally omit %_ASM_AX and %_ASM_SP, as
+ * they are context switched by hardware.
+ */
+ LOAD_REGS %_ASM_DI, SVM_vcpu_arch_regs, \
+ %_ASM_CX, %_ASM_DX, %_ASM_BX, %_ASM_BP, %_ASM_SI
#ifdef CONFIG_X86_64
- mov VCPU_R8 (%_ASM_DI), %r8
- mov VCPU_R9 (%_ASM_DI), %r9
- mov VCPU_R10(%_ASM_DI), %r10
- mov VCPU_R11(%_ASM_DI), %r11
- mov VCPU_R12(%_ASM_DI), %r12
- mov VCPU_R13(%_ASM_DI), %r13
- mov VCPU_R14(%_ASM_DI), %r14
- mov VCPU_R15(%_ASM_DI), %r15
+ LOAD_REGS %_ASM_DI, SVM_vcpu_arch_regs, \
+ %r8, %r9, %r10, %r11, %r12, %r13, %r14, %r15
#endif
- mov VCPU_RDI(%_ASM_DI), %_ASM_DI
+ LOAD_REGS %_ASM_DI, SVM_vcpu_arch_regs, %_ASM_DI
/* Clobbers EFLAGS.ZF */
SVM_CLEAR_CPU_BUFFERS
@@ -136,22 +110,15 @@ SYM_FUNC_START(__svm_vcpu_run)
/* Pop @svm to RAX while it's the only available register. */
pop %_ASM_AX
- /* Save all guest registers. */
- mov %_ASM_CX, VCPU_RCX(%_ASM_AX)
- mov %_ASM_DX, VCPU_RDX(%_ASM_AX)
- mov %_ASM_BX, VCPU_RBX(%_ASM_AX)
- mov %_ASM_BP, VCPU_RBP(%_ASM_AX)
- mov %_ASM_SI, VCPU_RSI(%_ASM_AX)
- mov %_ASM_DI, VCPU_RDI(%_ASM_AX)
+ /*
+ * Save all guest registers. Intentionally omit %_ASM_AX and %_ASM_SP,
+ * as they are context switched by hardware.
+ */
+ STORE_REGS %_ASM_AX, SVM_vcpu_arch_regs, \
+ %_ASM_CX, %_ASM_DX, %_ASM_BX, %_ASM_BP, %_ASM_SI, %_ASM_DI
#ifdef CONFIG_X86_64
- mov %r8, VCPU_R8 (%_ASM_AX)
- mov %r9, VCPU_R9 (%_ASM_AX)
- mov %r10, VCPU_R10(%_ASM_AX)
- mov %r11, VCPU_R11(%_ASM_AX)
- mov %r12, VCPU_R12(%_ASM_AX)
- mov %r13, VCPU_R13(%_ASM_AX)
- mov %r14, VCPU_R14(%_ASM_AX)
- mov %r15, VCPU_R15(%_ASM_AX)
+ STORE_REGS %_ASM_AX, SVM_vcpu_arch_regs, \
+ %r8, %r9, %r10, %r11, %r12, %r13, %r14, %r15
#endif
/* @svm can stay in RDI from now on. */
@@ -194,21 +161,9 @@ SYM_FUNC_START(__svm_vcpu_run)
* free. RSP and RAX are exempt as they are restored by hardware
* during VM-Exit.
*/
- xor %ecx, %ecx
- xor %edx, %edx
- xor %ebx, %ebx
- xor %ebp, %ebp
- xor %esi, %esi
- xor %edi, %edi
+ CLEAR_REGS %ecx, %edx, %ebx, %ebp, %esi, %edi
#ifdef CONFIG_X86_64
- xor %r8d, %r8d
- xor %r9d, %r9d
- xor %r10d, %r10d
- xor %r11d, %r11d
- xor %r12d, %r12d
- xor %r13d, %r13d
- xor %r14d, %r14d
- xor %r15d, %r15d
+ CLEAR_REGS %r8d, %r9d, %r10d, %r11d, %r12d, %r13d, %r14d, %r15d
#endif
/* "Pop" @enter_flags. */
--
2.51.0
next prev parent reply other threads:[~2026-05-12 1:40 UTC|newest]
Thread overview: 22+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-05-12 1:14 [PATCH v4 00/21] KVM: x86: Enable APX for guests Chang S. Bae
2026-05-12 1:14 ` [PATCH v4 01/21] KVM: VMX: Macrofy GPR swapping in __vmx_vcpu_run() Chang S. Bae
2026-05-12 1:14 ` Chang S. Bae [this message]
2026-05-12 1:14 ` [PATCH v4 03/21] KVM: SEV: Macrofy GPR swapping in __svm_sev_es_vcpu_run() Chang S. Bae
2026-05-12 1:14 ` [PATCH v4 04/21] KVM: x86: Extend VCPU registers for EGPRs Chang S. Bae
2026-05-12 1:14 ` [PATCH v4 05/21] KVM: VMX: Save guest EGPRs in VCPU cache Chang S. Bae
2026-05-12 1:14 ` [PATCH v4 06/21] x86/fpu: Ignore APX when copying from/to guest FPU Chang S. Bae
2026-05-12 1:14 ` [PATCH v4 07/21] KVM: x86: Support APX state for XSAVE ABI Chang S. Bae
2026-05-12 1:14 ` [PATCH v4 08/21] KVM: VMX: Refactor VMX instruction information access Chang S. Bae
2026-05-12 1:14 ` [PATCH v4 09/21] KVM: VMX: Refactor instruction information decoding Chang S. Bae
2026-05-12 1:14 ` [PATCH v4 10/21] KVM: VMX: Refactor register index retrieval from exit qualification Chang S. Bae
2026-05-12 1:14 ` [PATCH v4 11/21] KVM: VMX: Support instruction information extension Chang S. Bae
2026-05-12 1:14 ` [PATCH v4 12/21] KVM: nVMX: Propagate the extended instruction info field Chang S. Bae
2026-05-12 1:14 ` [PATCH v4 13/21] KVM: x86: Support EGPR accessing and tracking for emulator Chang S. Bae
2026-05-12 1:14 ` [PATCH v4 14/21] KVM: x86: Handle EGPR index and REX2-incompatible opcodes Chang S. Bae
2026-05-12 1:14 ` [PATCH v4 15/21] KVM: x86: Support REX2-prefixed opcode decode Chang S. Bae
2026-05-12 1:14 ` [PATCH v4 16/21] KVM: x86: Reject EVEX-prefixed instructions Chang S. Bae
2026-05-12 1:14 ` [PATCH v4 17/21] KVM: x86: Guard valid XCR0.APX settings Chang S. Bae
2026-05-12 1:14 ` [PATCH v4 18/21] KVM: x86: Expose APX foundation feature to guests Chang S. Bae
2026-05-12 1:15 ` [PATCH v4 19/21] KVM: x86: Expose APX sub-features " Chang S. Bae
2026-05-12 1:15 ` [PATCH v4 20/21] KVM: x86: selftests: Add APX state and ABI test Chang S. Bae
2026-05-12 1:15 ` [PATCH v4 21/21] KVM: x86: selftests: Add APX state handling and XCR0 sanity checks Chang S. Bae
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260512011502.53072-3-chang.seok.bae@intel.com \
--to=chang.seok.bae@intel.com \
--cc=chao.gao@intel.com \
--cc=kvm@vger.kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=pbonzini@redhat.com \
--cc=seanjc@google.com \
--cc=x86@kernel.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox