public inbox for linux-kernel@vger.kernel.org
 help / color / mirror / Atom feed
From: "Chang S. Bae" <chang.seok.bae@intel.com>
To: pbonzini@redhat.com, seanjc@google.com
Cc: kvm@vger.kernel.org, linux-kernel@vger.kernel.org,
	chao.gao@intel.com, chang.seok.bae@intel.com
Subject: [PATCH v3 01/20] KVM: VMX: Macrofy 64-bit GPR swapping in __vmx_vcpu_run()
Date: Tue, 28 Apr 2026 05:00:52 +0000	[thread overview]
Message-ID: <20260428050111.39323-2-chang.seok.bae@intel.com> (raw)
In-Reply-To: <20260428050111.39323-1-chang.seok.bae@intel.com>

Simplify the repeated register saving/restoring sequences with macros,
making them easier to extend to additional registers.

On x86_64, the R8-R15 register names directly encode each register's ID,
and that ID can be used to derive the register's offset in struct
kvm_vcpu_arch. Leverage this fact to generalize the save/restore
sequences in macros, removing the need to define the offsets separately.
Apply the same approach to the register clearing sequences as well.

No functional change intended.

Signed-off-by: Chang S. Bae <chang.seok.bae@intel.com>
---
V2 -> V3: New patch

Dependency: Based on Paolo's SPEC_CTRL rework [1]

Test:
* Broad KVM testing should catch regressions.
* The following KVM unit test [2], applied on top of this patch, can
  validate the changes more explicitly:

    TEST_MOV_GPRS(8, 15);
    TEST_MOV_GPRS(9, 14);
    ...

[1]: https://lore.kernel.org/20260427105848.44865-1-pbonzini@redhat.com/
[2]: https://lore.kernel.org/20260420212355.507827-1-chang.seok.bae@intel.com/
---
 arch/x86/kvm/svm/vmenter.S |  3 ---
 arch/x86/kvm/vmenter.h     | 22 ++++++++++++++++++++
 arch/x86/kvm/vmx/vmenter.S | 41 +++-----------------------------------
 3 files changed, 25 insertions(+), 41 deletions(-)

diff --git a/arch/x86/kvm/svm/vmenter.S b/arch/x86/kvm/svm/vmenter.S
index f523d9e49839..358236557454 100644
--- a/arch/x86/kvm/svm/vmenter.S
+++ b/arch/x86/kvm/svm/vmenter.S
@@ -6,11 +6,8 @@
 #include <asm/frame.h>
 #include <asm/kvm_vcpu_regs.h>
 #include <asm/nospec-branch.h>
-#include "kvm-asm-offsets.h"
 #include "vmenter.h"
 
-#define WORD_SIZE (BITS_PER_LONG / 8)
-
 /* Intentionally omit RAX as it's context switched by hardware */
 #define VCPU_RCX	(SVM_vcpu_arch_regs + __VCPU_REGS_RCX * WORD_SIZE)
 #define VCPU_RDX	(SVM_vcpu_arch_regs + __VCPU_REGS_RDX * WORD_SIZE)
diff --git a/arch/x86/kvm/vmenter.h b/arch/x86/kvm/vmenter.h
index 93db912eef44..03e4067c188b 100644
--- a/arch/x86/kvm/vmenter.h
+++ b/arch/x86/kvm/vmenter.h
@@ -2,6 +2,8 @@
 #ifndef __KVM_X86_VMENTER_H
 #define __KVM_X86_VMENTER_H
 
+#include "kvm-asm-offsets.h"
+
 #define KVM_ENTER_VMRESUME			BIT(0)
 #define KVM_ENTER_SAVE_SPEC_CTRL		BIT(1)
 #define KVM_ENTER_CLEAR_CPU_BUFFERS_FOR_MMIO	BIT(2)
@@ -76,5 +78,25 @@
 	wrmsr
 .endm
 
+#define WORD_SIZE (BITS_PER_LONG / 8)
+
+#ifdef CONFIG_X86_64
+.macro CLEAR_REGS regs:vararg
+ .irp i, \regs
+	xor %r\i, %r\i
+ .endr
+.endm
+.macro VMX_LOAD_REGS src:req, regs:vararg
+ .irp i, \regs
+	mov (VMX_vcpu_arch_regs + \i * WORD_SIZE)(\src), %r\i
+ .endr
+.endm
+.macro VMX_STORE_REGS dst:req, regs:vararg
+ .irp i, \regs
+	mov %r\i, (VMX_vcpu_arch_regs + \i * WORD_SIZE)(\dst)
+ .endr
+.endm
+#endif
+
 #endif /* __ASSEMBLER__ */
 #endif /* __KVM_X86_ENTER_FLAGS_H */
diff --git a/arch/x86/kvm/vmx/vmenter.S b/arch/x86/kvm/vmx/vmenter.S
index 294407dfc24f..f8420b3a2741 100644
--- a/arch/x86/kvm/vmx/vmenter.S
+++ b/arch/x86/kvm/vmx/vmenter.S
@@ -6,11 +6,8 @@
 #include <asm/nospec-branch.h>
 #include <asm/percpu.h>
 #include <asm/segment.h>
-#include "kvm-asm-offsets.h"
 #include "vmenter.h"
 
-#define WORD_SIZE (BITS_PER_LONG / 8)
-
 #define VCPU_RAX	(VMX_vcpu_arch_regs + __VCPU_REGS_RAX * WORD_SIZE)
 #define VCPU_RCX	(VMX_vcpu_arch_regs + __VCPU_REGS_RCX * WORD_SIZE)
 #define VCPU_RDX	(VMX_vcpu_arch_regs + __VCPU_REGS_RDX * WORD_SIZE)
@@ -20,17 +17,6 @@
 #define VCPU_RSI	(VMX_vcpu_arch_regs + __VCPU_REGS_RSI * WORD_SIZE)
 #define VCPU_RDI	(VMX_vcpu_arch_regs + __VCPU_REGS_RDI * WORD_SIZE)
 
-#ifdef CONFIG_X86_64
-#define VCPU_R8		(VMX_vcpu_arch_regs + __VCPU_REGS_R8  * WORD_SIZE)
-#define VCPU_R9		(VMX_vcpu_arch_regs + __VCPU_REGS_R9  * WORD_SIZE)
-#define VCPU_R10	(VMX_vcpu_arch_regs + __VCPU_REGS_R10 * WORD_SIZE)
-#define VCPU_R11	(VMX_vcpu_arch_regs + __VCPU_REGS_R11 * WORD_SIZE)
-#define VCPU_R12	(VMX_vcpu_arch_regs + __VCPU_REGS_R12 * WORD_SIZE)
-#define VCPU_R13	(VMX_vcpu_arch_regs + __VCPU_REGS_R13 * WORD_SIZE)
-#define VCPU_R14	(VMX_vcpu_arch_regs + __VCPU_REGS_R14 * WORD_SIZE)
-#define VCPU_R15	(VMX_vcpu_arch_regs + __VCPU_REGS_R15 * WORD_SIZE)
-#endif
-
 .macro VMX_DO_EVENT_IRQOFF call_insn call_target
 	/*
 	 * Unconditionally create a stack frame, getting the correct RSP on the
@@ -122,14 +108,7 @@ SYM_FUNC_START(__vmx_vcpu_run)
 	mov VCPU_RBP(%_ASM_DI), %_ASM_BP
 	mov VCPU_RSI(%_ASM_DI), %_ASM_SI
 #ifdef CONFIG_X86_64
-	mov VCPU_R8 (%_ASM_DI),  %r8
-	mov VCPU_R9 (%_ASM_DI),  %r9
-	mov VCPU_R10(%_ASM_DI), %r10
-	mov VCPU_R11(%_ASM_DI), %r11
-	mov VCPU_R12(%_ASM_DI), %r12
-	mov VCPU_R13(%_ASM_DI), %r13
-	mov VCPU_R14(%_ASM_DI), %r14
-	mov VCPU_R15(%_ASM_DI), %r15
+	VMX_LOAD_REGS %_ASM_DI, 8,9,10,11,12,13,14,15
 #endif
 	/* Load guest RDI.  This kills the @vmx pointer! */
 	mov VCPU_RDI(%_ASM_DI), %_ASM_DI
@@ -196,14 +175,7 @@ SYM_INNER_LABEL_ALIGN(vmx_vmexit, SYM_L_GLOBAL)
 	mov %_ASM_SI, VCPU_RSI(%_ASM_DI)
 	pop           VCPU_RDI(%_ASM_DI)
 #ifdef CONFIG_X86_64
-	mov %r8,  VCPU_R8 (%_ASM_DI)
-	mov %r9,  VCPU_R9 (%_ASM_DI)
-	mov %r10, VCPU_R10(%_ASM_DI)
-	mov %r11, VCPU_R11(%_ASM_DI)
-	mov %r12, VCPU_R12(%_ASM_DI)
-	mov %r13, VCPU_R13(%_ASM_DI)
-	mov %r14, VCPU_R14(%_ASM_DI)
-	mov %r15, VCPU_R15(%_ASM_DI)
+	VMX_STORE_REGS %_ASM_DI, 8,9,10,11,12,13,14,15
 #endif
 
 	/* Clear return value to indicate VM-Exit (as opposed to VM-Fail). */
@@ -227,14 +199,7 @@ SYM_INNER_LABEL_ALIGN(vmx_vmexit, SYM_L_GLOBAL)
 	xor %esi, %esi
 	xor %edi, %edi
 #ifdef CONFIG_X86_64
-	xor %r8d,  %r8d
-	xor %r9d,  %r9d
-	xor %r10d, %r10d
-	xor %r11d, %r11d
-	xor %r12d, %r12d
-	xor %r13d, %r13d
-	xor %r14d, %r14d
-	xor %r15d, %r15d
+	CLEAR_REGS 8d,9d,10d,11d,12d,13d,14d,15d
 #endif
 
 	/*
-- 
2.51.0


  reply	other threads:[~2026-04-28  5:26 UTC|newest]

Thread overview: 27+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2026-04-28  5:00 [PATCH v3 00/20] KVM: x86: Enable APX for guests Chang S. Bae
2026-04-28  5:00 ` Chang S. Bae [this message]
2026-04-28  9:03   ` [PATCH v3 01/20] KVM: VMX: Macrofy 64-bit GPR swapping in __vmx_vcpu_run() Paolo Bonzini
2026-04-28 20:12     ` Chang S. Bae
2026-04-29  7:50       ` Paolo Bonzini
2026-04-29 17:22         ` Chang S. Bae
2026-04-28  9:09   ` Paolo Bonzini
2026-04-28  5:00 ` [PATCH v3 02/20] KVM: SVM: Macrofy 64-bit GPR swapping in __svm_vcpu_run() Chang S. Bae
2026-04-28  5:00 ` [PATCH v3 03/20] KVM: SEV: Macrofy 64-bit GPR swapping in __svm_sev_es_vcpu_run() Chang S. Bae
2026-04-28  5:00 ` [PATCH v3 04/20] KVM: x86: Extend VCPU registers for EGPRs Chang S. Bae
2026-04-28  5:00 ` [PATCH v3 05/20] KVM: VMX: Save guest EGPRs in VCPU cache Chang S. Bae
2026-04-28  5:00 ` [PATCH v3 06/20] KVM: x86: Support APX state for XSAVE ABI Chang S. Bae
2026-04-28  9:31   ` Paolo Bonzini
2026-04-28  5:00 ` [PATCH v3 07/20] KVM: VMX: Refactor VMX instruction information access Chang S. Bae
2026-04-28  5:00 ` [PATCH v3 08/20] KVM: VMX: Refactor instruction information decoding Chang S. Bae
2026-04-28  5:01 ` [PATCH v3 09/20] KVM: VMX: Refactor register index retrieval from exit qualification Chang S. Bae
2026-04-28  5:01 ` [PATCH v3 10/20] KVM: VMX: Support instruction information extension Chang S. Bae
2026-04-28  5:01 ` [PATCH v3 11/20] KVM: nVMX: Propagate the extended instruction info field Chang S. Bae
2026-04-28  5:01 ` [PATCH v3 12/20] KVM: x86: Support EGPR accessing and tracking for emulator Chang S. Bae
2026-04-28  5:01 ` [PATCH v3 13/20] KVM: x86: Handle EGPR index and REX2-incompatible opcodes Chang S. Bae
2026-04-28  5:01 ` [PATCH v3 14/20] KVM: x86: Support REX2-prefixed opcode decode Chang S. Bae
2026-04-28  5:01 ` [PATCH v3 15/20] KVM: x86: Reject EVEX-prefixed instructions Chang S. Bae
2026-04-28  5:01 ` [PATCH v3 16/20] KVM: x86: Guard valid XCR0.APX settings Chang S. Bae
2026-04-28  5:01 ` [PATCH v3 17/20] KVM: x86: Expose APX foundation feature to guests Chang S. Bae
2026-04-28  5:01 ` [PATCH v3 18/20] KVM: x86: Expose APX sub-features " Chang S. Bae
2026-04-28  5:01 ` [PATCH v3 19/20] KVM: x86: selftests: Add APX state and ABI test Chang S. Bae
2026-04-28  5:01 ` [PATCH v3 20/20] KVM: x86: selftests: Add APX state handling and XCR0 sanity checks Chang S. Bae

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20260428050111.39323-2-chang.seok.bae@intel.com \
    --to=chang.seok.bae@intel.com \
    --cc=chao.gao@intel.com \
    --cc=kvm@vger.kernel.org \
    --cc=linux-kernel@vger.kernel.org \
    --cc=pbonzini@redhat.com \
    --cc=seanjc@google.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox