* [PATCH] KVM: SVM: Ensure SPEC_CTRL[63:32] is context switched between guest and host
@ 2025-11-06 1:13 Sean Christopherson
2025-11-06 18:21 ` Uros Bizjak
0 siblings, 1 reply; 3+ messages in thread
From: Sean Christopherson @ 2025-11-06 1:13 UTC (permalink / raw)
To: Sean Christopherson, Paolo Bonzini; +Cc: kvm, linux-kernel, Uros Bizjak
From: Uros Bizjak <ubizjak@gmail.com>
SPEC_CTRL is an MSR, i.e. a 64-bit value, but the VMRUN assembly code
assumes bits 63:32 are always zero. The bug is _currently_ benign because
neither KVM nor the kernel support setting any of bits 63:32, but it's
still a bug that needs to be fixed.
Signed-off-by: Uros Bizjak <ubizjak@gmail.com>
Suggested-by: Sean Christopherson <seanjc@google.com>
Co-developed-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Sean Christopherson <seanjc@google.com>
---
arch/x86/kvm/svm/vmenter.S | 44 +++++++++++++++++++++++++++++++-------
1 file changed, 36 insertions(+), 8 deletions(-)
diff --git a/arch/x86/kvm/svm/vmenter.S b/arch/x86/kvm/svm/vmenter.S
index 235c4af6b692..53f45f5b611f 100644
--- a/arch/x86/kvm/svm/vmenter.S
+++ b/arch/x86/kvm/svm/vmenter.S
@@ -52,11 +52,23 @@
* there must not be any returns or indirect branches between this code
* and vmentry.
*/
- movl SVM_spec_ctrl(%_ASM_DI), %eax
- cmp PER_CPU_VAR(x86_spec_ctrl_current), %eax
+#ifdef CONFIG_X86_64
+ mov SVM_spec_ctrl(%rdi), %rdx
+ cmp PER_CPU_VAR(x86_spec_ctrl_current), %rdx
je 801b
+ movl %edx, %eax
+ shr $32, %rdx
+#else
+ mov SVM_spec_ctrl(%edi), %eax
+ mov PER_CPU_VAR(x86_spec_ctrl_current), %ecx
+ xor %eax, %ecx
+ mov SVM_spec_ctrl + 4(%edi), %edx
+ mov PER_CPU_VAR(x86_spec_ctrl_current + 4), %esi
+ xor %edx, %esi
+ or %esi, %ecx
+ je 801b
+#endif
mov $MSR_IA32_SPEC_CTRL, %ecx
- xor %edx, %edx
wrmsr
jmp 801b
.endm
@@ -81,13 +93,26 @@
jnz 998f
rdmsr
movl %eax, SVM_spec_ctrl(%_ASM_DI)
+ movl %edx, SVM_spec_ctrl + 4(%_ASM_DI)
998:
-
/* Now restore the host value of the MSR if different from the guest's. */
- movl PER_CPU_VAR(x86_spec_ctrl_current), %eax
- cmp SVM_spec_ctrl(%_ASM_DI), %eax
+#ifdef CONFIG_X86_64
+ mov SVM_spec_ctrl(%rdi), %rdx
+ cmp PER_CPU_VAR(x86_spec_ctrl_current), %rdx
je 901b
- xor %edx, %edx
+ mov PER_CPU_VAR(x86_spec_ctrl_current), %rdx
+ movl %edx, %eax
+ shr $32, %rdx
+#else
+ mov SVM_spec_ctrl(%edi), %esi
+ mov PER_CPU_VAR(x86_spec_ctrl_current), %eax
+ xor %eax, %esi
+ mov SVM_spec_ctrl + 4(%edi), %edi
+ mov PER_CPU_VAR(x86_spec_ctrl_current + 4), %edx
+ xor %edx, %edi
+ or %edi, %esi
+ je 901b
+#endif
wrmsr
jmp 901b
.endm
@@ -211,7 +236,10 @@ SYM_FUNC_START(__svm_vcpu_run)
/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_VMEXIT
- /* Clobbers RAX, RCX, RDX. */
+ /*
+ * Clobbers RAX, RCX, RDX (and EDI on 32-bit), consumes RDI (@svm) and
+ * RSP (pointer to @spec_ctrl_intercepted).
+ */
RESTORE_HOST_SPEC_CTRL
/*
base-commit: a996dd2a5e1ec54dcf7d7b93915ea3f97e14e68a
--
2.51.2.1041.gc1ab5b90ca-goog
^ permalink raw reply related [flat|nested] 3+ messages in thread
* Re: [PATCH] KVM: SVM: Ensure SPEC_CTRL[63:32] is context switched between guest and host
2025-11-06 1:13 [PATCH] KVM: SVM: Ensure SPEC_CTRL[63:32] is context switched between guest and host Sean Christopherson
@ 2025-11-06 18:21 ` Uros Bizjak
2025-11-06 18:32 ` Sean Christopherson
0 siblings, 1 reply; 3+ messages in thread
From: Uros Bizjak @ 2025-11-06 18:21 UTC (permalink / raw)
To: Sean Christopherson; +Cc: Paolo Bonzini, kvm, linux-kernel
On Thu, Nov 6, 2025 at 2:13 AM Sean Christopherson <seanjc@google.com> wrote:
>
> From: Uros Bizjak <ubizjak@gmail.com>
>
> SPEC_CTRL is an MSR, i.e. a 64-bit value, but the VMRUN assembly code
> assumes bits 63:32 are always zero. The bug is _currently_ benign because
> neither KVM nor the kernel support setting any of bits 63:32, but it's
> still a bug that needs to be fixed.
>
> Signed-off-by: Uros Bizjak <ubizjak@gmail.com>
> Suggested-by: Sean Christopherson <seanjc@google.com>
> Co-developed-by: Sean Christopherson <seanjc@google.com>
> Signed-off-by: Sean Christopherson <seanjc@google.com>
> ---
> arch/x86/kvm/svm/vmenter.S | 44 +++++++++++++++++++++++++++++++-------
> 1 file changed, 36 insertions(+), 8 deletions(-)
>
> diff --git a/arch/x86/kvm/svm/vmenter.S b/arch/x86/kvm/svm/vmenter.S
> index 235c4af6b692..53f45f5b611f 100644
> --- a/arch/x86/kvm/svm/vmenter.S
> +++ b/arch/x86/kvm/svm/vmenter.S
> @@ -52,11 +52,23 @@
> * there must not be any returns or indirect branches between this code
> * and vmentry.
> */
> - movl SVM_spec_ctrl(%_ASM_DI), %eax
> - cmp PER_CPU_VAR(x86_spec_ctrl_current), %eax
> +#ifdef CONFIG_X86_64
> + mov SVM_spec_ctrl(%rdi), %rdx
> + cmp PER_CPU_VAR(x86_spec_ctrl_current), %rdx
> je 801b
> + movl %edx, %eax
> + shr $32, %rdx
> +#else
> + mov SVM_spec_ctrl(%edi), %eax
> + mov PER_CPU_VAR(x86_spec_ctrl_current), %ecx
> + xor %eax, %ecx
> + mov SVM_spec_ctrl + 4(%edi), %edx
> + mov PER_CPU_VAR(x86_spec_ctrl_current + 4), %esi
> + xor %edx, %esi
> + or %esi, %ecx
> + je 801b
> +#endif
> mov $MSR_IA32_SPEC_CTRL, %ecx
> - xor %edx, %edx
> wrmsr
> jmp 801b
> .endm
> @@ -81,13 +93,26 @@
> jnz 998f
> rdmsr
> movl %eax, SVM_spec_ctrl(%_ASM_DI)
> + movl %edx, SVM_spec_ctrl + 4(%_ASM_DI)
> 998:
> -
> /* Now restore the host value of the MSR if different from the guest's. */
> - movl PER_CPU_VAR(x86_spec_ctrl_current), %eax
> - cmp SVM_spec_ctrl(%_ASM_DI), %eax
> +#ifdef CONFIG_X86_64
> + mov SVM_spec_ctrl(%rdi), %rdx
> + cmp PER_CPU_VAR(x86_spec_ctrl_current), %rdx
> je 901b
> - xor %edx, %edx
> + mov PER_CPU_VAR(x86_spec_ctrl_current), %rdx
> + movl %edx, %eax
> + shr $32, %rdx
The above code can be written as:
mov PER_CPU_VAR(x86_spec_ctrl_current), %rdx
cmp SVM_spec_ctrl(%rdi), %rdx
je 901b
movl %edx, %eax
shr $32, %rdx
The improved code will save a memory read from x86_spec_ctrl_current.
> +#else
> + mov SVM_spec_ctrl(%edi), %esi
> + mov PER_CPU_VAR(x86_spec_ctrl_current), %eax
Can the above two instructions be swapped, just to be consistent with
x86_64 code?
> + xor %eax, %esi
> + mov SVM_spec_ctrl + 4(%edi), %edi
> + mov PER_CPU_VAR(x86_spec_ctrl_current + 4), %edx
... and the above two insns.
Uros.
^ permalink raw reply [flat|nested] 3+ messages in thread
* Re: [PATCH] KVM: SVM: Ensure SPEC_CTRL[63:32] is context switched between guest and host
2025-11-06 18:21 ` Uros Bizjak
@ 2025-11-06 18:32 ` Sean Christopherson
0 siblings, 0 replies; 3+ messages in thread
From: Sean Christopherson @ 2025-11-06 18:32 UTC (permalink / raw)
To: Uros Bizjak; +Cc: Paolo Bonzini, kvm, linux-kernel
On Thu, Nov 06, 2025, Uros Bizjak wrote:
> On Thu, Nov 6, 2025 at 2:13 AM Sean Christopherson <seanjc@google.com> wrote:
> > 998:
> > -
> > /* Now restore the host value of the MSR if different from the guest's. */
> > - movl PER_CPU_VAR(x86_spec_ctrl_current), %eax
> > - cmp SVM_spec_ctrl(%_ASM_DI), %eax
> > +#ifdef CONFIG_X86_64
> > + mov SVM_spec_ctrl(%rdi), %rdx
> > + cmp PER_CPU_VAR(x86_spec_ctrl_current), %rdx
> > je 901b
> > - xor %edx, %edx
> > + mov PER_CPU_VAR(x86_spec_ctrl_current), %rdx
> > + movl %edx, %eax
> > + shr $32, %rdx
>
> The above code can be written as:
>
> mov PER_CPU_VAR(x86_spec_ctrl_current), %rdx
> cmp SVM_spec_ctrl(%rdi), %rdx
Gah, that's obvious in hindsight.
> je 901b
> movl %edx, %eax
> shr $32, %rdx
>
> The improved code will save a memory read from x86_spec_ctrl_current.
>
> > +#else
> > + mov SVM_spec_ctrl(%edi), %esi
> > + mov PER_CPU_VAR(x86_spec_ctrl_current), %eax
>
> Can the above two instructions be swapped, just to be consistent with
> x86_64 code?
>
> > + xor %eax, %esi
>
> > + mov SVM_spec_ctrl + 4(%edi), %edi
> > + mov PER_CPU_VAR(x86_spec_ctrl_current + 4), %edx
>
> ... and the above two insns.
Ya, will do. Thanks!
^ permalink raw reply [flat|nested] 3+ messages in thread
end of thread, other threads:[~2025-11-06 18:32 UTC | newest]
Thread overview: 3+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2025-11-06 1:13 [PATCH] KVM: SVM: Ensure SPEC_CTRL[63:32] is context switched between guest and host Sean Christopherson
2025-11-06 18:21 ` Uros Bizjak
2025-11-06 18:32 ` Sean Christopherson
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox