* [PATCH] KVM: VMX: Micro-optimize SPEC_CTRL handling in __vmx_vcpu_run()
@ 2025-08-07 6:36 Uros Bizjak
2025-08-19 14:59 ` Sean Christopherson
0 siblings, 1 reply; 6+ messages in thread
From: Uros Bizjak @ 2025-08-07 6:36 UTC (permalink / raw)
To: kvm, x86, linux-kernel
Cc: Uros Bizjak, Sean Christopherson, Paolo Bonzini, Thomas Gleixner,
Ingo Molnar, Borislav Petkov, Dave Hansen, H. Peter Anvin
Use memory operand in CMP instruction to avoid usage of a
temporary register. Use %eax register to hold VMX_spec_ctrl
and use it directly in the follow-up WRMSR.
The new code saves a few bytes by removing two MOV insns, from:
2d: 48 8b 7c 24 10 mov 0x10(%rsp),%rdi
32: 8b bf 48 18 00 00 mov 0x1848(%rdi),%edi
38: 65 8b 35 00 00 00 00 mov %gs:0x0(%rip),%esi
3f: 39 fe cmp %edi,%esi
41: 74 0b je 4e <...>
43: b9 48 00 00 00 mov $0x48,%ecx
48: 31 d2 xor %edx,%edx
4a: 89 f8 mov %edi,%eax
4c: 0f 30 wrmsr
to:
2d: 48 8b 7c 24 10 mov 0x10(%rsp),%rdi
32: 8b 87 48 18 00 00 mov 0x1848(%rdi),%eax
38: 65 3b 05 00 00 00 00 cmp %gs:0x0(%rip),%eax
3f: 74 09 je 4a <...>
41: b9 48 00 00 00 mov $0x48,%ecx
46: 31 d2 xor %edx,%edx
48: 0f 30 wrmsr
No functional change intended.
Signed-off-by: Uros Bizjak <ubizjak@gmail.com>
Cc: Sean Christopherson <seanjc@google.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
---
arch/x86/kvm/vmx/vmenter.S | 6 ++----
1 file changed, 2 insertions(+), 4 deletions(-)
diff --git a/arch/x86/kvm/vmx/vmenter.S b/arch/x86/kvm/vmx/vmenter.S
index 0a6cf5bff2aa..c65de5de92ab 100644
--- a/arch/x86/kvm/vmx/vmenter.S
+++ b/arch/x86/kvm/vmx/vmenter.S
@@ -118,13 +118,11 @@ SYM_FUNC_START(__vmx_vcpu_run)
* and vmentry.
*/
mov 2*WORD_SIZE(%_ASM_SP), %_ASM_DI
- movl VMX_spec_ctrl(%_ASM_DI), %edi
- movl PER_CPU_VAR(x86_spec_ctrl_current), %esi
- cmp %edi, %esi
+ movl VMX_spec_ctrl(%_ASM_DI), %eax
+ cmp PER_CPU_VAR(x86_spec_ctrl_current), %eax
je .Lspec_ctrl_done
mov $MSR_IA32_SPEC_CTRL, %ecx
xor %edx, %edx
- mov %edi, %eax
wrmsr
.Lspec_ctrl_done:
--
2.50.1
^ permalink raw reply related [flat|nested] 6+ messages in thread
* Re: [PATCH] KVM: VMX: Micro-optimize SPEC_CTRL handling in __vmx_vcpu_run()
2025-08-07 6:36 [PATCH] KVM: VMX: Micro-optimize SPEC_CTRL handling in __vmx_vcpu_run() Uros Bizjak
@ 2025-08-19 14:59 ` Sean Christopherson
2025-08-19 16:24 ` Uros Bizjak
0 siblings, 1 reply; 6+ messages in thread
From: Sean Christopherson @ 2025-08-19 14:59 UTC (permalink / raw)
To: Uros Bizjak
Cc: kvm, x86, linux-kernel, Paolo Bonzini, Thomas Gleixner,
Ingo Molnar, Borislav Petkov, Dave Hansen, H. Peter Anvin
On Thu, Aug 07, 2025, Uros Bizjak wrote:
> Use memory operand in CMP instruction to avoid usage of a
> temporary register. Use %eax register to hold VMX_spec_ctrl
> and use it directly in the follow-up WRMSR.
>
> The new code saves a few bytes by removing two MOV insns, from:
>
> 2d: 48 8b 7c 24 10 mov 0x10(%rsp),%rdi
> 32: 8b bf 48 18 00 00 mov 0x1848(%rdi),%edi
> 38: 65 8b 35 00 00 00 00 mov %gs:0x0(%rip),%esi
> 3f: 39 fe cmp %edi,%esi
> 41: 74 0b je 4e <...>
> 43: b9 48 00 00 00 mov $0x48,%ecx
> 48: 31 d2 xor %edx,%edx
> 4a: 89 f8 mov %edi,%eax
> 4c: 0f 30 wrmsr
>
> to:
>
> 2d: 48 8b 7c 24 10 mov 0x10(%rsp),%rdi
> 32: 8b 87 48 18 00 00 mov 0x1848(%rdi),%eax
> 38: 65 3b 05 00 00 00 00 cmp %gs:0x0(%rip),%eax
> 3f: 74 09 je 4a <...>
> 41: b9 48 00 00 00 mov $0x48,%ecx
> 46: 31 d2 xor %edx,%edx
> 48: 0f 30 wrmsr
>
> No functional change intended.
>
> Signed-off-by: Uros Bizjak <ubizjak@gmail.com>
> Cc: Sean Christopherson <seanjc@google.com>
> Cc: Paolo Bonzini <pbonzini@redhat.com>
> Cc: Thomas Gleixner <tglx@linutronix.de>
> Cc: Ingo Molnar <mingo@kernel.org>
> Cc: Borislav Petkov <bp@alien8.de>
> Cc: Dave Hansen <dave.hansen@linux.intel.com>
> Cc: "H. Peter Anvin" <hpa@zytor.com>
> ---
> arch/x86/kvm/vmx/vmenter.S | 6 ++----
> 1 file changed, 2 insertions(+), 4 deletions(-)
>
> diff --git a/arch/x86/kvm/vmx/vmenter.S b/arch/x86/kvm/vmx/vmenter.S
> index 0a6cf5bff2aa..c65de5de92ab 100644
> --- a/arch/x86/kvm/vmx/vmenter.S
> +++ b/arch/x86/kvm/vmx/vmenter.S
> @@ -118,13 +118,11 @@ SYM_FUNC_START(__vmx_vcpu_run)
> * and vmentry.
> */
> mov 2*WORD_SIZE(%_ASM_SP), %_ASM_DI
> - movl VMX_spec_ctrl(%_ASM_DI), %edi
> - movl PER_CPU_VAR(x86_spec_ctrl_current), %esi
> - cmp %edi, %esi
> + movl VMX_spec_ctrl(%_ASM_DI), %eax
> + cmp PER_CPU_VAR(x86_spec_ctrl_current), %eax
Huh. There's a pre-existing bug lurking here, and in the SVM code. SPEC_CTRL
is an MSR, i.e. a 64-bit value, but the assembly code assumes bits 63:32 are always
zero.
So, while this patch looks good, I'm leaning toward skipping it and going straight
to a fix.
> je .Lspec_ctrl_done
> mov $MSR_IA32_SPEC_CTRL, %ecx
> xor %edx, %edx
> - mov %edi, %eax
> wrmsr
>
> .Lspec_ctrl_done:
> --
> 2.50.1
>
^ permalink raw reply [flat|nested] 6+ messages in thread
* Re: [PATCH] KVM: VMX: Micro-optimize SPEC_CTRL handling in __vmx_vcpu_run()
2025-08-19 14:59 ` Sean Christopherson
@ 2025-08-19 16:24 ` Uros Bizjak
2025-08-19 18:56 ` Sean Christopherson
0 siblings, 1 reply; 6+ messages in thread
From: Uros Bizjak @ 2025-08-19 16:24 UTC (permalink / raw)
To: Sean Christopherson
Cc: kvm, x86, linux-kernel, Paolo Bonzini, Thomas Gleixner,
Ingo Molnar, Borislav Petkov, Dave Hansen, H. Peter Anvin
On Tue, Aug 19, 2025 at 5:00 PM Sean Christopherson <seanjc@google.com> wrote:
>
> On Thu, Aug 07, 2025, Uros Bizjak wrote:
> > Use memory operand in CMP instruction to avoid usage of a
> > temporary register. Use %eax register to hold VMX_spec_ctrl
> > and use it directly in the follow-up WRMSR.
> >
> > The new code saves a few bytes by removing two MOV insns, from:
> >
> > 2d: 48 8b 7c 24 10 mov 0x10(%rsp),%rdi
> > 32: 8b bf 48 18 00 00 mov 0x1848(%rdi),%edi
> > 38: 65 8b 35 00 00 00 00 mov %gs:0x0(%rip),%esi
> > 3f: 39 fe cmp %edi,%esi
> > 41: 74 0b je 4e <...>
> > 43: b9 48 00 00 00 mov $0x48,%ecx
> > 48: 31 d2 xor %edx,%edx
> > 4a: 89 f8 mov %edi,%eax
> > 4c: 0f 30 wrmsr
> >
> > to:
> >
> > 2d: 48 8b 7c 24 10 mov 0x10(%rsp),%rdi
> > 32: 8b 87 48 18 00 00 mov 0x1848(%rdi),%eax
> > 38: 65 3b 05 00 00 00 00 cmp %gs:0x0(%rip),%eax
> > 3f: 74 09 je 4a <...>
> > 41: b9 48 00 00 00 mov $0x48,%ecx
> > 46: 31 d2 xor %edx,%edx
> > 48: 0f 30 wrmsr
> >
> > No functional change intended.
> >
> > Signed-off-by: Uros Bizjak <ubizjak@gmail.com>
> > Cc: Sean Christopherson <seanjc@google.com>
> > Cc: Paolo Bonzini <pbonzini@redhat.com>
> > Cc: Thomas Gleixner <tglx@linutronix.de>
> > Cc: Ingo Molnar <mingo@kernel.org>
> > Cc: Borislav Petkov <bp@alien8.de>
> > Cc: Dave Hansen <dave.hansen@linux.intel.com>
> > Cc: "H. Peter Anvin" <hpa@zytor.com>
> > ---
> > arch/x86/kvm/vmx/vmenter.S | 6 ++----
> > 1 file changed, 2 insertions(+), 4 deletions(-)
> >
> > diff --git a/arch/x86/kvm/vmx/vmenter.S b/arch/x86/kvm/vmx/vmenter.S
> > index 0a6cf5bff2aa..c65de5de92ab 100644
> > --- a/arch/x86/kvm/vmx/vmenter.S
> > +++ b/arch/x86/kvm/vmx/vmenter.S
> > @@ -118,13 +118,11 @@ SYM_FUNC_START(__vmx_vcpu_run)
> > * and vmentry.
> > */
> > mov 2*WORD_SIZE(%_ASM_SP), %_ASM_DI
> > - movl VMX_spec_ctrl(%_ASM_DI), %edi
> > - movl PER_CPU_VAR(x86_spec_ctrl_current), %esi
> > - cmp %edi, %esi
> > + movl VMX_spec_ctrl(%_ASM_DI), %eax
> > + cmp PER_CPU_VAR(x86_spec_ctrl_current), %eax
>
> Huh. There's a pre-existing bug lurking here, and in the SVM code. SPEC_CTRL
> is an MSR, i.e. a 64-bit value, but the assembly code assumes bits 63:32 are always
> zero.
But MSBs are zero, MSR is defined in arch/x86/include/msr-index.h as:
#define MSR_IA32_SPEC_CTRL 0x00000048 /* Speculation Control */
and "movl $..., %eax" zero-extends the value to full 64-bit width.
FWIW, MSR_IA32_SPEC_CTRL is handled in the same way in arch/x86/entry/entry.S:
movl $MSR_IA32_PRED_CMD, %ecx
So, the insn is OK when the MSR index fits in 32 bits. The insn is a bit smaller, too:
movl $0x01, %ecx
movq $0x01, %rcx
assembles to:
0: b9 01 00 00 00 mov $0x1,%ecx
5: 48 c7 c1 01 00 00 00 mov $0x1,%rcx
Rest assured that the assembler checks the immediate when 32-bit
registers are involved, e.g.:
mov.s: Assembler messages:
mov.s:1: Warning: 0xaaaaaaaaaa shortened to 0xaaaaaaaa
Uros.
^ permalink raw reply [flat|nested] 6+ messages in thread
* Re: [PATCH] KVM: VMX: Micro-optimize SPEC_CTRL handling in __vmx_vcpu_run()
2025-08-19 16:24 ` Uros Bizjak
@ 2025-08-19 18:56 ` Sean Christopherson
2025-08-20 6:10 ` Uros Bizjak
0 siblings, 1 reply; 6+ messages in thread
From: Sean Christopherson @ 2025-08-19 18:56 UTC (permalink / raw)
To: Uros Bizjak
Cc: kvm, x86, linux-kernel, Paolo Bonzini, Thomas Gleixner,
Ingo Molnar, Borislav Petkov, Dave Hansen, H. Peter Anvin
On Tue, Aug 19, 2025, Uros Bizjak wrote:
> > > 2d: 48 8b 7c 24 10 mov 0x10(%rsp),%rdi
> > > 32: 8b 87 48 18 00 00 mov 0x1848(%rdi),%eax
> > > 38: 65 3b 05 00 00 00 00 cmp %gs:0x0(%rip),%eax
> > > 3f: 74 09 je 4a <...>
> > > 41: b9 48 00 00 00 mov $0x48,%ecx
> > > 46: 31 d2 xor %edx,%edx
> > > 48: 0f 30 wrmsr
> > >
> > > No functional change intended.
> > >
> > > Signed-off-by: Uros Bizjak <ubizjak@gmail.com>
> > > Cc: Sean Christopherson <seanjc@google.com>
> > > Cc: Paolo Bonzini <pbonzini@redhat.com>
> > > Cc: Thomas Gleixner <tglx@linutronix.de>
> > > Cc: Ingo Molnar <mingo@kernel.org>
> > > Cc: Borislav Petkov <bp@alien8.de>
> > > Cc: Dave Hansen <dave.hansen@linux.intel.com>
> > > Cc: "H. Peter Anvin" <hpa@zytor.com>
> > > ---
> > > arch/x86/kvm/vmx/vmenter.S | 6 ++----
> > > 1 file changed, 2 insertions(+), 4 deletions(-)
> > >
> > > diff --git a/arch/x86/kvm/vmx/vmenter.S b/arch/x86/kvm/vmx/vmenter.S
> > > index 0a6cf5bff2aa..c65de5de92ab 100644
> > > --- a/arch/x86/kvm/vmx/vmenter.S
> > > +++ b/arch/x86/kvm/vmx/vmenter.S
> > > @@ -118,13 +118,11 @@ SYM_FUNC_START(__vmx_vcpu_run)
> > > * and vmentry.
> > > */
> > > mov 2*WORD_SIZE(%_ASM_SP), %_ASM_DI
> > > - movl VMX_spec_ctrl(%_ASM_DI), %edi
> > > - movl PER_CPU_VAR(x86_spec_ctrl_current), %esi
> > > - cmp %edi, %esi
> > > + movl VMX_spec_ctrl(%_ASM_DI), %eax
> > > + cmp PER_CPU_VAR(x86_spec_ctrl_current), %eax
> >
> > Huh. There's a pre-existing bug lurking here, and in the SVM code. SPEC_CTRL
> > is an MSR, i.e. a 64-bit value, but the assembly code assumes bits 63:32 are always
> > zero.
>
> But MSBs are zero, MSR is defined in arch/x86/include/msr-index.h as:
>
> #define MSR_IA32_SPEC_CTRL 0x00000048 /* Speculation Control */
>
> and "movl $..., %eax" zero-extends the value to full 64-bit width.
>
> FWIW, MSR_IA32_SPEC_CTRL is handled in the same way in arch/x86/entry/entry.S:
>
> movl $MSR_IA32_PRED_CMD, %ecx
That's the MSR index, not the value. I'm pointing out that:
movl VMX_spec_ctrl(%_ASM_DI), %edi <== drops vmx->spec_ctrl[63:32]
movl PER_CPU_VAR(x86_spec_ctrl_current), %esi <== drops x86_spec_ctrl_current[63:32]
cmp %edi, %esi <== can get false negatives
je .Lspec_ctrl_done
mov $MSR_IA32_SPEC_CTRL, %ecx
xor %edx, %edx <== can clobber guest value
mov %edi, %eax
wrmsr
The bug is _currently_ benign because neither KVM nor the kernel support setting
any of bits 63:32, but it's still a bug that needs to be fixed.
^ permalink raw reply [flat|nested] 6+ messages in thread
* Re: [PATCH] KVM: VMX: Micro-optimize SPEC_CTRL handling in __vmx_vcpu_run()
2025-08-19 18:56 ` Sean Christopherson
@ 2025-08-20 6:10 ` Uros Bizjak
2025-08-20 11:50 ` Uros Bizjak
0 siblings, 1 reply; 6+ messages in thread
From: Uros Bizjak @ 2025-08-20 6:10 UTC (permalink / raw)
To: Sean Christopherson
Cc: kvm, x86, linux-kernel, Paolo Bonzini, Thomas Gleixner,
Ingo Molnar, Borislav Petkov, Dave Hansen, H. Peter Anvin
On Tue, Aug 19, 2025 at 8:56 PM Sean Christopherson <seanjc@google.com> wrote:
>
> On Tue, Aug 19, 2025, Uros Bizjak wrote:
> > > > 2d: 48 8b 7c 24 10 mov 0x10(%rsp),%rdi
> > > > 32: 8b 87 48 18 00 00 mov 0x1848(%rdi),%eax
> > > > 38: 65 3b 05 00 00 00 00 cmp %gs:0x0(%rip),%eax
> > > > 3f: 74 09 je 4a <...>
> > > > 41: b9 48 00 00 00 mov $0x48,%ecx
> > > > 46: 31 d2 xor %edx,%edx
> > > > 48: 0f 30 wrmsr
> > > >
> > > > No functional change intended.
> > > >
> > > > Signed-off-by: Uros Bizjak <ubizjak@gmail.com>
> > > > Cc: Sean Christopherson <seanjc@google.com>
> > > > Cc: Paolo Bonzini <pbonzini@redhat.com>
> > > > Cc: Thomas Gleixner <tglx@linutronix.de>
> > > > Cc: Ingo Molnar <mingo@kernel.org>
> > > > Cc: Borislav Petkov <bp@alien8.de>
> > > > Cc: Dave Hansen <dave.hansen@linux.intel.com>
> > > > Cc: "H. Peter Anvin" <hpa@zytor.com>
> > > > ---
> > > > arch/x86/kvm/vmx/vmenter.S | 6 ++----
> > > > 1 file changed, 2 insertions(+), 4 deletions(-)
> > > >
> > > > diff --git a/arch/x86/kvm/vmx/vmenter.S b/arch/x86/kvm/vmx/vmenter.S
> > > > index 0a6cf5bff2aa..c65de5de92ab 100644
> > > > --- a/arch/x86/kvm/vmx/vmenter.S
> > > > +++ b/arch/x86/kvm/vmx/vmenter.S
> > > > @@ -118,13 +118,11 @@ SYM_FUNC_START(__vmx_vcpu_run)
> > > > * and vmentry.
> > > > */
> > > > mov 2*WORD_SIZE(%_ASM_SP), %_ASM_DI
> > > > - movl VMX_spec_ctrl(%_ASM_DI), %edi
> > > > - movl PER_CPU_VAR(x86_spec_ctrl_current), %esi
> > > > - cmp %edi, %esi
> > > > + movl VMX_spec_ctrl(%_ASM_DI), %eax
> > > > + cmp PER_CPU_VAR(x86_spec_ctrl_current), %eax
> > >
> > > Huh. There's a pre-existing bug lurking here, and in the SVM code. SPEC_CTRL
> > > is an MSR, i.e. a 64-bit value, but the assembly code assumes bits 63:32 are always
> > > zero.
> >
> > But MSBs are zero, MSR is defined in arch/x86/include/msr-index.h as:
> >
> > #define MSR_IA32_SPEC_CTRL 0x00000048 /* Speculation Control */
> >
> > and "movl $..., %eax" zero-extends the value to full 64-bit width.
> >
> > FWIW, MSR_IA32_SPEC_CTRL is handled in the same way in arch/x86/entry/entry.S:
> >
> > movl $MSR_IA32_PRED_CMD, %ecx
>
> That's the MSR index, not the value. I'm pointing out that:
>
> movl VMX_spec_ctrl(%_ASM_DI), %edi <== drops vmx->spec_ctrl[63:32]
> movl PER_CPU_VAR(x86_spec_ctrl_current), %esi <== drops x86_spec_ctrl_current[63:32]
> cmp %edi, %esi <== can get false negatives
> je .Lspec_ctrl_done
> mov $MSR_IA32_SPEC_CTRL, %ecx
> xor %edx, %edx <== can clobber guest value
> mov %edi, %eax
> wrmsr
>
> The bug is _currently_ benign because neither KVM nor the kernel support setting
> any of bits 63:32, but it's still a bug that needs to be fixed.
Oh, I see it. Let me try to fix it in a new patch.
Thanks,
Uros.
^ permalink raw reply [flat|nested] 6+ messages in thread
* Re: [PATCH] KVM: VMX: Micro-optimize SPEC_CTRL handling in __vmx_vcpu_run()
2025-08-20 6:10 ` Uros Bizjak
@ 2025-08-20 11:50 ` Uros Bizjak
0 siblings, 0 replies; 6+ messages in thread
From: Uros Bizjak @ 2025-08-20 11:50 UTC (permalink / raw)
To: Sean Christopherson
Cc: kvm, x86, linux-kernel, Paolo Bonzini, Thomas Gleixner,
Ingo Molnar, Borislav Petkov, Dave Hansen, H. Peter Anvin
[-- Attachment #1: Type: text/plain, Size: 3654 bytes --]
On Wed, Aug 20, 2025 at 8:10 AM Uros Bizjak <ubizjak@gmail.com> wrote:
>
> On Tue, Aug 19, 2025 at 8:56 PM Sean Christopherson <seanjc@google.com> wrote:
> >
> > On Tue, Aug 19, 2025, Uros Bizjak wrote:
> > > > > 2d: 48 8b 7c 24 10 mov 0x10(%rsp),%rdi
> > > > > 32: 8b 87 48 18 00 00 mov 0x1848(%rdi),%eax
> > > > > 38: 65 3b 05 00 00 00 00 cmp %gs:0x0(%rip),%eax
> > > > > 3f: 74 09 je 4a <...>
> > > > > 41: b9 48 00 00 00 mov $0x48,%ecx
> > > > > 46: 31 d2 xor %edx,%edx
> > > > > 48: 0f 30 wrmsr
> > > > >
> > > > > No functional change intended.
> > > > >
> > > > > Signed-off-by: Uros Bizjak <ubizjak@gmail.com>
> > > > > Cc: Sean Christopherson <seanjc@google.com>
> > > > > Cc: Paolo Bonzini <pbonzini@redhat.com>
> > > > > Cc: Thomas Gleixner <tglx@linutronix.de>
> > > > > Cc: Ingo Molnar <mingo@kernel.org>
> > > > > Cc: Borislav Petkov <bp@alien8.de>
> > > > > Cc: Dave Hansen <dave.hansen@linux.intel.com>
> > > > > Cc: "H. Peter Anvin" <hpa@zytor.com>
> > > > > ---
> > > > > arch/x86/kvm/vmx/vmenter.S | 6 ++----
> > > > > 1 file changed, 2 insertions(+), 4 deletions(-)
> > > > >
> > > > > diff --git a/arch/x86/kvm/vmx/vmenter.S b/arch/x86/kvm/vmx/vmenter.S
> > > > > index 0a6cf5bff2aa..c65de5de92ab 100644
> > > > > --- a/arch/x86/kvm/vmx/vmenter.S
> > > > > +++ b/arch/x86/kvm/vmx/vmenter.S
> > > > > @@ -118,13 +118,11 @@ SYM_FUNC_START(__vmx_vcpu_run)
> > > > > * and vmentry.
> > > > > */
> > > > > mov 2*WORD_SIZE(%_ASM_SP), %_ASM_DI
> > > > > - movl VMX_spec_ctrl(%_ASM_DI), %edi
> > > > > - movl PER_CPU_VAR(x86_spec_ctrl_current), %esi
> > > > > - cmp %edi, %esi
> > > > > + movl VMX_spec_ctrl(%_ASM_DI), %eax
> > > > > + cmp PER_CPU_VAR(x86_spec_ctrl_current), %eax
> > > >
> > > > Huh. There's a pre-existing bug lurking here, and in the SVM code. SPEC_CTRL
> > > > is an MSR, i.e. a 64-bit value, but the assembly code assumes bits 63:32 are always
> > > > zero.
> > >
> > > But MSBs are zero, MSR is defined in arch/x86/include/msr-index.h as:
> > >
> > > #define MSR_IA32_SPEC_CTRL 0x00000048 /* Speculation Control */
> > >
> > > and "movl $..., %eax" zero-extends the value to full 64-bit width.
> > >
> > > FWIW, MSR_IA32_SPEC_CTRL is handled in the same way in arch/x86/entry/entry.S:
> > >
> > > movl $MSR_IA32_PRED_CMD, %ecx
> >
> > That's the MSR index, not the value. I'm pointing out that:
> >
> > movl VMX_spec_ctrl(%_ASM_DI), %edi <== drops vmx->spec_ctrl[63:32]
> > movl PER_CPU_VAR(x86_spec_ctrl_current), %esi <== drops x86_spec_ctrl_current[63:32]
> > cmp %edi, %esi <== can get false negatives
> > je .Lspec_ctrl_done
> > mov $MSR_IA32_SPEC_CTRL, %ecx
> > xor %edx, %edx <== can clobber guest value
> > mov %edi, %eax
> > wrmsr
> >
> > The bug is _currently_ benign because neither KVM nor the kernel support setting
> > any of bits 63:32, but it's still a bug that needs to be fixed.
>
> Oh, I see it. Let me try to fix it in a new patch.
VMX patch is at [1]. SVM patch is a bit more involved, because new
32-bit code needs to clobber one additional register. The SVM patch is
attached to this message, but while I compile tested it, I have no
means of testing it with runtime tests. Can you please put it through
your torture tests?
[1] https://lore.kernel.org/lkml/20250820100007.356761-1-ubizjak@gmail.com/
Uros.
[-- Attachment #2: svm-vmenter.diff.txt --]
[-- Type: text/plain, Size: 1763 bytes --]
diff --git a/arch/x86/kvm/svm/vmenter.S b/arch/x86/kvm/svm/vmenter.S
index 235c4af6b692..a1b9f2ac713c 100644
--- a/arch/x86/kvm/svm/vmenter.S
+++ b/arch/x86/kvm/svm/vmenter.S
@@ -52,11 +52,23 @@
* there must not be any returns or indirect branches between this code
* and vmentry.
*/
- movl SVM_spec_ctrl(%_ASM_DI), %eax
- cmp PER_CPU_VAR(x86_spec_ctrl_current), %eax
+#ifdef CONFIG_X86_64
+ mov SVM_spec_ctrl(%rdi), %rdx
+ cmp PER_CPU_VAR(x86_spec_ctrl_current), %rdx
+ je 801b
+ movl %edx, %eax
+ shr $32, %rdx
+#else
+ mov SVM_spec_ctrl(%edi), %eax
+ mov PER_CPU_VAR(x86_spec_ctrl_current), %ecx
+ xor %eax, %ecx
+ mov SVM_spec_ctrl + 4(%edi), %edx
+ mov PER_CPU_VAR(x86_spec_ctrl_current + 4), %esi
+ xor %edx, %esi
+ or %esi, %ecx
je 801b
+#endif
mov $MSR_IA32_SPEC_CTRL, %ecx
- xor %edx, %edx
wrmsr
jmp 801b
.endm
@@ -80,14 +92,31 @@
cmpb $0, \spec_ctrl_intercepted
jnz 998f
rdmsr
- movl %eax, SVM_spec_ctrl(%_ASM_DI)
+#ifdef CONFIG_X86_64
+ shl $32, %rdx
+ or %rax, %rdx
+ mov %rdx, SVM_spec_ctrl(%rdi)
998:
-
/* Now restore the host value of the MSR if different from the guest's. */
- movl PER_CPU_VAR(x86_spec_ctrl_current), %eax
- cmp SVM_spec_ctrl(%_ASM_DI), %eax
+ mov SVM_spec_ctrl(%rdi), %rdx
+ cmp PER_CPU_VAR(x86_spec_ctrl_current), %rdx
je 901b
- xor %edx, %edx
+ movl %edx, %eax
+ shr $32, %rdx
+#else
+ mov %eax, SVM_spec_ctrl(%edi)
+ mov %edx, SVM_spec_ctrl + 4(%edi)
+998:
+ /* Now restore the host value of the MSR if different from the guest's. */
+ mov SVM_spec_ctrl(%edi), %eax
+ mov PER_CPU_VAR(x86_spec_ctrl_current), %esi
+ xor %eax, %esi
+ mov SVM_spec_ctrl + 4(%edi), %edx
+ mov PER_CPU_VAR(x86_spec_ctrl_current + 4), %edi
+ xor %edx, %edi
+ or %edi, %esi
+ je 901b
+#endif
wrmsr
jmp 901b
.endm
^ permalink raw reply related [flat|nested] 6+ messages in thread
end of thread, other threads:[~2025-08-20 11:51 UTC | newest]
Thread overview: 6+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2025-08-07 6:36 [PATCH] KVM: VMX: Micro-optimize SPEC_CTRL handling in __vmx_vcpu_run() Uros Bizjak
2025-08-19 14:59 ` Sean Christopherson
2025-08-19 16:24 ` Uros Bizjak
2025-08-19 18:56 ` Sean Christopherson
2025-08-20 6:10 ` Uros Bizjak
2025-08-20 11:50 ` Uros Bizjak
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).