* [PATCH 0/3] Fix #GP at wrong rip for mov cr instructions
@ 2010-06-10 14:02 Avi Kivity
2010-06-10 14:02 ` [PATCH 1/3] KVM: Fix mov cr0 #GP at wrong instruction Avi Kivity
` (3 more replies)
0 siblings, 4 replies; 5+ messages in thread
From: Avi Kivity @ 2010-06-10 14:02 UTC (permalink / raw)
To: Marcelo Tosatti, Sheng Yang; +Cc: kvm
Three mov cr instructions fail in a similar way when they fault: the
fault rip points at the next instruction instead of the faulting
instruction. This patch set fixes the problem.
Avi Kivity (3):
KVM: Fix mov cr0 #GP at wrong instruction
KVM: Fix mov cr4 #GP at wrong instruction
KVM: Fix mov cr3 #GP at wrong instruction
arch/x86/include/asm/kvm_host.h | 6 +++---
arch/x86/kvm/mmu.c | 2 +-
arch/x86/kvm/svm.c | 6 +++---
arch/x86/kvm/vmx.c | 21 +++++++++++++++------
arch/x86/kvm/x86.c | 32 +++++++-------------------------
5 files changed, 29 insertions(+), 38 deletions(-)
^ permalink raw reply [flat|nested] 5+ messages in thread
* [PATCH 1/3] KVM: Fix mov cr0 #GP at wrong instruction
2010-06-10 14:02 [PATCH 0/3] Fix #GP at wrong rip for mov cr instructions Avi Kivity
@ 2010-06-10 14:02 ` Avi Kivity
2010-06-10 14:02 ` [PATCH 2/3] KVM: Fix mov cr4 " Avi Kivity
` (2 subsequent siblings)
3 siblings, 0 replies; 5+ messages in thread
From: Avi Kivity @ 2010-06-10 14:02 UTC (permalink / raw)
To: Marcelo Tosatti, Sheng Yang; +Cc: kvm
On Intel, we call skip_emulated_instruction() even if we injected a #GP,
resulting in the #GP pointing at the wrong address.
Fix by injecting the exception and skipping the instruction at the same place,
so we can do just one or the other.
Signed-off-by: Avi Kivity <avi@redhat.com>
---
arch/x86/include/asm/kvm_host.h | 2 +-
arch/x86/kvm/svm.c | 2 +-
arch/x86/kvm/vmx.c | 13 +++++++++++--
arch/x86/kvm/x86.c | 12 +++---------
4 files changed, 16 insertions(+), 13 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 0cd0f29..f0683c8 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -596,7 +596,7 @@ int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, int seg);
int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason,
bool has_error_code, u32 error_code);
-void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
+int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
void kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8);
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 2ae0c39..6d1616d 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -807,7 +807,7 @@ static void init_vmcb(struct vcpu_svm *svm)
* svm_set_cr0() sets PG and WP and clears NW and CD on save->cr0.
*/
svm->vcpu.arch.cr0 = X86_CR0_NW | X86_CR0_CD | X86_CR0_ET;
- kvm_set_cr0(&svm->vcpu, svm->vcpu.arch.cr0);
+ (void)kvm_set_cr0(&svm->vcpu, svm->vcpu.arch.cr0);
save->cr4 = X86_CR4_PAE;
/* rdx = ?? */
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 7fa6ea7..a6a5121 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -3158,11 +3158,20 @@ vmx_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
hypercall[2] = 0xc1;
}
+static void complete_insn_gp(struct kvm_vcpu *vcpu, int err)
+{
+ if (err)
+ kvm_inject_gp(vcpu, 0);
+ else
+ skip_emulated_instruction(vcpu);
+}
+
static int handle_cr(struct kvm_vcpu *vcpu)
{
unsigned long exit_qualification, val;
int cr;
int reg;
+ int err;
exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
cr = exit_qualification & 15;
@@ -3173,8 +3182,8 @@ static int handle_cr(struct kvm_vcpu *vcpu)
trace_kvm_cr_write(cr, val);
switch (cr) {
case 0:
- kvm_set_cr0(vcpu, val);
- skip_emulated_instruction(vcpu);
+ err = kvm_set_cr0(vcpu, val);
+ complete_insn_gp(vcpu, err);
return 1;
case 3:
kvm_set_cr3(vcpu, val);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 5fa8684..72df8fd 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -417,7 +417,7 @@ out:
return changed;
}
-static int __kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
+int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
unsigned long old_cr0 = kvm_read_cr0(vcpu);
unsigned long update_bits = X86_CR0_PG | X86_CR0_WP |
@@ -460,17 +460,11 @@ static int __kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
kvm_mmu_reset_context(vcpu);
return 0;
}
-
-void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
-{
- if (__kvm_set_cr0(vcpu, cr0))
- kvm_inject_gp(vcpu, 0);
-}
EXPORT_SYMBOL_GPL(kvm_set_cr0);
void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
{
- kvm_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~0x0eul) | (msw & 0x0f));
+ (void)kvm_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~0x0eul) | (msw & 0x0f));
}
EXPORT_SYMBOL_GPL(kvm_lmsw);
@@ -3652,7 +3646,7 @@ static int emulator_set_cr(int cr, unsigned long val, struct kvm_vcpu *vcpu)
switch (cr) {
case 0:
- res = __kvm_set_cr0(vcpu, mk_cr_64(kvm_read_cr0(vcpu), val));
+ res = kvm_set_cr0(vcpu, mk_cr_64(kvm_read_cr0(vcpu), val));
break;
case 2:
vcpu->arch.cr2 = val;
--
1.7.1
^ permalink raw reply related [flat|nested] 5+ messages in thread
* [PATCH 2/3] KVM: Fix mov cr4 #GP at wrong instruction
2010-06-10 14:02 [PATCH 0/3] Fix #GP at wrong rip for mov cr instructions Avi Kivity
2010-06-10 14:02 ` [PATCH 1/3] KVM: Fix mov cr0 #GP at wrong instruction Avi Kivity
@ 2010-06-10 14:02 ` Avi Kivity
2010-06-10 14:02 ` [PATCH 3/3] KVM: Fix mov cr3 " Avi Kivity
2010-06-11 17:09 ` [PATCH 0/3] Fix #GP at wrong rip for mov cr instructions Marcelo Tosatti
3 siblings, 0 replies; 5+ messages in thread
From: Avi Kivity @ 2010-06-10 14:02 UTC (permalink / raw)
To: Marcelo Tosatti, Sheng Yang; +Cc: kvm
On Intel, we call skip_emulated_instruction() even if we injected a #GP,
resulting in the #GP pointing at the wrong address.
Fix by injecting the exception and skipping the instruction at the same place,
so we can do just one or the other.
Signed-off-by: Avi Kivity <avi@redhat.com>
---
arch/x86/include/asm/kvm_host.h | 2 +-
arch/x86/kvm/vmx.c | 4 ++--
arch/x86/kvm/x86.c | 10 ++--------
3 files changed, 5 insertions(+), 11 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index f0683c8..f52196d 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -598,7 +598,7 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason,
int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
void kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
-void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
+int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8);
int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val);
int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val);
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index a6a5121..9d083e9 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -3190,8 +3190,8 @@ static int handle_cr(struct kvm_vcpu *vcpu)
skip_emulated_instruction(vcpu);
return 1;
case 4:
- kvm_set_cr4(vcpu, val);
- skip_emulated_instruction(vcpu);
+ err = kvm_set_cr4(vcpu, val);
+ complete_insn_gp(vcpu, err);
return 1;
case 8: {
u8 cr8_prev = kvm_get_cr8(vcpu);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 72df8fd..60b5154 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -468,7 +468,7 @@ void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
}
EXPORT_SYMBOL_GPL(kvm_lmsw);
-int __kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
+int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
unsigned long old_cr4 = kvm_read_cr4(vcpu);
unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE;
@@ -494,12 +494,6 @@ int __kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
return 0;
}
-
-void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
-{
- if (__kvm_set_cr4(vcpu, cr4))
- kvm_inject_gp(vcpu, 0);
-}
EXPORT_SYMBOL_GPL(kvm_set_cr4);
static int __kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
@@ -3655,7 +3649,7 @@ static int emulator_set_cr(int cr, unsigned long val, struct kvm_vcpu *vcpu)
res = __kvm_set_cr3(vcpu, val);
break;
case 4:
- res = __kvm_set_cr4(vcpu, mk_cr_64(kvm_read_cr4(vcpu), val));
+ res = kvm_set_cr4(vcpu, mk_cr_64(kvm_read_cr4(vcpu), val));
break;
case 8:
res = __kvm_set_cr8(vcpu, val & 0xfUL);
--
1.7.1
^ permalink raw reply related [flat|nested] 5+ messages in thread
* [PATCH 3/3] KVM: Fix mov cr3 #GP at wrong instruction
2010-06-10 14:02 [PATCH 0/3] Fix #GP at wrong rip for mov cr instructions Avi Kivity
2010-06-10 14:02 ` [PATCH 1/3] KVM: Fix mov cr0 #GP at wrong instruction Avi Kivity
2010-06-10 14:02 ` [PATCH 2/3] KVM: Fix mov cr4 " Avi Kivity
@ 2010-06-10 14:02 ` Avi Kivity
2010-06-11 17:09 ` [PATCH 0/3] Fix #GP at wrong rip for mov cr instructions Marcelo Tosatti
3 siblings, 0 replies; 5+ messages in thread
From: Avi Kivity @ 2010-06-10 14:02 UTC (permalink / raw)
To: Marcelo Tosatti, Sheng Yang; +Cc: kvm
On Intel, we call skip_emulated_instruction() even if we injected a #GP,
resulting in the #GP pointing at the wrong address.
Fix by injecting the exception and skipping the instruction at the same place,
so we can do just one or the other.
Signed-off-by: Avi Kivity <avi@redhat.com>
---
arch/x86/include/asm/kvm_host.h | 2 +-
arch/x86/kvm/mmu.c | 2 +-
arch/x86/kvm/svm.c | 4 ++--
arch/x86/kvm/vmx.c | 4 ++--
arch/x86/kvm/x86.c | 10 ++--------
5 files changed, 8 insertions(+), 14 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index f52196d..b127041 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -597,7 +597,7 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason,
bool has_error_code, u32 error_code);
int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
-void kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
+int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8);
int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val);
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 75ddaa1..fcf5555 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3201,7 +3201,7 @@ static int kvm_pv_mmu_write(struct kvm_vcpu *vcpu,
static int kvm_pv_mmu_flush_tlb(struct kvm_vcpu *vcpu)
{
- kvm_set_cr3(vcpu, vcpu->arch.cr3);
+ (void)kvm_set_cr3(vcpu, vcpu->arch.cr3);
return 1;
}
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 6d1616d..f7a6fdc 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1963,7 +1963,7 @@ static int nested_svm_vmexit(struct vcpu_svm *svm)
svm->vmcb->save.cr3 = hsave->save.cr3;
svm->vcpu.arch.cr3 = hsave->save.cr3;
} else {
- kvm_set_cr3(&svm->vcpu, hsave->save.cr3);
+ (void)kvm_set_cr3(&svm->vcpu, hsave->save.cr3);
}
kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, hsave->save.rax);
kvm_register_write(&svm->vcpu, VCPU_REGS_RSP, hsave->save.rsp);
@@ -2086,7 +2086,7 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm)
svm->vmcb->save.cr3 = nested_vmcb->save.cr3;
svm->vcpu.arch.cr3 = nested_vmcb->save.cr3;
} else
- kvm_set_cr3(&svm->vcpu, nested_vmcb->save.cr3);
+ (void)kvm_set_cr3(&svm->vcpu, nested_vmcb->save.cr3);
/* Guest paging mode is active - reset mmu */
kvm_mmu_reset_context(&svm->vcpu);
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 9d083e9..f15f6bd 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -3186,8 +3186,8 @@ static int handle_cr(struct kvm_vcpu *vcpu)
complete_insn_gp(vcpu, err);
return 1;
case 3:
- kvm_set_cr3(vcpu, val);
- skip_emulated_instruction(vcpu);
+ err = kvm_set_cr3(vcpu, val);
+ complete_insn_gp(vcpu, err);
return 1;
case 4:
err = kvm_set_cr4(vcpu, val);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 60b5154..b31d3fe 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -496,7 +496,7 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
}
EXPORT_SYMBOL_GPL(kvm_set_cr4);
-static int __kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
+int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
if (cr3 == vcpu->arch.cr3 && !pdptrs_changed(vcpu)) {
kvm_mmu_sync_roots(vcpu);
@@ -535,12 +535,6 @@ static int __kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
vcpu->arch.mmu.new_cr3(vcpu);
return 0;
}
-
-void kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
-{
- if (__kvm_set_cr3(vcpu, cr3))
- kvm_inject_gp(vcpu, 0);
-}
EXPORT_SYMBOL_GPL(kvm_set_cr3);
int __kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
@@ -3646,7 +3640,7 @@ static int emulator_set_cr(int cr, unsigned long val, struct kvm_vcpu *vcpu)
vcpu->arch.cr2 = val;
break;
case 3:
- res = __kvm_set_cr3(vcpu, val);
+ res = kvm_set_cr3(vcpu, val);
break;
case 4:
res = kvm_set_cr4(vcpu, mk_cr_64(kvm_read_cr4(vcpu), val));
--
1.7.1
^ permalink raw reply related [flat|nested] 5+ messages in thread
* Re: [PATCH 0/3] Fix #GP at wrong rip for mov cr instructions
2010-06-10 14:02 [PATCH 0/3] Fix #GP at wrong rip for mov cr instructions Avi Kivity
` (2 preceding siblings ...)
2010-06-10 14:02 ` [PATCH 3/3] KVM: Fix mov cr3 " Avi Kivity
@ 2010-06-11 17:09 ` Marcelo Tosatti
3 siblings, 0 replies; 5+ messages in thread
From: Marcelo Tosatti @ 2010-06-11 17:09 UTC (permalink / raw)
To: Avi Kivity; +Cc: Sheng Yang, kvm
On Thu, Jun 10, 2010 at 05:02:13PM +0300, Avi Kivity wrote:
> Three mov cr instructions fail in a similar way when they fault: the
> fault rip points at the next instruction instead of the faulting
> instruction. This patch set fixes the problem.
>
>
> Avi Kivity (3):
> KVM: Fix mov cr0 #GP at wrong instruction
> KVM: Fix mov cr4 #GP at wrong instruction
> KVM: Fix mov cr3 #GP at wrong instruction
>
> arch/x86/include/asm/kvm_host.h | 6 +++---
> arch/x86/kvm/mmu.c | 2 +-
> arch/x86/kvm/svm.c | 6 +++---
> arch/x86/kvm/vmx.c | 21 +++++++++++++++------
> arch/x86/kvm/x86.c | 32 +++++++-------------------------
> 5 files changed, 29 insertions(+), 38 deletions(-)
Applied, thanks.
^ permalink raw reply [flat|nested] 5+ messages in thread
end of thread, other threads:[~2010-06-11 17:47 UTC | newest]
Thread overview: 5+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2010-06-10 14:02 [PATCH 0/3] Fix #GP at wrong rip for mov cr instructions Avi Kivity
2010-06-10 14:02 ` [PATCH 1/3] KVM: Fix mov cr0 #GP at wrong instruction Avi Kivity
2010-06-10 14:02 ` [PATCH 2/3] KVM: Fix mov cr4 " Avi Kivity
2010-06-10 14:02 ` [PATCH 3/3] KVM: Fix mov cr3 " Avi Kivity
2010-06-11 17:09 ` [PATCH 0/3] Fix #GP at wrong rip for mov cr instructions Marcelo Tosatti
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox