* [PATCHv2] KVM: move DR register access handling into generic code
@ 2010-04-13 7:05 Gleb Natapov
2010-04-13 15:28 ` Jan Kiszka
2010-04-13 16:37 ` Marcelo Tosatti
0 siblings, 2 replies; 3+ messages in thread
From: Gleb Natapov @ 2010-04-13 7:05 UTC (permalink / raw)
To: kvm; +Cc: jan.kiszka, avi, mtosatti
Currently both SVM and VMX have their own DR handling code. Move it to
x86.c.
Changelog:
v1->v2
- kvm_set_dr() always returns 1 in case of error
Signed-off-by: Gleb Natapov <gleb@redhat.com>
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 0c49c88..5d5e0a9 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -496,8 +496,7 @@ struct kvm_x86_ops {
void (*set_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
void (*get_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
void (*set_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
- int (*get_dr)(struct kvm_vcpu *vcpu, int dr, unsigned long *dest);
- int (*set_dr)(struct kvm_vcpu *vcpu, int dr, unsigned long value);
+ void (*set_dr7)(struct kvm_vcpu *vcpu, unsigned long value);
void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg);
unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);
@@ -602,6 +601,8 @@ void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
void kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8);
+int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val);
+int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val);
unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu);
void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw);
void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l);
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index d04c7ad..c773a46 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1303,70 +1303,11 @@ static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd)
svm->vmcb->control.asid = sd->next_asid++;
}
-static int svm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *dest)
+static void svm_set_dr7(struct kvm_vcpu *vcpu, unsigned long value)
{
struct vcpu_svm *svm = to_svm(vcpu);
- switch (dr) {
- case 0 ... 3:
- *dest = vcpu->arch.db[dr];
- break;
- case 4:
- if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
- return EMULATE_FAIL; /* will re-inject UD */
- /* fall through */
- case 6:
- if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
- *dest = vcpu->arch.dr6;
- else
- *dest = svm->vmcb->save.dr6;
- break;
- case 5:
- if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
- return EMULATE_FAIL; /* will re-inject UD */
- /* fall through */
- case 7:
- if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
- *dest = vcpu->arch.dr7;
- else
- *dest = svm->vmcb->save.dr7;
- break;
- }
-
- return EMULATE_DONE;
-}
-
-static int svm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long value)
-{
- struct vcpu_svm *svm = to_svm(vcpu);
-
- switch (dr) {
- case 0 ... 3:
- vcpu->arch.db[dr] = value;
- if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
- vcpu->arch.eff_db[dr] = value;
- break;
- case 4:
- if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
- return EMULATE_FAIL; /* will re-inject UD */
- /* fall through */
- case 6:
- vcpu->arch.dr6 = (value & DR6_VOLATILE) | DR6_FIXED_1;
- break;
- case 5:
- if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
- return EMULATE_FAIL; /* will re-inject UD */
- /* fall through */
- case 7:
- vcpu->arch.dr7 = (value & DR7_VOLATILE) | DR7_FIXED_1;
- if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) {
- svm->vmcb->save.dr7 = vcpu->arch.dr7;
- vcpu->arch.switch_db_regs = (value & DR7_BP_EN_MASK);
- }
- break;
- }
-
- return EMULATE_DONE;
+ svm->vmcb->save.dr7 = value;
}
static int pf_interception(struct vcpu_svm *svm)
@@ -3298,8 +3239,7 @@ static struct kvm_x86_ops svm_x86_ops = {
.set_idt = svm_set_idt,
.get_gdt = svm_get_gdt,
.set_gdt = svm_set_gdt,
- .get_dr = svm_get_dr,
- .set_dr = svm_set_dr,
+ .set_dr7 = svm_set_dr7,
.cache_reg = svm_cache_reg,
.get_rflags = svm_get_rflags,
.set_rflags = svm_set_rflags,
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 41e63bb..53c42f0 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -3082,19 +3082,9 @@ static int handle_cr(struct kvm_vcpu *vcpu)
return 0;
}
-static int check_dr_alias(struct kvm_vcpu *vcpu)
-{
- if (kvm_read_cr4_bits(vcpu, X86_CR4_DE)) {
- kvm_queue_exception(vcpu, UD_VECTOR);
- return -1;
- }
- return 0;
-}
-
static int handle_dr(struct kvm_vcpu *vcpu)
{
unsigned long exit_qualification;
- unsigned long val;
int dr, reg;
/* Do not handle if the CPL > 0, will trigger GP on re-entry */
@@ -3129,67 +3119,20 @@ static int handle_dr(struct kvm_vcpu *vcpu)
dr = exit_qualification & DEBUG_REG_ACCESS_NUM;
reg = DEBUG_REG_ACCESS_REG(exit_qualification);
if (exit_qualification & TYPE_MOV_FROM_DR) {
- switch (dr) {
- case 0 ... 3:
- val = vcpu->arch.db[dr];
- break;
- case 4:
- if (check_dr_alias(vcpu) < 0)
- return 1;
- /* fall through */
- case 6:
- val = vcpu->arch.dr6;
- break;
- case 5:
- if (check_dr_alias(vcpu) < 0)
- return 1;
- /* fall through */
- default: /* 7 */
- val = vcpu->arch.dr7;
- break;
- }
- kvm_register_write(vcpu, reg, val);
- } else {
- val = vcpu->arch.regs[reg];
- switch (dr) {
- case 0 ... 3:
- vcpu->arch.db[dr] = val;
- if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
- vcpu->arch.eff_db[dr] = val;
- break;
- case 4:
- if (check_dr_alias(vcpu) < 0)
- return 1;
- /* fall through */
- case 6:
- if (val & 0xffffffff00000000ULL) {
- kvm_inject_gp(vcpu, 0);
- return 1;
- }
- vcpu->arch.dr6 = (val & DR6_VOLATILE) | DR6_FIXED_1;
- break;
- case 5:
- if (check_dr_alias(vcpu) < 0)
- return 1;
- /* fall through */
- default: /* 7 */
- if (val & 0xffffffff00000000ULL) {
- kvm_inject_gp(vcpu, 0);
- return 1;
- }
- vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1;
- if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) {
- vmcs_writel(GUEST_DR7, vcpu->arch.dr7);
- vcpu->arch.switch_db_regs =
- (val & DR7_BP_EN_MASK);
- }
- break;
- }
- }
+ unsigned long val;
+ if (!kvm_get_dr(vcpu, dr, &val))
+ kvm_register_write(vcpu, reg, val);
+ } else
+ kvm_set_dr(vcpu, dr, vcpu->arch.regs[reg]);
skip_emulated_instruction(vcpu);
return 1;
}
+static void vmx_set_dr7(struct kvm_vcpu *vcpu, unsigned long val)
+{
+ vmcs_writel(GUEST_DR7, val);
+}
+
static int handle_cpuid(struct kvm_vcpu *vcpu)
{
kvm_emulate_cpuid(vcpu);
@@ -4180,6 +4123,7 @@ static struct kvm_x86_ops vmx_x86_ops = {
.set_idt = vmx_set_idt,
.get_gdt = vmx_get_gdt,
.set_gdt = vmx_set_gdt,
+ .set_dr7 = vmx_set_dr7,
.cache_reg = vmx_cache_reg,
.get_rflags = vmx_get_rflags,
.set_rflags = vmx_set_rflags,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index fd5c3d3..399784d 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -561,6 +561,80 @@ unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu)
}
EXPORT_SYMBOL_GPL(kvm_get_cr8);
+int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
+{
+ switch (dr) {
+ case 0 ... 3:
+ vcpu->arch.db[dr] = val;
+ if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
+ vcpu->arch.eff_db[dr] = val;
+ break;
+ case 4:
+ if (kvm_read_cr4_bits(vcpu, X86_CR4_DE)) {
+ kvm_queue_exception(vcpu, UD_VECTOR);
+ return 1;
+ }
+ /* fall through */
+ case 6:
+ if (val & 0xffffffff00000000ULL) {
+ kvm_inject_gp(vcpu, 0);
+ return 1;
+ }
+ vcpu->arch.dr6 = (val & DR6_VOLATILE) | DR6_FIXED_1;
+ break;
+ case 5:
+ if (kvm_read_cr4_bits(vcpu, X86_CR4_DE)) {
+ kvm_queue_exception(vcpu, UD_VECTOR);
+ return 1;
+ }
+ /* fall through */
+ default: /* 7 */
+ if (val & 0xffffffff00000000ULL) {
+ kvm_inject_gp(vcpu, 0);
+ return 1;
+ }
+ vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1;
+ if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) {
+ kvm_x86_ops->set_dr7(vcpu, vcpu->arch.dr7);
+ vcpu->arch.switch_db_regs = (val & DR7_BP_EN_MASK);
+ }
+ break;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(kvm_set_dr);
+
+int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
+{
+ switch (dr) {
+ case 0 ... 3:
+ *val = vcpu->arch.db[dr];
+ break;
+ case 4:
+ if (kvm_read_cr4_bits(vcpu, X86_CR4_DE)) {
+ kvm_queue_exception(vcpu, UD_VECTOR);
+ return 1;
+ }
+ /* fall through */
+ case 6:
+ *val = vcpu->arch.dr6;
+ break;
+ case 5:
+ if (kvm_read_cr4_bits(vcpu, X86_CR4_DE)) {
+ kvm_queue_exception(vcpu, UD_VECTOR);
+ return 1;
+ }
+ /* fall through */
+ default: /* 7 */
+ *val = vcpu->arch.dr7;
+ break;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(kvm_get_dr);
+
static inline u32 bit(int bitno)
{
return 1 << (bitno & 31);
@@ -3481,14 +3555,14 @@ int emulate_clts(struct kvm_vcpu *vcpu)
int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long *dest)
{
- return kvm_x86_ops->get_dr(ctxt->vcpu, dr, dest);
+ return kvm_get_dr(ctxt->vcpu, dr, dest);
}
int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value)
{
unsigned long mask = (ctxt->mode == X86EMUL_MODE_PROT64) ? ~0ULL : ~0U;
- return kvm_x86_ops->set_dr(ctxt->vcpu, dr, value & mask);
+ return kvm_set_dr(ctxt->vcpu, dr, value & mask);
}
void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context)
--
Gleb.
^ permalink raw reply related [flat|nested] 3+ messages in thread

* Re: [PATCHv2] KVM: move DR register access handling into generic code
2010-04-13 7:05 [PATCHv2] KVM: move DR register access handling into generic code Gleb Natapov
@ 2010-04-13 15:28 ` Jan Kiszka
2010-04-13 16:37 ` Marcelo Tosatti
1 sibling, 0 replies; 3+ messages in thread
From: Jan Kiszka @ 2010-04-13 15:28 UTC (permalink / raw)
To: Gleb Natapov; +Cc: kvm@vger.kernel.org, avi@redhat.com, mtosatti@redhat.com
Gleb Natapov wrote:
> Currently both SVM and VMX have their own DR handling code. Move it to
> x86.c.
>
> Changelog:
> v1->v2
> - kvm_set_dr() always returns 1 in case of error
>
> Signed-off-by: Gleb Natapov <gleb@redhat.com>
Acked-by: Jan Kiszka <jan.kiszka@siemens.com>
> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> index 0c49c88..5d5e0a9 100644
> --- a/arch/x86/include/asm/kvm_host.h
> +++ b/arch/x86/include/asm/kvm_host.h
> @@ -496,8 +496,7 @@ struct kvm_x86_ops {
> void (*set_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
> void (*get_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
> void (*set_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
> - int (*get_dr)(struct kvm_vcpu *vcpu, int dr, unsigned long *dest);
> - int (*set_dr)(struct kvm_vcpu *vcpu, int dr, unsigned long value);
> + void (*set_dr7)(struct kvm_vcpu *vcpu, unsigned long value);
> void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg);
> unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
> void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);
> @@ -602,6 +601,8 @@ void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
> void kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
> void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
> void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8);
> +int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val);
> +int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val);
> unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu);
> void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw);
> void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l);
> diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
> index d04c7ad..c773a46 100644
> --- a/arch/x86/kvm/svm.c
> +++ b/arch/x86/kvm/svm.c
> @@ -1303,70 +1303,11 @@ static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd)
> svm->vmcb->control.asid = sd->next_asid++;
> }
>
> -static int svm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *dest)
> +static void svm_set_dr7(struct kvm_vcpu *vcpu, unsigned long value)
> {
> struct vcpu_svm *svm = to_svm(vcpu);
>
> - switch (dr) {
> - case 0 ... 3:
> - *dest = vcpu->arch.db[dr];
> - break;
> - case 4:
> - if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
> - return EMULATE_FAIL; /* will re-inject UD */
> - /* fall through */
> - case 6:
> - if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
> - *dest = vcpu->arch.dr6;
> - else
> - *dest = svm->vmcb->save.dr6;
> - break;
> - case 5:
> - if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
> - return EMULATE_FAIL; /* will re-inject UD */
> - /* fall through */
> - case 7:
> - if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
> - *dest = vcpu->arch.dr7;
> - else
> - *dest = svm->vmcb->save.dr7;
> - break;
> - }
> -
> - return EMULATE_DONE;
> -}
> -
> -static int svm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long value)
> -{
> - struct vcpu_svm *svm = to_svm(vcpu);
> -
> - switch (dr) {
> - case 0 ... 3:
> - vcpu->arch.db[dr] = value;
> - if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
> - vcpu->arch.eff_db[dr] = value;
> - break;
> - case 4:
> - if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
> - return EMULATE_FAIL; /* will re-inject UD */
> - /* fall through */
> - case 6:
> - vcpu->arch.dr6 = (value & DR6_VOLATILE) | DR6_FIXED_1;
> - break;
> - case 5:
> - if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
> - return EMULATE_FAIL; /* will re-inject UD */
> - /* fall through */
> - case 7:
> - vcpu->arch.dr7 = (value & DR7_VOLATILE) | DR7_FIXED_1;
> - if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) {
> - svm->vmcb->save.dr7 = vcpu->arch.dr7;
> - vcpu->arch.switch_db_regs = (value & DR7_BP_EN_MASK);
> - }
> - break;
> - }
> -
> - return EMULATE_DONE;
> + svm->vmcb->save.dr7 = value;
> }
>
> static int pf_interception(struct vcpu_svm *svm)
> @@ -3298,8 +3239,7 @@ static struct kvm_x86_ops svm_x86_ops = {
> .set_idt = svm_set_idt,
> .get_gdt = svm_get_gdt,
> .set_gdt = svm_set_gdt,
> - .get_dr = svm_get_dr,
> - .set_dr = svm_set_dr,
> + .set_dr7 = svm_set_dr7,
> .cache_reg = svm_cache_reg,
> .get_rflags = svm_get_rflags,
> .set_rflags = svm_set_rflags,
> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
> index 41e63bb..53c42f0 100644
> --- a/arch/x86/kvm/vmx.c
> +++ b/arch/x86/kvm/vmx.c
> @@ -3082,19 +3082,9 @@ static int handle_cr(struct kvm_vcpu *vcpu)
> return 0;
> }
>
> -static int check_dr_alias(struct kvm_vcpu *vcpu)
> -{
> - if (kvm_read_cr4_bits(vcpu, X86_CR4_DE)) {
> - kvm_queue_exception(vcpu, UD_VECTOR);
> - return -1;
> - }
> - return 0;
> -}
> -
> static int handle_dr(struct kvm_vcpu *vcpu)
> {
> unsigned long exit_qualification;
> - unsigned long val;
> int dr, reg;
>
> /* Do not handle if the CPL > 0, will trigger GP on re-entry */
> @@ -3129,67 +3119,20 @@ static int handle_dr(struct kvm_vcpu *vcpu)
> dr = exit_qualification & DEBUG_REG_ACCESS_NUM;
> reg = DEBUG_REG_ACCESS_REG(exit_qualification);
> if (exit_qualification & TYPE_MOV_FROM_DR) {
> - switch (dr) {
> - case 0 ... 3:
> - val = vcpu->arch.db[dr];
> - break;
> - case 4:
> - if (check_dr_alias(vcpu) < 0)
> - return 1;
> - /* fall through */
> - case 6:
> - val = vcpu->arch.dr6;
> - break;
> - case 5:
> - if (check_dr_alias(vcpu) < 0)
> - return 1;
> - /* fall through */
> - default: /* 7 */
> - val = vcpu->arch.dr7;
> - break;
> - }
> - kvm_register_write(vcpu, reg, val);
> - } else {
> - val = vcpu->arch.regs[reg];
> - switch (dr) {
> - case 0 ... 3:
> - vcpu->arch.db[dr] = val;
> - if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
> - vcpu->arch.eff_db[dr] = val;
> - break;
> - case 4:
> - if (check_dr_alias(vcpu) < 0)
> - return 1;
> - /* fall through */
> - case 6:
> - if (val & 0xffffffff00000000ULL) {
> - kvm_inject_gp(vcpu, 0);
> - return 1;
> - }
> - vcpu->arch.dr6 = (val & DR6_VOLATILE) | DR6_FIXED_1;
> - break;
> - case 5:
> - if (check_dr_alias(vcpu) < 0)
> - return 1;
> - /* fall through */
> - default: /* 7 */
> - if (val & 0xffffffff00000000ULL) {
> - kvm_inject_gp(vcpu, 0);
> - return 1;
> - }
> - vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1;
> - if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) {
> - vmcs_writel(GUEST_DR7, vcpu->arch.dr7);
> - vcpu->arch.switch_db_regs =
> - (val & DR7_BP_EN_MASK);
> - }
> - break;
> - }
> - }
> + unsigned long val;
> + if (!kvm_get_dr(vcpu, dr, &val))
> + kvm_register_write(vcpu, reg, val);
> + } else
> + kvm_set_dr(vcpu, dr, vcpu->arch.regs[reg]);
> skip_emulated_instruction(vcpu);
> return 1;
> }
>
> +static void vmx_set_dr7(struct kvm_vcpu *vcpu, unsigned long val)
> +{
> + vmcs_writel(GUEST_DR7, val);
> +}
> +
> static int handle_cpuid(struct kvm_vcpu *vcpu)
> {
> kvm_emulate_cpuid(vcpu);
> @@ -4180,6 +4123,7 @@ static struct kvm_x86_ops vmx_x86_ops = {
> .set_idt = vmx_set_idt,
> .get_gdt = vmx_get_gdt,
> .set_gdt = vmx_set_gdt,
> + .set_dr7 = vmx_set_dr7,
> .cache_reg = vmx_cache_reg,
> .get_rflags = vmx_get_rflags,
> .set_rflags = vmx_set_rflags,
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index fd5c3d3..399784d 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -561,6 +561,80 @@ unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu)
> }
> EXPORT_SYMBOL_GPL(kvm_get_cr8);
>
> +int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
> +{
> + switch (dr) {
> + case 0 ... 3:
> + vcpu->arch.db[dr] = val;
> + if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
> + vcpu->arch.eff_db[dr] = val;
> + break;
> + case 4:
> + if (kvm_read_cr4_bits(vcpu, X86_CR4_DE)) {
> + kvm_queue_exception(vcpu, UD_VECTOR);
> + return 1;
> + }
> + /* fall through */
> + case 6:
> + if (val & 0xffffffff00000000ULL) {
> + kvm_inject_gp(vcpu, 0);
> + return 1;
> + }
> + vcpu->arch.dr6 = (val & DR6_VOLATILE) | DR6_FIXED_1;
> + break;
> + case 5:
> + if (kvm_read_cr4_bits(vcpu, X86_CR4_DE)) {
> + kvm_queue_exception(vcpu, UD_VECTOR);
> + return 1;
> + }
> + /* fall through */
> + default: /* 7 */
> + if (val & 0xffffffff00000000ULL) {
> + kvm_inject_gp(vcpu, 0);
> + return 1;
> + }
> + vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1;
> + if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) {
> + kvm_x86_ops->set_dr7(vcpu, vcpu->arch.dr7);
> + vcpu->arch.switch_db_regs = (val & DR7_BP_EN_MASK);
> + }
> + break;
> + }
> +
> + return 0;
> +}
> +EXPORT_SYMBOL_GPL(kvm_set_dr);
> +
> +int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
> +{
> + switch (dr) {
> + case 0 ... 3:
> + *val = vcpu->arch.db[dr];
> + break;
> + case 4:
> + if (kvm_read_cr4_bits(vcpu, X86_CR4_DE)) {
> + kvm_queue_exception(vcpu, UD_VECTOR);
> + return 1;
> + }
> + /* fall through */
> + case 6:
> + *val = vcpu->arch.dr6;
> + break;
> + case 5:
> + if (kvm_read_cr4_bits(vcpu, X86_CR4_DE)) {
> + kvm_queue_exception(vcpu, UD_VECTOR);
> + return 1;
> + }
> + /* fall through */
> + default: /* 7 */
> + *val = vcpu->arch.dr7;
> + break;
> + }
> +
> + return 0;
> +}
> +EXPORT_SYMBOL_GPL(kvm_get_dr);
> +
> static inline u32 bit(int bitno)
> {
> return 1 << (bitno & 31);
> @@ -3481,14 +3555,14 @@ int emulate_clts(struct kvm_vcpu *vcpu)
>
> int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long *dest)
> {
> - return kvm_x86_ops->get_dr(ctxt->vcpu, dr, dest);
> + return kvm_get_dr(ctxt->vcpu, dr, dest);
> }
>
> int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value)
> {
> unsigned long mask = (ctxt->mode == X86EMUL_MODE_PROT64) ? ~0ULL : ~0U;
>
> - return kvm_x86_ops->set_dr(ctxt->vcpu, dr, value & mask);
> + return kvm_set_dr(ctxt->vcpu, dr, value & mask);
> }
>
> void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context)
> --
> Gleb.
^ permalink raw reply [flat|nested] 3+ messages in thread

* Re: [PATCHv2] KVM: move DR register access handling into generic code
2010-04-13 7:05 [PATCHv2] KVM: move DR register access handling into generic code Gleb Natapov
2010-04-13 15:28 ` Jan Kiszka
@ 2010-04-13 16:37 ` Marcelo Tosatti
1 sibling, 0 replies; 3+ messages in thread
From: Marcelo Tosatti @ 2010-04-13 16:37 UTC (permalink / raw)
To: Gleb Natapov; +Cc: kvm, jan.kiszka, avi
On Tue, Apr 13, 2010 at 10:05:23AM +0300, Gleb Natapov wrote:
> Currently both SVM and VMX have their own DR handling code. Move it to
> x86.c.
>
> Changelog:
> v1->v2
> - kvm_set_dr() always returns 1 in case of error
>
> Signed-off-by: Gleb Natapov <gleb@redhat.com>
Applied, thanks.
^ permalink raw reply [flat|nested] 3+ messages in thread
end of thread, other threads: [~2010-04-13 16:39 UTC | newest]
Thread overview: 3+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2010-04-13 7:05 [PATCHv2] KVM: move DR register access handling into generic code Gleb Natapov
2010-04-13 15:28 ` Jan Kiszka
2010-04-13 16:37 ` Marcelo Tosatti
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox