* [PATCH 2/2] KVM: x86: Preserve guest single-stepping on register
From: Jan Kiszka @ 2009-10-02 22:31 UTC (permalink / raw)
To: Avi Kivity, Marcelo Tosatti; +Cc: kvm-devel
Give user space more flexibility w.r.t. its IOCTL order. So far, updating
rflags via KVM_SET_REGS ignored any single-step flags that had been
injected for guest debugging; now they are preserved.
Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
---
arch/x86/kvm/x86.c | 7 ++++++-
1 files changed, 6 insertions(+), 1 deletions(-)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index aa5d574..9fbb4c8 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3853,6 +3853,8 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
+ unsigned long rflags;
+
vcpu_load(vcpu);
kvm_register_write(vcpu, VCPU_REGS_RAX, regs->rax);
@@ -3876,8 +3878,11 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
#endif
kvm_rip_write(vcpu, regs->rip);
- kvm_x86_ops->set_rflags(vcpu, regs->rflags);
+ rflags = regs->rflags;
+ if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
+ rflags |= X86_EFLAGS_TF | X86_EFLAGS_RF;
+ kvm_x86_ops->set_rflags(vcpu, rflags);
vcpu->arch.exception.pending = false;
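
[Editor's note: for illustration, a minimal userspace sketch of the IOCTL
ordering this patch is meant to tolerate; vcpu_fd is an assumed, already
created KVM vCPU file descriptor, and error handling is reduced to -1.]

/*
 * Sketch only (not part of the patch): shows the ioctl order in question.
 * vcpu_fd is assumed to be an already created KVM vCPU file descriptor.
 */
#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <string.h>

static int single_step_and_set_rip(int vcpu_fd, unsigned long new_rip)
{
	struct kvm_guest_debug dbg;
	struct kvm_regs regs;

	memset(&dbg, 0, sizeof(dbg));
	dbg.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;
	if (ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg) < 0)
		return -1;

	if (ioctl(vcpu_fd, KVM_GET_REGS, &regs) < 0)
		return -1;
	regs.rip = new_rip;
	/*
	 * Without this patch, KVM_SET_REGS wrote regs.rflags back verbatim
	 * and thereby dropped the TF/RF bits injected for single-stepping
	 * (KVM_GET_REGS filters them out, so they are not in regs.rflags).
	 */
	return ioctl(vcpu_fd, KVM_SET_REGS, &regs);
}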
* Re: [PATCH 2/2] KVM: x86: Preserve guest single-stepping on register
From: Jan Kiszka @ 2009-10-04 19:02 UTC (permalink / raw)
To: Avi Kivity; +Cc: Marcelo Tosatti, kvm-devel
Avi Kivity wrote:
> On 10/03/2009 12:31 AM, Jan Kiszka wrote:
>> Give user space more flexibility w.r.t. its IOCTL order. So far, updating
>> rflags via KVM_SET_REGS ignored any single-step flags that had been
>> injected for guest debugging; now they are preserved.
>>
>
>>
>> kvm_rip_write(vcpu, regs->rip);
>> - kvm_x86_ops->set_rflags(vcpu, regs->rflags);
>>
>> + rflags = regs->rflags;
>> + if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
>> + rflags |= X86_EFLAGS_TF | X86_EFLAGS_RF;
>> + kvm_x86_ops->set_rflags(vcpu, rflags);
>>
>>
>
> I think we need the same on popf instruction emulation.
>
Hmmmm, good point. Mind reverting 2/2 and applying this one instead?
Jan
--------->
KVM: x86: Rework guest single-step flag injection and filtering
Push TF and RF injection and filtering for guest single-stepping into the
vendor get/set_rflags callbacks. This makes the whole mechanism more
robust w.r.t. user space IOCTL ordering and instruction emulation.
Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
---
arch/x86/kvm/svm.c | 8 +++++++-
arch/x86/kvm/vmx.c | 4 ++++
arch/x86/kvm/x86.c | 24 +++++++++---------------
3 files changed, 20 insertions(+), 16 deletions(-)
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 279a2ae..407e1a7 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -797,11 +797,17 @@ static void svm_vcpu_put(struct kvm_vcpu *vcpu)
static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
{
- return to_svm(vcpu)->vmcb->save.rflags;
+ unsigned long rflags = to_svm(vcpu)->vmcb->save.rflags;
+
+ if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
+ rflags &= ~(unsigned long)(X86_EFLAGS_TF | X86_EFLAGS_RF);
+ return rflags;
}
static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
+ if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
+ rflags |= X86_EFLAGS_TF | X86_EFLAGS_RF;
to_svm(vcpu)->vmcb->save.rflags = rflags;
}
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 70020e5..8e678ef 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -787,6 +787,8 @@ static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
rflags = vmcs_readl(GUEST_RFLAGS);
if (to_vmx(vcpu)->rmode.vm86_active)
rflags &= ~(unsigned long)(X86_EFLAGS_IOPL | X86_EFLAGS_VM);
+ if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
+ rflags &= ~(unsigned long)(X86_EFLAGS_TF | X86_EFLAGS_RF);
return rflags;
}
@@ -794,6 +796,8 @@ static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
if (to_vmx(vcpu)->rmode.vm86_active)
rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
+ if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
+ rflags |= X86_EFLAGS_TF | X86_EFLAGS_RF;
vmcs_writel(GUEST_RFLAGS, rflags);
}
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index aa5d574..5b562dd 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3840,12 +3840,6 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
regs->rip = kvm_rip_read(vcpu);
regs->rflags = kvm_x86_ops->get_rflags(vcpu);
- /*
- * Don't leak debug flags in case they were set for guest debugging
- */
- if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
- regs->rflags &= ~(X86_EFLAGS_TF | X86_EFLAGS_RF);
-
vcpu_put(vcpu);
return 0;
@@ -3872,13 +3866,11 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
kvm_register_write(vcpu, VCPU_REGS_R13, regs->r13);
kvm_register_write(vcpu, VCPU_REGS_R14, regs->r14);
kvm_register_write(vcpu, VCPU_REGS_R15, regs->r15);
-
#endif
kvm_rip_write(vcpu, regs->rip);
kvm_x86_ops->set_rflags(vcpu, regs->rflags);
-
vcpu->arch.exception.pending = false;
vcpu_put(vcpu);
@@ -4471,12 +4463,15 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
struct kvm_guest_debug *dbg)
{
unsigned long rflags;
- int old_debug;
int i;
vcpu_load(vcpu);
- old_debug = vcpu->guest_debug;
+ /*
+ * Read rflags as long as potentially injected trace flags are still
+ * filtered out.
+ */
+ rflags = kvm_x86_ops->get_rflags(vcpu);
vcpu->guest_debug = dbg->control;
if (!(vcpu->guest_debug & KVM_GUESTDBG_ENABLE))
@@ -4493,11 +4488,10 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
vcpu->arch.switch_db_regs = (vcpu->arch.dr7 & DR7_BP_EN_MASK);
}
- rflags = kvm_x86_ops->get_rflags(vcpu);
- if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
- rflags |= X86_EFLAGS_TF | X86_EFLAGS_RF;
- else if (old_debug & KVM_GUESTDBG_SINGLESTEP)
- rflags &= ~(X86_EFLAGS_TF | X86_EFLAGS_RF);
+ /*
+ * Trigger an rflags update that will inject or remove the trace
+ * flags.
+ */
kvm_x86_ops->set_rflags(vcpu, rflags);
kvm_x86_ops->set_guest_debug(vcpu, dbg);
* Re: [PATCH 2/2] KVM: x86: Preserve guest single-stepping on register
From: Avi Kivity @ 2009-10-05 10:44 UTC (permalink / raw)
To: Jan Kiszka; +Cc: Marcelo Tosatti, kvm-devel
On 10/04/2009 09:02 PM, Jan Kiszka wrote:
>
> Hmmmm, good point. Mind reverting 2/2 and applying this one instead?
>
> Jan
>
> --------->
>
> KVM: x86: Rework guest single-step flag injection and filtering
>
> Push TF and RF injection and filtering for guest single-stepping into the
> vendor get/set_rflags callbacks. This makes the whole mechanism more
> robust w.r.t. user space IOCTL ordering and instruction emulation.
>
> Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
> ---
>
> arch/x86/kvm/svm.c | 8 +++++++-
> arch/x86/kvm/vmx.c | 4 ++++
> arch/x86/kvm/x86.c | 24 +++++++++---------------
> 3 files changed, 20 insertions(+), 16 deletions(-)
>
> diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
> index 279a2ae..407e1a7 100644
> --- a/arch/x86/kvm/svm.c
> +++ b/arch/x86/kvm/svm.c
> @@ -797,11 +797,17 @@ static void svm_vcpu_put(struct kvm_vcpu *vcpu)
>
> static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
> {
> - return to_svm(vcpu)->vmcb->save.rflags;
> + unsigned long rflags = to_svm(vcpu)->vmcb->save.rflags;
> +
> + if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
> + rflags &= ~(unsigned long)(X86_EFLAGS_TF | X86_EFLAGS_RF);
> + return rflags;
> }
>
> static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
> {
> + if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
> + rflags |= X86_EFLAGS_TF | X86_EFLAGS_RF;
> to_svm(vcpu)->vmcb->save.rflags = rflags;
> }
>
This code is duplicated in vmx. How about kvm_[gs]et_rflags to contain it?
--
error compiling committee.c: too many arguments to function
* [PATCH v2 2/2] KVM: x86: Rework guest single-step flag injection and filtering
From: Jan Kiszka @ 2009-10-05 11:07 UTC (permalink / raw)
To: Avi Kivity; +Cc: Marcelo Tosatti, kvm-devel
Push TF and RF injection and filtering for guest single-stepping into the
vendor get/set_rflags callbacks. This makes the whole mechanism more
robust w.r.t. user space IOCTL ordering and instruction emulation.
Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
---
arch/x86/include/asm/kvm_host.h | 3 ++
arch/x86/kvm/x86.c | 77 +++++++++++++++++++++++----------------
2 files changed, 48 insertions(+), 32 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index e7f8708..179a919 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -614,6 +614,9 @@ void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l);
int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data);
+unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu);
+void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
+
void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr);
void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long cr2,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index aa5d574..d5db82b 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -235,6 +235,25 @@ bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl)
}
EXPORT_SYMBOL_GPL(kvm_require_cpl);
+unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu)
+{
+ unsigned long rflags;
+
+ rflags = kvm_x86_ops->get_rflags(vcpu);
+ if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
+ rflags &= ~(unsigned long)(X86_EFLAGS_TF | X86_EFLAGS_RF);
+ return rflags;
+}
+EXPORT_SYMBOL_GPL(kvm_get_rflags);
+
+void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
+{
+ if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
+ rflags |= X86_EFLAGS_TF | X86_EFLAGS_RF;
+ kvm_x86_ops->set_rflags(vcpu, rflags);
+}
+EXPORT_SYMBOL_GPL(kvm_set_rflags);
+
/*
* Load the pae pdptrs. Return true is they are all valid.
*/
@@ -2775,7 +2794,7 @@ int emulate_instruction(struct kvm_vcpu *vcpu,
kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
vcpu->arch.emulate_ctxt.vcpu = vcpu;
- vcpu->arch.emulate_ctxt.eflags = kvm_x86_ops->get_rflags(vcpu);
+ vcpu->arch.emulate_ctxt.eflags = kvm_get_rflags(vcpu);
vcpu->arch.emulate_ctxt.mode =
(vcpu->arch.emulate_ctxt.eflags & X86_EFLAGS_VM)
? X86EMUL_MODE_REAL : cs_l
@@ -2853,7 +2872,7 @@ int emulate_instruction(struct kvm_vcpu *vcpu,
return EMULATE_DO_MMIO;
}
- kvm_x86_ops->set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags);
+ kvm_set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags);
if (vcpu->mmio_is_write) {
vcpu->mmio_needed = 0;
@@ -3289,7 +3308,7 @@ void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
unsigned long *rflags)
{
kvm_lmsw(vcpu, msw);
- *rflags = kvm_x86_ops->get_rflags(vcpu);
+ *rflags = kvm_get_rflags(vcpu);
}
unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr)
@@ -3327,7 +3346,7 @@ void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long val,
switch (cr) {
case 0:
kvm_set_cr0(vcpu, mk_cr_64(vcpu->arch.cr0, val));
- *rflags = kvm_x86_ops->get_rflags(vcpu);
+ *rflags = kvm_get_rflags(vcpu);
break;
case 2:
vcpu->arch.cr2 = val;
@@ -3458,7 +3477,7 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu)
{
struct kvm_run *kvm_run = vcpu->run;
- kvm_run->if_flag = (kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_IF) != 0;
+ kvm_run->if_flag = (kvm_get_rflags(vcpu) & X86_EFLAGS_IF) != 0;
kvm_run->cr8 = kvm_get_cr8(vcpu);
kvm_run->apic_base = kvm_get_apic_base(vcpu);
if (irqchip_in_kernel(vcpu->kvm))
@@ -3838,13 +3857,7 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
#endif
regs->rip = kvm_rip_read(vcpu);
- regs->rflags = kvm_x86_ops->get_rflags(vcpu);
-
- /*
- * Don't leak debug flags in case they were set for guest debugging
- */
- if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
- regs->rflags &= ~(X86_EFLAGS_TF | X86_EFLAGS_RF);
+ regs->rflags = kvm_get_rflags(vcpu);
vcpu_put(vcpu);
@@ -3872,12 +3885,10 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
kvm_register_write(vcpu, VCPU_REGS_R13, regs->r13);
kvm_register_write(vcpu, VCPU_REGS_R14, regs->r14);
kvm_register_write(vcpu, VCPU_REGS_R15, regs->r15);
-
#endif
kvm_rip_write(vcpu, regs->rip);
- kvm_x86_ops->set_rflags(vcpu, regs->rflags);
-
+ kvm_set_rflags(vcpu, regs->rflags);
vcpu->arch.exception.pending = false;
@@ -4096,7 +4107,7 @@ static int is_vm86_segment(struct kvm_vcpu *vcpu, int seg)
{
return (seg != VCPU_SREG_LDTR) &&
(seg != VCPU_SREG_TR) &&
- (kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_VM);
+ (kvm_get_rflags(vcpu) & X86_EFLAGS_VM);
}
int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
@@ -4124,7 +4135,7 @@ static void save_state_to_tss32(struct kvm_vcpu *vcpu,
{
tss->cr3 = vcpu->arch.cr3;
tss->eip = kvm_rip_read(vcpu);
- tss->eflags = kvm_x86_ops->get_rflags(vcpu);
+ tss->eflags = kvm_get_rflags(vcpu);
tss->eax = kvm_register_read(vcpu, VCPU_REGS_RAX);
tss->ecx = kvm_register_read(vcpu, VCPU_REGS_RCX);
tss->edx = kvm_register_read(vcpu, VCPU_REGS_RDX);
@@ -4148,7 +4159,7 @@ static int load_state_from_tss32(struct kvm_vcpu *vcpu,
kvm_set_cr3(vcpu, tss->cr3);
kvm_rip_write(vcpu, tss->eip);
- kvm_x86_ops->set_rflags(vcpu, tss->eflags | 2);
+ kvm_set_rflags(vcpu, tss->eflags | 2);
kvm_register_write(vcpu, VCPU_REGS_RAX, tss->eax);
kvm_register_write(vcpu, VCPU_REGS_RCX, tss->ecx);
@@ -4186,7 +4197,7 @@ static void save_state_to_tss16(struct kvm_vcpu *vcpu,
struct tss_segment_16 *tss)
{
tss->ip = kvm_rip_read(vcpu);
- tss->flag = kvm_x86_ops->get_rflags(vcpu);
+ tss->flag = kvm_get_rflags(vcpu);
tss->ax = kvm_register_read(vcpu, VCPU_REGS_RAX);
tss->cx = kvm_register_read(vcpu, VCPU_REGS_RCX);
tss->dx = kvm_register_read(vcpu, VCPU_REGS_RDX);
@@ -4207,7 +4218,7 @@ static int load_state_from_tss16(struct kvm_vcpu *vcpu,
struct tss_segment_16 *tss)
{
kvm_rip_write(vcpu, tss->ip);
- kvm_x86_ops->set_rflags(vcpu, tss->flag | 2);
+ kvm_set_rflags(vcpu, tss->flag | 2);
kvm_register_write(vcpu, VCPU_REGS_RAX, tss->ax);
kvm_register_write(vcpu, VCPU_REGS_RCX, tss->cx);
kvm_register_write(vcpu, VCPU_REGS_RDX, tss->dx);
@@ -4353,8 +4364,8 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
}
if (reason == TASK_SWITCH_IRET) {
- u32 eflags = kvm_x86_ops->get_rflags(vcpu);
- kvm_x86_ops->set_rflags(vcpu, eflags & ~X86_EFLAGS_NT);
+ u32 eflags = kvm_get_rflags(vcpu);
+ kvm_set_rflags(vcpu, eflags & ~X86_EFLAGS_NT);
}
/* set back link to prev task only if NT bit is set in eflags
@@ -4375,8 +4386,8 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
old_tss_base, &nseg_desc);
if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE) {
- u32 eflags = kvm_x86_ops->get_rflags(vcpu);
- kvm_x86_ops->set_rflags(vcpu, eflags | X86_EFLAGS_NT);
+ u32 eflags = kvm_get_rflags(vcpu);
+ kvm_set_rflags(vcpu, eflags | X86_EFLAGS_NT);
}
if (reason != TASK_SWITCH_IRET) {
@@ -4471,12 +4482,15 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
struct kvm_guest_debug *dbg)
{
unsigned long rflags;
- int old_debug;
int i;
vcpu_load(vcpu);
- old_debug = vcpu->guest_debug;
+ /*
+ * Read rflags as long as potentially injected trace flags are still
+ * filtered out.
+ */
+ rflags = kvm_get_rflags(vcpu);
vcpu->guest_debug = dbg->control;
if (!(vcpu->guest_debug & KVM_GUESTDBG_ENABLE))
@@ -4493,12 +4507,11 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
vcpu->arch.switch_db_regs = (vcpu->arch.dr7 & DR7_BP_EN_MASK);
}
- rflags = kvm_x86_ops->get_rflags(vcpu);
- if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
- rflags |= X86_EFLAGS_TF | X86_EFLAGS_RF;
- else if (old_debug & KVM_GUESTDBG_SINGLESTEP)
- rflags &= ~(X86_EFLAGS_TF | X86_EFLAGS_RF);
- kvm_x86_ops->set_rflags(vcpu, rflags);
+ /*
+ * Trigger an rflags update that will inject or remove the trace
+ * flags.
+ */
+ kvm_set_rflags(vcpu, rflags);
kvm_x86_ops->set_guest_debug(vcpu, dbg);
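
[Editor's note: as a conceptual illustration of the invariant the new
kvm_get_rflags()/kvm_set_rflags() wrappers establish, and why it also
covers the emulated popf case raised earlier, here is a self-contained
sketch; fake_vcpu and these helpers are stand-ins, not kernel code.]

/*
 * Standalone sketch, not kernel code: models the filtering/injection
 * behaviour of kvm_get_rflags()/kvm_set_rflags() from the patch above.
 */
#define X86_EFLAGS_TF 0x00000100UL
#define X86_EFLAGS_RF 0x00010000UL

struct fake_vcpu {
	unsigned long hw_rflags;  /* what the VMCB/VMCS would hold */
	int singlestep;           /* KVM_GUESTDBG_SINGLESTEP active? */
};

static unsigned long get_rflags(struct fake_vcpu *v)
{
	unsigned long rflags = v->hw_rflags;

	/* Hide the bits KVM injected for host-side single-stepping. */
	if (v->singlestep)
		rflags &= ~(X86_EFLAGS_TF | X86_EFLAGS_RF);
	return rflags;
}

static void set_rflags(struct fake_vcpu *v, unsigned long rflags)
{
	/* Re-inject them on every write, whatever the caller passed in. */
	if (v->singlestep)
		rflags |= X86_EFLAGS_TF | X86_EFLAGS_RF;
	v->hw_rflags = rflags;
}

/*
 * An emulated popf boils down to "write a guest-supplied flags image":
 * even if that image has TF clear, the write path re-injects it, so
 * single-stepping requested by user space survives the emulation.
 * (Real popf emulation additionally masks IOPL/reserved bits.)
 */
static void emulated_popf(struct fake_vcpu *v, unsigned long popped)
{
	set_rflags(v, popped);
}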
* Re: [PATCH v2 2/2] KVM: x86: Rework guest single-step flag injection and filtering
From: Avi Kivity @ 2009-10-05 12:49 UTC (permalink / raw)
To: Jan Kiszka; +Cc: Marcelo Tosatti, kvm-devel
On 10/05/2009 01:07 PM, Jan Kiszka wrote:
> Push TF and RF injection and filtering for guest single-stepping into the
> vendor get/set_rflags callbacks. This makes the whole mechanism more
> robust w.r.t. user space IOCTL ordering and instruction emulation.
>
Applied, thanks.
--
error compiling committee.c: too many arguments to function