public inbox for kvm@vger.kernel.org
 help / color / mirror / Atom feed
* [PATCH 0/7] Rework irq injection infrastructure
@ 2007-12-04  9:44 Avi Kivity
       [not found] ` <11967614542283-git-send-email-avi-atKUWr5tajBWk0Htik3J/w@public.gmane.org>
  0 siblings, 1 reply; 17+ messages in thread
From: Avi Kivity @ 2007-12-04  9:44 UTC (permalink / raw)
  To: kvm-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f

The current irq and exception injection infrastructure is quite complex and
has been the source of a number of bugs in the past.  This patchset simplifies
irq and exception injection:

- Much more work is carried out in common code rather than vmx/svm specific
  code.  Information is kept in C variables rather than hardware registers

- Exception and interrupts are separated into two independent queues.  This
  will allow later optimization on AMD where the hardware supports two queues,
  and also simplifies the Intel case as now we don't need to check the hardware
  whether an exception is pending when injecting an interrupt.

- Interrupts are now only acked after they have been successfully injected,
  rather than when the injection is attempted.  This significantly simplifies
  the case where we fail to inject an interrupt due to a shadow page fault.

The patchset is also necessary for integrating the tpr optimization branch
and for injecting interrupts in big real mode.

 drivers/kvm/i8259.c       |   44 ++++++----
 drivers/kvm/irq.c         |   40 ++-------
 drivers/kvm/irq.h         |   17 +++-
 drivers/kvm/kvm_main.c    |    4 +-
 drivers/kvm/lapic.c       |   33 +++++--
 drivers/kvm/mmu.c         |    2 +-
 drivers/kvm/svm.c         |  175 ++++++++------------------------------
 drivers/kvm/vmx.c         |  207 ++++++++-------------------------------------
 drivers/kvm/x86.c         |  164 ++++++++++++++++++++++++++----------
 drivers/kvm/x86.h         |   35 ++++++--
 drivers/kvm/x86_emulate.c |    4 +-
 11 files changed, 291 insertions(+), 434 deletions(-)

-------------------------------------------------------------------------
SF.Net email is sponsored by: The Future of Linux Business White Paper
from Novell.  From the desktop to the data center, Linux is going
mainstream.  Let it simplify your IT future.
http://altfarm.mediaplex.com/ad/ck/8857-50307-18918-4

^ permalink raw reply	[flat|nested] 17+ messages in thread

* [PATCH 1/7] KVM: Generalize exception injection mechanism
       [not found] ` <11967614542283-git-send-email-avi-atKUWr5tajBWk0Htik3J/w@public.gmane.org>
@ 2007-12-04  9:44   ` Avi Kivity
  2007-12-04  9:44   ` [PATCH 2/7] KVM: Replace page fault injection by the generalized exception queue Avi Kivity
                     ` (7 subsequent siblings)
  8 siblings, 0 replies; 17+ messages in thread
From: Avi Kivity @ 2007-12-04  9:44 UTC (permalink / raw)
  To: kvm-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f; +Cc: Avi Kivity

Instead of each subarch doing its own thing, add an API for queuing an
injection, and manage failed exception injection centrally (i.e., if
an inject failed due to a shadow page fault, we need to requeue it).

Signed-off-by: Avi Kivity <avi-atKUWr5tajBWk0Htik3J/w@public.gmane.org>
---
 drivers/kvm/svm.c |   21 +++++++++++++++++++++
 drivers/kvm/vmx.c |   20 ++++++++++++++++++++
 drivers/kvm/x86.c |   33 ++++++++++++++++++++++++++++++++-
 drivers/kvm/x86.h |   13 +++++++++++++
 4 files changed, 86 insertions(+), 1 deletions(-)

diff --git a/drivers/kvm/svm.c b/drivers/kvm/svm.c
index 04e6b39..f4c61c8 100644
--- a/drivers/kvm/svm.c
+++ b/drivers/kvm/svm.c
@@ -188,6 +188,25 @@ static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
 	vcpu->shadow_efer = efer;
 }
 
+static void svm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
+				bool has_error_code, u32 error_code)
+{
+	struct vcpu_svm *svm = to_svm(vcpu);
+
+	svm->vmcb->control.event_inj = nr
+		| SVM_EVTINJ_VALID
+		| (has_error_code ? SVM_EVTINJ_VALID_ERR : 0)
+		| SVM_EVTINJ_TYPE_EXEPT;
+	svm->vmcb->control.event_inj_err = error_code;
+}
+
+static bool svm_exception_injected(struct kvm_vcpu *vcpu)
+{
+	struct vcpu_svm *svm = to_svm(vcpu);
+
+	return !(svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_VALID);
+}
+
 static void svm_inject_gp(struct kvm_vcpu *vcpu, unsigned error_code)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
@@ -1712,6 +1731,8 @@ static struct kvm_x86_ops svm_x86_ops = {
 	.patch_hypercall = svm_patch_hypercall,
 	.get_irq = svm_get_irq,
 	.set_irq = svm_set_irq,
+	.queue_exception = svm_queue_exception,
+	.exception_injected = svm_exception_injected,
 	.inject_pending_irq = svm_intr_assist,
 	.inject_pending_vectors = do_interrupt_requests,
 
diff --git a/drivers/kvm/vmx.c b/drivers/kvm/vmx.c
index 8e43feb..1ec1c28 100644
--- a/drivers/kvm/vmx.c
+++ b/drivers/kvm/vmx.c
@@ -595,6 +595,24 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
 	vcpu->interrupt_window_open = 1;
 }
 
+static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
+				bool has_error_code, u32 error_code)
+{
+	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
+		     nr | INTR_TYPE_EXCEPTION
+		     | (has_error_code ? INTR_INFO_DELIEVER_CODE_MASK : 0)
+		     | INTR_INFO_VALID_MASK);
+	if (has_error_code)
+		vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
+}
+
+static bool vmx_exception_injected(struct kvm_vcpu *vcpu)
+{
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+	return !(vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK);
+}
+
 static void vmx_inject_gp(struct kvm_vcpu *vcpu, unsigned error_code)
 {
 	printk(KERN_DEBUG "inject_general_protection: rip 0x%lx\n",
@@ -2641,6 +2659,8 @@ static struct kvm_x86_ops vmx_x86_ops = {
 	.patch_hypercall = vmx_patch_hypercall,
 	.get_irq = vmx_get_irq,
 	.set_irq = vmx_inject_irq,
+	.queue_exception = vmx_queue_exception,
+	.exception_injected = vmx_exception_injected,
 	.inject_pending_irq = vmx_intr_assist,
 	.inject_pending_vectors = do_interrupt_requests,
 
diff --git a/drivers/kvm/x86.c b/drivers/kvm/x86.c
index c9e4b67..11440d1 100644
--- a/drivers/kvm/x86.c
+++ b/drivers/kvm/x86.c
@@ -133,6 +133,32 @@ static void inject_gp(struct kvm_vcpu *vcpu)
 	kvm_x86_ops->inject_gp(vcpu, 0);
 }
 
+void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr)
+{
+	WARN_ON(vcpu->exception.pending);
+	vcpu->exception.pending = true;
+	vcpu->exception.has_error_code = false;
+	vcpu->exception.nr = nr;
+}
+EXPORT_SYMBOL_GPL(kvm_queue_exception);
+
+void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
+{
+	WARN_ON(vcpu->exception.pending);
+	vcpu->exception.pending = true;
+	vcpu->exception.has_error_code = true;
+	vcpu->exception.nr = nr;
+	vcpu->exception.error_code = error_code;
+}
+EXPORT_SYMBOL_GPL(kvm_queue_exception_e);
+
+static void __queue_exception(struct kvm_vcpu *vcpu)
+{
+	kvm_x86_ops->queue_exception(vcpu, vcpu->exception.nr,
+				     vcpu->exception.has_error_code,
+				     vcpu->exception.error_code);
+}
+
 /*
  * Load the pae pdptrs.  Return true is they are all valid.
  */
@@ -2370,7 +2396,9 @@ again:
 		goto out;
 	}
 
-	if (irqchip_in_kernel(vcpu->kvm))
+	if (vcpu->exception.pending)
+		__queue_exception(vcpu);
+	else if (irqchip_in_kernel(vcpu->kvm))
 		kvm_x86_ops->inject_pending_irq(vcpu);
 	else
 		kvm_x86_ops->inject_pending_vectors(vcpu, kvm_run);
@@ -2409,6 +2437,9 @@ again:
 		profile_hit(KVM_PROFILING, (void *)vcpu->rip);
 	}
 
+	if (vcpu->exception.pending && kvm_x86_ops->exception_injected(vcpu))
+		vcpu->exception.pending = false;
+
 	r = kvm_x86_ops->handle_exit(kvm_run, vcpu);
 
 	if (r > 0) {
diff --git a/drivers/kvm/x86.h b/drivers/kvm/x86.h
index 78ab1e1..7510e31 100644
--- a/drivers/kvm/x86.h
+++ b/drivers/kvm/x86.h
@@ -136,6 +136,13 @@ struct kvm_vcpu {
 	struct kvm_pio_request pio;
 	void *pio_data;
 
+	struct kvm_queued_exception {
+		bool pending;
+		bool has_error_code;
+		u8 nr;
+		u32 error_code;
+	} exception;
+
 	struct {
 		int active;
 		u8 save_iopl;
@@ -216,6 +223,9 @@ struct kvm_x86_ops {
 				unsigned char *hypercall_addr);
 	int (*get_irq)(struct kvm_vcpu *vcpu);
 	void (*set_irq)(struct kvm_vcpu *vcpu, int vec);
+	void (*queue_exception)(struct kvm_vcpu *vcpu, unsigned nr,
+				bool has_error_code, u32 error_code);
+	bool (*exception_injected)(struct kvm_vcpu *vcpu);
 	void (*inject_pending_irq)(struct kvm_vcpu *vcpu);
 	void (*inject_pending_vectors)(struct kvm_vcpu *vcpu,
 				       struct kvm_run *run);
@@ -286,6 +296,9 @@ void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l);
 int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
 int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data);
 
+void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr);
+void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
+
 void fx_init(struct kvm_vcpu *vcpu);
 
 int emulator_read_std(unsigned long addr,
-- 
1.5.3


-------------------------------------------------------------------------
SF.Net email is sponsored by: The Future of Linux Business White Paper
from Novell.  From the desktop to the data center, Linux is going
mainstream.  Let it simplify your IT future.
http://altfarm.mediaplex.com/ad/ck/8857-50307-18918-4

^ permalink raw reply related	[flat|nested] 17+ messages in thread

* [PATCH 2/7] KVM: Replace page fault injection by the generalized exception queue
       [not found] ` <11967614542283-git-send-email-avi-atKUWr5tajBWk0Htik3J/w@public.gmane.org>
  2007-12-04  9:44   ` [PATCH 1/7] KVM: Generalize exception injection mechanism Avi Kivity
@ 2007-12-04  9:44   ` Avi Kivity
  2007-12-04  9:44   ` [PATCH 3/7] KVM: Replace #GP " Avi Kivity
                     ` (6 subsequent siblings)
  8 siblings, 0 replies; 17+ messages in thread
From: Avi Kivity @ 2007-12-04  9:44 UTC (permalink / raw)
  To: kvm-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f; +Cc: Avi Kivity

Signed-off-by: Avi Kivity <avi-atKUWr5tajBWk0Htik3J/w@public.gmane.org>
---
 drivers/kvm/mmu.c |    2 +-
 drivers/kvm/svm.c |   35 -----------------------------------
 drivers/kvm/vmx.c |   32 --------------------------------
 drivers/kvm/x86.c |   17 ++++++++++++++++-
 drivers/kvm/x86.h |    4 ++--
 5 files changed, 19 insertions(+), 71 deletions(-)

diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
index 9b9d1b6..62a7415 100644
--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c
@@ -1081,7 +1081,7 @@ static void inject_page_fault(struct kvm_vcpu *vcpu,
 			      u64 addr,
 			      u32 err_code)
 {
-	kvm_x86_ops->inject_page_fault(vcpu, addr, err_code);
+	kvm_inject_page_fault(vcpu, addr, err_code);
 }
 
 static void paging_free(struct kvm_vcpu *vcpu)
diff --git a/drivers/kvm/svm.c b/drivers/kvm/svm.c
index f4c61c8..ce77f15 100644
--- a/drivers/kvm/svm.c
+++ b/drivers/kvm/svm.c
@@ -225,12 +225,6 @@ static void inject_ud(struct kvm_vcpu *vcpu)
 						UD_VECTOR;
 }
 
-static int is_page_fault(uint32_t info)
-{
-	info &= SVM_EVTINJ_VEC_MASK | SVM_EVTINJ_TYPE_MASK | SVM_EVTINJ_VALID;
-	return info == (PF_VECTOR | SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_EXEPT);
-}
-
 static int is_external_interrupt(u32 info)
 {
 	info &= SVM_EVTINJ_TYPE_MASK | SVM_EVTINJ_VALID;
@@ -1624,34 +1618,6 @@ static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root)
 	}
 }
 
-static void svm_inject_page_fault(struct kvm_vcpu *vcpu,
-				  unsigned long  addr,
-				  uint32_t err_code)
-{
-	struct vcpu_svm *svm = to_svm(vcpu);
-	uint32_t exit_int_info = svm->vmcb->control.exit_int_info;
-
-	++vcpu->stat.pf_guest;
-
-	if (is_page_fault(exit_int_info)) {
-
-		svm->vmcb->control.event_inj_err = 0;
-		svm->vmcb->control.event_inj = 	SVM_EVTINJ_VALID |
-						SVM_EVTINJ_VALID_ERR |
-						SVM_EVTINJ_TYPE_EXEPT |
-						DF_VECTOR;
-		return;
-	}
-	vcpu->cr2 = addr;
-	svm->vmcb->save.cr2 = addr;
-	svm->vmcb->control.event_inj = 	SVM_EVTINJ_VALID |
-					SVM_EVTINJ_VALID_ERR |
-					SVM_EVTINJ_TYPE_EXEPT |
-					PF_VECTOR;
-	svm->vmcb->control.event_inj_err = err_code;
-}
-
-
 static int is_disabled(void)
 {
 	u64 vm_cr;
@@ -1721,7 +1687,6 @@ static struct kvm_x86_ops svm_x86_ops = {
 	.set_rflags = svm_set_rflags,
 
 	.tlb_flush = svm_flush_tlb,
-	.inject_page_fault = svm_inject_page_fault,
 
 	.inject_gp = svm_inject_gp,
 
diff --git a/drivers/kvm/vmx.c b/drivers/kvm/vmx.c
index 1ec1c28..20e9dfc 100644
--- a/drivers/kvm/vmx.c
+++ b/drivers/kvm/vmx.c
@@ -2487,37 +2487,6 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 		asm("int $2");
 }
 
-static void vmx_inject_page_fault(struct kvm_vcpu *vcpu,
-				  unsigned long addr,
-				  u32 err_code)
-{
-	struct vcpu_vmx *vmx = to_vmx(vcpu);
-	u32 vect_info = vmx->idt_vectoring_info;
-
-	++vcpu->stat.pf_guest;
-
-	if (is_page_fault(vect_info)) {
-		printk(KERN_DEBUG "inject_page_fault: "
-		       "double fault 0x%lx @ 0x%lx\n",
-		       addr, vmcs_readl(GUEST_RIP));
-		vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, 0);
-		vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
-			     DF_VECTOR |
-			     INTR_TYPE_EXCEPTION |
-			     INTR_INFO_DELIEVER_CODE_MASK |
-			     INTR_INFO_VALID_MASK);
-		return;
-	}
-	vcpu->cr2 = addr;
-	vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, err_code);
-	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
-		     PF_VECTOR |
-		     INTR_TYPE_EXCEPTION |
-		     INTR_INFO_DELIEVER_CODE_MASK |
-		     INTR_INFO_VALID_MASK);
-
-}
-
 static void vmx_free_vmcs(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -2649,7 +2618,6 @@ static struct kvm_x86_ops vmx_x86_ops = {
 	.set_rflags = vmx_set_rflags,
 
 	.tlb_flush = vmx_flush_tlb,
-	.inject_page_fault = vmx_inject_page_fault,
 
 	.inject_gp = vmx_inject_gp,
 
diff --git a/drivers/kvm/x86.c b/drivers/kvm/x86.c
index 11440d1..dc007a3 100644
--- a/drivers/kvm/x86.c
+++ b/drivers/kvm/x86.c
@@ -142,6 +142,21 @@ void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr)
 }
 EXPORT_SYMBOL_GPL(kvm_queue_exception);
 
+void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long addr,
+			   u32 error_code)
+{
+	++vcpu->stat.pf_guest;
+	if (vcpu->exception.pending && vcpu->exception.nr == PF_VECTOR) {
+		printk(KERN_DEBUG "kvm: inject_page_fault:"
+		       " double fault 0x%lx\n", addr);
+		vcpu->exception.nr = DF_VECTOR;
+		vcpu->exception.error_code = 0;
+		return;
+	}
+	vcpu->cr2 = addr;
+	kvm_queue_exception_e(vcpu, PF_VECTOR, error_code);
+}
+
 void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
 {
 	WARN_ON(vcpu->exception.pending);
@@ -1601,7 +1616,7 @@ static int emulator_write_emulated_onepage(unsigned long addr,
 	gpa_t                 gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);
 
 	if (gpa == UNMAPPED_GVA) {
-		kvm_x86_ops->inject_page_fault(vcpu, addr, 2);
+		kvm_inject_page_fault(vcpu, addr, 2);
 		return X86EMUL_PROPAGATE_FAULT;
 	}
 
diff --git a/drivers/kvm/x86.h b/drivers/kvm/x86.h
index 7510e31..49fcfde 100644
--- a/drivers/kvm/x86.h
+++ b/drivers/kvm/x86.h
@@ -211,8 +211,6 @@ struct kvm_x86_ops {
 	void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);
 
 	void (*tlb_flush)(struct kvm_vcpu *vcpu);
-	void (*inject_page_fault)(struct kvm_vcpu *vcpu,
-				  unsigned long addr, u32 err_code);
 
 	void (*inject_gp)(struct kvm_vcpu *vcpu, unsigned err_code);
 
@@ -298,6 +296,8 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data);
 
 void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr);
 void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
+void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long cr2,
+			   u32 error_code);
 
 void fx_init(struct kvm_vcpu *vcpu);
 
-- 
1.5.3


-------------------------------------------------------------------------
SF.Net email is sponsored by: The Future of Linux Business White Paper
from Novell.  From the desktop to the data center, Linux is going
mainstream.  Let it simplify your IT future.
http://altfarm.mediaplex.com/ad/ck/8857-50307-18918-4

^ permalink raw reply related	[flat|nested] 17+ messages in thread

* [PATCH 3/7] KVM: Replace #GP injection by the generalized exception queue
       [not found] ` <11967614542283-git-send-email-avi-atKUWr5tajBWk0Htik3J/w@public.gmane.org>
  2007-12-04  9:44   ` [PATCH 1/7] KVM: Generalize exception injection mechanism Avi Kivity
  2007-12-04  9:44   ` [PATCH 2/7] KVM: Replace page fault injection by the generalized exception queue Avi Kivity
@ 2007-12-04  9:44   ` Avi Kivity
  2007-12-04  9:44   ` [PATCH 4/7] KVM: Use generalized exception queue for injecting #UD Avi Kivity
                     ` (5 subsequent siblings)
  8 siblings, 0 replies; 17+ messages in thread
From: Avi Kivity @ 2007-12-04  9:44 UTC (permalink / raw)
  To: kvm-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f; +Cc: Avi Kivity

Signed-off-by: Avi Kivity <avi-atKUWr5tajBWk0Htik3J/w@public.gmane.org>
---
 drivers/kvm/svm.c         |   17 ++---------------
 drivers/kvm/vmx.c         |   18 ++----------------
 drivers/kvm/x86.c         |   43 +++++++++++++++++++------------------------
 drivers/kvm/x86.h         |    7 +++++--
 drivers/kvm/x86_emulate.c |    4 ++--
 5 files changed, 30 insertions(+), 59 deletions(-)

diff --git a/drivers/kvm/svm.c b/drivers/kvm/svm.c
index ce77f15..b896614 100644
--- a/drivers/kvm/svm.c
+++ b/drivers/kvm/svm.c
@@ -207,17 +207,6 @@ static bool svm_exception_injected(struct kvm_vcpu *vcpu)
 	return !(svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_VALID);
 }
 
-static void svm_inject_gp(struct kvm_vcpu *vcpu, unsigned error_code)
-{
-	struct vcpu_svm *svm = to_svm(vcpu);
-
-	svm->vmcb->control.event_inj =		SVM_EVTINJ_VALID |
-						SVM_EVTINJ_VALID_ERR |
-						SVM_EVTINJ_TYPE_EXEPT |
-						GP_VECTOR;
-	svm->vmcb->control.event_inj_err = error_code;
-}
-
 static void inject_ud(struct kvm_vcpu *vcpu)
 {
 	to_svm(vcpu)->vmcb->control.event_inj = SVM_EVTINJ_VALID |
@@ -1115,7 +1104,7 @@ static int rdmsr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 	u64 data;
 
 	if (svm_get_msr(&svm->vcpu, ecx, &data))
-		svm_inject_gp(&svm->vcpu, 0);
+		kvm_inject_gp(&svm->vcpu, 0);
 	else {
 		svm->vmcb->save.rax = data & 0xffffffff;
 		svm->vcpu.regs[VCPU_REGS_RDX] = data >> 32;
@@ -1176,7 +1165,7 @@ static int wrmsr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 		| ((u64)(svm->vcpu.regs[VCPU_REGS_RDX] & -1u) << 32);
 	svm->next_rip = svm->vmcb->save.rip + 2;
 	if (svm_set_msr(&svm->vcpu, ecx, data))
-		svm_inject_gp(&svm->vcpu, 0);
+		kvm_inject_gp(&svm->vcpu, 0);
 	else
 		skip_emulated_instruction(&svm->vcpu);
 	return 1;
@@ -1688,8 +1677,6 @@ static struct kvm_x86_ops svm_x86_ops = {
 
 	.tlb_flush = svm_flush_tlb,
 
-	.inject_gp = svm_inject_gp,
-
 	.run = svm_vcpu_run,
 	.handle_exit = handle_exit,
 	.skip_emulated_instruction = skip_emulated_instruction,
diff --git a/drivers/kvm/vmx.c b/drivers/kvm/vmx.c
index 20e9dfc..92660db 100644
--- a/drivers/kvm/vmx.c
+++ b/drivers/kvm/vmx.c
@@ -613,18 +613,6 @@ static bool vmx_exception_injected(struct kvm_vcpu *vcpu)
 	return !(vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK);
 }
 
-static void vmx_inject_gp(struct kvm_vcpu *vcpu, unsigned error_code)
-{
-	printk(KERN_DEBUG "inject_general_protection: rip 0x%lx\n",
-	       vmcs_readl(GUEST_RIP));
-	vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
-	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
-		     GP_VECTOR |
-		     INTR_TYPE_EXCEPTION |
-		     INTR_INFO_DELIEVER_CODE_MASK |
-		     INTR_INFO_VALID_MASK);
-}
-
 static void vmx_inject_ud(struct kvm_vcpu *vcpu)
 {
 	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
@@ -2083,7 +2071,7 @@ static int handle_rdmsr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	u64 data;
 
 	if (vmx_get_msr(vcpu, ecx, &data)) {
-		vmx_inject_gp(vcpu, 0);
+		kvm_inject_gp(vcpu, 0);
 		return 1;
 	}
 
@@ -2101,7 +2089,7 @@ static int handle_wrmsr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 		| ((u64)(vcpu->regs[VCPU_REGS_RDX] & -1u) << 32);
 
 	if (vmx_set_msr(vcpu, ecx, data) != 0) {
-		vmx_inject_gp(vcpu, 0);
+		kvm_inject_gp(vcpu, 0);
 		return 1;
 	}
 
@@ -2619,8 +2607,6 @@ static struct kvm_x86_ops vmx_x86_ops = {
 
 	.tlb_flush = vmx_flush_tlb,
 
-	.inject_gp = vmx_inject_gp,
-
 	.run = vmx_vcpu_run,
 	.handle_exit = kvm_handle_exit,
 	.skip_emulated_instruction = skip_emulated_instruction,
diff --git a/drivers/kvm/x86.c b/drivers/kvm/x86.c
index dc007a3..6deb052 100644
--- a/drivers/kvm/x86.c
+++ b/drivers/kvm/x86.c
@@ -128,11 +128,6 @@ void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data)
 }
 EXPORT_SYMBOL_GPL(kvm_set_apic_base);
 
-static void inject_gp(struct kvm_vcpu *vcpu)
-{
-	kvm_x86_ops->inject_gp(vcpu, 0);
-}
-
 void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr)
 {
 	WARN_ON(vcpu->exception.pending);
@@ -232,20 +227,20 @@ void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 	if (cr0 & CR0_RESERVED_BITS) {
 		printk(KERN_DEBUG "set_cr0: 0x%lx #GP, reserved bits 0x%lx\n",
 		       cr0, vcpu->cr0);
-		inject_gp(vcpu);
+		kvm_inject_gp(vcpu, 0);
 		return;
 	}
 
 	if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD)) {
 		printk(KERN_DEBUG "set_cr0: #GP, CD == 0 && NW == 1\n");
-		inject_gp(vcpu);
+		kvm_inject_gp(vcpu, 0);
 		return;
 	}
 
 	if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE)) {
 		printk(KERN_DEBUG "set_cr0: #GP, set PG flag "
 		       "and a clear PE flag\n");
-		inject_gp(vcpu);
+		kvm_inject_gp(vcpu, 0);
 		return;
 	}
 
@@ -257,14 +252,14 @@ void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 			if (!is_pae(vcpu)) {
 				printk(KERN_DEBUG "set_cr0: #GP, start paging "
 				       "in long mode while PAE is disabled\n");
-				inject_gp(vcpu);
+				kvm_inject_gp(vcpu, 0);
 				return;
 			}
 			kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
 			if (cs_l) {
 				printk(KERN_DEBUG "set_cr0: #GP, start paging "
 				       "in long mode while CS.L == 1\n");
-				inject_gp(vcpu);
+				kvm_inject_gp(vcpu, 0);
 				return;
 
 			}
@@ -273,7 +268,7 @@ void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 		if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->cr3)) {
 			printk(KERN_DEBUG "set_cr0: #GP, pdptrs "
 			       "reserved bits\n");
-			inject_gp(vcpu);
+			kvm_inject_gp(vcpu, 0);
 			return;
 		}
 
@@ -299,7 +294,7 @@ void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 {
 	if (cr4 & CR4_RESERVED_BITS) {
 		printk(KERN_DEBUG "set_cr4: #GP, reserved bits\n");
-		inject_gp(vcpu);
+		kvm_inject_gp(vcpu, 0);
 		return;
 	}
 
@@ -307,19 +302,19 @@ void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 		if (!(cr4 & X86_CR4_PAE)) {
 			printk(KERN_DEBUG "set_cr4: #GP, clearing PAE while "
 			       "in long mode\n");
-			inject_gp(vcpu);
+			kvm_inject_gp(vcpu, 0);
 			return;
 		}
 	} else if (is_paging(vcpu) && !is_pae(vcpu) && (cr4 & X86_CR4_PAE)
 		   && !load_pdptrs(vcpu, vcpu->cr3)) {
 		printk(KERN_DEBUG "set_cr4: #GP, pdptrs reserved bits\n");
-		inject_gp(vcpu);
+		kvm_inject_gp(vcpu, 0);
 		return;
 	}
 
 	if (cr4 & X86_CR4_VMXE) {
 		printk(KERN_DEBUG "set_cr4: #GP, setting VMXE\n");
-		inject_gp(vcpu);
+		kvm_inject_gp(vcpu, 0);
 		return;
 	}
 	kvm_x86_ops->set_cr4(vcpu, cr4);
@@ -340,7 +335,7 @@ void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 	if (is_long_mode(vcpu)) {
 		if (cr3 & CR3_L_MODE_RESERVED_BITS) {
 			printk(KERN_DEBUG "set_cr3: #GP, reserved bits\n");
-			inject_gp(vcpu);
+			kvm_inject_gp(vcpu, 0);
 			return;
 		}
 	} else {
@@ -348,13 +343,13 @@ void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 			if (cr3 & CR3_PAE_RESERVED_BITS) {
 				printk(KERN_DEBUG
 				       "set_cr3: #GP, reserved bits\n");
-				inject_gp(vcpu);
+				kvm_inject_gp(vcpu, 0);
 				return;
 			}
 			if (is_paging(vcpu) && !load_pdptrs(vcpu, cr3)) {
 				printk(KERN_DEBUG "set_cr3: #GP, pdptrs "
 				       "reserved bits\n");
-				inject_gp(vcpu);
+				kvm_inject_gp(vcpu, 0);
 				return;
 			}
 		}
@@ -375,7 +370,7 @@ void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 	 * to debug) behavior on the guest side.
 	 */
 	if (unlikely(!gfn_to_memslot(vcpu->kvm, cr3 >> PAGE_SHIFT)))
-		inject_gp(vcpu);
+		kvm_inject_gp(vcpu, 0);
 	else {
 		vcpu->cr3 = cr3;
 		vcpu->mmu.new_cr3(vcpu);
@@ -388,7 +383,7 @@ void set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
 {
 	if (cr8 & CR8_RESERVED_BITS) {
 		printk(KERN_DEBUG "set_cr8: #GP, reserved bits 0x%lx\n", cr8);
-		inject_gp(vcpu);
+		kvm_inject_gp(vcpu, 0);
 		return;
 	}
 	if (irqchip_in_kernel(vcpu->kvm))
@@ -436,14 +431,14 @@ static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
 	if (efer & EFER_RESERVED_BITS) {
 		printk(KERN_DEBUG "set_efer: 0x%llx #GP, reserved bits\n",
 		       efer);
-		inject_gp(vcpu);
+		kvm_inject_gp(vcpu, 0);
 		return;
 	}
 
 	if (is_paging(vcpu)
 	    && (vcpu->shadow_efer & EFER_LME) != (efer & EFER_LME)) {
 		printk(KERN_DEBUG "set_efer: #GP, change LME while paging\n");
-		inject_gp(vcpu);
+		kvm_inject_gp(vcpu, 0);
 		return;
 	}
 
@@ -2047,7 +2042,7 @@ int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
 		 * String I/O in reverse.  Yuck.  Kill the guest, fix later.
 		 */
 		pr_unimpl(vcpu, "guest string pio down\n");
-		inject_gp(vcpu);
+		kvm_inject_gp(vcpu, 0);
 		return 1;
 	}
 	vcpu->run->io.count = now;
@@ -2062,7 +2057,7 @@ int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
 		vcpu->pio.guest_pages[i] = page;
 		mutex_unlock(&vcpu->kvm->lock);
 		if (!page) {
-			inject_gp(vcpu);
+			kvm_inject_gp(vcpu, 0);
 			free_pio_guest_pages(vcpu);
 			return 1;
 		}
diff --git a/drivers/kvm/x86.h b/drivers/kvm/x86.h
index 49fcfde..fb48b2f 100644
--- a/drivers/kvm/x86.h
+++ b/drivers/kvm/x86.h
@@ -212,8 +212,6 @@ struct kvm_x86_ops {
 
 	void (*tlb_flush)(struct kvm_vcpu *vcpu);
 
-	void (*inject_gp)(struct kvm_vcpu *vcpu, unsigned err_code);
-
 	void (*run)(struct kvm_vcpu *vcpu, struct kvm_run *run);
 	int (*handle_exit)(struct kvm_run *run, struct kvm_vcpu *vcpu);
 	void (*skip_emulated_instruction)(struct kvm_vcpu *vcpu);
@@ -459,6 +457,11 @@ static inline u32 get_rdx_init_val(void)
 	return 0x600; /* P6 family */
 }
 
+static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code)
+{
+	kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
+}
+
 #define ASM_VMX_VMCLEAR_RAX       ".byte 0x66, 0x0f, 0xc7, 0x30"
 #define ASM_VMX_VMLAUNCH          ".byte 0x0f, 0x01, 0xc2"
 #define ASM_VMX_VMRESUME          ".byte 0x0f, 0x01, 0xc3"
diff --git a/drivers/kvm/x86_emulate.c b/drivers/kvm/x86_emulate.c
index f2a4708..3a3bc64 100644
--- a/drivers/kvm/x86_emulate.c
+++ b/drivers/kvm/x86_emulate.c
@@ -1791,7 +1791,7 @@ twobyte_insn:
 			| ((u64)c->regs[VCPU_REGS_RDX] << 32);
 		rc = kvm_set_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], msr_data);
 		if (rc) {
-			kvm_x86_ops->inject_gp(ctxt->vcpu, 0);
+			kvm_inject_gp(ctxt->vcpu, 0);
 			c->eip = ctxt->vcpu->rip;
 		}
 		rc = X86EMUL_CONTINUE;
@@ -1801,7 +1801,7 @@ twobyte_insn:
 		/* rdmsr */
 		rc = kvm_get_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], &msr_data);
 		if (rc) {
-			kvm_x86_ops->inject_gp(ctxt->vcpu, 0);
+			kvm_inject_gp(ctxt->vcpu, 0);
 			c->eip = ctxt->vcpu->rip;
 		} else {
 			c->regs[VCPU_REGS_RAX] = (u32)msr_data;
-- 
1.5.3


-------------------------------------------------------------------------
SF.Net email is sponsored by: The Future of Linux Business White Paper
from Novell.  From the desktop to the data center, Linux is going
mainstream.  Let it simplify your IT future.
http://altfarm.mediaplex.com/ad/ck/8857-50307-18918-4

^ permalink raw reply related	[flat|nested] 17+ messages in thread

* [PATCH 4/7] KVM: Use generalized exception queue for injecting #UD
       [not found] ` <11967614542283-git-send-email-avi-atKUWr5tajBWk0Htik3J/w@public.gmane.org>
                     ` (2 preceding siblings ...)
  2007-12-04  9:44   ` [PATCH 3/7] KVM: Replace #GP " Avi Kivity
@ 2007-12-04  9:44   ` Avi Kivity
  2007-12-04  9:44   ` [PATCH 5/7] KVM: Add explicit acks to interrupt controller model Avi Kivity
                     ` (4 subsequent siblings)
  8 siblings, 0 replies; 17+ messages in thread
From: Avi Kivity @ 2007-12-04  9:44 UTC (permalink / raw)
  To: kvm-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f; +Cc: Avi Kivity

Signed-off-by: Avi Kivity <avi-atKUWr5tajBWk0Htik3J/w@public.gmane.org>
---
 drivers/kvm/svm.c |   12 ++----------
 drivers/kvm/vmx.c |   11 +----------
 2 files changed, 3 insertions(+), 20 deletions(-)

diff --git a/drivers/kvm/svm.c b/drivers/kvm/svm.c
index b896614..8b1cc60 100644
--- a/drivers/kvm/svm.c
+++ b/drivers/kvm/svm.c
@@ -207,13 +207,6 @@ static bool svm_exception_injected(struct kvm_vcpu *vcpu)
 	return !(svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_VALID);
 }
 
-static void inject_ud(struct kvm_vcpu *vcpu)
-{
-	to_svm(vcpu)->vmcb->control.event_inj = SVM_EVTINJ_VALID |
-						SVM_EVTINJ_TYPE_EXEPT |
-						UD_VECTOR;
-}
-
 static int is_external_interrupt(u32 info)
 {
 	info &= SVM_EVTINJ_TYPE_MASK | SVM_EVTINJ_VALID;
@@ -948,8 +941,7 @@ static int ud_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 
 	er = emulate_instruction(&svm->vcpu, kvm_run, 0, 0, 0);
 	if (er != EMULATE_DONE)
-		inject_ud(&svm->vcpu);
-
+		kvm_queue_exception(&svm->vcpu, UD_VECTOR);
 	return 1;
 }
 
@@ -1027,7 +1019,7 @@ static int vmmcall_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 static int invalid_op_interception(struct vcpu_svm *svm,
 				   struct kvm_run *kvm_run)
 {
-	inject_ud(&svm->vcpu);
+	kvm_queue_exception(&svm->vcpu, UD_VECTOR);
 	return 1;
 }
 
diff --git a/drivers/kvm/vmx.c b/drivers/kvm/vmx.c
index 92660db..aa6bf2b 100644
--- a/drivers/kvm/vmx.c
+++ b/drivers/kvm/vmx.c
@@ -613,14 +613,6 @@ static bool vmx_exception_injected(struct kvm_vcpu *vcpu)
 	return !(vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK);
 }
 
-static void vmx_inject_ud(struct kvm_vcpu *vcpu)
-{
-	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
-		     UD_VECTOR |
-		     INTR_TYPE_EXCEPTION |
-		     INTR_INFO_VALID_MASK);
-}
-
 /*
  * Swap MSR entry in host/guest MSR entry array.
  */
@@ -1866,8 +1858,7 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	if (is_invalid_opcode(intr_info)) {
 		er = emulate_instruction(vcpu, kvm_run, 0, 0, 0);
 		if (er != EMULATE_DONE)
-			vmx_inject_ud(vcpu);
-
+			kvm_queue_exception(vcpu, UD_VECTOR);
 		return 1;
 	}
 
-- 
1.5.3


-------------------------------------------------------------------------
SF.Net email is sponsored by: The Future of Linux Business White Paper
from Novell.  From the desktop to the data center, Linux is going
mainstream.  Let it simplify your IT future.
http://altfarm.mediaplex.com/ad/ck/8857-50307-18918-4

^ permalink raw reply related	[flat|nested] 17+ messages in thread

* [PATCH 5/7] KVM: Add explicit acks to interrupt controller model
       [not found] ` <11967614542283-git-send-email-avi-atKUWr5tajBWk0Htik3J/w@public.gmane.org>
                     ` (3 preceding siblings ...)
  2007-12-04  9:44   ` [PATCH 4/7] KVM: Use generalized exception queue for injecting #UD Avi Kivity
@ 2007-12-04  9:44   ` Avi Kivity
  2007-12-04  9:44   ` [PATCH 6/7] KVM: Move tpr threshold calculation into common code Avi Kivity
                     ` (3 subsequent siblings)
  8 siblings, 0 replies; 17+ messages in thread
From: Avi Kivity @ 2007-12-04  9:44 UTC (permalink / raw)
  To: kvm-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f; +Cc: Avi Kivity

The current interrupt controller emulation model supports two functions
for pulling interrupts from the controller into the processor:

  kvm_cpu_has_interrupt(vcpu) - is an interrupt pending for the core
  kvm_cpu_get_interrupt(vcpu) - get pending interrupt and ack it

This presents a problem when we try but fail to inject an interrupt, since it
has already been acked.  Currently subarch specific code carries this acked
interrupt around, but the code is quite complex and difficult to follow.

This patch changes the model to

  kvm_cpu_get_interrupt(vcpu, irq) - get pending interrupt, if any
  irq->ack()                       - acknowledge interrupt

Which allows acking only after the core has accepted the interrupt.  Currently
we use the new model with the old semantics by calling ack() immediately
after we see a pending interrupt.

Signed-off-by: Avi Kivity <avi-atKUWr5tajBWk0Htik3J/w@public.gmane.org>
---
 drivers/kvm/i8259.c    |   44 +++++++++++++++++++++++++-------------------
 drivers/kvm/irq.c      |   38 ++++++--------------------------------
 drivers/kvm/irq.h      |   13 +++++++++----
 drivers/kvm/kvm_main.c |    3 ++-
 drivers/kvm/lapic.c    |   20 ++++++++++++++------
 drivers/kvm/svm.c      |    6 ++++--
 drivers/kvm/vmx.c      |    6 ++++--
 7 files changed, 64 insertions(+), 66 deletions(-)

diff --git a/drivers/kvm/i8259.c b/drivers/kvm/i8259.c
index f0dc2ee..dd933d7 100644
--- a/drivers/kvm/i8259.c
+++ b/drivers/kvm/i8259.c
@@ -27,6 +27,7 @@
  */
 #include <linux/mm.h>
 #include "irq.h"
+#include "x86.h"
 
 /*
  * set irq level. If an edge is detected, then the IRR is set to 1
@@ -149,36 +150,41 @@ static inline void pic_intack(struct kvm_kpic_state *s, int irq)
 		s->irr &= ~(1 << irq);
 }
 
-int kvm_pic_read_irq(struct kvm_pic *s)
+static void kvm_pic_ack_irq(struct kvm_vcpu *vcpu, unsigned irq)
+{
+	struct kvm_pic *s;
+
+	s = pic_irqchip(vcpu->kvm);
+	s->output = 0;
+	if (irq < 8)
+		pic_intack(&s->pics[0], irq);
+	else {
+		pic_intack(&s->pics[0], 2);
+		pic_intack(&s->pics[1], irq - 8);
+	}
+	pic_update_irq(s);
+}
+
+bool kvm_pic_read_irq(struct kvm_pic *s, struct kvm_pending_irq *pirq)
 {
 	int irq, irq2, intno;
 
 	irq = pic_get_irq(&s->pics[0]);
 	if (irq >= 0) {
-		pic_intack(&s->pics[0], irq);
 		if (irq == 2) {
 			irq2 = pic_get_irq(&s->pics[1]);
-			if (irq2 >= 0)
-				pic_intack(&s->pics[1], irq2);
-			else
-				/*
-				 * spurious IRQ on slave controller
-				 */
-				irq2 = 7;
+			if (irq2 < 0)
+				irq2 = 7; /* spurious */
 			intno = s->pics[1].irq_base + irq2;
 			irq = irq2 + 8;
 		} else
 			intno = s->pics[0].irq_base + irq;
-	} else {
-		/*
-		 * spurious IRQ on host controller
-		 */
-		irq = 7;
-		intno = s->pics[0].irq_base + irq;
-	}
-	pic_update_irq(s);
-
-	return intno;
+		pirq->vector = intno;
+		pirq->ack = kvm_pic_ack_irq;
+		pirq->info = irq;
+		return true;
+	} else
+		return false;
 }
 
 void kvm_pic_reset(struct kvm_kpic_state *s)
diff --git a/drivers/kvm/irq.c b/drivers/kvm/irq.c
index 59b47c5..66d1a91 100644
--- a/drivers/kvm/irq.c
+++ b/drivers/kvm/irq.c
@@ -26,41 +26,15 @@
 #include "irq.h"
 
 /*
- * check if there is pending interrupt without
- * intack.
- */
-int kvm_cpu_has_interrupt(struct kvm_vcpu *v)
-{
-	struct kvm_pic *s;
-
-	if (kvm_apic_has_interrupt(v) == -1) {	/* LAPIC */
-		if (kvm_apic_accept_pic_intr(v)) {
-			s = pic_irqchip(v->kvm);	/* PIC */
-			return s->output;
-		} else
-			return 0;
-	}
-	return 1;
-}
-EXPORT_SYMBOL_GPL(kvm_cpu_has_interrupt);
-
-/*
  * Read pending interrupt vector and intack.
  */
-int kvm_cpu_get_interrupt(struct kvm_vcpu *v)
+bool kvm_cpu_get_interrupt(struct kvm_vcpu *v, struct kvm_pending_irq *irq)
 {
-	struct kvm_pic *s;
-	int vector;
-
-	vector = kvm_get_apic_interrupt(v);	/* APIC */
-	if (vector == -1) {
-		if (kvm_apic_accept_pic_intr(v)) {
-			s = pic_irqchip(v->kvm);
-			s->output = 0;		/* PIC */
-			vector = kvm_pic_read_irq(s);
-		}
-	}
-	return vector;
+	if (kvm_get_apic_interrupt(v, irq))
+		return true;
+	if (kvm_apic_accept_pic_intr(v))
+		return kvm_pic_read_irq(pic_irqchip(v->kvm), irq);
+	return false;
 }
 EXPORT_SYMBOL_GPL(kvm_cpu_get_interrupt);
 
diff --git a/drivers/kvm/irq.h b/drivers/kvm/irq.h
index 75f5f18..49af30b 100644
--- a/drivers/kvm/irq.h
+++ b/drivers/kvm/irq.h
@@ -26,6 +26,12 @@
 
 typedef void irq_request_func(void *opaque, int level);
 
+struct kvm_pending_irq {
+	unsigned vector;
+	void (*ack)(struct kvm_vcpu *vcpu, unsigned info);
+	unsigned info;
+};
+
 struct kvm_kpic_state {
 	u8 last_irr;	/* edge detection */
 	u8 irr;		/* interrupt request register */
@@ -56,9 +62,8 @@ struct kvm_pic {
 
 struct kvm_pic *kvm_create_pic(struct kvm *kvm);
 void kvm_pic_set_irq(void *opaque, int irq, int level);
-int kvm_pic_read_irq(struct kvm_pic *s);
-int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
-int kvm_cpu_has_interrupt(struct kvm_vcpu *v);
+bool kvm_pic_read_irq(struct kvm_pic *s, struct kvm_pending_irq *irq);
+bool kvm_cpu_get_interrupt(struct kvm_vcpu *v, struct kvm_pending_irq *irq);
 void kvm_pic_update_irq(struct kvm_pic *s);
 
 #define IOAPIC_NUM_PINS  KVM_IOAPIC_NUM_PINS
@@ -144,7 +149,7 @@ do {									\
 void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
 int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu);
 int kvm_apic_accept_pic_intr(struct kvm_vcpu *vcpu);
-int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu);
+bool kvm_get_apic_interrupt(struct kvm_vcpu *vcpu, struct kvm_pending_irq *irq);
 int kvm_create_lapic(struct kvm_vcpu *vcpu);
 void kvm_lapic_reset(struct kvm_vcpu *vcpu);
 void kvm_pic_reset(struct kvm_kpic_state *s);
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index 7b5129e..41658d7 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -637,13 +637,14 @@ void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
 void kvm_vcpu_block(struct kvm_vcpu *vcpu)
 {
 	DECLARE_WAITQUEUE(wait, current);
+	struct kvm_pending_irq irq;
 
 	add_wait_queue(&vcpu->wq, &wait);
 
 	/*
 	 * We will block until either an interrupt or a signal wakes us up
 	 */
-	while (!kvm_cpu_has_interrupt(vcpu)
+	while (!kvm_cpu_get_interrupt(vcpu, &irq)
 	       && !signal_pending(current)
 	       && vcpu->mp_state != VCPU_MP_STATE_RUNNABLE
 	       && vcpu->mp_state != VCPU_MP_STATE_SIPI_RECEIVED) {
diff --git a/drivers/kvm/lapic.c b/drivers/kvm/lapic.c
index 5efa6c0..804a177 100644
--- a/drivers/kvm/lapic.c
+++ b/drivers/kvm/lapic.c
@@ -1044,18 +1044,26 @@ void kvm_apic_timer_intr_post(struct kvm_vcpu *vcpu, int vec)
 				apic->timer.period);
 }
 
-int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu)
+static void kvm_apic_ack_interrupt(struct kvm_vcpu *vcpu, unsigned vector)
 {
-	int vector = kvm_apic_has_interrupt(vcpu);
 	struct kvm_lapic *apic = vcpu->apic;
 
-	if (vector == -1)
-		return -1;
-
 	apic_set_vector(vector, apic->regs + APIC_ISR);
 	apic_update_ppr(apic);
 	apic_clear_irr(vector, apic);
-	return vector;
+}
+
+bool kvm_get_apic_interrupt(struct kvm_vcpu *vcpu, struct kvm_pending_irq *irq)
+{
+	int vector = kvm_apic_has_interrupt(vcpu);
+
+	if (vector == -1)
+		return false;
+
+	irq->vector = vector;
+	irq->ack = kvm_apic_ack_interrupt;
+	irq->info = vector;
+	return true;
 }
 
 void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu)
diff --git a/drivers/kvm/svm.c b/drivers/kvm/svm.c
index 8b1cc60..155b266 100644
--- a/drivers/kvm/svm.c
+++ b/drivers/kvm/svm.c
@@ -1316,6 +1316,7 @@ static void svm_intr_assist(struct kvm_vcpu *vcpu)
 	struct vcpu_svm *svm = to_svm(vcpu);
 	struct vmcb *vmcb = svm->vmcb;
 	int intr_vector = -1;
+	struct kvm_pending_irq irq;
 
 	if ((vmcb->control.exit_int_info & SVM_EVTINJ_VALID) &&
 	    ((vmcb->control.exit_int_info & SVM_EVTINJ_TYPE_MASK) == 0)) {
@@ -1329,7 +1330,7 @@ static void svm_intr_assist(struct kvm_vcpu *vcpu)
 	if (vmcb->control.int_ctl & V_IRQ_MASK)
 		return;
 
-	if (!kvm_cpu_has_interrupt(vcpu))
+	if (!kvm_cpu_get_interrupt(vcpu, &irq))
 		return;
 
 	if (!(vmcb->save.rflags & X86_EFLAGS_IF) ||
@@ -1341,7 +1342,8 @@ static void svm_intr_assist(struct kvm_vcpu *vcpu)
 		return;
 	}
 	/* Okay, we can deliver the interrupt: grab it and update PIC state. */
-	intr_vector = kvm_cpu_get_interrupt(vcpu);
+	irq.ack(vcpu, irq.info);
+	intr_vector = irq.vector;
 	svm_inject_irq(svm, intr_vector);
 	kvm_timer_intr_post(vcpu, intr_vector);
 }
diff --git a/drivers/kvm/vmx.c b/drivers/kvm/vmx.c
index aa6bf2b..ed14849 100644
--- a/drivers/kvm/vmx.c
+++ b/drivers/kvm/vmx.c
@@ -2249,11 +2249,12 @@ static void vmx_intr_assist(struct kvm_vcpu *vcpu)
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	u32 idtv_info_field, intr_info_field;
 	int has_ext_irq, interrupt_window_open;
+	struct kvm_pending_irq irq;
 	int vector;
 
 	update_tpr_threshold(vcpu);
 
-	has_ext_irq = kvm_cpu_has_interrupt(vcpu);
+	has_ext_irq = kvm_cpu_get_interrupt(vcpu, &irq);
 	intr_info_field = vmcs_read32(VM_ENTRY_INTR_INFO_FIELD);
 	idtv_info_field = vmx->idt_vectoring_info;
 	if (intr_info_field & INTR_INFO_VALID_MASK) {
@@ -2294,7 +2295,8 @@ static void vmx_intr_assist(struct kvm_vcpu *vcpu)
 		((vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) &&
 		 (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0);
 	if (interrupt_window_open) {
-		vector = kvm_cpu_get_interrupt(vcpu);
+		irq.ack(vcpu, irq.info);
+		vector = irq.vector;
 		vmx_inject_irq(vcpu, vector);
 		kvm_timer_intr_post(vcpu, vector);
 	} else
-- 
1.5.3


-------------------------------------------------------------------------
SF.Net email is sponsored by: The Future of Linux Business White Paper
from Novell.  From the desktop to the data center, Linux is going
mainstream.  Let it simplify your IT future.
http://altfarm.mediaplex.com/ad/ck/8857-50307-18918-4

^ permalink raw reply related	[flat|nested] 17+ messages in thread

* [PATCH 6/7] KVM: Move tpr threshold calculation into common code
       [not found] ` <11967614542283-git-send-email-avi-atKUWr5tajBWk0Htik3J/w@public.gmane.org>
                     ` (4 preceding siblings ...)
  2007-12-04  9:44   ` [PATCH 5/7] KVM: Add explicit acks to interrupt controller model Avi Kivity
@ 2007-12-04  9:44   ` Avi Kivity
  2007-12-04  9:44   ` [PATCH 7/7] KVM: Ack interrupts only after they have successfully been injected Avi Kivity
                     ` (2 subsequent siblings)
  8 siblings, 0 replies; 17+ messages in thread
From: Avi Kivity @ 2007-12-04  9:44 UTC (permalink / raw)
  To: kvm-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f; +Cc: Avi Kivity

This moves the tpr shadow processing into the local apic code from the vmx
interrupt injection path.  This will allow decoupling the irq injection path
from normal execution (which needs to update the tpr threshold even when no
irq is being injected).

Signed-off-by: Avi Kivity <avi-atKUWr5tajBWk0Htik3J/w@public.gmane.org>
---
 drivers/kvm/irq.c      |    6 ++++--
 drivers/kvm/irq.h      |    8 +++++---
 drivers/kvm/kvm_main.c |    3 ++-
 drivers/kvm/lapic.c    |   17 ++++++++++++-----
 drivers/kvm/svm.c      |    3 ++-
 drivers/kvm/vmx.c      |   19 ++++++-------------
 6 files changed, 31 insertions(+), 25 deletions(-)

diff --git a/drivers/kvm/irq.c b/drivers/kvm/irq.c
index 66d1a91..8a95eef 100644
--- a/drivers/kvm/irq.c
+++ b/drivers/kvm/irq.c
@@ -28,9 +28,11 @@
 /*
  * Read pending interrupt vector and intack.
  */
-bool kvm_cpu_get_interrupt(struct kvm_vcpu *v, struct kvm_pending_irq *irq)
+bool kvm_cpu_get_interrupt(struct kvm_vcpu *v, struct kvm_pending_irq *irq,
+			   unsigned *tpr_threshold)
 {
-	if (kvm_get_apic_interrupt(v, irq))
+	*tpr_threshold = 0;
+	if (kvm_get_apic_interrupt(v, irq, tpr_threshold))
 		return true;
 	if (kvm_apic_accept_pic_intr(v))
 		return kvm_pic_read_irq(pic_irqchip(v->kvm), irq);
diff --git a/drivers/kvm/irq.h b/drivers/kvm/irq.h
index 49af30b..c12734d 100644
--- a/drivers/kvm/irq.h
+++ b/drivers/kvm/irq.h
@@ -63,7 +63,8 @@ struct kvm_pic {
 struct kvm_pic *kvm_create_pic(struct kvm *kvm);
 void kvm_pic_set_irq(void *opaque, int irq, int level);
 bool kvm_pic_read_irq(struct kvm_pic *s, struct kvm_pending_irq *irq);
-bool kvm_cpu_get_interrupt(struct kvm_vcpu *v, struct kvm_pending_irq *irq);
+bool kvm_cpu_get_interrupt(struct kvm_vcpu *v, struct kvm_pending_irq *irq,
+			   unsigned *tpr_threshold);
 void kvm_pic_update_irq(struct kvm_pic *s);
 
 #define IOAPIC_NUM_PINS  KVM_IOAPIC_NUM_PINS
@@ -147,9 +148,10 @@ do {									\
 #endif
 
 void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
-int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu);
+int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu, unsigned *tpr_threshold);
 int kvm_apic_accept_pic_intr(struct kvm_vcpu *vcpu);
-bool kvm_get_apic_interrupt(struct kvm_vcpu *vcpu, struct kvm_pending_irq *irq);
+bool kvm_get_apic_interrupt(struct kvm_vcpu *vcpu, struct kvm_pending_irq *irq,
+			    unsigned *tpr_threshold);
 int kvm_create_lapic(struct kvm_vcpu *vcpu);
 void kvm_lapic_reset(struct kvm_vcpu *vcpu);
 void kvm_pic_reset(struct kvm_kpic_state *s);
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index 41658d7..7cc4508 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -638,13 +638,14 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu)
 {
 	DECLARE_WAITQUEUE(wait, current);
 	struct kvm_pending_irq irq;
+	unsigned tpr_threshold;
 
 	add_wait_queue(&vcpu->wq, &wait);
 
 	/*
 	 * We will block until either an interrupt or a signal wakes us up
 	 */
-	while (!kvm_cpu_get_interrupt(vcpu, &irq)
+	while (!kvm_cpu_get_interrupt(vcpu, &irq, &tpr_threshold)
 	       && !signal_pending(current)
 	       && vcpu->mp_state != VCPU_MP_STATE_RUNNABLE
 	       && vcpu->mp_state != VCPU_MP_STATE_SIPI_RECEIVED) {
diff --git a/drivers/kvm/lapic.c b/drivers/kvm/lapic.c
index 804a177..f2168d4 100644
--- a/drivers/kvm/lapic.c
+++ b/drivers/kvm/lapic.c
@@ -992,19 +992,25 @@ nomem:
 }
 EXPORT_SYMBOL_GPL(kvm_create_lapic);
 
-int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu)
+int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu, unsigned *tpr_threshold)
 {
 	struct kvm_lapic *apic = vcpu->apic;
 	int highest_irr;
+	unsigned tpr;
 
 	if (!apic || !apic_enabled(apic))
 		return -1;
 
 	apic_update_ppr(apic);
 	highest_irr = apic_find_highest_irr(apic);
-	if ((highest_irr == -1) ||
-	    ((highest_irr & 0xF0) <= apic_get_reg(apic, APIC_PROCPRI)))
+	if (highest_irr == -1)
 		return -1;
+	if ((highest_irr & 0xF0) <= apic_get_reg(apic, APIC_PROCPRI)) {
+		tpr = apic_get_reg(apic, APIC_TASKPRI) & 0xff;
+		if ((highest_irr & 0xF0) < tpr)
+			*tpr_threshold = highest_irr & 0xF0;
+		return -1;
+	}
 	return highest_irr;
 }
 
@@ -1053,9 +1059,10 @@ static void kvm_apic_ack_interrupt(struct kvm_vcpu *vcpu, unsigned vector)
 	apic_clear_irr(vector, apic);
 }
 
-bool kvm_get_apic_interrupt(struct kvm_vcpu *vcpu, struct kvm_pending_irq *irq)
+bool kvm_get_apic_interrupt(struct kvm_vcpu *vcpu, struct kvm_pending_irq *irq,
+			    unsigned *tpr_threshold)
 {
-	int vector = kvm_apic_has_interrupt(vcpu);
+	int vector = kvm_apic_has_interrupt(vcpu, tpr_threshold);
 
 	if (vector == -1)
 		return false;
diff --git a/drivers/kvm/svm.c b/drivers/kvm/svm.c
index 155b266..10146a8 100644
--- a/drivers/kvm/svm.c
+++ b/drivers/kvm/svm.c
@@ -1317,6 +1317,7 @@ static void svm_intr_assist(struct kvm_vcpu *vcpu)
 	struct vmcb *vmcb = svm->vmcb;
 	int intr_vector = -1;
 	struct kvm_pending_irq irq;
+	unsigned tpr_threshold;
 
 	if ((vmcb->control.exit_int_info & SVM_EVTINJ_VALID) &&
 	    ((vmcb->control.exit_int_info & SVM_EVTINJ_TYPE_MASK) == 0)) {
@@ -1330,7 +1331,7 @@ static void svm_intr_assist(struct kvm_vcpu *vcpu)
 	if (vmcb->control.int_ctl & V_IRQ_MASK)
 		return;
 
-	if (!kvm_cpu_get_interrupt(vcpu, &irq))
+	if (!kvm_cpu_get_interrupt(vcpu, &irq, &tpr_threshold))
 		return;
 
 	if (!(vmcb->save.rflags & X86_EFLAGS_IF) ||
diff --git a/drivers/kvm/vmx.c b/drivers/kvm/vmx.c
index ed14849..b788c6b 100644
--- a/drivers/kvm/vmx.c
+++ b/drivers/kvm/vmx.c
@@ -2218,21 +2218,12 @@ static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
 {
 }
 
-static void update_tpr_threshold(struct kvm_vcpu *vcpu)
+static void update_tpr_threshold(struct kvm_vcpu *vcpu, unsigned tpr_threshold)
 {
-	int max_irr, tpr;
-
 	if (!vm_need_tpr_shadow(vcpu->kvm))
 		return;
 
-	if (!kvm_lapic_enabled(vcpu) ||
-	    ((max_irr = kvm_lapic_find_highest_irr(vcpu)) == -1)) {
-		vmcs_write32(TPR_THRESHOLD, 0);
-		return;
-	}
-
-	tpr = (kvm_lapic_get_cr8(vcpu) & 0x0f) << 4;
-	vmcs_write32(TPR_THRESHOLD, (max_irr > tpr) ? tpr >> 4 : max_irr >> 4);
+	vmcs_write32(TPR_THRESHOLD, tpr_threshold >> 4);
 }
 
 static void enable_irq_window(struct kvm_vcpu *vcpu)
@@ -2251,10 +2242,12 @@ static void vmx_intr_assist(struct kvm_vcpu *vcpu)
 	int has_ext_irq, interrupt_window_open;
 	struct kvm_pending_irq irq;
 	int vector;
+	unsigned tpr_threshold;
+
+	has_ext_irq = kvm_cpu_get_interrupt(vcpu, &irq, &tpr_threshold);
 
-	update_tpr_threshold(vcpu);
+	update_tpr_threshold(vcpu, tpr_threshold);
 
-	has_ext_irq = kvm_cpu_get_interrupt(vcpu, &irq);
 	intr_info_field = vmcs_read32(VM_ENTRY_INTR_INFO_FIELD);
 	idtv_info_field = vmx->idt_vectoring_info;
 	if (intr_info_field & INTR_INFO_VALID_MASK) {
-- 
1.5.3


-------------------------------------------------------------------------
SF.Net email is sponsored by: The Future of Linux Business White Paper
from Novell.  From the desktop to the data center, Linux is going
mainstream.  Let it simplify your IT future.
http://altfarm.mediaplex.com/ad/ck/8857-50307-18918-4

^ permalink raw reply related	[flat|nested] 17+ messages in thread

* [PATCH 7/7] KVM: Ack interrupts only after they have successfully been injected
       [not found] ` <11967614542283-git-send-email-avi-atKUWr5tajBWk0Htik3J/w@public.gmane.org>
                     ` (5 preceding siblings ...)
  2007-12-04  9:44   ` [PATCH 6/7] KVM: Move tpr threshold calculation into common code Avi Kivity
@ 2007-12-04  9:44   ` Avi Kivity
  2007-12-04 16:51   ` [PATCH 0/7] Rework irq injection infrastructure Joerg Roedel
  2007-12-06  7:50   ` Dong, Eddie
  8 siblings, 0 replies; 17+ messages in thread
From: Avi Kivity @ 2007-12-04  9:44 UTC (permalink / raw)
  To: kvm-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f; +Cc: Avi Kivity

Instead of acking an interrupt when we *think* the guest is ready for it,
and then juggling it around in subarch-specific registers if it isn't (e.g.
page fault while trying to inject the interrupt), separate the injection and
ack.

Subarch-specific code now provides two hooks: ->queue_interrupt() will attempt
to inject the interrupt, and ->interrupt_injected() will check whether this
actually succeeded (upon which common code will ack the interrupt).  This
allows much simpler management of pending interrupts.

Signed-off-by: Avi Kivity <avi-atKUWr5tajBWk0Htik3J/w@public.gmane.org>
---
 drivers/kvm/svm.c |  113 ++++++++------------------------------------
 drivers/kvm/vmx.c |  137 +++++++++--------------------------------------------
 drivers/kvm/x86.c |   79 ++++++++++++++++++++++---------
 drivers/kvm/x86.h |   11 ++--
 4 files changed, 103 insertions(+), 237 deletions(-)

diff --git a/drivers/kvm/svm.c b/drivers/kvm/svm.c
index 10146a8..9fb9ee1 100644
--- a/drivers/kvm/svm.c
+++ b/drivers/kvm/svm.c
@@ -46,8 +46,6 @@ MODULE_LICENSE("GPL");
 #define SVM_FEATURE_LBRV (1 << 1)
 #define SVM_DEATURE_SVML (1 << 2)
 
-static void kvm_reput_irq(struct vcpu_svm *svm);
-
 static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
 {
 	return container_of(vcpu, struct vcpu_svm, vcpu);
@@ -838,16 +836,6 @@ static int svm_guest_debug(struct kvm_vcpu *vcpu, struct kvm_debug_guest *dbg)
 	return -EOPNOTSUPP;
 }
 
-static int svm_get_irq(struct kvm_vcpu *vcpu)
-{
-	struct vcpu_svm *svm = to_svm(vcpu);
-	u32 exit_int_info = svm->vmcb->control.exit_int_info;
-
-	if (is_external_interrupt(exit_int_info))
-		return exit_int_info & SVM_EVTINJ_VEC_MASK;
-	return -1;
-}
-
 static void load_host_msrs(struct kvm_vcpu *vcpu)
 {
 #ifdef CONFIG_X86_64
@@ -1245,8 +1233,6 @@ static int handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 	struct vcpu_svm *svm = to_svm(vcpu);
 	u32 exit_code = svm->vmcb->control.exit_code;
 
-	kvm_reput_irq(svm);
-
 	if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
 		kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
 		kvm_run->fail_entry.hardware_entry_failure_reason
@@ -1304,102 +1290,43 @@ static inline void svm_inject_irq(struct vcpu_svm *svm, int irq)
 		((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT);
 }
 
-static void svm_set_irq(struct kvm_vcpu *vcpu, int irq)
+static void svm_set_tpr_threshold(struct kvm_vcpu *vcpu)
 {
-	struct vcpu_svm *svm = to_svm(vcpu);
-
-	svm_inject_irq(svm, irq);
 }
 
-static void svm_intr_assist(struct kvm_vcpu *vcpu)
+static bool svm_queue_interrupt(struct kvm_vcpu *vcpu, unsigned vector)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 	struct vmcb *vmcb = svm->vmcb;
-	int intr_vector = -1;
-	struct kvm_pending_irq irq;
-	unsigned tpr_threshold;
-
-	if ((vmcb->control.exit_int_info & SVM_EVTINJ_VALID) &&
-	    ((vmcb->control.exit_int_info & SVM_EVTINJ_TYPE_MASK) == 0)) {
-		intr_vector = vmcb->control.exit_int_info &
-			      SVM_EVTINJ_VEC_MASK;
-		vmcb->control.exit_int_info = 0;
-		svm_inject_irq(svm, intr_vector);
-		return;
-	}
 
 	if (vmcb->control.int_ctl & V_IRQ_MASK)
-		return;
-
-	if (!kvm_cpu_get_interrupt(vcpu, &irq, &tpr_threshold))
-		return;
+		return false;
 
 	if (!(vmcb->save.rflags & X86_EFLAGS_IF) ||
 	    (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) ||
-	    (vmcb->control.event_inj & SVM_EVTINJ_VALID)) {
+	    vcpu->exception.pending) {
 		/* unable to deliver irq, set pending irq */
 		vmcb->control.intercept |= (1ULL << INTERCEPT_VINTR);
 		svm_inject_irq(svm, 0x0);
-		return;
-	}
-	/* Okay, we can deliver the interrupt: grab it and update PIC state. */
-	irq.ack(vcpu, irq.info);
-	intr_vector = irq.vector;
-	svm_inject_irq(svm, intr_vector);
-	kvm_timer_intr_post(vcpu, intr_vector);
-}
-
-static void kvm_reput_irq(struct vcpu_svm *svm)
-{
-	struct vmcb_control_area *control = &svm->vmcb->control;
-
-	if ((control->int_ctl & V_IRQ_MASK)
-	    && !irqchip_in_kernel(svm->vcpu.kvm)) {
-		control->int_ctl &= ~V_IRQ_MASK;
-		push_irq(&svm->vcpu, control->int_vector);
+		return false;
 	}
-
-	svm->vcpu.interrupt_window_open =
-		!(control->int_state & SVM_INTERRUPT_SHADOW_MASK);
-}
-
-static void svm_do_inject_vector(struct vcpu_svm *svm)
-{
-	struct kvm_vcpu *vcpu = &svm->vcpu;
-	int word_index = __ffs(vcpu->irq_summary);
-	int bit_index = __ffs(vcpu->irq_pending[word_index]);
-	int irq = word_index * BITS_PER_LONG + bit_index;
-
-	clear_bit(bit_index, &vcpu->irq_pending[word_index]);
-	if (!vcpu->irq_pending[word_index])
-		clear_bit(word_index, &vcpu->irq_summary);
-	svm_inject_irq(svm, irq);
+	svm_inject_irq(svm, vector);
+	return true;
 }
 
-static void do_interrupt_requests(struct kvm_vcpu *vcpu,
-				       struct kvm_run *kvm_run)
+static bool svm_interrupt_injected(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 	struct vmcb_control_area *control = &svm->vmcb->control;
+	bool injected;
 
-	svm->vcpu.interrupt_window_open =
-		(!(control->int_state & SVM_INTERRUPT_SHADOW_MASK) &&
-		 (svm->vmcb->save.rflags & X86_EFLAGS_IF));
-
-	if (svm->vcpu.interrupt_window_open && svm->vcpu.irq_summary)
-		/*
-		 * If interrupts enabled, and not blocked by sti or mov ss. Good.
-		 */
-		svm_do_inject_vector(svm);
+	injected = !(control->int_ctl & V_IRQ_MASK)
+		&& !(control->intercept & (1ULL << INTERCEPT_VINTR))
+		&& !(control->exit_int_info & SVM_EXITINTINFO_VALID);
 
-	/*
-	 * Interrupts blocked.  Wait for unblock.
-	 */
-	if (!svm->vcpu.interrupt_window_open &&
-	    (svm->vcpu.irq_summary || kvm_run->request_interrupt_window))
-		control->intercept |= 1ULL << INTERCEPT_VINTR;
-	 else
-		control->intercept &= ~(1ULL << INTERCEPT_VINTR);
+	if (!injected)
+		control->int_ctl &= ~V_IRQ_MASK;
+	return injected;
 }
 
 static int svm_set_tss_addr(struct kvm *kvm, unsigned int addr)
@@ -1586,6 +1513,8 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	stgi();
 
 	svm->next_rip = 0;
+	svm->vcpu.interrupt_window_open =
+		!(svm->vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK);
 }
 
 static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root)
@@ -1676,13 +1605,11 @@ static struct kvm_x86_ops svm_x86_ops = {
 	.handle_exit = handle_exit,
 	.skip_emulated_instruction = skip_emulated_instruction,
 	.patch_hypercall = svm_patch_hypercall,
-	.get_irq = svm_get_irq,
-	.set_irq = svm_set_irq,
 	.queue_exception = svm_queue_exception,
 	.exception_injected = svm_exception_injected,
-	.inject_pending_irq = svm_intr_assist,
-	.inject_pending_vectors = do_interrupt_requests,
-
+	.queue_interrupt = svm_queue_interrupt,
+	.interrupt_injected = svm_interrupt_injected,
+	.set_tpr_threshold = svm_set_tpr_threshold,
 	.set_tss_addr = svm_set_tss_addr,
 };
 
diff --git a/drivers/kvm/vmx.c b/drivers/kvm/vmx.c
index b788c6b..c180c04 100644
--- a/drivers/kvm/vmx.c
+++ b/drivers/kvm/vmx.c
@@ -861,21 +861,6 @@ static int set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_debug_guest *dbg)
 	return 0;
 }
 
-static int vmx_get_irq(struct kvm_vcpu *vcpu)
-{
-	struct vcpu_vmx *vmx = to_vmx(vcpu);
-	u32 idtv_info_field;
-
-	idtv_info_field = vmx->idt_vectoring_info;
-	if (idtv_info_field & INTR_INFO_VALID_MASK) {
-		if (is_external_interrupt(idtv_info_field))
-			return idtv_info_field & VECTORING_INFO_VECTOR_MASK;
-		else
-			printk(KERN_DEBUG "pending exception: not handled yet\n");
-	}
-	return -1;
-}
-
 static __init int cpu_has_kvm_support(void)
 {
 	unsigned long ecx = cpuid_ecx(1);
@@ -1732,48 +1717,6 @@ static void vmx_inject_irq(struct kvm_vcpu *vcpu, int irq)
 			irq | INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
 }
 
-static void kvm_do_inject_irq(struct kvm_vcpu *vcpu)
-{
-	int word_index = __ffs(vcpu->irq_summary);
-	int bit_index = __ffs(vcpu->irq_pending[word_index]);
-	int irq = word_index * BITS_PER_LONG + bit_index;
-
-	clear_bit(bit_index, &vcpu->irq_pending[word_index]);
-	if (!vcpu->irq_pending[word_index])
-		clear_bit(word_index, &vcpu->irq_summary);
-	vmx_inject_irq(vcpu, irq);
-}
-
-
-static void do_interrupt_requests(struct kvm_vcpu *vcpu,
-				       struct kvm_run *kvm_run)
-{
-	u32 cpu_based_vm_exec_control;
-
-	vcpu->interrupt_window_open =
-		((vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) &&
-		 (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0);
-
-	if (vcpu->interrupt_window_open &&
-	    vcpu->irq_summary &&
-	    !(vmcs_read32(VM_ENTRY_INTR_INFO_FIELD) & INTR_INFO_VALID_MASK))
-		/*
-		 * If interrupts enabled, and not blocked by sti or mov ss. Good.
-		 */
-		kvm_do_inject_irq(vcpu);
-
-	cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
-	if (!vcpu->interrupt_window_open &&
-	    (vcpu->irq_summary || kvm_run->request_interrupt_window))
-		/*
-		 * Interrupts blocked.  Wait for unblock.
-		 */
-		cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_INTR_PENDING;
-	else
-		cpu_based_vm_exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
-	vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
-}
-
 static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr)
 {
 	int ret;
@@ -2218,12 +2161,12 @@ static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
 {
 }
 
-static void update_tpr_threshold(struct kvm_vcpu *vcpu, unsigned tpr_threshold)
+static void update_tpr_threshold(struct kvm_vcpu *vcpu)
 {
 	if (!vm_need_tpr_shadow(vcpu->kvm))
 		return;
 
-	vmcs_write32(TPR_THRESHOLD, tpr_threshold >> 4);
+	vmcs_write32(TPR_THRESHOLD, vcpu->tpr_threshold >> 4);
 }
 
 static void enable_irq_window(struct kvm_vcpu *vcpu)
@@ -2235,65 +2178,30 @@ static void enable_irq_window(struct kvm_vcpu *vcpu)
 	vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
 }
 
-static void vmx_intr_assist(struct kvm_vcpu *vcpu)
+static bool vmx_queue_interrupt(struct kvm_vcpu *vcpu, unsigned vector)
 {
-	struct vcpu_vmx *vmx = to_vmx(vcpu);
-	u32 idtv_info_field, intr_info_field;
-	int has_ext_irq, interrupt_window_open;
-	struct kvm_pending_irq irq;
-	int vector;
-	unsigned tpr_threshold;
-
-	has_ext_irq = kvm_cpu_get_interrupt(vcpu, &irq, &tpr_threshold);
-
-	update_tpr_threshold(vcpu, tpr_threshold);
-
-	intr_info_field = vmcs_read32(VM_ENTRY_INTR_INFO_FIELD);
-	idtv_info_field = vmx->idt_vectoring_info;
-	if (intr_info_field & INTR_INFO_VALID_MASK) {
-		if (idtv_info_field & INTR_INFO_VALID_MASK) {
-			/* TODO: fault when IDT_Vectoring */
-			printk(KERN_ERR "Fault when IDT_Vectoring\n");
-		}
-		if (has_ext_irq)
-			enable_irq_window(vcpu);
-		return;
-	}
-	if (unlikely(idtv_info_field & INTR_INFO_VALID_MASK)) {
-		if ((idtv_info_field & VECTORING_INFO_TYPE_MASK)
-		    == INTR_TYPE_EXT_INTR
-		    && vcpu->rmode.active) {
-			u8 vect = idtv_info_field & VECTORING_INFO_VECTOR_MASK;
-
-			vmx_inject_irq(vcpu, vect);
-			if (unlikely(has_ext_irq))
-				enable_irq_window(vcpu);
-			return;
-		}
-
-		vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, idtv_info_field);
-		vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
-				vmcs_read32(VM_EXIT_INSTRUCTION_LEN));
+	int interrupt_window_open;
 
-		if (unlikely(idtv_info_field & INTR_INFO_DELIEVER_CODE_MASK))
-			vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE,
-				vmcs_read32(IDT_VECTORING_ERROR_CODE));
-		if (unlikely(has_ext_irq))
-			enable_irq_window(vcpu);
-		return;
-	}
-	if (!has_ext_irq)
-		return;
 	interrupt_window_open =
 		((vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) &&
 		 (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0);
-	if (interrupt_window_open) {
-		irq.ack(vcpu, irq.info);
-		vector = irq.vector;
+
+	if (!vcpu->exception.pending && interrupt_window_open) {
 		vmx_inject_irq(vcpu, vector);
-		kvm_timer_intr_post(vcpu, vector);
-	} else
+		return true;
+	} else {
 		enable_irq_window(vcpu);
+		return false;
+	}
+}
+
+static bool vmx_interrupt_injected(struct kvm_vcpu *vcpu)
+{
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+	bool valid = vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK;
+	unsigned type = vmx->idt_vectoring_info & VECTORING_INFO_TYPE_MASK;
+
+	return !(valid && (type == INTR_TYPE_EXT_INTR));
 }
 
 /*
@@ -2597,12 +2505,11 @@ static struct kvm_x86_ops vmx_x86_ops = {
 	.handle_exit = kvm_handle_exit,
 	.skip_emulated_instruction = skip_emulated_instruction,
 	.patch_hypercall = vmx_patch_hypercall,
-	.get_irq = vmx_get_irq,
-	.set_irq = vmx_inject_irq,
 	.queue_exception = vmx_queue_exception,
 	.exception_injected = vmx_exception_injected,
-	.inject_pending_irq = vmx_intr_assist,
-	.inject_pending_vectors = do_interrupt_requests,
+	.queue_interrupt = vmx_queue_interrupt,
+	.interrupt_injected = vmx_interrupt_injected,
+	.set_tpr_threshold = update_tpr_threshold,
 
 	.set_tss_addr = vmx_set_tss_addr,
 };
diff --git a/drivers/kvm/x86.c b/drivers/kvm/x86.c
index 6deb052..a196d5d 100644
--- a/drivers/kvm/x86.c
+++ b/drivers/kvm/x86.c
@@ -2365,9 +2365,51 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu,
 					 vcpu->irq_summary == 0);
 }
 
+static void ack_vector(struct kvm_vcpu *vcpu, unsigned vector)
+{
+	unsigned word_index = vector / BITS_PER_LONG;
+	unsigned bit_index = vector % BITS_PER_LONG;
+
+	clear_bit(bit_index, &vcpu->irq_pending[word_index]);
+	if (!vcpu->irq_pending[word_index])
+		clear_bit(word_index, &vcpu->irq_summary);
+}
+
+static bool get_pending_irq(struct kvm_vcpu *vcpu, struct kvm_pending_irq *irq,
+			    unsigned *tpr_threshold)
+{
+	unsigned word_index, bit_index;
+
+	if (irqchip_in_kernel(vcpu->kvm))
+		return kvm_cpu_get_interrupt(vcpu, irq, tpr_threshold);
+
+	*tpr_threshold = 0;
+
+	if (!vcpu->irq_summary)
+		return false;
+
+	word_index = __ffs(vcpu->irq_summary);
+	bit_index = __ffs(vcpu->irq_pending[word_index]);
+	irq->vector = word_index * BITS_PER_LONG + bit_index;
+	irq->ack = ack_vector;
+	irq->info = irq->vector;
+	return true;
+}
+
+static void update_tpr_threshold(struct kvm_vcpu *vcpu, unsigned tpr_threshold)
+{
+	if (tpr_threshold != vcpu->tpr_threshold) {
+		vcpu->tpr_threshold = tpr_threshold;
+		kvm_x86_ops->set_tpr_threshold(vcpu);
+	}
+}
+
 static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
 	int r;
+	bool irq_pending;
+	struct kvm_pending_irq irq;
+	unsigned tpr_threshold;
 
 	if (unlikely(vcpu->mp_state == VCPU_MP_STATE_SIPI_RECEIVED)) {
 		pr_debug("vcpu %d received sipi with vector # %x\n",
@@ -2408,10 +2450,12 @@ again:
 
 	if (vcpu->exception.pending)
 		__queue_exception(vcpu);
-	else if (irqchip_in_kernel(vcpu->kvm))
-		kvm_x86_ops->inject_pending_irq(vcpu);
-	else
-		kvm_x86_ops->inject_pending_vectors(vcpu, kvm_run);
+
+	irq_pending = get_pending_irq(vcpu, &irq, &tpr_threshold);
+	if (irq_pending)
+		irq_pending = kvm_x86_ops->queue_interrupt(vcpu, irq.vector);
+
+	update_tpr_threshold(vcpu, tpr_threshold);
 
 	vcpu->guest_mode = 1;
 	kvm_guest_enter();
@@ -2450,6 +2494,11 @@ again:
 	if (vcpu->exception.pending && kvm_x86_ops->exception_injected(vcpu))
 		vcpu->exception.pending = false;
 
+	if (irq_pending && kvm_x86_ops->interrupt_injected(vcpu)) {
+		irq.ack(vcpu, irq.info);
+		kvm_timer_intr_post(vcpu, irq.vector);
+	}
+
 	r = kvm_x86_ops->handle_exit(kvm_run, vcpu);
 
 	if (r > 0) {
@@ -2623,7 +2672,6 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
 				  struct kvm_sregs *sregs)
 {
 	struct descriptor_table dt;
-	int pending_vec;
 
 	vcpu_load(vcpu);
 
@@ -2653,14 +2701,10 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
 	sregs->efer = vcpu->shadow_efer;
 	sregs->apic_base = kvm_get_apic_base(vcpu);
 
-	if (irqchip_in_kernel(vcpu->kvm)) {
+	if (irqchip_in_kernel(vcpu->kvm))
 		memset(sregs->interrupt_bitmap, 0,
 		       sizeof sregs->interrupt_bitmap);
-		pending_vec = kvm_x86_ops->get_irq(vcpu);
-		if (pending_vec >= 0)
-			set_bit(pending_vec,
-				(unsigned long *)sregs->interrupt_bitmap);
-	} else
+	else
 		memcpy(sregs->interrupt_bitmap, vcpu->irq_pending,
 		       sizeof sregs->interrupt_bitmap);
 
@@ -2679,7 +2723,7 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 				  struct kvm_sregs *sregs)
 {
 	int mmu_reset_needed = 0;
-	int i, pending_vec, max_bits;
+	int i;
 	struct descriptor_table dt;
 
 	vcpu_load(vcpu);
@@ -2724,17 +2768,6 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 		for (i = 0; i < ARRAY_SIZE(vcpu->irq_pending); ++i)
 			if (vcpu->irq_pending[i])
 				__set_bit(i, &vcpu->irq_summary);
-	} else {
-		max_bits = (sizeof sregs->interrupt_bitmap) << 3;
-		pending_vec = find_first_bit(
-			(const unsigned long *)sregs->interrupt_bitmap,
-			max_bits);
-		/* Only pending external irq is handled here */
-		if (pending_vec < max_bits) {
-			kvm_x86_ops->set_irq(vcpu, pending_vec);
-			pr_debug("Set back pending irq %d\n",
-				 pending_vec);
-		}
 	}
 
 	set_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
diff --git a/drivers/kvm/x86.h b/drivers/kvm/x86.h
index fb48b2f..52f3199 100644
--- a/drivers/kvm/x86.h
+++ b/drivers/kvm/x86.h
@@ -136,6 +136,8 @@ struct kvm_vcpu {
 	struct kvm_pio_request pio;
 	void *pio_data;
 
+	unsigned tpr_threshold;
+
 	struct kvm_queued_exception {
 		bool pending;
 		bool has_error_code;
@@ -217,15 +219,12 @@ struct kvm_x86_ops {
 	void (*skip_emulated_instruction)(struct kvm_vcpu *vcpu);
 	void (*patch_hypercall)(struct kvm_vcpu *vcpu,
 				unsigned char *hypercall_addr);
-	int (*get_irq)(struct kvm_vcpu *vcpu);
-	void (*set_irq)(struct kvm_vcpu *vcpu, int vec);
 	void (*queue_exception)(struct kvm_vcpu *vcpu, unsigned nr,
 				bool has_error_code, u32 error_code);
 	bool (*exception_injected)(struct kvm_vcpu *vcpu);
-	void (*inject_pending_irq)(struct kvm_vcpu *vcpu);
-	void (*inject_pending_vectors)(struct kvm_vcpu *vcpu,
-				       struct kvm_run *run);
-
+	bool (*queue_interrupt)(struct kvm_vcpu *vcpu, unsigned vector);
+	bool (*interrupt_injected)(struct kvm_vcpu *vcpu);
+	void (*set_tpr_threshold)(struct kvm_vcpu *vcpu);
 	int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
 };
 
-- 
1.5.3


-------------------------------------------------------------------------
SF.Net email is sponsored by: The Future of Linux Business White Paper
from Novell.  From the desktop to the data center, Linux is going
mainstream.  Let it simplify your IT future.
http://altfarm.mediaplex.com/ad/ck/8857-50307-18918-4

^ permalink raw reply related	[flat|nested] 17+ messages in thread

* Re: [PATCH 0/7] Rework irq injection infrastructure
       [not found] ` <11967614542283-git-send-email-avi-atKUWr5tajBWk0Htik3J/w@public.gmane.org>
                     ` (6 preceding siblings ...)
  2007-12-04  9:44   ` [PATCH 7/7] KVM: Ack interrupts only after they have successfully been injected Avi Kivity
@ 2007-12-04 16:51   ` Joerg Roedel
       [not found]     ` <20071204165101.GA23093-5C7GfCeVMHo@public.gmane.org>
  2007-12-06  7:50   ` Dong, Eddie
  8 siblings, 1 reply; 17+ messages in thread
From: Joerg Roedel @ 2007-12-04 16:51 UTC (permalink / raw)
  To: Avi Kivity; +Cc: kvm-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f

On Tue, Dec 04, 2007 at 11:44:07AM +0200, Avi Kivity wrote:
> The patchset is also necessary for integrating the tpr optimization branch
> and for injecting interrupts in big real mode.

Interesting work. Do you have an updated tpr optimization patch which
applies on top of these patches?

Joerg

-- 
           |           AMD Saxony Limited Liability Company & Co. KG
 Operating |         Wilschdorfer Landstr. 101, 01109 Dresden, Germany
 System    |                  Register Court Dresden: HRA 4896
 Research  |              General Partner authorized to represent:
 Center    |             AMD Saxony LLC (Wilmington, Delaware, US)
           | General Manager of AMD Saxony LLC: Dr. Hans-R. Deppe, Thomas McCoy



-------------------------------------------------------------------------
SF.Net email is sponsored by: The Future of Linux Business White Paper
from Novell.  From the desktop to the data center, Linux is going
mainstream.  Let it simplify your IT future.
http://altfarm.mediaplex.com/ad/ck/8857-50307-18918-4

^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [PATCH 0/7] Rework irq injection infrastructure
       [not found]     ` <20071204165101.GA23093-5C7GfCeVMHo@public.gmane.org>
@ 2007-12-04 16:56       ` Avi Kivity
  0 siblings, 0 replies; 17+ messages in thread
From: Avi Kivity @ 2007-12-04 16:56 UTC (permalink / raw)
  To: Joerg Roedel; +Cc: kvm-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f

Joerg Roedel wrote:
> On Tue, Dec 04, 2007 at 11:44:07AM +0200, Avi Kivity wrote:
>   
>> The patchset is also necessary for integrating the tpr optimization branch
>> and for injecting interrupts in big real mode.
>>     
>
> Interesting work. Do you have an updated tpr optimization patch which
> applys on top of these patches?
>   

No, but I plan to rebase it soon.


-- 
error compiling committee.c: too many arguments to function


-------------------------------------------------------------------------
SF.Net email is sponsored by: The Future of Linux Business White Paper
from Novell.  From the desktop to the data center, Linux is going
mainstream.  Let it simplify your IT future.
http://altfarm.mediaplex.com/ad/ck/8857-50307-18918-4

^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [PATCH 0/7] Rework irq injection infrastructure
       [not found] ` <11967614542283-git-send-email-avi-atKUWr5tajBWk0Htik3J/w@public.gmane.org>
                     ` (7 preceding siblings ...)
  2007-12-04 16:51   ` [PATCH 0/7] Rework irq injection infrastructure Joerg Roedel
@ 2007-12-06  7:50   ` Dong, Eddie
       [not found]     ` <10EA09EFD8728347A513008B6B0DA77A0279CC10-wq7ZOvIWXbNpB2pF5aRoyrfspsVTdybXVpNB7YpNyf8@public.gmane.org>
  8 siblings, 1 reply; 17+ messages in thread
From: Dong, Eddie @ 2007-12-06  7:50 UTC (permalink / raw)
  To: Avi Kivity, kvm-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f

[-- Attachment #1: Type: text/plain, Size: 2383 bytes --]

It is not a small change, I still need to study the whole patch :(
If I can get some questions clarified earlier, that would help a lot.


>-----Original Message-----
>From: kvm-devel-bounces-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f@public.gmane.org 
>[mailto:kvm-devel-bounces-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f@public.gmane.org] On Behalf Of 
>Avi Kivity
>Sent: 2007年12月4日 17:44
>To: kvm-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f@public.gmane.org
>Subject: [kvm-devel] [PATCH 0/7] Rework irq injection infrastructure
>
>The current irq and exception injection infrastructure is 
>quite complex and
>has been the source of a number of bugs in the past.  This 
>patchset simplifies
>irq and exception injection:
>
>- Much more work is carried out in common code rather than 
>vmx/svm specific
>  code.  Information is kept in C variables rather than 
>hardware registers
>
>- Exception and interrupts are separated into two independent 
>queues.  This
>  will allow later optimization on AMD where the hardware 
>supports two queues,
>  and also simplifies the Intel case as now we don't need to 
>check the hardware
>  whether an exception is pending when injecting and interrupt.
>
>- Interrupts are now only acked after they have been 
>successfully injected,

Mmm, how can you know if it is injected successfully?
From the patch, it looks like you know this by checking
IDT_Vectoring in the next VM Exit. That means the virtual 
interrupt controller state in memory is temporarily incorrect.

If the injection success & we can get VM Exit before 
next access to those virtual interrupt controller state,
that will be OK, but that means we eliminate future HW
optimization opportunity.  I think we just pay too much
for coding style reason to narrow HW optimization space.

If the injection fails, the previous logic will inject it back 
immediately; current logic will do normal irq inject path.
The potential differences are:
	1: If a new higher interrupt arrives, we inject new vector.
that is probably OK.
	2: If the fault VM Exit is due to nested exception, the
previous logic will probably inject double fault with the original
interrupt consumed (acked); the new logic will probably inject
double fault too, but keep the IRQ not consumed. I don't have
direct case, but worry about that kind of deviation from native
behavior.

thx,eddie


[-- Attachment #2: Type: text/plain, Size: 309 bytes --]

-------------------------------------------------------------------------
SF.Net email is sponsored by: The Future of Linux Business White Paper
from Novell.  From the desktop to the data center, Linux is going
mainstream.  Let it simplify your IT future.
http://altfarm.mediaplex.com/ad/ck/8857-50307-18918-4

[-- Attachment #3: Type: text/plain, Size: 186 bytes --]

_______________________________________________
kvm-devel mailing list
kvm-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f@public.gmane.org
https://lists.sourceforge.net/lists/listinfo/kvm-devel

^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [PATCH 0/7] Rework irq injection infrastructure
       [not found]     ` <10EA09EFD8728347A513008B6B0DA77A0279CC10-wq7ZOvIWXbNpB2pF5aRoyrfspsVTdybXVpNB7YpNyf8@public.gmane.org>
@ 2007-12-06  8:28       ` Avi Kivity
       [not found]         ` <4757B2C3.3000402-atKUWr5tajBWk0Htik3J/w@public.gmane.org>
  0 siblings, 1 reply; 17+ messages in thread
From: Avi Kivity @ 2007-12-06  8:28 UTC (permalink / raw)
  To: Dong, Eddie; +Cc: kvm-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f

[-- Attachment #1: Type: text/plain, Size: 3137 bytes --]

Dong, Eddie wrote:
> It is not a small change, I still need to study the whole patch :(
> If I can get some question clarified earlier, that helps a lot.
>
>
>   
>> -----Original Message-----
>> From: kvm-devel-bounces-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f@public.gmane.org 
>> [mailto:kvm-devel-bounces-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f@public.gmane.org] On Behalf Of 
>> Avi Kivity
>> Sent: 2007年12月4日 17:44
>> To: kvm-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f@public.gmane.org
>> Subject: [kvm-devel] [PATCH 0/7] Rework irq injection infrastructure
>>
>> The current irq and exception injection infrastructure is 
>> quite complex and
>> has been the source of a number of bugs in the past.  This 
>> patchset simplifies
>> irq and exception injection:
>>
>> - Much more work is carried out in common code rather than 
>> vmx/svm specific
>>  code.  Information is kept in C variables rather than 
>> hardware registers
>>
>> - Exception and interrupts are separated into two independent 
>> queues.  This
>>  will allow later optimization on AMD where the hardware 
>> supports two queues,
>>  and also simplifies the Intel case as now we don't need to 
>> check the hardware
>>  whether an exception is pending when injecting and interrupt.
>>
>> - Interrupts are now only acked after they have been 
>> successfully injected,
>>     
>
> Mmm, how can you know if it is injected successfully?
> From the patch, it looks like you know this by checking
> IDT_Vectoring in next VM Exit. That means the virtual 
> interrupt controller state in memory is incorrect temply.
>
> If the injection success & we can get VM Exit before 
> next access to those virtual interrupt controller state,
> that will be OK, 

That's the idea.

> but that means we eliminate future HW
> optimization opportunity.  I think we just pay too much
> for coding style reason to narrow HW optimization space.
>   

I don't know enough about future hardware...

But the motivation here is not coding style, it's to be able to do the
injection non-atomically. The tpr-opt patchset wants to write the tpr to
memory, and that is a sleeping operation. Similarly, if we emulate big
real mode we need to simulate interrupt injection by writing to the stack.

Moving the logic to common code helps, but it could have been done with
the original system as well.

> If the injection fails, the previous logic will inject it back 
> immediately; current logic will do normal irq inject path.
> The potential differences are:
> 	1: If a new higher interrupt arrives, we inject new vector.
> that is probably OK.
> 	2: If the fault VM Exit is due to nested exception, the
> previous logic will probably inject double fault with the original
> interrupt consumed (acked); the new logic will probably inject
> double fault too, but keep the IRQ not consumed. I don't have
> direct case, but worry about those kind of deviation from native
> behavor.
>   

We can probably fix it to ack the interrupt when injecting a double fault.

-- 
error compiling committee.c: too many arguments to function



[-- Attachment #2: Type: text/plain, Size: 309 bytes --]

-------------------------------------------------------------------------
SF.Net email is sponsored by: The Future of Linux Business White Paper
from Novell.  From the desktop to the data center, Linux is going
mainstream.  Let it simplify your IT future.
http://altfarm.mediaplex.com/ad/ck/8857-50307-18918-4

[-- Attachment #3: Type: text/plain, Size: 186 bytes --]

_______________________________________________
kvm-devel mailing list
kvm-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f@public.gmane.org
https://lists.sourceforge.net/lists/listinfo/kvm-devel

^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [PATCH 0/7] Rework irq injection infrastructure
       [not found]         ` <4757B2C3.3000402-atKUWr5tajBWk0Htik3J/w@public.gmane.org>
@ 2007-12-06  9:33           ` Dong, Eddie
       [not found]             ` <10EA09EFD8728347A513008B6B0DA77A0279CCE0-wq7ZOvIWXbNpB2pF5aRoyrfspsVTdybXVpNB7YpNyf8@public.gmane.org>
  0 siblings, 1 reply; 17+ messages in thread
From: Dong, Eddie @ 2007-12-06  9:33 UTC (permalink / raw)
  To: Avi Kivity; +Cc: kvm-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f

>>
>> Mmm, how can you know if it is injected successfully?
>> From the patch, it looks like you know this by checking
>> IDT_Vectoring in next VM Exit. That means the virtual 
>> interrupt controller state in memory is incorrect temply.
>>
>> If the injection success & we can get VM Exit before 
>> next access to those virtual interrupt controller state,
>> that will be OK, 
>
>That's the idea.
>
>> but that means we eliminate future HW
>> optimization opportunity.  I think we just pay too much
>> for coding style reason to narrow HW optimization space.
>>   
>
>I don't know enough about future hardware...

One of the optimizations, which can be used in pure SW, is that
we can shadow the APIC state as "RO" to the guest so that all guest
reads of APIC registers can run at full speed.

This optimization is probably cleaner & less intrusive. Dynamic
patching to modify guest code could trigger a mine sooner or later :(


>
>But the motivation here is not coding style, it's to be able to do the
>injection non-atomically. The tpr-opt patchset wants to write 
>the tpr to
>memory, and that is a sleeping operation. Similarly, if we emulate big
>real mode we need to simulate interrupt injection by writing 
>to the stack.

OK. As for the big real mode issue, that is very easy
to work around: we can probably pre-load (pin) those 2 or 3 pages
before injecting a real mode irq.

For dynamic patch stuff, what is the issue? I saw your previous code
works, right?

BTW, w/o this patch, your previous TPR optimization patch can work,
w/ this patch, it won't work. The key difference is when guest access
TPR,
per the original patch, it will dynamically calculate the vPPR value based on
vISR & vTPR. Since vISR is now incorrect (though temporarily), the patch
won't work correctly.

>
>Moving the logic to common code helps, but it could have been done with
>the original system as well.

Yes, probably we can split these 2 efforts. The part to make it common
can easily go in first.

>
>> If the injection fails, the previous logic will inject it back 
>> immediately; current logic will do normal irq inject path.
>> The potential differences are:
>> 	1: If a new higher interrupt arrives, we inject new vector.
>> that is probably OK.
>> 	2: If the fault VM Exit is due to nested exception, the
>> previous logic will probably inject double fault with the original
>> interrupt consumed (acked); the new logic will probably inject
>> double fault too, but keep the IRQ not consumed. I don't have
>> direct case, but worry about those kind of deviation from native
>> behavor.
>>   
>
>We can probably fix it to ack the interrupt when injecting a 
>double fault.

Yes, could be though not very clean. But I am not sure how many 
other reason will cause interrupt injection failed beside shadow fault.
We need to check all of them.

Eddie

-------------------------------------------------------------------------
SF.Net email is sponsored by: The Future of Linux Business White Paper
from Novell.  From the desktop to the data center, Linux is going
mainstream.  Let it simplify your IT future.
http://altfarm.mediaplex.com/ad/ck/8857-50307-18918-4

^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [PATCH 0/7] Rework irq injection infrastructure
       [not found]             ` <10EA09EFD8728347A513008B6B0DA77A0279CCE0-wq7ZOvIWXbNpB2pF5aRoyrfspsVTdybXVpNB7YpNyf8@public.gmane.org>
@ 2007-12-06 10:10               ` Avi Kivity
       [not found]                 ` <4757CA93.1090704-atKUWr5tajBWk0Htik3J/w@public.gmane.org>
  0 siblings, 1 reply; 17+ messages in thread
From: Avi Kivity @ 2007-12-06 10:10 UTC (permalink / raw)
  To: Dong, Eddie; +Cc: kvm-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f

Dong, Eddie wrote:
>>> Mmm, how can you know if it is injected successfully?
>>> From the patch, it looks like you know this by checking
>>> IDT_Vectoring in next VM Exit. That means the virtual 
>>> interrupt controller state in memory is incorrect temply.
>>>
>>> If the injection success & we can get VM Exit before 
>>> next access to those virtual interrupt controller state,
>>> that will be OK, 
>>>       
>> That's the idea.
>>
>>     
>>> but that means we eliminate future HW
>>> optimization opportunity.  I think we just pay too much
>>> for coding style reason to narrow HW optimization space.
>>>   
>>>       
>> I don't know enough about future hardware...
>>     
>
> One of the optimization, which can be used in pure SW is that
> we can shadow APIC state as "RO" to guest so that all guest
> read to APIC registers can run in full speed. T
>
>   

This can still be done with unacked interrupts: modify the state to "as 
if" the interrupt was injected, but commit it only if the injection 
succeeds.  If injection fails, revert the change.  It isn't very nice 
though.

Another possibility is:

   get_interrupt()
    ... do all sleepy things prior to injection
    enter critical section
    ack interrupt
    enter guest

> This optimization is probably cleaner & less-intrusive. Dynamic
> patching to modify guest code could trigger mine sooner or later:(
>
>   

Dynamic patching is dangerous, but the only option to run Windows SMP on 
millions of machines out there.

>   
>> But the motivation here is not coding style, it's to be able to do the
>> injection non-atomically. The tpr-opt patchset wants to write 
>> the tpr to
>> memory, and that is a sleeping operation. Similarly, if we emulate big
>> real mode we need to simulate interrupt injection by writing 
>> to the stack.
>>     
>
> OK. For  big real mode has issue, that is very easy
>  to walk around, we can probably pre-load (pin) those 2 or 3 pages
> before 
> injecting real mode irq.
>   

Pinning has problems of its own.  It can be made to work though.  In any 
case, it can fail, so we need to ack the interrupt only after we have 
successfully pinned the page.

> For dynamic patch stuff, what is the issue? I saw your previous code
> works, right?
>
> BTW, w/o this patch, your previous TPR optimization patch can work,
> w/ this patch, it won't work. The key difference is when guest access
> TPR,
> per original patch, it will dynamically calcuate the vPPR value base on
> vISR & vTPR. Since vISR is incorrect now (though temply), the patch
> won't work correctly.
>
>   

Sure, it needs to be adjusted.

>> Moving the logic to common code helps, but it could have been done with
>> the original system as well.
>>     
>
> Yes, probably we can split this 2 efforts. The part to make it common
> can be 
> easily in first.
>
>   
>>> If the injection fails, the previous logic will inject it back 
>>> immediately; current logic will do normal irq inject path.
>>> The potential differences are:
>>> 	1: If a new higher interrupt arrives, we inject new vector.
>>> that is probably OK.
>>> 	2: If the fault VM Exit is due to nested exception, the
>>> previous logic will probably inject double fault with the original
>>> interrupt consumed (acked); the new logic will probably inject
>>> double fault too, but keep the IRQ not consumed. I don't have
>>> direct case, but worry about those kind of deviation from native
>>> behavor.
>>>   
>>>       
>> We can probably fix it to ack the interrupt when injecting a 
>> double fault.
>>     
>
> Yes, could be though not very clean. But I am not sure how many 
> other reason will cause interrupt injection failed beside shadow fault.
> We need to check all of them.
>   

Injection can fail (or rather, not happen at all) if we do sleepy 
operations and then get a signal.  In that case, it is better to leave 
the interrupt unacked.

What do you think about the first six patches?  I think we can merge 
them now, and continue discussion about the last, which actually splits 
the injection.  I'll think further about acking the interrupt before 
entry, but after possibly sleepy operations have been done.

-- 
error compiling committee.c: too many arguments to function


-------------------------------------------------------------------------
SF.Net email is sponsored by: The Future of Linux Business White Paper
from Novell.  From the desktop to the data center, Linux is going
mainstream.  Let it simplify your IT future.
http://altfarm.mediaplex.com/ad/ck/8857-50307-18918-4

^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [PATCH 0/7] Rework irq injection infrastructure
       [not found]                 ` <4757CA93.1090704-atKUWr5tajBWk0Htik3J/w@public.gmane.org>
@ 2007-12-06 10:26                   ` Dong, Eddie
       [not found]                     ` <10EA09EFD8728347A513008B6B0DA77A0279CCF2-wq7ZOvIWXbNpB2pF5aRoyrfspsVTdybXVpNB7YpNyf8@public.gmane.org>
  0 siblings, 1 reply; 17+ messages in thread
From: Dong, Eddie @ 2007-12-06 10:26 UTC (permalink / raw)
  To: Avi Kivity; +Cc: kvm-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f

     
>>
>> One of the optimization, which can be used in pure SW is that
>> we can shadow APIC state as "RO" to guest so that all guest
>> read to APIC registers can run in full speed. T
>>
>>   
>
>This can still be done with unacked interrupts: modify the 
>state to "as 
>if" the interrupt was injected, but commit it only if the injection 
>succeeds.  If injection fails, revert the change.  It isn't very nice 
>though.

Agree.

>
>Another possibility is:
>
>   get_interrupt()
>    ... do all sleepy things prior to injection
>    enter critical section
>    ack interrupt
>    enter guest
>

Yes, that is safe and similar with current logic. Plus one condition:
if (real mode) for those sleepy things.

>> This optimization is probably cleaner & less-intrusive. Dynamic
>> patching to modify guest code could trigger mine sooner or later:(
>>
>>   
>
>Dynamic patching is dangerous, but the only option to run 
>Windows SMP on 
>millions of machines out there.

I know:)
RO of APIC page can solve this issue in most case.

>
>>   
>>> But the motivation here is not coding style, it's to be 
>able to do the
>>> injection non-atomically. The tpr-opt patchset wants to write 
>>> the tpr to
>>> memory, and that is a sleeping operation. Similarly, if we 
>emulate big
>>> real mode we need to simulate interrupt injection by writing 
>>> to the stack.
>>>     
>>
>> OK. For  big real mode has issue, that is very easy
>>  to walk around, we can probably pre-load (pin) those 2 or 3 pages
>> before 
>> injecting real mode irq.
>>   
>
>Pinning has problems of its own.  It can be made to work 
>though.  In any 
>case, it can fail, so we need to ack the interrupt only after we have 
>successfully pinned the page.

What I mean is we pre-load the 2 pages where the stack is; the logic may
be:

If (real mode)
	get 2 pages.
enter critical path
....
VM Resume.
put these 2 pages.
handle VM Exit normally.


Since real mode is just a boot/install time issue, performance is not
critical. This way doesn't hurt us.


thx,eddie

-------------------------------------------------------------------------
SF.Net email is sponsored by: The Future of Linux Business White Paper
from Novell.  From the desktop to the data center, Linux is going
mainstream.  Let it simplify your IT future.
http://altfarm.mediaplex.com/ad/ck/8857-50307-18918-4

^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [PATCH 0/7] Rework irq injection infrastructure
       [not found]                     ` <10EA09EFD8728347A513008B6B0DA77A0279CCF2-wq7ZOvIWXbNpB2pF5aRoyrfspsVTdybXVpNB7YpNyf8@public.gmane.org>
@ 2007-12-06 13:24                       ` Dong, Eddie
       [not found]                         ` <10EA09EFD8728347A513008B6B0DA77A0279CD1C-wq7ZOvIWXbNpB2pF5aRoyrfspsVTdybXVpNB7YpNyf8@public.gmane.org>
  0 siblings, 1 reply; 17+ messages in thread
From: Dong, Eddie @ 2007-12-06 13:24 UTC (permalink / raw)
  To: Dong, Eddie, Avi Kivity; +Cc: kvm-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f

>
>What I mean is we pre-load 2 pages where the stack are in, the 
>logic may
>be:
>
>If (real mode)
>	get 2 pages.
>enter critical path
>....
>VM Resume.
>put these 2 pages.
>handle VM Exit normally.
>
>
With 2nd thinking, We can cache these 2 pages. Something like:
if (real mode)
	cache_2_pages (ss:esp)
enter critical path.

In cache_2_pages, old cache can be invalidated (put to user pages), new
pages are loaded (get user page) to avoid SW irq injection page fault.
When switch from real mode to protect mode, invalidate the page cache.

This just needs minimal change :)
Eddie

-------------------------------------------------------------------------
SF.Net email is sponsored by: The Future of Linux Business White Paper
from Novell.  From the desktop to the data center, Linux is going
mainstream.  Let it simplify your IT future.
http://altfarm.mediaplex.com/ad/ck/8857-50307-18918-4

^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [PATCH 0/7] Rework irq injection infrastructure
       [not found]                         ` <10EA09EFD8728347A513008B6B0DA77A0279CD1C-wq7ZOvIWXbNpB2pF5aRoyrfspsVTdybXVpNB7YpNyf8@public.gmane.org>
@ 2007-12-06 14:19                           ` Avi Kivity
  0 siblings, 0 replies; 17+ messages in thread
From: Avi Kivity @ 2007-12-06 14:19 UTC (permalink / raw)
  To: Dong, Eddie; +Cc: kvm-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f

Dong, Eddie wrote:
>> What I mean is we pre-load 2 pages where the stack are in, the 
>> logic may
>> be:
>>
>> If (real mode)
>> 	get 2 pages.
>> enter critical path
>> ....
>> VM Resume.
>> put these 2 pages.
>> handle VM Exit normally.
>>
>>
>>     
> With 2nd thinking, We can cache these 2 pages. Something like:
> if (real mode)
> 	cache_2_pages (ss:esp)
> enter critical path.
>
> In cache_2_pages, old cache can be invalidated (put to user pages), new
> pages are loaded (get user page) to avoid SW irq injection page fault.
> When switch from real mode to protect mode, invalidate the page cache.
>
>   

I prefer the first one.  Indefinitely holding on to pages is not good, 
we may want to remove those pages for swapping or page migration.


-- 
error compiling committee.c: too many arguments to function


-------------------------------------------------------------------------
SF.Net email is sponsored by: The Future of Linux Business White Paper
from Novell.  From the desktop to the data center, Linux is going
mainstream.  Let it simplify your IT future.
http://altfarm.mediaplex.com/ad/ck/8857-50307-18918-4

^ permalink raw reply	[flat|nested] 17+ messages in thread

end of thread, other threads:[~2007-12-06 14:19 UTC | newest]

Thread overview: 17+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2007-12-04  9:44 [PATCH 0/7] Rework irq injection infrastructure Avi Kivity
     [not found] ` <11967614542283-git-send-email-avi-atKUWr5tajBWk0Htik3J/w@public.gmane.org>
2007-12-04  9:44   ` [PATCH 1/7] KVM: Generalize exception injection mechanism Avi Kivity
2007-12-04  9:44   ` [PATCH 2/7] KVM: Replace page fault injection by the generalized exception queue Avi Kivity
2007-12-04  9:44   ` [PATCH 3/7] KVM: Replace #GP " Avi Kivity
2007-12-04  9:44   ` [PATCH 4/7] KVM: Use generalized exception queue for injecting #UD Avi Kivity
2007-12-04  9:44   ` [PATCH 5/7] KVM: Add explicit acks to interrupt controller model Avi Kivity
2007-12-04  9:44   ` [PATCH 6/7] KVM: Move tpr threshold calculation into common code Avi Kivity
2007-12-04  9:44   ` [PATCH 7/7] KVM: Ack interrupts only after they have successfully been injected Avi Kivity
2007-12-04 16:51   ` [PATCH 0/7] Rework irq injection infrastructure Joerg Roedel
     [not found]     ` <20071204165101.GA23093-5C7GfCeVMHo@public.gmane.org>
2007-12-04 16:56       ` Avi Kivity
2007-12-06  7:50   ` Dong, Eddie
     [not found]     ` <10EA09EFD8728347A513008B6B0DA77A0279CC10-wq7ZOvIWXbNpB2pF5aRoyrfspsVTdybXVpNB7YpNyf8@public.gmane.org>
2007-12-06  8:28       ` Avi Kivity
     [not found]         ` <4757B2C3.3000402-atKUWr5tajBWk0Htik3J/w@public.gmane.org>
2007-12-06  9:33           ` Dong, Eddie
     [not found]             ` <10EA09EFD8728347A513008B6B0DA77A0279CCE0-wq7ZOvIWXbNpB2pF5aRoyrfspsVTdybXVpNB7YpNyf8@public.gmane.org>
2007-12-06 10:10               ` Avi Kivity
     [not found]                 ` <4757CA93.1090704-atKUWr5tajBWk0Htik3J/w@public.gmane.org>
2007-12-06 10:26                   ` Dong, Eddie
     [not found]                     ` <10EA09EFD8728347A513008B6B0DA77A0279CCF2-wq7ZOvIWXbNpB2pF5aRoyrfspsVTdybXVpNB7YpNyf8@public.gmane.org>
2007-12-06 13:24                       ` Dong, Eddie
     [not found]                         ` <10EA09EFD8728347A513008B6B0DA77A0279CD1C-wq7ZOvIWXbNpB2pF5aRoyrfspsVTdybXVpNB7YpNyf8@public.gmane.org>
2007-12-06 14:19                           ` Avi Kivity

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox