public inbox for kvm@vger.kernel.org
 help / color / mirror / Atom feed
From: Rusty Russell <rusty-8n+1lVoiYb80n/F98K4Iww@public.gmane.org>
To: Gregory Haskins <ghaskins-Et1tbQHTxzrQT0dZR+AlfA@public.gmane.org>
Cc: kvm-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f@public.gmane.org
Subject: Re: [PATCH] KVM: Remove arch specific components from	the general code
Date: Thu, 26 Jul 2007 20:37:18 +1000	[thread overview]
Message-ID: <1185446238.4895.6.camel@localhost.localdomain> (raw)
In-Reply-To: <20070726042948.5893.58975.stgit-sLgBBP33vUGnsjUZhwzVf9HuzzzSOjJt@public.gmane.org>

On Thu, 2007-07-26 at 00:31 -0400, Gregory Haskins wrote:
> This is a cleanup patch to "de-VMX" the general code.  It was developed in the
> preempt-hooks branch, but it should apply elsewhere as well.

Hi Gregory,

	Great minds think alike.  This is a little rough, but I decided to send
it out tonight because it would make your life easier...

===
Dynamically allocate vcpus

This patch converts the vcpus array in "struct kvm" to a linked list
of VCPUs, and changes the "vcpu_create" and "vcpu_setup" hooks into
one "vcpu_create" call which does the allocation and initialization of
the vcpu (calling back into the kvm_vcpu_init core helper).

It is untested on SMP or SVM, and needs some love (kvm_vcpu_uninit?
What kind of function name is that?) but the idea is that SVM and VMX
can enclose the common "kvm_vcpu" in private structures.

Signed-off-by: Rusty Russell <rusty-8n+1lVoiYb80n/F98K4Iww@public.gmane.org>

diff -r 677938752656 drivers/kvm/kvm.h
--- a/drivers/kvm/kvm.h	Wed Jul 25 13:43:25 2007 +1000
+++ b/drivers/kvm/kvm.h	Wed Jul 25 16:16:56 2007 +1000
@@ -309,6 +309,7 @@ void kvm_io_bus_register_dev(struct kvm_
 			     struct kvm_io_device *dev);
 
 struct kvm_vcpu {
+	struct list_head list;
 	struct kvm *kvm;
 	int vcpu_id;
 	union {
@@ -429,8 +430,7 @@ struct kvm {
 	struct list_head active_mmu_pages;
 	int n_free_mmu_pages;
 	struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
-	int nvcpus;
-	struct kvm_vcpu vcpus[KVM_MAX_VCPUS];
+	struct list_head vcpus;
 	int memory_config_version;
 	int busy;
 	unsigned long rmap_overflow;
@@ -453,7 +453,8 @@ struct kvm_arch_ops {
 	int (*hardware_setup)(void);               /* __init */
 	void (*hardware_unsetup)(void);            /* __exit */
 
-	int (*vcpu_create)(struct kvm_vcpu *vcpu);
+	/* Create, but do not attach this VCPU */
+	struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned id);
 	void (*vcpu_free)(struct kvm_vcpu *vcpu);
 
 	void (*vcpu_load)(struct kvm_vcpu *vcpu);
@@ -495,7 +496,6 @@ struct kvm_arch_ops {
 	void (*inject_gp)(struct kvm_vcpu *vcpu, unsigned err_code);
 
 	int (*run)(struct kvm_vcpu *vcpu, struct kvm_run *run);
-	int (*vcpu_setup)(struct kvm_vcpu *vcpu);
 	void (*skip_emulated_instruction)(struct kvm_vcpu *vcpu);
 	void (*patch_hypercall)(struct kvm_vcpu *vcpu,
 				unsigned char *hypercall_addr);
@@ -513,6 +513,9 @@ extern struct kvm_arch_ops *kvm_arch_ops
 
 #define kvm_printf(kvm, fmt ...) printk(KERN_DEBUG fmt)
 #define vcpu_printf(vcpu, fmt...) kvm_printf(vcpu->kvm, fmt)
+
+int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
+void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
 
 int kvm_init_arch(struct kvm_arch_ops *ops, struct module *module);
 void kvm_exit_arch(void);
diff -r 677938752656 drivers/kvm/kvm_main.c
--- a/drivers/kvm/kvm_main.c	Wed Jul 25 13:43:25 2007 +1000
+++ b/drivers/kvm/kvm_main.c	Wed Jul 25 18:25:15 2007 +1000
@@ -139,11 +139,6 @@ unsigned long segment_base(u16 selector)
 }
 EXPORT_SYMBOL_GPL(segment_base);
 
-static inline int valid_vcpu(int n)
-{
-	return likely(n >= 0 && n < KVM_MAX_VCPUS);
-}
-
 int kvm_read_guest(struct kvm_vcpu *vcpu, gva_t addr, unsigned long size,
 		   void *dest)
 {
@@ -258,7 +253,7 @@ static void ack_flush(void *_completed)
 
 void kvm_flush_remote_tlbs(struct kvm *kvm)
 {
-	int i, cpu, needed;
+	int cpu, needed;
 	cpumask_t cpus;
 	struct kvm_vcpu *vcpu;
 	atomic_t completed;
@@ -266,8 +261,7 @@ void kvm_flush_remote_tlbs(struct kvm *k
 	atomic_set(&completed, 0);
 	cpus_clear(cpus);
 	needed = 0;
-	for (i = 0; i < kvm->nvcpus; ++i) {
-		vcpu = &kvm->vcpus[i];
+	list_for_each_entry(vcpu, &kvm->vcpus, list) {
 		if (test_and_set_bit(KVM_TLB_FLUSH, &vcpu->requests))
 			continue;
 		cpu = vcpu->cpu;
@@ -291,10 +285,62 @@ void kvm_flush_remote_tlbs(struct kvm *k
 	}
 }
 
+int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
+{
+	struct page *page;
+	int r;
+
+	mutex_init(&vcpu->mutex);
+	vcpu->cpu = -1;
+	vcpu->mmu.root_hpa = INVALID_PAGE;
+	vcpu->kvm = kvm;
+	vcpu->vcpu_id = id;
+
+	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+	if (!page) {
+		r = -ENOMEM;
+		goto fail;
+	}
+	vcpu->run = page_address(page);
+
+	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+	if (!page) {
+		r = -ENOMEM;
+		goto fail_free_run;
+	}
+	vcpu->pio_data = page_address(page);
+
+	vcpu->host_fx_image = (char*)ALIGN((hva_t)vcpu->fx_buf,
+					   FX_IMAGE_ALIGN);
+	vcpu->guest_fx_image = vcpu->host_fx_image + FX_IMAGE_SIZE;
+
+	r = kvm_mmu_create(vcpu);
+	if (r < 0)
+		goto fail_free_pio_data;
+
+	return 0;
+
+fail_free_pio_data:
+	free_page((unsigned long)vcpu->pio_data);
+fail_free_run:
+	free_page((unsigned long)vcpu->run);
+fail:
+	return r;
+}
+EXPORT_SYMBOL_GPL(kvm_vcpu_init);
+
+void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
+{
+	kvm_mmu_destroy(vcpu);
+	free_page((unsigned long)vcpu->pio_data);
+	free_page((unsigned long)vcpu->run);
+
+}
+EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);
+
 static struct kvm *kvm_create_vm(void)
 {
 	struct kvm *kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
-	int i;
 
 	if (!kvm)
 		return ERR_PTR(-ENOMEM);
@@ -303,14 +349,7 @@ static struct kvm *kvm_create_vm(void)
 	spin_lock_init(&kvm->lock);
 	INIT_LIST_HEAD(&kvm->active_mmu_pages);
 	kvm_io_bus_init(&kvm->mmio_bus);
-	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
-		struct kvm_vcpu *vcpu = &kvm->vcpus[i];
-
-		mutex_init(&vcpu->mutex);
-		vcpu->cpu = -1;
-		vcpu->kvm = kvm;
-		vcpu->mmu.root_hpa = INVALID_PAGE;
-	}
+	INIT_LIST_HEAD(&kvm->vcpus);
 	spin_lock(&kvm_lock);
 	list_add(&kvm->vm_list, &vm_list);
 	spin_unlock(&kvm_lock);
@@ -362,41 +401,32 @@ static void free_pio_guest_pages(struct 
 
 static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
 {
-	if (!vcpu->vmcs)
-		return;
-
 	vcpu_load(vcpu);
 	kvm_mmu_unload(vcpu);
 	vcpu_put(vcpu);
 }
 
-static void kvm_free_vcpu(struct kvm_vcpu *vcpu)
-{
-	if (!vcpu->vmcs)
-		return;
-
-	vcpu_load(vcpu);
-	kvm_mmu_destroy(vcpu);
-	vcpu_put(vcpu);
-	kvm_arch_ops->vcpu_free(vcpu);
-	free_page((unsigned long)vcpu->run);
-	vcpu->run = NULL;
-	free_page((unsigned long)vcpu->pio_data);
-	vcpu->pio_data = NULL;
-	free_pio_guest_pages(vcpu);
-}
-
 static void kvm_free_vcpus(struct kvm *kvm)
 {
-	unsigned int i;
+	struct kvm_vcpu *vcpu;
 
 	/*
 	 * Unpin any mmu pages first.
 	 */
-	for (i = 0; i < KVM_MAX_VCPUS; ++i)
-		kvm_unload_vcpu_mmu(&kvm->vcpus[i]);
-	for (i = 0; i < KVM_MAX_VCPUS; ++i)
-		kvm_free_vcpu(&kvm->vcpus[i]);
+	list_for_each_entry(vcpu, &kvm->vcpus, list)
+		kvm_unload_vcpu_mmu(vcpu);
+
+	spin_lock(&kvm->lock);
+	while (!list_empty(&kvm->vcpus)) {
+		vcpu = list_first_entry(&kvm->vcpus, struct kvm_vcpu, list);
+		list_del(&vcpu->list);
+
+		/* Drop lock to free it, now it's detached. */
+		spin_unlock(&kvm->lock);
+		kvm_arch_ops->vcpu_free(vcpu);
+		spin_lock(&kvm->lock);
+	}
+	spin_unlock(&kvm->lock);
 }
 
 static void kvm_destroy_vm(struct kvm *kvm)
@@ -2375,79 +2405,51 @@ static int create_vcpu_fd(struct kvm_vcp
 /*
  * Creates some virtual cpus.  Good luck creating more than one.
  */
-static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
+static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, unsigned int id)
 {
 	int r;
-	struct kvm_vcpu *vcpu;
-	struct page *page;
-
-	r = -EINVAL;
-	if (!valid_vcpu(n))
-		goto out;
-
-	vcpu = &kvm->vcpus[n];
-	vcpu->vcpu_id = n;
-
-	mutex_lock(&vcpu->mutex);
-
-	if (vcpu->vmcs) {
-		mutex_unlock(&vcpu->mutex);
-		return -EEXIST;
-	}
-
-	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
-	r = -ENOMEM;
-	if (!page)
-		goto out_unlock;
-	vcpu->run = page_address(page);
-
-	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
-	r = -ENOMEM;
-	if (!page)
-		goto out_free_run;
-	vcpu->pio_data = page_address(page);
-
-	vcpu->host_fx_image = (char*)ALIGN((hva_t)vcpu->fx_buf,
-					   FX_IMAGE_ALIGN);
-	vcpu->guest_fx_image = vcpu->host_fx_image + FX_IMAGE_SIZE;
-	vcpu->cr0 = 0x10;
-
-	r = kvm_arch_ops->vcpu_create(vcpu);
+	struct kvm_vcpu *vcpu, *i;
+
+	vcpu = kvm_arch_ops->vcpu_create(kvm, id);
+	if (IS_ERR(vcpu))
+		return PTR_ERR(vcpu);
+
+	vcpu_load(vcpu);
+	r = kvm_mmu_setup(vcpu);
+	vcpu_put(vcpu);
 	if (r < 0)
-		goto out_free_vcpus;
-
-	r = kvm_mmu_create(vcpu);
-	if (r < 0)
-		goto out_free_vcpus;
-
-	kvm_arch_ops->vcpu_load(vcpu);
-	r = kvm_mmu_setup(vcpu);
-	if (r >= 0)
-		r = kvm_arch_ops->vcpu_setup(vcpu);
-	vcpu_put(vcpu);
-
-	if (r < 0)
-		goto out_free_vcpus;
-
+		goto free_vcpu;
+
+	spin_lock(&kvm->lock);
+	/* What do we care if they create duplicate CPU ids?  But be nice. */
+	list_for_each_entry(i, &kvm->vcpus, list) {
+		if (i->vcpu_id == id) {
+			r = -EEXIST;
+			spin_unlock(&kvm->lock);
+			goto mmu_unload;
+		}
+	}
+	list_add_tail(&vcpu->list, &kvm->vcpus);
+	spin_unlock(&kvm->lock);
+
+	/* Now it's all set up, let userspace reach it */
 	r = create_vcpu_fd(vcpu);
 	if (r < 0)
-		goto out_free_vcpus;
-
-	spin_lock(&kvm_lock);
-	if (n >= kvm->nvcpus)
-		kvm->nvcpus = n + 1;
-	spin_unlock(&kvm_lock);
-
+		goto unlink;
 	return r;
 
-out_free_vcpus:
-	kvm_free_vcpu(vcpu);
-out_free_run:
-	free_page((unsigned long)vcpu->run);
-	vcpu->run = NULL;
-out_unlock:
-	mutex_unlock(&vcpu->mutex);
-out:
+unlink:
+	spin_lock(&kvm->lock);
+	list_del(&vcpu->list);
+	spin_unlock(&kvm->lock);
+
+mmu_unload:
+	vcpu_load(vcpu);
+	kvm_mmu_unload(vcpu);
+	vcpu_put(vcpu);
+
+free_vcpu:
+	kvm_arch_ops->vcpu_free(vcpu);
 	return r;
 }
 
@@ -2935,12 +2937,11 @@ static void decache_vcpus_on_cpu(int cpu
 {
 	struct kvm *vm;
 	struct kvm_vcpu *vcpu;
-	int i;
 
 	spin_lock(&kvm_lock);
-	list_for_each_entry(vm, &vm_list, vm_list)
-		for (i = 0; i < KVM_MAX_VCPUS; ++i) {
-			vcpu = &vm->vcpus[i];
+	list_for_each_entry(vm, &vm_list, vm_list) {
+		spin_lock(&vm->lock);
+		list_for_each_entry(vcpu, &vm->vcpus, list) {
 			/*
 			 * If the vcpu is locked, then it is running on some
 			 * other cpu and therefore it is not cached on the
@@ -2957,6 +2958,8 @@ static void decache_vcpus_on_cpu(int cpu
 				mutex_unlock(&vcpu->mutex);
 			}
 		}
+		spin_unlock(&vm->lock);
+	}
 	spin_unlock(&kvm_lock);
 }
 
@@ -3072,14 +3075,14 @@ static u64 stat_get(void *_offset)
 	u64 total = 0;
 	struct kvm *kvm;
 	struct kvm_vcpu *vcpu;
-	int i;
 
 	spin_lock(&kvm_lock);
-	list_for_each_entry(kvm, &vm_list, vm_list)
-		for (i = 0; i < KVM_MAX_VCPUS; ++i) {
-			vcpu = &kvm->vcpus[i];
+	list_for_each_entry(kvm, &vm_list, vm_list) {
+		spin_lock(&kvm->lock);
+		list_for_each_entry(vcpu, &kvm->vcpus, list)
 			total += *(u32 *)((void *)vcpu + offset);
-		}
+		spin_unlock(&kvm->lock);
+	}
 	spin_unlock(&kvm_lock);
 	return total;
 }
diff -r 677938752656 drivers/kvm/svm.c
--- a/drivers/kvm/svm.c	Wed Jul 25 13:43:25 2007 +1000
+++ b/drivers/kvm/svm.c	Wed Jul 25 18:25:25 2007 +1000
@@ -454,11 +454,6 @@ static void init_sys_seg(struct vmcb_seg
 	seg->attrib = SVM_SELECTOR_P_MASK | type;
 	seg->limit = 0xffff;
 	seg->base = 0;
-}
-
-static int svm_vcpu_setup(struct kvm_vcpu *vcpu)
-{
-	return 0;
 }
 
 static void init_vmcb(struct vmcb *vmcb)
@@ -566,18 +561,29 @@ static void init_vmcb(struct vmcb *vmcb)
 	/* rdx = ?? */
 }
 
-static int svm_create_vcpu(struct kvm_vcpu *vcpu)
-{
+static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
+{
+	int err;
 	struct page *page;
-	int r;
-
-	r = -ENOMEM;
+	struct kvm_vcpu *vcpu = kzalloc(sizeof(*vcpu), GFP_KERNEL);
+
+	if (!vcpu)
+		return ERR_PTR(-ENOMEM);
+
+	err = kvm_vcpu_init(vcpu, kvm, id);
+	if (err)
+		goto free_vcpu;
+
 	vcpu->svm = kzalloc(sizeof *vcpu->svm, GFP_KERNEL);
-	if (!vcpu->svm)
-		goto out1;
+	if (!vcpu->svm) {
+		err = -ENOMEM;
+		goto uninit_vcpu;
+	}
 	page = alloc_page(GFP_KERNEL);
-	if (!page)
-		goto out2;
+	if (!page) {
+		err = -ENOMEM;
+		goto free_svm;
+	}
 
 	vcpu->svm->vmcb = page_address(page);
 	clear_page(vcpu->svm->vmcb);
@@ -591,22 +597,25 @@ static int svm_create_vcpu(struct kvm_vc
 	vcpu->apic_base = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
 	if (vcpu->vcpu_id == 0)
 		vcpu->apic_base |= MSR_IA32_APICBASE_BSP;
-
-	return 0;
-
-out2:
+	vcpu->cr0 = 0x10;
+
+	return vcpu;
+
+free_svm:
 	kfree(vcpu->svm);
-out1:
-	return r;
+uninit_vcpu:
+	kvm_vcpu_uninit(vcpu);
+free_vcpu:
+	kfree(vcpu);
+	return ERR_PTR(err);
 }
 
 static void svm_free_vcpu(struct kvm_vcpu *vcpu)
 {
-	if (!vcpu->svm)
-		return;
-	if (vcpu->svm->vmcb)
-		__free_page(pfn_to_page(vcpu->svm->vmcb_pa >> PAGE_SHIFT));
+	__free_page(pfn_to_page(vcpu->svm->vmcb_pa >> PAGE_SHIFT));
 	kfree(vcpu->svm);
+	kvm_vcpu_uninit(vcpu);
+	kfree(vcpu);
 }
 
 static void svm_vcpu_load(struct kvm_vcpu *vcpu)
@@ -1798,7 +1807,6 @@ static struct kvm_arch_ops svm_arch_ops 
 
 	.run = svm_vcpu_run,
 	.skip_emulated_instruction = skip_emulated_instruction,
-	.vcpu_setup = svm_vcpu_setup,
 	.patch_hypercall = svm_patch_hypercall,
 };
 
diff -r 677938752656 drivers/kvm/vmx.c
--- a/drivers/kvm/vmx.c	Wed Jul 25 13:43:25 2007 +1000
+++ b/drivers/kvm/vmx.c	Wed Jul 25 18:22:26 2007 +1000
@@ -2234,39 +2234,60 @@ static void vmx_free_vcpu(struct kvm_vcp
 static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
 {
 	vmx_free_vmcs(vcpu);
-}
-
-static int vmx_create_vcpu(struct kvm_vcpu *vcpu)
-{
-	struct vmcs *vmcs;
+	kfree(vcpu->host_msrs);
+	kfree(vcpu->guest_msrs);
+	kvm_vcpu_uninit(vcpu);
+	kfree(vcpu);
+}
+
+static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
+{
+	int err;
+	struct kvm_vcpu *vcpu = kzalloc(sizeof(*vcpu), GFP_KERNEL);
+
+	if (!vcpu)
+		return ERR_PTR(-ENOMEM);
+
+	err = kvm_vcpu_init(vcpu, kvm, id);
+	if (err)
+		goto free_vcpu;
 
 	vcpu->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
-	if (!vcpu->guest_msrs)
-		return -ENOMEM;
+	if (!vcpu->guest_msrs) {
+		err = -ENOMEM;
+		goto uninit_vcpu;
+	}
 
 	vcpu->host_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
 	if (!vcpu->host_msrs)
-		goto out_free_guest_msrs;
-
-	vmcs = alloc_vmcs();
-	if (!vmcs)
-		goto out_free_msrs;
-
-	vmcs_clear(vmcs);
-	vcpu->vmcs = vmcs;
+		{ err = -ENOMEM; goto free_guest_msrs; }
+
+	vcpu->vmcs = alloc_vmcs();
+	if (!vcpu->vmcs)
+		{ err = -ENOMEM; goto free_msrs; }
+
+	vmcs_clear(vcpu->vmcs);
 	vcpu->launched = 0;
 
-	return 0;
-
-out_free_msrs:
+	vmx_vcpu_load(vcpu);
+	err = vmx_vcpu_setup(vcpu);
+	vmx_vcpu_put(vcpu);
+	if (err)
+		goto free_vmcs;
+
+	return vcpu;
+
+free_vmcs:
+	free_vmcs(vcpu->vmcs);
+free_msrs:
 	kfree(vcpu->host_msrs);
-	vcpu->host_msrs = NULL;
-
-out_free_guest_msrs:
+free_guest_msrs:
 	kfree(vcpu->guest_msrs);
-	vcpu->guest_msrs = NULL;
-
-	return -ENOMEM;
+uninit_vcpu:
+	kvm_vcpu_uninit(vcpu);
+free_vcpu:
+	kfree(vcpu);
+	return ERR_PTR(err);
 }
 
 static struct kvm_arch_ops vmx_arch_ops = {
@@ -2314,7 +2335,6 @@ static struct kvm_arch_ops vmx_arch_ops 
 
 	.run = vmx_vcpu_run,
 	.skip_emulated_instruction = skip_emulated_instruction,
-	.vcpu_setup = vmx_vcpu_setup,
 	.patch_hypercall = vmx_patch_hypercall,
 };
 



-------------------------------------------------------------------------
This SF.net email is sponsored by: Splunk Inc.
Still grepping through log files to find problems?  Stop.
Now Search log events and configuration files using AJAX and a browser.
Download your FREE copy of Splunk now >>  http://get.splunk.com/

  parent reply	other threads:[~2007-07-26 10:37 UTC|newest]

Thread overview: 17+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2007-07-26  4:31 [PATCH] KVM: Remove arch specific components from the general code Gregory Haskins
     [not found] ` <20070726042948.5893.58975.stgit-sLgBBP33vUGnsjUZhwzVf9HuzzzSOjJt@public.gmane.org>
2007-07-26 10:03   ` Avi Kivity
2007-07-26 10:37   ` Rusty Russell [this message]
     [not found]     ` <1185446238.4895.6.camel-bi+AKbBUZKY6gyzm1THtWbp2dZbC/Bob@public.gmane.org>
2007-07-26 11:05       ` Avi Kivity
     [not found]         ` <46A87FDD.7060308-atKUWr5tajBWk0Htik3J/w@public.gmane.org>
2007-07-26 23:27           ` Rusty Russell
     [not found]             ` <1185492423.9484.9.camel-bi+AKbBUZKY6gyzm1THtWbp2dZbC/Bob@public.gmane.org>
2007-07-27  4:41               ` Avi Kivity
     [not found]                 ` <46A9776C.9010007-atKUWr5tajBWk0Htik3J/w@public.gmane.org>
2007-07-27  5:23                   ` Rusty Russell
  -- strict thread matches above, loose matches on Subject: below --
2007-07-26 12:20 Gregory Haskins
     [not found] ` <46A8594E0200005A0002831D-Igcdv/6uVdMHoYOw/+koYqIwWpluYiW7@public.gmane.org>
2007-07-26 12:33   ` Avi Kivity
2007-07-26 13:16 Gregory Haskins
2007-07-26 13:16 Gregory Haskins
     [not found] ` <46A866860200005A00028332-Igcdv/6uVdMHoYOw/+koYqIwWpluYiW7@public.gmane.org>
2007-07-26 13:37   ` Avi Kivity
2007-07-26 13:51 Gregory Haskins
     [not found] ` <46A86E8A0200005A00028346-Igcdv/6uVdMHoYOw/+koYqIwWpluYiW7@public.gmane.org>
2007-07-26 13:59   ` Avi Kivity
2007-07-26 23:50 Gregory Haskins
     [not found] ` <46A8FB230200005A000283D9-Igcdv/6uVdMHoYOw/+koYqIwWpluYiW7@public.gmane.org>
2007-07-27  1:11   ` Rusty Russell
     [not found]     ` <1185498672.12151.11.camel-bi+AKbBUZKY6gyzm1THtWbp2dZbC/Bob@public.gmane.org>
2007-07-27  4:51       ` Avi Kivity

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=1185446238.4895.6.camel@localhost.localdomain \
    --to=rusty-8n+1lvoiyb80n/f98k4iww@public.gmane.org \
    --cc=ghaskins-Et1tbQHTxzrQT0dZR+AlfA@public.gmane.org \
    --cc=kvm-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f@public.gmane.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox