* [PATCH] Activate Virtualization On Demand v2
@ 2008-11-05 8:48 Alexander Graf
2008-11-05 10:06 ` Avi Kivity
` (3 more replies)
0 siblings, 4 replies; 22+ messages in thread
From: Alexander Graf @ 2008-11-05 8:48 UTC (permalink / raw)
To: kvm; +Cc: avi, kraxel, anthony, Sander.Vanleeuwen, zach, brogers
X86 CPUs need to have some magic happening to enable the virtualization
extensions on them. This magic can result in unpleasant results for
users, like blocking other VMMs from working (vmx) or using invalid TLB
entries (svm).
Currently KVM activates virtualization when the respective kernel module
is loaded. This blocks us from autoloading KVM modules without breaking
other VMMs.
To circumvent this problem at least a bit, this patch introduces on-demand
activation of virtualization. This means that, instead, virtualization is
enabled on creation of the first virtual machine and disabled on removal
of the last one.
So using this, KVM can be easily autoloaded, while keeping other
hypervisors usable.
v2 adds returns to non-x86 hardware_enables and adds IA64 change
Signed-off-by: Alexander Graf <agraf@suse.de>
---
arch/ia64/kvm/kvm-ia64.c | 8 +++--
arch/powerpc/kvm/powerpc.c | 3 +-
arch/s390/kvm/kvm-s390.c | 3 +-
arch/x86/kvm/svm.c | 13 +++++--
arch/x86/kvm/vmx.c | 7 ++++-
arch/x86/kvm/x86.c | 4 +-
include/asm-x86/kvm_host.h | 2 +-
include/linux/kvm_host.h | 2 +-
virt/kvm/kvm_main.c | 72 +++++++++++++++++++++++++++++++------------
9 files changed, 80 insertions(+), 34 deletions(-)
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index c25c75b..2bd79db 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -110,7 +110,7 @@ long ia64_pal_vp_create(u64 *vpd, u64 *host_iva, u64 *opt_handler)
static DEFINE_SPINLOCK(vp_lock);
-void kvm_arch_hardware_enable(void *garbage)
+int kvm_arch_hardware_enable(void *garbage)
{
long status;
long tmp_base;
@@ -124,7 +124,7 @@ void kvm_arch_hardware_enable(void *garbage)
slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT);
local_irq_restore(saved_psr);
if (slot < 0)
- return;
+ return -ENOMEM;
spin_lock(&vp_lock);
status = ia64_pal_vp_init_env(kvm_vsa_base ?
@@ -132,7 +132,7 @@ void kvm_arch_hardware_enable(void *garbage)
__pa(kvm_vm_buffer), KVM_VM_BUFFER_BASE, &tmp_base);
if (status != 0) {
printk(KERN_WARNING"kvm: Failed to Enable VT Support!!!!\n");
- return ;
+ return -EINVAL;
}
if (!kvm_vsa_base) {
@@ -141,6 +141,8 @@ void kvm_arch_hardware_enable(void *garbage)
}
spin_unlock(&vp_lock);
ia64_ptr_entry(0x3, slot);
+
+ return 0;
}
void kvm_arch_hardware_disable(void *garbage)
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 90a6fc4..dce664b 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -79,8 +79,9 @@ int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
return r;
}
-void kvm_arch_hardware_enable(void *garbage)
+int kvm_arch_hardware_enable(void *garbage)
{
+ return 0;
}
void kvm_arch_hardware_disable(void *garbage)
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 8b00eb2..91e08e5 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -70,9 +70,10 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
/* Section: not file related */
-void kvm_arch_hardware_enable(void *garbage)
+int kvm_arch_hardware_enable(void *garbage)
{
/* every s390 is virtualization enabled ;-) */
+ return 0;
}
void kvm_arch_hardware_disable(void *garbage)
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 05efc4e..3a8a820 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -275,7 +275,7 @@ static void svm_hardware_disable(void *garbage)
wrmsrl(MSR_EFER, efer & ~MSR_EFER_SVME_MASK);
}
-static void svm_hardware_enable(void *garbage)
+static int svm_hardware_enable(void *garbage)
{
struct svm_cpu_data *svm_data;
@@ -284,16 +284,20 @@ static void svm_hardware_enable(void *garbage)
struct desc_struct *gdt;
int me = raw_smp_processor_id();
+ rdmsrl(MSR_EFER, efer);
+ if (efer & MSR_EFER_SVME_MASK)
+ return -EBUSY;
+
if (!has_svm()) {
printk(KERN_ERR "svm_cpu_init: err EOPNOTSUPP on %d\n", me);
- return;
+ return -EINVAL;
}
svm_data = per_cpu(svm_data, me);
if (!svm_data) {
printk(KERN_ERR "svm_cpu_init: svm_data is NULL on %d\n",
me);
- return;
+ return -EINVAL;
}
svm_data->asid_generation = 1;
@@ -304,11 +308,12 @@ static void svm_hardware_enable(void *garbage)
gdt = (struct desc_struct *)gdt_descr.address;
svm_data->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);
- rdmsrl(MSR_EFER, efer);
wrmsrl(MSR_EFER, efer | MSR_EFER_SVME_MASK);
wrmsrl(MSR_VM_HSAVE_PA,
page_to_pfn(svm_data->save_area) << PAGE_SHIFT);
+
+ return 0;
}
static void svm_cpu_uninit(int cpu)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 64e2439..9a4f47c 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1059,12 +1059,15 @@ static __init int vmx_disabled_by_bios(void)
/* locked but not enabled */
}
-static void hardware_enable(void *garbage)
+static int hardware_enable(void *garbage)
{
int cpu = raw_smp_processor_id();
u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
u64 old;
+ if (read_cr4() & X86_CR4_VMXE)
+ return -EBUSY;
+
INIT_LIST_HEAD(&per_cpu(vcpus_on_cpu, cpu));
rdmsrl(MSR_IA32_FEATURE_CONTROL, old);
if ((old & (FEATURE_CONTROL_LOCKED |
@@ -1079,6 +1082,8 @@ static void hardware_enable(void *garbage)
asm volatile (ASM_VMX_VMXON_RAX
: : "a"(&phys_addr), "m"(phys_addr)
: "memory", "cc");
+
+ return 0;
}
static void vmclear_local_vcpus(void)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index ceeac88..525cb67 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -4030,9 +4030,9 @@ int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu)
return kvm_x86_ops->vcpu_reset(vcpu);
}
-void kvm_arch_hardware_enable(void *garbage)
+int kvm_arch_hardware_enable(void *garbage)
{
- kvm_x86_ops->hardware_enable(garbage);
+ return kvm_x86_ops->hardware_enable(garbage);
}
void kvm_arch_hardware_disable(void *garbage)
diff --git a/include/asm-x86/kvm_host.h b/include/asm-x86/kvm_host.h
index eebba6a..8d59d9f 100644
--- a/include/asm-x86/kvm_host.h
+++ b/include/asm-x86/kvm_host.h
@@ -424,7 +424,7 @@ struct descriptor_table {
struct kvm_x86_ops {
int (*cpu_has_kvm_support)(void); /* __init */
int (*disabled_by_bios)(void); /* __init */
- void (*hardware_enable)(void *dummy); /* __init */
+ int (*hardware_enable)(void *dummy); /* __init */
void (*hardware_disable)(void *dummy);
void (*check_processor_compatibility)(void *rtn);
int (*hardware_setup)(void); /* __init */
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 3a0fb77..d141741 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -272,7 +272,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);
int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu);
-void kvm_arch_hardware_enable(void *garbage);
+int kvm_arch_hardware_enable(void *garbage);
void kvm_arch_hardware_disable(void *garbage);
int kvm_arch_hardware_setup(void);
void kvm_arch_hardware_unsetup(void);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 4f43abe..5a9111e 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -64,6 +64,7 @@ DEFINE_SPINLOCK(kvm_lock);
LIST_HEAD(vm_list);
static cpumask_t cpus_hardware_enabled;
+static atomic_t kvm_usage_count = { 0 };
struct kmem_cache *kvm_vcpu_cache;
EXPORT_SYMBOL_GPL(kvm_vcpu_cache);
@@ -74,6 +75,8 @@ struct dentry *kvm_debugfs_dir;
static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
unsigned long arg);
+static void hardware_enable(void *_r);
+static void hardware_disable(void *junk);
bool kvm_rebooting;
@@ -563,19 +566,27 @@ static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
static struct kvm *kvm_create_vm(void)
{
+ int r;
struct kvm *kvm = kvm_arch_create_vm();
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
struct page *page;
#endif
if (IS_ERR(kvm))
- goto out;
+ return kvm;
+
+ if (atomic_add_return(1, &kvm_usage_count) == 1) {
+ on_each_cpu(hardware_enable, &r, 1);
+
+ if (r)
+ goto out_err;
+ }
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
page = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (!page) {
- kfree(kvm);
- return ERR_PTR(-ENOMEM);
+ r = -ENOMEM;
+ goto out_err;
}
kvm->coalesced_mmio_ring =
(struct kvm_coalesced_mmio_ring *)page_address(page);
@@ -583,15 +594,13 @@ static struct kvm *kvm_create_vm(void)
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
{
- int err;
kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
- err = mmu_notifier_register(&kvm->mmu_notifier, current->mm);
- if (err) {
+ r = mmu_notifier_register(&kvm->mmu_notifier, current->mm);
+ if (r) {
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
put_page(page);
#endif
- kfree(kvm);
- return ERR_PTR(err);
+ goto out_err;
}
}
#endif
@@ -610,8 +619,13 @@ static struct kvm *kvm_create_vm(void)
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
kvm_coalesced_mmio_init(kvm);
#endif
-out:
return kvm;
+
+out_err:
+ if (atomic_dec_and_test(&kvm_usage_count))
+ on_each_cpu(hardware_disable, NULL, 1);
+ kfree(kvm);
+ return ERR_PTR(r);
}
/*
@@ -660,6 +674,8 @@ static void kvm_destroy_vm(struct kvm *kvm)
mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
#endif
kvm_arch_destroy_vm(kvm);
+ if (atomic_dec_and_test(&kvm_usage_count))
+ on_each_cpu(hardware_disable, NULL, 1);
mmdrop(mm);
}
@@ -1767,14 +1783,23 @@ static struct miscdevice kvm_dev = {
&kvm_chardev_ops,
};
-static void hardware_enable(void *junk)
+static void hardware_enable(void *_r)
{
int cpu = raw_smp_processor_id();
+ int r;
if (cpu_isset(cpu, cpus_hardware_enabled))
return;
+ r = kvm_arch_hardware_enable(NULL);
+ if (_r)
+ *((int*)_r) = r;
+ if (r) {
+ printk(KERN_INFO "kvm: enabling virtualization on "
+ "CPU%d failed\n", cpu);
+ return;
+ }
+
cpu_set(cpu, cpus_hardware_enabled);
- kvm_arch_hardware_enable(NULL);
}
static void hardware_disable(void *junk)
@@ -1797,17 +1822,22 @@ static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
case CPU_DYING:
printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
cpu);
- hardware_disable(NULL);
+ if (atomic_read(&kvm_usage_count))
+ hardware_disable(NULL);
break;
case CPU_UP_CANCELED:
printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
cpu);
- smp_call_function_single(cpu, hardware_disable, NULL, 1);
+ if (atomic_read(&kvm_usage_count))
+ smp_call_function_single(cpu, hardware_disable,
+ NULL, 1);
break;
case CPU_ONLINE:
printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
cpu);
- smp_call_function_single(cpu, hardware_enable, NULL, 1);
+ if (atomic_read(&kvm_usage_count))
+ smp_call_function_single(cpu, hardware_enable,
+ NULL, 1);
break;
}
return NOTIFY_OK;
@@ -1835,7 +1865,8 @@ static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
*/
printk(KERN_INFO "kvm: exiting hardware virtualization\n");
kvm_rebooting = true;
- on_each_cpu(hardware_disable, NULL, 1);
+ if (atomic_read(&kvm_usage_count))
+ on_each_cpu(hardware_disable, NULL, 1);
}
return NOTIFY_OK;
}
@@ -1951,13 +1982,15 @@ static void kvm_exit_debug(void)
static int kvm_suspend(struct sys_device *dev, pm_message_t state)
{
- hardware_disable(NULL);
+ if (atomic_read(&kvm_usage_count))
+ hardware_disable(NULL);
return 0;
}
static int kvm_resume(struct sys_device *dev)
{
- hardware_enable(NULL);
+ if (atomic_read(&kvm_usage_count))
+ hardware_enable(NULL);
return 0;
}
@@ -2029,7 +2062,6 @@ int kvm_init(void *opaque, unsigned int vcpu_size,
goto out_free_1;
}
- on_each_cpu(hardware_enable, NULL, 1);
r = register_cpu_notifier(&kvm_cpu_notifier);
if (r)
goto out_free_2;
@@ -2075,7 +2107,6 @@ out_free_3:
unregister_reboot_notifier(&kvm_reboot_notifier);
unregister_cpu_notifier(&kvm_cpu_notifier);
out_free_2:
- on_each_cpu(hardware_disable, NULL, 1);
out_free_1:
kvm_arch_hardware_unsetup();
out_free_0:
@@ -2097,7 +2128,8 @@ void kvm_exit(void)
sysdev_class_unregister(&kvm_sysdev_class);
unregister_reboot_notifier(&kvm_reboot_notifier);
unregister_cpu_notifier(&kvm_cpu_notifier);
- on_each_cpu(hardware_disable, NULL, 1);
+ if (atomic_read(&kvm_usage_count))
+ on_each_cpu(hardware_disable, NULL, 1);
kvm_arch_hardware_unsetup();
kvm_arch_exit();
kvm_exit_debug();
--
1.6.0.2
^ permalink raw reply related [flat|nested] 22+ messages in thread* Re: [PATCH] Activate Virtualization On Demand v2
2008-11-05 8:48 [PATCH] Activate Virtualization On Demand v2 Alexander Graf
@ 2008-11-05 10:06 ` Avi Kivity
2008-11-05 10:28 ` Alexander Graf
2008-11-05 10:45 ` Zhang, Xiantao
` (2 subsequent siblings)
3 siblings, 1 reply; 22+ messages in thread
From: Avi Kivity @ 2008-11-05 10:06 UTC (permalink / raw)
To: Alexander Graf; +Cc: kvm, kraxel, anthony, Sander.Vanleeuwen, zach, brogers
Alexander Graf wrote:
> X86 CPUs need to have some magic happening to enable the virtualization
> extensions on them. This magic can result in unpleasant results for
> users, like blocking other VMMs from working (vmx) or using invalid TLB
> entries (svm).
>
> Currently KVM activates virtualization when the respective kernel module
> is loaded. This blocks us from autoloading KVM modules without breaking
> other VMMs.
>
> To circumvent this problem at least a bit, this patch introduces on
> demand activation of virtualization. This means, that instead
> virtualization is enabled on creation of the first virtual machine
> and disabled on removal of the last one.
>
> So using this, KVM can be easily autoloaded, while keeping other
> hypervisors usable.
>
> v2 adds returns to non-x86 hardware_enables and adds IA64 change
>
> @@ -563,19 +566,27 @@ static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
>
> static struct kvm *kvm_create_vm(void)
> {
> + int r;
> struct kvm *kvm = kvm_arch_create_vm();
> #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
> struct page *page;
> #endif
>
> if (IS_ERR(kvm))
> - goto out;
> + return kvm;
> +
> + if (atomic_add_return(1, &kvm_usage_count) == 1) {
> + on_each_cpu(hardware_enable, &r, 1);
> +
> + if (r)
> + goto out_err;
> + }
>
This can race -- if we're preempted immediately after
atomic_add_return(), a second vm creation will see the count elevated
and can start executing without virtualization enabled.
> +
> +out_err:
> + if (atomic_dec_and_test(&kvm_usage_count))
> + on_each_cpu(hardware_disable, NULL, 1);
>
Similar race.
> @@ -660,6 +674,8 @@ static void kvm_destroy_vm(struct kvm *kvm)
> mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
> #endif
> kvm_arch_destroy_vm(kvm);
> + if (atomic_dec_and_test(&kvm_usage_count))
> + on_each_cpu(hardware_disable, NULL, 1);
> mmdrop(mm);
> }
>
And again. I suggest returning to spinlocks (and placing the duplicated
disable code in a function).
>
> -static void hardware_enable(void *junk)
> +static void hardware_enable(void *_r)
> {
> int cpu = raw_smp_processor_id();
> + int r;
>
> if (cpu_isset(cpu, cpus_hardware_enabled))
> return;
> + r = kvm_arch_hardware_enable(NULL);
> + if (_r)
> + *((int*)_r) = r;
> + if (r) {
> + printk(KERN_INFO "kvm: enabling virtualization on "
> + "CPU%d failed\n", cpu);
> + return;
> + }
> +
> cpu_set(cpu, cpus_hardware_enabled);
> - kvm_arch_hardware_enable(NULL);
> }
>
We'll be in a nice fix if we can only enable virtualization on some
processors; that's the reason hardware_enable() was originally specified
as returning void.
I don't see an easy way out, but it's hardly a likely event.
> case CPU_UP_CANCELED:
> printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
> cpu);
> - smp_call_function_single(cpu, hardware_disable, NULL, 1);
> + if (atomic_read(&kvm_usage_count))
> + smp_call_function_single(cpu, hardware_disable,
> + NULL, 1);
> break;
> case CPU_ONLINE:
> printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
> cpu);
> - smp_call_function_single(cpu, hardware_enable, NULL, 1);
> + if (atomic_read(&kvm_usage_count))
> + smp_call_function_single(cpu, hardware_enable,
> + NULL, 1);
> break;
>
Are these called in a point where processes can't run? Otherwise
there's a race here.
> static int kvm_resume(struct sys_device *dev)
> {
> - hardware_enable(NULL);
> + if (atomic_read(&kvm_usage_count))
> + hardware_enable(NULL);
> return 0;
> }
>
Move the test to hardware_enable()? It's repeated too often.
--
error compiling committee.c: too many arguments to function
^ permalink raw reply [flat|nested] 22+ messages in thread* Re: [PATCH] Activate Virtualization On Demand v2
2008-11-05 10:06 ` Avi Kivity
@ 2008-11-05 10:28 ` Alexander Graf
2008-11-05 10:45 ` Avi Kivity
0 siblings, 1 reply; 22+ messages in thread
From: Alexander Graf @ 2008-11-05 10:28 UTC (permalink / raw)
To: Avi Kivity; +Cc: kvm, kraxel, anthony, Sander.Vanleeuwen, zach, brogers
Avi Kivity wrote:
> Alexander Graf wrote:
>> X86 CPUs need to have some magic happening to enable the virtualization
>> extensions on them. This magic can result in unpleasant results for
>> users, like blocking other VMMs from working (vmx) or using invalid TLB
>> entries (svm).
>>
>> Currently KVM activates virtualization when the respective kernel module
>> is loaded. This blocks us from autoloading KVM modules without breaking
>> other VMMs.
>>
>> To circumvent this problem at least a bit, this patch introduces on
>> demand activation of virtualization. This means, that instead
>> virtualization is enabled on creation of the first virtual machine
>> and disabled on removal of the last one.
>>
>> So using this, KVM can be easily autoloaded, while keeping other
>> hypervisors usable.
>>
>> v2 adds returns to non-x86 hardware_enables and adds IA64 change
[snip]
>
>> @@ -660,6 +674,8 @@ static void kvm_destroy_vm(struct kvm *kvm)
>> mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
>> #endif
>> kvm_arch_destroy_vm(kvm);
>> + if (atomic_dec_and_test(&kvm_usage_count))
>> + on_each_cpu(hardware_disable, NULL, 1);
>> mmdrop(mm);
>> }
>>
>
> And again. I suggest returning to spinlocks (and placing the
> duplicated disable code in a function).
OK.
>
>>
>> -static void hardware_enable(void *junk)
>> +static void hardware_enable(void *_r)
>> {
>> int cpu = raw_smp_processor_id();
>> + int r;
>>
>> if (cpu_isset(cpu, cpus_hardware_enabled))
>> return;
>> + r = kvm_arch_hardware_enable(NULL);
>> + if (_r)
>> + *((int*)_r) = r;
>> + if (r) {
>> + printk(KERN_INFO "kvm: enabling virtualization on "
>> + "CPU%d failed\n", cpu);
>> + return;
>> + }
>> +
>> cpu_set(cpu, cpus_hardware_enabled);
>> - kvm_arch_hardware_enable(NULL);
>> }
>>
>
> We'll be in a nice fix if we can only enable virtualization on some
> processors; that's the reason hardware_enable() was originally
> specified as returning void.
>
> I don't see an easy way out, but it's hardly a likely event.
I don't think there's any way we can circumvent that.
What I've wanted to ask for some time already: How does suspend/resume
work? I only see one suspend/resume hook that disables virt on the
currently running CPU. Why don't we have to loop through the CPUs to
enable/disable all of them?
At least for suspend-to-disk this sounds pretty necessary.
>> printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
>> cpu);
>> - smp_call_function_single(cpu, hardware_disable, NULL, 1);
>> + if (atomic_read(&kvm_usage_count))
>> + smp_call_function_single(cpu, hardware_disable,
>> + NULL, 1);
>> break;
>> case CPU_ONLINE:
>> printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
>> cpu);
>> - smp_call_function_single(cpu, hardware_enable, NULL, 1);
>> + if (atomic_read(&kvm_usage_count))
>> + smp_call_function_single(cpu, hardware_enable,
>> + NULL, 1);
>> break;
>>
> case CPU_UP_CANCELED:
>
> Are these called in a point where processes can't run? Otherwise
> there's a race here.
Yes.
static struct notifier_block kvm_cpu_notifier = {
.notifier_call = kvm_cpu_hotplug,
.priority = 20, /* must be > scheduler priority */
};
>
>> static int kvm_resume(struct sys_device *dev)
>> {
>> - hardware_enable(NULL);
>> + if (atomic_read(&kvm_usage_count))
>> + hardware_enable(NULL);
>> return 0;
>> }
>>
>
> Move the test to hardware_enable()? It's repeated too often.
What do we do about the on_each_cpu(hardware_enable) cases? We couldn't
tell when to activate/deactive virtualization then, as that's
semantically bound to "amount of VMs".
Alex
^ permalink raw reply [flat|nested] 22+ messages in thread* Re: [PATCH] Activate Virtualization On Demand v2
2008-11-05 10:28 ` Alexander Graf
@ 2008-11-05 10:45 ` Avi Kivity
2008-11-05 10:53 ` Alexander Graf
2008-11-05 11:23 ` Alexander Graf
0 siblings, 2 replies; 22+ messages in thread
From: Avi Kivity @ 2008-11-05 10:45 UTC (permalink / raw)
To: Alexander Graf; +Cc: kvm, kraxel, anthony, Sander.Vanleeuwen, zach, brogers
Alexander Graf wrote:
>> We'll be in a nice fix if we can only enable virtualization on some
>> processors; that's the reason hardware_enable() was originally
>> specified as returning void.
>>
>> I don't see an easy way out, but it's hardly a likely event.
>>
>
> I don't think there's any way we can circumvent that.
>
No. We can live with it though.
> What I've wanted to ask for some time already: How does suspend/resume
> work?
The question is important, even without the first word.
> I only see one suspend/resume hook that disables virt on the
> currently running CPU. Why don't we have to loop through the CPUs to
> enable/disable all of them?
> At least for suspend-to-disk this sounds pretty necessary.
>
>
Suspend first offlines all other cpus.
>>> static int kvm_resume(struct sys_device *dev)
>>> {
>>> - hardware_enable(NULL);
>>> + if (atomic_read(&kvm_usage_count))
>>> + hardware_enable(NULL);
>>> return 0;
>>> }
>>>
>>>
>> Move the test to hardware_enable()? It's repeated too often.
>>
>
> What do we do about the on_each_cpu(hardware_enable) cases? We couldn't
> tell when to activate/deactive virtualization then, as that's
> semantically bound to "amount of VMs".
>
I don't understand. Moving the test to within the IPI shouldn't affect
anything.
--
error compiling committee.c: too many arguments to function
^ permalink raw reply [flat|nested] 22+ messages in thread* Re: [PATCH] Activate Virtualization On Demand v2
2008-11-05 10:45 ` Avi Kivity
@ 2008-11-05 10:53 ` Alexander Graf
2008-11-05 11:23 ` Alexander Graf
1 sibling, 0 replies; 22+ messages in thread
From: Alexander Graf @ 2008-11-05 10:53 UTC (permalink / raw)
To: Avi Kivity; +Cc: kvm, kraxel, anthony, Sander.Vanleeuwen, zach, brogers
Avi Kivity wrote:
> Alexander Graf wrote:
>
>
>
>>> We'll be in a nice fix if we can only enable virtualization on some
>>> processors; that's the reason hardware_enable() was originally
>>> specified as returning void.
>>>
>>> I don't see an easy way out, but it's hardly a likely event.
>>>
>>
>> I don't think there's any way we can circumvent that.
>>
>
> No. We can live with it though.
>
>> What I've wanted to ask for some time already: How does suspend/resume
>> work?
>
> The question is important, even without the first word.
>
>> I only see one suspend/resume hook that disables virt on the
>> currently running CPU. Why don't we have to loop through the CPUs to
>> enable/disable all of them?
>> At least for suspend-to-disk this sounds pretty necessary.
>>
>>
>
> Suspend first offlines all other cpus.
Ah, ok.
>>>> {
>>>> - hardware_enable(NULL);
>>>> + if (atomic_read(&kvm_usage_count))
>>>> + hardware_enable(NULL);
>>>> return 0;
>>>> }
>>>>
>>> Move the test to hardware_enable()? It's repeated too often.
>>>
>>
>> What do we do about the on_each_cpu(hardware_enable) cases? We couldn't
>> tell when to activate/deactive virtualization then, as that's
>> semantically bound to "amount of VMs".
>>
> static int kvm_resume(struct sys_device *dev)
>
> I don't understand. Moving the test to within the IPI shouldn't
> affect anything.
Oh, you only want the test to be in hardware_enable and
hardware_disable. Now I see what you mean: modify and lock
kvm_usage_count outside, but test inside of hardware_enable.
^ permalink raw reply [flat|nested] 22+ messages in thread* Re: [PATCH] Activate Virtualization On Demand v2
2008-11-05 10:45 ` Avi Kivity
2008-11-05 10:53 ` Alexander Graf
@ 2008-11-05 11:23 ` Alexander Graf
1 sibling, 0 replies; 22+ messages in thread
From: Alexander Graf @ 2008-11-05 11:23 UTC (permalink / raw)
To: Avi Kivity; +Cc: kvm, kraxel, anthony, Sander.Vanleeuwen, zach, brogers
Avi Kivity wrote:
> Alexander Graf wrote:
[snip]
>>>> static int kvm_resume(struct sys_device *dev)
>>>> {
>>>> - hardware_enable(NULL);
>>>> + if (atomic_read(&kvm_usage_count))
>>>> + hardware_enable(NULL);
>>>> return 0;
>>>> }
>>>>
>>> Move the test to hardware_enable()? It's repeated too often.
>>>
>>
>> What do we do about the on_each_cpu(hardware_enable) cases? We couldn't
>> tell when to activate/deactive virtualization then, as that's
>> semantically bound to "amount of VMs".
>>
>
> I don't understand. Moving the test to within the IPI shouldn't
> affect anything.
>
Actually it's there already. Since we have the cpu_isset check here, we
won't get disabling when it's disabled or enabling when it's enabled on
a per-cpu basis. That code would've just saved us the IPIs :-).
So I'll add hardware_{en,dis}able_all functions that do the locking and
increase / decrease the counter. Disable is used twice, while enable
only once. But for the sake of code readability, I think it might be a
good idea to have both of them be functions.
Also the locking isn't really needed in most cases (CPU hotplug,
suspend, resume, reboot, exit), since we don't schedule there.
Alex
^ permalink raw reply [flat|nested] 22+ messages in thread
* RE: [PATCH] Activate Virtualization On Demand v2
2008-11-05 8:48 [PATCH] Activate Virtualization On Demand v2 Alexander Graf
2008-11-05 10:06 ` Avi Kivity
@ 2008-11-05 10:45 ` Zhang, Xiantao
2008-11-05 10:54 ` Alexander Graf
2008-11-05 10:58 ` Daniel P. Berrange
2008-11-05 13:06 ` Christian Borntraeger
3 siblings, 1 reply; 22+ messages in thread
From: Zhang, Xiantao @ 2008-11-05 10:45 UTC (permalink / raw)
To: Alexander Graf, kvm@vger.kernel.org
Cc: avi@redhat.com, kraxel@redhat.com, anthony@codemonkey.ws,
Sander.Vanleeuwen@sun.com, zach@vmware.com, brogers@novell.com
Alexander Graf wrote:
> X86 CPUs need to have some magic happening to enable the
> virtualization
> extensions on them. This magic can result in unpleasant results for
> users, like blocking other VMMs from working (vmx) or using invalid
> TLB
> entries (svm).
>
> Currently KVM activates virtualization when the respective kernel
> module
> is loaded. This blocks us from autoloading KVM modules without
> breaking
> other VMMs.
>
> To circumvent this problem at least a bit, this patch introduces on
> demand activation of virtualization. This means, that instead
> virtualization is enabled on creation of the first virtual machine
> and disabled on removal of the last one.
>
> So using this, KVM can be easily autoloaded, while keeping other
> hypervisors usable.
>
> v2 adds returns to non-x86 hardware_enables and adds IA64 change
>
> Signed-off-by: Alexander Graf <agraf@suse.de>
> ---
> arch/ia64/kvm/kvm-ia64.c | 8 +++--
> arch/powerpc/kvm/powerpc.c | 3 +-
> arch/s390/kvm/kvm-s390.c | 3 +-
> arch/x86/kvm/svm.c | 13 +++++--
> arch/x86/kvm/vmx.c | 7 ++++-
> arch/x86/kvm/x86.c | 4 +-
> include/asm-x86/kvm_host.h | 2 +-
> include/linux/kvm_host.h | 2 +-
> virt/kvm/kvm_main.c | 72
> +++++++++++++++++++++++++++++++------------ 9 files changed, 80
> insertions(+), 34 deletions(-)
>
> diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
> index c25c75b..2bd79db 100644
> --- a/arch/ia64/kvm/kvm-ia64.c
> +++ b/arch/ia64/kvm/kvm-ia64.c
> @@ -110,7 +110,7 @@ long ia64_pal_vp_create(u64 *vpd, u64 *host_iva,
> u64 *opt_handler)
>
> static DEFINE_SPINLOCK(vp_lock);
>
> -void kvm_arch_hardware_enable(void *garbage)
> +int kvm_arch_hardware_enable(void *garbage)
> {
> long status;
> long tmp_base;
> @@ -124,7 +124,7 @@ void kvm_arch_hardware_enable(void *garbage)
> slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT);
> local_irq_restore(saved_psr);
> if (slot < 0)
> - return;
> + return -ENOMEM;
Returning -EINVAL may be more accurate. Here slot < 0 means invalid or incorrect parameters were passed to ia64_itr_entry.
Xiantao
^ permalink raw reply [flat|nested] 22+ messages in thread* Re: [PATCH] Activate Virtualization On Demand v2
2008-11-05 10:45 ` Zhang, Xiantao
@ 2008-11-05 10:54 ` Alexander Graf
0 siblings, 0 replies; 22+ messages in thread
From: Alexander Graf @ 2008-11-05 10:54 UTC (permalink / raw)
To: Zhang, Xiantao
Cc: kvm@vger.kernel.org, avi@redhat.com, kraxel@redhat.com,
anthony@codemonkey.ws, Sander.Vanleeuwen@sun.com, zach@vmware.com,
brogers@novell.com
Zhang, Xiantao wrote:
> Alexander Graf wrote:
>
>> X86 CPUs need to have some magic happening to enable the
>> virtualization
>> extensions on them. This magic can result in unpleasant results for
>> users, like blocking other VMMs from working (vmx) or using invalid
>> TLB
>> entries (svm).
>>
>> Currently KVM activates virtualization when the respective kernel
>> module
>> is loaded. This blocks us from autoloading KVM modules without
>> breaking
>> other VMMs.
>>
>> To circumvent this problem at least a bit, this patch introduces on
>> demand activation of virtualization. This means, that instead
>> virtualization is enabled on creation of the first virtual machine
>> and disabled on removal of the last one.
>>
>> So using this, KVM can be easily autoloaded, while keeping other
>> hypervisors usable.
>>
>> v2 adds returns to non-x86 hardware_enables and adds IA64 change
>>
>> Signed-off-by: Alexander Graf <agraf@suse.de>
>> ---
>> arch/ia64/kvm/kvm-ia64.c | 8 +++--
>> arch/powerpc/kvm/powerpc.c | 3 +-
>> arch/s390/kvm/kvm-s390.c | 3 +-
>> arch/x86/kvm/svm.c | 13 +++++--
>> arch/x86/kvm/vmx.c | 7 ++++-
>> arch/x86/kvm/x86.c | 4 +-
>> include/asm-x86/kvm_host.h | 2 +-
>> include/linux/kvm_host.h | 2 +-
>> virt/kvm/kvm_main.c | 72
>> +++++++++++++++++++++++++++++++------------ 9 files changed, 80
>> insertions(+), 34 deletions(-)
>>
>> diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
>> index c25c75b..2bd79db 100644
>> --- a/arch/ia64/kvm/kvm-ia64.c
>> +++ b/arch/ia64/kvm/kvm-ia64.c
>> @@ -110,7 +110,7 @@ long ia64_pal_vp_create(u64 *vpd, u64 *host_iva,
>> u64 *opt_handler)
>>
>> static DEFINE_SPINLOCK(vp_lock);
>>
>> -void kvm_arch_hardware_enable(void *garbage)
>> +int kvm_arch_hardware_enable(void *garbage)
>> {
>> long status;
>> long tmp_base;
>> @@ -124,7 +124,7 @@ void kvm_arch_hardware_enable(void *garbage)
>> slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT);
>> local_irq_restore(saved_psr);
>> if (slot < 0)
>> - return;
>> + return -ENOMEM;
>>
>
> Return -EINVAL maybe more accurate. Here slot < 0 means invalid or uncorrect parameters are passed to ia64_itr_entry.
> Xiantao
>
Okies.
^ permalink raw reply [flat|nested] 22+ messages in thread
* Re: [PATCH] Activate Virtualization On Demand v2
2008-11-05 8:48 [PATCH] Activate Virtualization On Demand v2 Alexander Graf
2008-11-05 10:06 ` Avi Kivity
2008-11-05 10:45 ` Zhang, Xiantao
@ 2008-11-05 10:58 ` Daniel P. Berrange
2008-11-05 11:01 ` Alexander Graf
2008-11-05 13:06 ` Christian Borntraeger
3 siblings, 1 reply; 22+ messages in thread
From: Daniel P. Berrange @ 2008-11-05 10:58 UTC (permalink / raw)
To: Alexander Graf
Cc: kvm, avi, kraxel, anthony, Sander.Vanleeuwen, zach, brogers
On Wed, Nov 05, 2008 at 09:48:16AM +0100, Alexander Graf wrote:
> X86 CPUs need to have some magic happening to enable the virtualization
> extensions on them. This magic can result in unpleasant results for
> users, like blocking other VMMs from working (vmx) or using invalid TLB
> entries (svm).
>
> Currently KVM activates virtualization when the respective kernel module
> is loaded. This blocks us from autoloading KVM modules without breaking
> other VMMs.
>
> To circumvent this problem at least a bit, this patch introduces on
> demand activation of virtualization. This means, that instead
> virtualization is enabled on creation of the first virtual machine
> and disabled on removal of the last one.
Pardon my unfamiliarity with the code, but with this change applied,
will we still get an explicit error at time of loading kvm-intel.ko
if VMX were disabled in the BIOS ? Or will that error reporting be
pushed off to time when VMX is first activated ?
Daniel
--
|: Red Hat, Engineering, London -o- http://people.redhat.com/berrange/ :|
|: http://libvirt.org -o- http://virt-manager.org -o- http://ovirt.org :|
|: http://autobuild.org -o- http://search.cpan.org/~danberr/ :|
|: GnuPG: 7D3B9505 -o- F3C9 553F A1DA 4AC2 5648 23C1 B3DF F742 7D3B 9505 :|
^ permalink raw reply [flat|nested] 22+ messages in thread
* Re: [PATCH] Activate Virtualization On Demand v2
2008-11-05 10:58 ` Daniel P. Berrange
@ 2008-11-05 11:01 ` Alexander Graf
0 siblings, 0 replies; 22+ messages in thread
From: Alexander Graf @ 2008-11-05 11:01 UTC (permalink / raw)
To: Daniel P. Berrange
Cc: kvm, avi, kraxel, anthony, Sander.Vanleeuwen, zach, brogers
Daniel P. Berrange wrote:
> On Wed, Nov 05, 2008 at 09:48:16AM +0100, Alexander Graf wrote:
>
>> X86 CPUs need to have some magic happening to enable the virtualization
>> extensions on them. This magic can result in unpleasant results for
>> users, like blocking other VMMs from working (vmx) or using invalid TLB
>> entries (svm).
>>
>> Currently KVM activates virtualization when the respective kernel module
>> is loaded. This blocks us from autoloading KVM modules without breaking
>> other VMMs.
>>
>> To circumvent this problem at least a bit, this patch introduces on
>> demand activation of virtualization. This means, that instead
>> virtualization is enabled on creation of the first virtual machine
>> and disabled on removal of the last one.
>>
>
> Pardon my unfamiliarity with the code, but with this change applied,
> will we still get an explicit error at time of loading kvm-intel.ko
> if VMX were disabled in the BIOS ? Or will that error reporting be
> pushed off to time when VMX is first activated ?
>
The BIOS and hardware support checks are in kvm_arch_init, which is
still called at load-time of the module.
Alex
^ permalink raw reply [flat|nested] 22+ messages in thread
* Re: [PATCH] Activate Virtualization On Demand v2
2008-11-05 8:48 [PATCH] Activate Virtualization On Demand v2 Alexander Graf
` (2 preceding siblings ...)
2008-11-05 10:58 ` Daniel P. Berrange
@ 2008-11-05 13:06 ` Christian Borntraeger
2008-11-05 13:12 ` Avi Kivity
3 siblings, 1 reply; 22+ messages in thread
From: Christian Borntraeger @ 2008-11-05 13:06 UTC (permalink / raw)
To: Alexander Graf
Cc: kvm, avi, kraxel, anthony, Sander.Vanleeuwen, zach, brogers
Am Mittwoch, 5. November 2008 schrieb Alexander Graf:
> printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
> cpu);
[...]
> printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
> cpu);
[...]
> printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
> cpu);
[...]
> printk(KERN_INFO "kvm: exiting hardware virtualization\n");
When you are at it, could you move these printk calls to the arches that actually
enable/disable virtualization?
For example you could do something like
if (callback) {
printk "...";
callback();
}
And then you could remove kvm_arch_hardware_enable/disable from s390 and
powerpc.
Having these messages on s390 and powerpc makes absolutely no sense.
^ permalink raw reply [flat|nested] 22+ messages in thread* Re: [PATCH] Activate Virtualization On Demand v2
2008-11-05 13:06 ` Christian Borntraeger
@ 2008-11-05 13:12 ` Avi Kivity
0 siblings, 0 replies; 22+ messages in thread
From: Avi Kivity @ 2008-11-05 13:12 UTC (permalink / raw)
To: Christian Borntraeger
Cc: Alexander Graf, kvm, kraxel, anthony, Sander.Vanleeuwen, zach,
brogers
Christian Borntraeger wrote:
> When you are at it, could you move these printk calls to the arches that actually
> enable/disable virtualization?
>
> For example you could do something like
> if (callback) {
> printk "...";
> callback();
> }
>
> And then you could remove kvm_arch_hardware_enable/disable from s390 and
> powerpc.
>
> Having these messages on s390 and powerpc makes absolutely no sense.
>
In a separate patch, this is big enough already.
We could do this using kconfig
config KVM_NEEDS_HARDWARE_DETECT
bool
default n
and override it for x86/ia64. This would remove the need to implement
dummy callbacks in s390/ppc.
--
error compiling committee.c: too many arguments to function
^ permalink raw reply [flat|nested] 22+ messages in thread
* [PATCH] Activate Virtualization On Demand v2
@ 2009-06-15 11:30 Alexander Graf
2009-06-15 12:17 ` Christoph Hellwig
2009-06-16 14:01 ` Avi Kivity
0 siblings, 2 replies; 22+ messages in thread
From: Alexander Graf @ 2009-06-15 11:30 UTC (permalink / raw)
To: kvm
X86 CPUs need to have some magic happening to enable the virtualization
extensions on them. This magic can result in unpleasant results for
users, like blocking other VMMs from working (vmx) or using invalid TLB
entries (svm).
Currently KVM activates virtualization when the respective kernel module
is loaded. This blocks us from autoloading KVM modules without breaking
other VMMs.
To circumvent this problem at least a bit, this patch introduces on
demand activation of virtualization. This means, that instead
virtualization is enabled on creation of the first virtual machine
and disabled on destruction of the last one.
So using this, KVM can be easily autoloaded, while keeping other
hypervisors usable.
---
v2 uses kvm_lock and traces failures atomically
Signed-off-by: Alexander Graf <agraf@suse.de>
---
arch/ia64/kvm/kvm-ia64.c | 8 ++-
arch/powerpc/kvm/powerpc.c | 2 +-
arch/s390/kvm/kvm-s390.c | 2 +-
arch/x86/include/asm/kvm_host.h | 2 +-
arch/x86/kvm/svm.c | 13 ++++--
arch/x86/kvm/vmx.c | 7 +++-
arch/x86/kvm/x86.c | 4 +-
include/linux/kvm_host.h | 2 +-
virt/kvm/kvm_main.c | 82 +++++++++++++++++++++++++++++++++------
9 files changed, 96 insertions(+), 26 deletions(-)
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index 906d597..3141a92 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -124,7 +124,7 @@ long ia64_pal_vp_create(u64 *vpd, u64 *host_iva, u64 *opt_handler)
static DEFINE_SPINLOCK(vp_lock);
-void kvm_arch_hardware_enable(void *garbage)
+int kvm_arch_hardware_enable(void *garbage)
{
long status;
long tmp_base;
@@ -137,7 +137,7 @@ void kvm_arch_hardware_enable(void *garbage)
slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT);
local_irq_restore(saved_psr);
if (slot < 0)
- return;
+ return -EINVAL;
spin_lock(&vp_lock);
status = ia64_pal_vp_init_env(kvm_vsa_base ?
@@ -145,7 +145,7 @@ void kvm_arch_hardware_enable(void *garbage)
__pa(kvm_vm_buffer), KVM_VM_BUFFER_BASE, &tmp_base);
if (status != 0) {
printk(KERN_WARNING"kvm: Failed to Enable VT Support!!!!\n");
- return ;
+ return -EINVAL;
}
if (!kvm_vsa_base) {
@@ -154,6 +154,8 @@ void kvm_arch_hardware_enable(void *garbage)
}
spin_unlock(&vp_lock);
ia64_ptr_entry(0x3, slot);
+
+ return 0;
}
void kvm_arch_hardware_disable(void *garbage)
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 9057335..6558ab7 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -80,7 +80,8 @@ int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
return r;
}
-void kvm_arch_hardware_enable(void *garbage)
+int kvm_arch_hardware_enable(void *garbage)
{
+ return 0;
}
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index cbfe91e..a14e676 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -70,7 +70,8 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
/* Section: not file related */
-void kvm_arch_hardware_enable(void *garbage)
+int kvm_arch_hardware_enable(void *garbage)
{
/* every s390 is virtualization enabled ;-) */
+ return 0;
}
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 4627627..72d5075 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -463,7 +463,7 @@ struct descriptor_table {
struct kvm_x86_ops {
int (*cpu_has_kvm_support)(void); /* __init */
int (*disabled_by_bios)(void); /* __init */
- void (*hardware_enable)(void *dummy); /* __init */
+ int (*hardware_enable)(void *dummy);
void (*hardware_disable)(void *dummy);
void (*check_processor_compatibility)(void *rtn);
int (*hardware_setup)(void); /* __init */
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 04ee964..47a8b94 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -245,7 +245,7 @@ static void svm_hardware_disable(void *garbage)
cpu_svm_disable();
}
-static void svm_hardware_enable(void *garbage)
+static int svm_hardware_enable(void *garbage)
{
struct svm_cpu_data *svm_data;
@@ -254,16 +254,20 @@ static void svm_hardware_enable(void *garbage)
struct desc_struct *gdt;
int me = raw_smp_processor_id();
+ rdmsrl(MSR_EFER, efer);
+ if (efer & EFER_SVME)
+ return -EBUSY;
+
if (!has_svm()) {
printk(KERN_ERR "svm_cpu_init: err EOPNOTSUPP on %d\n", me);
- return;
+ return -EINVAL;
}
svm_data = per_cpu(svm_data, me);
if (!svm_data) {
printk(KERN_ERR "svm_cpu_init: svm_data is NULL on %d\n",
me);
- return;
+ return -EINVAL;
}
svm_data->asid_generation = 1;
@@ -274,11 +278,12 @@ static void svm_hardware_enable(void *garbage)
gdt = (struct desc_struct *)gdt_descr.address;
svm_data->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);
- rdmsrl(MSR_EFER, efer);
wrmsrl(MSR_EFER, efer | EFER_SVME);
wrmsrl(MSR_VM_HSAVE_PA,
page_to_pfn(svm_data->save_area) << PAGE_SHIFT);
+
+ return 0;
}
static void svm_cpu_uninit(int cpu)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index d3919ac..3df3b0a 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1068,12 +1068,15 @@ static __init int vmx_disabled_by_bios(void)
/* locked but not enabled */
}
-static void hardware_enable(void *garbage)
+static int hardware_enable(void *garbage)
{
int cpu = raw_smp_processor_id();
u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
u64 old;
+ if (read_cr4() & X86_CR4_VMXE)
+ return -EBUSY;
+
INIT_LIST_HEAD(&per_cpu(vcpus_on_cpu, cpu));
rdmsrl(MSR_IA32_FEATURE_CONTROL, old);
if ((old & (FEATURE_CONTROL_LOCKED |
@@ -1088,6 +1091,8 @@ static void hardware_enable(void *garbage)
asm volatile (ASM_VMX_VMXON_RAX
: : "a"(&phys_addr), "m"(phys_addr)
: "memory", "cc");
+
+ return 0;
}
static void vmclear_local_vcpus(void)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 03431b2..bfef950 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -4222,9 +4222,9 @@ int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu)
return kvm_x86_ops->vcpu_reset(vcpu);
}
-void kvm_arch_hardware_enable(void *garbage)
+int kvm_arch_hardware_enable(void *garbage)
{
- kvm_x86_ops->hardware_enable(garbage);
+ return kvm_x86_ops->hardware_enable(garbage);
}
void kvm_arch_hardware_disable(void *garbage)
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 11eb702..7678995 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -292,7 +292,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);
int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu);
-void kvm_arch_hardware_enable(void *garbage);
+int kvm_arch_hardware_enable(void *garbage);
void kvm_arch_hardware_disable(void *garbage);
int kvm_arch_hardware_setup(void);
void kvm_arch_hardware_unsetup(void);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 92ef725..ead53e4 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -65,6 +65,8 @@ DEFINE_SPINLOCK(kvm_lock);
LIST_HEAD(vm_list);
static cpumask_var_t cpus_hardware_enabled;
+static int kvm_usage_count = 0;
+static atomic_t hardware_enable_failed;
struct kmem_cache *kvm_vcpu_cache;
EXPORT_SYMBOL_GPL(kvm_vcpu_cache);
@@ -75,6 +77,8 @@ struct dentry *kvm_debugfs_dir;
static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
unsigned long arg);
+static int hardware_enable_all(void);
+static void hardware_disable_all(void);
static bool kvm_rebooting;
@@ -931,6 +935,7 @@ static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
static struct kvm *kvm_create_vm(void)
{
+ int r = 0;
struct kvm *kvm = kvm_arch_create_vm();
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
struct page *page;
@@ -938,6 +943,11 @@ static struct kvm *kvm_create_vm(void)
if (IS_ERR(kvm))
goto out;
+
+ r = hardware_enable_all();
+ if (r)
+ goto out_err;
+
#ifdef CONFIG_HAVE_KVM_IRQCHIP
INIT_LIST_HEAD(&kvm->irq_routing);
INIT_HLIST_HEAD(&kvm->mask_notifier_list);
@@ -946,8 +956,8 @@ static struct kvm *kvm_create_vm(void)
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
page = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (!page) {
- kfree(kvm);
- return ERR_PTR(-ENOMEM);
+ r = -ENOMEM;
+ goto out_err;
}
kvm->coalesced_mmio_ring =
(struct kvm_coalesced_mmio_ring *)page_address(page);
@@ -955,15 +965,13 @@ static struct kvm *kvm_create_vm(void)
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
{
- int err;
kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
- err = mmu_notifier_register(&kvm->mmu_notifier, current->mm);
- if (err) {
+ r = mmu_notifier_register(&kvm->mmu_notifier, current->mm);
+ if (r) {
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
put_page(page);
#endif
- kfree(kvm);
- return ERR_PTR(err);
+ goto out_err;
}
}
#endif
@@ -984,6 +992,11 @@ static struct kvm *kvm_create_vm(void)
#endif
out:
return kvm;
+
+out_err:
+ hardware_disable_all();
+ kfree(kvm);
+ return ERR_PTR(r);
}
/*
@@ -1036,6 +1049,7 @@ static void kvm_destroy_vm(struct kvm *kvm)
kvm_arch_flush_shadow(kvm);
#endif
kvm_arch_destroy_vm(kvm);
+ hardware_disable_all();
mmdrop(mm);
}
@@ -2332,11 +2346,41 @@ static struct miscdevice kvm_dev = {
static void hardware_enable(void *junk)
{
int cpu = raw_smp_processor_id();
+ int r;
if (cpumask_test_cpu(cpu, cpus_hardware_enabled))
return;
+
cpumask_set_cpu(cpu, cpus_hardware_enabled);
- kvm_arch_hardware_enable(NULL);
+
+ r = kvm_arch_hardware_enable(NULL);
+
+ if (r) {
+ cpumask_clear_cpu(cpu, cpus_hardware_enabled);
+ atomic_inc(&hardware_enable_failed);
+ printk(KERN_INFO "kvm: enabling virtualization on "
+ "CPU%d failed\n", cpu);
+ }
+}
+
+static int hardware_enable_all(void)
+{
+ int r = 0;
+
+ spin_lock(&kvm_lock);
+
+ kvm_usage_count++;
+ if (kvm_usage_count == 1) {
+ atomic_set(&hardware_enable_failed, 1);
+ on_each_cpu(hardware_enable, NULL, 1);
+
+ if (!atomic_dec_and_test(&hardware_enable_failed))
+ r = -EBUSY;
+ }
+
+ spin_unlock(&kvm_lock);
+
+ return r;
}
static void hardware_disable(void *junk)
@@ -2349,11 +2393,25 @@ static void hardware_disable(void *junk)
kvm_arch_hardware_disable(NULL);
}
+static void hardware_disable_all(void)
+{
+ BUG_ON(!kvm_usage_count);
+
+ spin_lock(&kvm_lock);
+ kvm_usage_count--;
+ if (!kvm_usage_count)
+ on_each_cpu(hardware_disable, NULL, 1);
+ spin_unlock(&kvm_lock);
+}
+
static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
void *v)
{
int cpu = (long)v;
+ if (!kvm_usage_count)
+ return NOTIFY_OK;
+
val &= ~CPU_TASKS_FROZEN;
switch (val) {
case CPU_DYING:
@@ -2513,13 +2571,15 @@ static void kvm_exit_debug(void)
static int kvm_suspend(struct sys_device *dev, pm_message_t state)
{
- hardware_disable(NULL);
+ if (kvm_usage_count)
+ hardware_disable(NULL);
return 0;
}
static int kvm_resume(struct sys_device *dev)
{
- hardware_enable(NULL);
+ if (kvm_usage_count)
+ hardware_enable(NULL);
return 0;
}
@@ -2596,7 +2656,6 @@ int kvm_init(void *opaque, unsigned int vcpu_size,
goto out_free_1;
}
- on_each_cpu(hardware_enable, NULL, 1);
r = register_cpu_notifier(&kvm_cpu_notifier);
if (r)
goto out_free_2;
@@ -2644,7 +2703,6 @@ out_free_3:
unregister_reboot_notifier(&kvm_reboot_notifier);
unregister_cpu_notifier(&kvm_cpu_notifier);
out_free_2:
- on_each_cpu(hardware_disable, NULL, 1);
out_free_1:
kvm_arch_hardware_unsetup();
out_free_0a:
--
1.6.0.2
^ permalink raw reply related [flat|nested] 22+ messages in thread* Re: [PATCH] Activate Virtualization On Demand v2
2009-06-15 11:30 Alexander Graf
@ 2009-06-15 12:17 ` Christoph Hellwig
2009-06-15 12:25 ` Alexander Graf
2009-06-16 14:02 ` Avi Kivity
2009-06-16 14:01 ` Avi Kivity
1 sibling, 2 replies; 22+ messages in thread
From: Christoph Hellwig @ 2009-06-15 12:17 UTC (permalink / raw)
To: Alexander Graf; +Cc: kvm
On Mon, Jun 15, 2009 at 01:30:05PM +0200, Alexander Graf wrote:
> X86 CPUs need to have some magic happening to enable the virtualization
> extensions on them. This magic can result in unpleasant results for
> users, like blocking other VMMs from working (vmx) or using invalid TLB
> entries (svm).
>
> Currently KVM activates virtualization when the respective kernel module
> is loaded. This blocks us from autoloading KVM modules without breaking
> other VMMs.
That will only become interesting if we ever have such a thing in
mainline. So NACK, lots of complication for no good reason.
^ permalink raw reply [flat|nested] 22+ messages in thread
* Re: [PATCH] Activate Virtualization On Demand v2
2009-06-15 12:17 ` Christoph Hellwig
@ 2009-06-15 12:25 ` Alexander Graf
2009-06-15 12:27 ` Christoph Hellwig
2009-06-16 14:02 ` Avi Kivity
1 sibling, 1 reply; 22+ messages in thread
From: Alexander Graf @ 2009-06-15 12:25 UTC (permalink / raw)
To: Christoph Hellwig; +Cc: kvm
On 15.06.2009, at 14:17, Christoph Hellwig wrote:
> On Mon, Jun 15, 2009 at 01:30:05PM +0200, Alexander Graf wrote:
>> X86 CPUs need to have some magic happening to enable the
>> virtualization
>> extensions on them. This magic can result in unpleasant results for
>> users, like blocking other VMMs from working (vmx) or using invalid
>> TLB
>> entries (svm).
>>
>> Currently KVM activates virtualization when the respective kernel
>> module
>> is loaded. This blocks us from autoloading KVM modules without
>> breaking
>> other VMMs.
>
> That will only become interesting if we every have such a thing in
> mainline. So NACK, lots of complication for no good reason.
>
I don't want to fight political battles here. Seriously - we're out of
kindergarten.
There are users out there who want to have VBox/VMware and kvm
installed in parallel and can't have both kernel modules loaded at the
same time. We're only hurting _our_ users, not the others if we keep
people from having kvm*.ko loaded.
Sigh.
Alex
^ permalink raw reply [flat|nested] 22+ messages in thread
* Re: [PATCH] Activate Virtualization On Demand v2
2009-06-15 12:17 ` Christoph Hellwig
2009-06-15 12:25 ` Alexander Graf
@ 2009-06-16 14:02 ` Avi Kivity
1 sibling, 0 replies; 22+ messages in thread
From: Avi Kivity @ 2009-06-16 14:02 UTC (permalink / raw)
To: Christoph Hellwig; +Cc: Alexander Graf, kvm
On 06/15/2009 03:17 PM, Christoph Hellwig wrote:
> On Mon, Jun 15, 2009 at 01:30:05PM +0200, Alexander Graf wrote:
>
>> X86 CPUs need to have some magic happening to enable the virtualization
>> extensions on them. This magic can result in unpleasant results for
>> users, like blocking other VMMs from working (vmx) or using invalid TLB
>> entries (svm).
>>
>> Currently KVM activates virtualization when the respective kernel module
>> is loaded. This blocks us from autoloading KVM modules without breaking
>> other VMMs.
>>
>
> That will only become interesting if we ever have such a thing in
> mainline. So NACK, lots of complication for no good reason.
>
If it were truly lots of complication, I might agree. But it isn't, and
we keep getting reports from users about it.
--
error compiling committee.c: too many arguments to function
^ permalink raw reply [flat|nested] 22+ messages in thread
* Re: [PATCH] Activate Virtualization On Demand v2
2009-06-15 11:30 Alexander Graf
2009-06-15 12:17 ` Christoph Hellwig
@ 2009-06-16 14:01 ` Avi Kivity
2009-06-16 14:08 ` Alexander Graf
1 sibling, 1 reply; 22+ messages in thread
From: Avi Kivity @ 2009-06-16 14:01 UTC (permalink / raw)
To: Alexander Graf; +Cc: kvm
On 06/15/2009 02:30 PM, Alexander Graf wrote:
> X86 CPUs need to have some magic happening to enable the virtualization
> extensions on them. This magic can result in unpleasant results for
> users, like blocking other VMMs from working (vmx) or using invalid TLB
> entries (svm).
>
> Currently KVM activates virtualization when the respective kernel module
> is loaded. This blocks us from autoloading KVM modules without breaking
> other VMMs.
>
> To circumvent this problem at least a bit, this patch introduces on
> demand activation of virtualization. This means, that instead
> virtualization is enabled on creation of the first virtual machine
> and disabled on destruction of the last one.
>
> So using this, KVM can be easily autoloaded, while keeping other
> hypervisors usable.
>
> +static int hardware_enable_all(void)
> +{
> + int r = 0;
> +
> + spin_lock(&kvm_lock);
> +
> + kvm_usage_count++;
> + if (kvm_usage_count == 1) {
> + atomic_set(&hardware_enable_failed, 1);
> + on_each_cpu(hardware_enable, NULL, 1);
> +
> + if (!atomic_dec_and_test(&hardware_enable_failed))
> + r = -EBUSY;
> + }
>
That's a little obfuscated. I suggest atomic_set(..., p) and
atomic_read(...).
> +
> static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
> void *v)
> {
> int cpu = (long)v;
>
> + if (!kvm_usage_count)
> + return NOTIFY_OK;
> +
> val&= ~CPU_TASKS_FROZEN;
> switch (val) {
> case CPU_DYING:
> @@ -2513,13 +2571,15 @@ static void kvm_exit_debug(void)
>
> static int kvm_suspend(struct sys_device *dev, pm_message_t state)
> {
> - hardware_disable(NULL);
> + if (kvm_usage_count)
> + hardware_disable(NULL);
> return 0;
> }
>
> static int kvm_resume(struct sys_device *dev)
> {
> - hardware_enable(NULL);
> + if (kvm_usage_count)
> + hardware_enable(NULL);
> return 0;
> }
>
>
Please tell me you tested suspend/resume with/without VMs and cpu
hotunplug/hotplug.
--
error compiling committee.c: too many arguments to function
^ permalink raw reply [flat|nested] 22+ messages in thread* Re: [PATCH] Activate Virtualization On Demand v2
2009-06-16 14:01 ` Avi Kivity
@ 2009-06-16 14:08 ` Alexander Graf
2009-06-16 15:13 ` Avi Kivity
0 siblings, 1 reply; 22+ messages in thread
From: Alexander Graf @ 2009-06-16 14:08 UTC (permalink / raw)
To: Avi Kivity; +Cc: kvm
Avi Kivity wrote:
> On 06/15/2009 02:30 PM, Alexander Graf wrote:
>> X86 CPUs need to have some magic happening to enable the virtualization
>> extensions on them. This magic can result in unpleasant results for
>> users, like blocking other VMMs from working (vmx) or using invalid TLB
>> entries (svm).
>>
>> Currently KVM activates virtualization when the respective kernel module
>> is loaded. This blocks us from autoloading KVM modules without breaking
>> other VMMs.
>>
>> To circumvent this problem at least a bit, this patch introduces on
>> demand activation of virtualization. This means, that instead
>> virtualization is enabled on creation of the first virtual machine
>> and disabled on destruction of the last one.
>>
>> So using this, KVM can be easily autoloaded, while keeping other
>> hypervisors usable.
>>
>> +static int hardware_enable_all(void)
>> +{
>> + int r = 0;
>> +
>> + spin_lock(&kvm_lock);
>> +
>> + kvm_usage_count++;
>> + if (kvm_usage_count == 1) {
>> + atomic_set(&hardware_enable_failed, 1);
>> + on_each_cpu(hardware_enable, NULL, 1);
>> +
>> + if (!atomic_dec_and_test(&hardware_enable_failed))
>> + r = -EBUSY;
>> + }
>>
>
> That's a little obfuscated. I suggest atomic_set(..., p) and
> atomic_read(...).
Ah, I was more searching for an atomic_test :-).
>> static int kvm_cpu_hotplug(struct notifier_block *notifier,
>> unsigned long val,
>> void *v)
>> {
>> int cpu = (long)v;
>>
>> + if (!kvm_usage_count)
>> + return NOTIFY_OK;
>> +
>> val&= ~CPU_TASKS_FROZEN;
>> switch (val) {
>> case CPU_DYING:
>> @@ -2513,13 +2571,15 @@ static void kvm_exit_debug(void)
>>
>> static int kvm_suspend(struct sys_device *dev, pm_message_t state)
>> {
>> - hardware_disable(NULL);
>> + if (kvm_usage_count)
>> + hardware_disable(NULL);
>> return 0;
>> }
>>
>> static int kvm_resume(struct sys_device *dev)
>> {
>> - hardware_enable(NULL);
>> + if (kvm_usage_count)
>> + hardware_enable(NULL);
>> return 0;
>> }
>>
>>
> +
>
> Please tell me you tested suspend/resume with/without VMs and cpu
> hotunplug/hotplug.
I tested cpu hotplugging. On the last round I tested suspend/resume, but
this time I couldn't because my machine can't do suspend :-(.
So I'll try hard and find a machine I can test it on for the next round.
Alex
^ permalink raw reply [flat|nested] 22+ messages in thread* Re: [PATCH] Activate Virtualization On Demand v2
2009-06-16 14:08 ` Alexander Graf
@ 2009-06-16 15:13 ` Avi Kivity
2009-06-17 21:56 ` Alexander Graf
0 siblings, 1 reply; 22+ messages in thread
From: Avi Kivity @ 2009-06-16 15:13 UTC (permalink / raw)
To: Alexander Graf; +Cc: kvm
On 06/16/2009 05:08 PM, Alexander Graf wrote:
>> Please tell me you tested suspend/resume with/without VMs and cpu
>> hotunplug/hotplug.
>>
>
> I tested cpu hotplugging. On the last round I tested suspend/resume, but
> this time I couldn't because my machine can't do suspend :-(.
> So I'll try hard and find a machine I can test it on for the next round.
>
I can test suspend/resume for you if you don't have a friendly machine.
I have a personal interest in keeping it working :)
--
error compiling committee.c: too many arguments to function
^ permalink raw reply [flat|nested] 22+ messages in thread
* Re: [PATCH] Activate Virtualization On Demand v2
2009-06-16 15:13 ` Avi Kivity
@ 2009-06-17 21:56 ` Alexander Graf
2009-06-18 8:35 ` Avi Kivity
0 siblings, 1 reply; 22+ messages in thread
From: Alexander Graf @ 2009-06-17 21:56 UTC (permalink / raw)
To: Avi Kivity; +Cc: kvm
On 16.06.2009, at 17:13, Avi Kivity wrote:
> On 06/16/2009 05:08 PM, Alexander Graf wrote:
>>> Please tell me you tested suspend/resume with/without VMs and cpu
>>> hotunplug/hotplug.
>>>
>>
>> I tested cpu hotplugging. On the last round I tested suspend/
>> resume, but
>> this time I couldn't because my machine can't do suspend :-(.
>> So I'll try hard and find a machine I can test it on for the next
>> round.
>>
>
> I can test suspend/resume for you if you don't have a friendly
> machine. I have a personal interest in keeping it working :)
Thinking about it again - there's only the atomic dec_and_test vs.
read thing and the suspend test missing.
Is the atomic operation as is really that confusing? If not, we can
keep the patch as is and you simply try s2ram on your notebook :-).
I'm pretty sure it works - it used to.
Alex
^ permalink raw reply [flat|nested] 22+ messages in thread
* Re: [PATCH] Activate Virtualization On Demand v2
2009-06-17 21:56 ` Alexander Graf
@ 2009-06-18 8:35 ` Avi Kivity
0 siblings, 0 replies; 22+ messages in thread
From: Avi Kivity @ 2009-06-18 8:35 UTC (permalink / raw)
To: Alexander Graf; +Cc: kvm
On 06/18/2009 12:56 AM, Alexander Graf wrote:
>> I can test suspend/resume for you if you don't have a friendly
>> machine. I have a personal interest in keeping it working :)
>
>
> Thinking about it again - there's only the atomic dec_and_test vs.
> read thing and the suspend test missing.
>
> Is the atomic operation as is really that confusing?
Yes. It says, "something tricky is going on, see if you can find it".
> If not, we can keep the patch as is and you simply try s2ram on your
> notebook :-). I'm pretty sure it works - it used to.
It looks like it will work, but these things are tricky. I'll test an
updated patch. Please also test reboot on Intel with a VM spinning and
with no VMs loaded - Intel reboots are tricky too.
--
error compiling committee.c: too many arguments to function
^ permalink raw reply [flat|nested] 22+ messages in thread
end of thread, other threads:[~2009-06-18 8:34 UTC | newest]
Thread overview: 22+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2008-11-05 8:48 [PATCH] Activate Virtualization On Demand v2 Alexander Graf
2008-11-05 10:06 ` Avi Kivity
2008-11-05 10:28 ` Alexander Graf
2008-11-05 10:45 ` Avi Kivity
2008-11-05 10:53 ` Alexander Graf
2008-11-05 11:23 ` Alexander Graf
2008-11-05 10:45 ` Zhang, Xiantao
2008-11-05 10:54 ` Alexander Graf
2008-11-05 10:58 ` Daniel P. Berrange
2008-11-05 11:01 ` Alexander Graf
2008-11-05 13:06 ` Christian Borntraeger
2008-11-05 13:12 ` Avi Kivity
-- strict thread matches above, loose matches on Subject: below --
2009-06-15 11:30 Alexander Graf
2009-06-15 12:17 ` Christoph Hellwig
2009-06-15 12:25 ` Alexander Graf
2009-06-15 12:27 ` Christoph Hellwig
2009-06-16 14:02 ` Avi Kivity
2009-06-16 14:01 ` Avi Kivity
2009-06-16 14:08 ` Alexander Graf
2009-06-16 15:13 ` Avi Kivity
2009-06-17 21:56 ` Alexander Graf
2009-06-18 8:35 ` Avi Kivity
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox