* [PATCH] Activate Virtualization On Demand v3
@ 2008-11-05 10:41 Alexander Graf
2008-11-05 11:49 ` Avi Kivity
2008-11-05 20:58 ` Eduardo Habkost
0 siblings, 2 replies; 7+ messages in thread
From: Alexander Graf @ 2008-11-05 10:41 UTC (permalink / raw)
To: kvm; +Cc: avi, kraxel, anthony, Sander.Vanleeuwen, zach, brogers
X86 CPUs need to have some magic happening to enable the virtualization
extensions on them. This magic can result in unpleasant results for
users, like blocking other VMMs from working (vmx) or using invalid TLB
entries (svm).
Currently KVM activates virtualization when the respective kernel module
is loaded. This blocks us from autoloading KVM modules without breaking
other VMMs.
To circumvent this problem at least a bit, this patch introduces on
demand activation of virtualization. This means that, instead,
virtualization is enabled on creation of the first virtual machine
and disabled on removal of the last one.
So using this, KVM can be easily autoloaded, while keeping other
hypervisors usable.
v2 adds returns to non-x86 hardware_enables and adds IA64 change
v3 changes:
- use spin_lock instead of atomics
- put locking to new functions hardware_{en,dis}able_all that get called
on VM creation/destruction
- remove usage counter checks where not necessary
- return -EINVAL for IA64 slot < 0 case
Signed-off-by: Alexander Graf <agraf@suse.de>
---
arch/ia64/kvm/kvm-ia64.c | 8 +++--
arch/powerpc/kvm/powerpc.c | 3 +-
arch/s390/kvm/kvm-s390.c | 3 +-
arch/x86/kvm/svm.c | 13 +++++--
arch/x86/kvm/vmx.c | 7 +++-
arch/x86/kvm/x86.c | 4 +-
include/asm-x86/kvm_host.h | 2 +-
include/linux/kvm_host.h | 2 +-
virt/kvm/kvm_main.c | 75 ++++++++++++++++++++++++++++++++++++-------
9 files changed, 90 insertions(+), 27 deletions(-)
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index c25c75b..a672cd2 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -110,7 +110,7 @@ long ia64_pal_vp_create(u64 *vpd, u64 *host_iva, u64 *opt_handler)
static DEFINE_SPINLOCK(vp_lock);
-void kvm_arch_hardware_enable(void *garbage)
+int kvm_arch_hardware_enable(void *garbage)
{
long status;
long tmp_base;
@@ -124,7 +124,7 @@ void kvm_arch_hardware_enable(void *garbage)
slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT);
local_irq_restore(saved_psr);
if (slot < 0)
- return;
+ return -EINVAL;
spin_lock(&vp_lock);
status = ia64_pal_vp_init_env(kvm_vsa_base ?
@@ -132,7 +132,7 @@ void kvm_arch_hardware_enable(void *garbage)
__pa(kvm_vm_buffer), KVM_VM_BUFFER_BASE, &tmp_base);
if (status != 0) {
printk(KERN_WARNING"kvm: Failed to Enable VT Support!!!!\n");
- return ;
+ return -EINVAL;
}
if (!kvm_vsa_base) {
@@ -141,6 +141,8 @@ void kvm_arch_hardware_enable(void *garbage)
}
spin_unlock(&vp_lock);
ia64_ptr_entry(0x3, slot);
+
+ return 0;
}
void kvm_arch_hardware_disable(void *garbage)
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 90a6fc4..dce664b 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -79,8 +79,9 @@ int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
return r;
}
-void kvm_arch_hardware_enable(void *garbage)
+int kvm_arch_hardware_enable(void *garbage)
{
+ return 0;
}
void kvm_arch_hardware_disable(void *garbage)
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 8b00eb2..91e08e5 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -70,9 +70,10 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
/* Section: not file related */
-void kvm_arch_hardware_enable(void *garbage)
+int kvm_arch_hardware_enable(void *garbage)
{
/* every s390 is virtualization enabled ;-) */
+ return 0;
}
void kvm_arch_hardware_disable(void *garbage)
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 05efc4e..3a8a820 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -275,7 +275,7 @@ static void svm_hardware_disable(void *garbage)
wrmsrl(MSR_EFER, efer & ~MSR_EFER_SVME_MASK);
}
-static void svm_hardware_enable(void *garbage)
+static int svm_hardware_enable(void *garbage)
{
struct svm_cpu_data *svm_data;
@@ -284,16 +284,20 @@ static void svm_hardware_enable(void *garbage)
struct desc_struct *gdt;
int me = raw_smp_processor_id();
+ rdmsrl(MSR_EFER, efer);
+ if (efer & MSR_EFER_SVME_MASK)
+ return -EBUSY;
+
if (!has_svm()) {
printk(KERN_ERR "svm_cpu_init: err EOPNOTSUPP on %d\n", me);
- return;
+ return -EINVAL;
}
svm_data = per_cpu(svm_data, me);
if (!svm_data) {
printk(KERN_ERR "svm_cpu_init: svm_data is NULL on %d\n",
me);
- return;
+ return -EINVAL;
}
svm_data->asid_generation = 1;
@@ -304,11 +308,12 @@ static void svm_hardware_enable(void *garbage)
gdt = (struct desc_struct *)gdt_descr.address;
svm_data->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);
- rdmsrl(MSR_EFER, efer);
wrmsrl(MSR_EFER, efer | MSR_EFER_SVME_MASK);
wrmsrl(MSR_VM_HSAVE_PA,
page_to_pfn(svm_data->save_area) << PAGE_SHIFT);
+
+ return 0;
}
static void svm_cpu_uninit(int cpu)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 64e2439..9a4f47c 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1059,12 +1059,15 @@ static __init int vmx_disabled_by_bios(void)
/* locked but not enabled */
}
-static void hardware_enable(void *garbage)
+static int hardware_enable(void *garbage)
{
int cpu = raw_smp_processor_id();
u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
u64 old;
+ if (read_cr4() & X86_CR4_VMXE)
+ return -EBUSY;
+
INIT_LIST_HEAD(&per_cpu(vcpus_on_cpu, cpu));
rdmsrl(MSR_IA32_FEATURE_CONTROL, old);
if ((old & (FEATURE_CONTROL_LOCKED |
@@ -1079,6 +1082,8 @@ static void hardware_enable(void *garbage)
asm volatile (ASM_VMX_VMXON_RAX
: : "a"(&phys_addr), "m"(phys_addr)
: "memory", "cc");
+
+ return 0;
}
static void vmclear_local_vcpus(void)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index ceeac88..525cb67 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -4030,9 +4030,9 @@ int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu)
return kvm_x86_ops->vcpu_reset(vcpu);
}
-void kvm_arch_hardware_enable(void *garbage)
+int kvm_arch_hardware_enable(void *garbage)
{
- kvm_x86_ops->hardware_enable(garbage);
+ return kvm_x86_ops->hardware_enable(garbage);
}
void kvm_arch_hardware_disable(void *garbage)
diff --git a/include/asm-x86/kvm_host.h b/include/asm-x86/kvm_host.h
index eebba6a..8d59d9f 100644
--- a/include/asm-x86/kvm_host.h
+++ b/include/asm-x86/kvm_host.h
@@ -424,7 +424,7 @@ struct descriptor_table {
struct kvm_x86_ops {
int (*cpu_has_kvm_support)(void); /* __init */
int (*disabled_by_bios)(void); /* __init */
- void (*hardware_enable)(void *dummy); /* __init */
+ int (*hardware_enable)(void *dummy); /* __init */
void (*hardware_disable)(void *dummy);
void (*check_processor_compatibility)(void *rtn);
int (*hardware_setup)(void); /* __init */
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 3a0fb77..d141741 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -272,7 +272,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);
int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu);
-void kvm_arch_hardware_enable(void *garbage);
+int kvm_arch_hardware_enable(void *garbage);
void kvm_arch_hardware_disable(void *garbage);
int kvm_arch_hardware_setup(void);
void kvm_arch_hardware_unsetup(void);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 4f43abe..cb11e3a 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -64,6 +64,8 @@ DEFINE_SPINLOCK(kvm_lock);
LIST_HEAD(vm_list);
static cpumask_t cpus_hardware_enabled;
+static int kvm_usage_count = 0;
+static DEFINE_SPINLOCK(kvm_usage_lock);
struct kmem_cache *kvm_vcpu_cache;
EXPORT_SYMBOL_GPL(kvm_vcpu_cache);
@@ -74,6 +76,8 @@ struct dentry *kvm_debugfs_dir;
static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
unsigned long arg);
+static int hardware_enable_all(void);
+static void hardware_disable_all(void);
bool kvm_rebooting;
@@ -563,19 +567,25 @@ static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
static struct kvm *kvm_create_vm(void)
{
+ int r = 0;
struct kvm *kvm = kvm_arch_create_vm();
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
struct page *page;
#endif
if (IS_ERR(kvm))
- goto out;
+ return kvm;
+
+ r = hardware_enable_all();
+ if (r) {
+ goto out_err;
+ }
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
page = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (!page) {
- kfree(kvm);
- return ERR_PTR(-ENOMEM);
+ r = -ENOMEM;
+ goto out_err;
}
kvm->coalesced_mmio_ring =
(struct kvm_coalesced_mmio_ring *)page_address(page);
@@ -583,15 +593,13 @@ static struct kvm *kvm_create_vm(void)
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
{
- int err;
kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
- err = mmu_notifier_register(&kvm->mmu_notifier, current->mm);
- if (err) {
+ r = mmu_notifier_register(&kvm->mmu_notifier, current->mm);
+ if (r) {
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
put_page(page);
#endif
- kfree(kvm);
- return ERR_PTR(err);
+ goto out_err;
}
}
#endif
@@ -610,8 +618,12 @@ static struct kvm *kvm_create_vm(void)
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
kvm_coalesced_mmio_init(kvm);
#endif
-out:
return kvm;
+
+out_err:
+ hardware_disable_all();
+ kfree(kvm);
+ return ERR_PTR(r);
}
/*
@@ -660,6 +672,7 @@ static void kvm_destroy_vm(struct kvm *kvm)
mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
#endif
kvm_arch_destroy_vm(kvm);
+ hardware_disable_all();
mmdrop(mm);
}
@@ -1767,14 +1780,40 @@ static struct miscdevice kvm_dev = {
&kvm_chardev_ops,
};
-static void hardware_enable(void *junk)
+static void hardware_enable(void *_r)
{
int cpu = raw_smp_processor_id();
+ int r;
+
+ /* If enabling a previous CPU failed already, let's not continue */
+ if (_r && *((int*)_r))
+ return;
if (cpu_isset(cpu, cpus_hardware_enabled))
return;
+ r = kvm_arch_hardware_enable(NULL);
+ if (_r)
+ *((int*)_r) = r;
+ if (r) {
+ printk(KERN_INFO "kvm: enabling virtualization on "
+ "CPU%d failed\n", cpu);
+ return;
+ }
+
cpu_set(cpu, cpus_hardware_enabled);
- kvm_arch_hardware_enable(NULL);
+}
+
+static int hardware_enable_all(void)
+{
+ int r = 0;
+
+ spin_lock(&kvm_usage_lock);
+ kvm_usage_count++;
+ if (kvm_usage_count == 1)
+ on_each_cpu(hardware_enable, &r, 1);
+ spin_unlock(&kvm_usage_lock);
+
+ return r;
}
static void hardware_disable(void *junk)
@@ -1787,6 +1826,18 @@ static void hardware_disable(void *junk)
kvm_arch_hardware_disable(NULL);
}
+static void hardware_disable_all(void)
+{
+ if (!kvm_usage_count)
+ return;
+
+ spin_lock(&kvm_usage_lock);
+ kvm_usage_count--;
+ if (!kvm_usage_count)
+ on_each_cpu(hardware_disable, NULL, 1);
+ spin_unlock(&kvm_usage_lock);
+}
+
static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
void *v)
{
@@ -2029,7 +2080,6 @@ int kvm_init(void *opaque, unsigned int vcpu_size,
goto out_free_1;
}
- on_each_cpu(hardware_enable, NULL, 1);
r = register_cpu_notifier(&kvm_cpu_notifier);
if (r)
goto out_free_2;
@@ -2075,7 +2125,6 @@ out_free_3:
unregister_reboot_notifier(&kvm_reboot_notifier);
unregister_cpu_notifier(&kvm_cpu_notifier);
out_free_2:
- on_each_cpu(hardware_disable, NULL, 1);
out_free_1:
kvm_arch_hardware_unsetup();
out_free_0:
--
1.6.0.2
^ permalink raw reply related [flat|nested] 7+ messages in thread
* Re: [PATCH] Activate Virtualization On Demand v3
2008-11-05 10:41 [PATCH] Activate Virtualization On Demand v3 Alexander Graf
@ 2008-11-05 11:49 ` Avi Kivity
2008-11-05 11:55 ` Alexander Graf
2008-11-05 20:58 ` Eduardo Habkost
1 sibling, 1 reply; 7+ messages in thread
From: Avi Kivity @ 2008-11-05 11:49 UTC (permalink / raw)
To: Alexander Graf; +Cc: kvm, kraxel, anthony, Sander.Vanleeuwen, zach, brogers
Alexander Graf wrote:
> X86 CPUs need to have some magic happening to enable the virtualization
> extensions on them. This magic can result in unpleasant results for
> users, like blocking other VMMs from working (vmx) or using invalid TLB
> entries (svm).
>
> Currently KVM activates virtualization when the respective kernel module
> is loaded. This blocks us from autoloading KVM modules without breaking
> other VMMs.
>
> To circumvent this problem at least a bit, this patch introduces on
> demand activation of virtualization. This means, that instead
> virtualization is enabled on creation of the first virtual machine
> and disabled on removal of the last one.
>
> So using this, KVM can be easily autoloaded, while keeping other
> hypervisors usable.
>
> v2 adds returns to non-x86 hardware_enables and adds IA64 change
> v3 changes:
> - use spin_lock instead of atomics
> - put locking to new functions hardware_{en,dis}able_all that get called
> on VM creation/destruction
> - remove usage counter checks where not necessary
> - return -EINVAL for IA64 slot < 0 case
>
>
Is this v3 with all the latest changes? it precedes some messages where
you say you'll change things by about 40 minutes.
In any case, I'll defer applying until Eduardo's kdump/reboot changes go
in, since they touch the same places, and Eduardo's changes are much
harder to test.
--
error compiling committee.c: too many arguments to function
^ permalink raw reply [flat|nested] 7+ messages in thread
* Re: [PATCH] Activate Virtualization On Demand v3
2008-11-05 11:49 ` Avi Kivity
@ 2008-11-05 11:55 ` Alexander Graf
2008-11-05 12:02 ` Avi Kivity
0 siblings, 1 reply; 7+ messages in thread
From: Alexander Graf @ 2008-11-05 11:55 UTC (permalink / raw)
To: Avi Kivity; +Cc: kvm, kraxel, anthony, Sander.Vanleeuwen, zach, brogers
Avi Kivity wrote:
> Alexander Graf wrote:
>> X86 CPUs need to have some magic happening to enable the virtualization
>> extensions on them. This magic can result in unpleasant results for
>> users, like blocking other VMMs from working (vmx) or using invalid TLB
>> entries (svm).
>>
>> Currently KVM activates virtualization when the respective kernel module
>> is loaded. This blocks us from autoloading KVM modules without breaking
>> other VMMs.
>>
>> To circumvent this problem at least a bit, this patch introduces on
>> demand activation of virtualization. This means, that instead
>> virtualization is enabled on creation of the first virtual machine
>> and disabled on removal of the last one.
>>
>> So using this, KVM can be easily autoloaded, while keeping other
>> hypervisors usable.
>>
>> v2 adds returns to non-x86 hardware_enables and adds IA64 change
>> v3 changes:
>> - use spin_lock instead of atomics
>> - put locking to new functions hardware_{en,dis}able_all that get
>> called
>> on VM creation/destruction
>> - remove usage counter checks where not necessary
>> - return -EINVAL for IA64 slot < 0 case
>>
>>
>
> Is this v3 with all the latest changes? it precedes some messages
> where you say you'll change things by about 40 minutes.
Yeah, my clock was off. Somehow I get host time drifts sometimes when
using KVM and my NTP client wasn't running. But maybe it's just me doing
something wrong.
> In any case, I'll defer applying until Eduardo's kdump/reboot changes
> go in, since they touch the same places, and Eduardo's changes are
> much harder to test.
I agree.
Apart from that, do I get an ACK for it, so I can at least put it into
our package and rest assured nothing obvious is wrong :-)?
Alex
^ permalink raw reply [flat|nested] 7+ messages in thread
* Re: [PATCH] Activate Virtualization On Demand v3
2008-11-05 11:55 ` Alexander Graf
@ 2008-11-05 12:02 ` Avi Kivity
0 siblings, 0 replies; 7+ messages in thread
From: Avi Kivity @ 2008-11-05 12:02 UTC (permalink / raw)
To: Alexander Graf; +Cc: kvm, kraxel, anthony, Sander.Vanleeuwen, zach, brogers
Alexander Graf wrote:
>> In any case, I'll defer applying until Eduardo's kdump/reboot changes
>> go in, since they touch the same places, and Eduardo's changes are
>> much harder to test.
>>
>
> I agree.
> Apart from that, do I get an ACK for it, so I can at least put it into
> our package and rest assured nothing obvious is wrong :-)?
>
It seems fine.
(the part where you call on_each_cpu and have all functions write their
reply into the same r is icky, but should work AFAICS)
--
error compiling committee.c: too many arguments to function
^ permalink raw reply [flat|nested] 7+ messages in thread
* Re: [PATCH] Activate Virtualization On Demand v3
2008-11-05 10:41 [PATCH] Activate Virtualization On Demand v3 Alexander Graf
2008-11-05 11:49 ` Avi Kivity
@ 2008-11-05 20:58 ` Eduardo Habkost
2008-11-06 12:57 ` Alexander Graf
1 sibling, 1 reply; 7+ messages in thread
From: Eduardo Habkost @ 2008-11-05 20:58 UTC (permalink / raw)
To: Alexander Graf
Cc: kvm, avi, kraxel, anthony, Sander.Vanleeuwen, zach, brogers
On Wed, Nov 05, 2008 at 11:41:04AM +0100, Alexander Graf wrote:
> X86 CPUs need to have some magic happening to enable the virtualization
> extensions on them. This magic can result in unpleasant results for
> users, like blocking other VMMs from working (vmx) or using invalid TLB
> entries (svm).
>
> Currently KVM activates virtualization when the respective kernel module
> is loaded. This blocks us from autoloading KVM modules without breaking
> other VMMs.
>
> To circumvent this problem at least a bit, this patch introduces on
> demand activation of virtualization. This means, that instead
> virtualization is enabled on creation of the first virtual machine
> and disabled on removal of the last one.
>
> So using this, KVM can be easily autoloaded, while keeping other
> hypervisors usable.
>
> v2 adds returns to non-x86 hardware_enables and adds IA64 change
> v3 changes:
> - use spin_lock instead of atomics
> - put locking to new functions hardware_{en,dis}able_all that get called
> on VM creation/destruction
> - remove usage counter checks where not necessary
> - return -EINVAL for IA64 slot < 0 case
>
> Signed-off-by: Alexander Graf <agraf@suse.de>
> ---
<snip>
>
> -static void hardware_enable(void *junk)
> +static void hardware_enable(void *_r)
> {
> int cpu = raw_smp_processor_id();
> + int r;
> +
> + /* If enabling a previous CPU failed already, let's not continue */
> + if (_r && *((int*)_r))
> + return;
>
> if (cpu_isset(cpu, cpus_hardware_enabled))
> return;
> + r = kvm_arch_hardware_enable(NULL);
> + if (_r)
> + *((int*)_r) = r;
> + if (r) {
> + printk(KERN_INFO "kvm: enabling virtualization on "
> + "CPU%d failed\n", cpu);
> + return;
> + }
> +
> cpu_set(cpu, cpus_hardware_enabled);
> - kvm_arch_hardware_enable(NULL);
> +}
Doesn't on_each_cpu() run the function in parallel on all CPUs? If so,
there is a race between checking *_r and setting *_r.
> +
> +static int hardware_enable_all(void)
> +{
> + int r = 0;
> +
> + spin_lock(&kvm_usage_lock);
> + kvm_usage_count++;
> + if (kvm_usage_count == 1)
> + on_each_cpu(hardware_enable, &r, 1);
> + spin_unlock(&kvm_usage_lock);
> +
> + return r;
> }
>
<snip>
--
Eduardo
^ permalink raw reply [flat|nested] 7+ messages in thread
* Re: [PATCH] Activate Virtualization On Demand v3
2008-11-05 20:58 ` Eduardo Habkost
@ 2008-11-06 12:57 ` Alexander Graf
2008-11-06 13:51 ` Eduardo Habkost
0 siblings, 1 reply; 7+ messages in thread
From: Alexander Graf @ 2008-11-06 12:57 UTC (permalink / raw)
To: Eduardo Habkost
Cc: kvm, avi, kraxel, anthony, Sander.Vanleeuwen, zach, brogers
On 05.11.2008, at 21:58, Eduardo Habkost wrote:
> On Wed, Nov 05, 2008 at 11:41:04AM +0100, Alexander Graf wrote:
>> X86 CPUs need to have some magic happening to enable the
>> virtualization
>> extensions on them. This magic can result in unpleasant results for
>> users, like blocking other VMMs from working (vmx) or using invalid
>> TLB
>> entries (svm).
>>
>> Currently KVM activates virtualization when the respective kernel
>> module
>> is loaded. This blocks us from autoloading KVM modules without
>> breaking
>> other VMMs.
>>
>> To circumvent this problem at least a bit, this patch introduces on
>> demand activation of virtualization. This means, that instead
>> virtualization is enabled on creation of the first virtual machine
>> and disabled on removal of the last one.
>>
>> So using this, KVM can be easily autoloaded, while keeping other
>> hypervisors usable.
>>
>> v2 adds returns to non-x86 hardware_enables and adds IA64 change
>> v3 changes:
>> - use spin_lock instead of atomics
>> - put locking to new functions hardware_{en,dis}able_all that get
>> called
>> on VM creation/destruction
>> - remove usage counter checks where not necessary
>> - return -EINVAL for IA64 slot < 0 case
>>
>> Signed-off-by: Alexander Graf <agraf@suse.de>
>> ---
> <snip>
>>
>> -static void hardware_enable(void *junk)
>> +static void hardware_enable(void *_r)
>> {
>> int cpu = raw_smp_processor_id();
>> + int r;
>> +
>> + /* If enabling a previous CPU failed already, let's not continue */
>> + if (_r && *((int*)_r))
>> + return;
>>
>> if (cpu_isset(cpu, cpus_hardware_enabled))
>> return;
>> + r = kvm_arch_hardware_enable(NULL);
>> + if (_r)
>> + *((int*)_r) = r;
>> + if (r) {
>> + printk(KERN_INFO "kvm: enabling virtualization on "
>> + "CPU%d failed\n", cpu);
>> + return;
>> + }
>> +
>> cpu_set(cpu, cpus_hardware_enabled);
>> - kvm_arch_hardware_enable(NULL);
>> +}
>
> Doesn't on_each_cpu() run the function in parallel on all CPUs? If so,
> there is a race between checking *_r and setting *_r.
Good question - it doesn't really hurt to write the value though, if
we only write it on error.
So I guess we could just remove the first check and check on if( r &&
_r) later on.
Alex
^ permalink raw reply [flat|nested] 7+ messages in thread
* Re: [PATCH] Activate Virtualization On Demand v3
2008-11-06 12:57 ` Alexander Graf
@ 2008-11-06 13:51 ` Eduardo Habkost
0 siblings, 0 replies; 7+ messages in thread
From: Eduardo Habkost @ 2008-11-06 13:51 UTC (permalink / raw)
To: Alexander Graf
Cc: kvm, avi, kraxel, anthony, Sander.Vanleeuwen, zach, brogers
On Thu, Nov 06, 2008 at 01:57:52PM +0100, Alexander Graf wrote:
>
> On 05.11.2008, at 21:58, Eduardo Habkost wrote:
>
>> On Wed, Nov 05, 2008 at 11:41:04AM +0100, Alexander Graf wrote:
>>> X86 CPUs need to have some magic happening to enable the
>>> virtualization
>>> extensions on them. This magic can result in unpleasant results for
>>> users, like blocking other VMMs from working (vmx) or using invalid
>>> TLB
>>> entries (svm).
>>>
>>> Currently KVM activates virtualization when the respective kernel
>>> module
>>> is loaded. This blocks us from autoloading KVM modules without
>>> breaking
>>> other VMMs.
>>>
>>> To circumvent this problem at least a bit, this patch introduces on
>>> demand activation of virtualization. This means, that instead
>>> virtualization is enabled on creation of the first virtual machine
>>> and disabled on removal of the last one.
>>>
>>> So using this, KVM can be easily autoloaded, while keeping other
>>> hypervisors usable.
>>>
>>> v2 adds returns to non-x86 hardware_enables and adds IA64 change
>>> v3 changes:
>>> - use spin_lock instead of atomics
>>> - put locking to new functions hardware_{en,dis}able_all that get
>>> called
>>> on VM creation/destruction
>>> - remove usage counter checks where not necessary
>>> - return -EINVAL for IA64 slot < 0 case
>>>
>>> Signed-off-by: Alexander Graf <agraf@suse.de>
>>> ---
>> <snip>
>>>
>>> -static void hardware_enable(void *junk)
>>> +static void hardware_enable(void *_r)
>>> {
>>> int cpu = raw_smp_processor_id();
>>> + int r;
>>> +
>>> + /* If enabling a previous CPU failed already, let's not continue */
>>> + if (_r && *((int*)_r))
>>> + return;
>>>
>>> if (cpu_isset(cpu, cpus_hardware_enabled))
>>> return;
>>> + r = kvm_arch_hardware_enable(NULL);
>>> + if (_r)
>>> + *((int*)_r) = r;
>>> + if (r) {
>>> + printk(KERN_INFO "kvm: enabling virtualization on "
>>> + "CPU%d failed\n", cpu);
>>> + return;
>>> + }
>>> +
>>> cpu_set(cpu, cpus_hardware_enabled);
>>> - kvm_arch_hardware_enable(NULL);
>>> +}
>>
>> Doesn't on_each_cpu() run the function in parallel on all CPUs? If so,
>> there is a race between checking *_r and setting *_r.
>
> Good question - it doesn't really hurt to write the value though, if we
> only write it on error.
> So I guess we could just remove the first check and check on if( r &&
> _r) later on.
I think the first check doesn't hurt, and using if(_r && r) on the second
check should work. I am not sure if there are no pitfalls here due to
memory ordering or delayed write on some arches, however.
--
Eduardo
^ permalink raw reply [flat|nested] 7+ messages in thread
end of thread, other threads:[~2008-11-06 13:51 UTC | newest]
Thread overview: 7+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2008-11-05 10:41 [PATCH] Activate Virtualization On Demand v3 Alexander Graf
2008-11-05 11:49 ` Avi Kivity
2008-11-05 11:55 ` Alexander Graf
2008-11-05 12:02 ` Avi Kivity
2008-11-05 20:58 ` Eduardo Habkost
2008-11-06 12:57 ` Alexander Graf
2008-11-06 13:51 ` Eduardo Habkost
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox