public inbox for kvm@vger.kernel.org
 help / color / mirror / Atom feed
From: Alexander Graf <agraf@suse.de>
To: kvm@vger.kernel.org
Cc: avi@redhat.com, Sander.Vanleeuwen@sun.com, kraxel@redhat.com,
	anthony@codemonkey.ws, zach@vmware.com
Subject: [PATCH] [RFC] Activate VMX on demand
Date: Mon,  3 Nov 2008 17:19:41 +0100	[thread overview]
Message-ID: <1225729181-24431-1-git-send-email-agraf@suse.de> (raw)

This patch moves the actual VMX enablement from the initialization
of the module to the creation of a VCPU.

With this approach, KVM can be easily autoloaded and does not block
other VMMs while being modprobe'd.
This improves the user experience a lot, since now other VMMs can
run, even though KVM is loaded, so users do not have to manually
load/unload KVM or any other VMM module.

Compared with the previously suggested alternative ("approach 2"), which
would have introduced a complete framework while bringing almost no
benefit, this approach enables multiple VMMs to coexist without much
intervention. Thanks to Gerd for pointing that out.

I verified that this approach works with VirtualBox.

Signed-off-by: Alexander Graf <agraf@suse.de>

---
 arch/x86/kvm/vmx.c |   82 +++++++++++++++++++++++++++++++++++++++++++++++----
 1 files changed, 75 insertions(+), 7 deletions(-)

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 64e2439..2c48590 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -116,6 +116,9 @@ static struct page *vmx_msr_bitmap;
 static DECLARE_BITMAP(vmx_vpid_bitmap, VMX_NR_VPIDS);
 static DEFINE_SPINLOCK(vmx_vpid_lock);
 
+static int vmx_usage_count;	/* VCPUs with VMX enabled; protected by vmx_usage_lock */
+static DEFINE_SPINLOCK(vmx_usage_lock);
+
 static struct vmcs_config {
 	int size;
 	int order;
@@ -1059,10 +1062,68 @@ static __init int vmx_disabled_by_bios(void)
 	/* locked but not enabled */
 }
 
-static void hardware_enable(void *garbage)
+static void __vmx_off(void *garbage)	/* per-CPU: execute VMXOFF, then clear CR4.VMXE */
+{
+	asm volatile (__ex(ASM_VMX_VMXOFF) : : : "cc");
+	write_cr4(read_cr4() & ~X86_CR4_VMXE);
+}
+
+static void vmx_off(void)
+{
+	/*
+	 * Drop one VMX reference; the last user runs VMXOFF on every
+	 * CPU.  The count is read and written under the lock (the old
+	 * unlocked fast-path check raced with vmx_on()), and the
+	 * combined test also guards against refcount underflow.
+	 */
+	spin_lock(&vmx_usage_lock);
+	if (vmx_usage_count && --vmx_usage_count == 0)
+		on_each_cpu(__vmx_off, NULL, 1);
+	spin_unlock(&vmx_usage_lock);
+}
+
+static void __vmx_on(void *garbage)	/* per-CPU: set CR4.VMXE, then VMXON this CPU's vmxarea */
 {
 	int cpu = raw_smp_processor_id();
 	u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
+
+	write_cr4(read_cr4() | X86_CR4_VMXE); /* FIXME: not cpu hotplug safe */
+	asm volatile (ASM_VMX_VMXON_RAX
+		      : : "a"(&phys_addr), "m"(phys_addr)
+		      : "memory", "cc");
+}
+
+static void __vmx_check(void *r)	/* per-CPU: report -EBUSY via *r if VMX is already in use */
+{
+	if (read_cr4() & X86_CR4_VMXE)
+		*((int*)r) = -EBUSY;	/* presumably another VMM holds VMX on this CPU */
+}
+
+static int vmx_on(void)
+{
+	/*
+	 * Take one VMX reference.  The first user probes every CPU for
+	 * a foreign owner (CR4.VMXE already set) and, only if none is
+	 * found, turns VMX on everywhere.  Returns 0 or -EBUSY.
+	 * Straight-line if/else replaces the old goto/label chain,
+	 * which jumped over the immediately following statements.
+	 */
+	int r = 0;
+
+	spin_lock(&vmx_usage_lock);
+	if (++vmx_usage_count == 1) {
+		on_each_cpu(__vmx_check, &r, 1);
+		if (r)
+			vmx_usage_count--;	/* drop the failed reference */
+		else
+			on_each_cpu(__vmx_on, NULL, 1);
+	}
+	spin_unlock(&vmx_usage_lock);
+	return r;
+}
+
+static void hardware_enable(void *garbage)	/* per-CPU enable hook; VMXON itself deferred to vmx_on() */
+{
+	int cpu = raw_smp_processor_id();
 	u64 old;
 
 	INIT_LIST_HEAD(&per_cpu(vcpus_on_cpu, cpu));
@@ -1075,10 +1136,9 @@ static void hardware_enable(void *garbage)
 		wrmsrl(MSR_IA32_FEATURE_CONTROL, old |
 		       FEATURE_CONTROL_LOCKED |
 		       FEATURE_CONTROL_VMXON_ENABLED);
-	write_cr4(read_cr4() | X86_CR4_VMXE); /* FIXME: not cpu hotplug safe */
-	asm volatile (ASM_VMX_VMXON_RAX
-		      : : "a"(&phys_addr), "m"(phys_addr)
-		      : "memory", "cc");
+
+	if (vmx_usage_count)	/* NOTE(review): unlocked read — confirm callers serialize vs vmx_on/vmx_off */
+		__vmx_on(garbage);
 }
 
 static void vmclear_local_vcpus(void)
@@ -1094,8 +1154,9 @@ static void vmclear_local_vcpus(void)
 static void hardware_disable(void *garbage)
 {
 	vmclear_local_vcpus();
-	asm volatile (__ex(ASM_VMX_VMXOFF) : : : "cc");
-	write_cr4(read_cr4() & ~X86_CR4_VMXE);
+
+	if (vmx_usage_count)	/* VMXOFF only if some VCPU actually turned VMX on */
+		__vmx_off(garbage);
 }
 
 static __init int adjust_vmx_controls(u32 ctl_min, u32 ctl_opt,
@@ -3490,6 +3551,7 @@ static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
 	kfree(vmx->guest_msrs);
 	kvm_vcpu_uninit(vcpu);
 	kmem_cache_free(kvm_vcpu_cache, vmx);
+	vmx_off();	/* drop this VCPU's reference; the last one executes VMXOFF */
 }
 
 static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
@@ -3501,6 +3563,10 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
 	if (!vmx)
 		return ERR_PTR(-ENOMEM);
 
+	err = vmx_on();	/* take a VMX reference; -EBUSY if another VMM already owns VMX */
+	if (err)
+		goto free_vmx;
+
 	allocate_vpid(vmx);
 
 	err = kvm_vcpu_init(&vmx->vcpu, kvm, id);
@@ -3550,6 +3616,8 @@ uninit_vcpu:
 	kvm_vcpu_uninit(&vmx->vcpu);
 free_vcpu:
+	vmx_off();	/* only paths where vmx_on() succeeded reach this */
+free_vmx:
 	kmem_cache_free(kvm_vcpu_cache, vmx);
 	return ERR_PTR(err);
 }
 
-- 
1.6.0.2


             reply	other threads:[~2008-11-03 16:20 UTC|newest]

Thread overview: 6+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2008-11-03 16:19 Alexander Graf [this message]
2008-11-04 10:36 ` [PATCH] [RFC] Activate VMX on demand Avi Kivity
2008-11-04 13:13   ` Alexander Graf
2008-11-04 13:37     ` Avi Kivity
2008-11-04 13:44       ` Alexander Graf
2008-11-04 14:38         ` Avi Kivity

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=1225729181-24431-1-git-send-email-agraf@suse.de \
    --to=agraf@suse.de \
    --cc=Sander.Vanleeuwen@sun.com \
    --cc=anthony@codemonkey.ws \
    --cc=avi@redhat.com \
    --cc=kraxel@redhat.com \
    --cc=kvm@vger.kernel.org \
    --cc=zach@vmware.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox