public inbox for kvm@vger.kernel.org
 help / color / mirror / Atom feed
From: Marcelo Tosatti <mtosatti-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
To: Avi Kivity <avi-atKUWr5tajBWk0Htik3J/w@public.gmane.org>
Cc: kvm-devel
	<kvm-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f@public.gmane.org>,
	mtosatti-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org
Subject: [patch 5/5] KVM: switch to mmu spinlock
Date: Thu, 20 Dec 2007 19:18:26 -0500	[thread overview]
Message-ID: <20071221002024.502714562@redhat.com> (raw)
In-Reply-To: 20071221001821.029994250@redhat.com

[-- Attachment #1: convert-to-spinlock --]
[-- Type: text/plain, Size: 7291 bytes --]

Convert the synchronization of shadow page table handling to a separate
mmu_lock spinlock.

Also guard fetch() with mmap_sem held in read mode, to protect against
alias and memslot changes.

Signed-off-by: Marcelo Tosatti <mtosatti-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>

Index: kvm.quilt/arch/x86/kvm/mmu.c
===================================================================
--- kvm.quilt.orig/arch/x86/kvm/mmu.c
+++ kvm.quilt/arch/x86/kvm/mmu.c
@@ -1031,7 +1031,7 @@ static void mmu_free_roots(struct kvm_vc
 
 	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
 		return;
-	mutex_lock(&vcpu->kvm->lock);
+	spin_lock(&vcpu->kvm->mmu_lock);
 #ifdef CONFIG_X86_64
 	if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
 		hpa_t root = vcpu->arch.mmu.root_hpa;
@@ -1039,7 +1039,7 @@ static void mmu_free_roots(struct kvm_vc
 		sp = page_header(root);
 		--sp->root_count;
 		vcpu->arch.mmu.root_hpa = INVALID_PAGE;
-		mutex_unlock(&vcpu->kvm->lock);
+		spin_unlock(&vcpu->kvm->mmu_lock);
 		return;
 	}
 #endif
@@ -1053,7 +1053,7 @@ static void mmu_free_roots(struct kvm_vc
 		}
 		vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
 	}
-	mutex_unlock(&vcpu->kvm->lock);
+	spin_unlock(&vcpu->kvm->mmu_lock);
 	vcpu->arch.mmu.root_hpa = INVALID_PAGE;
 }
 
@@ -1256,9 +1256,9 @@ int kvm_mmu_load(struct kvm_vcpu *vcpu)
 	r = mmu_topup_memory_caches(vcpu);
 	if (r)
 		goto out;
-	mutex_lock(&vcpu->kvm->lock);
+	spin_lock(&vcpu->kvm->mmu_lock);
 	mmu_alloc_roots(vcpu);
-	mutex_unlock(&vcpu->kvm->lock);
+	spin_unlock(&vcpu->kvm->mmu_lock);
 	kvm_x86_ops->set_cr3(vcpu, vcpu->arch.mmu.root_hpa);
 	kvm_mmu_flush_tlb(vcpu);
 out:
@@ -1435,15 +1435,15 @@ int kvm_mmu_unprotect_page_virt(struct k
 	gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);
 	up_read(&current->mm->mmap_sem);
 
-	mutex_lock(&vcpu->kvm->lock);
+	spin_lock(&vcpu->kvm->mmu_lock);
 	r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
-	mutex_unlock(&vcpu->kvm->lock);
+	spin_unlock(&vcpu->kvm->mmu_lock);
 	return r;
 }
 
 void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
 {
-	mutex_lock(&vcpu->kvm->lock);
+	spin_lock(&vcpu->kvm->mmu_lock);
 	while (vcpu->kvm->arch.n_free_mmu_pages < KVM_REFILL_PAGES) {
 		struct kvm_mmu_page *sp;
 
@@ -1452,7 +1452,7 @@ void __kvm_mmu_free_some_pages(struct kv
 		kvm_mmu_zap_page(vcpu->kvm, sp);
 		++vcpu->kvm->stat.mmu_recycled;
 	}
-	mutex_unlock(&vcpu->kvm->lock);
+	spin_unlock(&vcpu->kvm->mmu_lock);
 }
 
 int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code)
@@ -1584,10 +1584,10 @@ void kvm_mmu_zap_all(struct kvm *kvm)
 {
 	struct kvm_mmu_page *sp, *node;
 
-	mutex_lock(&kvm->lock);
+	spin_lock(&kvm->mmu_lock);
 	list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link)
 		kvm_mmu_zap_page(kvm, sp);
-	mutex_unlock(&kvm->lock);
+	spin_unlock(&kvm->mmu_lock);
 
 	kvm_flush_remote_tlbs(kvm);
 }
Index: kvm.quilt/arch/x86/kvm/paging_tmpl.h
===================================================================
--- kvm.quilt.orig/arch/x86/kvm/paging_tmpl.h
+++ kvm.quilt/arch/x86/kvm/paging_tmpl.h
@@ -217,9 +217,9 @@ walk:
 			goto walk;
 		}
 		pte |= PT_DIRTY_MASK;
-		mutex_lock(&vcpu->kvm->lock);
+		spin_lock(&vcpu->kvm->mmu_lock);
 		kvm_mmu_pte_write(vcpu, pte_gpa, (u8 *)&pte, sizeof(pte));
-		mutex_unlock(&vcpu->kvm->lock);
+		spin_unlock(&vcpu->kvm->mmu_lock);
 		walker->ptes[walker->level - 1] = pte;
 	}
 
@@ -386,7 +386,6 @@ static int FNAME(page_fault)(struct kvm_
 	 */
 	r = FNAME(walk_addr)(&walker, vcpu, addr, write_fault, user_fault,
 			     fetch_fault);
-	up_read(&current->mm->mmap_sem);
 
 	/*
 	 * The page is not mapped by the guest.  Let the guest handle it.
@@ -395,10 +394,11 @@ static int FNAME(page_fault)(struct kvm_
 		pgprintk("%s: guest page fault\n", __FUNCTION__);
 		inject_page_fault(vcpu, addr, walker.error_code);
 		vcpu->arch.last_pt_write_count = 0; /* reset fork detector */
+		up_read(&current->mm->mmap_sem);
 		return 0;
 	}
 
-	mutex_lock(&vcpu->kvm->lock);
+	spin_lock(&vcpu->kvm->mmu_lock);
 	shadow_pte = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
 				  &write_pt);
 	pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __FUNCTION__,
@@ -411,13 +411,15 @@ static int FNAME(page_fault)(struct kvm_
 	 * mmio: emulate if accessible, otherwise its a guest fault.
 	 */
 	if (shadow_pte && is_io_pte(*shadow_pte)) {
-		mutex_unlock(&vcpu->kvm->lock);
+		spin_unlock(&vcpu->kvm->mmu_lock);
+		up_read(&current->mm->mmap_sem);
 		return 1;
 	}
 
 	++vcpu->stat.pf_fixed;
 	kvm_mmu_audit(vcpu, "post page fault (fixed)");
-	mutex_unlock(&vcpu->kvm->lock);
+	spin_unlock(&vcpu->kvm->mmu_lock);
+	up_read(&current->mm->mmap_sem);
 
 	return write_pt;
 }
Index: kvm.quilt/arch/x86/kvm/x86.c
===================================================================
--- kvm.quilt.orig/arch/x86/kvm/x86.c
+++ kvm.quilt/arch/x86/kvm/x86.c
@@ -1609,9 +1609,9 @@ static int emulator_write_phys(struct kv
 		up_read(&current->mm->mmap_sem);
 		return 0;
 	}
-	mutex_lock(&vcpu->kvm->lock);
+	spin_lock(&vcpu->kvm->mmu_lock);
 	kvm_mmu_pte_write(vcpu, gpa, val, bytes);
-	mutex_unlock(&vcpu->kvm->lock);
+	spin_unlock(&vcpu->kvm->mmu_lock);
 	up_read(&current->mm->mmap_sem);
 	return 1;
 }
Index: kvm.quilt/include/linux/kvm_host.h
===================================================================
--- kvm.quilt.orig/include/linux/kvm_host.h
+++ kvm.quilt/include/linux/kvm_host.h
@@ -104,7 +104,8 @@ struct kvm_memory_slot {
 };
 
 struct kvm {
-	struct mutex lock; /* protects everything except vcpus */
+	struct mutex lock; /* protects the vcpus array and APIC accesses */
+	spinlock_t mmu_lock;
 	struct mm_struct *mm; /* userspace tied to this vm */
 	int nmemslots;
 	struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS +
Index: kvm.quilt/virt/kvm/kvm_main.c
===================================================================
--- kvm.quilt.orig/virt/kvm/kvm_main.c
+++ kvm.quilt/virt/kvm/kvm_main.c
@@ -165,6 +165,7 @@ static struct kvm *kvm_create_vm(void)
 
 	kvm->mm = current->mm;
 	atomic_inc(&kvm->mm->mm_count);
+	spin_lock_init(&kvm->mmu_lock);
 	kvm_io_bus_init(&kvm->pio_bus);
 	mutex_init(&kvm->lock);
 	kvm_io_bus_init(&kvm->mmio_bus);
@@ -571,9 +572,7 @@ int kvm_read_guest_atomic(struct kvm *kv
 	addr = gfn_to_hva(kvm, gfn);
 	if (kvm_is_error_hva(addr))
 		return -EFAULT;
-	pagefault_disable();
 	r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len);
-	pagefault_enable();
 	if (r)
 		return -EFAULT;
 	return 0;
Index: kvm.quilt/arch/x86/kvm/vmx.c
===================================================================
--- kvm.quilt.orig/arch/x86/kvm/vmx.c
+++ kvm.quilt/arch/x86/kvm/vmx.c
@@ -1476,7 +1476,6 @@ static int alloc_apic_access_page(struct
 	struct kvm_userspace_memory_region kvm_userspace_mem;
 	int r = 0;
 
-	mutex_lock(&kvm->lock);
 	down_write(&current->mm->mmap_sem);
 	if (kvm->arch.apic_access_page)
 		goto out;
@@ -1490,7 +1489,6 @@ static int alloc_apic_access_page(struct
 	kvm->arch.apic_access_page = gfn_to_page(kvm, 0xfee00);
 out:
 	up_write(&current->mm->mmap_sem);
-	mutex_unlock(&kvm->lock);
 	return r;
 }
 

-- 


-------------------------------------------------------------------------
This SF.net email is sponsored by: Microsoft
Defy all challenges. Microsoft(R) Visual Studio 2005.
http://clk.atdmt.com/MRT/go/vse0120000070mrt/direct/01/

  parent reply	other threads:[~2007-12-21  0:18 UTC|newest]

Thread overview: 26+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2007-12-21  0:18 [patch 0/5] KVM shadow scalability enhancements Marcelo Tosatti
2007-12-21  0:18 ` [patch 1/5] KVM: concurrent guest walkers Marcelo Tosatti
2007-12-21  0:18 ` [patch 2/5] KVM: add kvm_read_guest_atomic() Marcelo Tosatti
2007-12-21  0:18 ` [patch 3/5] KVM: add kvm_follow_page() Marcelo Tosatti
     [not found]   ` <20071221002024.345682789-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
2007-12-22 19:42     ` Avi Kivity
     [not found]       ` <476D68A3.3000200-atKUWr5tajBWk0Htik3J/w@public.gmane.org>
2007-12-23  5:25         ` Marcelo Tosatti
2007-12-23  7:03           ` Avi Kivity
     [not found]             ` <476E083B.5040600-atKUWr5tajBWk0Htik3J/w@public.gmane.org>
2007-12-23  8:41               ` Avi Kivity
     [not found]                 ` <476E1F23.3020609-atKUWr5tajBWk0Htik3J/w@public.gmane.org>
2007-12-23  8:59                   ` Avi Kivity
     [not found]                     ` <476E236A.9000800-atKUWr5tajBWk0Htik3J/w@public.gmane.org>
2007-12-23  9:26                       ` Andrew Morton
     [not found]                         ` <20071223012618.e8cf8320.akpm-de/tnXTf+JLsfHDXvbKv3WD2FQJk+8+b@public.gmane.org>
2007-12-23 10:35                           ` Avi Kivity
     [not found]                             ` <476E39F2.4030202-atKUWr5tajBWk0Htik3J/w@public.gmane.org>
2007-12-23 10:52                               ` Andrew Morton
     [not found]                                 ` <20071223025245.d839c86d.akpm-de/tnXTf+JLsfHDXvbKv3WD2FQJk+8+b@public.gmane.org>
2007-12-23 11:01                                   ` Avi Kivity
     [not found]                                     ` <476E4005.90003-atKUWr5tajBWk0Htik3J/w@public.gmane.org>
2007-12-23 20:15                                       ` Marcelo Tosatti
2007-12-23 20:41                                         ` Andrew Morton
2007-12-24  6:56                                         ` Avi Kivity
     [not found]                                           ` <476F5801.2030002-atKUWr5tajBWk0Htik3J/w@public.gmane.org>
2007-12-24 14:26                                             ` Marcelo Tosatti
2007-12-24 14:36                                               ` Avi Kivity
2007-12-23 19:14                   ` Marcelo Tosatti
2007-12-24  6:50                     ` Avi Kivity
     [not found]                       ` <476F56A5.2020607-atKUWr5tajBWk0Htik3J/w@public.gmane.org>
2007-12-24 14:28                         ` Marcelo Tosatti
2007-12-24 14:34                           ` Avi Kivity
2007-12-21  0:18 ` [patch 4/5] KVM: export follow_page() Marcelo Tosatti
     [not found]   ` <20071221002024.423895760-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
2007-12-22 19:43     ` Avi Kivity
2007-12-21  0:18 ` Marcelo Tosatti [this message]
     [not found] ` <20071221001821.029994250-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
2007-12-22 19:46   ` [patch 0/5] KVM shadow scalability enhancements Avi Kivity

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20071221002024.502714562@redhat.com \
    --to=mtosatti-h+wxahxf7alqt0dzr+alfa@public.gmane.org \
    --cc=avi-atKUWr5tajBWk0Htik3J/w@public.gmane.org \
    --cc=kvm-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f@public.gmane.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox