* [PATCH] kvm mmu notifier
From: Andrea Arcangeli @ 2008-06-26 18:11 UTC
To: kvm
Hello,
this is the mmu notifier patch for kvm, updated according to the latest comments.
Signed-off-by: Andrea Arcangeli <andrea@qumranet.com>
diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
index 8d45fab..ce3251c 100644
--- a/arch/x86/kvm/Kconfig
+++ b/arch/x86/kvm/Kconfig
@@ -21,6 +21,7 @@ config KVM
tristate "Kernel-based Virtual Machine (KVM) support"
depends on HAVE_KVM
select PREEMPT_NOTIFIERS
+ select MMU_NOTIFIER
select ANON_INODES
---help---
Support hosting fully virtualized guest machines using hardware
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 1fd8e3b..2749de7 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -653,6 +653,98 @@ static void rmap_write_protect(struct kvm *kvm, u64 gfn)
account_shadowed(kvm, gfn);
}
+static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp)
+{
+ u64 *spte;
+ int need_tlb_flush = 0;
+
+ while ((spte = rmap_next(kvm, rmapp, NULL))) {
+ BUG_ON(!(*spte & PT_PRESENT_MASK));
+ rmap_printk("kvm_rmap_unmap_hva: spte %p %llx\n", spte, *spte);
+ rmap_remove(kvm, spte);
+ set_shadow_pte(spte, shadow_trap_nonpresent_pte);
+ need_tlb_flush = 1;
+ }
+ return need_tlb_flush;
+}
+
+int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
+{
+ int i;
+ int need_tlb_flush = 0;
+
+ /*
+ * If mmap_sem isn't taken, we can look the memslots with only
+ * the mmu_lock by skipping over the slots with userspace_addr == 0.
+ */
+ for (i = 0; i < kvm->nmemslots; i++) {
+ struct kvm_memory_slot *memslot = &kvm->memslots[i];
+ unsigned long start = memslot->userspace_addr;
+ unsigned long end;
+
+ /* mmu_lock protects userspace_addr */
+ if (!start)
+ continue;
+
+ end = start + (memslot->npages << PAGE_SHIFT);
+ if (hva >= start && hva < end) {
+ gfn_t gfn_offset = (hva - start) >> PAGE_SHIFT;
+ need_tlb_flush |= kvm_unmap_rmapp(kvm,
+ &memslot->rmap[gfn_offset]);
+ }
+ }
+
+ return need_tlb_flush;
+}
+
+static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp)
+{
+ u64 *spte;
+ int young = 0;
+
+ spte = rmap_next(kvm, rmapp, NULL);
+ while (spte) {
+ int _young;
+ u64 _spte = *spte;
+ BUG_ON(!(_spte & PT_PRESENT_MASK));
+ _young = _spte & PT_ACCESSED_MASK;
+ if (_young) {
+ young = 1;
+ clear_bit(PT_ACCESSED_SHIFT, (unsigned long *)spte);
+ }
+ spte = rmap_next(kvm, rmapp, spte);
+ }
+ return young;
+}
+
+int kvm_age_hva(struct kvm *kvm, unsigned long hva)
+{
+ int i;
+ int young = 0;
+
+ /*
+ * If mmap_sem isn't taken, we can look the memslots with only
+ * the mmu_lock by skipping over the slots with userspace_addr == 0.
+ */
+ for (i = 0; i < kvm->nmemslots; i++) {
+ struct kvm_memory_slot *memslot = &kvm->memslots[i];
+ unsigned long start = memslot->userspace_addr;
+ unsigned long end;
+
+ /* mmu_lock protects userspace_addr */
+ if (!start)
+ continue;
+
+ end = start + (memslot->npages << PAGE_SHIFT);
+ if (hva >= start && hva < end) {
+ gfn_t gfn_offset = (hva - start) >> PAGE_SHIFT;
+ young |= kvm_age_rmapp(kvm, &memslot->rmap[gfn_offset]);
+ }
+ }
+
+ return young;
+}
+
#ifdef MMU_DEBUG
static int is_empty_shadow_page(u64 *spt)
{
@@ -1199,6 +1291,7 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
int r;
int largepage = 0;
pfn_t pfn;
+ int mmu_seq;
down_read(&current->mm->mmap_sem);
if (is_largepage_backed(vcpu, gfn & ~(KVM_PAGES_PER_HPAGE-1))) {
@@ -1206,6 +1299,8 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
largepage = 1;
}
+ mmu_seq = atomic_read(&vcpu->kvm->mmu_notifier_seq);
+ /* implicit mb(), we'll read before PT lock is unlocked */
pfn = gfn_to_pfn(vcpu->kvm, gfn);
up_read(&current->mm->mmap_sem);
@@ -1216,6 +1311,11 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
}
spin_lock(&vcpu->kvm->mmu_lock);
+ if (unlikely(atomic_read(&vcpu->kvm->mmu_notifier_count)))
+ goto out_unlock;
+ smp_rmb();
+ if (unlikely(atomic_read(&vcpu->kvm->mmu_notifier_seq) != mmu_seq))
+ goto out_unlock;
kvm_mmu_free_some_pages(vcpu);
r = __direct_map(vcpu, v, write, largepage, gfn, pfn,
PT32E_ROOT_LEVEL);
@@ -1223,6 +1323,11 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
return r;
+
+out_unlock:
+ spin_unlock(&vcpu->kvm->mmu_lock);
+ kvm_release_pfn_clean(pfn);
+ return 0;
}
@@ -1341,6 +1446,7 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa,
int r;
int largepage = 0;
gfn_t gfn = gpa >> PAGE_SHIFT;
+ int mmu_seq;
ASSERT(vcpu);
ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));
@@ -1354,6 +1460,8 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa,
gfn &= ~(KVM_PAGES_PER_HPAGE-1);
largepage = 1;
}
+ mmu_seq = atomic_read(&vcpu->kvm->mmu_notifier_seq);
+ /* implicit mb(), we'll read before PT lock is unlocked */
pfn = gfn_to_pfn(vcpu->kvm, gfn);
up_read(&current->mm->mmap_sem);
if (is_error_pfn(pfn)) {
@@ -1361,12 +1469,22 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa,
return 1;
}
spin_lock(&vcpu->kvm->mmu_lock);
+ if (unlikely(atomic_read(&vcpu->kvm->mmu_notifier_count)))
+ goto out_unlock;
+ smp_rmb();
+ if (unlikely(atomic_read(&vcpu->kvm->mmu_notifier_seq) != mmu_seq))
+ goto out_unlock;
kvm_mmu_free_some_pages(vcpu);
r = __direct_map(vcpu, gpa, error_code & PFERR_WRITE_MASK,
largepage, gfn, pfn, kvm_x86_ops->get_tdp_level());
spin_unlock(&vcpu->kvm->mmu_lock);
return r;
+
+out_unlock:
+ spin_unlock(&vcpu->kvm->mmu_lock);
+ kvm_release_pfn_clean(pfn);
+ return 0;
}
static void nonpaging_free(struct kvm_vcpu *vcpu)
@@ -1666,6 +1784,8 @@ static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
gfn &= ~(KVM_PAGES_PER_HPAGE-1);
vcpu->arch.update_pte.largepage = 1;
}
+ vcpu->arch.update_pte.mmu_seq = atomic_read(&vcpu->kvm->mmu_notifier_seq);
+ /* implicit mb(), we'll read before PT lock is unlocked */
pfn = gfn_to_pfn(vcpu->kvm, gfn);
up_read(&current->mm->mmap_sem);
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 4d91822..38f81c4 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -263,6 +263,12 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
pfn = vcpu->arch.update_pte.pfn;
if (is_error_pfn(pfn))
return;
+ if (unlikely(atomic_read(&vcpu->kvm->mmu_notifier_count)))
+ return;
+ smp_rmb();
+ if (unlikely(atomic_read(&vcpu->kvm->mmu_notifier_seq) !=
+ vcpu->arch.update_pte.mmu_seq))
+ return;
kvm_get_pfn(pfn);
mmu_set_spte(vcpu, spte, page->role.access, pte_access, 0, 0,
gpte & PT_DIRTY_MASK, NULL, largepage, gpte_to_gfn(gpte),
@@ -380,6 +386,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
int r;
pfn_t pfn;
int largepage = 0;
+ int mmu_seq;
pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
kvm_mmu_audit(vcpu, "pre page fault");
@@ -413,6 +420,8 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
largepage = 1;
}
}
+ mmu_seq = atomic_read(&vcpu->kvm->mmu_notifier_seq);
+ /* implicit mb(), we'll read before PT lock is unlocked */
pfn = gfn_to_pfn(vcpu->kvm, walker.gfn);
up_read(&current->mm->mmap_sem);
@@ -424,6 +433,11 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
}
spin_lock(&vcpu->kvm->mmu_lock);
+ if (unlikely(atomic_read(&vcpu->kvm->mmu_notifier_count)))
+ goto out_unlock;
+ smp_rmb();
+ if (unlikely(atomic_read(&vcpu->kvm->mmu_notifier_seq) != mmu_seq))
+ goto out_unlock;
kvm_mmu_free_some_pages(vcpu);
shadow_pte = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
largepage, &write_pt, pfn);
@@ -439,6 +453,11 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
spin_unlock(&vcpu->kvm->mmu_lock);
return write_pt;
+
+out_unlock:
+ spin_unlock(&vcpu->kvm->mmu_lock);
+ kvm_release_pfn_clean(pfn);
+ return 0;
}
static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 0fbc032..73430c0 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -27,6 +27,7 @@
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/highmem.h>
+#include <linux/mmu_notifier.h>
#include <asm/uaccess.h>
#include <asm/msr.h>
diff --git a/include/asm-x86/kvm_host.h b/include/asm-x86/kvm_host.h
index c64d124..8547544 100644
--- a/include/asm-x86/kvm_host.h
+++ b/include/asm-x86/kvm_host.h
@@ -13,6 +13,7 @@
#include <linux/types.h>
#include <linux/mm.h>
+#include <linux/mmu_notifier.h>
#include <linux/kvm.h>
#include <linux/kvm_para.h>
@@ -251,6 +252,7 @@ struct kvm_vcpu_arch {
gfn_t gfn; /* presumed gfn during guest pte update */
pfn_t pfn; /* pfn corresponding to that gfn */
int largepage;
+ int mmu_seq;
} update_pte;
struct i387_fxsave_struct host_fx_image;
@@ -450,6 +452,9 @@ void kvm_mmu_set_base_ptes(u64 base_pte);
void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
u64 dirty_mask, u64 nx_mask, u64 x_mask);
+#define KVM_ARCH_WANT_MMU_NOTIFIER
+int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
+int kvm_age_hva(struct kvm *kvm, unsigned long hva);
int kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot);
void kvm_mmu_zap_all(struct kvm *kvm);
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index d220b49..a79aaa0 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -121,6 +121,12 @@ struct kvm {
struct kvm_coalesced_mmio_dev *coalesced_mmio_dev;
struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
#endif
+
+#ifdef KVM_ARCH_WANT_MMU_NOTIFIER
+ struct mmu_notifier mmu_notifier;
+ atomic_t mmu_notifier_seq;
+ atomic_t mmu_notifier_count;
+#endif
};
/* The guest did something we don't support. */
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index f9427e2..ac719df 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -186,6 +186,136 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
}
EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);
+#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
+static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
+{
+ return container_of(mn, struct kvm, mmu_notifier);
+}
+
+static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
+ struct mm_struct *mm,
+ unsigned long address)
+{
+ struct kvm *kvm = mmu_notifier_to_kvm(mn);
+ int need_tlb_flush;
+
+ /*
+ * When ->invalidate_page runs, the linux pte has been zapped
+ * already but the page is still allocated until
+ * ->invalidate_page returns. So if we increase the sequence
+ * here the kvm page fault will notice if the spte can't be
+ * established because the page is going to be freed. If
+ * instead the kvm page fault establishes the spte before
+ * ->invalidate_page runs, kvm_unmap_hva will release it
+ * before returning.
+ *
+ * No need of memory barriers as the sequence increase only
+ * need to be seen at spin_unlock time, and not at spin_lock
+ * time.
+ *
+ * Increasing the sequence after the spin_unlock would be
+ * unsafe because the kvm page fault could then establish the
+ * pte after kvm_unmap_hva returned, without noticing the page
+ * is going to be freed.
+ */
+ atomic_inc(&kvm->mmu_notifier_seq);
+ spin_lock(&kvm->mmu_lock);
+ need_tlb_flush = kvm_unmap_hva(kvm, address);
+ spin_unlock(&kvm->mmu_lock);
+
+ /* we've to flush the tlb before the pages can be freed */
+ if (need_tlb_flush)
+ kvm_flush_remote_tlbs(kvm);
+
+}
+
+static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
+ struct mm_struct *mm,
+ unsigned long start,
+ unsigned long end)
+{
+ struct kvm *kvm = mmu_notifier_to_kvm(mn);
+ int need_tlb_flush = 0;
+
+ /*
+ * The count increase must become visible at unlock time as no
+ * spte can be established without taking the mmu_lock and
+ * count is also read inside the mmu_lock critical section.
+ */
+ atomic_inc(&kvm->mmu_notifier_count);
+
+ spin_lock(&kvm->mmu_lock);
+ for (; start < end; start += PAGE_SIZE)
+ need_tlb_flush |= kvm_unmap_hva(kvm, start);
+ spin_unlock(&kvm->mmu_lock);
+
+ /* we've to flush the tlb before the pages can be freed */
+ if (need_tlb_flush)
+ kvm_flush_remote_tlbs(kvm);
+}
+
+static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
+ struct mm_struct *mm,
+ unsigned long start,
+ unsigned long end)
+{
+ struct kvm *kvm = mmu_notifier_to_kvm(mn);
+ /*
+ *
+ * This sequence increase will notify the kvm page fault that
+ * the page that is going to be mapped in the spte could have
+ * been freed.
+ *
+ * There's also an implicit mb() here in this comment,
+ * provided by the last PT lock taken to zap pagetables, and
+ * that the read side has to take too in follow_page(). The
+ * sequence increase in the worst case will become visible to
+ * the kvm page fault after the spin_lock of the last PT lock
+ * of the last PT-lock-protected critical section preceding
+ * invalidate_range_end. So if the kvm page fault is about to
+ * establish the spte inside the mmu_lock, while we're freeing
+ * the pages, it will have to backoff and when it retries, it
+ * will have to take the PT lock before it can check the
+ * pagetables again. And after taking the PT lock it will
+ * re-establish the pte even if it will see the already
+ * increased sequence number before calling gfn_to_pfn.
+ */
+ atomic_inc(&kvm->mmu_notifier_seq);
+ /*
+ * The sequence increase must be visible before count
+ * decrease. The page fault has to read count before sequence
+ * for this write order to be effective.
+ */
+ wmb();
+ atomic_dec(&kvm->mmu_notifier_count);
+ BUG_ON(atomic_read(&kvm->mmu_notifier_count) < 0);
+}
+
+static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
+ struct mm_struct *mm,
+ unsigned long address)
+{
+ struct kvm *kvm = mmu_notifier_to_kvm(mn);
+ int young;
+
+ spin_lock(&kvm->mmu_lock);
+ young = kvm_age_hva(kvm, address);
+ spin_unlock(&kvm->mmu_lock);
+
+ if (young)
+ kvm_flush_remote_tlbs(kvm);
+
+ return young;
+}
+
+static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
+ .invalidate_page = kvm_mmu_notifier_invalidate_page,
+ .invalidate_range_start = kvm_mmu_notifier_invalidate_range_start,
+ .invalidate_range_end = kvm_mmu_notifier_invalidate_range_end,
+ .clear_flush_young = kvm_mmu_notifier_clear_flush_young,
+};
+#endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */
+
static struct kvm *kvm_create_vm(void)
{
struct kvm *kvm = kvm_arch_create_vm();
@@ -206,6 +336,21 @@ static struct kvm *kvm_create_vm(void)
(struct kvm_coalesced_mmio_ring *)page_address(page);
#endif
+#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
+ {
+ int err;
+ kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
+ err = mmu_notifier_register(&kvm->mmu_notifier, current->mm);
+ if (err) {
+#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
+ put_page(page);
+#endif
+ kfree(kvm);
+ return ERR_PTR(err);
+ }
+ }
+#endif
+
kvm->mm = current->mm;
atomic_inc(&kvm->mm->mm_count);
spin_lock_init(&kvm->mmu_lock);
@@ -266,6 +411,9 @@ static void kvm_destroy_vm(struct kvm *kvm)
if (kvm->coalesced_mmio_ring != NULL)
free_page((unsigned long)kvm->coalesced_mmio_ring);
#endif
+#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
+ mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
+#endif
kvm_arch_destroy_vm(kvm);
mmdrop(mm);
}
This is the kvm-userland compatibility bit to build against old kernels without mmu notifiers.
Signed-off-by: Andrea Arcangeli <andrea@qumranet.com>
diff --git a/kernel/include-compat/linux/mmu_notifier.h b/kernel/include-compat/linux/mmu_notifier.h
new file mode 100644
index 0000000..a6db4ba
--- /dev/null
+++ b/kernel/include-compat/linux/mmu_notifier.h
@@ -0,0 +1,6 @@
+#ifndef _LINUX_MMU_NOTIFIER_H
+#define _LINUX_MMU_NOTIFIER_H
+
+struct mmu_notifier {};
+
+#endif
* Re: [PATCH] kvm mmu notifier
From: Marcelo Tosatti @ 2008-06-27 22:43 UTC
To: Andrea Arcangeli; +Cc: kvm
Hi Andrea,
A few comments below.
On Thu, Jun 26, 2008 at 08:11:16PM +0200, Andrea Arcangeli wrote:
> + }
> + return need_tlb_flush;
> +}
> +
> +int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
> +{
> + int i;
> + int need_tlb_flush = 0;
> +
> + /*
> + * If mmap_sem isn't taken, we can look the memslots with only
> + * the mmu_lock by skipping over the slots with userspace_addr == 0.
> + */
> + for (i = 0; i < kvm->nmemslots; i++) {
> + struct kvm_memory_slot *memslot = &kvm->memslots[i];
> + unsigned long start = memslot->userspace_addr;
> + unsigned long end;
> +
> + /* mmu_lock protects userspace_addr */
> + if (!start)
> + continue;
> +int kvm_age_hva(struct kvm *kvm, unsigned long hva)
> +{
> + int i;
> + int young = 0;
> +
> + /*
> + * If mmap_sem isn't taken, we can look the memslots with only
> + * the mmu_lock by skipping over the slots with userspace_addr == 0.
> + */
> + for (i = 0; i < kvm->nmemslots; i++) {
> + struct kvm_memory_slot *memslot = &kvm->memslots[i];
> + unsigned long start = memslot->userspace_addr;
> + unsigned long end;
> +
> + /* mmu_lock protects userspace_addr */
> + if (!start)
> + continue;
These two functions share the same memslot iteration code; you could avoid the duplication.
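Something along these lines (untested, the helper name is made up) would let both callers share the walk:

static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
			  int (*handler)(struct kvm *kvm, unsigned long *rmapp))
{
	int i, retval = 0;

	for (i = 0; i < kvm->nmemslots; i++) {
		struct kvm_memory_slot *memslot = &kvm->memslots[i];
		unsigned long start = memslot->userspace_addr;

		/* mmu_lock protects userspace_addr */
		if (!start)
			continue;
		if (hva >= start &&
		    hva < start + (memslot->npages << PAGE_SHIFT))
			/* pass the rmap chain of the gfn backing this hva */
			retval |= handler(kvm,
				&memslot->rmap[(hva - start) >> PAGE_SHIFT]);
	}
	return retval;
}

kvm_unmap_hva() and kvm_age_hva() would then be one-line wrappers passing kvm_unmap_rmapp and kvm_age_rmapp respectively.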
> + if (unlikely(atomic_read(&vcpu->kvm->mmu_notifier_count)))
> + return;
> + smp_rmb();
I don't think you need smp_rmb() on x86 since atomic operations
serialize. barrier() should suffice.
> spin_lock(&vcpu->kvm->mmu_lock);
> + if (unlikely(atomic_read(&vcpu->kvm->mmu_notifier_count)))
> + goto out_unlock;
> + smp_rmb();
> + if (unlikely(atomic_read(&vcpu->kvm->mmu_notifier_seq) != mmu_seq))
> + goto out_unlock;
Wrap this sequence in a well documented function?
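Something like this (just a sketch, the name is made up), to be called with mmu_lock held:

/*
 * Return 1 if the spte must not be installed: either an invalidate is
 * in flight (mmu_notifier_count elevated), or a whole invalidate ran
 * between gfn_to_pfn() and the mmu_lock acquisition and bumped the
 * sequence number, so the pfn we hold may already be stale.
 */
static inline int mmu_notifier_retry(struct kvm_vcpu *vcpu, int mmu_seq)
{
	if (unlikely(atomic_read(&vcpu->kvm->mmu_notifier_count)))
		return 1;
	/* read count before seq, pairing with the wmb() in range_end */
	smp_rmb();
	if (unlikely(atomic_read(&vcpu->kvm->mmu_notifier_seq) != mmu_seq))
		return 1;
	return 0;
}

Then the page fault paths would just do "if (mmu_notifier_retry(vcpu, mmu_seq)) goto out_unlock;".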
> +static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
> + struct mm_struct *mm,
> + unsigned long start,
> + unsigned long end)
> +{
> + struct kvm *kvm = mmu_notifier_to_kvm(mn);
> + int need_tlb_flush = 0;
> +
> + /*
> + * The count increase must become visible at unlock time as no
> + * spte can be established without taking the mmu_lock and
> + * count is also read inside the mmu_lock critical section.
> + */
> + atomic_inc(&kvm->mmu_notifier_count);
> +
> + spin_lock(&kvm->mmu_lock);
> + for (; start < end; start += PAGE_SIZE)
> + need_tlb_flush |= kvm_unmap_hva(kvm, start);
> + spin_unlock(&kvm->mmu_lock);
You don't handle large mappings here at all, which means that there
might be external mappings even after ->range_start, ->range_end.
This is not a problem now because QEMU kills all the shadow mappings
before munmap() on hugetlbfs, but it will be a practical problem if
ballooning supports largepages (which will probably happen in the
future), or with fancy hugetlb features.
> + atomic_inc(&kvm->mmu_notifier_seq);
> + /*
> + * The sequence increase must be visible before count
> + * decrease. The page fault has to read count before sequence
> + * for this write order to be effective.
> + */
> + wmb();
smp_mb_after_atomic_inc ?
> +static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
> + struct mm_struct *mm,
> + unsigned long address)
> +{
> + struct kvm *kvm = mmu_notifier_to_kvm(mn);
> + int young;
> +
> + spin_lock(&kvm->mmu_lock);
> + young = kvm_age_hva(kvm, address);
> + spin_unlock(&kvm->mmu_lock);
> +
> + if (young)
> + kvm_flush_remote_tlbs(kvm);
Is it worth flushing remote TLBs just because of the young bit? Aging can happen often.
- mmu_notifier_count could be a non-atomic type (range_end() does not grab
mmu_lock but could).
- why does the MMU notifier API pass mm_struct instead of vma? As it stands, VM pte aging/swapping/nuking of QEMU non-guest mappings interferes with guest pagefault processing for no reason.
- isn't the logic susceptible to mmu_seq wraparound? :-)
* Re: [PATCH] kvm mmu notifier
From: Andrea Arcangeli @ 2008-06-28 1:17 UTC
To: Marcelo Tosatti; +Cc: kvm
On Fri, Jun 27, 2008 at 07:43:17PM -0300, Marcelo Tosatti wrote:
> These two functions share the same memslot iteration code, you could
> avoid duplication.
Ok, I'll look into cleaning this up a bit.
> > + if (unlikely(atomic_read(&vcpu->kvm->mmu_notifier_count)))
> > + return;
> > + smp_rmb();
>
> I don't think you need smp_rmb() on x86 since atomic operations
> serialize. barrier() should suffice.
atomic_read doesn't issue any atomic op, and reads are out of order on
x86. So it's needed.
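To spell out the pairing the patch relies on (lifted straight from the patch, just put side by side):

	/* invalidate_range_end (writer) */
	atomic_inc(&kvm->mmu_notifier_seq);
	wmb();		/* seq increase visible before count decrease */
	atomic_dec(&kvm->mmu_notifier_count);

	/* page fault (reader, under mmu_lock) */
	if (unlikely(atomic_read(&vcpu->kvm->mmu_notifier_count)))
		goto out_unlock;
	smp_rmb();	/* read count before seq, pairing with the wmb() above */
	if (unlikely(atomic_read(&vcpu->kvm->mmu_notifier_seq) != mmu_seq))
		goto out_unlock;

If the reader could see the count already decreased but still read the old seq, it would conclude no invalidate ran and could install a spte pointing to a page that is being freed.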
> Wrap this sequence in a well documented function?
It would be a nice cleanup, agreed.
> > +static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
> > + struct mm_struct *mm,
> > + unsigned long start,
> > + unsigned long end)
> > +{
> > + struct kvm *kvm = mmu_notifier_to_kvm(mn);
> > + int need_tlb_flush = 0;
> > +
> > + /*
> > + * The count increase must become visible at unlock time as no
> > + * spte can be established without taking the mmu_lock and
> > + * count is also read inside the mmu_lock critical section.
> > + */
> > + atomic_inc(&kvm->mmu_notifier_count);
> > +
> > + spin_lock(&kvm->mmu_lock);
> > + for (; start < end; start += PAGE_SIZE)
> > + need_tlb_flush |= kvm_unmap_hva(kvm, start);
> > + spin_unlock(&kvm->mmu_lock);
>
> You don't handle large mappings here at all, which means that there
> might be external mappings even after ->range_start, ->range_end.
Well, if I don't handle large mappings it won't be because of the above function. The above function is common code; it doesn't know anything about the low-level spte implementation yet. If anything, it's the business of kvm_unmap_hva to handle large mappings. I thought it did, but maybe not, and then we need to fix it.
> This is not a problem now because QEMU kills all the shadow mappings
> before munmap() on hugetlbfs, but it will be a practical problem if
> ballooning supports largepages (which will probably happen in the
> future), or with fancy hugetlb features.
We need to handle large mappings too, especially if we intend to
remove page pinning from them as well.
> > + atomic_inc(&kvm->mmu_notifier_seq);
> > + /*
> > + * The sequence increase must be visible before count
> > + * decrease. The page fault has to read count before sequence
> > + * for this write order to be effective.
> > + */
> > + wmb();
>
> smp_mb_after_atomic_inc ?
Surely more fine-grained, agreed.
> Is it worth to flush remote TLB's just due to the young bit? Aging
> can happen often.
This is more important for the linux ptes than the sptes. Because we do it for the main pagetables, I do it for the secondary ones too, to be consistent. For both, if we don't flush the tlb, the young bit will never be set again until the entry leaves the tlb for other reasons.
> - mmu_notifier_count could be a non-atomic type (range_end() does not grab
> mmu_lock but could).
Yes, I considered this but I preferred not to take the lock in
range_end. Not sure if it makes any real difference but this looks
nicer to me.
> - why the MMU notifier API pass mm_struct instead of vma ? As it stands,
> VM pte aging/swapping/nuking of QEMU non-guest mappings interferes with
> guest pagefault processing for no reason.
It doesn't interfere with guest page faults, with the special exception of do_wp_page, which is by far not a fast path for KVM unless we truly need the mmu notifier invoked anyway. The memslot <-> vma mapping isn't an identity. Registering in the vma would waste more ram and complicate the unregister event. Initially we considered doing it in the vma, but that idea was obsoleted quickly.
> - isnt the logic susceptible to mmu_seq wraparound ? :-)
You mean a false negative because 4G invalidate events happened in
between get_user_pages and the spte establishment? I doubt it's a
practical matter, but I like theoretically safe things, so that would be
the best argument to make mmu_notifier_seq 64bit instead of an
atomic_t.
* Re: [PATCH] kvm mmu notifier
From: Andrea Arcangeli @ 2008-07-03 15:17 UTC
To: Marcelo Tosatti; +Cc: kvm
Hi Marcelo and everyone,
I cleaned up the mmu notifier kvm patch. I also like having a mathematical guarantee that there won't be an overflow (gfn_to_pfn may be slow compared to a flood of invalidate_page calls unmapping ptes), so I changed the counter to long and the sequence number to unsigned long. I'm quite sure overflow was already more than impossible, but going 64bit guarantees it completely. There isn't any significant downside to doing so; reads had to be protected anyway, so we lose nothing on the read side. The spinlock was already taken by everything but range_end, and it doesn't change anything whether the counter is increased inside or outside the spinlock, so the only difference is that I had to take the spinlock in range_end too (which is a fairly slow path, and the lock may already be hot because range_start took it just before), so it won't make much difference and it simplifies the logic a lot (now all sequence number/counter updates are under the spinlock and all the smp_wmb/rmb calls go away).
Given that mmu notifiers are in -mm and everyone seems to agree on them being merged during the .27 merge window, I think it'd be fairly safe to apply the kvm mmu notifier patch.
I'm testing this both on mmu-notifier-capable 2.6.26-rc7 and on 2.6.24-r8, which is my "stable" kernel of choice right now without mmu notifiers, and it works fine on both.
Signed-off-by: Andrea Arcangeli <andrea@qumranet.com>
diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
index 8d45fab..ce3251c 100644
--- a/arch/x86/kvm/Kconfig
+++ b/arch/x86/kvm/Kconfig
@@ -21,6 +21,7 @@ config KVM
tristate "Kernel-based Virtual Machine (KVM) support"
depends on HAVE_KVM
select PREEMPT_NOTIFIERS
+ select MMU_NOTIFIER
select ANON_INODES
---help---
Support hosting fully virtualized guest machines using hardware
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 1fd8e3b..c8f9c4d 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -653,6 +653,80 @@ static void rmap_write_protect(struct kvm *kvm, u64 gfn)
account_shadowed(kvm, gfn);
}
+static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp)
+{
+ u64 *spte;
+ int need_tlb_flush = 0;
+
+ while ((spte = rmap_next(kvm, rmapp, NULL))) {
+ BUG_ON(!(*spte & PT_PRESENT_MASK));
+ rmap_printk("kvm_rmap_unmap_hva: spte %p %llx\n", spte, *spte);
+ rmap_remove(kvm, spte);
+ set_shadow_pte(spte, shadow_trap_nonpresent_pte);
+ need_tlb_flush = 1;
+ }
+ return need_tlb_flush;
+}
+
+static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
+ int (*handler)(struct kvm *kvm, unsigned long *rmapp))
+{
+ int i;
+ int retval = 0;
+
+ /*
+ * If mmap_sem isn't taken, we can look the memslots with only
+ * the mmu_lock by skipping over the slots with userspace_addr == 0.
+ */
+ for (i = 0; i < kvm->nmemslots; i++) {
+ struct kvm_memory_slot *memslot = &kvm->memslots[i];
+ unsigned long start = memslot->userspace_addr;
+ unsigned long end;
+
+ /* mmu_lock protects userspace_addr */
+ if (!start)
+ continue;
+
+ end = start + (memslot->npages << PAGE_SHIFT);
+ if (hva >= start && hva < end) {
+ gfn_t gfn_offset = (hva - start) >> PAGE_SHIFT;
+ retval |= handler(kvm, &memslot->rmap[gfn_offset]);
+ }
+ }
+
+ return retval;
+}
+
+int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
+{
+ return kvm_handle_hva(kvm, hva, kvm_unmap_rmapp);
+}
+
+static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp)
+{
+ u64 *spte;
+ int young = 0;
+
+ spte = rmap_next(kvm, rmapp, NULL);
+ while (spte) {
+ int _young;
+ u64 _spte = *spte;
+ BUG_ON(!(_spte & PT_PRESENT_MASK));
+ _young = _spte & PT_ACCESSED_MASK;
+ if (_young) {
+ young = 1;
+ clear_bit(PT_ACCESSED_SHIFT, (unsigned long *)spte);
+ }
+ spte = rmap_next(kvm, rmapp, spte);
+ }
+ return young;
+}
+
+int kvm_age_hva(struct kvm *kvm, unsigned long hva)
+{
+ return kvm_handle_hva(kvm, hva, kvm_age_rmapp);
+}
+
#ifdef MMU_DEBUG
static int is_empty_shadow_page(u64 *spt)
{
@@ -1199,6 +1273,7 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
int r;
int largepage = 0;
pfn_t pfn;
+ unsigned long mmu_seq;
down_read(&current->mm->mmap_sem);
if (is_largepage_backed(vcpu, gfn & ~(KVM_PAGES_PER_HPAGE-1))) {
@@ -1206,6 +1281,8 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
largepage = 1;
}
+ mmu_seq = vcpu->kvm->mmu_notifier_seq;
+ /* implicit mb(), we'll read before PT lock is unlocked */
pfn = gfn_to_pfn(vcpu->kvm, gfn);
up_read(&current->mm->mmap_sem);
@@ -1216,6 +1293,8 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
}
spin_lock(&vcpu->kvm->mmu_lock);
+ if (mmu_notifier_retry(vcpu, mmu_seq))
+ goto out_unlock;
kvm_mmu_free_some_pages(vcpu);
r = __direct_map(vcpu, v, write, largepage, gfn, pfn,
PT32E_ROOT_LEVEL);
@@ -1223,6 +1302,11 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
return r;
+
+out_unlock:
+ spin_unlock(&vcpu->kvm->mmu_lock);
+ kvm_release_pfn_clean(pfn);
+ return 0;
}
@@ -1341,6 +1425,7 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa,
int r;
int largepage = 0;
gfn_t gfn = gpa >> PAGE_SHIFT;
+ unsigned long mmu_seq;
ASSERT(vcpu);
ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));
@@ -1354,6 +1439,8 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa,
gfn &= ~(KVM_PAGES_PER_HPAGE-1);
largepage = 1;
}
+ mmu_seq = vcpu->kvm->mmu_notifier_seq;
+ /* implicit mb(), we'll read before PT lock is unlocked */
pfn = gfn_to_pfn(vcpu->kvm, gfn);
up_read(&current->mm->mmap_sem);
if (is_error_pfn(pfn)) {
@@ -1361,12 +1448,19 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa,
return 1;
}
spin_lock(&vcpu->kvm->mmu_lock);
+ if (mmu_notifier_retry(vcpu, mmu_seq))
+ goto out_unlock;
kvm_mmu_free_some_pages(vcpu);
r = __direct_map(vcpu, gpa, error_code & PFERR_WRITE_MASK,
largepage, gfn, pfn, kvm_x86_ops->get_tdp_level());
spin_unlock(&vcpu->kvm->mmu_lock);
return r;
+
+out_unlock:
+ spin_unlock(&vcpu->kvm->mmu_lock);
+ kvm_release_pfn_clean(pfn);
+ return 0;
}
static void nonpaging_free(struct kvm_vcpu *vcpu)
@@ -1666,6 +1760,8 @@ static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
gfn &= ~(KVM_PAGES_PER_HPAGE-1);
vcpu->arch.update_pte.largepage = 1;
}
+ vcpu->arch.update_pte.mmu_seq = vcpu->kvm->mmu_notifier_seq;
+ /* implicit mb(), we'll read before PT lock is unlocked */
pfn = gfn_to_pfn(vcpu->kvm, gfn);
up_read(&current->mm->mmap_sem);
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 4d91822..f72ac1f 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -263,6 +263,8 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
pfn = vcpu->arch.update_pte.pfn;
if (is_error_pfn(pfn))
return;
+ if (mmu_notifier_retry(vcpu, vcpu->arch.update_pte.mmu_seq))
+ return;
kvm_get_pfn(pfn);
mmu_set_spte(vcpu, spte, page->role.access, pte_access, 0, 0,
gpte & PT_DIRTY_MASK, NULL, largepage, gpte_to_gfn(gpte),
@@ -380,6 +382,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
int r;
pfn_t pfn;
int largepage = 0;
+ unsigned long mmu_seq;
pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
kvm_mmu_audit(vcpu, "pre page fault");
@@ -413,6 +416,8 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
largepage = 1;
}
}
+ mmu_seq = vcpu->kvm->mmu_notifier_seq;
+ /* implicit mb(), we'll read before PT lock is unlocked */
pfn = gfn_to_pfn(vcpu->kvm, walker.gfn);
up_read(&current->mm->mmap_sem);
@@ -424,6 +429,8 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
}
spin_lock(&vcpu->kvm->mmu_lock);
+ if (mmu_notifier_retry(vcpu, mmu_seq))
+ goto out_unlock;
kvm_mmu_free_some_pages(vcpu);
shadow_pte = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
largepage, &write_pt, pfn);
@@ -439,6 +446,11 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
spin_unlock(&vcpu->kvm->mmu_lock);
return write_pt;
+
+out_unlock:
+ spin_unlock(&vcpu->kvm->mmu_lock);
+ kvm_release_pfn_clean(pfn);
+ return 0;
}
static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr)
diff --git a/include/asm-x86/kvm_host.h b/include/asm-x86/kvm_host.h
index bae1b76..79837b3 100644
--- a/include/asm-x86/kvm_host.h
+++ b/include/asm-x86/kvm_host.h
@@ -13,6 +13,7 @@
#include <linux/types.h>
#include <linux/mm.h>
+#include <linux/mmu_notifier.h>
#include <linux/kvm.h>
#include <linux/kvm_para.h>
@@ -257,6 +258,7 @@ struct kvm_vcpu_arch {
gfn_t gfn; /* presumed gfn during guest pte update */
pfn_t pfn; /* pfn corresponding to that gfn */
int largepage;
+ unsigned long mmu_seq;
} update_pte;
struct i387_fxsave_struct host_fx_image;
@@ -733,4 +735,8 @@ asmlinkage void kvm_handle_fault_on_reboot(void);
KVM_EX_ENTRY " 666b, 667b \n\t" \
".popsection"
+#define KVM_ARCH_WANT_MMU_NOTIFIER
+int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
+int kvm_age_hva(struct kvm *kvm, unsigned long hva);
+
#endif
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index d220b49..d5fb771 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -121,6 +121,12 @@ struct kvm {
struct kvm_coalesced_mmio_dev *coalesced_mmio_dev;
struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
#endif
+
+#ifdef KVM_ARCH_WANT_MMU_NOTIFIER
+ struct mmu_notifier mmu_notifier;
+ unsigned long mmu_notifier_seq;
+ long mmu_notifier_count;
+#endif
};
/* The guest did something we don't support. */
@@ -331,4 +337,22 @@ int kvm_trace_ioctl(unsigned int ioctl, unsigned long arg)
#define kvm_trace_cleanup() ((void)0)
#endif
+#ifdef KVM_ARCH_WANT_MMU_NOTIFIER
+static inline int mmu_notifier_retry(struct kvm_vcpu *vcpu, unsigned long mmu_seq)
+{
+ if (unlikely(vcpu->kvm->mmu_notifier_count))
+ return 1;
+ /*
+ * Both reads happen under the mmu_lock and both values are
+ * modified under mmu_lock, so there's no need for smp_rmb()
+ * here in between, otherwise mmu_notifier_count should be
+ * read before mmu_notifier_seq, see
+ * mmu_notifier_invalidate_range_end write side.
+ */
+ if (vcpu->kvm->mmu_notifier_seq != mmu_seq)
+ return 1;
+ return 0;
+}
+#endif
+
#endif
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index b90da0b..221dac2 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -186,6 +186,123 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
}
EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);
+#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
+static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
+{
+ return container_of(mn, struct kvm, mmu_notifier);
+}
+
+static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
+ struct mm_struct *mm,
+ unsigned long address)
+{
+ struct kvm *kvm = mmu_notifier_to_kvm(mn);
+ int need_tlb_flush;
+
+ /*
+ * When ->invalidate_page runs, the linux pte has been zapped
+ * already but the page is still allocated until
+ * ->invalidate_page returns. So if we increase the sequence
+ * here the kvm page fault will notice if the spte can't be
+ * established because the page is going to be freed. If
+ * instead the kvm page fault establishes the spte before
+ * ->invalidate_page runs, kvm_unmap_hva will release it
+ * before returning.
+ *
+ * The sequence increase only need to be seen at spin_unlock
+ * time, and not at spin_lock time.
+ *
+ * Increasing the sequence after the spin_unlock would be
+ * unsafe because the kvm page fault could then establish the
+ * pte after kvm_unmap_hva returned, without noticing the page
+ * is going to be freed.
+ */
+ spin_lock(&kvm->mmu_lock);
+ kvm->mmu_notifier_seq++;
+ need_tlb_flush = kvm_unmap_hva(kvm, address);
+ spin_unlock(&kvm->mmu_lock);
+
+ /* we've to flush the tlb before the pages can be freed */
+ if (need_tlb_flush)
+ kvm_flush_remote_tlbs(kvm);
+
+}
+
+static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
+ struct mm_struct *mm,
+ unsigned long start,
+ unsigned long end)
+{
+ struct kvm *kvm = mmu_notifier_to_kvm(mn);
+ int need_tlb_flush = 0;
+
+ spin_lock(&kvm->mmu_lock);
+ /*
+ * The count increase must become visible at unlock time as no
+ * spte can be established without taking the mmu_lock and
+ * count is also read inside the mmu_lock critical section.
+ */
+ kvm->mmu_notifier_count++;
+ for (; start < end; start += PAGE_SIZE)
+ need_tlb_flush |= kvm_unmap_hva(kvm, start);
+ spin_unlock(&kvm->mmu_lock);
+
+ /* we've to flush the tlb before the pages can be freed */
+ if (need_tlb_flush)
+ kvm_flush_remote_tlbs(kvm);
+}
+
+static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
+ struct mm_struct *mm,
+ unsigned long start,
+ unsigned long end)
+{
+ struct kvm *kvm = mmu_notifier_to_kvm(mn);
+
+ spin_lock(&kvm->mmu_lock);
+ /*
+ * This sequence increase will notify the kvm page fault that
+ * the page that is going to be mapped in the spte could have
+ * been freed.
+ */
+ kvm->mmu_notifier_seq++;
+ /*
+ * The above sequence increase must be visible before the
+ * below count decrease but both values are read by the kvm
+ * page fault under mmu_lock spinlock so we don't need to add
+ * an smp_wmb() here in between the two.
+ */
+ kvm->mmu_notifier_count--;
+ spin_unlock(&kvm->mmu_lock);
+
+ BUG_ON(kvm->mmu_notifier_count < 0);
+}
+
+static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
+ struct mm_struct *mm,
+ unsigned long address)
+{
+ struct kvm *kvm = mmu_notifier_to_kvm(mn);
+ int young;
+
+ spin_lock(&kvm->mmu_lock);
+ young = kvm_age_hva(kvm, address);
+ spin_unlock(&kvm->mmu_lock);
+
+ if (young)
+ kvm_flush_remote_tlbs(kvm);
+
+ return young;
+}
+
+static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
+ .invalidate_page = kvm_mmu_notifier_invalidate_page,
+ .invalidate_range_start = kvm_mmu_notifier_invalidate_range_start,
+ .invalidate_range_end = kvm_mmu_notifier_invalidate_range_end,
+ .clear_flush_young = kvm_mmu_notifier_clear_flush_young,
+};
+#endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */
+
static struct kvm *kvm_create_vm(void)
{
struct kvm *kvm = kvm_arch_create_vm();
@@ -206,6 +323,21 @@ static struct kvm *kvm_create_vm(void)
(struct kvm_coalesced_mmio_ring *)page_address(page);
#endif
+#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
+ {
+ int err;
+ kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
+ err = mmu_notifier_register(&kvm->mmu_notifier, current->mm);
+ if (err) {
+#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
+ put_page(page);
+#endif
+ kfree(kvm);
+ return ERR_PTR(err);
+ }
+ }
+#endif
+
kvm->mm = current->mm;
atomic_inc(&kvm->mm->mm_count);
spin_lock_init(&kvm->mmu_lock);
@@ -266,6 +398,9 @@ static void kvm_destroy_vm(struct kvm *kvm)
if (kvm->coalesced_mmio_ring != NULL)
free_page((unsigned long)kvm->coalesced_mmio_ring);
#endif
+#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
+ mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
+#endif
kvm_arch_destroy_vm(kvm);
mmdrop(mm);
}
This is the compat code for kvm-userland:
Signed-off-by: Andrea Arcangeli <andrea@qumranet.com>
diff --git a/kernel/include-compat/linux/mmu_notifier.h b/kernel/include-compat/linux/mmu_notifier.h
new file mode 100644
index 0000000..a6db4ba
--- /dev/null
+++ b/kernel/include-compat/linux/mmu_notifier.h
@@ -0,0 +1,6 @@
+#ifndef _LINUX_MMU_NOTIFIER_H
+#define _LINUX_MMU_NOTIFIER_H
+
+struct mmu_notifier {};
+
+#endif
And this is the locking change to allow read-only browsing of the memslots with only the mmu_lock held.
Signed-off-by: Andrea Arcangeli <andrea@qumranet.com>
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index c7ad235..8be6551 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3871,16 +3871,23 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
*/
if (!user_alloc) {
if (npages && !old.rmap) {
+ unsigned long userspace_addr;
+
down_write(&current->mm->mmap_sem);
- memslot->userspace_addr = do_mmap(NULL, 0,
- npages * PAGE_SIZE,
- PROT_READ | PROT_WRITE,
- MAP_SHARED | MAP_ANONYMOUS,
- 0);
+ userspace_addr = do_mmap(NULL, 0,
+ npages * PAGE_SIZE,
+ PROT_READ | PROT_WRITE,
+ MAP_SHARED | MAP_ANONYMOUS,
+ 0);
up_write(&current->mm->mmap_sem);
- if (IS_ERR((void *)memslot->userspace_addr))
- return PTR_ERR((void *)memslot->userspace_addr);
+ if (IS_ERR((void *)userspace_addr))
+ return PTR_ERR((void *)userspace_addr);
+
+ /* set userspace_addr atomically for kvm_hva_to_rmapp */
+ spin_lock(&kvm->mmu_lock);
+ memslot->userspace_addr = userspace_addr;
+ spin_unlock(&kvm->mmu_lock);
} else {
if (!old.user_alloc && old.rmap) {
int ret;
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 6a52c08..97bcc8d 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -342,7 +342,15 @@ int __kvm_set_memory_region(struct kvm *kvm,
memset(new.rmap, 0, npages * sizeof(*new.rmap));
new.user_alloc = user_alloc;
- new.userspace_addr = mem->userspace_addr;
+ /*
+ * hva_to_rmmap() serializes with the mmu_lock and to be
+ * safe it has to ignore memslots with !user_alloc &&
+ * !userspace_addr.
+ */
+ if (user_alloc)
+ new.userspace_addr = mem->userspace_addr;
+ else
+ new.userspace_addr = 0;
}
if (npages && !new.lpage_info) {
int largepages = npages / KVM_PAGES_PER_HPAGE;
@@ -374,14 +382,18 @@ int __kvm_set_memory_region(struct kvm *kvm,
memset(new.dirty_bitmap, 0, dirty_bytes);
}
+ spin_lock(&kvm->mmu_lock);
if (mem->slot >= kvm->nmemslots)
kvm->nmemslots = mem->slot + 1;
*memslot = new;
+ spin_unlock(&kvm->mmu_lock);
r = kvm_arch_set_memory_region(kvm, mem, old, user_alloc);
if (r) {
+ spin_lock(&kvm->mmu_lock);
*memslot = old;
+ spin_unlock(&kvm->mmu_lock);
goto out_free;
}
* Re: [PATCH] kvm mmu notifier
From: Marcelo Tosatti @ 2008-07-04 17:39 UTC
To: Andrea Arcangeli; +Cc: kvm
Hi Andrea,
On Thu, Jul 03, 2008 at 05:17:42PM +0200, Andrea Arcangeli wrote:
> +static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp)
> +{
> + u64 *spte;
> + int need_tlb_flush = 0;
> +
> + while ((spte = rmap_next(kvm, rmapp, NULL))) {
> + BUG_ON(!(*spte & PT_PRESENT_MASK));
> + rmap_printk("kvm_rmap_unmap_hva: spte %p %llx\n", spte, *spte);
> + rmap_remove(kvm, spte);
There's a locking issue with rmap_remove. Other than concurrent memslot changes, which your "read-only browsing" patch covers, there are concurrent alias changes.
The mmu_shrink path is similarly vulnerable at the moment (see http://article.gmane.org/gmane.comp.emulators.kvm.devel/19102); we should change memslots and aliases to be protected by mmu_lock.
> + set_shadow_pte(spte, shadow_trap_nonpresent_pte);
> + need_tlb_flush = 1;
> + }
> + return need_tlb_flush;
> +}
Still need to nuke large page mappings on invalidate_range, right?
Presently not an issue for invalidate_page.
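Presumably kvm_handle_hva() would also have to walk the largepage reverse map, something like this (untested, and I'm writing the field names from memory):

		if (hva >= start && hva < end) {
			gfn_t gfn_offset = (hva - start) >> PAGE_SHIFT;
			retval |= handler(kvm, &memslot->rmap[gfn_offset]);
			/* also zap any large mapping covering this hva */
			retval |= handler(kvm,
				&memslot->lpage_info[gfn_offset /
					KVM_PAGES_PER_HPAGE].rmap_pde);
		}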
Other than that it looks much simpler, thanks.