From: izik eidus
Subject: [ofa-general] Re: [PATCH] KVM swapping with mmu notifiers #v9
Date: Tue, 04 Mar 2008 02:44:07 +0200
Message-ID: <47CC9B57.5050402@qumranet.com>
In-Reply-To: <20080303220502.GA5301@v2.random>
To: Andrea Arcangeli
Cc: Nick Piggin, Peter Zijlstra, kvm-devel@lists.sourceforge.net, Kanoj Sarcar, Roland Dreier, Jack Steiner, linux-kernel@vger.kernel.org, Avi Kivity, linux-mm@kvack.org, daniel.blueman@quadrics.com, Robin Holt, general@lists.openfabrics.org, akpm@linux-foundation.org, Christoph Lameter

Quoting Andrea Arcangeli:
> Notably, the registration now requires the mmap_sem in write mode.
>
> Signed-off-by: Andrea Arcangeli
>
> diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
> index 41962e7..e1287ab 100644
> --- a/arch/x86/kvm/Kconfig
> +++ b/arch/x86/kvm/Kconfig
> @@ -21,6 +21,7 @@ config KVM
>  	tristate "Kernel-based Virtual Machine (KVM) support"
>  	depends on HAVE_KVM && EXPERIMENTAL
>  	select PREEMPT_NOTIFIERS
> +	select MMU_NOTIFIER
>  	select ANON_INODES
>  	---help---
>  	  Support hosting fully virtualized guest machines using hardware
> diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
> index 4583329..4067b0f 100644
> --- a/arch/x86/kvm/mmu.c
> +++ b/arch/x86/kvm/mmu.c
> @@ -642,6 +642,110 @@ static void rmap_write_protect(struct kvm *kvm, u64 gfn)
>  	account_shadowed(kvm, gfn);
>  }
>
> +static void kvm_unmap_spte(struct kvm *kvm, u64 *spte)
> +{
> +	struct page *page = pfn_to_page((*spte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT);
> +	get_page(page);
> +	rmap_remove(kvm, spte);
> +	set_shadow_pte(spte, shadow_trap_nonpresent_pte);
> +	kvm_flush_remote_tlbs(kvm);
> +	__free_page(page);

I wrote to you about this before (I didn't get an answer, so I'm writing again): with large pages support I think we need to use put_page() here. A sketch of what I mean follows.
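Something like the below, with the reference dropped through put_page() so that a compound (large) page has its refcount handled correctly (my untested sketch of the same hunk, not part of Andrea's patch):

static void kvm_unmap_spte(struct kvm *kvm, u64 *spte)
{
	struct page *page = pfn_to_page((*spte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT);

	get_page(page);
	rmap_remove(kvm, spte);
	set_shadow_pte(spte, shadow_trap_nonpresent_pte);
	kvm_flush_remote_tlbs(kvm);
	/*
	 * put_page() instead of __free_page(): with large page support the
	 * spte can point into a compound page, and put_page() is the helper
	 * that knows how to drop a reference on one of those.
	 */
	put_page(page);
}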
> +}
> +
> +static void kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp)
> +{
> +	u64 *spte, *curr_spte;
> +
> +	spte = rmap_next(kvm, rmapp, NULL);
> +	while (spte) {
> +		BUG_ON(!(*spte & PT_PRESENT_MASK));
> +		rmap_printk("kvm_rmap_unmap_hva: spte %p %llx\n", spte, *spte);
> +		curr_spte = spte;
> +		spte = rmap_next(kvm, rmapp, spte);
> +		kvm_unmap_spte(kvm, curr_spte);
> +	}
> +}
> +
> +void kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
> +{
> +	int i;
> +
> +	/*
> +	 * If mmap_sem isn't taken, we can walk the memslots with only
> +	 * the mmu_lock, by skipping over the slots with userspace_addr == 0.
> +	 */
> +	spin_lock(&kvm->mmu_lock);
> +	for (i = 0; i < kvm->nmemslots; i++) {
> +		struct kvm_memory_slot *memslot = &kvm->memslots[i];
> +		unsigned long start = memslot->userspace_addr;
> +		unsigned long end;
> +
> +		/* mmu_lock protects userspace_addr */
> +		if (!start)
> +			continue;
> +
> +		end = start + (memslot->npages << PAGE_SHIFT);
> +		if (hva >= start && hva < end) {
> +			gfn_t gfn_offset = (hva - start) >> PAGE_SHIFT;
> +			kvm_unmap_rmapp(kvm, &memslot->rmap[gfn_offset]);
> +		}
> +	}
> +	spin_unlock(&kvm->mmu_lock);
> +}
> +
> +static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp)
> +{
> +	u64 *spte;
> +	int young = 0;
> +
> +	spte = rmap_next(kvm, rmapp, NULL);
> +	while (spte) {
> +		int _young;
> +		u64 _spte = *spte;
> +		BUG_ON(!(_spte & PT_PRESENT_MASK));
> +		_young = _spte & PT_ACCESSED_MASK;
> +		if (_young) {
> +			young = !!_young;
> +			set_shadow_pte(spte, _spte & ~PT_ACCESSED_MASK);
> +		}
> +		spte = rmap_next(kvm, rmapp, spte);
> +	}
> +	return young;
> +}
> +
> +int kvm_age_hva(struct kvm *kvm, unsigned long hva)
> +{
> +	int i;
> +	int young = 0;
> +
> +	/*
> +	 * If mmap_sem isn't taken, we can walk the memslots with only
> +	 * the mmu_lock, by skipping over the slots with userspace_addr == 0.
> +	 */
> +	spin_lock(&kvm->mmu_lock);
> +	for (i = 0; i < kvm->nmemslots; i++) {
> +		struct kvm_memory_slot *memslot = &kvm->memslots[i];
> +		unsigned long start = memslot->userspace_addr;
> +		unsigned long end;
> +
> +		/* mmu_lock protects userspace_addr */
> +		if (!start)
> +			continue;
> +
> +		end = start + (memslot->npages << PAGE_SHIFT);
> +		if (hva >= start && hva < end) {
> +			gfn_t gfn_offset = (hva - start) >> PAGE_SHIFT;
> +			young |= kvm_age_rmapp(kvm, &memslot->rmap[gfn_offset]);
> +		}
> +	}
> +	spin_unlock(&kvm->mmu_lock);
> +
> +	if (young)
> +		kvm_flush_remote_tlbs(kvm);
> +
> +	return young;
> +}
> +
>  #ifdef MMU_DEBUG
>  static int is_empty_shadow_page(u64 *spt)
>  {
> diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
> index 17f9d16..b014b19 100644
> --- a/arch/x86/kvm/paging_tmpl.h
> +++ b/arch/x86/kvm/paging_tmpl.h
> @@ -380,6 +380,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
>  	int r;
>  	struct page *page;
>  	int largepage = 0;
> +	unsigned mmu_seq;
>
>  	pgprintk("%s: addr %lx err %x\n", __FUNCTION__, addr, error_code);
>  	kvm_mmu_audit(vcpu, "pre page fault");
> @@ -415,6 +416,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
>  			largepage = 1;
>  		}
>  	}
> +	mmu_seq = read_seqbegin(&vcpu->kvm->arch.mmu_notifier_invalidate_lock);
>  	page = gfn_to_page(vcpu->kvm, walker.gfn);
>  	up_read(&current->mm->mmap_sem);
>
> @@ -440,6 +442,15 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
>  	++vcpu->stat.pf_fixed;
>  	kvm_mmu_audit(vcpu, "post page fault (fixed)");
>  	spin_unlock(&vcpu->kvm->mmu_lock);
> +
> +	if (read_seqretry(&vcpu->kvm->arch.mmu_notifier_invalidate_lock, mmu_seq)) {
> +		down_read(&current->mm->mmap_sem);
> +		if (page != gfn_to_page(vcpu->kvm, walker.gfn))
> +			BUG();
> +		up_read(&current->mm->mmap_sem);
> +		kvm_release_page_clean(page);
> +	}
> +
>  	up_read(&vcpu->kvm->slots_lock);
>
>  	return write_pt;
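To spell out the race the seqlock closes here, since it took me a minute to see it: gfn_to_page() runs before the mmu_lock is taken, so an invalidate can slip in between and the page reference could already be stale when the sptes are instantiated. My reading of the interleaving, where "lock" abbreviates kvm->arch.mmu_notifier_invalidate_lock (a schematic, not part of the patch):

	vcpu thread (FNAME(page_fault))          mmu notifier
	-------------------------------          ------------
	mmu_seq = read_seqbegin(&lock);
	page = gfn_to_page(kvm, walker.gfn);
	                                         write_seqlock(&lock);
	                                         kvm_unmap_hva(kvm, address);
	                                         write_sequnlock(&lock);
	spin_lock(&kvm->mmu_lock);
	/* instantiate sptes for page */
	spin_unlock(&kvm->mmu_lock);
	if (read_seqretry(&lock, mmu_seq))
		/* an invalidate ran: recheck gfn_to_page() and drop the extra reference */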
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index 6f09840..1dfb1c9 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -25,6 +25,7 @@
>  #include
>  #include
>  #include
> +#include <linux/mmu_notifier.h>
>
>  #include
>  #include
> @@ -3319,6 +3320,48 @@ void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
>  	free_page((unsigned long)vcpu->arch.pio_data);
>  }
>
> +static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
> +{
> +	struct kvm_arch *kvm_arch;
> +	kvm_arch = container_of(mn, struct kvm_arch, mmu_notifier);
> +	return container_of(kvm_arch, struct kvm, arch);
> +}
> +
> +void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
> +				      struct mm_struct *mm,
> +				      unsigned long address)
> +{
> +	struct kvm *kvm = mmu_notifier_to_kvm(mn);
> +	BUG_ON(mm != kvm->mm);
> +	write_seqlock(&kvm->arch.mmu_notifier_invalidate_lock);
> +	kvm_unmap_hva(kvm, address);
> +	write_sequnlock(&kvm->arch.mmu_notifier_invalidate_lock);
> +}
> +
> +void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
> +					   struct mm_struct *mm,
> +					   unsigned long start,
> +					   unsigned long end)
> +{
> +	for (; start < end; start += PAGE_SIZE)
> +		kvm_mmu_notifier_invalidate_page(mn, mm, start);
> +}
> +
> +int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
> +				       struct mm_struct *mm,
> +				       unsigned long address)
> +{
> +	struct kvm *kvm = mmu_notifier_to_kvm(mn);
> +	BUG_ON(mm != kvm->mm);
> +	return kvm_age_hva(kvm, address);
> +}
> +
> +static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
> +	.invalidate_page	= kvm_mmu_notifier_invalidate_page,
> +	.invalidate_range_end	= kvm_mmu_notifier_invalidate_range_end,
> +	.clear_flush_young	= kvm_mmu_notifier_clear_flush_young,
> +};
> +
>  struct kvm *kvm_arch_create_vm(void)
>  {
>  	struct kvm *kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
> @@ -3328,6 +3371,12 @@ struct kvm *kvm_arch_create_vm(void)
>
>  	INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
>
> +	kvm->arch.mmu_notifier.ops = &kvm_mmu_notifier_ops;
> +	down_write(&current->mm->mmap_sem);
> +	mmu_notifier_register(&kvm->arch.mmu_notifier, current->mm);
> +	up_write(&current->mm->mmap_sem);
> +	seqlock_init(&kvm->arch.mmu_notifier_invalidate_lock);
> +
>  	return kvm;
>  }
>
> diff --git a/include/asm-x86/kvm_host.h b/include/asm-x86/kvm_host.h
> index 024b57c..305b7c3 100644
> --- a/include/asm-x86/kvm_host.h
> +++ b/include/asm-x86/kvm_host.h
> @@ -13,6 +13,7 @@
>
>  #include
>  #include
> +#include <linux/mmu_notifier.h>
>
>  #include
>  #include
> @@ -303,6 +304,9 @@ struct kvm_arch{
>  	struct page *apic_access_page;
>
>  	gpa_t wall_clock;
> +
> +	struct mmu_notifier mmu_notifier;
> +	seqlock_t mmu_notifier_invalidate_lock;
>  };
>
>  struct kvm_vm_stat {
> @@ -422,6 +426,8 @@ int kvm_mmu_create(struct kvm_vcpu *vcpu);
>  int kvm_mmu_setup(struct kvm_vcpu *vcpu);
>  void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte);
>
> +void kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
> +int kvm_age_hva(struct kvm *kvm, unsigned long hva);
>  int kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
>  void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot);
>  void kvm_mmu_zap_all(struct kvm *kvm);
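By the way, kvm_unmap_hva() and kvm_age_hva() open-code the same hva to rmap lookup twice; the comments in the second patch below call it hva_to_rmapp() / kvm_hva_to_rmapp. Factored out, it would look roughly like this (a hypothetical helper derived from the loops above, not something the patch adds):

static unsigned long *hva_to_rmapp(struct kvm *kvm, unsigned long hva)
{
	int i;

	/* caller must hold kvm->mmu_lock */
	for (i = 0; i < kvm->nmemslots; i++) {
		struct kvm_memory_slot *memslot = &kvm->memslots[i];
		unsigned long start = memslot->userspace_addr;
		unsigned long end;

		/* slots with userspace_addr == 0 are still being set up, skip them */
		if (!start)
			continue;

		end = start + (memslot->npages << PAGE_SHIFT);
		if (hva >= start && hva < end)
			return &memslot->rmap[(hva - start) >> PAGE_SHIFT];
	}
	return NULL;
}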
>
> As usual, memslot browsing happens under the mmu_lock.
>
> Signed-off-by: Andrea Arcangeli
>
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index 6f09840..a519fd8 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -3379,16 +3379,23 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
>  	 */
>  	if (!user_alloc) {
>  		if (npages && !old.rmap) {
> +			unsigned long userspace_addr;
> +
>  			down_write(&current->mm->mmap_sem);
> -			memslot->userspace_addr = do_mmap(NULL, 0,
> -						     npages * PAGE_SIZE,
> -						     PROT_READ | PROT_WRITE,
> -						     MAP_SHARED | MAP_ANONYMOUS,
> -						     0);
> +			userspace_addr = do_mmap(NULL, 0,
> +						 npages * PAGE_SIZE,
> +						 PROT_READ | PROT_WRITE,
> +						 MAP_SHARED | MAP_ANONYMOUS,
> +						 0);
>  			up_write(&current->mm->mmap_sem);
>
> -			if (IS_ERR((void *)memslot->userspace_addr))
> -				return PTR_ERR((void *)memslot->userspace_addr);
> +			if (IS_ERR((void *)userspace_addr))
> +				return PTR_ERR((void *)userspace_addr);
> +
> +			/* set userspace_addr atomically for kvm_hva_to_rmapp */
> +			spin_lock(&kvm->mmu_lock);
> +			memslot->userspace_addr = userspace_addr;
> +			spin_unlock(&kvm->mmu_lock);
>  		} else {
>  			if (!old.user_alloc && old.rmap) {
>  				int ret;
> diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
> index 30bf832..8f3b6d6 100644
> --- a/virt/kvm/kvm_main.c
> +++ b/virt/kvm/kvm_main.c
> @@ -326,7 +326,15 @@ int __kvm_set_memory_region(struct kvm *kvm,
>  		memset(new.rmap, 0, npages * sizeof(*new.rmap));
>
>  		new.user_alloc = user_alloc;
> -		new.userspace_addr = mem->userspace_addr;
> +		/*
> +		 * hva_to_rmapp() serializes with the mmu_lock and to be
> +		 * safe it has to ignore memslots with !user_alloc &&
> +		 * !userspace_addr.
> +		 */
> +		if (user_alloc)
> +			new.userspace_addr = mem->userspace_addr;
> +		else
> +			new.userspace_addr = 0;
>  	}
>  	if (npages && !new.lpage_info) {
>  		int largepages = npages / KVM_PAGES_PER_HPAGE;
> @@ -355,14 +363,18 @@ int __kvm_set_memory_region(struct kvm *kvm,
>  		memset(new.dirty_bitmap, 0, dirty_bytes);
>  	}
>
> +	spin_lock(&kvm->mmu_lock);
>  	if (mem->slot >= kvm->nmemslots)
>  		kvm->nmemslots = mem->slot + 1;
>
>  	*memslot = new;
> +	spin_unlock(&kvm->mmu_lock);
>
>  	r = kvm_arch_set_memory_region(kvm, mem, old, user_alloc);
>  	if (r) {
> +		spin_lock(&kvm->mmu_lock);
>  		*memslot = old;
> +		spin_unlock(&kvm->mmu_lock);
>  		goto out_free;
>  	}
>
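To summarize the protocol the two patches rely on, as I understand it: the writer publishes userspace_addr only inside the mmu_lock, so a notifier walking the slots under the same lock either sees zero (slot still being set up, and skipped) or a fully initialized slot. Roughly (my summary, not part of the patch):

	writer (set_memory_region paths)         reader (kvm_unmap_hva / kvm_age_hva)
	--------------------------------         ------------------------------------
	spin_lock(&kvm->mmu_lock);               spin_lock(&kvm->mmu_lock);
	memslot->userspace_addr = addr;          if (!memslot->userspace_addr)
	spin_unlock(&kvm->mmu_lock);                     continue; /* slot not live yet */
	                                         /* otherwise memslot->rmap is safe to walk */
	                                         spin_unlock(&kvm->mmu_lock);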