From: Davidlohr Bueso <dbueso@suse.de>
To: akpm@linux-foundation.org, mingo@kernel.org
Cc: peterz@infradead.org, ldufour@linux.vnet.ibm.com, jack@suse.cz,
mhocko@kernel.org, kirill.shutemov@linux.intel.com,
mawilcox@microsoft.com, mgorman@techsingularity.net,
dave@stgolabs.net, linux-mm@kvack.org,
linux-kernel@vger.kernel.org, Davidlohr Bueso <dbueso@suse.de>
Subject: [PATCH 16/64] virt: use mm locking wrappers
Date: Mon, 5 Feb 2018 02:27:06 +0100
Message-ID: <20180205012754.23615-17-dbueso@wotan.suse.de>
In-Reply-To: <20180205012754.23615-1-dbueso@wotan.suse.de>
From: Davidlohr Bueso <dave@stgolabs.net>
Convert the mmap_sem callers under virt/kvm to the mm locking wrappers. No change in semantics.
Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
---
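For reference, every call site follows the same conversion pattern, sketched below. This is only an illustration: the wrapper definitions themselves are introduced earlier in the series ("mm: introduce mm locking wrappers"), and the "was:" comments simply restate the lines removed by this patch's hunks; the note on DEFINE_RANGE_LOCK_FULL is my reading of the name, not the authoritative definition.

	DEFINE_RANGE_LOCK_FULL(mmrange);	/* presumably a range covering the full address space */

	mm_read_lock(current->mm, &mmrange);	/* was: down_read(&current->mm->mmap_sem) */
	/* ... find_vma(), gup and friends under the read side ... */
	mm_read_unlock(current->mm, &mmrange);	/* was: up_read(&current->mm->mmap_sem) */
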
virt/kvm/arm/mmu.c | 17 ++++++++++-------
virt/kvm/async_pf.c | 4 ++--
virt/kvm/kvm_main.c | 9 +++++----
3 files changed, 17 insertions(+), 13 deletions(-)
diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
index ec62d1cccab7..9a866a639c2c 100644
--- a/virt/kvm/arm/mmu.c
+++ b/virt/kvm/arm/mmu.c
@@ -815,9 +815,10 @@ void stage2_unmap_vm(struct kvm *kvm)
struct kvm_memslots *slots;
struct kvm_memory_slot *memslot;
int idx;
+ DEFINE_RANGE_LOCK_FULL(mmrange);
idx = srcu_read_lock(&kvm->srcu);
- down_read(&current->mm->mmap_sem);
+ mm_read_lock(current->mm, &mmrange);
spin_lock(&kvm->mmu_lock);
slots = kvm_memslots(kvm);
@@ -825,7 +826,7 @@ void stage2_unmap_vm(struct kvm *kvm)
stage2_unmap_memslot(kvm, memslot);
spin_unlock(&kvm->mmu_lock);
- up_read(&current->mm->mmap_sem);
+ mm_read_unlock(current->mm, &mmrange);
srcu_read_unlock(&kvm->srcu, idx);
}
@@ -1317,6 +1318,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
pgprot_t mem_type = PAGE_S2;
bool logging_active = memslot_is_logging(memslot);
unsigned long flags = 0;
+ DEFINE_RANGE_LOCK_FULL(mmrange);
write_fault = kvm_is_write_fault(vcpu);
exec_fault = kvm_vcpu_trap_is_iabt(vcpu);
@@ -1328,11 +1330,11 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
}
/* Let's check if we will get back a huge page backed by hugetlbfs */
- down_read(&current->mm->mmap_sem);
+ mm_read_lock(current->mm, &mmrange);
vma = find_vma_intersection(current->mm, hva, hva + 1);
if (unlikely(!vma)) {
kvm_err("Failed to find VMA for hva 0x%lx\n", hva);
- up_read(&current->mm->mmap_sem);
+ mm_read_unlock(current->mm, &mmrange);
return -EFAULT;
}
@@ -1353,7 +1355,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
((memslot->base_gfn << PAGE_SHIFT) & ~PMD_MASK))
force_pte = true;
}
- up_read(&current->mm->mmap_sem);
+ mm_read_unlock(current->mm, &mmrange);
/* We need minimum second+third level pages */
ret = mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES,
@@ -1889,6 +1891,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
hva_t reg_end = hva + mem->memory_size;
bool writable = !(mem->flags & KVM_MEM_READONLY);
int ret = 0;
+ DEFINE_RANGE_LOCK_FULL(mmrange);
if (change != KVM_MR_CREATE && change != KVM_MR_MOVE &&
change != KVM_MR_FLAGS_ONLY)
@@ -1902,7 +1905,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
(KVM_PHYS_SIZE >> PAGE_SHIFT))
return -EFAULT;
- down_read(&current->mm->mmap_sem);
+ mm_read_lock(current->mm, &mmrange);
/*
* A memory region could potentially cover multiple VMAs, and any holes
* between them, so iterate over all of them to find out if we can map
@@ -1970,7 +1973,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
stage2_flush_memslot(kvm, memslot);
spin_unlock(&kvm->mmu_lock);
out:
- up_read(&current->mm->mmap_sem);
+ mm_read_unlock(current->mm, &mmrange);
return ret;
}
diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c
index 4cd2b93bb20c..ed559789d7cb 100644
--- a/virt/kvm/async_pf.c
+++ b/virt/kvm/async_pf.c
@@ -87,11 +87,11 @@ static void async_pf_execute(struct work_struct *work)
* mm and might be done in another context, so we must
* access remotely.
*/
- down_read(&mm->mmap_sem);
+ mm_read_lock(mm, &mmrange);
get_user_pages_remote(NULL, mm, addr, 1, FOLL_WRITE, NULL, NULL,
&locked, &mmrange);
if (locked)
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm, &mmrange);
kvm_async_page_present_sync(vcpu, apf);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 86ec078f4c3b..92fd944e7e3a 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1222,6 +1222,7 @@ EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);
unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn)
{
struct vm_area_struct *vma;
+ DEFINE_RANGE_LOCK_FULL(mmrange);
unsigned long addr, size;
size = PAGE_SIZE;
@@ -1230,7 +1231,7 @@ unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn)
if (kvm_is_error_hva(addr))
return PAGE_SIZE;
- down_read(&current->mm->mmap_sem);
+ mm_read_lock(current->mm, &mmrange);
vma = find_vma(current->mm, addr);
if (!vma)
goto out;
@@ -1238,7 +1239,7 @@ unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn)
size = vma_kernel_pagesize(vma);
out:
- up_read(&current->mm->mmap_sem);
+ mm_read_unlock(current->mm, &mmrange);
return size;
}
@@ -1494,7 +1495,7 @@ static kvm_pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async,
if (npages == 1)
return pfn;
- down_read(&current->mm->mmap_sem);
+ mm_read_lock(current->mm, &mmrange);
if (npages == -EHWPOISON ||
(!async && check_user_page_hwpoison(addr, &mmrange))) {
pfn = KVM_PFN_ERR_HWPOISON;
@@ -1519,7 +1520,7 @@ static kvm_pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async,
pfn = KVM_PFN_ERR_FAULT;
}
exit:
- up_read(&current->mm->mmap_sem);
+ mm_read_unlock(current->mm, &mmrange);
return pfn;
}
--
2.13.6
Thread overview: 69+ messages
2018-02-05 1:26 [RFC PATCH 00/64] mm: towards parallel address space operations Davidlohr Bueso
2018-02-05 1:26 ` [PATCH 01/64] interval-tree: build unconditionally Davidlohr Bueso
2018-02-05 1:26 ` [PATCH 02/64] Introduce range reader/writer lock Davidlohr Bueso
2018-02-05 1:26 ` [PATCH 03/64] mm: introduce mm locking wrappers Davidlohr Bueso
2018-02-05 1:26 ` [PATCH 04/64] mm: add a range parameter to the vm_fault structure Davidlohr Bueso
2018-02-05 1:26 ` [PATCH 05/64] mm,khugepaged: prepare passing of rangelock field to vm_fault Davidlohr Bueso
2018-02-05 1:26 ` [PATCH 06/64] mm: teach pagefault paths about range locking Davidlohr Bueso
2018-02-05 16:09 ` Laurent Dufour
2018-02-06 18:32 ` Davidlohr Bueso
2018-02-05 1:26 ` [PATCH 07/64] mm/hugetlb: teach hugetlb_fault() " Davidlohr Bueso
2018-02-05 1:26 ` [PATCH 08/64] mm: teach lock_page_or_retry() " Davidlohr Bueso
2018-02-05 1:26 ` [PATCH 09/64] mm/mmu_notifier: teach oom reaper " Davidlohr Bueso
2018-02-05 1:27 ` [PATCH 10/64] kernel/exit: teach exit_mm() " Davidlohr Bueso
2018-02-05 1:27 ` [PATCH 11/64] prctl: teach " Davidlohr Bueso
2018-02-05 1:27 ` [PATCH 12/64] fs/userfaultfd: teach userfaultfd_must_wait() " Davidlohr Bueso
2018-02-05 1:27 ` [PATCH 13/64] fs/proc: teach " Davidlohr Bueso
2018-02-05 1:27 ` [PATCH 14/64] fs/coredump: " Davidlohr Bueso
2018-02-05 1:27 ` [PATCH 15/64] ipc: use mm locking wrappers Davidlohr Bueso
2018-02-05 1:27 ` Davidlohr Bueso [this message]
2018-02-05 1:27 ` [PATCH 17/64] kernel: " Davidlohr Bueso
2018-02-05 1:27 ` [PATCH 18/64] mm/ksm: teach about range locking Davidlohr Bueso
2018-02-05 1:27 ` [PATCH 19/64] mm/mlock: use mm locking wrappers Davidlohr Bueso
2018-02-05 1:27 ` [PATCH 20/64] mm/madvise: " Davidlohr Bueso
2018-02-05 1:27 ` [PATCH 21/64] mm: teach drop/take_all_locks() about range locking Davidlohr Bueso
2018-02-05 1:27 ` [PATCH 22/64] mm: avoid mmap_sem trylock in vm_insert_page() Davidlohr Bueso
2018-02-05 1:27 ` [PATCH 23/64] mm: huge pagecache: do not check mmap_sem state Davidlohr Bueso
2018-02-05 1:27 ` [PATCH 24/64] mm/thp: disable mmap_sem is_locked checks Davidlohr Bueso
2018-02-05 1:27 ` [PATCH 25/64] mm: use mm locking wrappers Davidlohr Bueso
2018-02-05 1:27 ` [PATCH 26/64] fs: " Davidlohr Bueso
2018-02-05 1:27 ` [PATCH 27/64] arch/{x86,sh,ppc}: teach bad_area() about range locking Davidlohr Bueso
2018-02-05 1:27 ` [PATCH 28/64] arch/x86: use mm locking wrappers Davidlohr Bueso
2018-02-05 1:27 ` [PATCH 29/64] arch/alpha: " Davidlohr Bueso
2018-02-05 1:27 ` [PATCH 30/64] arch/tile: " Davidlohr Bueso
2018-02-05 1:27 ` [PATCH 31/64] arch/sparc: " Davidlohr Bueso
2018-02-05 1:27 ` [PATCH 32/64] arch/s390: " Davidlohr Bueso
2018-02-05 1:27 ` [PATCH 33/64] arch/powerpc: " Davidlohr Bueso
2018-02-05 1:27 ` [PATCH 34/64] arch/parisc: " Davidlohr Bueso
2018-02-05 1:27 ` [PATCH 35/64] arch/ia64: " Davidlohr Bueso
2018-02-05 1:27 ` [PATCH 36/64] arch/mips: " Davidlohr Bueso
2018-02-05 1:27 ` [PATCH 37/64] arch/arc: " Davidlohr Bueso
2018-02-05 1:27 ` [PATCH 38/64] arch/blackfin: " Davidlohr Bueso
2018-02-05 1:27 ` [PATCH 39/64] arch/m68k: " Davidlohr Bueso
2018-02-05 1:27 ` [PATCH 40/64] arch/sh: " Davidlohr Bueso
2018-02-05 1:27 ` [PATCH 41/64] arch/cris: " Davidlohr Bueso
2018-02-05 1:27 ` [PATCH 42/64] arch/frv: " Davidlohr Bueso
2018-02-05 1:27 ` [PATCH 43/64] arch/hexagon: " Davidlohr Bueso
2018-02-05 1:27 ` [PATCH 44/64] arch/score: " Davidlohr Bueso
2018-02-05 1:27 ` [PATCH 45/64] arch/m32r: " Davidlohr Bueso
2018-02-05 1:27 ` [PATCH 46/64] arch/metag: " Davidlohr Bueso
2018-02-05 1:27 ` [PATCH 47/64] arch/microblaze: " Davidlohr Bueso
2018-02-05 1:27 ` [PATCH 48/64] arch/tile: " Davidlohr Bueso
2018-02-05 1:27 ` [PATCH 49/64] arch/xtensa: " Davidlohr Bueso
2018-02-05 1:27 ` [PATCH 50/64] arch/unicore32: " Davidlohr Bueso
2018-02-05 1:27 ` [PATCH 51/64] arch/mn10300: " Davidlohr Bueso
2018-02-05 1:27 ` [PATCH 52/64] arch/openrisc: " Davidlohr Bueso
2018-02-05 1:27 ` [PATCH 53/64] arch/nios2: " Davidlohr Bueso
2018-02-05 1:27 ` [PATCH 54/64] arch/arm: " Davidlohr Bueso
2018-02-05 1:27 ` [PATCH 55/64] arch/riscv: " Davidlohr Bueso
2018-02-05 1:27 ` [PATCH 56/64] drivers/android: " Davidlohr Bueso
2018-02-05 1:27 ` [PATCH 57/64] drivers/gpu: " Davidlohr Bueso
2018-02-05 1:27 ` [PATCH 58/64] drivers/infiniband: " Davidlohr Bueso
2018-02-05 1:27 ` [PATCH 59/64] drivers/iommu: use mm locking helpers Davidlohr Bueso
2018-02-05 1:27 ` [PATCH 60/64] drivers/xen: use mm locking wrappers Davidlohr Bueso
2018-02-05 1:27 ` [PATCH 61/64] staging/lustre: use generic range lock Davidlohr Bueso
2018-02-05 1:27 ` [PATCH 62/64] drivers: use mm locking wrappers (the rest) Davidlohr Bueso
2018-02-05 1:27 ` [PATCH 63/64] mm/mmap: hack drop down_write_nest_lock() Davidlohr Bueso
2018-02-05 1:27 ` [PATCH 64/64] mm: convert mmap_sem to range mmap_lock Davidlohr Bueso
2018-02-05 16:53 ` [RFC PATCH 00/64] mm: towards parallel address space operations Laurent Dufour
2018-02-06 18:48 ` Davidlohr Bueso