From: Davidlohr Bueso <dbueso@suse.de>
To: akpm@linux-foundation.org, mingo@kernel.org
Cc: peterz@infradead.org, ldufour@linux.vnet.ibm.com, jack@suse.cz,
mhocko@kernel.org, kirill.shutemov@linux.intel.com,
mawilcox@microsoft.com, mgorman@techsingularity.net,
dave@stgolabs.net, linux-mm@kvack.org,
linux-kernel@vger.kernel.org, Davidlohr Bueso <dbueso@suse.de>
Subject: [PATCH 54/64] arch/arm: use mm locking wrappers
Date: Mon, 5 Feb 2018 02:27:44 +0100 [thread overview]
Message-ID: <20180205012754.23615-55-dbueso@wotan.suse.de> (raw)
In-Reply-To: <20180205012754.23615-1-dbueso@wotan.suse.de>
From: Davidlohr Bueso <dave@stgolabs.net>
This conversion becomes quite straightforward with the mmrange
infrastructure in place. For those mmap_sem users that need an
mmrange, we simply define one locally in the function, since the
mmap_sem usage is confined to that same context.
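For reference, the wrappers introduced earlier in the series are
expected to be thin pass-throughs to mmap_sem until the final patch
converts the semaphore into a range lock. The sketch below only
illustrates the assumed shape of mm_read_lock()/mm_read_unlock() and
mm_write_lock_killable(); it is not the literal code from that patch:

/*
 * Illustrative sketch only: pass-through wrappers that take the (for
 * now unused) range argument, so that callers can be converted ahead
 * of the actual mmap_sem -> range lock switch.
 */
static inline void mm_read_lock(struct mm_struct *mm, struct range_lock *mmrange)
{
	down_read(&mm->mmap_sem);
}

static inline void mm_read_unlock(struct mm_struct *mm, struct range_lock *mmrange)
{
	up_read(&mm->mmap_sem);
}

static inline int mm_write_lock_killable(struct mm_struct *mm, struct range_lock *mmrange)
{
	return down_write_killable(&mm->mmap_sem);
}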
Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
---
arch/arm/kernel/process.c | 5 +++--
arch/arm/kernel/swp_emulate.c | 5 +++--
arch/arm/lib/uaccess_with_memcpy.c | 18 ++++++++++--------
arch/arm/mm/fault.c | 6 +++---
arch/arm64/kernel/traps.c | 5 +++--
arch/arm64/kernel/vdso.c | 12 +++++++-----
arch/arm64/mm/fault.c | 6 +++---
7 files changed, 32 insertions(+), 25 deletions(-)
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 1523cb18b109..39fd5bd204d7 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -424,6 +424,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
unsigned long addr;
unsigned long hint;
int ret = 0;
+ DEFINE_RANGE_LOCK_FULL(mmrange);
if (!signal_page)
signal_page = get_signal_page();
@@ -433,7 +434,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
npages = 1; /* for sigpage */
npages += vdso_total_pages;
- if (down_write_killable(&mm->mmap_sem))
+ if (mm_write_lock_killable(mm, &mmrange))
return -EINTR;
hint = sigpage_addr(mm, npages);
addr = get_unmapped_area(NULL, hint, npages << PAGE_SHIFT, 0, 0);
@@ -460,7 +461,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
arm_install_vdso(mm, addr + PAGE_SIZE);
up_fail:
- up_write(&mm->mmap_sem);
+ mm_write_unlock(mm, &mmrange);
return ret;
}
#endif
diff --git a/arch/arm/kernel/swp_emulate.c b/arch/arm/kernel/swp_emulate.c
index 3bda08bee674..e01a469393fb 100644
--- a/arch/arm/kernel/swp_emulate.c
+++ b/arch/arm/kernel/swp_emulate.c
@@ -111,13 +111,14 @@ static const struct file_operations proc_status_fops = {
static void set_segfault(struct pt_regs *regs, unsigned long addr)
{
siginfo_t info;
+ DEFINE_RANGE_LOCK_FULL(mmrange);
- down_read(&current->mm->mmap_sem);
+ mm_read_lock(current->mm, &mmrange);
if (find_vma(current->mm, addr) == NULL)
info.si_code = SEGV_MAPERR;
else
info.si_code = SEGV_ACCERR;
- up_read(&current->mm->mmap_sem);
+ mm_read_unlock(current->mm, &mmrange);
info.si_signo = SIGSEGV;
info.si_errno = 0;
diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
index 9b4ed1728616..24464fa0a78a 100644
--- a/arch/arm/lib/uaccess_with_memcpy.c
+++ b/arch/arm/lib/uaccess_with_memcpy.c
@@ -89,6 +89,7 @@ __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
{
unsigned long ua_flags;
int atomic;
+ DEFINE_RANGE_LOCK_FULL(mmrange);
if (uaccess_kernel()) {
memcpy((void *)to, from, n);
@@ -99,7 +100,7 @@ __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
atomic = faulthandler_disabled();
if (!atomic)
- down_read(&current->mm->mmap_sem);
+ mm_read_lock(current->mm, &mmrange);
while (n) {
pte_t *pte;
spinlock_t *ptl;
@@ -107,11 +108,11 @@ __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
while (!pin_page_for_write(to, &pte, &ptl)) {
if (!atomic)
- up_read(&current->mm->mmap_sem);
+ mm_read_unlock(current->mm, &mmrange);
if (__put_user(0, (char __user *)to))
goto out;
if (!atomic)
- down_read(&current->mm->mmap_sem);
+ mm_read_lock(current->mm, &mmrange);
}
tocopy = (~(unsigned long)to & ~PAGE_MASK) + 1;
@@ -131,7 +132,7 @@ __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
spin_unlock(ptl);
}
if (!atomic)
- up_read(&current->mm->mmap_sem);
+ mm_read_unlock(current->mm, &mmrange);
out:
return n;
@@ -161,23 +162,24 @@ static unsigned long noinline
__clear_user_memset(void __user *addr, unsigned long n)
{
unsigned long ua_flags;
+ DEFINE_RANGE_LOCK_FULL(mmrange);
if (uaccess_kernel()) {
memset((void *)addr, 0, n);
return 0;
}
- down_read(&current->mm->mmap_sem);
+ mm_read_lock(current->mm, &mmrange);
while (n) {
pte_t *pte;
spinlock_t *ptl;
int tocopy;
while (!pin_page_for_write(addr, &pte, &ptl)) {
- up_read(&current->mm->mmap_sem);
+ mm_read_unlock(current->mm, &mmrange);
if (__put_user(0, (char __user *)addr))
goto out;
- down_read(&current->mm->mmap_sem);
+ mm_read_lock(current->mm, &mmrange);
}
tocopy = (~(unsigned long)addr & ~PAGE_MASK) + 1;
@@ -195,7 +197,7 @@ __clear_user_memset(void __user *addr, unsigned long n)
else
spin_unlock(ptl);
}
- up_read(&current->mm->mmap_sem);
+ mm_read_unlock(current->mm, &mmrange);
out:
return n;
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index 99ae40b5851a..6ce3e0707db5 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -291,11 +291,11 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
* validly references user space from well defined areas of the code,
* we can bug out early if this is from code which shouldn't.
*/
- if (!down_read_trylock(&mm->mmap_sem)) {
+ if (!mm_read_trylock(mm, &mmrange)) {
if (!user_mode(regs) && !search_exception_tables(regs->ARM_pc))
goto no_context;
retry:
- down_read(&mm->mmap_sem);
+ mm_read_lock(mm, &mmrange);
} else {
/*
* The above down_read_trylock() might have succeeded in
@@ -348,7 +348,7 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
}
}
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm, &mmrange);
/*
* Handle the "normal" case first - VM_FAULT_MAJOR
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index bbb0fde2780e..bf185655b142 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -351,13 +351,14 @@ void force_signal_inject(int signal, int code, struct pt_regs *regs,
void arm64_notify_segfault(struct pt_regs *regs, unsigned long addr)
{
int code;
+ DEFINE_RANGE_LOCK_FULL(mmrange);
- down_read(&current->mm->mmap_sem);
+ mm_read_lock(current->mm, &mmrange);
if (find_vma(current->mm, addr) == NULL)
code = SEGV_MAPERR;
else
code = SEGV_ACCERR;
- up_read(&current->mm->mmap_sem);
+ mm_read_unlock(current->mm, &mmrange);
force_signal_inject(SIGSEGV, code, regs, addr);
}
diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index 2d419006ad43..1b0006fe9668 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -94,8 +94,9 @@ int aarch32_setup_vectors_page(struct linux_binprm *bprm, int uses_interp)
};
void *ret;
+ DEFINE_RANGE_LOCK_FULL(mmrange);
- if (down_write_killable(&mm->mmap_sem))
+ if (mm_write_lock_killable(mm, &mmrange))
return -EINTR;
current->mm->context.vdso = (void *)addr;
@@ -104,7 +105,7 @@ int aarch32_setup_vectors_page(struct linux_binprm *bprm, int uses_interp)
VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYEXEC,
&spec);
- up_write(&mm->mmap_sem);
+ mm_write_unlock(mm, &mmrange);
return PTR_ERR_OR_ZERO(ret);
}
@@ -178,12 +179,13 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
struct mm_struct *mm = current->mm;
unsigned long vdso_base, vdso_text_len, vdso_mapping_len;
void *ret;
+ DEFINE_RANGE_LOCK_FULL(mmrange);
vdso_text_len = vdso_pages << PAGE_SHIFT;
/* Be sure to map the data page */
vdso_mapping_len = vdso_text_len + PAGE_SIZE;
- if (down_write_killable(&mm->mmap_sem))
+ if (mm_write_lock_killable(mm, &mmrange))
return -EINTR;
vdso_base = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0);
if (IS_ERR_VALUE(vdso_base)) {
@@ -206,12 +208,12 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
goto up_fail;
- up_write(&mm->mmap_sem);
+ mm_write_unlock(mm, &mmrange);
return 0;
up_fail:
mm->context.vdso = NULL;
- up_write(&mm->mmap_sem);
+ mm_write_unlock(mm, &mmrange);
return PTR_ERR(ret);
}
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 1f3ad9e4f214..555d533d52ab 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -434,11 +434,11 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
* validly references user space from well defined areas of the code,
* we can bug out early if this is from code which shouldn't.
*/
- if (!down_read_trylock(&mm->mmap_sem)) {
+ if (!mm_read_trylock(mm, &mmrange)) {
if (!user_mode(regs) && !search_exception_tables(regs->pc))
goto no_context;
retry:
- down_read(&mm->mmap_sem);
+ mm_read_lock(mm, &mmrange);
} else {
/*
* The above down_read_trylock() might have succeeded in which
@@ -477,7 +477,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
goto retry;
}
}
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm, &mmrange);
/*
* Handle the "normal" (no error) case first.
--
2.13.6