From: Davidlohr Bueso <dbueso@suse.de>
To: akpm@linux-foundation.org, mingo@kernel.org
Cc: peterz@infradead.org, ldufour@linux.vnet.ibm.com, jack@suse.cz,
	mhocko@kernel.org, kirill.shutemov@linux.intel.com,
	mawilcox@microsoft.com, mgorman@techsingularity.net,
	dave@stgolabs.net, linux-mm@kvack.org,
	linux-kernel@vger.kernel.org, Davidlohr Bueso <dbueso@suse.de>
Subject: [PATCH 17/64] kernel: use mm locking wrappers
Date: Mon,  5 Feb 2018 02:27:07 +0100
Message-ID: <20180205012754.23615-18-dbueso@wotan.suse.de>
In-Reply-To: <20180205012754.23615-1-dbueso@wotan.suse.de>

From: Davidlohr Bueso <dave@stgolabs.net>

Most of these users already have an mmrange in scope, so the
conversion is straightforward. For those that do not, the mmap_sem is
taken and released within the same function context, so a full range
lock can simply be defined locally. There is no change in semantics.

dup_mmap() needs two ranges, one for the old mm and one for the new one.

Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
---
 kernel/acct.c               |  5 +++--
 kernel/events/core.c        |  5 +++--
 kernel/events/uprobes.c     | 17 +++++++++--------
 kernel/fork.c               | 16 ++++++++++------
 kernel/futex.c              |  4 ++--
 kernel/sched/fair.c         |  5 +++--
 kernel/trace/trace_output.c |  5 +++--
 7 files changed, 33 insertions(+), 24 deletions(-)
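
For context, the mm_read_lock()/mm_write_lock() family used below is
introduced earlier in the series (patch 03/64, "mm: introduce mm
locking wrappers"); the exact definitions are not shown here. A
minimal sketch of the shape assumed by this patch -- the wrappers
still back onto mmap_sem and merely carry the range descriptor
through until the final conversion in patch 64/64 -- would be
something like:

	/*
	 * Sketch only; the real definitions come from patch 03/64.
	 * Until mmap_sem is converted to a range lock (patch 64/64),
	 * the range argument is threaded through but not yet used
	 * for range-based serialization.
	 */
	static inline void mm_read_lock(struct mm_struct *mm,
					struct range_lock *mmrange)
	{
		down_read(&mm->mmap_sem);
	}

	static inline void mm_read_unlock(struct mm_struct *mm,
					  struct range_lock *mmrange)
	{
		up_read(&mm->mmap_sem);
	}

	static inline int mm_write_lock_killable(struct mm_struct *mm,
						 struct range_lock *mmrange)
	{
		return down_write_killable(&mm->mmap_sem);
	}

Callers that do not already have a range in scope define one with
DEFINE_RANGE_LOCK_FULL(), which spans the whole address space, so the
locking granularity is unchanged by this patch.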

diff --git a/kernel/acct.c b/kernel/acct.c
index addf7732fb56..bc8826f68002 100644
--- a/kernel/acct.c
+++ b/kernel/acct.c
@@ -538,14 +538,15 @@ void acct_collect(long exitcode, int group_dead)
 
 	if (group_dead && current->mm) {
 		struct vm_area_struct *vma;
+		DEFINE_RANGE_LOCK_FULL(mmrange);
 
-		down_read(&current->mm->mmap_sem);
+		mm_read_lock(current->mm, &mmrange);
 		vma = current->mm->mmap;
 		while (vma) {
 			vsize += vma->vm_end - vma->vm_start;
 			vma = vma->vm_next;
 		}
-		up_read(&current->mm->mmap_sem);
+		mm_read_unlock(current->mm, &mmrange);
 	}
 
 	spin_lock_irq(&current->sighand->siglock);
diff --git a/kernel/events/core.c b/kernel/events/core.c
index f0549e79978b..b21d0942d225 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -8264,6 +8264,7 @@ static void perf_event_addr_filters_apply(struct perf_event *event)
 	struct mm_struct *mm = NULL;
 	unsigned int count = 0;
 	unsigned long flags;
+	DEFINE_RANGE_LOCK_FULL(mmrange);
 
 	/*
 	 * We may observe TASK_TOMBSTONE, which means that the event tear-down
@@ -8279,7 +8280,7 @@ static void perf_event_addr_filters_apply(struct perf_event *event)
 	if (!mm)
 		goto restart;
 
-	down_read(&mm->mmap_sem);
+	mm_read_lock(mm, &mmrange);
 
 	raw_spin_lock_irqsave(&ifh->lock, flags);
 	list_for_each_entry(filter, &ifh->list, entry) {
@@ -8299,7 +8300,7 @@ static void perf_event_addr_filters_apply(struct perf_event *event)
 	event->addr_filters_gen++;
 	raw_spin_unlock_irqrestore(&ifh->lock, flags);
 
-	up_read(&mm->mmap_sem);
+	mm_read_unlock(mm, &mmrange);
 
 	mmput(mm);
 
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 60e12b39182c..df6da03d5dc1 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -818,7 +818,7 @@ register_for_each_vma(struct uprobe *uprobe, struct uprobe_consumer *new)
 		if (err && is_register)
 			goto free;
 
-		down_write(&mm->mmap_sem);
+		mm_write_lock(mm, &mmrange);
 		vma = find_vma(mm, info->vaddr);
 		if (!vma || !valid_vma(vma, is_register) ||
 		    file_inode(vma->vm_file) != uprobe->inode)
@@ -842,7 +842,7 @@ register_for_each_vma(struct uprobe *uprobe, struct uprobe_consumer *new)
 		}
 
  unlock:
-		up_write(&mm->mmap_sem);
+		mm_write_unlock(mm, &mmrange);
  free:
 		mmput(mm);
 		info = free_map_info(info);
@@ -984,7 +984,7 @@ static int unapply_uprobe(struct uprobe *uprobe, struct mm_struct *mm)
 	int err = 0;
 	DEFINE_RANGE_LOCK_FULL(mmrange);
 
-	down_read(&mm->mmap_sem);
+	mm_read_lock(mm, &mmrange);
 	for (vma = mm->mmap; vma; vma = vma->vm_next) {
 		unsigned long vaddr;
 		loff_t offset;
@@ -1001,7 +1001,7 @@ static int unapply_uprobe(struct uprobe *uprobe, struct mm_struct *mm)
 		vaddr = offset_to_vaddr(vma, uprobe->offset);
 		err |= remove_breakpoint(uprobe, mm, vaddr, &mmrange);
 	}
-	up_read(&mm->mmap_sem);
+	mm_read_unlock(mm, &mmrange);
 
 	return err;
 }
@@ -1150,8 +1150,9 @@ static int xol_add_vma(struct mm_struct *mm, struct xol_area *area)
 {
 	struct vm_area_struct *vma;
 	int ret;
+	DEFINE_RANGE_LOCK_FULL(mmrange);
 
-	if (down_write_killable(&mm->mmap_sem))
+	if (mm_write_lock_killable(mm, &mmrange))
 		return -EINTR;
 
 	if (mm->uprobes_state.xol_area) {
@@ -1181,7 +1182,7 @@ static int xol_add_vma(struct mm_struct *mm, struct xol_area *area)
 	/* pairs with get_xol_area() */
 	smp_store_release(&mm->uprobes_state.xol_area, area); /* ^^^ */
  fail:
-	up_write(&mm->mmap_sem);
+	mm_write_unlock(mm, &mmrange);
 
 	return ret;
 }
@@ -1748,7 +1749,7 @@ static struct uprobe *find_active_uprobe(unsigned long bp_vaddr, int *is_swbp)
 	struct vm_area_struct *vma;
 	DEFINE_RANGE_LOCK_FULL(mmrange);
 
-	down_read(&mm->mmap_sem);
+	mm_read_lock(mm, &mmrange);
 	vma = find_vma(mm, bp_vaddr);
 	if (vma && vma->vm_start <= bp_vaddr) {
 		if (valid_vma(vma, false)) {
@@ -1766,7 +1767,7 @@ static struct uprobe *find_active_uprobe(unsigned long bp_vaddr, int *is_swbp)
 
 	if (!uprobe && test_and_clear_bit(MMF_RECALC_UPROBES, &mm->flags))
 		mmf_recalc_uprobes(mm);
-	up_read(&mm->mmap_sem);
+	mm_read_unlock(mm, &mmrange);
 
 	return uprobe;
 }
diff --git a/kernel/fork.c b/kernel/fork.c
index 2113e252cb9d..060554e33111 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -401,9 +401,11 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
 	int retval;
 	unsigned long charge;
 	LIST_HEAD(uf);
+	DEFINE_RANGE_LOCK_FULL(old_mmrange);
+	DEFINE_RANGE_LOCK_FULL(mmrange); /* for the new mm */
 
 	uprobe_start_dup_mmap();
-	if (down_write_killable(&oldmm->mmap_sem)) {
+	if (mm_write_lock_killable(oldmm, &old_mmrange)) {
 		retval = -EINTR;
 		goto fail_uprobe_end;
 	}
@@ -412,7 +414,7 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
 	/*
 	 * Not linked in yet - no deadlock potential:
 	 */
-	down_write_nested(&mm->mmap_sem, SINGLE_DEPTH_NESTING);
+	mm_write_lock_nested(mm, &mmrange, SINGLE_DEPTH_NESTING);
 
 	/* No ordering required: file already has been exposed. */
 	RCU_INIT_POINTER(mm->exe_file, get_mm_exe_file(oldmm));
@@ -522,9 +524,9 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
 	arch_dup_mmap(oldmm, mm);
 	retval = 0;
 out:
-	up_write(&mm->mmap_sem);
+	mm_write_unlock(mm, &mmrange);
 	flush_tlb_mm(oldmm);
-	up_write(&oldmm->mmap_sem);
+	mm_write_unlock(oldmm, &old_mmrange);
 	dup_userfaultfd_complete(&uf);
 fail_uprobe_end:
 	uprobe_end_dup_mmap();
@@ -554,9 +556,11 @@ static inline void mm_free_pgd(struct mm_struct *mm)
 #else
 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
 {
-	down_write(&oldmm->mmap_sem);
+	DEFINE_RANGE_LOCK_FULL(mmrange);
+
+	mm_write_lock(oldmm, &mmrange);
 	RCU_INIT_POINTER(mm->exe_file, get_mm_exe_file(oldmm));
-	up_write(&oldmm->mmap_sem);
+	mm_write_unlock(oldmm, &mmrange);
 	return 0;
 }
 #define mm_alloc_pgd(mm)	(0)
diff --git a/kernel/futex.c b/kernel/futex.c
index 09a0d86f80a0..6764240e87bb 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -727,10 +727,10 @@ static int fault_in_user_writeable(u32 __user *uaddr)
 	int ret;
 	DEFINE_RANGE_LOCK_FULL(mmrange);
 
-	down_read(&mm->mmap_sem);
+	mm_read_lock(mm, &mmrange);
 	ret = fixup_user_fault(current, mm, (unsigned long)uaddr,
 			       FAULT_FLAG_WRITE, NULL, &mmrange);
-	up_read(&mm->mmap_sem);
+	mm_read_unlock(mm, &mmrange);
 
 	return ret < 0 ? ret : 0;
 }
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 7b6535987500..01f8c533aa21 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2470,6 +2470,7 @@ void task_numa_work(struct callback_head *work)
 	struct vm_area_struct *vma;
 	unsigned long start, end;
 	unsigned long nr_pte_updates = 0;
+	DEFINE_RANGE_LOCK_FULL(mmrange);
 	long pages, virtpages;
 
 	SCHED_WARN_ON(p != container_of(work, struct task_struct, numa_work));
@@ -2521,7 +2522,7 @@ void task_numa_work(struct callback_head *work)
 		return;
 
 
-	if (!down_read_trylock(&mm->mmap_sem))
+	if (!mm_read_trylock(mm, &mmrange))
 		return;
 	vma = find_vma(mm, start);
 	if (!vma) {
@@ -2589,7 +2590,7 @@ void task_numa_work(struct callback_head *work)
 		mm->numa_scan_offset = start;
 	else
 		reset_ptenuma_scan(p);
-	up_read(&mm->mmap_sem);
+	mm_read_unlock(mm, &mmrange);
 
 	/*
 	 * Make sure tasks use at least 32x as much time to run other code
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index 90db994ac900..0c3f5193de41 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -395,8 +395,9 @@ static int seq_print_user_ip(struct trace_seq *s, struct mm_struct *mm,
 
 	if (mm) {
 		const struct vm_area_struct *vma;
+		DEFINE_RANGE_LOCK_FULL(mmrange);
 
-		down_read(&mm->mmap_sem);
+		mm_read_lock(mm, &mmrange);
 		vma = find_vma(mm, ip);
 		if (vma) {
 			file = vma->vm_file;
@@ -408,7 +409,7 @@ static int seq_print_user_ip(struct trace_seq *s, struct mm_struct *mm,
 				trace_seq_printf(s, "[+0x%lx]",
 						 ip - vmstart);
 		}
-		up_read(&mm->mmap_sem);
+		mm_read_unlock(mm, &mmrange);
 	}
 	if (ret && ((sym_flags & TRACE_ITER_SYM_ADDR) || !file))
 		trace_seq_printf(s, " <" IP_FMT ">", ip);
-- 
2.13.6

Thread overview: 69+ messages
2018-02-05  1:26 [RFC PATCH 00/64] mm: towards parallel address space operations Davidlohr Bueso
2018-02-05  1:26 ` [PATCH 01/64] interval-tree: build unconditionally Davidlohr Bueso
2018-02-05  1:26 ` [PATCH 02/64] Introduce range reader/writer lock Davidlohr Bueso
2018-02-05  1:26 ` [PATCH 03/64] mm: introduce mm locking wrappers Davidlohr Bueso
2018-02-05  1:26 ` [PATCH 04/64] mm: add a range parameter to the vm_fault structure Davidlohr Bueso
2018-02-05  1:26 ` [PATCH 05/64] mm,khugepaged: prepare passing of rangelock field to vm_fault Davidlohr Bueso
2018-02-05  1:26 ` [PATCH 06/64] mm: teach pagefault paths about range locking Davidlohr Bueso
2018-02-05 16:09   ` Laurent Dufour
2018-02-06 18:32     ` Davidlohr Bueso
2018-02-05  1:26 ` [PATCH 07/64] mm/hugetlb: teach hugetlb_fault() " Davidlohr Bueso
2018-02-05  1:26 ` [PATCH 08/64] mm: teach lock_page_or_retry() " Davidlohr Bueso
2018-02-05  1:26 ` [PATCH 09/64] mm/mmu_notifier: teach oom reaper " Davidlohr Bueso
2018-02-05  1:27 ` [PATCH 10/64] kernel/exit: teach exit_mm() " Davidlohr Bueso
2018-02-05  1:27 ` [PATCH 11/64] prctl: teach " Davidlohr Bueso
2018-02-05  1:27 ` [PATCH 12/64] fs/userfaultfd: teach userfaultfd_must_wait() " Davidlohr Bueso
2018-02-05  1:27 ` [PATCH 13/64] fs/proc: teach " Davidlohr Bueso
2018-02-05  1:27 ` [PATCH 14/64] fs/coredump: " Davidlohr Bueso
2018-02-05  1:27 ` [PATCH 15/64] ipc: use mm locking wrappers Davidlohr Bueso
2018-02-05  1:27 ` [PATCH 16/64] virt: " Davidlohr Bueso
2018-02-05  1:27 ` Davidlohr Bueso [this message]
2018-02-05  1:27 ` [PATCH 18/64] mm/ksm: teach about range locking Davidlohr Bueso
2018-02-05  1:27 ` [PATCH 19/64] mm/mlock: use mm locking wrappers Davidlohr Bueso
2018-02-05  1:27 ` [PATCH 20/64] mm/madvise: " Davidlohr Bueso
2018-02-05  1:27 ` [PATCH 21/64] mm: teach drop/take_all_locks() about range locking Davidlohr Bueso
2018-02-05  1:27 ` [PATCH 22/64] mm: avoid mmap_sem trylock in vm_insert_page() Davidlohr Bueso
2018-02-05  1:27 ` [PATCH 23/64] mm: huge pagecache: do not check mmap_sem state Davidlohr Bueso
2018-02-05  1:27 ` [PATCH 24/64] mm/thp: disable mmap_sem is_locked checks Davidlohr Bueso
2018-02-05  1:27 ` [PATCH 25/64] mm: use mm locking wrappers Davidlohr Bueso
2018-02-05  1:27 ` [PATCH 26/64] fs: " Davidlohr Bueso
2018-02-05  1:27 ` [PATCH 27/64] arch/{x86,sh,ppc}: teach bad_area() about range locking Davidlohr Bueso
2018-02-05  1:27 ` [PATCH 28/64] arch/x86: use mm locking wrappers Davidlohr Bueso
2018-02-05  1:27 ` [PATCH 29/64] arch/alpha: " Davidlohr Bueso
2018-02-05  1:27 ` [PATCH 30/64] arch/tile: " Davidlohr Bueso
2018-02-05  1:27 ` [PATCH 31/64] arch/sparc: " Davidlohr Bueso
2018-02-05  1:27 ` [PATCH 32/64] arch/s390: " Davidlohr Bueso
2018-02-05  1:27 ` [PATCH 33/64] arch/powerpc: " Davidlohr Bueso
2018-02-05  1:27 ` [PATCH 34/64] arch/parisc: " Davidlohr Bueso
2018-02-05  1:27 ` [PATCH 35/64] arch/ia64: " Davidlohr Bueso
2018-02-05  1:27 ` [PATCH 36/64] arch/mips: " Davidlohr Bueso
2018-02-05  1:27 ` [PATCH 37/64] arch/arc: " Davidlohr Bueso
2018-02-05  1:27 ` [PATCH 38/64] arch/blackfin: " Davidlohr Bueso
2018-02-05  1:27 ` [PATCH 39/64] arch/m68k: " Davidlohr Bueso
2018-02-05  1:27 ` [PATCH 40/64] arch/sh: " Davidlohr Bueso
2018-02-05  1:27 ` [PATCH 41/64] arch/cris: " Davidlohr Bueso
2018-02-05  1:27 ` [PATCH 42/64] arch/frv: " Davidlohr Bueso
2018-02-05  1:27 ` [PATCH 43/64] arch/hexagon: " Davidlohr Bueso
2018-02-05  1:27 ` [PATCH 44/64] arch/score: " Davidlohr Bueso
2018-02-05  1:27 ` [PATCH 45/64] arch/m32r: " Davidlohr Bueso
2018-02-05  1:27 ` [PATCH 46/64] arch/metag: " Davidlohr Bueso
2018-02-05  1:27 ` [PATCH 47/64] arch/microblaze: " Davidlohr Bueso
2018-02-05  1:27 ` [PATCH 48/64] arch/tile: " Davidlohr Bueso
2018-02-05  1:27 ` [PATCH 49/64] arch/xtensa: " Davidlohr Bueso
2018-02-05  1:27 ` [PATCH 50/64] arch/unicore32: " Davidlohr Bueso
2018-02-05  1:27 ` [PATCH 51/64] arch/mn10300: " Davidlohr Bueso
2018-02-05  1:27 ` [PATCH 52/64] arch/openrisc: " Davidlohr Bueso
2018-02-05  1:27 ` [PATCH 53/64] arch/nios2: " Davidlohr Bueso
2018-02-05  1:27 ` [PATCH 54/64] arch/arm: " Davidlohr Bueso
2018-02-05  1:27 ` [PATCH 55/64] arch/riscv: " Davidlohr Bueso
2018-02-05  1:27 ` [PATCH 56/64] drivers/android: " Davidlohr Bueso
2018-02-05  1:27 ` [PATCH 57/64] drivers/gpu: " Davidlohr Bueso
2018-02-05  1:27 ` [PATCH 58/64] drivers/infiniband: " Davidlohr Bueso
2018-02-05  1:27 ` [PATCH 59/64] drivers/iommu: use mm locking helpers Davidlohr Bueso
2018-02-05  1:27 ` [PATCH 60/64] drivers/xen: use mm locking wrappers Davidlohr Bueso
2018-02-05  1:27 ` [PATCH 61/64] staging/lustre: use generic range lock Davidlohr Bueso
2018-02-05  1:27 ` [PATCH 62/64] drivers: use mm locking wrappers (the rest) Davidlohr Bueso
2018-02-05  1:27 ` [PATCH 63/64] mm/mmap: hack drop down_write_nest_lock() Davidlohr Bueso
2018-02-05  1:27 ` [PATCH 64/64] mm: convert mmap_sem to range mmap_lock Davidlohr Bueso
2018-02-05 16:53 ` [RFC PATCH 00/64] mm: towards parallel address space operations Laurent Dufour
2018-02-06 18:48   ` Davidlohr Bueso
