linux-mm.kvack.org archive mirror
From: Davidlohr Bueso <dbueso@suse.de>
To: akpm@linux-foundation.org, mingo@kernel.org
Cc: peterz@infradead.org, ldufour@linux.vnet.ibm.com, jack@suse.cz,
	mhocko@kernel.org, kirill.shutemov@linux.intel.com,
	mawilcox@microsoft.com, mgorman@techsingularity.net,
	dave@stgolabs.net, linux-mm@kvack.org,
	linux-kernel@vger.kernel.org, Davidlohr Bueso <dbueso@suse.de>
Subject: [PATCH 32/64] arch/s390: use mm locking wrappers
Date: Mon,  5 Feb 2018 02:27:22 +0100
Message-ID: <20180205012754.23615-33-dbueso@wotan.suse.de>
In-Reply-To: <20180205012754.23615-1-dbueso@wotan.suse.de>

From: Davidlohr Bueso <dave@stgolabs.net>

With the mmrange infrastructure in place, this conversion is straightforward:
replace each mmap_sem read/write acquisition with the corresponding mm locking
wrapper, passing a full range lock.
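
A minimal before/after sketch of the pattern (the call site is illustrative
only; the wrapper and lock names are those introduced earlier in this series):

	/* before: take the mmap_sem rwsem directly */
	down_read(&mm->mmap_sem);
	/* ... operate on the address space ... */
	up_read(&mm->mmap_sem);

	/* after: a full range lock on the stack, passed to the wrappers */
	DEFINE_RANGE_LOCK_FULL(mmrange);

	mm_read_lock(mm, &mmrange);
	/* ... operate on the address space ... */
	mm_read_unlock(mm, &mmrange);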

Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
---
 arch/s390/kernel/vdso.c  |  5 +++--
 arch/s390/kvm/gaccess.c  |  4 ++--
 arch/s390/kvm/kvm-s390.c | 24 ++++++++++++++----------
 arch/s390/kvm/priv.c     | 29 +++++++++++++++++------------
 arch/s390/mm/fault.c     |  6 +++---
 arch/s390/mm/gmap.c      | 45 ++++++++++++++++++++++++---------------------
 arch/s390/pci/pci_mmio.c |  5 +++--
 7 files changed, 66 insertions(+), 52 deletions(-)

diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c
index f3a1c7c6824e..0395c6b906fd 100644
--- a/arch/s390/kernel/vdso.c
+++ b/arch/s390/kernel/vdso.c
@@ -213,6 +213,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 	unsigned long vdso_pages;
 	unsigned long vdso_base;
 	int rc;
+	DEFINE_RANGE_LOCK_FULL(mmrange);
 
 	if (!vdso_enabled)
 		return 0;
@@ -239,7 +240,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 	 * it at vdso_base which is the "natural" base for it, but we might
 	 * fail and end up putting it elsewhere.
 	 */
-	if (down_write_killable(&mm->mmap_sem))
+	if (mm_write_lock_killable(mm, &mmrange))
 		return -EINTR;
 	vdso_base = get_unmapped_area(NULL, 0, vdso_pages << PAGE_SHIFT, 0, 0);
 	if (IS_ERR_VALUE(vdso_base)) {
@@ -270,7 +271,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 	rc = 0;
 
 out_up:
-	up_write(&mm->mmap_sem);
+	mm_write_unlock(mm, &mmrange);
 	return rc;
 }
 
diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c
index ff739b86df36..28c2c14319c8 100644
--- a/arch/s390/kvm/gaccess.c
+++ b/arch/s390/kvm/gaccess.c
@@ -1179,7 +1179,7 @@ int kvm_s390_shadow_fault(struct kvm_vcpu *vcpu, struct gmap *sg,
 	int rc;
 	DEFINE_RANGE_LOCK_FULL(mmrange);
 
-	down_read(&sg->mm->mmap_sem);
+	mm_read_lock(sg->mm, &mmrange);
 	/*
 	 * We don't want any guest-2 tables to change - so the parent
 	 * tables/pointers we read stay valid - unshadowing is however
@@ -1209,6 +1209,6 @@ int kvm_s390_shadow_fault(struct kvm_vcpu *vcpu, struct gmap *sg,
 	if (!rc)
 		rc = gmap_shadow_page(sg, saddr, __pte(pte.val), &mmrange);
 	ipte_unlock(vcpu);
-	up_read(&sg->mm->mmap_sem);
+	mm_read_unlock(sg->mm, &mmrange);
 	return rc;
 }
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index ba4c7092335a..942aeb6cbf1c 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -1420,6 +1420,7 @@ static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
 {
 	uint8_t *keys;
 	uint64_t hva;
+	DEFINE_RANGE_LOCK_FULL(mmrange);
 	int srcu_idx, i, r = 0;
 
 	if (args->flags != 0)
@@ -1437,7 +1438,7 @@ static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
 	if (!keys)
 		return -ENOMEM;
 
-	down_read(&current->mm->mmap_sem);
+	mm_read_lock(current->mm, &mmrange);
 	srcu_idx = srcu_read_lock(&kvm->srcu);
 	for (i = 0; i < args->count; i++) {
 		hva = gfn_to_hva(kvm, args->start_gfn + i);
@@ -1451,7 +1452,7 @@ static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
 			break;
 	}
 	srcu_read_unlock(&kvm->srcu, srcu_idx);
-	up_read(&current->mm->mmap_sem);
+	mm_read_unlock(current->mm, &mmrange);
 
 	if (!r) {
 		r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
@@ -1468,6 +1469,7 @@ static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
 {
 	uint8_t *keys;
 	uint64_t hva;
+	DEFINE_RANGE_LOCK_FULL(mmrange);
 	int srcu_idx, i, r = 0;
 
 	if (args->flags != 0)
@@ -1493,7 +1495,7 @@ static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
 	if (r)
 		goto out;
 
-	down_read(&current->mm->mmap_sem);
+	mm_read_lock(current->mm, &mmrange);
 	srcu_idx = srcu_read_lock(&kvm->srcu);
 	for (i = 0; i < args->count; i++) {
 		hva = gfn_to_hva(kvm, args->start_gfn + i);
@@ -1513,7 +1515,7 @@ static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
 			break;
 	}
 	srcu_read_unlock(&kvm->srcu, srcu_idx);
-	up_read(&current->mm->mmap_sem);
+	mm_read_unlock(current->mm, &mmrange);
 out:
 	kvfree(keys);
 	return r;
@@ -1543,6 +1545,7 @@ static int kvm_s390_get_cmma_bits(struct kvm *kvm,
 	unsigned long bufsize, hva, pgstev, i, next, cur;
 	int srcu_idx, peek, r = 0, rr;
 	u8 *res;
+	DEFINE_RANGE_LOCK_FULL(mmrange);
 
 	cur = args->start_gfn;
 	i = next = pgstev = 0;
@@ -1586,7 +1589,7 @@ static int kvm_s390_get_cmma_bits(struct kvm *kvm,
 
 	args->start_gfn = cur;
 
-	down_read(&kvm->mm->mmap_sem);
+	mm_read_lock(kvm->mm, &mmrange);
 	srcu_idx = srcu_read_lock(&kvm->srcu);
 	while (i < bufsize) {
 		hva = gfn_to_hva(kvm, cur);
@@ -1620,7 +1623,7 @@ static int kvm_s390_get_cmma_bits(struct kvm *kvm,
 		cur++;
 	}
 	srcu_read_unlock(&kvm->srcu, srcu_idx);
-	up_read(&kvm->mm->mmap_sem);
+	mm_read_unlock(kvm->mm, &mmrange);
 	args->count = i;
 	args->remaining = s ? atomic64_read(&s->dirty_pages) : 0;
 
@@ -1643,6 +1646,7 @@ static int kvm_s390_set_cmma_bits(struct kvm *kvm,
 	unsigned long hva, mask, pgstev, i;
 	uint8_t *bits;
 	int srcu_idx, r = 0;
+	DEFINE_RANGE_LOCK_FULL(mmrange);
 
 	mask = args->mask;
 
@@ -1668,7 +1672,7 @@ static int kvm_s390_set_cmma_bits(struct kvm *kvm,
 		goto out;
 	}
 
-	down_read(&kvm->mm->mmap_sem);
+	mm_read_lock(kvm->mm, &mmrange);
 	srcu_idx = srcu_read_lock(&kvm->srcu);
 	for (i = 0; i < args->count; i++) {
 		hva = gfn_to_hva(kvm, args->start_gfn + i);
@@ -1683,12 +1687,12 @@ static int kvm_s390_set_cmma_bits(struct kvm *kvm,
 		set_pgste_bits(kvm->mm, hva, mask, pgstev);
 	}
 	srcu_read_unlock(&kvm->srcu, srcu_idx);
-	up_read(&kvm->mm->mmap_sem);
+	mm_read_unlock(kvm->mm, &mmrange);
 
 	if (!kvm->mm->context.use_cmma) {
-		down_write(&kvm->mm->mmap_sem);
+		mm_write_lock(kvm->mm, &mmrange);
 		kvm->mm->context.use_cmma = 1;
-		up_write(&kvm->mm->mmap_sem);
+		mm_write_unlock(kvm->mm, &mmrange);
 	}
 out:
 	vfree(bits);
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index c4c4e157c036..7bb37eca557e 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -246,6 +246,7 @@ static int handle_iske(struct kvm_vcpu *vcpu)
 	unsigned char key;
 	int reg1, reg2;
 	int rc;
+	DEFINE_RANGE_LOCK_FULL(mmrange);
 
 	vcpu->stat.instruction_iske++;
 
@@ -265,9 +266,9 @@ static int handle_iske(struct kvm_vcpu *vcpu)
 	if (kvm_is_error_hva(addr))
 		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
 
-	down_read(&current->mm->mmap_sem);
+	mm_read_lock(current->mm, &mmrange);
 	rc = get_guest_storage_key(current->mm, addr, &key);
-	up_read(&current->mm->mmap_sem);
+	mm_read_unlock(current->mm, &mmrange);
 	if (rc)
 		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
 	vcpu->run->s.regs.gprs[reg1] &= ~0xff;
@@ -280,6 +281,7 @@ static int handle_rrbe(struct kvm_vcpu *vcpu)
 	unsigned long addr;
 	int reg1, reg2;
 	int rc;
+	DEFINE_RANGE_LOCK_FULL(mmrange);
 
 	vcpu->stat.instruction_rrbe++;
 
@@ -299,9 +301,9 @@ static int handle_rrbe(struct kvm_vcpu *vcpu)
 	if (kvm_is_error_hva(addr))
 		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
 
-	down_read(&current->mm->mmap_sem);
+	mm_read_lock(current->mm, &mmrange);
 	rc = reset_guest_reference_bit(current->mm, addr);
-	up_read(&current->mm->mmap_sem);
+	mm_read_unlock(current->mm, &mmrange);
 	if (rc < 0)
 		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
 
@@ -351,16 +353,17 @@ static int handle_sske(struct kvm_vcpu *vcpu)
 	}
 
 	while (start != end) {
+		DEFINE_RANGE_LOCK_FULL(mmrange);
 		unsigned long addr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(start));
 
 		if (kvm_is_error_hva(addr))
 			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
 
-		down_read(&current->mm->mmap_sem);
+		mm_read_lock(current->mm, &mmrange);
 		rc = cond_set_guest_storage_key(current->mm, addr, key, &oldkey,
 						m3 & SSKE_NQ, m3 & SSKE_MR,
 						m3 & SSKE_MC);
-		up_read(&current->mm->mmap_sem);
+		mm_read_unlock(current->mm, &mmrange);
 		if (rc < 0)
 			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
 		start += PAGE_SIZE;
@@ -953,13 +956,14 @@ static int handle_pfmf(struct kvm_vcpu *vcpu)
 
 		if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK) {
 			int rc = kvm_s390_skey_check_enable(vcpu);
+			DEFINE_RANGE_LOCK_FULL(mmrange);
 
 			if (rc)
 				return rc;
-			down_read(&current->mm->mmap_sem);
+			mm_read_lock(current->mm, &mmrange);
 			rc = cond_set_guest_storage_key(current->mm, useraddr,
 							key, NULL, nq, mr, mc);
-			up_read(&current->mm->mmap_sem);
+			mm_read_unlock(current->mm, &mmrange);
 			if (rc < 0)
 				return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
 		}
@@ -1046,6 +1050,7 @@ static int handle_essa(struct kvm_vcpu *vcpu)
 	unsigned long *cbrlo;
 	struct gmap *gmap;
 	int i, orc;
+	DEFINE_RANGE_LOCK_FULL(mmrange);
 
 	VCPU_EVENT(vcpu, 4, "ESSA: release %d pages", entries);
 	gmap = vcpu->arch.gmap;
@@ -1073,9 +1078,9 @@ static int handle_essa(struct kvm_vcpu *vcpu)
 		 * already correct, we do nothing and avoid the lock.
 		 */
 		if (vcpu->kvm->mm->context.use_cmma == 0) {
-			down_write(&vcpu->kvm->mm->mmap_sem);
+			mm_write_lock(vcpu->kvm->mm, &mmrange);
 			vcpu->kvm->mm->context.use_cmma = 1;
-			up_write(&vcpu->kvm->mm->mmap_sem);
+			mm_write_unlock(vcpu->kvm->mm, &mmrange);
 		}
 		/*
 		 * If we are here, we are supposed to have CMMA enabled in
@@ -1098,10 +1103,10 @@ static int handle_essa(struct kvm_vcpu *vcpu)
 	}
 	vcpu->arch.sie_block->cbrlo &= PAGE_MASK;	/* reset nceo */
 	cbrlo = phys_to_virt(vcpu->arch.sie_block->cbrlo);
-	down_read(&gmap->mm->mmap_sem);
+	mm_read_lock(gmap->mm, &mmrange);
 	for (i = 0; i < entries; ++i)
 		__gmap_zap(gmap, cbrlo[i]);
-	up_read(&gmap->mm->mmap_sem);
+	mm_read_unlock(gmap->mm, &mmrange);
 	return 0;
 }
 
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 17ba3c402f9d..0d6b63fa629e 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -463,7 +463,7 @@ static inline int do_exception(struct pt_regs *regs, int access)
 		flags |= FAULT_FLAG_USER;
 	if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400)
 		flags |= FAULT_FLAG_WRITE;
-	down_read(&mm->mmap_sem);
+	mm_read_lock(mm, &mmrange);
 
 	gmap = NULL;
 	if (IS_ENABLED(CONFIG_PGSTE) && type == GMAP_FAULT) {
@@ -546,7 +546,7 @@ static inline int do_exception(struct pt_regs *regs, int access)
 			flags &= ~(FAULT_FLAG_ALLOW_RETRY |
 				   FAULT_FLAG_RETRY_NOWAIT);
 			flags |= FAULT_FLAG_TRIED;
-			down_read(&mm->mmap_sem);
+			mm_read_lock(mm, &mmrange);
 			goto retry;
 		}
 	}
@@ -564,7 +564,7 @@ static inline int do_exception(struct pt_regs *regs, int access)
 	}
 	fault = 0;
 out_up:
-	up_read(&mm->mmap_sem);
+	mm_read_unlock(mm, &mmrange);
 out:
 	return fault;
 }
diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index b12a44813022..9419ae7b7f56 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -395,6 +395,7 @@ int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
 {
 	unsigned long off;
 	int flush;
+	DEFINE_RANGE_LOCK_FULL(mmrange);
 
 	BUG_ON(gmap_is_shadow(gmap));
 	if ((to | len) & (PMD_SIZE - 1))
@@ -403,10 +404,10 @@ int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
 		return -EINVAL;
 
 	flush = 0;
-	down_write(&gmap->mm->mmap_sem);
+	mm_write_lock(gmap->mm, &mmrange);
 	for (off = 0; off < len; off += PMD_SIZE)
 		flush |= __gmap_unmap_by_gaddr(gmap, to + off);
-	up_write(&gmap->mm->mmap_sem);
+	mm_write_unlock(gmap->mm, &mmrange);
 	if (flush)
 		gmap_flush_tlb(gmap);
 	return 0;
@@ -427,6 +428,7 @@ int gmap_map_segment(struct gmap *gmap, unsigned long from,
 {
 	unsigned long off;
 	int flush;
+	DEFINE_RANGE_LOCK_FULL(mmrange);
 
 	BUG_ON(gmap_is_shadow(gmap));
 	if ((from | to | len) & (PMD_SIZE - 1))
@@ -436,7 +438,7 @@ int gmap_map_segment(struct gmap *gmap, unsigned long from,
 		return -EINVAL;
 
 	flush = 0;
-	down_write(&gmap->mm->mmap_sem);
+	mm_write_lock(gmap->mm, &mmrange);
 	for (off = 0; off < len; off += PMD_SIZE) {
 		/* Remove old translation */
 		flush |= __gmap_unmap_by_gaddr(gmap, to + off);
@@ -446,7 +448,7 @@ int gmap_map_segment(struct gmap *gmap, unsigned long from,
 				      (void *) from + off))
 			break;
 	}
-	up_write(&gmap->mm->mmap_sem);
+	mm_write_unlock(gmap->mm, &mmrange);
 	if (flush)
 		gmap_flush_tlb(gmap);
 	if (off >= len)
@@ -492,10 +494,11 @@ EXPORT_SYMBOL_GPL(__gmap_translate);
 unsigned long gmap_translate(struct gmap *gmap, unsigned long gaddr)
 {
 	unsigned long rc;
+	DEFINE_RANGE_LOCK_FULL(mmrange);
 
-	down_read(&gmap->mm->mmap_sem);
+	mm_read_lock(gmap->mm, &mmrange);
 	rc = __gmap_translate(gmap, gaddr);
-	up_read(&gmap->mm->mmap_sem);
+	mm_read_unlock(gmap->mm, &mmrange);
 	return rc;
 }
 EXPORT_SYMBOL_GPL(gmap_translate);
@@ -623,8 +626,7 @@ int gmap_fault(struct gmap *gmap, unsigned long gaddr,
 	bool unlocked;
 	DEFINE_RANGE_LOCK_FULL(mmrange);
 
-	down_read(&gmap->mm->mmap_sem);
-
+	mm_read_lock(gmap->mm, &mmrange);
 retry:
 	unlocked = false;
 	vmaddr = __gmap_translate(gmap, gaddr);
@@ -646,7 +648,7 @@ int gmap_fault(struct gmap *gmap, unsigned long gaddr,
 
 	rc = __gmap_link(gmap, gaddr, vmaddr);
 out_up:
-	up_read(&gmap->mm->mmap_sem);
+	mm_read_unlock(gmap->mm, &mmrange);
 	return rc;
 }
 EXPORT_SYMBOL_GPL(gmap_fault);
@@ -678,8 +680,9 @@ void gmap_discard(struct gmap *gmap, unsigned long from, unsigned long to)
 {
 	unsigned long gaddr, vmaddr, size;
 	struct vm_area_struct *vma;
+	DEFINE_RANGE_LOCK_FULL(mmrange);
 
-	down_read(&gmap->mm->mmap_sem);
+	mm_read_lock(gmap->mm, &mmrange);
 	for (gaddr = from; gaddr < to;
 	     gaddr = (gaddr + PMD_SIZE) & PMD_MASK) {
 		/* Find the vm address for the guest address */
@@ -694,7 +697,7 @@ void gmap_discard(struct gmap *gmap, unsigned long from, unsigned long to)
 		size = min(to - gaddr, PMD_SIZE - (gaddr & ~PMD_MASK));
 		zap_page_range(vma, vmaddr, size);
 	}
-	up_read(&gmap->mm->mmap_sem);
+	mm_read_unlock(gmap->mm, &mmrange);
 }
 EXPORT_SYMBOL_GPL(gmap_discard);
 
@@ -942,9 +945,9 @@ int gmap_mprotect_notify(struct gmap *gmap, unsigned long gaddr,
 		return -EINVAL;
 	if (!MACHINE_HAS_ESOP && prot == PROT_READ)
 		return -EINVAL;
-	down_read(&gmap->mm->mmap_sem);
+	mm_read_lock(gmap->mm, &mmrange);
 	rc = gmap_protect_range(gmap, gaddr, len, prot, PGSTE_IN_BIT, &mmrange);
-	up_read(&gmap->mm->mmap_sem);
+	mm_read_unlock(gmap->mm, &mmrange);
 	return rc;
 }
 EXPORT_SYMBOL_GPL(gmap_mprotect_notify);
@@ -1536,11 +1539,11 @@ struct gmap *gmap_shadow(struct gmap *parent, unsigned long asce,
 	}
 	spin_unlock(&parent->shadow_lock);
 	/* protect after insertion, so it will get properly invalidated */
-	down_read(&parent->mm->mmap_sem);
+	mm_read_lock(parent->mm, &mmrange);
 	rc = gmap_protect_range(parent, asce & _ASCE_ORIGIN,
 				((asce & _ASCE_TABLE_LENGTH) + 1) * PAGE_SIZE,
 				PROT_READ, PGSTE_VSIE_BIT, &mmrange);
-	up_read(&parent->mm->mmap_sem);
+	mm_read_unlock(parent->mm, &mmrange);
 	spin_lock(&parent->shadow_lock);
 	new->initialized = true;
 	if (rc) {
@@ -2176,12 +2179,12 @@ int s390_enable_sie(void)
 	/* Fail if the page tables are 2K */
 	if (!mm_alloc_pgste(mm))
 		return -EINVAL;
-	down_write(&mm->mmap_sem);
+	mm_write_lock(mm, &mmrange);
 	mm->context.has_pgste = 1;
 	/* split thp mappings and disable thp for future mappings */
 	thp_split_mm(mm);
 	zap_zero_pages(mm, &mmrange);
-	up_write(&mm->mmap_sem);
+	mm_write_unlock(mm, &mmrange);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(s390_enable_sie);
@@ -2206,7 +2209,7 @@ int s390_enable_skey(void)
 	int rc = 0;
 	DEFINE_RANGE_LOCK_FULL(mmrange);
 
-	down_write(&mm->mmap_sem);
+	mm_write_lock(mm, &mmrange);
 	if (mm_use_skey(mm))
 		goto out_up;
 
@@ -2225,7 +2228,7 @@ int s390_enable_skey(void)
 	walk_page_range(0, TASK_SIZE, &walk, &mmrange);
 
 out_up:
-	up_write(&mm->mmap_sem);
+	mm_write_unlock(mm, &mmrange);
 	return rc;
 }
 EXPORT_SYMBOL_GPL(s390_enable_skey);
@@ -2245,9 +2248,9 @@ void s390_reset_cmma(struct mm_struct *mm)
 	struct mm_walk walk = { .pte_entry = __s390_reset_cmma };
 	DEFINE_RANGE_LOCK_FULL(mmrange);
 
-	down_write(&mm->mmap_sem);
+	mm_write_lock(mm, &mmrange);
 	walk.mm = mm;
 	walk_page_range(0, TASK_SIZE, &walk, &mmrange);
-	up_write(&mm->mmap_sem);
+	mm_write_unlock(mm, &mmrange);
 }
 EXPORT_SYMBOL_GPL(s390_reset_cmma);
diff --git a/arch/s390/pci/pci_mmio.c b/arch/s390/pci/pci_mmio.c
index 7d42a8794f10..bea541d5e181 100644
--- a/arch/s390/pci/pci_mmio.c
+++ b/arch/s390/pci/pci_mmio.c
@@ -17,8 +17,9 @@ static long get_pfn(unsigned long user_addr, unsigned long access,
 {
 	struct vm_area_struct *vma;
 	long ret;
+	DEFINE_RANGE_LOCK_FULL(mmrange);
 
-	down_read(&current->mm->mmap_sem);
+	mm_read_lock(current->mm, &mmrange);
 	ret = -EINVAL;
 	vma = find_vma(current->mm, user_addr);
 	if (!vma)
@@ -28,7 +29,7 @@ static long get_pfn(unsigned long user_addr, unsigned long access,
 		goto out;
 	ret = follow_pfn(vma, user_addr, pfn);
 out:
-	up_read(&current->mm->mmap_sem);
+	mm_read_unlock(current->mm, &mmrange);
 	return ret;
 }
 
-- 
2.13.6

Thread overview: 69+ messages
2018-02-05  1:26 [RFC PATCH 00/64] mm: towards parallel address space operations Davidlohr Bueso
2018-02-05  1:26 ` [PATCH 01/64] interval-tree: build unconditionally Davidlohr Bueso
2018-02-05  1:26 ` [PATCH 02/64] Introduce range reader/writer lock Davidlohr Bueso
2018-02-05  1:26 ` [PATCH 03/64] mm: introduce mm locking wrappers Davidlohr Bueso
2018-02-05  1:26 ` [PATCH 04/64] mm: add a range parameter to the vm_fault structure Davidlohr Bueso
2018-02-05  1:26 ` [PATCH 05/64] mm,khugepaged: prepare passing of rangelock field to vm_fault Davidlohr Bueso
2018-02-05  1:26 ` [PATCH 06/64] mm: teach pagefault paths about range locking Davidlohr Bueso
2018-02-05 16:09   ` Laurent Dufour
2018-02-06 18:32     ` Davidlohr Bueso
2018-02-05  1:26 ` [PATCH 07/64] mm/hugetlb: teach hugetlb_fault() " Davidlohr Bueso
2018-02-05  1:26 ` [PATCH 08/64] mm: teach lock_page_or_retry() " Davidlohr Bueso
2018-02-05  1:26 ` [PATCH 09/64] mm/mmu_notifier: teach oom reaper " Davidlohr Bueso
2018-02-05  1:27 ` [PATCH 10/64] kernel/exit: teach exit_mm() " Davidlohr Bueso
2018-02-05  1:27 ` [PATCH 11/64] prctl: teach " Davidlohr Bueso
2018-02-05  1:27 ` [PATCH 12/64] fs/userfaultfd: teach userfaultfd_must_wait() " Davidlohr Bueso
2018-02-05  1:27 ` [PATCH 13/64] fs/proc: teach " Davidlohr Bueso
2018-02-05  1:27 ` [PATCH 14/64] fs/coredump: " Davidlohr Bueso
2018-02-05  1:27 ` [PATCH 15/64] ipc: use mm locking wrappers Davidlohr Bueso
2018-02-05  1:27 ` [PATCH 16/64] virt: " Davidlohr Bueso
2018-02-05  1:27 ` [PATCH 17/64] kernel: " Davidlohr Bueso
2018-02-05  1:27 ` [PATCH 18/64] mm/ksm: teach about range locking Davidlohr Bueso
2018-02-05  1:27 ` [PATCH 19/64] mm/mlock: use mm locking wrappers Davidlohr Bueso
2018-02-05  1:27 ` [PATCH 20/64] mm/madvise: " Davidlohr Bueso
2018-02-05  1:27 ` [PATCH 21/64] mm: teach drop/take_all_locks() about range locking Davidlohr Bueso
2018-02-05  1:27 ` [PATCH 22/64] mm: avoid mmap_sem trylock in vm_insert_page() Davidlohr Bueso
2018-02-05  1:27 ` [PATCH 23/64] mm: huge pagecache: do not check mmap_sem state Davidlohr Bueso
2018-02-05  1:27 ` [PATCH 24/64] mm/thp: disable mmap_sem is_locked checks Davidlohr Bueso
2018-02-05  1:27 ` [PATCH 25/64] mm: use mm locking wrappers Davidlohr Bueso
2018-02-05  1:27 ` [PATCH 26/64] fs: " Davidlohr Bueso
2018-02-05  1:27 ` [PATCH 27/64] arch/{x86,sh,ppc}: teach bad_area() about range locking Davidlohr Bueso
2018-02-05  1:27 ` [PATCH 28/64] arch/x86: use mm locking wrappers Davidlohr Bueso
2018-02-05  1:27 ` [PATCH 29/64] arch/alpha: " Davidlohr Bueso
2018-02-05  1:27 ` [PATCH 30/64] arch/tile: " Davidlohr Bueso
2018-02-05  1:27 ` [PATCH 31/64] arch/sparc: " Davidlohr Bueso
2018-02-05  1:27 ` [PATCH 32/64] arch/s390: " Davidlohr Bueso [this message]
2018-02-05  1:27 ` [PATCH 33/64] arch/powerpc: " Davidlohr Bueso
2018-02-05  1:27 ` [PATCH 34/64] arch/parisc: " Davidlohr Bueso
2018-02-05  1:27 ` [PATCH 35/64] arch/ia64: " Davidlohr Bueso
2018-02-05  1:27 ` [PATCH 36/64] arch/mips: " Davidlohr Bueso
2018-02-05  1:27 ` [PATCH 37/64] arch/arc: " Davidlohr Bueso
2018-02-05  1:27 ` [PATCH 38/64] arch/blackfin: " Davidlohr Bueso
2018-02-05  1:27 ` [PATCH 39/64] arch/m68k: " Davidlohr Bueso
2018-02-05  1:27 ` [PATCH 40/64] arch/sh: " Davidlohr Bueso
2018-02-05  1:27 ` [PATCH 41/64] arch/cris: " Davidlohr Bueso
2018-02-05  1:27 ` [PATCH 42/64] arch/frv: " Davidlohr Bueso
2018-02-05  1:27 ` [PATCH 43/64] arch/hexagon: " Davidlohr Bueso
2018-02-05  1:27 ` [PATCH 44/64] arch/score: " Davidlohr Bueso
2018-02-05  1:27 ` [PATCH 45/64] arch/m32r: " Davidlohr Bueso
2018-02-05  1:27 ` [PATCH 46/64] arch/metag: " Davidlohr Bueso
2018-02-05  1:27 ` [PATCH 47/64] arch/microblaze: " Davidlohr Bueso
2018-02-05  1:27 ` [PATCH 48/64] arch/tile: " Davidlohr Bueso
2018-02-05  1:27 ` [PATCH 49/64] arch/xtensa: " Davidlohr Bueso
2018-02-05  1:27 ` [PATCH 50/64] arch/unicore32: " Davidlohr Bueso
2018-02-05  1:27 ` [PATCH 51/64] arch/mn10300: " Davidlohr Bueso
2018-02-05  1:27 ` [PATCH 52/64] arch/openrisc: " Davidlohr Bueso
2018-02-05  1:27 ` [PATCH 53/64] arch/nios2: " Davidlohr Bueso
2018-02-05  1:27 ` [PATCH 54/64] arch/arm: " Davidlohr Bueso
2018-02-05  1:27 ` [PATCH 55/64] arch/riscv: " Davidlohr Bueso
2018-02-05  1:27 ` [PATCH 56/64] drivers/android: " Davidlohr Bueso
2018-02-05  1:27 ` [PATCH 57/64] drivers/gpu: " Davidlohr Bueso
2018-02-05  1:27 ` [PATCH 58/64] drivers/infiniband: " Davidlohr Bueso
2018-02-05  1:27 ` [PATCH 59/64] drivers/iommu: use mm locking helpers Davidlohr Bueso
2018-02-05  1:27 ` [PATCH 60/64] drivers/xen: use mm locking wrappers Davidlohr Bueso
2018-02-05  1:27 ` [PATCH 61/64] staging/lustre: use generic range lock Davidlohr Bueso
2018-02-05  1:27 ` [PATCH 62/64] drivers: use mm locking wrappers (the rest) Davidlohr Bueso
2018-02-05  1:27 ` [PATCH 63/64] mm/mmap: hack drop down_write_nest_lock() Davidlohr Bueso
2018-02-05  1:27 ` [PATCH 64/64] mm: convert mmap_sem to range mmap_lock Davidlohr Bueso
2018-02-05 16:53 ` [RFC PATCH 00/64] mm: towards parallel address space operations Laurent Dufour
2018-02-06 18:48   ` Davidlohr Bueso
