From: Laurent Dufour <ldufour@linux.ibm.com>
To: akpm@linux-foundation.org, mhocko@kernel.org,
	peterz@infradead.org, kirill@shutemov.name, ak@linux.intel.com,
	dave@stgolabs.net, jack@suse.cz,
	Matthew Wilcox <willy@infradead.org>,
	aneesh.kumar@linux.ibm.com, benh@kernel.crashing.org,
	mpe@ellerman.id.au, paulus@samba.org,
	Thomas Gleixner <tglx@linutronix.de>,
	Ingo Molnar <mingo@redhat.com>,
	hpa@zytor.com, Will Deacon <will.deacon@arm.com>,
	Sergey Senozhatsky <sergey.senozhatsky@gmail.com>,
	sergey.senozhatsky.work@gmail.com,
	Andrea Arcangeli <aarcange@redhat.com>,
	Alexei Starovoitov <alexei.starovoitov@gmail.com>,
	kemi.wang@intel.com, Daniel Jordan <daniel.m.jordan@oracle.com>,
	David Rientjes <rientjes@google.com>,
	Jerome Glisse <jglisse@redhat.com>,
	Ganesh Mahendran <opensource.ganesh@gmail.com>,
	Minchan Kim <minchan@kernel.org>,
	Punit Agrawal <punitagrawal@gmail.com>,
	vinayak menon <vinayakm.list@gmail.com>,
	Yang Shi <yang.shi@linux.alibaba.com>,
	zhong jiang <zhongjiang@huawei.com>,
	Haiyan Song <haiyanx.song@intel.com>,
	Balbir Singh <bsingharora@gmail.com>,
	sj38.park@gmail.com, Michel Lespinasse <walken@google.com>,
	Mike Rapoport <rppt@linux.ibm.com>
Cc: linux-kernel@vger.kernel.org, linux-mm@kvack.org,
	haren@linux.vnet.ibm.com, npiggin@gmail.com,
	paulmck@linux.vnet.ibm.com, Tim Chen <tim.c.chen@linux.intel.com>,
	linuxppc-dev@lists.ozlabs.org, x86@kernel.org
Subject: [PATCH v12 05/31] mm: prepare for FAULT_FLAG_SPECULATIVE
Date: Tue, 16 Apr 2019 15:44:56 +0200
Message-ID: <20190416134522.17540-6-ldufour@linux.ibm.com>
In-Reply-To: <20190416134522.17540-1-ldufour@linux.ibm.com>

From: Peter Zijlstra <peterz@infradead.org>

When speculating faults (without holding mmap_sem), we need to validate
that the VMA against which we loaded pages is still valid by the time we
are ready to install the new PTE.

Therefore, replace the pte_offset_map_lock() calls that (re)take the
PTL with pte_map_lock(), which can fail if the VMA has changed since
the fault started.
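
For reference, the stub introduced by this patch always succeeds; a later
patch in the series is expected to turn it into a variant that can fail on
speculative faults. A minimal sketch of that eventual shape, assuming a
FAULT_FLAG_SPECULATIVE flag (defined later in the series) and an
illustrative vma_has_changed() helper that detects concurrent VMA
modification (these names are illustrative, not the final API):

static inline bool pte_map_lock(struct vm_fault *vmf)
{
	if (!(vmf->flags & FAULT_FLAG_SPECULATIVE)) {
		/* Regular fault: mmap_sem is held, this cannot fail. */
		vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd,
					       vmf->address, &vmf->ptl);
		return true;
	}
	/*
	 * Speculative fault (illustrative): bail out if the VMA was
	 * modified since the fault started; the caller then returns
	 * VM_FAULT_RETRY and the fault is retried under mmap_sem.
	 */
	if (vma_has_changed(vmf))
		return false;
	vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd,
				       vmf->address, &vmf->ptl);
	return true;
}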

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>

[Port to 4.12 kernel]
[Remove the comment about the fault_env structure, which has since been
 implemented as the vm_fault structure in the kernel]
[Move pte_map_lock()'s definition higher up in the file]
[Move the define of FAULT_FLAG_SPECULATIVE later in the series]
[Review the error paths in do_swap_page(), do_anonymous_page() and
 wp_page_copy()]
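
The calling convention this patch establishes: when pte_map_lock() returns
false, the caller must undo anything it acquired since dropping the PTL and
propagate VM_FAULT_RETRY, as in this pattern from the wp_page_copy() hunk
below:

	if (!pte_map_lock(vmf)) {
		ret = VM_FAULT_RETRY;
		goto out_uncharge;	/* undo memcg charge, drop new_page */
	}
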
Signed-off-by: Laurent Dufour <ldufour@linux.ibm.com>
---
 mm/memory.c | 87 +++++++++++++++++++++++++++++++++++------------------
 1 file changed, 58 insertions(+), 29 deletions(-)

diff --git a/mm/memory.c b/mm/memory.c
index c6ddadd9d2b7..fc3698d13cb5 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2073,6 +2073,13 @@ int apply_to_page_range(struct mm_struct *mm, unsigned long addr,
 }
 EXPORT_SYMBOL_GPL(apply_to_page_range);
 
+static inline bool pte_map_lock(struct vm_fault *vmf)
+{
+	vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd,
+				       vmf->address, &vmf->ptl);
+	return true;
+}
+
 /*
  * handle_pte_fault chooses page fault handler according to an entry which was
  * read non-atomically.  Before making any commitment, on those architectures
@@ -2261,25 +2268,26 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
 	int page_copied = 0;
 	struct mem_cgroup *memcg;
 	struct mmu_notifier_range range;
+	int ret = VM_FAULT_OOM;
 
 	if (unlikely(anon_vma_prepare(vma)))
-		goto oom;
+		goto out;
 
 	if (is_zero_pfn(pte_pfn(vmf->orig_pte))) {
 		new_page = alloc_zeroed_user_highpage_movable(vma,
 							      vmf->address);
 		if (!new_page)
-			goto oom;
+			goto out;
 	} else {
 		new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma,
 				vmf->address);
 		if (!new_page)
-			goto oom;
+			goto out;
 		cow_user_page(new_page, old_page, vmf->address, vma);
 	}
 
 	if (mem_cgroup_try_charge_delay(new_page, mm, GFP_KERNEL, &memcg, false))
-		goto oom_free_new;
+		goto out_free_new;
 
 	__SetPageUptodate(new_page);
 
@@ -2291,7 +2299,10 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
 	/*
 	 * Re-check the pte - we dropped the lock
 	 */
-	vmf->pte = pte_offset_map_lock(mm, vmf->pmd, vmf->address, &vmf->ptl);
+	if (!pte_map_lock(vmf)) {
+		ret = VM_FAULT_RETRY;
+		goto out_uncharge;
+	}
 	if (likely(pte_same(*vmf->pte, vmf->orig_pte))) {
 		if (old_page) {
 			if (!PageAnon(old_page)) {
@@ -2378,12 +2389,14 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
 		put_page(old_page);
 	}
 	return page_copied ? VM_FAULT_WRITE : 0;
-oom_free_new:
+out_uncharge:
+	mem_cgroup_cancel_charge(new_page, memcg, false);
+out_free_new:
 	put_page(new_page);
-oom:
+out:
 	if (old_page)
 		put_page(old_page);
-	return VM_FAULT_OOM;
+	return ret;
 }
 
 /**
@@ -2405,8 +2418,8 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
 vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf)
 {
 	WARN_ON_ONCE(!(vmf->vma->vm_flags & VM_SHARED));
-	vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, vmf->address,
-				       &vmf->ptl);
+	if (!pte_map_lock(vmf))
+		return VM_FAULT_RETRY;
 	/*
 	 * We might have raced with another page fault while we released the
 	 * pte_offset_map_lock.
@@ -2527,8 +2540,11 @@ static vm_fault_t do_wp_page(struct vm_fault *vmf)
 			get_page(vmf->page);
 			pte_unmap_unlock(vmf->pte, vmf->ptl);
 			lock_page(vmf->page);
-			vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
-					vmf->address, &vmf->ptl);
+			if (!pte_map_lock(vmf)) {
+				unlock_page(vmf->page);
+				put_page(vmf->page);
+				return VM_FAULT_RETRY;
+			}
 			if (!pte_same(*vmf->pte, vmf->orig_pte)) {
 				unlock_page(vmf->page);
 				pte_unmap_unlock(vmf->pte, vmf->ptl);
@@ -2744,11 +2760,15 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 
 		if (!page) {
 			/*
-			 * Back out if somebody else faulted in this pte
-			 * while we released the pte lock.
+			 * Back out if the VMA has changed behind our back
+			 * during a speculative page fault or if somebody else
+			 * faulted in this pte while we released the pte lock.
 			 */
-			vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
-					vmf->address, &vmf->ptl);
+			if (!pte_map_lock(vmf)) {
+				delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
+				ret = VM_FAULT_RETRY;
+				goto out;
+			}
 			if (likely(pte_same(*vmf->pte, vmf->orig_pte)))
 				ret = VM_FAULT_OOM;
 			delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
@@ -2801,10 +2821,13 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 	}
 
 	/*
-	 * Back out if somebody else already faulted in this pte.
+	 * Back out if the VMA has changed behind our back during a
+	 * speculative page fault or if somebody else already faulted in this pte.
 	 */
-	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
-			&vmf->ptl);
+	if (!pte_map_lock(vmf)) {
+		ret = VM_FAULT_RETRY;
+		goto out_cancel_cgroup;
+	}
 	if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte)))
 		goto out_nomap;
 
@@ -2882,8 +2905,9 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 out:
 	return ret;
 out_nomap:
-	mem_cgroup_cancel_charge(page, memcg, false);
 	pte_unmap_unlock(vmf->pte, vmf->ptl);
+out_cancel_cgroup:
+	mem_cgroup_cancel_charge(page, memcg, false);
 out_page:
 	unlock_page(page);
 out_release:
@@ -2934,8 +2958,8 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
 			!mm_forbids_zeropage(vma->vm_mm)) {
 		entry = pte_mkspecial(pfn_pte(my_zero_pfn(vmf->address),
 						vma->vm_page_prot));
-		vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
-				vmf->address, &vmf->ptl);
+		if (!pte_map_lock(vmf))
+			return VM_FAULT_RETRY;
 		if (!pte_none(*vmf->pte))
 			goto unlock;
 		ret = check_stable_address_space(vma->vm_mm);
@@ -2971,14 +2995,16 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
 	if (vma->vm_flags & VM_WRITE)
 		entry = pte_mkwrite(pte_mkdirty(entry));
 
-	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
-			&vmf->ptl);
-	if (!pte_none(*vmf->pte))
+	if (!pte_map_lock(vmf)) {
+		ret = VM_FAULT_RETRY;
 		goto release;
+	}
+	if (!pte_none(*vmf->pte))
+		goto unlock_and_release;
 
 	ret = check_stable_address_space(vma->vm_mm);
 	if (ret)
-		goto release;
+		goto unlock_and_release;
 
 	/* Deliver the page fault to userland, check inside PT lock */
 	if (userfaultfd_missing(vma)) {
@@ -3000,10 +3026,12 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
 unlock:
 	pte_unmap_unlock(vmf->pte, vmf->ptl);
 	return ret;
+unlock_and_release:
+	pte_unmap_unlock(vmf->pte, vmf->ptl);
 release:
 	mem_cgroup_cancel_charge(page, memcg, false);
 	put_page(page);
-	goto unlock;
+	return ret;
 oom_free_page:
 	put_page(page);
 oom:
@@ -3118,8 +3146,9 @@ static vm_fault_t pte_alloc_one_map(struct vm_fault *vmf)
 	 * pte_none() under vmf->ptl protection when we return to
 	 * alloc_set_pte().
 	 */
-	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
-			&vmf->ptl);
+	if (!pte_map_lock(vmf))
+		return VM_FAULT_RETRY;
+
 	return 0;
 }
 
-- 
2.21.0

