From: Anshuman Khandual <khandual@linux.vnet.ibm.com>
To: Laurent Dufour <ldufour@linux.vnet.ibm.com>,
paulmck@linux.vnet.ibm.com, peterz@infradead.org,
akpm@linux-foundation.org, kirill@shutemov.name,
ak@linux.intel.com, mhocko@kernel.org, dave@stgolabs.net,
jack@suse.cz, Matthew Wilcox <willy@infradead.org>
Cc: linux-kernel@vger.kernel.org, linux-mm@kvack.org,
haren@linux.vnet.ibm.com, khandual@linux.vnet.ibm.com,
npiggin@gmail.com, bsingharora@gmail.com,
Tim Chen <tim.c.chen@linux.intel.com>
Subject: Re: [RFC v5 02/11] mm: Prepare for FAULT_FLAG_SPECULATIVE
Date: Tue, 8 Aug 2017 15:54:01 +0530
Message-ID: <7e770060-32b2-c136-5d34-2f078800df21@linux.vnet.ibm.com>
In-Reply-To: <1497635555-25679-3-git-send-email-ldufour@linux.vnet.ibm.com>
On 06/16/2017 11:22 PM, Laurent Dufour wrote:
> From: Peter Zijlstra <peterz@infradead.org>
>
> When speculating faults (without holding mmap_sem) we need to validate
> that the vma against which we loaded pages is still valid when we're
> ready to install the new PTE.
>
> Therefore, replace the pte_offset_map_lock() calls that (re)take the
> PTL with pte_map_lock() which can fail in case we find the VMA changed
> since we started the fault.
Where are we checking whether the VMA has changed since the fault started?
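I assume a later patch in the series fleshes this out. Something along
these lines is presumably the eventual shape; this is a sketch only,
built on the per-VMA sequence count introduced in patch 04/11, and the
vmf->sequence snapshot and the exact retry protocol are my guesses, not
code from this patch:

	static bool pte_map_lock(struct vm_fault *vmf)
	{
		if (!(vmf->flags & FAULT_FLAG_SPECULATIVE)) {
			/* Classic fault: mmap_sem is held, cannot fail. */
			vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd,
						       vmf->address, &vmf->ptl);
			return true;
		}

		/*
		 * Speculative fault: bail out if the VMA was modified since
		 * its sequence count was sampled at the start of the fault.
		 * Check both before and after taking the PTL, so the caller
		 * can return VM_FAULT_RETRY on any change.
		 */
		if (read_seqcount_retry(&vmf->vma->vm_sequence, vmf->sequence))
			return false;
		vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd,
					       vmf->address, &vmf->ptl);
		if (read_seqcount_retry(&vmf->vma->vm_sequence, vmf->sequence)) {
			pte_unmap_unlock(vmf->pte, vmf->ptl);
			return false;
		}
		return true;
	}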
>
> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
>
> [Port to 4.12 kernel]
> [Remove the comment about the fault_env structure which has been
> implemented as the vm_fault structure in the kernel]
> Signed-off-by: Laurent Dufour <ldufour@linux.vnet.ibm.com>
> ---
> include/linux/mm.h | 1 +
> mm/memory.c | 55 ++++++++++++++++++++++++++++++++++++++----------------
> 2 files changed, 40 insertions(+), 16 deletions(-)
>
> diff --git a/include/linux/mm.h b/include/linux/mm.h
> index b892e95d4929..6b7ec2a76953 100644
> --- a/include/linux/mm.h
> +++ b/include/linux/mm.h
> @@ -286,6 +286,7 @@ extern pgprot_t protection_map[16];
> #define FAULT_FLAG_USER 0x40 /* The fault originated in userspace */
> #define FAULT_FLAG_REMOTE 0x80 /* faulting for non current tsk/mm */
> #define FAULT_FLAG_INSTRUCTION 0x100 /* The fault was during an instruction fetch */
> +#define FAULT_FLAG_SPECULATIVE 0x200 /* Speculative fault, not holding mmap_sem */
We are not using this flag yet; maybe its introduction can wait until later in the series.
>
> #define FAULT_FLAG_TRACE \
> { FAULT_FLAG_WRITE, "WRITE" }, \
> diff --git a/mm/memory.c b/mm/memory.c
> index fd952f05e016..40834444ea0d 100644
> --- a/mm/memory.c
> +++ b/mm/memory.c
> @@ -2240,6 +2240,12 @@ static inline void wp_page_reuse(struct vm_fault *vmf)
> pte_unmap_unlock(vmf->pte, vmf->ptl);
> }
>
> +static bool pte_map_lock(struct vm_fault *vmf)
> +{
> + vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, vmf->address, &vmf->ptl);
> + return true;
> +}
This always returns true? Then we should not need all these if (!pte_map_lock(vmf))
check blocks below.
> +
> /*
> * Handle the case of a page which we actually need to copy to a new page.
> *
> @@ -2267,6 +2273,7 @@ static int wp_page_copy(struct vm_fault *vmf)
> const unsigned long mmun_start = vmf->address & PAGE_MASK;
> const unsigned long mmun_end = mmun_start + PAGE_SIZE;
> struct mem_cgroup *memcg;
> + int ret = VM_FAULT_OOM;
>
If we remove the check blocks around pte_map_lock(), initializing ret to
VM_FAULT_OOM here becomes redundant.
> if (unlikely(anon_vma_prepare(vma)))
> goto oom;
> @@ -2294,7 +2301,11 @@ static int wp_page_copy(struct vm_fault *vmf)
> /*
> * Re-check the pte - we dropped the lock
> */
> - vmf->pte = pte_offset_map_lock(mm, vmf->pmd, vmf->address, &vmf->ptl);
> + if (!pte_map_lock(vmf)) {
> + mem_cgroup_cancel_charge(new_page, memcg, false);
> + ret = VM_FAULT_RETRY;
> + goto oom_free_new;
> + }
> if (likely(pte_same(*vmf->pte, vmf->orig_pte))) {
> if (old_page) {
> if (!PageAnon(old_page)) {
> @@ -2382,7 +2393,7 @@ static int wp_page_copy(struct vm_fault *vmf)
> oom:
> if (old_page)
> put_page(old_page);
> - return VM_FAULT_OOM;
> + return ret;
> }
>
> /**
> @@ -2403,8 +2414,8 @@ static int wp_page_copy(struct vm_fault *vmf)
> int finish_mkwrite_fault(struct vm_fault *vmf)
> {
> WARN_ON_ONCE(!(vmf->vma->vm_flags & VM_SHARED));
> - vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, vmf->address,
> - &vmf->ptl);
> + if (!pte_map_lock(vmf))
> + return VM_FAULT_RETRY;
Can't fail.
> /*
> * We might have raced with another page fault while we released the
> * pte_offset_map_lock.
> @@ -2522,8 +2533,11 @@ static int do_wp_page(struct vm_fault *vmf)
> get_page(vmf->page);
> pte_unmap_unlock(vmf->pte, vmf->ptl);
> lock_page(vmf->page);
> - vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
> - vmf->address, &vmf->ptl);
> + if (!pte_map_lock(vmf)) {
> + unlock_page(vmf->page);
> + put_page(vmf->page);
> + return VM_FAULT_RETRY;
> + }
Same here.
> if (!pte_same(*vmf->pte, vmf->orig_pte)) {
> unlock_page(vmf->page);
> pte_unmap_unlock(vmf->pte, vmf->ptl);
> @@ -2681,8 +2695,10 @@ int do_swap_page(struct vm_fault *vmf)
> * Back out if somebody else faulted in this pte
> * while we released the pte lock.
> */
> - vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
> - vmf->address, &vmf->ptl);
> + if (!pte_map_lock(vmf)) {
> + delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
> + return VM_FAULT_RETRY;
> + }
> if (likely(pte_same(*vmf->pte, vmf->orig_pte)))
> ret = VM_FAULT_OOM;
> delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
> @@ -2738,8 +2754,11 @@ int do_swap_page(struct vm_fault *vmf)
> /*
> * Back out if somebody else already faulted in this pte.
> */
> - vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
> - &vmf->ptl);
> + if (!pte_map_lock(vmf)) {
> + ret = VM_FAULT_RETRY;
> + mem_cgroup_cancel_charge(page, memcg, false);
> + goto out_page;
> + }
> if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte)))
> goto out_nomap;
>
> @@ -2903,8 +2922,8 @@ static int do_anonymous_page(struct vm_fault *vmf)
> !mm_forbids_zeropage(vma->vm_mm)) {
> entry = pte_mkspecial(pfn_pte(my_zero_pfn(vmf->address),
> vma->vm_page_prot));
> - vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
> - vmf->address, &vmf->ptl);
> + if (!pte_map_lock(vmf))
> + return VM_FAULT_RETRY;
> if (!pte_none(*vmf->pte))
> goto unlock;
> /* Deliver the page fault to userland, check inside PT lock */
> @@ -2936,8 +2955,11 @@ static int do_anonymous_page(struct vm_fault *vmf)
> if (vma->vm_flags & VM_WRITE)
> entry = pte_mkwrite(pte_mkdirty(entry));
>
> - vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
> - &vmf->ptl);
> + if (!pte_map_lock(vmf)) {
> + mem_cgroup_cancel_charge(page, memcg, false);
> + put_page(page);
> + return VM_FAULT_RETRY;
> + }
> if (!pte_none(*vmf->pte))
> goto release;
>
> @@ -3057,8 +3079,9 @@ static int pte_alloc_one_map(struct vm_fault *vmf)
> * pte_none() under vmf->ptl protection when we return to
> * alloc_set_pte().
> */
> - vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
> - &vmf->ptl);
> + if (!pte_map_lock(vmf))
> + return VM_FAULT_RETRY;
> +
> return 0;
All these 'if' blocks seem redundant, unless I am missing something.
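If they are only here to prepare the ground, presumably they pay off once
the speculative handler from patch 08/11 lands. A sketch of the intended
caller as I understand it (function names assumed from the rest of the
series, not from this patch): any pte_map_lock() failure bubbles up as
VM_FAULT_RETRY, and the arch fault handler falls back to the classic
mmap_sem-protected path:

	/* Speculative attempt first, without taking mmap_sem. */
	fault = handle_speculative_fault(mm, address, flags);
	if (fault & VM_FAULT_RETRY) {
		/* The VMA changed under us: retry the slow, locked path. */
		down_read(&mm->mmap_sem);
		vma = find_vma(mm, address);
		fault = handle_mm_fault(vma, address, flags);
		up_read(&mm->mmap_sem);
	}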