From: Laurent Dufour <ldufour@linux.vnet.ibm.com>
To: paulmck@linux.vnet.ibm.com, peterz@infradead.org,
	akpm@linux-foundation.org, kirill@shutemov.name, ak@linux.intel.com,
	mhocko@kernel.org, dave@stgolabs.net, jack@suse.cz, Matthew Wilcox,
	benh@kernel.crashing.org, mpe@ellerman.id.au, paulus@samba.org,
	Thomas Gleixner, Ingo Molnar, hpa@zytor.com, Will Deacon,
	Sergey Senozhatsky, Andrea Arcangeli, Alexei Starovoitov,
	kemi.wang@intel.com, sergey.senozhatsky.work@gmail.com
Cc: linux-kernel@vger.kernel.org, linux-mm@kvack.org,
	haren@linux.vnet.ibm.com, khandual@linux.vnet.ibm.com,
	npiggin@gmail.com, bsingharora@gmail.com, Tim Chen,
	linuxppc-dev@lists.ozlabs.org, x86@kernel.org
Subject: [PATCH v6 14/24] mm: Introduce __vm_normal_page()
Date: Fri, 12 Jan 2018 18:25:58 +0100
Message-Id: <1515777968-867-15-git-send-email-ldufour@linux.vnet.ibm.com>
In-Reply-To: <1515777968-867-1-git-send-email-ldufour@linux.vnet.ibm.com>
References: <1515777968-867-1-git-send-email-ldufour@linux.vnet.ibm.com>

When dealing with the speculative fault path, we should use the vm_flags
value cached in the vm_fault structure rather than dereferencing the VMA.
Currently, vm_normal_page() uses the VMA pointer to fetch vm_flags.

This patch introduces __vm_normal_page(), which receives the vm_flags
value as an additional parameter; vm_normal_page() becomes a wrapper that
passes vma->vm_flags.

Note: the speculative path is only enabled on architectures providing
support for the special PTE flag, so only the first block of
vm_normal_page() is exercised on the speculative path.
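Note: for illustration only (this snippet is not part of the diff below),
the intended difference between the two call styles, mirroring the
do_numa_page() change in this patch:

	/* Regular path: the flags are read through the VMA pointer. */
	page = vm_normal_page(vma, vmf->address, pte);

	/*
	 * Speculative path: pass the vm_flags value that was cached in
	 * struct vm_fault when the fault started, so the checks are not
	 * done against a possibly concurrently changing vma->vm_flags.
	 */
	page = __vm_normal_page(vma, vmf->address, pte, false,
				vmf->vma_flags);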
Signed-off-by: Laurent Dufour <ldufour@linux.vnet.ibm.com>
---
 include/linux/mm.h |  7 +++++--
 mm/memory.c        | 18 ++++++++++--------
 2 files changed, 15 insertions(+), 10 deletions(-)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index d77a11067b94..0d1b8d2d1e4f 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1260,8 +1260,11 @@ struct zap_details {
 	pgoff_t last_index;			/* Highest page->index to unmap */
 };
 
-struct page *_vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
-			     pte_t pte, bool with_public_device);
+struct page *__vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
+			      pte_t pte, bool with_public_device,
+			      unsigned long vma_flags);
+#define _vm_normal_page(vma, addr, pte, with_public_device) \
+	__vm_normal_page(vma, addr, pte, with_public_device, (vma)->vm_flags)
 #define vm_normal_page(vma, addr, pte) _vm_normal_page(vma, addr, pte, false)
 
 struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
diff --git a/mm/memory.c b/mm/memory.c
index c24891d5676f..a7cb109bf25a 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -826,8 +826,9 @@ static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
 #else
 # define HAVE_PTE_SPECIAL 0
 #endif
-struct page *_vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
-			     pte_t pte, bool with_public_device)
+struct page *__vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
+			      pte_t pte, bool with_public_device,
+			      unsigned long vma_flags)
 {
 	unsigned long pfn = pte_pfn(pte);
 
@@ -836,7 +837,7 @@ struct page *_vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
 			goto check_pfn;
 		if (vma->vm_ops && vma->vm_ops->find_special_page)
 			return vma->vm_ops->find_special_page(vma, addr);
-		if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
+		if (vma_flags & (VM_PFNMAP | VM_MIXEDMAP))
 			return NULL;
 		if (is_zero_pfn(pfn))
 			return NULL;
@@ -868,8 +869,8 @@ struct page *_vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
 
 	/* !HAVE_PTE_SPECIAL case follows: */
 
-	if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
-		if (vma->vm_flags & VM_MIXEDMAP) {
+	if (unlikely(vma_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
+		if (vma_flags & VM_MIXEDMAP) {
 			if (!pfn_valid(pfn))
 				return NULL;
 			goto out;
@@ -878,7 +879,7 @@ struct page *_vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
 			off = (addr - vma->vm_start) >> PAGE_SHIFT;
 			if (pfn == vma->vm_pgoff + off)
 				return NULL;
-			if (!is_cow_mapping(vma->vm_flags))
+			if (!is_cow_mapping(vma_flags))
 				return NULL;
 		}
 	}
@@ -2722,7 +2723,8 @@ static int do_wp_page(struct vm_fault *vmf)
 {
 	struct vm_area_struct *vma = vmf->vma;
 
-	vmf->page = vm_normal_page(vma, vmf->address, vmf->orig_pte);
+	vmf->page = __vm_normal_page(vma, vmf->address, vmf->orig_pte, false,
+				     vmf->vma_flags);
 	if (!vmf->page) {
 		/*
 		 * VM_MIXEDMAP !pfn_valid() case, or VM_SOFTDIRTY clear on a
@@ -3837,7 +3839,7 @@ static int do_numa_page(struct vm_fault *vmf)
 	ptep_modify_prot_commit(vma->vm_mm, vmf->address, vmf->pte, pte);
 	update_mmu_cache(vma, vmf->address, vmf->pte);
 
-	page = vm_normal_page(vma, vmf->address, pte);
+	page = __vm_normal_page(vma, vmf->address, pte, false, vmf->vma_flags);
 	if (!page) {
 		pte_unmap_unlock(vmf->pte, vmf->ptl);
 		return 0;
-- 
2.7.4