From: Jan Kara <jack@suse.cz>
To: linux-mm@kvack.org
Cc: linux-fsdevel@vger.kernel.org, linux-nvdimm@lists.01.org,
Dan Williams <dan.j.williams@intel.com>,
Ross Zwisler <ross.zwisler@linux.intel.com>,
"Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>,
Jan Kara <jack@suse.cz>
Subject: [PATCH 05/20] mm: Trim __do_fault() arguments
Date: Tue, 27 Sep 2016 18:08:09 +0200 [thread overview]
Message-ID: <1474992504-20133-6-git-send-email-jack@suse.cz> (raw)
In-Reply-To: <1474992504-20133-1-git-send-email-jack@suse.cz>
Use the vm_fault structure to pass cow_page, page, and entry in and out of
the function. That reduces the number of __do_fault() arguments from 4 to 1.
Signed-off-by: Jan Kara <jack@suse.cz>
---
mm/memory.c | 53 +++++++++++++++++++++++------------------------------
1 file changed, 23 insertions(+), 30 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index b7f1f535e079..ba7760fb7db2 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2844,26 +2844,22 @@ oom:
* released depending on flags and vma->vm_ops->fault() return value.
* See filemap_fault() and __lock_page_retry().
*/
-static int __do_fault(struct vm_fault *vmf, struct page *cow_page,
- struct page **page, void **entry)
+static int __do_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
int ret;
- vmf->cow_page = cow_page;
-
ret = vma->vm_ops->fault(vma, vmf);
if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
return ret;
- if (ret & VM_FAULT_DAX_LOCKED) {
- *entry = vmf->entry;
+ if (ret & VM_FAULT_DAX_LOCKED)
return ret;
- }
if (unlikely(PageHWPoison(vmf->page))) {
if (ret & VM_FAULT_LOCKED)
unlock_page(vmf->page);
put_page(vmf->page);
+ vmf->page = NULL;
return VM_FAULT_HWPOISON;
}
@@ -2872,7 +2868,6 @@ static int __do_fault(struct vm_fault *vmf, struct page *cow_page,
else
VM_BUG_ON_PAGE(!PageLocked(vmf->page), vmf->page);
- *page = vmf->page;
return ret;
}
@@ -3169,7 +3164,6 @@ out:
static int do_read_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
- struct page *fault_page;
int ret = 0;
/*
@@ -3183,24 +3177,23 @@ static int do_read_fault(struct vm_fault *vmf)
return ret;
}
- ret = __do_fault(vmf, NULL, &fault_page, NULL);
+ ret = __do_fault(vmf);
if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
return ret;
- ret |= alloc_set_pte(vmf, NULL, fault_page);
+ ret |= alloc_set_pte(vmf, NULL, vmf->page);
if (vmf->pte)
pte_unmap_unlock(vmf->pte, vmf->ptl);
- unlock_page(fault_page);
+ unlock_page(vmf->page);
if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
- put_page(fault_page);
+ put_page(vmf->page);
return ret;
}
static int do_cow_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
- struct page *fault_page, *new_page;
- void *fault_entry;
+ struct page *new_page;
struct mem_cgroup *memcg;
int ret;
@@ -3217,20 +3210,21 @@ static int do_cow_fault(struct vm_fault *vmf)
return VM_FAULT_OOM;
}
- ret = __do_fault(vmf, new_page, &fault_page, &fault_entry);
+ vmf->cow_page = new_page;
+ ret = __do_fault(vmf);
if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
goto uncharge_out;
if (!(ret & VM_FAULT_DAX_LOCKED))
- copy_user_highpage(new_page, fault_page, vmf->address, vma);
+ copy_user_highpage(new_page, vmf->page, vmf->address, vma);
__SetPageUptodate(new_page);
ret |= alloc_set_pte(vmf, memcg, new_page);
if (vmf->pte)
pte_unmap_unlock(vmf->pte, vmf->ptl);
if (!(ret & VM_FAULT_DAX_LOCKED)) {
- unlock_page(fault_page);
- put_page(fault_page);
+ unlock_page(vmf->page);
+ put_page(vmf->page);
} else {
dax_unlock_mapping_entry(vma->vm_file->f_mapping, vmf->pgoff);
}
@@ -3246,12 +3240,11 @@ uncharge_out:
static int do_shared_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
- struct page *fault_page;
struct address_space *mapping;
int dirtied = 0;
int ret, tmp;
- ret = __do_fault(vmf, NULL, &fault_page, NULL);
+ ret = __do_fault(vmf);
if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
return ret;
@@ -3260,26 +3253,26 @@ static int do_shared_fault(struct vm_fault *vmf)
* about to become writable
*/
if (vma->vm_ops->page_mkwrite) {
- unlock_page(fault_page);
- tmp = do_page_mkwrite(vma, fault_page, vmf->address);
+ unlock_page(vmf->page);
+ tmp = do_page_mkwrite(vma, vmf->page, vmf->address);
if (unlikely(!tmp ||
(tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
- put_page(fault_page);
+ put_page(vmf->page);
return tmp;
}
}
- ret |= alloc_set_pte(vmf, NULL, fault_page);
+ ret |= alloc_set_pte(vmf, NULL, vmf->page);
if (vmf->pte)
pte_unmap_unlock(vmf->pte, vmf->ptl);
if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE |
VM_FAULT_RETRY))) {
- unlock_page(fault_page);
- put_page(fault_page);
+ unlock_page(vmf->page);
+ put_page(vmf->page);
return ret;
}
- if (set_page_dirty(fault_page))
+ if (set_page_dirty(vmf->page))
dirtied = 1;
/*
* Take a local copy of the address_space - page.mapping may be zeroed
@@ -3287,8 +3280,8 @@ static int do_shared_fault(struct vm_fault *vmf)
* pinned by vma->vm_file's reference. We rely on unlock_page()'s
* release semantics to prevent the compiler from undoing this copying.
*/
- mapping = page_rmapping(fault_page);
- unlock_page(fault_page);
+ mapping = page_rmapping(vmf->page);
+ unlock_page(vmf->page);
if ((dirtied || vma->vm_ops->page_mkwrite) && mapping) {
/*
* Some device drivers do not set page.mapping but still
--
2.6.6
--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org. For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href="mailto:dont@kvack.org">email@kvack.org</a>
next prev parent reply other threads:[~2016-09-27 16:08 UTC|newest]
Thread overview: 72+ messages / expand[flat|nested] mbox.gz Atom feed top
2016-09-27 16:08 [PATCH 0/20 v3] dax: Clear dirty bits after flushing caches Jan Kara
2016-09-27 16:08 ` [PATCH 01/20] mm: Change type of vmf->virtual_address Jan Kara
2016-09-30 9:07 ` Christoph Hellwig
2016-10-14 18:02 ` Ross Zwisler
2016-09-27 16:08 ` [PATCH 02/20] mm: Join struct fault_env and vm_fault Jan Kara
2016-09-30 9:10 ` Christoph Hellwig
2016-10-03 7:43 ` Jan Kara
2016-09-27 16:08 ` [PATCH 03/20] mm: Use pgoff in struct vm_fault instead of passing it separately Jan Kara
2016-10-14 18:42 ` Ross Zwisler
2016-10-17 9:01 ` Jan Kara
2016-09-27 16:08 ` [PATCH 04/20] mm: Use passed vm_fault structure in __do_fault() Jan Kara
2016-10-14 19:05 ` Ross Zwisler
2016-09-27 16:08 ` Jan Kara [this message]
2016-10-14 20:31 ` [PATCH 05/20] mm: Trim __do_fault() arguments Ross Zwisler
2016-10-17 9:04 ` Jan Kara
2016-09-27 16:08 ` [PATCH 06/20] mm: Use pass vm_fault structure for in wp_pfn_shared() Jan Kara
2016-10-14 21:04 ` Ross Zwisler
2016-09-27 16:08 ` [PATCH 07/20] mm: Add orig_pte field into vm_fault Jan Kara
2016-10-17 16:45 ` Ross Zwisler
2016-10-18 10:13 ` Jan Kara
2016-09-27 16:08 ` [PATCH 08/20] mm: Allow full handling of COW faults in ->fault handlers Jan Kara
2016-10-17 16:50 ` Ross Zwisler
2016-09-27 16:08 ` [PATCH 09/20] mm: Factor out functionality to finish page faults Jan Kara
2016-10-17 17:38 ` Ross Zwisler
2016-10-17 17:40 ` Ross Zwisler
2016-10-18 9:44 ` Jan Kara
2016-09-27 16:08 ` [PATCH 10/20] mm: Move handling of COW faults into DAX code Jan Kara
2016-10-17 19:29 ` Ross Zwisler
2016-10-18 10:32 ` Jan Kara
2016-09-27 16:08 ` [PATCH 11/20] mm: Remove unnecessary vma->vm_ops check Jan Kara
2016-10-17 19:40 ` Ross Zwisler
2016-10-18 10:37 ` Jan Kara
2016-09-27 16:08 ` [PATCH 12/20] mm: Factor out common parts of write fault handling Jan Kara
2016-10-17 22:08 ` Ross Zwisler
2016-10-18 10:50 ` Jan Kara
2016-10-18 17:32 ` Ross Zwisler
2016-09-27 16:08 ` [PATCH 13/20] mm: Pass vm_fault structure into do_page_mkwrite() Jan Kara
2016-10-17 22:29 ` Ross Zwisler
2016-09-27 16:08 ` [PATCH 14/20] mm: Use vmf->page during WP faults Jan Kara
2016-10-18 17:56 ` Ross Zwisler
2016-09-27 16:08 ` [PATCH 15/20] mm: Move part of wp_page_reuse() into the single call site Jan Kara
2016-10-18 17:59 ` Ross Zwisler
2016-09-27 16:08 ` [PATCH 16/20] mm: Provide helper for finishing mkwrite faults Jan Kara
2016-10-18 18:35 ` Ross Zwisler
2016-10-19 7:16 ` Jan Kara
2016-10-19 17:21 ` Ross Zwisler
2016-10-20 8:48 ` Jan Kara
2016-09-27 16:08 ` [PATCH 17/20] mm: Export follow_pte() Jan Kara
2016-10-18 18:37 ` Ross Zwisler
2016-09-27 16:08 ` [PATCH 18/20] dax: Make cache flushing protected by entry lock Jan Kara
2016-10-18 19:20 ` Ross Zwisler
2016-10-19 7:19 ` Jan Kara
2016-10-19 18:25 ` Ross Zwisler
2016-09-27 16:08 ` [PATCH 19/20] dax: Protect PTE modification on WP fault by radix tree " Jan Kara
2016-10-18 19:53 ` Ross Zwisler
2016-10-19 7:25 ` Jan Kara
2016-10-19 17:25 ` Ross Zwisler
2016-09-27 16:08 ` [PATCH 20/20] dax: Clear dirty entry tags on cache flush Jan Kara
2016-10-18 22:12 ` Ross Zwisler
2016-10-19 7:30 ` Jan Kara
2016-10-19 16:38 ` Ross Zwisler
2016-09-30 9:14 ` [PATCH 0/20 v3] dax: Clear dirty bits after flushing caches Christoph Hellwig
2016-10-03 7:59 ` Jan Kara
2016-10-03 8:03 ` Christoph Hellwig
2016-10-03 8:15 ` Jan Kara
2016-10-03 9:32 ` Christoph Hellwig
2016-10-03 11:13 ` Jan Kara
2016-10-13 20:34 ` Ross Zwisler
2016-10-17 8:47 ` Jan Kara
2016-10-17 18:59 ` Ross Zwisler
2016-10-18 9:49 ` Jan Kara
-- strict thread matches above, loose matches on Subject: below --
2016-11-18 9:17 [PATCH 0/20 v5] " Jan Kara
2016-11-18 9:17 ` [PATCH 05/20] mm: Trim __do_fault() arguments Jan Kara
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1474992504-20133-6-git-send-email-jack@suse.cz \
--to=jack@suse.cz \
--cc=dan.j.williams@intel.com \
--cc=kirill.shutemov@linux.intel.com \
--cc=linux-fsdevel@vger.kernel.org \
--cc=linux-mm@kvack.org \
--cc=linux-nvdimm@lists.01.org \
--cc=ross.zwisler@linux.intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).