From: jglisse@redhat.com
To: linux-mm@kvack.org, linux-fsdevel@vger.kernel.org,
linux-block@vger.kernel.org
Cc: linux-kernel@vger.kernel.org,
"Jérôme Glisse" <jglisse@redhat.com>,
"Andrew Morton" <akpm@linux-foundation.org>,
"Mel Gorman" <mgorman@techsingularity.net>,
"Alexander Viro" <viro@zeniv.linux.org.uk>
Subject: [RFC PATCH 63/79] mm/page: convert page's index lookup to be against specific mapping
Date: Wed, 4 Apr 2018 15:18:17 -0400 [thread overview]
Message-ID: <20180404191831.5378-28-jglisse@redhat.com> (raw)
In-Reply-To: <20180404191831.5378-1-jglisse@redhat.com>
From: Jérôme Glisse <jglisse@redhat.com>
This patch switches mm to look up a page's index or offset value
against a specific mapping. The page index value only has meaning
relative to a mapping.
Using coccinelle:
---------------------------------------------------------------------
@@
struct page *P;
expression E;
@@
-P->index = E
+page_set_index(P, E)
@@
struct page *P;
@@
-P->index
+page_index(P)
@@
struct page *P;
@@
-page_index(P) << PAGE_SHIFT
+page_offset(P)
@@
expression E;
@@
-page_index(E)
+_page_index(E, mapping)
@@
expression E1, E2;
@@
-page_set_index(E1, E2)
+_page_set_index(E1, mapping, E2)
@@
expression E;
@@
-page_to_index(E)
+_page_to_index(E, mapping)
@@
expression E;
@@
-page_to_pgoff(E)
+_page_to_pgoff(E, mapping)
@@
expression E;
@@
-page_offset(E)
+_page_offset(E, mapping)
@@
expression E;
@@
-page_file_offset(E)
+_page_file_offset(E, mapping)
---------------------------------------------------------------------
Signed-off-by: Jérôme Glisse <jglisse@redhat.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: linux-mm@kvack.org
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: linux-fsdevel@vger.kernel.org
---
mm/filemap.c | 26 ++++++++++++++------------
mm/page-writeback.c | 16 +++++++++-------
mm/shmem.c | 11 +++++++----
mm/truncate.c | 11 ++++++-----
4 files changed, 36 insertions(+), 28 deletions(-)
diff --git a/mm/filemap.c b/mm/filemap.c
index 012a53964215..a41c7cfb6351 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -118,7 +118,8 @@ static int page_cache_tree_insert(struct address_space *mapping,
void **slot;
int error;
- error = __radix_tree_create(&mapping->page_tree, page->index, 0,
+ error = __radix_tree_create(&mapping->page_tree,
+ _page_index(page, mapping), 0,
&node, &slot);
if (error)
return error;
@@ -155,7 +156,8 @@ static void page_cache_tree_delete(struct address_space *mapping,
struct radix_tree_node *node;
void **slot;
- __radix_tree_lookup(&mapping->page_tree, page->index + i,
+ __radix_tree_lookup(&mapping->page_tree,
+ _page_index(page, mapping) + i,
&node, &slot);
VM_BUG_ON_PAGE(!node && nr != 1, page);
@@ -791,12 +793,12 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
void (*freepage)(struct page *);
unsigned long flags;
- pgoff_t offset = old->index;
+ pgoff_t offset = _page_index(old, mapping);
freepage = mapping->a_ops->freepage;
get_page(new);
new->mapping = mapping;
- new->index = offset;
+ _page_set_index(new, mapping, offset);
spin_lock_irqsave(&mapping->tree_lock, flags);
__delete_from_page_cache(old, NULL);
@@ -850,7 +852,7 @@ static int __add_to_page_cache_locked(struct page *page,
get_page(page);
page->mapping = mapping;
- page->index = offset;
+ _page_set_index(page, mapping, offset);
spin_lock_irq(&mapping->tree_lock);
error = page_cache_tree_insert(mapping, page, shadowp);
@@ -1500,7 +1502,7 @@ struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset)
put_page(page);
goto repeat;
}
- VM_BUG_ON_PAGE(page_to_pgoff(page) != offset, page);
+ VM_BUG_ON_PAGE(_page_to_pgoff(page, mapping) != offset, page);
}
return page;
}
@@ -1559,7 +1561,7 @@ struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
put_page(page);
goto repeat;
}
- VM_BUG_ON_PAGE(page->index != offset, page);
+ VM_BUG_ON_PAGE(_page_index(page, mapping) != offset, page);
}
if (page && (fgp_flags & FGP_ACCESSED))
@@ -1751,7 +1753,7 @@ unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start,
pages[ret] = page;
if (++ret == nr_pages) {
- *start = pages[ret - 1]->index + 1;
+ *start = _page_index(pages[ret - 1], mapping) + 1;
goto out;
}
}
@@ -1837,7 +1839,7 @@ unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index,
* otherwise we can get both false positives and false
* negatives, which is just confusing to the caller.
*/
- if (page->mapping == NULL || page_to_pgoff(page) != iter.index) {
+ if (page->mapping == NULL || _page_to_pgoff(page, mapping) != iter.index) {
put_page(page);
break;
}
@@ -1923,7 +1925,7 @@ unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index,
pages[ret] = page;
if (++ret == nr_pages) {
- *index = pages[ret - 1]->index + 1;
+ *index = _page_index(pages[ret - 1], mapping) + 1;
goto out;
}
}
@@ -2540,7 +2542,7 @@ int filemap_fault(struct vm_fault *vmf)
put_page(page);
goto retry_find;
}
- VM_BUG_ON_PAGE(page->index != offset, page);
+ VM_BUG_ON_PAGE(_page_index(page, mapping) != offset, page);
/*
* We have a locked page in the page cache, now we need to check
@@ -2667,7 +2669,7 @@ void filemap_map_pages(struct vm_fault *vmf,
goto unlock;
max_idx = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE);
- if (page->index >= max_idx)
+ if (_page_index(page, mapping) >= max_idx)
goto unlock;
if (file->f_ra.mmap_miss > 0)
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 3c14d44639c8..ed9424f84715 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2201,7 +2201,7 @@ int write_cache_pages(struct address_space *mapping,
for (i = 0; i < nr_pages; i++) {
struct page *page = pvec.pages[i];
- done_index = page->index;
+ done_index = _page_index(page, mapping);
lock_page(page);
@@ -2251,7 +2251,8 @@ int write_cache_pages(struct address_space *mapping,
* not be suitable for data integrity
* writeout).
*/
- done_index = page->index + 1;
+ done_index = _page_index(page,
+ mapping) + 1;
done = 1;
break;
}
@@ -2470,7 +2471,8 @@ int __set_page_dirty_nobuffers(struct page *page)
BUG_ON(page_mapping(page) != mapping);
WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page));
account_page_dirtied(page, mapping);
- radix_tree_tag_set(&mapping->page_tree, page_index(page),
+ radix_tree_tag_set(&mapping->page_tree,
+ _page_index(page, mapping),
PAGECACHE_TAG_DIRTY);
spin_unlock_irqrestore(&mapping->tree_lock, flags);
unlock_page_memcg(page);
@@ -2732,7 +2734,7 @@ int test_clear_page_writeback(struct page *page)
ret = TestClearPageWriteback(page);
if (ret) {
radix_tree_tag_clear(&mapping->page_tree,
- page_index(page),
+ _page_index(page, mapping),
PAGECACHE_TAG_WRITEBACK);
if (bdi_cap_account_writeback(bdi)) {
struct bdi_writeback *wb = inode_to_wb(inode);
@@ -2785,7 +2787,7 @@ int __test_set_page_writeback(struct page *page, bool keep_write)
PAGECACHE_TAG_WRITEBACK);
radix_tree_tag_set(&mapping->page_tree,
- page_index(page),
+ _page_index(page, mapping),
PAGECACHE_TAG_WRITEBACK);
if (bdi_cap_account_writeback(bdi))
inc_wb_stat(inode_to_wb(inode), WB_WRITEBACK);
@@ -2800,11 +2802,11 @@ int __test_set_page_writeback(struct page *page, bool keep_write)
}
if (!PageDirty(page))
radix_tree_tag_clear(&mapping->page_tree,
- page_index(page),
+ _page_index(page, mapping),
PAGECACHE_TAG_DIRTY);
if (!keep_write)
radix_tree_tag_clear(&mapping->page_tree,
- page_index(page),
+ _page_index(page, mapping),
PAGECACHE_TAG_TOWRITE);
spin_unlock_irqrestore(&mapping->tree_lock, flags);
} else {
diff --git a/mm/shmem.c b/mm/shmem.c
index 7fee65df10b4..7f3168d547c8 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -588,7 +588,7 @@ static int shmem_add_to_page_cache(struct page *page,
page_ref_add(page, nr);
page->mapping = mapping;
- page->index = index;
+ _page_set_index(page, mapping, index);
spin_lock_irq(&mapping->tree_lock);
if (PageTransHuge(page)) {
@@ -644,7 +644,9 @@ static void shmem_delete_from_page_cache(struct page *page, void *radswap)
VM_BUG_ON_PAGE(PageCompound(page), page);
spin_lock_irq(&mapping->tree_lock);
- error = shmem_radix_tree_replace(mapping, page->index, page, radswap);
+ error = shmem_radix_tree_replace(mapping, _page_index(page, mapping),
+ page,
+ radswap);
page->mapping = NULL;
mapping->nrpages--;
__dec_node_page_state(page, NR_FILE_PAGES);
@@ -822,7 +824,8 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
continue;
}
- VM_BUG_ON_PAGE(page_to_pgoff(page) != index, page);
+ VM_BUG_ON_PAGE(_page_to_pgoff(page, mapping) != index,
+ page);
if (!trylock_page(page))
continue;
@@ -1267,7 +1270,7 @@ static int shmem_writepage(struct address_space *_mapping, struct page *page,
VM_BUG_ON_PAGE(PageCompound(page), page);
BUG_ON(!PageLocked(page));
mapping = page->mapping;
- index = page->index;
+ index = _page_index(page, mapping);
inode = mapping->host;
info = SHMEM_I(inode);
if (info->flags & VM_LOCKED)
diff --git a/mm/truncate.c b/mm/truncate.c
index a9415c96c966..57d4d0948f40 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -181,7 +181,8 @@ truncate_cleanup_page(struct address_space *mapping, struct page *page)
{
if (page_mapped(page)) {
pgoff_t nr = PageTransHuge(page) ? HPAGE_PMD_NR : 1;
- unmap_mapping_pages(mapping, page->index, nr, false);
+ unmap_mapping_pages(mapping, _page_index(page, mapping), nr,
+ false);
}
if (page_has_private(page))
@@ -353,7 +354,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
if (!trylock_page(page))
continue;
- WARN_ON(page_to_index(page) != index);
+ WARN_ON(_page_to_index(page, mapping) != index);
if (PageWriteback(page)) {
unlock_page(page);
continue;
@@ -447,7 +448,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
continue;
lock_page(page);
- WARN_ON(page_to_index(page) != index);
+ WARN_ON(_page_to_index(page, mapping) != index);
wait_on_page_writeback(page);
truncate_inode_page(mapping, page);
unlock_page(page);
@@ -571,7 +572,7 @@ unsigned long invalidate_mapping_pages(struct address_space *mapping,
if (!trylock_page(page))
continue;
- WARN_ON(page_to_index(page) != index);
+ WARN_ON(_page_to_index(page, mapping) != index);
/* Middle of THP: skip */
if (PageTransTail(page)) {
@@ -701,7 +702,7 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
}
lock_page(page);
- WARN_ON(page_to_index(page) != index);
+ WARN_ON(_page_to_index(page, mapping) != index);
if (page_is_truncated(page, mapping)) {
unlock_page(page);
continue;
--
2.14.3
next prev parent reply other threads:[~2018-04-04 19:18 UTC|newest]
Thread overview: 50+ messages / expand[flat|nested] mbox.gz Atom feed top
2018-04-04 19:17 [RFC PATCH 00/79] Generic page write protection and a solution to page waitqueue jglisse
2018-04-04 19:17 ` [RFC PATCH 04/79] pipe: add inode field to struct pipe_inode_info jglisse
2018-04-04 19:17 ` [RFC PATCH 05/79] mm/swap: add an helper to get address_space from swap_entry_t jglisse
2018-04-04 19:17 ` [RFC PATCH 06/79] mm/page: add helpers to dereference struct page index field jglisse
2018-04-04 19:17 ` [RFC PATCH 07/79] mm/page: add helpers to find mapping give a page and buffer head jglisse
2018-04-04 19:17 ` [RFC PATCH 08/79] mm/page: add helpers to find page mapping and private given a bio jglisse
2018-04-04 19:17 ` [RFC PATCH 09/79] fs: add struct address_space to read_cache_page() callback argument jglisse
2018-04-04 19:17 ` [RFC PATCH 20/79] fs: add struct address_space to write_cache_pages() " jglisse
2018-04-04 19:17 ` [RFC PATCH 22/79] fs: add struct inode to block_read_full_page() arguments jglisse
2018-04-04 19:17 ` [RFC PATCH 24/79] fs: add struct inode to nobh_writepage() arguments jglisse
2018-04-04 19:18 ` [RFC PATCH 26/79] fs: add struct address_space to mpage_readpage() arguments jglisse
2018-04-04 19:18 ` [RFC PATCH 27/79] fs: add struct address_space to fscache_read*() callback arguments jglisse
2018-04-04 19:18 ` [RFC PATCH 28/79] fs: introduce page_is_truncated() helper jglisse
2018-04-04 19:18 ` [RFC PATCH 29/79] fs/block: add struct address_space to bdev_write_page() arguments jglisse
2018-04-04 19:18 ` [RFC PATCH 30/79] fs/block: add struct address_space to __block_write_begin() arguments jglisse
2018-04-04 19:18 ` [RFC PATCH 31/79] fs/block: add struct address_space to __block_write_begin_int() args jglisse
2018-04-04 19:18 ` [RFC PATCH 32/79] fs/block: do not rely on page->mapping get it from the context jglisse
2018-04-04 19:18 ` [RFC PATCH 33/79] fs/journal: add struct super_block to jbd2_journal_forget() arguments jglisse
2018-04-04 19:18 ` [RFC PATCH 34/79] fs/journal: add struct inode to jbd2_journal_revoke() arguments jglisse
2018-04-04 19:18 ` [RFC PATCH 35/79] fs/buffer: add struct address_space and struct page to end_io callback jglisse
2018-04-04 19:18 ` [RFC PATCH 36/79] fs/buffer: add struct super_block to bforget() arguments jglisse
2018-04-04 19:18 ` [RFC PATCH 37/79] fs/buffer: add struct super_block to __bforget() arguments jglisse
2018-04-04 19:18 ` [RFC PATCH 38/79] fs/buffer: add first buffer flag for first buffer_head in a page jglisse
2018-04-04 19:18 ` [RFC PATCH 39/79] fs/buffer: add struct address_space to clean_page_buffers() arguments jglisse
2018-04-04 19:18 ` [RFC PATCH 50/79] fs: stop relying on mapping field of struct page, get it from context jglisse
2018-04-04 19:18 ` [RFC PATCH 51/79] " jglisse
2018-04-04 19:18 ` [RFC PATCH 52/79] fs/buffer: use _page_has_buffers() instead of page_has_buffers() jglisse
2018-04-04 19:18 ` jglisse [this message]
2018-04-04 19:18 ` [RFC PATCH 64/79] mm/buffer: " jglisse
2018-04-04 19:18 ` [RFC PATCH 65/79] mm/swap: add struct swap_info_struct swap_readpage() arguments jglisse
2018-04-04 19:18 ` [RFC PATCH 68/79] mm/vma_address: convert page's index lookup to be against specific mapping jglisse
2018-04-04 19:18 ` [RFC PATCH 69/79] fs/journal: add struct address_space to jbd2_journal_try_to_free_buffers() arguments jglisse
2018-04-04 19:18 ` [RFC PATCH 70/79] mm: add struct address_space to mark_buffer_dirty() jglisse
2018-04-04 19:18 ` [RFC PATCH 71/79] mm: add struct address_space to set_page_dirty() jglisse
2018-04-04 19:18 ` [RFC PATCH 72/79] mm: add struct address_space to set_page_dirty_lock() jglisse
2018-04-04 19:18 ` [RFC PATCH 73/79] mm: pass down struct address_space to set_page_dirty() jglisse
2018-04-04 19:18 ` [RFC PATCH 74/79] mm/page_ronly: add config option for generic read only page framework jglisse
2018-04-04 19:18 ` [RFC PATCH 75/79] mm/page_ronly: add page read only core structure and helpers jglisse
2018-04-04 19:18 ` [RFC PATCH 76/79] mm/ksm: have ksm select PAGE_RONLY config jglisse
2018-04-04 19:18 ` [RFC PATCH 77/79] mm/ksm: hide set_page_stable_node() and page_stable_node() jglisse
2018-04-04 19:18 ` [RFC PATCH 78/79] mm/ksm: rename PAGE_MAPPING_KSM to PAGE_MAPPING_RONLY jglisse
2018-04-04 19:18 ` [RFC PATCH 79/79] mm/ksm: set page->mapping to page_ronly struct instead of stable_node jglisse
2018-04-18 14:13 ` [RFC PATCH 00/79] Generic page write protection and a solution to page waitqueue Jan Kara
2018-04-18 15:54 ` Jerome Glisse
2018-04-18 16:20 ` Darrick J. Wong
2018-04-19 10:32 ` Jan Kara
2018-04-19 14:52 ` Jerome Glisse
2018-04-20 19:57 ` Tim Chen
2018-04-20 22:19 ` Jerome Glisse
2018-04-20 23:48 ` Tim Chen
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20180404191831.5378-28-jglisse@redhat.com \
--to=jglisse@redhat.com \
--cc=akpm@linux-foundation.org \
--cc=linux-block@vger.kernel.org \
--cc=linux-fsdevel@vger.kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-mm@kvack.org \
--cc=mgorman@techsingularity.net \
--cc=viro@zeniv.linux.org.uk \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).