From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
To: linux-fsdevel@vger.kernel.org, linux-mm@kvack.org
Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>
Subject: [PATCH 33/48] mm: Add unmap_mapping_folio()
Date: Wed, 8 Dec 2021 04:22:41 +0000
Message-ID: <20211208042256.1923824-34-willy@infradead.org>
In-Reply-To: <20211208042256.1923824-1-willy@infradead.org>

Convert both callers of unmap_mapping_page() to call unmap_mapping_folio()
instead.  Also move the zap_details structure and its
zap_skip_check_mapping() helper from <linux/mm.h> to mm/internal.h, as
they are not needed outside mm/.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
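[Reviewer note, not part of the commit: a minimal sketch of the calling
convention this patch converts to.  The helper below is hypothetical;
only unmap_mapping_folio() itself comes from this patch, and it expects
the folio to be locked, as both converted callers guarantee.]

/* Hypothetical caller sketch -- the pattern used by the two call sites
 * converted below (truncate_cleanup_folio() and
 * invalidate_inode_pages2_range()).
 */
#include <linux/pagemap.h>
#include "internal.h"	/* unmap_mapping_folio() is mm-internal */

static void example_unmap_if_mapped(struct folio *folio)
{
	/* The caller must hold the folio lock across the unmap. */
	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);

	if (folio_mapped(folio))	/* any PTEs still map this folio? */
		unmap_mapping_folio(folio);
}
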
 include/linux/mm.h | 24 ------------------------
 mm/internal.h      | 25 ++++++++++++++++++++++++-
 mm/memory.c        | 27 +++++++++++++--------------
 mm/truncate.c      |  4 ++--
 4 files changed, 39 insertions(+), 41 deletions(-)
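
[Also illustration only, not part of the commit: the unmapped range is
now computed with folio_nr_pages() rather than thp_nr_pages(), covering
every page-cache index the folio occupies.  The function name below is
made up for this note.]

/* Illustrative arithmetic: an order-2 folio (4 pages) at
 * folio->index == 40 spans page-cache indices 40..43 inclusive.
 */
static void example_folio_index_range(struct folio *folio,
				      pgoff_t *first, pgoff_t *last)
{
	*first = folio->index;					/* e.g. 40 */
	*last = folio->index + folio_nr_pages(folio) - 1;	/* e.g. 43 */
}
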
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 145f045b0ddc..c9cdb26802fb 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1825,28 +1825,6 @@ static inline bool can_do_mlock(void) { return false; }
 extern int user_shm_lock(size_t, struct ucounts *);
 extern void user_shm_unlock(size_t, struct ucounts *);
 
-/*
- * Parameter block passed down to zap_pte_range in exceptional cases.
- */
-struct zap_details {
-	struct address_space *zap_mapping;	/* Check page->mapping if set */
-	struct page *single_page;		/* Locked page to be unmapped */
-};
-
-/*
- * We set details->zap_mappings when we want to unmap shared but keep private
- * pages. Return true if skip zapping this page, false otherwise.
- */
-static inline bool
-zap_skip_check_mapping(struct zap_details *details, struct page *page)
-{
-	if (!details || !page)
-		return false;
-
-	return details->zap_mapping &&
-		(details->zap_mapping != page_rmapping(page));
-}
-
 struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
 			    pte_t pte);
 struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
@@ -1892,7 +1870,6 @@ extern vm_fault_t handle_mm_fault(struct vm_area_struct *vma,
 extern int fixup_user_fault(struct mm_struct *mm,
 			    unsigned long address, unsigned int fault_flags,
 			    bool *unlocked);
-void unmap_mapping_page(struct page *page);
 void unmap_mapping_pages(struct address_space *mapping,
 		pgoff_t start, pgoff_t nr, bool even_cows);
 void unmap_mapping_range(struct address_space *mapping,
@@ -1913,7 +1890,6 @@ static inline int fixup_user_fault(struct mm_struct *mm, unsigned long address,
 	BUG();
 	return -EFAULT;
 }
-static inline void unmap_mapping_page(struct page *page) { }
 static inline void unmap_mapping_pages(struct address_space *mapping,
 		pgoff_t start, pgoff_t nr, bool even_cows) { }
 static inline void unmap_mapping_range(struct address_space *mapping,
diff --git a/mm/internal.h b/mm/internal.h
index 3b79a5c9427a..3f359f4830da 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -74,6 +74,28 @@ static inline bool can_madv_lru_vma(struct vm_area_struct *vma)
 	return !(vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP));
 }
 
+/*
+ * Parameter block passed down to zap_pte_range in exceptional cases.
+ */
+struct zap_details {
+	struct address_space *zap_mapping;	/* Check page->mapping if set */
+	struct folio *single_folio;	/* Locked folio to be unmapped */
+};
+
+/*
+ * We set details->zap_mapping when we want to unmap shared but keep private
+ * pages. Return true if we should skip zapping this page, false otherwise.
+ */
+static inline bool
+zap_skip_check_mapping(struct zap_details *details, struct page *page)
+{
+	if (!details || !page)
+		return false;
+
+	return details->zap_mapping &&
+		(details->zap_mapping != page_rmapping(page));
+}
+
 void unmap_page_range(struct mmu_gather *tlb,
 			     struct vm_area_struct *vma,
 			     unsigned long addr, unsigned long end,
@@ -388,6 +410,7 @@ void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
 void __vma_unlink_list(struct mm_struct *mm, struct vm_area_struct *vma);
 
 #ifdef CONFIG_MMU
+void unmap_mapping_folio(struct folio *folio);
 extern long populate_vma_page_range(struct vm_area_struct *vma,
 		unsigned long start, unsigned long end, int *locked);
 extern long faultin_vma_page_range(struct vm_area_struct *vma,
@@ -491,8 +514,8 @@ static inline struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf,
 	}
 	return fpin;
 }
-
 #else /* !CONFIG_MMU */
+static inline void unmap_mapping_folio(struct folio *folio) { }
 static inline void clear_page_mlock(struct page *page) { }
 static inline void mlock_vma_page(struct page *page) { }
 static inline void vunmap_range_noflush(unsigned long start, unsigned long end)
diff --git a/mm/memory.c b/mm/memory.c
index 8f1de811a1dc..a86027026f2a 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1443,8 +1443,8 @@ static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
 			else if (zap_huge_pmd(tlb, vma, pmd, addr))
 				goto next;
 			/* fall through */
-		} else if (details && details->single_page &&
-			   PageTransCompound(details->single_page) &&
+		} else if (details && details->single_folio &&
+			   folio_test_pmd_mappable(details->single_folio) &&
 			   next - addr == HPAGE_PMD_SIZE && pmd_none(*pmd)) {
 			spinlock_t *ptl = pmd_lock(tlb->mm, pmd);
 			/*
@@ -3332,31 +3332,30 @@ static inline void unmap_mapping_range_tree(struct rb_root_cached *root,
 }
 
 /**
- * unmap_mapping_page() - Unmap single page from processes.
- * @page: The locked page to be unmapped.
+ * unmap_mapping_folio() - Unmap single folio from processes.
+ * @folio: The locked folio to be unmapped.
  *
- * Unmap this page from any userspace process which still has it mmaped.
+ * Unmap this folio from any userspace process which still has it mmaped.
  * Typically, for efficiency, the range of nearby pages has already been
  * unmapped by unmap_mapping_pages() or unmap_mapping_range().  But once
- * truncation or invalidation holds the lock on a page, it may find that
- * the page has been remapped again: and then uses unmap_mapping_page()
+ * truncation or invalidation holds the lock on a folio, it may find that
+ * the folio has been remapped again: and then uses unmap_mapping_folio()
  * to unmap it finally.
  */
-void unmap_mapping_page(struct page *page)
+void unmap_mapping_folio(struct folio *folio)
 {
-	struct address_space *mapping = page->mapping;
+	struct address_space *mapping = folio->mapping;
 	struct zap_details details = { };
 	pgoff_t	first_index;
 	pgoff_t	last_index;
 
-	VM_BUG_ON(!PageLocked(page));
-	VM_BUG_ON(PageTail(page));
+	VM_BUG_ON(!folio_test_locked(folio));
 
-	first_index = page->index;
-	last_index = page->index + thp_nr_pages(page) - 1;
+	first_index = folio->index;
+	last_index = folio->index + folio_nr_pages(folio) - 1;
 
 	details.zap_mapping = mapping;
-	details.single_page = page;
+	details.single_folio = folio;
 
 	i_mmap_lock_write(mapping);
 	if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root)))
diff --git a/mm/truncate.c b/mm/truncate.c
index ab86b07c1e9c..c98feea75a10 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -180,7 +180,7 @@ void do_invalidatepage(struct page *page, unsigned int offset,
 static void truncate_cleanup_folio(struct folio *folio)
 {
 	if (folio_mapped(folio))
-		unmap_mapping_page(&folio->page);
+		unmap_mapping_folio(folio);
 
 	if (folio_has_private(folio))
 		do_invalidatepage(&folio->page, 0, folio_size(folio));
@@ -670,7 +670,7 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
 			wait_on_page_writeback(page);
 
 			if (page_mapped(page))
-				unmap_mapping_page(page);
+				unmap_mapping_folio(page_folio(page));
 			BUG_ON(page_mapped(page));
 
 			ret2 = do_launder_page(mapping, page);
--
2.33.0