From: Yin Fengwei <fengwei.yin@intel.com>
To: linux-mm@kvack.org, akpm@linux-foundation.org, willy@infradead.org
Cc: fengwei.yin@intel.com
Subject: [PATCH 3/5] rmap: cleanup exit path of try_to_unmap_one_page()
Date: Thu, 23 Feb 2023 16:31:58 +0800
Message-ID: <20230223083200.3149015-4-fengwei.yin@intel.com>
In-Reply-To: <20230223083200.3149015-1-fengwei.yin@intel.com>
Clean up the exit paths of try_to_unmap_one_page(): collect the
duplicated failure handling (restore the pte, return false) behind
shared exit labels, and do the rss counter decrement once on the way
out instead of separately in every branch.
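For illustration, a minimal userspace sketch of the pattern (the
resources here are hypothetical, not the rmap code itself): every
failure branch jumps to a shared label, so the undo code and the
success-path accounting each live in exactly one place.

    #include <stdbool.h>
    #include <stdlib.h>

    static bool do_one(void)
    {
            char *a = malloc(16);           /* first resource */
            if (!a)
                    goto exit;

            char *b = malloc(16);           /* second resource */
            if (!b)
                    goto exit_free_a;

            /* Success: one spot for final accounting ("discard:"). */
            free(b);
            free(a);
            return true;

    exit_free_a:
            free(a);                        /* undo lives here, once */
    exit:
            return false;
    }

    int main(void)
    {
            return do_one() ? 0 : 1;
    }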
Move page_vma_mapped_walk_done() back to try_to_unmap_one(), so the
function that starts the walk is also the one that finishes it.

Rename subpage to page, as a folio has no concept of subpage. With the
hugetlb path split out, folio_test_hugetlb() is a known constant at
each page_remove_rmap() call site, so pass true/false directly. While
at it, skip folio_mark_dirty() when the folio is already dirty.
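A runnable userspace sketch of the resulting caller shape
(walk_next(), walk_done() and unmap_one() are hypothetical stand-ins
for the pvmw API, not the real interfaces):

    #include <stdbool.h>

    struct walk { int pos, end; };

    static bool walk_next(struct walk *w) { return w->pos++ < w->end; }
    static void walk_done(struct walk *w) { w->pos = w->end; }
    static bool unmap_one(int pos) { return pos != 3; /* fail once */ }

    int main(void)
    {
            struct walk w = { .pos = 0, .end = 5 };

            while (walk_next(&w)) {
                    if (!unmap_one(w.pos)) {
                            /* the loop that started the walk ends it */
                            walk_done(&w);
                            break;
                    }
            }
            return 0;
    }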
Signed-off-by: Yin Fengwei <fengwei.yin@intel.com>
---
mm/rmap.c | 74 ++++++++++++++++++++++---------------------------------
1 file changed, 30 insertions(+), 44 deletions(-)
diff --git a/mm/rmap.c b/mm/rmap.c
index 879e90bbf6aa..097774c809a0 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1536,7 +1536,7 @@ static bool try_to_unmap_one_hugetlb(struct folio *folio,
*
* See Documentation/mm/mmu_notifier.rst
*/
- page_remove_rmap(&folio->page, vma, folio_test_hugetlb(folio));
+ page_remove_rmap(&folio->page, vma, true);
if (vma->vm_flags & VM_LOCKED)
mlock_drain_local();
folio_put(folio);
@@ -1550,15 +1550,13 @@ static bool try_to_unmap_one_page(struct folio *folio,
struct page_vma_mapped_walk pvmw, unsigned long address,
enum ttu_flags flags)
{
- bool anon_exclusive, ret = true;
- struct page *subpage;
+ bool anon_exclusive;
+ struct page *page;
struct mm_struct *mm = vma->vm_mm;
pte_t pteval;
- subpage = folio_page(folio,
- pte_pfn(*pvmw.pte) - folio_pfn(folio));
- anon_exclusive = folio_test_anon(folio) &&
- PageAnonExclusive(subpage);
+ page = folio_page(folio, pte_pfn(*pvmw.pte) - folio_pfn(folio));
+ anon_exclusive = folio_test_anon(folio) && PageAnonExclusive(page);
flush_cache_page(vma, address, pte_pfn(*pvmw.pte));
/* Nuke the page table entry. */
@@ -1586,15 +1584,14 @@ static bool try_to_unmap_one_page(struct folio *folio,
pte_install_uffd_wp_if_needed(vma, address, pvmw.pte, pteval);
/* Set the dirty flag on the folio now the pte is gone. */
- if (pte_dirty(pteval))
+ if (pte_dirty(pteval) && !folio_test_dirty(folio))
folio_mark_dirty(folio);
/* Update high watermark before we lower rss */
update_hiwater_rss(mm);
- if (PageHWPoison(subpage) && !(flags & TTU_IGNORE_HWPOISON)) {
- pteval = swp_entry_to_pte(make_hwpoison_entry(subpage));
- dec_mm_counter(mm, mm_counter(&folio->page));
+ if (PageHWPoison(page) && !(flags & TTU_IGNORE_HWPOISON)) {
+ pteval = swp_entry_to_pte(make_hwpoison_entry(page));
set_pte_at(mm, address, pvmw.pte, pteval);
} else if (pte_unused(pteval) && !userfaultfd_armed(vma)) {
/*
@@ -1607,12 +1604,11 @@ static bool try_to_unmap_one_page(struct folio *folio,
* migration) will not expect userfaults on already
* copied pages.
*/
- dec_mm_counter(mm, mm_counter(&folio->page));
/* We have to invalidate as we cleared the pte */
mmu_notifier_invalidate_range(mm, address,
address + PAGE_SIZE);
} else if (folio_test_anon(folio)) {
- swp_entry_t entry = { .val = page_private(subpage) };
+ swp_entry_t entry = { .val = page_private(page) };
pte_t swp_pte;
/*
* Store the swap location in the pte.
@@ -1621,12 +1617,10 @@ static bool try_to_unmap_one_page(struct folio *folio,
if (unlikely(folio_test_swapbacked(folio) !=
folio_test_swapcache(folio))) {
WARN_ON_ONCE(1);
- ret = false;
/* We have to invalidate as we cleared the pte */
mmu_notifier_invalidate_range(mm, address,
address + PAGE_SIZE);
- page_vma_mapped_walk_done(&pvmw);
- goto discard;
+ goto exit;
}
/* MADV_FREE page check */
@@ -1658,7 +1652,6 @@ static bool try_to_unmap_one_page(struct folio *folio,
/* Invalidate as we cleared the pte */
mmu_notifier_invalidate_range(mm,
address, address + PAGE_SIZE);
- dec_mm_counter(mm, MM_ANONPAGES);
goto discard;
}
@@ -1666,43 +1659,30 @@ static bool try_to_unmap_one_page(struct folio *folio,
* If the folio was redirtied, it cannot be
* discarded. Remap the page to page table.
*/
- set_pte_at(mm, address, pvmw.pte, pteval);
folio_set_swapbacked(folio);
- ret = false;
- page_vma_mapped_walk_done(&pvmw);
- goto discard;
+ goto exit_restore_pte;
}
- if (swap_duplicate(entry) < 0) {
- set_pte_at(mm, address, pvmw.pte, pteval);
- ret = false;
- page_vma_mapped_walk_done(&pvmw);
- goto discard;
- }
+ if (swap_duplicate(entry) < 0)
+ goto exit_restore_pte;
+
if (arch_unmap_one(mm, vma, address, pteval) < 0) {
swap_free(entry);
- set_pte_at(mm, address, pvmw.pte, pteval);
- ret = false;
- page_vma_mapped_walk_done(&pvmw);
- goto discard;
+ goto exit_restore_pte;
}
/* See page_try_share_anon_rmap(): clear PTE first. */
- if (anon_exclusive &&
- page_try_share_anon_rmap(subpage)) {
+ if (anon_exclusive && page_try_share_anon_rmap(page)) {
swap_free(entry);
- set_pte_at(mm, address, pvmw.pte, pteval);
- ret = false;
- page_vma_mapped_walk_done(&pvmw);
- goto discard;
+ goto exit_restore_pte;
}
+
if (list_empty(&mm->mmlist)) {
spin_lock(&mmlist_lock);
if (list_empty(&mm->mmlist))
list_add(&mm->mmlist, &init_mm.mmlist);
spin_unlock(&mmlist_lock);
}
- dec_mm_counter(mm, MM_ANONPAGES);
inc_mm_counter(mm, MM_SWAPENTS);
swp_pte = swp_entry_to_pte(entry);
if (anon_exclusive)
@@ -1713,8 +1693,7 @@ static bool try_to_unmap_one_page(struct folio *folio,
swp_pte = pte_swp_mkuffd_wp(swp_pte);
set_pte_at(mm, address, pvmw.pte, swp_pte);
/* Invalidate as we cleared the pte */
- mmu_notifier_invalidate_range(mm, address,
- address + PAGE_SIZE);
+ mmu_notifier_invalidate_range(mm, address, address + PAGE_SIZE);
} else {
/*
* This is a locked file-backed folio,
@@ -1727,11 +1706,16 @@ static bool try_to_unmap_one_page(struct folio *folio,
*
* See Documentation/mm/mmu_notifier.rst
*/
- dec_mm_counter(mm, mm_counter_file(&folio->page));
}
discard:
- return ret;
+ dec_mm_counter(vma->vm_mm, mm_counter(&folio->page));
+ return true;
+
+exit_restore_pte:
+ set_pte_at(mm, address, pvmw.pte, pteval);
+exit:
+ return false;
}
/*
@@ -1809,8 +1793,10 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
pte_pfn(*pvmw.pte) - folio_pfn(folio));
ret = try_to_unmap_one_page(folio, vma,
range, pvmw, address, flags);
- if (!ret)
+ if (!ret) {
+ page_vma_mapped_walk_done(&pvmw);
break;
+ }
/*
* No need to call mmu_notifier_invalidate_range() it has be
@@ -1819,7 +1805,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
*
* See Documentation/mm/mmu_notifier.rst
*/
- page_remove_rmap(subpage, vma, folio_test_hugetlb(folio));
+ page_remove_rmap(subpage, vma, false);
if (vma->vm_flags & VM_LOCKED)
mlock_drain_local();
folio_put(folio);
--
2.30.2