From: David Hildenbrand <david@redhat.com>
To: linux-kernel@vger.kernel.org
Cc: linux-mm@kvack.org, David Hildenbrand <david@redhat.com>,
Andrew Morton <akpm@linux-foundation.org>,
Linus Torvalds <torvalds@linux-foundation.org>,
Ryan Roberts <ryan.roberts@arm.com>,
Matthew Wilcox <willy@infradead.org>,
Hugh Dickins <hughd@google.com>,
Yin Fengwei <fengwei.yin@intel.com>,
Yang Shi <shy828301@gmail.com>, Ying Huang <ying.huang@intel.com>,
Zi Yan <ziy@nvidia.com>, Peter Zijlstra <peterz@infradead.org>,
Ingo Molnar <mingo@redhat.com>, Will Deacon <will@kernel.org>,
Waiman Long <longman@redhat.com>,
"Paul E. McKenney" <paulmck@kernel.org>
Subject: [PATCH WIP v1 05/20] mm/rmap: abstract total mapcount operations for partially-mappable folios
Date: Fri, 24 Nov 2023 14:26:10 +0100
Message-ID: <20231124132626.235350-6-david@redhat.com>
In-Reply-To: <20231124132626.235350-1-david@redhat.com>

Let's prepare for doing additional accounting whenever modifying the total
mapcount of partially-mappable (!hugetlb) folios. Pass the VMA as well.
Signed-off-by: David Hildenbrand <david@redhat.com>
---
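To illustrate why the VMA is now plumbed through, here is a minimal sketch
(not part of this patch) of how a follow-up could extend one of the wrappers
introduced below; folio_account_mapper() is a hypothetical placeholder for
whatever per-MM accounting a later patch in this series might add:

static inline void folio_inc_large_mapcount(struct folio *folio,
		struct vm_area_struct *vma)
{
	VM_WARN_ON_FOLIO(!folio_test_large_rmappable(folio), folio);
	VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio);
	atomic_inc(&folio->_total_mapcount);
	/* hypothetical hook: record vma->vm_mm as a mapper of this folio */
	folio_account_mapper(folio, vma->vm_mm);
}
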
 include/linux/rmap.h | 41 ++++++++++++++++++++++++++++++++++++++++-
 mm/rmap.c            | 23 ++++++++++++-----------
 2 files changed, 52 insertions(+), 12 deletions(-)
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 6cb497f6feab..9d5c2ed6ced5 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -168,6 +168,39 @@ static inline void anon_vma_merge(struct vm_area_struct *vma,
struct anon_vma *folio_get_anon_vma(struct folio *folio);
+static inline void folio_set_large_mapcount(struct folio *folio,
+ int count, struct vm_area_struct *vma)
+{
+ VM_WARN_ON_FOLIO(!folio_test_large_rmappable(folio), folio);
+ VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio);
+ /* mapcount of a large folio starts at -1, so store the biased value */
+ atomic_set(&folio->_total_mapcount, count - 1);
+}
+
+static inline void folio_inc_large_mapcount(struct folio *folio,
+ struct vm_area_struct *vma)
+{
+ VM_WARN_ON_FOLIO(!folio_test_large_rmappable(folio), folio);
+ VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio);
+ atomic_inc(&folio->_total_mapcount);
+}
+
+static inline void folio_add_large_mapcount(struct folio *folio,
+ int count, struct vm_area_struct *vma)
+{
+ VM_WARN_ON_FOLIO(!folio_test_large_rmappable(folio), folio);
+ VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio);
+ atomic_add(count, &folio->_total_mapcount);
+}
+
+static inline void folio_dec_large_mapcount(struct folio *folio,
+ struct vm_area_struct *vma)
+{
+ VM_WARN_ON_FOLIO(!folio_test_large_rmappable(folio), folio);
+ VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio);
+ atomic_dec(&folio->_total_mapcount);
+}
+
/* RMAP flags, currently only relevant for some anon rmap operations. */
typedef int __bitwise rmap_t;
@@ -219,11 +252,17 @@ static inline void __page_dup_rmap(struct page *page,
return;
}
+ if (unlikely(folio_test_hugetlb(folio))) {
+ atomic_inc(&folio->_entire_mapcount);
+ atomic_inc(&folio->_total_mapcount);
+ return;
+ }
+
if (compound)
atomic_inc(&folio->_entire_mapcount);
else
atomic_inc(&page->_mapcount);
- atomic_inc(&folio->_total_mapcount);
+ folio_inc_large_mapcount(folio, dst_vma);
}
static inline void page_dup_file_rmap(struct page *page,
diff --git a/mm/rmap.c b/mm/rmap.c
index 38765796dca8..689ad85cf87e 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1105,8 +1105,8 @@ int pfn_mkclean_range(unsigned long pfn, unsigned long nr_pages, pgoff_t pgoff,
}
static unsigned int __folio_add_rmap_range(struct folio *folio,
- struct page *page, unsigned int nr_pages, bool compound,
- int *nr_pmdmapped)
+ struct page *page, unsigned int nr_pages,
+ struct vm_area_struct *vma, bool compound, int *nr_pmdmapped)
{
atomic_t *mapped = &folio->_nr_pages_mapped;
int first, count, nr = 0;
@@ -1130,7 +1130,7 @@ static unsigned int __folio_add_rmap_range(struct folio *folio,
nr++;
}
} while (page++, --count > 0);
- atomic_add(nr_pages, &folio->_total_mapcount);
+ folio_add_large_mapcount(folio, nr_pages, vma);
} else if (folio_test_pmd_mappable(folio)) {
/* That test is redundant: it's for safety or to optimize out */
@@ -1148,7 +1148,7 @@ static unsigned int __folio_add_rmap_range(struct folio *folio,
nr = 0;
}
}
- atomic_inc(&folio->_total_mapcount);
+ folio_inc_large_mapcount(folio, vma);
} else {
VM_WARN_ON_ONCE_FOLIO(true, folio);
}
@@ -1258,7 +1258,8 @@ void page_add_anon_rmap(struct page *page, struct vm_area_struct *vma,
unsigned int nr, nr_pmdmapped = 0;
bool compound = flags & RMAP_COMPOUND;
- nr = __folio_add_rmap_range(folio, page, 1, compound, &nr_pmdmapped);
+ nr = __folio_add_rmap_range(folio, page, 1, vma, compound,
+ &nr_pmdmapped);
if (nr_pmdmapped)
__lruvec_stat_mod_folio(folio, NR_ANON_THPS, nr_pmdmapped);
if (nr)
@@ -1329,8 +1330,7 @@ void folio_add_new_anon_rmap(struct folio *folio, struct vm_area_struct *vma,
}
if (folio_test_large(folio))
- /* increment count (starts at -1) */
- atomic_set(&folio->_total_mapcount, 0);
+ folio_set_large_mapcount(folio, 1, vma);
__lruvec_stat_mod_folio(folio, NR_ANON_MAPPED, nr);
__folio_set_anon(folio, vma, address, true);
@@ -1355,7 +1355,7 @@ void folio_add_file_rmap_range(struct folio *folio, struct page *page,
{
unsigned int nr, nr_pmdmapped = 0;
- nr = __folio_add_rmap_range(folio, page, nr_pages, compound,
+ nr = __folio_add_rmap_range(folio, page, nr_pages, vma, compound,
&nr_pmdmapped);
if (nr_pmdmapped)
__lruvec_stat_mod_folio(folio, folio_test_swapbacked(folio) ?
@@ -1411,16 +1411,17 @@ void page_remove_rmap(struct page *page, struct vm_area_struct *vma,
VM_BUG_ON_PAGE(compound && !PageHead(page), page);
- if (folio_test_large(folio))
- atomic_dec(&folio->_total_mapcount);
-
/* Hugetlb pages are not counted in NR_*MAPPED */
if (unlikely(folio_test_hugetlb(folio))) {
/* hugetlb pages are always mapped with pmds */
atomic_dec(&folio->_entire_mapcount);
+ atomic_dec(&folio->_total_mapcount);
return;
}
+ if (folio_test_large(folio))
+ folio_dec_large_mapcount(folio, vma);
+
/* Is page being unmapped by PTE? Is this its last map to be removed? */
if (likely(!compound)) {
last = atomic_add_negative(-1, &page->_mapcount);
--
2.41.0
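
A side note on the counting convention used above: _total_mapcount is stored
with a -1 bias (an unmapped large folio holds -1), which is why
folio_set_large_mapcount() stores count - 1. Below is a standalone userspace
model of that convention, assuming C11 atomics and kernel-inspired naming;
none of this is kernel code:

#include <assert.h>
#include <stdatomic.h>

/* Userspace model of the biased counter; names only mirror the kernel's. */
struct folio_model {
	atomic_int total_mapcount;	/* starts at -1 == unmapped */
};

static void set_large_mapcount(struct folio_model *f, int count)
{
	atomic_store(&f->total_mapcount, count - 1);	/* store biased value */
}

static int read_total_mapcount(struct folio_model *f)
{
	return atomic_load(&f->total_mapcount) + 1;	/* undo the bias */
}

int main(void)
{
	struct folio_model f = { .total_mapcount = -1 };

	assert(read_total_mapcount(&f) == 0);	/* unmapped */
	set_large_mapcount(&f, 1);		/* first mapping */
	atomic_fetch_add(&f.total_mapcount, 1);	/* like folio_inc_large_mapcount() */
	assert(read_total_mapcount(&f) == 2);
	return 0;
}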