From: Jordan Niethe <jniethe@nvidia.com>
To: linux-mm@kvack.org
Cc: balbirs@nvidia.com, matthew.brost@intel.com,
akpm@linux-foundation.org, linux-kernel@vger.kernel.org,
dri-devel@lists.freedesktop.org, david@redhat.com,
ziy@nvidia.com, apopple@nvidia.com, lorenzo.stoakes@oracle.com,
lyude@redhat.com, dakr@kernel.org, airlied@gmail.com,
simona@ffwll.ch, rcampbell@nvidia.com, mpenttil@redhat.com,
jgg@nvidia.com, willy@infradead.org
Subject: [RFC PATCH 3/6] mm/page_vma_mapped: Add flags to page_vma_mapped_walk::pfn to track device private PFNs
Date: Fri, 28 Nov 2025 15:41:43 +1100 [thread overview]
Message-ID: <20251128044146.80050-4-jniethe@nvidia.com> (raw)
In-Reply-To: <20251128044146.80050-1-jniethe@nvidia.com>
A future change will remove device private pages from the physical
address space. This will mean that device private pages no longer have
normal PFNs and must be handled separately.
Prepare for this by modifying page_vma_mapped_walk::pfn to contain flags
as well as a PFN. Introduce a PVMW_PFN_DEVICE_PRIVATE flag to indicate
that page_vma_mapped_walk::pfn contains a PFN for a device private
page.
Signed-off-by: Jordan Niethe <jniethe@nvidia.com>
Signed-off-by: Alistair Popple <apopple@nvidia.com>
---
include/linux/rmap.h | 26 +++++++++++++++++++++++++-
mm/page_vma_mapped.c | 6 +++---
mm/rmap.c | 4 ++--
mm/vmscan.c | 2 +-
4 files changed, 31 insertions(+), 7 deletions(-)
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index daa92a58585d..79e5c733d9c8 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -939,9 +939,33 @@ struct page_vma_mapped_walk {
unsigned int flags;
};
+/* pfn is a device private offset */
+#define PVMW_PFN_DEVICE_PRIVATE (1UL << 0)
+#define PVMW_PFN_SHIFT 1
+
+static inline unsigned long page_vma_walk_pfn(unsigned long pfn)
+{
+ return (pfn << PVMW_PFN_SHIFT);
+}
+
+static inline unsigned long folio_page_vma_walk_pfn(const struct folio *folio)
+{
+ return page_vma_walk_pfn(folio_pfn(folio));
+}
+
+static inline struct page *page_vma_walk_pfn_to_page(unsigned long pvmw_pfn)
+{
+ return pfn_to_page(pvmw_pfn >> PVMW_PFN_SHIFT);
+}
+
+static inline struct folio *page_vma_walk_pfn_to_folio(unsigned long pvmw_pfn)
+{
+ return page_folio(page_vma_walk_pfn_to_page(pvmw_pfn));
+}
+
#define DEFINE_FOLIO_VMA_WALK(name, _folio, _vma, _address, _flags) \
struct page_vma_mapped_walk name = { \
- .pfn = folio_pfn(_folio), \
+ .pfn = folio_page_vma_walk_pfn(_folio), \
.nr_pages = folio_nr_pages(_folio), \
.pgoff = folio_pgoff(_folio), \
.vma = _vma, \
diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c
index c498a91b6706..9146bd084435 100644
--- a/mm/page_vma_mapped.c
+++ b/mm/page_vma_mapped.c
@@ -133,9 +133,9 @@ static bool check_pte(struct page_vma_mapped_walk *pvmw, unsigned long pte_nr)
pfn = pte_pfn(ptent);
}
- if ((pfn + pte_nr - 1) < pvmw->pfn)
+ if ((pfn + pte_nr - 1) < (pvmw->pfn >> PVMW_PFN_SHIFT))
return false;
- if (pfn > (pvmw->pfn + pvmw->nr_pages - 1))
+ if (pfn > ((pvmw->pfn >> PVMW_PFN_SHIFT) + pvmw->nr_pages - 1))
return false;
return true;
}
@@ -346,7 +346,7 @@ unsigned long page_mapped_in_vma(const struct page *page,
{
const struct folio *folio = page_folio(page);
struct page_vma_mapped_walk pvmw = {
- .pfn = page_to_pfn(page),
+ .pfn = folio_page_vma_walk_pfn(folio),
.nr_pages = 1,
.vma = vma,
.flags = PVMW_SYNC,
diff --git a/mm/rmap.c b/mm/rmap.c
index ac4f783d6ec2..e94500318f92 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1129,7 +1129,7 @@ static bool mapping_wrprotect_range_one(struct folio *folio,
{
struct wrprotect_file_state *state = (struct wrprotect_file_state *)arg;
struct page_vma_mapped_walk pvmw = {
- .pfn = state->pfn,
+ .pfn = page_vma_walk_pfn(state->pfn),
.nr_pages = state->nr_pages,
.pgoff = state->pgoff,
.vma = vma,
@@ -1207,7 +1207,7 @@ int pfn_mkclean_range(unsigned long pfn, unsigned long nr_pages, pgoff_t pgoff,
struct vm_area_struct *vma)
{
struct page_vma_mapped_walk pvmw = {
- .pfn = pfn,
+ .pfn = page_vma_walk_pfn(pfn),
.nr_pages = nr_pages,
.pgoff = pgoff,
.vma = vma,
diff --git a/mm/vmscan.c b/mm/vmscan.c
index b2fc8b626d3d..e07ad830e30a 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -4238,7 +4238,7 @@ bool lru_gen_look_around(struct page_vma_mapped_walk *pvmw)
pte_t *pte = pvmw->pte;
unsigned long addr = pvmw->address;
struct vm_area_struct *vma = pvmw->vma;
- struct folio *folio = pfn_folio(pvmw->pfn);
+ struct folio *folio = page_vma_walk_pfn_to_folio(pvmw->pfn);
struct mem_cgroup *memcg = folio_memcg(folio);
struct pglist_data *pgdat = folio_pgdat(folio);
struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
--
2.34.1
next prev parent reply other threads:[~2025-11-28 4:42 UTC|newest]
Thread overview: 26+ messages / expand[flat|nested] mbox.gz Atom feed top
2025-11-28 4:41 [RFC PATCH 0/6] Remove device private pages from physical address space Jordan Niethe
2025-11-28 4:41 ` [RFC PATCH 1/6] mm/hmm: Add flag to track device private PFNs Jordan Niethe
2025-11-28 18:36 ` Matthew Brost
2025-12-02 1:20 ` Jordan Niethe
2025-12-03 4:25 ` Balbir Singh
2025-11-28 4:41 ` [RFC PATCH 2/6] mm/migrate_device: Add migrate PFN " Jordan Niethe
2025-11-28 4:41 ` Jordan Niethe [this message]
2025-11-28 4:41 ` [RFC PATCH 4/6] mm: Add a new swap type for migration entries with " Jordan Niethe
2025-12-01 2:43 ` Chih-En Lin
2025-12-02 1:42 ` Jordan Niethe
2025-11-28 4:41 ` [RFC PATCH 5/6] mm/util: Add flag to track device private PFNs in page snapshots Jordan Niethe
2025-11-28 4:41 ` [RFC PATCH 6/6] mm: Remove device private pages from the physical address space Jordan Niethe
2025-11-28 17:51 ` Jason Gunthorpe
2025-12-02 2:28 ` Jordan Niethe
2025-12-02 4:10 ` Alistair Popple
2025-11-28 7:40 ` [RFC PATCH 0/6] Remove device private pages from " David Hildenbrand (Red Hat)
2025-11-30 23:33 ` Alistair Popple
2025-11-28 15:09 ` Matthew Wilcox
2025-12-02 1:31 ` Jordan Niethe
2025-11-28 16:07 ` Mika Penttilä
2025-12-02 1:32 ` Jordan Niethe
2025-11-28 19:22 ` Matthew Brost
2025-11-30 23:23 ` Alistair Popple
2025-12-01 1:51 ` Matthew Brost
2025-12-02 1:40 ` Jordan Niethe
2025-12-02 22:20 ` Balbir Singh
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20251128044146.80050-4-jniethe@nvidia.com \
--to=jniethe@nvidia.com \
--cc=airlied@gmail.com \
--cc=akpm@linux-foundation.org \
--cc=apopple@nvidia.com \
--cc=balbirs@nvidia.com \
--cc=dakr@kernel.org \
--cc=david@redhat.com \
--cc=dri-devel@lists.freedesktop.org \
--cc=jgg@nvidia.com \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-mm@kvack.org \
--cc=lorenzo.stoakes@oracle.com \
--cc=lyude@redhat.com \
--cc=matthew.brost@intel.com \
--cc=mpenttil@redhat.com \
--cc=rcampbell@nvidia.com \
--cc=simona@ffwll.ch \
--cc=willy@infradead.org \
--cc=ziy@nvidia.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox