From: Balbir Singh <balbirs@nvidia.com>
To: linux-mm@kvack.org
Cc: linux-kernel@vger.kernel.org, "Balbir Singh" <balbirs@nvidia.com>,
"Karol Herbst" <kherbst@redhat.com>,
"Lyude Paul" <lyude@redhat.com>,
"Danilo Krummrich" <dakr@kernel.org>,
"David Airlie" <airlied@gmail.com>,
"Simona Vetter" <simona@ffwll.ch>,
"Jérôme Glisse" <jglisse@redhat.com>,
"Shuah Khan" <shuah@kernel.org>,
"David Hildenbrand" <david@redhat.com>,
"Barry Song" <baohua@kernel.org>,
"Baolin Wang" <baolin.wang@linux.alibaba.com>,
"Ryan Roberts" <ryan.roberts@arm.com>,
"Matthew Wilcox" <willy@infradead.org>,
"Peter Xu" <peterx@redhat.com>, "Zi Yan" <ziy@nvidia.com>,
"Kefeng Wang" <wangkefeng.wang@huawei.com>,
"Jane Chu" <jane.chu@oracle.com>,
"Alistair Popple" <apopple@nvidia.com>,
"Donet Tom" <donettom@linux.ibm.com>,
"Ralph Campbell" <rcampbell@nvidia.com>,
"Mika Penttilä" <mpenttil@redhat.com>,
"Matthew Brost" <matthew.brost@intel.com>,
"Francois Dugast" <francois.dugast@intel.com>
Subject: [v2 06/11] mm/memremap: add folio_split support
Date: Wed, 30 Jul 2025 19:21:34 +1000
Message-ID: <20250730092139.3890844-7-balbirs@nvidia.com>
In-Reply-To: <20250730092139.3890844-1-balbirs@nvidia.com>
When a zone device folio is split (via a huge PMD folio split), the
folio_split driver callback is invoked to let the device driver know
that the folio has been split into smaller-order folios.
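For illustration, a minimal driver-side sketch of such a callback
(the my_drv_* names are placeholders, not part of this patch; the
head/tail semantics follow zone_device_private_split_cb() below,
where a NULL tail marks the end of the split):

	/*
	 * Hypothetical sketch of a driver folio_split callback.
	 */
	static void my_drv_folio_split(struct folio *head, struct folio *tail)
	{
		if (!tail) {
			/* A NULL tail marks the end of the split. */
			return;
		}
		/*
		 * Carry the pgmap and mapping over to the new folio,
		 * as zone_device_private_split_cb() does by default
		 * when no callback is registered.
		 */
		tail->pgmap = head->pgmap;
		tail->page.mapping = head->page.mapping;
	}

	static const struct dev_pagemap_ops my_drv_devmem_ops = {
		/* .page_free and .migrate_to_ram handlers elided */
		.folio_split	= my_drv_folio_split,
	};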
The HMM test driver has been updated to handle the split. Since the
test driver uses backing pages (backing pages are used to create a
mirror device), it requires a mechanism for reorganizing the backing
pages back into pages of the right order. This is supported by
exporting prep_compound_page().
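As a hedged sketch of how the exported helper could be used by the
test driver (new_head and new_order are placeholder names, not taken
from this patch):

	/*
	 * Hypothetical: when the split produces folios of a smaller
	 * but still non-zero order, each new head among the backing
	 * pages would be re-initialized as a compound page of that
	 * order.
	 */
	if (new_order)
		prep_compound_page(new_head, new_order);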
Cc: Karol Herbst <kherbst@redhat.com>
Cc: Lyude Paul <lyude@redhat.com>
Cc: Danilo Krummrich <dakr@kernel.org>
Cc: David Airlie <airlied@gmail.com>
Cc: Simona Vetter <simona@ffwll.ch>
Cc: "Jérôme Glisse" <jglisse@redhat.com>
Cc: Shuah Khan <shuah@kernel.org>
Cc: David Hildenbrand <david@redhat.com>
Cc: Barry Song <baohua@kernel.org>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Peter Xu <peterx@redhat.com>
Cc: Zi Yan <ziy@nvidia.com>
Cc: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: Jane Chu <jane.chu@oracle.com>
Cc: Alistair Popple <apopple@nvidia.com>
Cc: Donet Tom <donettom@linux.ibm.com>
Cc: Ralph Campbell <rcampbell@nvidia.com>
Cc: Mika Penttilä <mpenttil@redhat.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Cc: Francois Dugast <francois.dugast@intel.com>
Signed-off-by: Balbir Singh <balbirs@nvidia.com>
---
include/linux/memremap.h | 29 +++++++++++++++++++++++++++++
include/linux/mm.h | 1 +
lib/test_hmm.c | 35 +++++++++++++++++++++++++++++++++++
mm/huge_memory.c | 9 ++++++++-
4 files changed, 73 insertions(+), 1 deletion(-)
diff --git a/include/linux/memremap.h b/include/linux/memremap.h
index 0c5141a7d58c..20f4b5ebbc93 100644
--- a/include/linux/memremap.h
+++ b/include/linux/memremap.h
@@ -100,6 +100,13 @@ struct dev_pagemap_ops {
*/
int (*memory_failure)(struct dev_pagemap *pgmap, unsigned long pfn,
unsigned long nr_pages, int mf_flags);
+
+ /*
+ * Used for private (un-addressable) device memory only.
+ * This callback is invoked when a folio is split into
+ * smaller folios.
+ */
+ void (*folio_split)(struct folio *head, struct folio *tail);
};
#define PGMAP_ALTMAP_VALID (1 << 0)
@@ -229,6 +236,23 @@ static inline void zone_device_page_init(struct page *page)
zone_device_folio_init(folio, 0);
}
+static inline void zone_device_private_split_cb(struct folio *original_folio,
+ struct folio *new_folio)
+{
+ if (folio_is_device_private(original_folio)) {
+ if (!original_folio->pgmap->ops->folio_split) {
+ if (new_folio) {
+ new_folio->pgmap = original_folio->pgmap;
+ new_folio->page.mapping =
+ original_folio->page.mapping;
+ }
+ } else {
+ original_folio->pgmap->ops->folio_split(original_folio,
+ new_folio);
+ }
+ }
+}
+
#else
static inline void *devm_memremap_pages(struct device *dev,
struct dev_pagemap *pgmap)
@@ -263,6 +287,11 @@ static inline unsigned long memremap_compat_align(void)
{
return PAGE_SIZE;
}
+
+static inline void zone_device_private_split_cb(struct folio *original_folio,
+ struct folio *new_folio)
+{
+}
#endif /* CONFIG_ZONE_DEVICE */
static inline void put_dev_pagemap(struct dev_pagemap *pgmap)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 8e3a4c5b78ff..d0ecf8386dd9 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1183,6 +1183,7 @@ static inline struct folio *virt_to_folio(const void *x)
void __folio_put(struct folio *folio);
void split_page(struct page *page, unsigned int order);
+void prep_compound_page(struct page *page, unsigned int order);
void folio_copy(struct folio *dst, struct folio *src);
int folio_mc_copy(struct folio *dst, struct folio *src);
diff --git a/lib/test_hmm.c b/lib/test_hmm.c
index 4850f9026694..a8d0d24b4b7a 100644
--- a/lib/test_hmm.c
+++ b/lib/test_hmm.c
@@ -1653,9 +1653,44 @@ static vm_fault_t dmirror_devmem_fault(struct vm_fault *vmf)
return ret;
}
+static void dmirror_devmem_folio_split(struct folio *head, struct folio *tail)
+{
+ struct page *rpage = BACKING_PAGE(folio_page(head, 0));
+ struct page *rpage_tail;
+ struct folio *rfolio;
+ unsigned long offset = 0;
+
+ if (!rpage) {
+ tail->page.zone_device_data = NULL;
+ return;
+ }
+
+ rfolio = page_folio(rpage);
+
+ if (tail == NULL) {
+ folio_reset_order(rfolio);
+ rfolio->mapping = NULL;
+ folio_set_count(rfolio, 1);
+ return;
+ }
+
+ offset = folio_pfn(tail) - folio_pfn(head);
+
+ rpage_tail = folio_page(rfolio, offset);
+ tail->page.zone_device_data = rpage_tail;
+ rpage_tail->zone_device_data = rpage->zone_device_data;
+ clear_compound_head(rpage_tail);
+ rpage_tail->mapping = NULL;
+
+ folio_page(tail, 0)->mapping = folio_page(head, 0)->mapping;
+ tail->pgmap = head->pgmap;
+ folio_set_count(page_folio(rpage_tail), 1);
+}
+
static const struct dev_pagemap_ops dmirror_devmem_ops = {
.page_free = dmirror_devmem_free,
.migrate_to_ram = dmirror_devmem_fault,
+ .folio_split = dmirror_devmem_folio_split,
};
static int dmirror_device_init(struct dmirror_device *mdevice, int id)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 713dd433d352..75b368e7e33f 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2955,9 +2955,16 @@ int split_device_private_folio(struct folio *folio)
VM_WARN_ON(ret);
for (new_folio = folio_next(folio); new_folio != end_folio;
new_folio = folio_next(new_folio)) {
+ zone_device_private_split_cb(folio, new_folio);
folio_ref_unfreeze(new_folio, 1 + folio_expected_ref_count(
new_folio));
}
+
+ /*
+ * Mark the end of the split for a device private THP by
+ * invoking the callback with a NULL tail folio.
+ */
+ zone_device_private_split_cb(folio, NULL);
folio_ref_unfreeze(folio, 1 + folio_expected_ref_count(folio));
return ret;
}
@@ -3979,7 +3986,6 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
ret = __split_unmapped_folio(folio, new_order, split_at, &xas,
mapping, uniform_split);
-
/*
* Unfreeze after-split folios and put them back to the right
* list. @folio should be kept frozen until page cache
@@ -4030,6 +4036,7 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
__filemap_remove_folio(new_folio, NULL);
folio_put_refs(new_folio, nr_pages);
}
+
/*
* Unfreeze @folio only after all page cache entries, which
* used to point to it, have been updated with new folios.
--
2.50.1