From: Matthew Brost <matthew.brost@intel.com>
To: intel-xe@lists.freedesktop.org, dri-devel@lists.freedesktop.org
Cc: leonro@nvidia.com, jgg@ziepe.ca, francois.dugast@intel.com,
thomas.hellstrom@linux.intel.com,
himal.prasad.ghimiray@intel.com
Subject: [PATCH v5 1/5] drm/pagemap: Add helper to access zone_device_data
Date: Thu, 19 Feb 2026 12:10:53 -0800 [thread overview]
Message-ID: <20260219201057.1010391-2-matthew.brost@intel.com> (raw)
In-Reply-To: <20260219201057.1010391-1-matthew.brost@intel.com>
From: Francois Dugast <francois.dugast@intel.com>
This new helper ensures that all accesses to zone_device_data use the
correct API, whether or not the page is part of a folio.
Suggested-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Signed-off-by: Francois Dugast <francois.dugast@intel.com>
---
drivers/gpu/drm/drm_gpusvm.c | 7 +++++--
drivers/gpu/drm/drm_pagemap.c | 21 ++++++++++++---------
include/drm/drm_pagemap.h | 14 ++++++++++++++
3 files changed, 31 insertions(+), 11 deletions(-)
diff --git a/drivers/gpu/drm/drm_gpusvm.c b/drivers/gpu/drm/drm_gpusvm.c
index 81626b00b755..1490d1929b1a 100644
--- a/drivers/gpu/drm/drm_gpusvm.c
+++ b/drivers/gpu/drm/drm_gpusvm.c
@@ -1488,12 +1488,15 @@ int drm_gpusvm_get_pages(struct drm_gpusvm *gpusvm,
order = drm_gpusvm_hmm_pfn_to_order(pfns[i], i, npages);
if (is_device_private_page(page) ||
is_device_coherent_page(page)) {
+ struct drm_pagemap_zdd *__zdd =
+ drm_pagemap_page_zone_device_data(page);
+
if (!ctx->allow_mixed &&
- zdd != page->zone_device_data && i > 0) {
+ zdd != __zdd && i > 0) {
err = -EOPNOTSUPP;
goto err_unmap;
}
- zdd = page->zone_device_data;
+ zdd = __zdd;
if (pagemap != page_pgmap(page)) {
if (pagemap) {
err = -EOPNOTSUPP;
diff --git a/drivers/gpu/drm/drm_pagemap.c b/drivers/gpu/drm/drm_pagemap.c
index f83a76f7f37c..01a06d1fd1a0 100644
--- a/drivers/gpu/drm/drm_pagemap.c
+++ b/drivers/gpu/drm/drm_pagemap.c
@@ -244,7 +244,7 @@ static int drm_pagemap_migrate_map_pages(struct device *dev,
order = folio_order(folio);
if (is_device_private_page(page)) {
- struct drm_pagemap_zdd *zdd = page->zone_device_data;
+ struct drm_pagemap_zdd *zdd = drm_pagemap_page_zone_device_data(page);
struct drm_pagemap *dpagemap = zdd->dpagemap;
struct drm_pagemap_addr addr;
@@ -315,7 +315,7 @@ static void drm_pagemap_migrate_unmap_pages(struct device *dev,
goto next;
if (is_zone_device_page(page)) {
- struct drm_pagemap_zdd *zdd = page->zone_device_data;
+ struct drm_pagemap_zdd *zdd = drm_pagemap_page_zone_device_data(page);
struct drm_pagemap *dpagemap = zdd->dpagemap;
dpagemap->ops->device_unmap(dpagemap, dev, &pagemap_addr[i]);
@@ -593,7 +593,8 @@ int drm_pagemap_migrate_to_devmem(struct drm_pagemap_devmem *devmem_allocation,
pages[i] = NULL;
if (src_page && is_device_private_page(src_page)) {
- struct drm_pagemap_zdd *src_zdd = src_page->zone_device_data;
+ struct drm_pagemap_zdd *src_zdd =
+ drm_pagemap_page_zone_device_data(src_page);
if (page_pgmap(src_page) == pagemap &&
!mdetails->can_migrate_same_pagemap) {
@@ -715,8 +716,8 @@ static int drm_pagemap_migrate_populate_ram_pfn(struct vm_area_struct *vas,
goto next;
if (fault_page) {
- if (src_page->zone_device_data !=
- fault_page->zone_device_data)
+ if (drm_pagemap_page_zone_device_data(src_page) !=
+ drm_pagemap_page_zone_device_data(fault_page))
goto next;
}
@@ -1057,7 +1058,7 @@ static int __drm_pagemap_migrate_to_ram(struct vm_area_struct *vas,
void *buf;
int i, err = 0;
- zdd = page->zone_device_data;
+ zdd = drm_pagemap_page_zone_device_data(page);
if (time_before64(get_jiffies_64(), zdd->devmem_allocation->timeslice_expiration))
return 0;
@@ -1140,7 +1141,9 @@ static int __drm_pagemap_migrate_to_ram(struct vm_area_struct *vas,
*/
static void drm_pagemap_folio_free(struct folio *folio)
{
- drm_pagemap_zdd_put(folio->page.zone_device_data);
+ struct page *page = folio_page(folio, 0);
+
+ drm_pagemap_zdd_put(drm_pagemap_page_zone_device_data(page));
}
/**
@@ -1156,7 +1159,7 @@ static void drm_pagemap_folio_free(struct folio *folio)
*/
static vm_fault_t drm_pagemap_migrate_to_ram(struct vm_fault *vmf)
{
- struct drm_pagemap_zdd *zdd = vmf->page->zone_device_data;
+ struct drm_pagemap_zdd *zdd = drm_pagemap_page_zone_device_data(vmf->page);
int err;
err = __drm_pagemap_migrate_to_ram(vmf->vma,
@@ -1222,7 +1225,7 @@ EXPORT_SYMBOL_GPL(drm_pagemap_devmem_init);
*/
struct drm_pagemap *drm_pagemap_page_to_dpagemap(struct page *page)
{
- struct drm_pagemap_zdd *zdd = page->zone_device_data;
+ struct drm_pagemap_zdd *zdd = drm_pagemap_page_zone_device_data(page);
return zdd->devmem_allocation->dpagemap;
}
diff --git a/include/drm/drm_pagemap.h b/include/drm/drm_pagemap.h
index c848f578e3da..72f6828f2604 100644
--- a/include/drm/drm_pagemap.h
+++ b/include/drm/drm_pagemap.h
@@ -4,6 +4,7 @@
#include <linux/dma-direction.h>
#include <linux/hmm.h>
+#include <linux/memremap.h>
#include <linux/types.h>
#define NR_PAGES(order) (1U << (order))
@@ -341,6 +342,19 @@ struct drm_pagemap_migrate_details {
u32 source_peer_migrates : 1;
};
+/**
+ * drm_pagemap_page_zone_device_data() - Page to zone_device_data
+ * @page: Pointer to the page
+ *
+ * Return: Page's zone_device_data
+ */
+static inline struct drm_pagemap_zdd *drm_pagemap_page_zone_device_data(struct page *page)
+{
+ struct folio *folio = page_folio(page);
+
+ return folio_zone_device_data(folio);
+}
+
#if IS_ENABLED(CONFIG_ZONE_DEVICE)
int drm_pagemap_migrate_to_devmem(struct drm_pagemap_devmem *devmem_allocation,
--
2.34.1
next prev parent reply other threads:[~2026-02-19 20:11 UTC|newest]
Thread overview: 14+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-02-19 20:10 [PATCH v5 0/5] Use new dma-map IOVA alloc, link, and sync API in GPU SVM and DRM pagemap Matthew Brost
2026-02-19 20:10 ` Matthew Brost [this message]
2026-02-19 20:10 ` [PATCH v5 2/5] drm/gpusvm: Use dma-map IOVA alloc, link, and sync API in GPU SVM Matthew Brost
2026-02-19 20:10 ` [PATCH v5 3/5] drm/pagemap: Drop source_peer_migrates flag and assume true Matthew Brost
2026-02-19 20:53 ` Matthew Brost
2026-04-02 10:33 ` Francois Dugast
2026-02-19 20:10 ` [PATCH v5 4/5] drm/pagemap: Split drm_pagemap_migrate_map_pages into device / system Matthew Brost
2026-04-02 14:12 ` Francois Dugast
2026-02-19 20:10 ` [PATCH v5 5/5] drm/pagemap: Use dma-map IOVA alloc, link, and sync API for DRM pagemap Matthew Brost
2026-04-02 15:59 ` Francois Dugast
2026-04-08 16:46 ` Matthew Brost
2026-02-19 20:18 ` ✓ CI.KUnit: success for Use new dma-map IOVA alloc, link, and sync API in GPU SVM and DRM pagemap (rev5) Patchwork
2026-02-20 8:47 ` ✓ Xe.CI.BAT: " Patchwork
2026-02-20 14:26 ` ✗ Xe.CI.FULL: failure " Patchwork
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260219201057.1010391-2-matthew.brost@intel.com \
--to=matthew.brost@intel.com \
--cc=dri-devel@lists.freedesktop.org \
--cc=francois.dugast@intel.com \
--cc=himal.prasad.ghimiray@intel.com \
--cc=intel-xe@lists.freedesktop.org \
--cc=jgg@ziepe.ca \
--cc=leonro@nvidia.com \
--cc=thomas.hellstrom@linux.intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox