From: "Thomas Hellström" <thomas.hellstrom@linux.intel.com>
To: intel-xe@lists.freedesktop.org
Cc: "Thomas Hellström" <thomas.hellstrom@linux.intel.com>,
"Matthew Brost" <matthew.brost@intel.com>,
dri-devel@lists.freedesktop.org, himal.prasad.ghimiray@intel.com,
apopple@nvidia.com, airlied@gmail.com,
"Simona Vetter" <simona.vetter@ffwll.ch>,
felix.kuehling@amd.com,
"Christian König" <christian.koenig@amd.com>,
dakr@kernel.org, "Mrozek, Michal" <michal.mrozek@intel.com>,
"Joonas Lahtinen" <joonas.lahtinen@linux.intel.com>
Subject: [PATCH v2 02/17] drm/pagemap, drm/xe: Add refcounting to struct drm_pagemap
Date: Tue, 11 Nov 2025 17:43:52 +0100 [thread overview]
Message-ID: <20251111164408.113070-3-thomas.hellstrom@linux.intel.com> (raw)
In-Reply-To: <20251111164408.113070-1-thomas.hellstrom@linux.intel.com>
With the end goal of being able to free unused pagemaps
and allocate them on demand, add a refcount to struct drm_pagemap,
remove the xe embedded drm_pagemap, allocating and freeing it
explicitly.
v2:
- Make the drm_pagemap pointer in drm_gpusvm_pages reference-counted.
Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com> #v1
---
drivers/gpu/drm/drm_gpusvm.c | 4 ++-
drivers/gpu/drm/drm_pagemap.c | 51 ++++++++++++++++++++++++++++++
drivers/gpu/drm/xe/xe_svm.c | 26 ++++++++++-----
drivers/gpu/drm/xe/xe_vram_types.h | 2 +-
include/drm/drm_pagemap.h | 36 +++++++++++++++++++++
5 files changed, 109 insertions(+), 10 deletions(-)
diff --git a/drivers/gpu/drm/drm_gpusvm.c b/drivers/gpu/drm/drm_gpusvm.c
index 73e550c8ff8c..1f96375d1f2b 100644
--- a/drivers/gpu/drm/drm_gpusvm.c
+++ b/drivers/gpu/drm/drm_gpusvm.c
@@ -1038,6 +1038,7 @@ static void __drm_gpusvm_unmap_pages(struct drm_gpusvm *gpusvm,
flags.has_dma_mapping = false;
WRITE_ONCE(svm_pages->flags.__flags, flags.__flags);
+ drm_pagemap_put(svm_pages->dpagemap);
svm_pages->dpagemap = NULL;
}
}
@@ -1431,7 +1432,8 @@ int drm_gpusvm_get_pages(struct drm_gpusvm *gpusvm,
if (pagemap) {
flags.has_devmem_pages = true;
- svm_pages->dpagemap = dpagemap;
+ drm_pagemap_put(svm_pages->dpagemap);
+ svm_pages->dpagemap = drm_pagemap_get(dpagemap);
}
/* WRITE_ONCE pairs with READ_ONCE for opportunistic checks */
diff --git a/drivers/gpu/drm/drm_pagemap.c b/drivers/gpu/drm/drm_pagemap.c
index 22c44807e3fe..4b8692f0b2a2 100644
--- a/drivers/gpu/drm/drm_pagemap.c
+++ b/drivers/gpu/drm/drm_pagemap.c
@@ -538,6 +538,57 @@ static int drm_pagemap_migrate_populate_ram_pfn(struct vm_area_struct *vas,
return -ENOMEM;
}
+static void drm_pagemap_release(struct kref *ref)
+{
+ struct drm_pagemap *dpagemap = container_of(ref, typeof(*dpagemap), ref);
+
+ kfree(dpagemap);
+}
+
+/**
+ * drm_pagemap_create() - Create a struct drm_pagemap.
+ * @dev: Pointer to a struct device providing the device-private memory.
+ * @pagemap: Pointer to a pre-setup struct dev_pagemap providing the struct pages.
+ * @ops: Pointer to the struct drm_pagemap_ops.
+ *
+ * Allocate and initialize a struct drm_pagemap.
+ *
+ * Return: A refcounted pointer to a struct drm_pagemap on success.
+ * Error pointer on error.
+ */
+struct drm_pagemap *
+drm_pagemap_create(struct device *dev,
+ struct dev_pagemap *pagemap,
+ const struct drm_pagemap_ops *ops)
+{
+ struct drm_pagemap *dpagemap = kzalloc(sizeof(*dpagemap), GFP_KERNEL);
+
+ if (!dpagemap)
+ return ERR_PTR(-ENOMEM);
+
+ kref_init(&dpagemap->ref);
+ dpagemap->dev = dev;
+ dpagemap->ops = ops;
+ dpagemap->pagemap = pagemap;
+
+ return dpagemap;
+}
+EXPORT_SYMBOL(drm_pagemap_create);
+
+/**
+ * drm_pagemap_put() - Put a struct drm_pagemap reference
+ * @dpagemap: Pointer to a struct drm_pagemap object.
+ *
+ * Puts a struct drm_pagemap reference and frees the drm_pagemap object
+ * if the refcount reaches zero.
+ */
+void drm_pagemap_put(struct drm_pagemap *dpagemap)
+{
+ if (likely(dpagemap))
+ kref_put(&dpagemap->ref, drm_pagemap_release);
+}
+EXPORT_SYMBOL(drm_pagemap_put);
+
/**
* drm_pagemap_evict_to_ram() - Evict GPU SVM range to RAM
* @devmem_allocation: Pointer to the device memory allocation
diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c
index 894e8f092e3f..a3f97cf9c254 100644
--- a/drivers/gpu/drm/xe/xe_svm.c
+++ b/drivers/gpu/drm/xe/xe_svm.c
@@ -860,7 +860,7 @@ static int xe_drm_pagemap_populate_mm(struct drm_pagemap *dpagemap,
struct mm_struct *mm,
unsigned long timeslice_ms)
{
- struct xe_vram_region *vr = container_of(dpagemap, typeof(*vr), dpagemap);
+ struct xe_vram_region *vr = container_of(dpagemap->pagemap, typeof(*vr), pagemap);
struct xe_device *xe = vr->xe;
struct device *dev = xe->drm.dev;
struct drm_buddy_block *block;
@@ -1371,7 +1371,7 @@ u8 xe_svm_ranges_zap_ptes_in_range(struct xe_vm *vm, u64 start, u64 end)
static struct drm_pagemap *tile_local_pagemap(struct xe_tile *tile)
{
- return &tile->mem.vram->dpagemap;
+ return tile->mem.vram->dpagemap;
}
/**
@@ -1481,6 +1481,15 @@ int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr)
return ret;
}
+ vr->dpagemap = drm_pagemap_create(dev, &vr->pagemap,
+ &xe_drm_pagemap_ops);
+ if (IS_ERR(vr->dpagemap)) {
+ drm_err(&xe->drm, "Failed to create drm_pagemap tile %d memory: %pe\n",
+ tile->id, vr->dpagemap);
+ ret = PTR_ERR(vr->dpagemap);
+ goto out_no_dpagemap;
+ }
+
vr->pagemap.type = MEMORY_DEVICE_PRIVATE;
vr->pagemap.range.start = res->start;
vr->pagemap.range.end = res->end;
@@ -1488,22 +1497,23 @@ int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr)
vr->pagemap.ops = drm_pagemap_pagemap_ops_get();
vr->pagemap.owner = xe_svm_devm_owner(xe);
addr = devm_memremap_pages(dev, &vr->pagemap);
-
- vr->dpagemap.dev = dev;
- vr->dpagemap.ops = &xe_drm_pagemap_ops;
-
if (IS_ERR(addr)) {
- devm_release_mem_region(dev, res->start, resource_size(res));
ret = PTR_ERR(addr);
drm_err(&xe->drm, "Failed to remap tile %d memory, errno %pe\n",
tile->id, ERR_PTR(ret));
- return ret;
+ goto out_failed_memremap;
}
vr->hpa_base = res->start;
drm_dbg(&xe->drm, "Added tile %d memory [%llx-%llx] to devm, remapped to %pr\n",
tile->id, vr->io_start, vr->io_start + vr->usable_size, res);
return 0;
+
+out_failed_memremap:
+ drm_pagemap_put(vr->dpagemap);
+out_no_dpagemap:
+ devm_release_mem_region(dev, res->start, resource_size(res));
+ return ret;
}
#else
int xe_svm_alloc_vram(struct xe_tile *tile,
diff --git a/drivers/gpu/drm/xe/xe_vram_types.h b/drivers/gpu/drm/xe/xe_vram_types.h
index 83772dcbf1af..c0d2c5ee8c10 100644
--- a/drivers/gpu/drm/xe/xe_vram_types.h
+++ b/drivers/gpu/drm/xe/xe_vram_types.h
@@ -72,7 +72,7 @@ struct xe_vram_region {
* @dpagemap: The struct drm_pagemap of the ZONE_DEVICE memory
* pages of this tile.
*/
- struct drm_pagemap dpagemap;
+ struct drm_pagemap *dpagemap;
/**
* @hpa_base: base host physical address
*
diff --git a/include/drm/drm_pagemap.h b/include/drm/drm_pagemap.h
index f6e7e234c089..2c7de928865b 100644
--- a/include/drm/drm_pagemap.h
+++ b/include/drm/drm_pagemap.h
@@ -129,11 +129,15 @@ struct drm_pagemap_ops {
* struct drm_pagemap: Additional information for a struct dev_pagemap
* used for device p2p handshaking.
* @ops: The struct drm_pagemap_ops.
+ * @ref: Reference count.
* @dev: The struct drevice owning the device-private memory.
+ * @pagemap: Pointer to the underlying dev_pagemap.
*/
struct drm_pagemap {
const struct drm_pagemap_ops *ops;
+ struct kref ref;
struct device *dev;
+ struct dev_pagemap *pagemap;
};
struct drm_pagemap_devmem;
@@ -202,6 +206,37 @@ struct drm_pagemap_devmem_ops {
unsigned long npages);
};
+struct drm_pagemap *drm_pagemap_create(struct device *dev,
+ struct dev_pagemap *pagemap,
+ const struct drm_pagemap_ops *ops);
+
+#if IS_ENABLED(CONFIG_DRM_GPUSVM)
+
+void drm_pagemap_put(struct drm_pagemap *dpagemap);
+
+#else
+
+static inline void drm_pagemap_put(struct drm_pagemap *dpagemap)
+{
+}
+
+#endif /* IS_ENABLED(CONFIG_DRM_GPUSVM) */
+
+/**
+ * drm_pagemap_get() - Obtain a reference on a struct drm_pagemap
+ * @dpagemap: Pointer to the struct drm_pagemap.
+ *
+ * Return: Pointer to the struct drm_pagemap.
+ */
+static inline struct drm_pagemap *
+drm_pagemap_get(struct drm_pagemap *dpagemap)
+{
+ if (likely(dpagemap))
+ kref_get(&dpagemap->ref);
+
+ return dpagemap;
+}
+
/**
* struct drm_pagemap_devmem - Structure representing a GPU SVM device memory allocation
*
@@ -246,3 +281,4 @@ int drm_pagemap_populate_mm(struct drm_pagemap *dpagemap,
unsigned long timeslice_ms);
#endif
+
--
2.51.1
next prev parent reply other threads:[~2025-11-11 16:44 UTC|newest]
Thread overview: 33+ messages / expand[flat|nested] mbox.gz Atom feed top
2025-11-11 16:43 [PATCH v2 00/17] Dynamic drm_pagemaps and Initial multi-device SVM Thomas Hellström
2025-11-11 16:43 ` [PATCH v2 01/17] drm/xe/svm: Fix a debug printout Thomas Hellström
2025-11-12 4:29 ` Ghimiray, Himal Prasad
2025-11-11 16:43 ` Thomas Hellström [this message]
2025-11-12 6:07 ` [PATCH v2 02/17] drm/pagemap, drm/xe: Add refcounting to struct drm_pagemap Ghimiray, Himal Prasad
2025-11-21 10:19 ` Thomas Hellström
2025-11-11 16:43 ` [PATCH v2 03/17] drm/pagemap: Add a refcounted drm_pagemap backpointer to struct drm_pagemap_zdd Thomas Hellström
2025-11-11 16:43 ` [PATCH v2 04/17] drm/pagemap, drm/xe: Manage drm_pagemap provider lifetimes Thomas Hellström
2025-11-18 0:44 ` Matthew Brost
2025-11-11 16:43 ` [PATCH v2 05/17] drm/pagemap: Add a drm_pagemap cache and shrinker Thomas Hellström
2025-11-19 19:28 ` Matthew Brost
2025-11-11 16:43 ` [PATCH v2 06/17] drm/xe: Use the " Thomas Hellström
2025-11-11 16:43 ` [PATCH v2 07/17] drm/pagemap: Remove the drm_pagemap_create() interface Thomas Hellström
2025-11-11 16:43 ` [PATCH v2 08/17] drm/pagemap_util: Add a utility to assign an owner to a set of interconnected gpus Thomas Hellström
2025-11-11 16:43 ` [PATCH v2 09/17] drm/xe: Use the drm_pagemap_util helper to get a svm pagemap owner Thomas Hellström
2025-11-11 16:44 ` [PATCH v2 10/17] drm/xe: Pass a drm_pagemap pointer around with the memory advise attributes Thomas Hellström
2025-11-11 16:44 ` [PATCH v2 11/17] drm/xe: Use the vma attibute drm_pagemap to select where to migrate Thomas Hellström
2025-11-12 5:22 ` kernel test robot
2025-11-12 7:16 ` kernel test robot
2025-11-13 4:51 ` kernel test robot
2025-11-11 16:44 ` [PATCH v2 12/17] drm/xe: Simplify madvise_preferred_mem_loc() Thomas Hellström
2025-11-11 16:44 ` [PATCH v2 13/17] drm/xe/uapi: Extend the madvise functionality to support foreign pagemap placement for svm Thomas Hellström
2025-11-11 16:44 ` [PATCH v2 14/17] drm/xe: Support pcie p2p dma as a fast interconnect Thomas Hellström
2025-11-11 16:44 ` [PATCH v2 15/17] drm/xe/vm: Add a couple of VM debug printouts Thomas Hellström
2025-11-11 16:44 ` [PATCH v2 16/17] drm/pagemap, drm/xe: Support migration over interconnect Thomas Hellström
2025-11-11 16:44 ` [PATCH v2 17/17] drm/xe/svm: Document how xe keeps drm_pagemap references Thomas Hellström
2025-11-18 0:49 ` Matthew Brost
2025-11-11 17:07 ` ✗ CI.checkpatch: warning for Dynamic drm_pagemaps and Initial multi-device SVM (rev2) Patchwork
2025-11-11 17:08 ` ✓ CI.KUnit: success " Patchwork
2025-11-11 17:45 ` ✓ Xe.CI.BAT: " Patchwork
2025-11-12 2:53 ` ✗ Xe.CI.Full: failure " Patchwork
2025-11-18 6:15 ` [PATCH v2 00/17] Dynamic drm_pagemaps and Initial multi-device SVM Alistair Popple
2025-11-18 9:31 ` Thomas Hellström
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20251111164408.113070-3-thomas.hellstrom@linux.intel.com \
--to=thomas.hellstrom@linux.intel.com \
--cc=airlied@gmail.com \
--cc=apopple@nvidia.com \
--cc=christian.koenig@amd.com \
--cc=dakr@kernel.org \
--cc=dri-devel@lists.freedesktop.org \
--cc=felix.kuehling@amd.com \
--cc=himal.prasad.ghimiray@intel.com \
--cc=intel-xe@lists.freedesktop.org \
--cc=joonas.lahtinen@linux.intel.com \
--cc=matthew.brost@intel.com \
--cc=michal.mrozek@intel.com \
--cc=simona.vetter@ffwll.ch \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).