From: "Thomas Hellström" <thomas.hellstrom@linux.intel.com>
To: intel-xe@lists.freedesktop.org
Cc: "Thomas Hellström" <thomas.hellstrom@linux.intel.com>,
	dri-devel@lists.freedesktop.org, himal.prasad.ghimiray@intel.com,
	apopple@nvidia.com, airlied@gmail.com,
	"Simona Vetter" <simona.vetter@ffwll.ch>,
	felix.kuehling@amd.com, "Matthew Brost" <matthew.brost@intel.com>,
	"Christian König" <christian.koenig@amd.com>,
	dakr@kernel.org, "Mrozek, Michal" <michal.mrozek@intel.com>,
	"Joonas Lahtinen" <joonas.lahtinen@linux.intel.com>
Subject: [PATCH v4 18/22] drm/pagemap, drm/xe: Clean up the use of the device-private page owner
Date: Thu, 11 Dec 2025 17:59:05 +0100
Message-ID: <20251211165909.219710-19-thomas.hellstrom@linux.intel.com>
In-Reply-To: <20251211165909.219710-1-thomas.hellstrom@linux.intel.com>

Derive the device-private page owner from the dev_pagemap->owner field
wherever possible instead of passing it around explicitly. This drops
the pgmap_owner argument from drm_pagemap_migrate_to_devmem() and the
device_private_page_owner field from struct drm_pagemap_zdd,
simplifying the code slightly.
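
For example (a minimal sketch mirroring the xe_svm.c hunk below), a
caller no longer needs to look up and pass the owner:

	/* Before: the caller plumbed the owner through explicitly. */
	err = drm_pagemap_migrate_to_devmem(&bo->devmem_allocation, mm,
					    start, end, timeslice_ms,
					    xpagemap->pagemap.owner);

	/*
	 * After: drm_pagemap_migrate_to_devmem() derives the owner
	 * internally from devmem_allocation->dpagemap->pagemap->owner.
	 */
	err = drm_pagemap_migrate_to_devmem(&bo->devmem_allocation, mm,
					    start, end, timeslice_ms);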

v3: New patch

Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
---
 drivers/gpu/drm/drm_pagemap.c | 32 +++++++++++---------------------
 drivers/gpu/drm/xe/xe_svm.c   |  3 +--
 include/drm/drm_pagemap.h     |  3 +--
 3 files changed, 13 insertions(+), 25 deletions(-)

diff --git a/drivers/gpu/drm/drm_pagemap.c b/drivers/gpu/drm/drm_pagemap.c
index 5b4337011924..77f8ea5ed802 100644
--- a/drivers/gpu/drm/drm_pagemap.c
+++ b/drivers/gpu/drm/drm_pagemap.c
@@ -65,7 +65,6 @@
  * @refcount: Reference count for the zdd
  * @devmem_allocation: device memory allocation
  * @dpagemap: Refcounted pointer to the underlying struct drm_pagemap.
- * @device_private_page_owner: Device private pages owner
  *
  * This structure serves as a generic wrapper installed in
  * page->zone_device_data. It provides infrastructure for looking up a device
@@ -78,13 +77,11 @@ struct drm_pagemap_zdd {
 	struct kref refcount;
 	struct drm_pagemap_devmem *devmem_allocation;
 	struct drm_pagemap *dpagemap;
-	void *device_private_page_owner;
 };
 
 /**
  * drm_pagemap_zdd_alloc() - Allocate a zdd structure.
  * @dpagemap: Pointer to the underlying struct drm_pagemap.
- * @device_private_page_owner: Device private pages owner
  *
  * This function allocates and initializes a new zdd structure. It sets up the
  * reference count and initializes the destroy work.
@@ -92,7 +89,7 @@ struct drm_pagemap_zdd {
  * Return: Pointer to the allocated zdd on success, ERR_PTR() on failure.
  */
 static struct drm_pagemap_zdd *
-drm_pagemap_zdd_alloc(struct drm_pagemap *dpagemap, void *device_private_page_owner)
+drm_pagemap_zdd_alloc(struct drm_pagemap *dpagemap)
 {
 	struct drm_pagemap_zdd *zdd;
 
@@ -102,7 +99,6 @@ drm_pagemap_zdd_alloc(struct drm_pagemap *dpagemap, void *device_private_page_ow
 
 	kref_init(&zdd->refcount);
 	zdd->devmem_allocation = NULL;
-	zdd->device_private_page_owner = device_private_page_owner;
 	zdd->dpagemap = drm_pagemap_get(dpagemap);
 
 	return zdd;
@@ -307,7 +303,6 @@ npages_in_range(unsigned long start, unsigned long end)
  * @end: End of the virtual address range to migrate.
  * @timeslice_ms: The time requested for the migrated pagemap pages to
  * be present in @mm before being allowed to be migrated back.
- * @pgmap_owner: Not used currently, since only system memory is considered.
  *
  * This function migrates the specified virtual address range to device memory.
  * It performs the necessary setup and invokes the driver-specific operations for
@@ -325,14 +320,15 @@ npages_in_range(unsigned long start, unsigned long end)
 int drm_pagemap_migrate_to_devmem(struct drm_pagemap_devmem *devmem_allocation,
 				  struct mm_struct *mm,
 				  unsigned long start, unsigned long end,
-				  unsigned long timeslice_ms,
-				  void *pgmap_owner)
+				  unsigned long timeslice_ms)
 {
 	const struct drm_pagemap_devmem_ops *ops = devmem_allocation->ops;
+	struct drm_pagemap *dpagemap = devmem_allocation->dpagemap;
+	struct dev_pagemap *pagemap = dpagemap->pagemap;
 	struct migrate_vma migrate = {
 		.start		= start,
 		.end		= end,
-		.pgmap_owner	= pgmap_owner,
+		.pgmap_owner	= pagemap->owner,
 		.flags		= MIGRATE_VMA_SELECT_SYSTEM,
 	};
 	unsigned long i, npages = npages_in_range(start, end);
@@ -374,7 +370,7 @@ int drm_pagemap_migrate_to_devmem(struct drm_pagemap_devmem *devmem_allocation,
 	pagemap_addr = buf + (2 * sizeof(*migrate.src) * npages);
 	pages = buf + (2 * sizeof(*migrate.src) + sizeof(*pagemap_addr)) * npages;
 
-	zdd = drm_pagemap_zdd_alloc(devmem_allocation->dpagemap, pgmap_owner);
+	zdd = drm_pagemap_zdd_alloc(dpagemap);
 	if (!zdd) {
 		err = -ENOMEM;
 		goto err_free;
@@ -787,8 +783,7 @@ EXPORT_SYMBOL_GPL(drm_pagemap_evict_to_ram);
 /**
  * __drm_pagemap_migrate_to_ram() - Migrate GPU SVM range to RAM (internal)
  * @vas: Pointer to the VM area structure
- * @device_private_page_owner: Device private pages owner
- * @page: Pointer to the page for fault handling (can be NULL)
+ * @page: Pointer to the page for fault handling.
  * @fault_addr: Fault address
  * @size: Size of migration
  *
@@ -799,14 +794,13 @@ EXPORT_SYMBOL_GPL(drm_pagemap_evict_to_ram);
  * Return: 0 on success, negative error code on failure.
  */
 static int __drm_pagemap_migrate_to_ram(struct vm_area_struct *vas,
-					void *device_private_page_owner,
 					struct page *page,
 					unsigned long fault_addr,
 					unsigned long size)
 {
 	struct migrate_vma migrate = {
 		.vma		= vas,
-		.pgmap_owner	= device_private_page_owner,
+		.pgmap_owner	= page_pgmap(page)->owner,
 		.flags		= MIGRATE_VMA_SELECT_DEVICE_PRIVATE |
 		MIGRATE_VMA_SELECT_DEVICE_COHERENT,
 		.fault_page	= page,
@@ -821,12 +815,9 @@ static int __drm_pagemap_migrate_to_ram(struct vm_area_struct *vas,
 	void *buf;
 	int i, err = 0;
 
-	if (page) {
-		zdd = page->zone_device_data;
-		if (time_before64(get_jiffies_64(),
-				  zdd->devmem_allocation->timeslice_expiration))
-			return 0;
-	}
+	zdd = page->zone_device_data;
+	if (time_before64(get_jiffies_64(), zdd->devmem_allocation->timeslice_expiration))
+		return 0;
 
 	start = ALIGN_DOWN(fault_addr, size);
 	end = ALIGN(fault_addr + 1, size);
@@ -940,7 +931,6 @@ static vm_fault_t drm_pagemap_migrate_to_ram(struct vm_fault *vmf)
 	int err;
 
 	err = __drm_pagemap_migrate_to_ram(vmf->vma,
-					   zdd->device_private_page_owner,
 					   vmf->page, vmf->address,
 					   zdd->devmem_allocation->size);
 
diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c
index 489b3e3e38dc..2fa73143fcd5 100644
--- a/drivers/gpu/drm/xe/xe_svm.c
+++ b/drivers/gpu/drm/xe/xe_svm.c
@@ -1110,8 +1110,7 @@ static int xe_drm_pagemap_populate_mm(struct drm_pagemap *dpagemap,
 		/* Ensure the device has a pm ref while there are device pages active. */
 		xe_pm_runtime_get_noresume(xe);
 		err = drm_pagemap_migrate_to_devmem(&bo->devmem_allocation, mm,
-						    start, end, timeslice_ms,
-						    xpagemap->pagemap.owner);
+						    start, end, timeslice_ms);
 		if (err)
 			xe_svm_devmem_release(&bo->devmem_allocation);
 		xe_bo_unlock(bo);
diff --git a/include/drm/drm_pagemap.h b/include/drm/drm_pagemap.h
index 78cc9011829f..f73afece42ba 100644
--- a/include/drm/drm_pagemap.h
+++ b/include/drm/drm_pagemap.h
@@ -320,8 +320,7 @@ struct drm_pagemap_devmem {
 int drm_pagemap_migrate_to_devmem(struct drm_pagemap_devmem *devmem_allocation,
 				  struct mm_struct *mm,
 				  unsigned long start, unsigned long end,
-				  unsigned long timeslice_ms,
-				  void *pgmap_owner);
+				  unsigned long timeslice_ms);
 
 int drm_pagemap_evict_to_ram(struct drm_pagemap_devmem *devmem_allocation);
 
-- 
2.51.1

