From: "Thomas Hellström" <thomas.hellstrom@linux.intel.com>
To: intel-xe@lists.freedesktop.org
Cc: "Thomas Hellström" <thomas.hellstrom@linux.intel.com>,
	dri-devel@lists.freedesktop.org, himal.prasad.ghimiray@intel.com,
	apopple@nvidia.com, airlied@gmail.com,
	"Simona Vetter" <simona.vetter@ffwll.ch>,
	felix.kuehling@amd.com, "Matthew Brost" <matthew.brost@intel.com>,
	"Christian König" <christian.koenig@amd.com>,
	dakr@kernel.org, "Mrozek, Michal" <michal.mrozek@intel.com>,
	"Joonas Lahtinen" <joonas.lahtinen@linux.intel.com>
Subject: [PATCH v4 22/22] drm/pagemap: Support source migration over interconnect
Date: Thu, 11 Dec 2025 17:59:09 +0100
Message-ID: <20251211165909.219710-23-thomas.hellstrom@linux.intel.com>
In-Reply-To: <20251211165909.219710-1-thomas.hellstrom@linux.intel.com>

Support source interconnect migration by using the copy_to_ram() op
of the source device-private pages.

Source interconnect migration is required to flush the L2 cache of
the source device, which, among other things, is a requirement for
correct global atomic operations. It also enables the source GPU to
decompress any compressed content that is not understood by peers.
Finally, for the PCIe case, writes over PCIe are expected to be
faster than reads.

The implementation could probably be improved by coalescing
subregions that share the same source; one possible shape for that
is sketched below.
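
A purely illustrative sketch of such coalescing, assuming
hypothetical bucket helpers (bucket_add(), bucket_for_each(),
copy_all() and source_of() do not exist in this patch):

	/* Group page indices by their source drm_pagemap first, then
	 * issue a single copy per source instead of one copy per
	 * contiguous same-source run. */
	for (i = 0; i < npages; ++i)
		bucket_add(source_of(src_pfns[i]), i);

	bucket_for_each(src, idx_list)
		err = copy_all(src, idx_list);	/* one copy_to_ram() or
						 * copy_to_devmem() call
						 * per source */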

Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
---
 drivers/gpu/drm/drm_pagemap.c | 200 ++++++++++++++++++++++++++++------
 1 file changed, 164 insertions(+), 36 deletions(-)
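
For review convenience, a minimal sketch of the control flow this
patch introduces (illustrative only and not part of the patch;
source_of(), flush_range(), migrate_remote_to_local() and
migrate_sys_to_dev() are made-up stand-ins for
drm_pagemap_migrate_range() and its helpers in the diff below):

	/* Dispatch on who owns the source pages of a subrange. */
	static int flush_range(struct drm_pagemap *src,
			       unsigned long start, unsigned long end)
	{
		if (start == end)
			return 0;
		if (src)
			/* Peer pages: the source device pushes them with
			 * its copy_to_ram() op, flushing its L2 cache and
			 * decompressing where needed. */
			return migrate_remote_to_local(src, start, end);
		/* System pages: the destination device pulls them in
		 * with its copy_to_devmem() op. */
		return migrate_sys_to_dev(start, end);
	}

	/* Accumulate a subrange of pages sharing the same source and
	 * flush it whenever the source changes. NULL == system memory. */
	static int migrate_by_source(const unsigned long *src_pfns,
				     unsigned long npages)
	{
		struct drm_pagemap *last_src = NULL;
		unsigned long start = 0, i;
		int err = 0;

		for (i = 0; i < npages; ++i) {
			struct drm_pagemap *src = source_of(src_pfns[i]);

			if (i == 0 || src == last_src) {
				last_src = src;
				continue;	/* Extend the subrange. */
			}
			err = flush_range(last_src, start, i);
			if (err)
				return err;
			start = i;
			last_src = src;
		}
		return flush_range(last_src, start, npages);
	}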

diff --git a/drivers/gpu/drm/drm_pagemap.c b/drivers/gpu/drm/drm_pagemap.c
index 56bedb622264..fd4b96a21d94 100644
--- a/drivers/gpu/drm/drm_pagemap.c
+++ b/drivers/gpu/drm/drm_pagemap.c
@@ -205,11 +205,11 @@ static void drm_pagemap_get_devmem_page(struct page *page,
 
 /**
  * drm_pagemap_migrate_map_pages() - Map migration pages for GPU SVM migration
- * @dev: The device for which the pages are being mapped
- * @local_dpagemap: The drm_pagemap pointer of the local drm_pagemap.
- * @pagemap_addr: Array to store DMA information corresponding to mapped pages
- * @migrate_pfn: Array of migrate page frame numbers to map
- * @npages: Number of pages to map
+ * @dev: The device performing the migration.
+ * @local_dpagemap: The drm_pagemap local to the migrating device.
+ * @pagemap_addr: Array to store DMA information corresponding to mapped pages.
+ * @migrate_pfn: Array of page frame numbers of system pages or peer pages to map.
+ * @npages: Number of system pages or peer pages to map.
  * @dir: Direction of data transfer (e.g., DMA_BIDIRECTIONAL)
  * @mdetails: Details governing the migration behaviour.
  *
@@ -228,8 +228,7 @@ static int drm_pagemap_migrate_map_pages(struct device *dev,
 					 enum dma_data_direction dir,
 					 const struct drm_pagemap_migrate_details *mdetails)
 {
-	unsigned long i;
-	unsigned long num_peer_pages = 0;
+	unsigned long num_peer_pages = 0, num_local_pages = 0, i;
 
 	for (i = 0; i < npages;) {
 		struct page *page = migrate_pfn_to_page(migrate_pfn[i]);
@@ -248,10 +247,15 @@ static int drm_pagemap_migrate_map_pages(struct device *dev,
 			struct drm_pagemap *dpagemap = zdd->dpagemap;
 			struct drm_pagemap_addr addr;
 
-			if (dpagemap == local_dpagemap && !mdetails->can_migrate_same_pagemap)
-				goto next;
+			if (dpagemap == local_dpagemap) {
+				if (!mdetails->can_migrate_same_pagemap)
+					goto next;
+
+				num_local_pages += NR_PAGES(order);
+			} else {
+				num_peer_pages += NR_PAGES(order);
+			}
 
-			num_peer_pages += NR_PAGES(order);
 			addr = dpagemap->ops->device_map(dpagemap, dev, page, order, dir);
 			if (dma_mapping_error(dev, addr.addr))
 				return -EFAULT;
@@ -275,6 +279,9 @@ static int drm_pagemap_migrate_map_pages(struct device *dev,
 	if (num_peer_pages)
 		drm_dbg(local_dpagemap->drm, "Migrating %lu peer pages over interconnect.\n",
 			num_peer_pages);
+	if (num_local_pages)
+		drm_dbg(local_dpagemap->drm, "Migrating %lu local pages over interconnect.\n",
+			num_local_pages);
 
 	return 0;
 }
@@ -327,6 +334,115 @@ npages_in_range(unsigned long start, unsigned long end)
 	return (end - start) >> PAGE_SHIFT;
 }
 
+static int
+drm_pagemap_migrate_remote_to_local(struct drm_pagemap_devmem *devmem,
+				    struct device *remote_device,
+				    struct drm_pagemap *remote_dpagemap,
+				    unsigned long local_pfns[],
+				    struct page *remote_pages[],
+				    struct drm_pagemap_addr pagemap_addr[],
+				    unsigned long npages,
+				    const struct drm_pagemap_devmem_ops *ops,
+				    const struct drm_pagemap_migrate_details *mdetails)
+
+{
+	int err = drm_pagemap_migrate_map_pages(remote_device, remote_dpagemap,
+						pagemap_addr, local_pfns,
+						npages, DMA_FROM_DEVICE, mdetails);
+
+	if (err)
+		goto out;
+
+	err = ops->copy_to_ram(remote_pages, pagemap_addr, npages,
+			       devmem->pre_migrate_fence);
+out:
+	drm_pagemap_migrate_unmap_pages(remote_device, pagemap_addr, local_pfns,
+					npages, DMA_FROM_DEVICE);
+	return err;
+}
+
+static int
+drm_pagemap_migrate_sys_to_dev(struct drm_pagemap_devmem *devmem,
+			       unsigned long sys_pfns[],
+			       struct page *local_pages[],
+			       struct drm_pagemap_addr pagemap_addr[],
+			       unsigned long npages,
+			       const struct drm_pagemap_devmem_ops *ops,
+			       const struct drm_pagemap_migrate_details *mdetails)
+{
+	int err = drm_pagemap_migrate_map_pages(devmem->dev, devmem->dpagemap,
+						pagemap_addr, sys_pfns, npages,
+						DMA_TO_DEVICE, mdetails);
+
+	if (err)
+		goto out;
+
+	err = ops->copy_to_devmem(local_pages, pagemap_addr, npages,
+				  devmem->pre_migrate_fence);
+out:
+	drm_pagemap_migrate_unmap_pages(devmem->dev, pagemap_addr, sys_pfns, npages,
+					DMA_TO_DEVICE);
+	return err;
+}
+
+/**
+ * struct migrate_range_loc - Cursor into the loop over migrate_pfns when
+ * migrating to device memory.
+ * @start: The current loop index.
+ * @device: The migrating device.
+ * @dpagemap: Pointer to struct drm_pagemap used by the migrating device.
+ * @ops: The copy ops to be used for the migrating device.
+ */
+struct migrate_range_loc {
+	unsigned long start;
+	struct device *device;
+	struct drm_pagemap *dpagemap;
+	const struct drm_pagemap_devmem_ops *ops;
+};
+
+static int drm_pagemap_migrate_range(struct drm_pagemap_devmem *devmem,
+				     unsigned long src_pfns[],
+				     unsigned long dst_pfns[],
+				     struct page *pages[],
+				     struct drm_pagemap_addr pagemap_addr[],
+				     struct migrate_range_loc *last,
+				     const struct migrate_range_loc *cur,
+				     const struct drm_pagemap_migrate_details *mdetails)
+{
+	int ret = 0;
+
+	if (cur->start == 0)
+		goto out;
+
+	if (cur->start <= last->start)
+		return 0;
+
+	if (cur->dpagemap == last->dpagemap && cur->ops == last->ops)
+		return 0;
+
+	if (last->dpagemap)
+		ret = drm_pagemap_migrate_remote_to_local(devmem,
+							  last->device,
+							  last->dpagemap,
+							  &dst_pfns[last->start],
+							  &pages[last->start],
+							  &pagemap_addr[last->start],
+							  cur->start - last->start,
+							  last->ops, mdetails);
+
+	else
+		ret = drm_pagemap_migrate_sys_to_dev(devmem,
+						     &src_pfns[last->start],
+						     &pages[last->start],
+						     &pagemap_addr[last->start],
+						     cur->start - last->start,
+						     last->ops, mdetails);
+
+out:
+	*last = *cur;
+	return ret;
+}
+
 /**
  * drm_pagemap_migrate_to_devmem() - Migrate a struct mm_struct range to device memory
  * @devmem_allocation: The device memory allocation to migrate to.
@@ -368,6 +484,7 @@ int drm_pagemap_migrate_to_devmem(struct drm_pagemap_devmem *devmem_allocation,
 	};
 	unsigned long i, npages = npages_in_range(start, end);
 	unsigned long own_pages = 0, migrated_pages = 0;
+	struct migrate_range_loc cur, last = {.device = dpagemap->drm->dev, .ops = ops};
 	struct vm_area_struct *vas;
 	struct drm_pagemap_zdd *zdd = NULL;
 	struct page **pages;
@@ -463,44 +580,55 @@ int drm_pagemap_migrate_to_devmem(struct drm_pagemap_devmem *devmem_allocation,
 	if (err)
 		goto err_finalize;
 
-	err = drm_pagemap_migrate_map_pages(devmem_allocation->dev,
-					    devmem_allocation->dpagemap, pagemap_addr,
-					    migrate.src, npages, DMA_TO_DEVICE,
-					    mdetails);
-
-	if (err) {
-		drm_pagemap_migrate_unmap_pages(devmem_allocation->dev, pagemap_addr,
-						migrate.src, npages, DMA_TO_DEVICE);
-
-		goto err_finalize;
-	}
-
 	own_pages = 0;
+
 	for (i = 0; i < npages; ++i) {
 		struct page *page = pfn_to_page(migrate.dst[i]);
 		struct page *src_page = migrate_pfn_to_page(migrate.src[i]);
+		cur.start = i;
 
-		if (unlikely(src_page && is_zone_device_page(src_page) &&
-			     page_pgmap(src_page) == pagemap &&
-			     !mdetails->can_migrate_same_pagemap)) {
-			migrate.dst[i] = 0;
-			pages[i] = NULL;
-			own_pages++;
-			continue;
+		pages[i] = NULL;
+		if (src_page && is_device_private_page(src_page)) {
+			struct drm_pagemap_zdd *src_zdd = src_page->zone_device_data;
+
+			if (page_pgmap(src_page) == pagemap &&
+			    !mdetails->can_migrate_same_pagemap) {
+				migrate.dst[i] = 0;
+				own_pages++;
+				continue;
+			}
+			if (mdetails->source_peer_migrates) {
+				cur.dpagemap = src_zdd->dpagemap;
+				cur.ops = src_zdd->devmem_allocation->ops;
+				cur.device = cur.dpagemap->drm->dev;
+				pages[i] = src_page;
+			}
+		}
+		if (!pages[i]) {
+			cur.dpagemap = NULL;
+			cur.ops = ops;
+			cur.device = dpagemap->drm->dev;
+			pages[i] = page;
 		}
-		pages[i] = page;
 		migrate.dst[i] = migrate_pfn(migrate.dst[i]);
 		drm_pagemap_get_devmem_page(page, zdd);
-	}
-	drm_WARN_ON(dpagemap->drm, !!own_pages);
 
-	err = ops->copy_to_devmem(pages, pagemap_addr, npages,
-				  devmem_allocation->pre_migrate_fence);
-	drm_pagemap_migrate_unmap_pages(devmem_allocation->dev, pagemap_addr,
-					migrate.src, npages, DMA_TO_DEVICE);
+		/* If we switched the migrating drm_pagemap, migrate previous pages now */
+		err = drm_pagemap_migrate_range(devmem_allocation, migrate.src, migrate.dst,
+						pages, pagemap_addr, &last, &cur,
+						mdetails);
+		if (err)
+			goto err_finalize;
+	}
+	cur.start = npages;
+	cur.ops = NULL; /* Force migration */
+	err = drm_pagemap_migrate_range(devmem_allocation, migrate.src, migrate.dst,
+					pages, pagemap_addr, &last, &cur, mdetails);
 	if (err)
 		goto err_finalize;
 
+	drm_WARN_ON(dpagemap->drm, !!own_pages);
+
 	/* Upon success bind devmem allocation to range and zdd */
 	devmem_allocation->timeslice_expiration = get_jiffies_64() +
 		msecs_to_jiffies(mdetails->timeslice_ms);
-- 
2.51.1

