From: Junhua Shen <Junhua.Shen@amd.com>
To: <Alexander.Deucher@amd.com>, <Felix.Kuehling@amd.com>,
<Christian.Koenig@amd.com>, <Oak.Zeng@amd.com>,
<Jenny-Jing.Liu@amd.com>, <Philip.Yang@amd.com>,
<Xiaogang.Chen@amd.com>, <Ray.Huang@amd.com>,
<honglei1.huang@amd.com>, <Lingshan.Zhu@amd.com>
Cc: <amd-gfx@lists.freedesktop.org>,
<dri-devel@lists.freedesktop.org>,
"Junhua Shen" <Junhua.Shen@amd.com>
Subject: [PATCH v3 2/5] drm/amdgpu: implement drm_pagemap SDMA migration callbacks
Date: Mon, 27 Apr 2026 18:05:19 +0800
Message-ID: <20260427100522.7014-3-Junhua.Shen@amd.com>
In-Reply-To: <20260427100522.7014-1-Junhua.Shen@amd.com>

Implement the drm_pagemap_devmem_ops and drm_pagemap_ops callbacks
that the DRM GPUSVM migration framework requires (see the call-flow
sketch below):

drm_pagemap_ops (top-level entry points):
- device_map: convert a ZONE_DEVICE page to a GPU PTE address
- populate_mm: allocate a VRAM BO and trigger migration

drm_pagemap_devmem_ops (per-BO migration mechanics):
- populate_devmem_pfn: walk the BO's buddy blocks to build the PFN array
- copy_to_devmem: SDMA copy system RAM -> VRAM via a GART window
- copy_to_ram: SDMA copy VRAM -> system RAM via a GART window
- devmem_release: free the BO once all pages have migrated back
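A rough sketch of the resulting call flow (entry-point names on the
drm_pagemap side are as used by this series and may differ in the
final framework):

  migrate to VRAM (populate):
    drm_pagemap_populate_mm()
      amdgpu_svm_populate_mm()              allocate VRAM BO
        drm_pagemap_migrate_to_devmem()
          amdgpu_svm_populate_devmem_pfn()  build dst PFN array
          amdgpu_svm_copy_to_devmem()       SDMA RAM -> VRAM

  migrate back to system RAM (CPU fault or process exit):
    drm_pagemap migrate-to-ram path
      amdgpu_svm_copy_to_ram()              SDMA VRAM -> RAM
      amdgpu_svm_devmem_release()           after the last page returns
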
Signed-off-by: Junhua Shen <Junhua.Shen@amd.com>
---
drivers/gpu/drm/amd/amdgpu/amdgpu_migrate.c | 616 +++++++++++++++++++-
1 file changed, 613 insertions(+), 3 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_migrate.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_migrate.c
index 170e2eadc106..42092651b4d5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_migrate.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_migrate.c
@@ -64,12 +64,20 @@
#include <linux/memremap.h>
#include <linux/migrate.h>
-#include "amdgpu_amdkfd.h"
#include "amdgpu_migrate.h"
#include "amdgpu.h"
+#include "amdgpu_ttm.h"
+#include "amdgpu_res_cursor.h"
+
+#define AMDGPU_MIGRATE_TRACE(fmt, ...) \
+ pr_debug("%s: " fmt, __func__, ##__VA_ARGS__)
+
+/* SDMA copy direction */
+#define FROM_RAM_TO_VRAM 0
+#define FROM_VRAM_TO_RAM 1
static inline struct amdgpu_pagemap *
-dpagemap_to_apagemap(struct drm_pagemap *dpagemap)
+to_amdgpu_pagemap(struct drm_pagemap *dpagemap)
{
return container_of(dpagemap, struct amdgpu_pagemap, dpagemap);
}
@@ -94,8 +102,610 @@ amdgpu_svm_page_to_apagemap(struct page *page)
return container_of(pgmap, struct amdgpu_pagemap, pgmap);
}
+/* drm_pagemap_devmem_ops — per-BO migration mechanics */
+
+/**
+ * struct amdgpu_svm_bo - Wrapper linking drm_pagemap_devmem to amdgpu_bo
+ *
+ * @bo: The backing VRAM amdgpu_bo
+ * @devmem: drm_pagemap device memory allocation (passed to the framework)
+ *
+ * Allocated once per migration in populate_mm() and freed by
+ * devmem_release() when all device-private pages have migrated
+ * back to system memory.
+ *
+ * Lifecycle is managed by the drm_pagemap framework's internal zdd refcount:
+ * - zdd->devmem_allocation points to &svm_bo->devmem
+ * - When zdd refcount drops to zero, framework calls devmem_release()
+ * - devmem_release() frees both the BO reference and the svm_bo itself
+ */
+struct amdgpu_svm_bo {
+ struct amdgpu_bo *bo;
+ struct drm_pagemap_devmem devmem;
+};
+
+static inline struct amdgpu_svm_bo *
+to_amdgpu_svm_bo(struct drm_pagemap_devmem *devmem_allocation)
+{
+ return container_of(devmem_allocation, struct amdgpu_svm_bo, devmem);
+}
+
+/**
+ * amdgpu_svm_devmem_release - Release the BO when all device pages migrate back
+ * @devmem_allocation: The devmem allocation embedded in an amdgpu_svm_bo
+ *
+ * Called by the drm_pagemap framework (via drm_pagemap_zdd_destroy) when the
+ * last device-private page backed by this allocation has been migrated back
+ * to system memory (or the owning process exits).
+ *
+ * Frees both the amdgpu_bo reference and the wrapper amdgpu_svm_bo itself.
+ */
+static void
+amdgpu_svm_devmem_release(struct drm_pagemap_devmem *devmem_allocation)
+{
+ struct amdgpu_svm_bo *svm_bo = to_amdgpu_svm_bo(devmem_allocation);
+
+ AMDGPU_MIGRATE_TRACE("Release svm_bo=%px bo=%px\n", svm_bo, svm_bo->bo);
+ amdgpu_bo_unref(&svm_bo->bo);
+ kfree(svm_bo);
+}
+
+/**
+ * amdgpu_svm_populate_devmem_pfn - Convert BO VRAM allocation to PFN array
+ * @devmem_allocation: The devmem allocation in the amdgpu_svm_bo wrapper
+ * @npages: Number of PFN entries to fill
+ * @pfn: Output PFN array
+ *
+ * Iterates over the BO's TTM vram_mgr buddy blocks and converts each
+ * block's VRAM offset to ZONE_DEVICE PFNs:
+ *
+ * PFN = PHYS_PFN(block_offset + apagemap.hpa_base) + page_index
+ *
+ * This is called by drm_pagemap_migrate_to_devmem() to build the
+ * destination PFN array for migrate_vma_pages().
+ *
+ * Return: 0 on success, negative error code if reserving the BO fails
+ */
+static int
+amdgpu_svm_populate_devmem_pfn(struct drm_pagemap_devmem *devmem_allocation,
+ unsigned long npages, unsigned long *pfn)
+{
+ struct amdgpu_pagemap *svm_dm = to_amdgpu_pagemap(devmem_allocation->dpagemap);
+ struct amdgpu_svm_bo *svm_bo = to_amdgpu_svm_bo(devmem_allocation);
+ struct amdgpu_bo *bo = svm_bo->bo;
+ struct amdgpu_res_cursor cursor;
+ unsigned long i = 0;
+ int ret;
+
+ ret = amdgpu_bo_reserve(bo, false);
+ if (ret)
+ return ret;
+
+ amdgpu_res_first(bo->tbo.resource, 0, npages << PAGE_SHIFT, &cursor);
+ while (cursor.remaining && i < npages) {
+ u64 pfn_base = PHYS_PFN(cursor.start + svm_dm->hpa_base);
+ u64 pages = cursor.size >> PAGE_SHIFT;
+ unsigned long j;
+
+ for (j = 0; j < pages && i < npages; j++, i++)
+ pfn[i] = pfn_base + j;
+
+ amdgpu_res_next(&cursor, cursor.size);
+ }
+
+ amdgpu_bo_unreserve(bo);
+
+ AMDGPU_MIGRATE_TRACE("populate_devmem_pfn: npages=%lu first_pfn=0x%lx\n",
+ npages, npages > 0 ? pfn[0] : 0);
+
+ return 0;
+}
+
+/* SDMA copy helpers — GART window based data transfer */
+
+/**
+ * amdgpu_svm_direct_mapping_addr - Convert VRAM offset to MC address
+ * @adev: AMDGPU device
+ * @vram_offset: Byte offset within VRAM
+ *
+ * Return: MC address suitable for SDMA src/dst
+ */
+static u64
+amdgpu_svm_direct_mapping_addr(struct amdgpu_device *adev, u64 vram_offset)
+{
+ return vram_offset + amdgpu_ttm_domain_start(adev, TTM_PL_VRAM);
+}
+
+/**
+ * amdgpu_svm_gart_map - Map system DMA addresses into GART window
+ * @ring: SDMA ring for the GART update job
+ * @entity: TTM buffer entity that owns the GART window and serializing lock
+ * @npages: Number of pages to map
+ * @addr: Array of system memory DMA addresses
+ * @gart_addr: Returned GART base address to use in the SDMA copy
+ * @flags: PTE flags (e.g. writeable for a RAM-to-VRAM source)
+ *
+ * Builds GART PTEs pointing at the given DMA addresses, submits an
+ * SDMA job to update the GART entries, and returns the GART address
+ * that can be used as src or dst in a subsequent amdgpu_copy_buffer().
+ *
+ * Uses the entity's GART window 0; callers must hold entity->lock, as
+ * amdgpu_svm_copy_memory_gart() does.
+ * Uses GART window 0, protected by gtt_window_lock.
+ *
+ * Return: 0 on success, negative error code on failure
+ */
+static int
+amdgpu_svm_gart_map(struct amdgpu_ring *ring,
+ struct amdgpu_ttm_buffer_entity *entity,
+ u64 npages,
+ dma_addr_t *addr, u64 *gart_addr, u64 flags)
+{
+ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_job *job;
+ unsigned int num_dw, num_bytes;
+ struct dma_fence *fence;
+ u64 src_addr, dst_addr;
+ u64 pte_flags;
+ void *cpu_addr;
+ int r;
+
+ /* Use entity's GART window 0 */
+ *gart_addr = amdgpu_compute_gart_address(&adev->gmc, entity, 0);
+
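+ /*
+ * Single IB layout: the first num_dw dwords hold the SDMA copy
+ * packet, followed by the GART PTE payload (8 bytes per GPU page).
+ * The copy emitted below reads those PTEs back out of this IB and
+ * writes them into the GART table at the entity's window offset.
+ */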
+ num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
+ num_bytes = npages * 8 * AMDGPU_GPU_PAGES_IN_CPU_PAGE;
+
+ r = amdgpu_job_alloc_with_ib(adev, &entity->base,
+ AMDGPU_FENCE_OWNER_UNDEFINED,
+ num_dw * 4 + num_bytes,
+ AMDGPU_IB_POOL_DELAYED,
+ &job,
+ AMDGPU_KERNEL_JOB_ID_KFD_GART_MAP);
+ if (r)
+ return r;
+
+ src_addr = num_dw * 4;
+ src_addr += job->ibs[0].gpu_addr;
+
+ dst_addr = amdgpu_bo_gpu_offset(adev->gart.bo);
+ dst_addr += (entity->gart_window_offs[0] >> AMDGPU_GPU_PAGE_SHIFT) * 8;
+ amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr,
+ dst_addr, num_bytes, 0);
+
+ amdgpu_ring_pad_ib(ring, &job->ibs[0]);
+ WARN_ON(job->ibs[0].length_dw > num_dw);
+
+ pte_flags = AMDGPU_PTE_VALID | AMDGPU_PTE_READABLE;
+ pte_flags |= AMDGPU_PTE_SYSTEM | AMDGPU_PTE_SNOOPED;
+ if (flags & AMDGPU_PTE_WRITEABLE)
+ pte_flags |= AMDGPU_PTE_WRITEABLE;
+ pte_flags |= adev->gart.gart_pte_flags;
+
+ cpu_addr = &job->ibs[0].ptr[num_dw];
+
+ amdgpu_gart_map(adev, 0, npages, addr, pte_flags, cpu_addr);
+ fence = amdgpu_job_submit(job);
+ dma_fence_put(fence);
+
+ return 0;
+}
+
+/**
+ * amdgpu_svm_copy_memory_gart - SDMA copy between system RAM and VRAM
+ * @adev: AMDGPU device
+ * @sys: Array of DMA addresses for system memory pages
+ * @vram: Array of VRAM byte offsets (relative to start of VRAM)
+ * @npages: Number of pages to copy
+ * @direction: FROM_RAM_TO_VRAM or FROM_VRAM_TO_RAM
+ * @mfence: In/out — carries the last SDMA fence for serialization
+ *
+ * Maps system memory pages into the GART window and uses SDMA to copy
+ * data to/from VRAM. Handles splitting into AMDGPU_GTT_MAX_TRANSFER_SIZE
+ * chunks. Acquires entity->lock internally to protect the GART window,
+ * matching the KFD svm_migrate_copy_memory_gart() pattern.
+ *
+ * Return: 0 on success, negative error code on failure
+ */
+static int
+amdgpu_svm_copy_memory_gart(struct amdgpu_device *adev, dma_addr_t *sys,
+ u64 *vram, u64 npages, int direction,
+ struct dma_fence **mfence)
+{
+ const u64 max_pages = AMDGPU_GTT_MAX_TRANSFER_SIZE;
+ struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
+ struct amdgpu_ttm_buffer_entity *entity = &adev->mman.move_entity;
+ u64 gart_s, gart_d;
+ struct dma_fence *next;
+ u64 size;
+ int r = 0;
+
+ mutex_lock(&entity->lock);
+
+ while (npages) {
+ size = min(max_pages, npages);
+
+ if (direction == FROM_VRAM_TO_RAM) {
+ gart_s = amdgpu_svm_direct_mapping_addr(adev, *vram);
+ r = amdgpu_svm_gart_map(ring, entity, size, sys,
+ &gart_d, AMDGPU_PTE_WRITEABLE);
+ } else {
+ r = amdgpu_svm_gart_map(ring, entity, size, sys,
+ &gart_s, 0);
+ gart_d = amdgpu_svm_direct_mapping_addr(adev, *vram);
+ }
+ if (r) {
+ dev_err(adev->dev, "failed %d to map GART for SDMA\n", r);
+ goto out_unlock;
+ }
+
+ AMDGPU_MIGRATE_TRACE("SDMA_COPY: %s npages=%llu vram_off=0x%llx\n",
+ direction == FROM_RAM_TO_VRAM ? "RAM->VRAM" : "VRAM->RAM",
+ size, (u64)*vram);
+
+ r = amdgpu_copy_buffer(adev, entity, gart_s, gart_d,
+ size * PAGE_SIZE,
+ NULL, &next, true, 0);
+ if (r) {
+ dev_err(adev->dev, "failed %d to copy buffer\n", r);
+ goto out_unlock;
+ }
+
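+ /*
+ * Only the newest fence needs to be kept: all chunks go to the
+ * same SDMA ring, so earlier copies complete before the last
+ * one signals.
+ */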
+ dma_fence_put(*mfence);
+ *mfence = next;
+ npages -= size;
+ if (npages) {
+ sys += size;
+ vram += size;
+ }
+ }
+
+out_unlock:
+ mutex_unlock(&entity->lock);
+
+ return r;
+}
+
+/**
+ * amdgpu_svm_copy_to_devmem - SDMA copy system memory -> VRAM
+ * @pages: Array of destination ZONE_DEVICE pages (VRAM-backed)
+ * @pagemap_addr: Array of source DMA addresses (system memory, already mapped)
+ * @npages: Number of pages to copy
+ * @pre_migrate_fence: Optional ordering fence from the framework (currently
+ * unused here)
+ *
+ * Builds parallel sys[] and vram[] arrays from the framework-provided
+ * pagemap_addr and device pages, then submits batched SDMA copies via
+ * the GART window.
+ *
+ * Return: 0 on success, negative error code on failure
+ */
+static int
+amdgpu_svm_copy_to_devmem(struct page **pages,
+ struct drm_pagemap_addr *pagemap_addr,
+ unsigned long npages,
+ struct dma_fence *pre_migrate_fence)
+{
+ struct amdgpu_device *adev;
+ struct amdgpu_pagemap *svm_dm;
+ struct dma_fence *mfence = NULL;
+ dma_addr_t *sys;
+ u64 *vram;
+ unsigned long i, j;
+ int ret = 0;
+
+ if (!npages)
+ return 0;
+
+ /*
+ * Find the first non-NULL page to derive the device.
+ * The pages array may contain NULL entries for positions where
+ * no valid device page exists.
+ */
+ for (i = 0; i < npages; i++) {
+ if (pages[i])
+ break;
+ }
+ if (i == npages)
+ return 0;
+
+ svm_dm = amdgpu_svm_page_to_apagemap(pages[i]);
+ adev = svm_dm->adev;
+
+ sys = kvcalloc(npages, sizeof(*sys), GFP_KERNEL);
+ vram = kvcalloc(npages, sizeof(*vram), GFP_KERNEL);
+ if (!sys || !vram) {
+ ret = -ENOMEM;
+ goto out_free;
+ }
+
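+ /*
+ * Batch pages into runs that are contiguous in VRAM so each run
+ * becomes a single SDMA copy; flush the current run whenever a
+ * hole in the source addresses or a VRAM discontinuity is hit.
+ */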
+ for (i = 0, j = 0; i < npages; i++) {
+ if (!pagemap_addr[i].addr || !pages[i])
+ goto flush;
+
+ sys[j] = pagemap_addr[i].addr;
+ vram[j] = ((u64)page_to_pfn(pages[i]) << PAGE_SHIFT) -
+ svm_dm->hpa_base;
+
+ /* Check if next vram page is contiguous with current */
+ if (j > 0 && vram[j] != vram[j - 1] + PAGE_SIZE)
+ goto flush;
+
+ j++;
+ continue;
+flush:
+ if (j) {
+ ret = amdgpu_svm_copy_memory_gart(adev, sys, vram, j,
+ FROM_RAM_TO_VRAM,
+ &mfence);
+ if (ret)
+ goto out_fence;
+ j = 0;
+ }
+ /* Re-process current page if it was valid but broke contiguity */
+ if (pagemap_addr[i].addr && pages[i]) {
+ sys[0] = pagemap_addr[i].addr;
+ vram[0] = ((u64)page_to_pfn(pages[i]) << PAGE_SHIFT) -
+ svm_dm->hpa_base;
+ j = 1;
+ }
+ }
+
+ /* Flush remaining batch */
+ if (j)
+ ret = amdgpu_svm_copy_memory_gart(adev, sys, vram, j,
+ FROM_RAM_TO_VRAM, &mfence);
+
+out_fence:
+ if (mfence) {
+ dma_fence_wait(mfence, false);
+ dma_fence_put(mfence);
+ }
+
+ AMDGPU_MIGRATE_TRACE("copy_to_devmem done: npages=%ld ret=%d\n",
+ npages, ret);
+
+out_free:
+ kvfree(vram);
+ kvfree(sys);
+ return ret;
+}
+
+/**
+ * amdgpu_svm_copy_to_ram - SDMA copy VRAM -> system memory
+ * @pages: Array of source ZONE_DEVICE pages (VRAM-backed)
+ * @pagemap_addr: Array of destination DMA addresses (system memory, already mapped)
+ * @npages: Number of pages to copy
+ * @pre_migrate_fence: Optional ordering fence from the framework (currently
+ * unused here)
+ *
+ * Mirror of copy_to_devmem with src/dst swapped.
+ *
+ * Return: 0 on success, negative error code on failure
+ */
+static int
+amdgpu_svm_copy_to_ram(struct page **pages,
+ struct drm_pagemap_addr *pagemap_addr,
+ unsigned long npages,
+ struct dma_fence *pre_migrate_fence)
+{
+ struct amdgpu_device *adev;
+ struct amdgpu_pagemap *svm_dm;
+ struct dma_fence *mfence = NULL;
+ dma_addr_t *sys;
+ u64 *vram;
+ unsigned long i, j;
+ int ret = 0;
+
+ if (!npages)
+ return 0;
+
+ for (i = 0; i < npages; i++) {
+ if (pages[i])
+ break;
+ }
+ if (i == npages)
+ return 0;
+
+ svm_dm = amdgpu_svm_page_to_apagemap(pages[i]);
+ adev = svm_dm->adev;
+
+ sys = kvcalloc(npages, sizeof(*sys), GFP_KERNEL);
+ vram = kvcalloc(npages, sizeof(*vram), GFP_KERNEL);
+ if (!sys || !vram) {
+ ret = -ENOMEM;
+ goto out_free;
+ }
+
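+ /* Same contiguity batching as copy_to_devmem, with src/dst swapped. */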
+ for (i = 0, j = 0; i < npages; i++) {
+ if (!pagemap_addr[i].addr || !pages[i])
+ goto flush;
+
+ vram[j] = ((u64)page_to_pfn(pages[i]) << PAGE_SHIFT) -
+ svm_dm->hpa_base;
+ sys[j] = pagemap_addr[i].addr;
+
+ /* Check if next vram page is contiguous with current */
+ if (j > 0 && vram[j] != vram[j - 1] + PAGE_SIZE)
+ goto flush;
+
+ j++;
+ continue;
+flush:
+ if (j) {
+ ret = amdgpu_svm_copy_memory_gart(adev, sys, vram, j,
+ FROM_VRAM_TO_RAM,
+ &mfence);
+ if (ret)
+ goto out_fence;
+ j = 0;
+ }
+ /* Re-process current page if it was valid but broke contiguity */
+ if (pagemap_addr[i].addr && pages[i]) {
+ vram[0] = ((u64)page_to_pfn(pages[i]) << PAGE_SHIFT) -
+ svm_dm->hpa_base;
+ sys[0] = pagemap_addr[i].addr;
+ j = 1;
+ }
+ }
+
+ /* Flush remaining batch */
+ if (j)
+ ret = amdgpu_svm_copy_memory_gart(adev, sys, vram, j,
+ FROM_VRAM_TO_RAM, &mfence);
+
+out_fence:
+ if (mfence) {
+ dma_fence_wait(mfence, false);
+ dma_fence_put(mfence);
+ }
+
+ AMDGPU_MIGRATE_TRACE("copy_to_ram done: npages=%ld ret=%d\n", npages, ret);
+
+out_free:
+ kvfree(vram);
+ kvfree(sys);
+ return ret;
+}
+
+static const struct drm_pagemap_devmem_ops amdgpu_svm_devmem_ops = {
+ .devmem_release = amdgpu_svm_devmem_release,
+ .populate_devmem_pfn = amdgpu_svm_populate_devmem_pfn,
+ .copy_to_devmem = amdgpu_svm_copy_to_devmem,
+ .copy_to_ram = amdgpu_svm_copy_to_ram,
+};
+
+/* drm_pagemap_ops — top-level migration entry points */
+
+/**
+ * amdgpu_svm_device_map - Convert ZONE_DEVICE page to GPU PTE address
+ * @dpagemap: The drm_pagemap for this device
+ * @dev: Requesting device (for P2P check)
+ * @page: ZONE_DEVICE page backed by VRAM
+ * @order: Page order (0 = 4K, 9 = 2M, etc.)
+ * @dir: DMA direction (unused for local VRAM)
+ *
+ * Address conversion chain:
+ * page -> PFN -> HPA -> VRAM offset -> PTE address
+ *
+ * HPA = page_to_pfn(page) << PAGE_SHIFT
+ * VRAM offset = HPA - apagemap.hpa_base
+ * PTE address = VRAM offset + adev->vm_manager.vram_base_offset
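+ *
+ * Worked example (illustrative numbers only): with hpa_base =
+ * 0x400000000 and vram_base_offset = 0, a device page at HPA
+ * 0x400200000 yields VRAM offset 0x200000, which is then also the
+ * PTE address.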
+ *
+ * Return: drm_pagemap_addr with PTE address and AMDGPU_INTERCONNECT_VRAM protocol
+ */
+static struct drm_pagemap_addr
+amdgpu_svm_device_map(struct drm_pagemap *dpagemap,
+ struct device *dev,
+ struct page *page,
+ unsigned int order,
+ enum dma_data_direction dir)
+{
+ struct amdgpu_pagemap *svm_dm = to_amdgpu_pagemap(dpagemap);
+ struct amdgpu_device *adev = dpagemap_to_adev(dpagemap);
+ dma_addr_t addr;
+
+ if (dpagemap->drm->dev == dev) {
+ /* Same device: return VRAM PTE address */
+ u64 hpa = (u64)page_to_pfn(page) << PAGE_SHIFT;
+ u64 vram_offset = hpa - svm_dm->hpa_base;
+
+ addr = vram_offset + adev->vm_manager.vram_base_offset;
+ } else {
+ /* Cross-device P2P: not yet supported */
+ addr = DMA_MAPPING_ERROR;
+ }
+
+ return drm_pagemap_addr_encode(addr,
+ AMDGPU_INTERCONNECT_VRAM, order, dir);
+}
+
+/**
+ * amdgpu_svm_bo_alloc - Allocate an amdgpu_svm_bo wrapper with VRAM backing
+ * @adev: AMDGPU device
+ * @dpagemap: The drm_pagemap for this device
+ * @mm: mm_struct of the owning process
+ * @size: Allocation size in bytes
+ *
+ * Return: Pointer to allocated amdgpu_svm_bo on success, ERR_PTR on failure
+ */
+static struct amdgpu_svm_bo *
+amdgpu_svm_bo_alloc(struct amdgpu_device *adev,
+ struct drm_pagemap *dpagemap,
+ struct mm_struct *mm, unsigned long size)
+{
+ struct amdgpu_svm_bo *svm_bo;
+ struct amdgpu_bo_param bp = {};
+ struct amdgpu_bo *bo;
+ int ret;
+
+ svm_bo = kzalloc(sizeof(*svm_bo), GFP_KERNEL);
+ if (!svm_bo)
+ return ERR_PTR(-ENOMEM);
+
+ bp.size = size;
+ bp.bo_ptr_size = sizeof(struct amdgpu_bo);
+ bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
+ bp.type = ttm_bo_type_device;
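+ /*
+ * VRAM_CONTIGUOUS keeps the allocation in a single buddy block, so
+ * its ZONE_DEVICE PFNs stay contiguous and the copy callbacks can
+ * batch the whole range into one SDMA transfer.
+ */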
+ bp.flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
+ AMDGPU_GEM_CREATE_VRAM_CLEARED;
+
+ ret = amdgpu_bo_create(adev, &bp, &bo);
+ if (ret) {
+ AMDGPU_MIGRATE_TRACE("Failed to create SVM BO\n");
+ kfree(svm_bo);
+ return ERR_PTR(ret);
+ }
+
+ svm_bo->bo = bo;
+
+ drm_pagemap_devmem_init(&svm_bo->devmem,
+ adev->dev, mm,
+ &amdgpu_svm_devmem_ops,
+ dpagemap, size, NULL);
+
+ return svm_bo;
+}
+
+/**
+ * amdgpu_svm_populate_mm - Allocate VRAM BO and migrate pages
+ * @dpagemap: The drm_pagemap for this device
+ * @start: Start virtual address of the range to migrate
+ * @end: End virtual address (exclusive)
+ * @mm: mm_struct of the owning process
+ * @timeslice_ms: Maximum time to spend migrating (for fairness)
+ *
+ * Core migration entry point called by drm_pagemap_populate_mm().
+ * Allocates an amdgpu_svm_bo via amdgpu_svm_bo_alloc(), then calls
+ * drm_pagemap_migrate_to_devmem() to execute the actual migration.
+ *
+ * Return: 0 on success, negative error code on failure
+ */
+static int
+amdgpu_svm_populate_mm(struct drm_pagemap *dpagemap,
+ unsigned long start, unsigned long end,
+ struct mm_struct *mm,
+ unsigned long timeslice_ms)
+{
+ struct amdgpu_device *adev = dpagemap_to_adev(dpagemap);
+ struct drm_pagemap_migrate_details mdetails = {
+ .timeslice_ms = timeslice_ms,
+ };
+ struct amdgpu_svm_bo *svm_bo;
+
+ svm_bo = amdgpu_svm_bo_alloc(adev, dpagemap, mm, end - start);
+ if (IS_ERR(svm_bo))
+ return PTR_ERR(svm_bo);
+
+ AMDGPU_MIGRATE_TRACE("populate_mm: [0x%lx-0x%lx] size=%lu\n",
+ start, end, end - start);
+
+ return drm_pagemap_migrate_to_devmem(&svm_bo->devmem,
+ mm, start, end,
+ &mdetails);
+}
-const struct drm_pagemap_ops amdgpu_svm_drm_pagemap_ops = { };
+const struct drm_pagemap_ops amdgpu_svm_drm_pagemap_ops = {
+ .device_map = amdgpu_svm_device_map,
+ .populate_mm = amdgpu_svm_populate_mm,
+};
/**
* amdgpu_svm_migration_init - Register ZONE_DEVICE and initialize drm_pagemap
--
2.34.1