Intel-XE Archive on lore.kernel.org
 help / color / mirror / Atom feed
From: Matthew Brost <matthew.brost@intel.com>
To: intel-xe@lists.freedesktop.org, dri-devel@lists.freedesktop.org
Cc: airlied@gmail.com, christian.koenig@amd.com,
	thomas.hellstrom@linux.intel.com, matthew.auld@intel.com,
	daniel@ffwll.ch
Subject: [RFC PATCH 23/28] drm/xe: Add SVM VRAM migration
Date: Tue, 27 Aug 2024 19:48:56 -0700	[thread overview]
Message-ID: <20240828024901.2582335-24-matthew.brost@intel.com> (raw)
In-Reply-To: <20240828024901.2582335-1-matthew.brost@intel.com>

Migration is implemented with range granularity, with VRAM backing being
a VM private TTM BO (i.e., shares dma-resv with VM). The lifetime of the
TTM BO is limited to when the SVM range is in VRAM (i.e., when a VRAM
SVM range is migrated to SRAM, the TTM BO is destroyed).

The design choice for using TTM BO for VRAM backing store, as opposed to
direct buddy allocation, is as follows:

- DRM buddy allocations are not at page granularity, offering no
  advantage over a BO.
- DRM buddy allocations do not solve locking inversion problems between
  mmap lock and dma-resv locks.
- Unified eviction is required (SVM VRAM and TTM BOs need to be able to
  evict each other).
- For exhaustive eviction [1], SVM VRAM allocations will almost certainly
  require a dma-resv.
- The likely allocation size is 2M, which makes the size of a BO (872
  bytes) acceptable per allocation (872 / 2M == .0004158).

With this, using TTM BO for VRAM backing store seems to be an obvious
choice as it allows leveraging of the TTM eviction code.

The current migration policy is to migrate any SVM range greater than or
equal to 64k exactly once.

[1] https://patchwork.freedesktop.org/series/133643/

Signed-off-by: Matthew Brost <matthew.brost@intel.com>
---
 drivers/gpu/drm/xe/xe_svm.c | 81 ++++++++++++++++++++++++++++++++++++-
 drivers/gpu/drm/xe/xe_svm.h |  1 +
 2 files changed, 81 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c
index 4372c02a341f..fd8987e0a506 100644
--- a/drivers/gpu/drm/xe/xe_svm.c
+++ b/drivers/gpu/drm/xe/xe_svm.c
@@ -217,8 +217,13 @@ static void xe_svm_invalidate(struct drm_gpusvm *gpusvm,
 static int __xe_svm_garbage_collector(struct xe_vm *vm,
 				      struct xe_svm_range *range)
 {
+	struct drm_gpusvm_ctx ctx = {};
 	struct dma_fence *fence;
 
+	/* Evict any pages holding references to vram allocation */
+	if (range->base.flags.partial_unmap && IS_DGFX(vm->xe))
+		drm_gpusvm_migrate_to_sram(&vm->svm.gpusvm, &range->base, &ctx);
+
 	xe_vm_lock(vm, false);
 	fence = xe_vm_range_unbind(vm, range);
 	xe_vm_unlock(vm);
@@ -504,21 +509,77 @@ static bool xe_svm_range_is_valid(struct xe_svm_range *range,
 	return (range->tile_present & ~range->tile_invalidated) & BIT(tile->id);
 }
 
+static struct xe_mem_region *tile_to_mr(struct xe_tile *tile)
+{
+	return &tile->mem.vram;
+}
+
+static struct xe_bo *xe_svm_alloc_vram(struct xe_vm *vm, struct xe_tile *tile,
+				       struct xe_svm_range *range,
+				       const struct drm_gpusvm_ctx *ctx)
+{
+	struct xe_mem_region *mr = tile_to_mr(tile);
+	struct drm_buddy_block *block;
+	struct list_head *blocks;
+	struct xe_bo *bo;
+	ktime_t end = 0;
+	int err;
+
+retry:
+	xe_vm_lock(vm, false);
+	bo = xe_bo_create(tile_to_xe(tile), tile, vm, range->base.va.end -
+			  range->base.va.start, ttm_bo_type_device,
+			  XE_BO_FLAG_VRAM_IF_DGFX(tile) |
+			  XE_BO_FLAG_SYSTEM_ALLOC | XE_BO_FLAG_SKIP_CLEAR);
+	xe_vm_unlock(vm);
+	if (IS_ERR(bo)) {
+		err = PTR_ERR(bo);
+		if (xe_vm_validate_should_retry(NULL, err, &end))
+			goto retry;
+		return bo;
+	}
+
+	blocks = &to_xe_ttm_vram_mgr_resource(bo->ttm.resource)->blocks;
+	list_for_each_entry(block, blocks, link)
+		block->private = mr;
+
+	/*
+	 * Take ref because as soon as drm_gpusvm_migrate_to_vram succeeds the
+	 * creation ref can be dropped upon CPU fault or unmap.
+	 */
+	xe_bo_get(bo);
+
+	err = drm_gpusvm_migrate_to_vram(&vm->svm.gpusvm, &range->base,
+					 bo, ctx);
+	if (err) {
+		xe_bo_put(bo);	/* Local ref */
+		xe_bo_put(bo);	/* Creation ref */
+		return ERR_PTR(err);
+	}
+
+	return bo;
+}
+
 int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
 			    struct xe_tile *tile, u64 fault_addr,
 			    bool atomic)
 {
-	struct drm_gpusvm_ctx ctx = { .read_only = xe_vma_read_only(vma), };
+	struct drm_gpusvm_ctx ctx = { .read_only = xe_vma_read_only(vma),
+		.vram_possible = IS_DGFX(vm->xe), };
 	struct xe_svm_range *range;
 	struct drm_gpusvm_range *r;
 	struct drm_exec exec;
 	struct dma_fence *fence;
+	struct xe_bo *bo = NULL;
 	ktime_t end = 0;
 	int err;
 
 	lockdep_assert_held_write(&vm->lock);
 
 retry:
+	xe_bo_put(bo);
+	bo = NULL;
+
 	/* Always process UNMAPs first so view SVM ranges is current */
 	err = xe_svm_garbage_collector(vm);
 	if (err)
@@ -534,6 +595,22 @@ int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
 	if (xe_svm_range_is_valid(range, tile))
 		return 0;
 
+	/* XXX: Add migration policy, for now migrate range once */
+	if (IS_DGFX(vm->xe) && !range->migrated &&
+	    range->base.flags.migrate_vram &&
+	    (range->base.va.end - range->base.va.start) >= SZ_64K) {
+		range->migrated = true;
+
+		bo = xe_svm_alloc_vram(vm, tile, range, &ctx);
+		if (IS_ERR(bo)) {
+			drm_info(&vm->xe->drm,
+				 "VRAM allocation failed, falling back to retrying, asid=%u, errno %ld\n",
+				 vm->usm.asid, PTR_ERR(bo));
+			bo = NULL;
+			goto retry;
+		}
+	}
+
 	err = drm_gpusvm_range_get_pages(&vm->svm.gpusvm, r, &ctx);
 	if (err == -EFAULT || err == -EPERM)	/* Corner where CPU mappings have change */
 	       goto retry;
@@ -567,6 +644,8 @@ int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
 	dma_fence_put(fence);
 
 err_out:
+	xe_bo_put(bo);
+
 	return err;
 }
 
diff --git a/drivers/gpu/drm/xe/xe_svm.h b/drivers/gpu/drm/xe/xe_svm.h
index 8b72e91cc37d..3f432483a230 100644
--- a/drivers/gpu/drm/xe/xe_svm.h
+++ b/drivers/gpu/drm/xe/xe_svm.h
@@ -18,6 +18,7 @@ struct xe_svm_range {
 	struct list_head garbage_collector_link;
 	u8 tile_present;
 	u8 tile_invalidated;
+	u8 migrated	:1;
 };
 
 int xe_devm_add(struct xe_tile *tile, struct xe_mem_region *mr);
-- 
2.34.1


  parent reply	other threads:[~2024-08-28  2:48 UTC|newest]

Thread overview: 100+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2024-08-28  2:48 [RFC PATCH 00/28] Introduce GPU SVM and Xe SVM implementation Matthew Brost
2024-08-28  2:48 ` [RFC PATCH 01/28] dma-buf: Split out dma fence array create into alloc and arm functions Matthew Brost
2024-08-28  2:48 ` [RFC PATCH 02/28] drm/xe: Invalidate media_gt TLBs in PT code Matthew Brost
2024-08-28  2:48 ` [RFC PATCH 03/28] drm/xe: Retry BO allocation Matthew Brost
2024-08-28  2:48 ` [RFC PATCH 04/28] mm/migrate: Add migrate_device_vma_range Matthew Brost
2024-08-29  9:03   ` Daniel Vetter
2024-08-29 15:58     ` Matthew Brost
2024-08-28  2:48 ` [RFC PATCH 05/28] drm/gpusvm: Add support for GPU Shared Virtual Memory Matthew Brost
2024-08-28 14:31   ` Daniel Vetter
2024-08-28 14:46     ` Christian König
2024-08-28 15:43       ` Matthew Brost
2024-08-28 16:06         ` Alex Deucher
2024-08-28 16:25         ` Daniel Vetter
2024-08-29 16:40           ` Matthew Brost
2024-09-02 11:29             ` Daniel Vetter
2024-08-30  5:00     ` Matthew Brost
2024-09-02 11:36       ` Daniel Vetter
2024-08-28 18:50   ` Daniel Vetter
2024-08-29 16:49     ` Matthew Brost
2024-09-02 11:40       ` Daniel Vetter
2024-08-29  9:16   ` Thomas Hellström
2024-08-29 17:45     ` Matthew Brost
2024-08-29 18:13       ` Matthew Brost
2024-08-29 19:18       ` Thomas Hellström
2024-08-29 20:56         ` Matthew Brost
2024-08-30  8:18           ` Thomas Hellström
2024-08-30 13:58             ` Matthew Brost
2024-09-02  9:57               ` Thomas Hellström
2024-08-30  9:57           ` Thomas Hellström
2024-08-30 13:47             ` Matthew Brost
2024-09-02  9:45               ` Thomas Hellström
2024-09-02 12:33           ` Daniel Vetter
2024-09-04 12:27             ` Thomas Hellström
2024-09-24  8:41               ` Simona Vetter
2024-08-30  1:35     ` Matthew Brost
2024-08-29  9:45   ` Daniel Vetter
2024-08-29 17:27     ` Matthew Brost
2024-09-02 11:53       ` Daniel Vetter
2024-09-02 17:03         ` Matthew Brost
2024-09-11 16:06           ` Matthew Brost
2024-08-30  9:16   ` Thomas Hellström
2024-09-02 12:20     ` Daniel Vetter
2024-09-06 18:41   ` Zeng, Oak
2024-09-24  9:25     ` Simona Vetter
2024-09-25 16:34       ` Zeng, Oak
2024-09-24 10:42   ` Thomas Hellström
2024-09-24 16:30     ` Matthew Brost
2024-09-25 21:12       ` Matthew Brost
2024-10-09 10:50   ` Thomas Hellström
2024-10-16  3:18     ` Matthew Brost
2024-10-16  6:27       ` Thomas Hellström
2024-10-16  8:24         ` Matthew Brost
2024-08-28  2:48 ` [RFC PATCH 06/28] drm/xe/uapi: Add DRM_XE_VM_BIND_FLAG_SYSTEM_ALLOCATON flag Matthew Brost
2024-08-28  2:48 ` [RFC PATCH 07/28] drm/xe: Add SVM init / fini to faulting VMs Matthew Brost
2024-08-28  2:48 ` [RFC PATCH 08/28] drm/xe: Add dma_addr res cursor Matthew Brost
2024-08-28  2:48 ` [RFC PATCH 09/28] drm/xe: Add SVM range invalidation Matthew Brost
2024-08-28  2:48 ` [RFC PATCH 10/28] drm/gpuvm: Add DRM_GPUVA_OP_USER Matthew Brost
2024-08-28  2:48 ` [RFC PATCH 11/28] drm/xe: Add (re)bind to SVM page fault handler Matthew Brost
2024-08-28  2:48 ` [RFC PATCH 12/28] drm/xe: Add SVM garbage collector Matthew Brost
2024-08-28  2:48 ` [RFC PATCH 13/28] drm/xe: Add unbind to " Matthew Brost
2024-08-28  2:48 ` [RFC PATCH 14/28] drm/xe: Do not allow system allocator VMA unbind if the GPU has bindings Matthew Brost
2024-08-28  2:48 ` [RFC PATCH 15/28] drm/xe: Enable system allocator uAPI Matthew Brost
2024-08-28  2:48 ` [RFC PATCH 16/28] drm/xe: Add migrate layer functions for SVM support Matthew Brost
2024-08-28  2:48 ` [RFC PATCH 17/28] drm/xe: Add SVM device memory mirroring Matthew Brost
2024-08-28  2:48 ` [RFC PATCH 18/28] drm/xe: Add GPUSVM copy SRAM / VRAM vfunc functions Matthew Brost
2024-08-28  2:48 ` [RFC PATCH 19/28] drm/xe: Update PT layer to understand ranges in VRAM Matthew Brost
2024-08-28  2:48 ` [RFC PATCH 20/28] drm/xe: Add Xe SVM populate_vram_pfn vfunc Matthew Brost
2024-08-28  2:48 ` [RFC PATCH 21/28] drm/xe: Add Xe SVM vram_release vfunc Matthew Brost
2024-08-28  2:48 ` [RFC PATCH 22/28] drm/xe: Add BO flags required for SVM Matthew Brost
2024-08-28  2:48 ` Matthew Brost [this message]
2024-08-28 16:06   ` [RFC PATCH 23/28] drm/xe: Add SVM VRAM migration Daniel Vetter
2024-08-28 18:22     ` Daniel Vetter
2024-08-29  9:24     ` Christian König
2024-08-29  9:53       ` Thomas Hellström
2024-08-29 11:02         ` Daniel Vetter
2024-08-29 22:12           ` Matthew Brost
2024-08-29 22:23             ` Matthew Brost
2024-09-02 11:01             ` Christian König
2024-09-02 12:50               ` Daniel Vetter
2024-09-02 12:48             ` Daniel Vetter
2024-09-02 22:20               ` Matthew Brost
2024-09-03  8:07                 ` Simona Vetter
2024-08-29 14:30         ` Christian König
2024-08-29 21:53           ` Matthew Brost
2024-08-29 21:48       ` Matthew Brost
2024-09-02 13:02         ` Daniel Vetter
2024-08-28  2:48 ` [RFC PATCH 24/28] drm/xe: Basic SVM BO eviction Matthew Brost
2024-08-29 10:14   ` Daniel Vetter
2024-08-29 15:55     ` Matthew Brost
2024-09-02 13:05       ` Daniel Vetter
2024-08-28  2:48 ` [RFC PATCH 25/28] drm/xe: Add SVM debug Matthew Brost
2024-08-28  2:48 ` [RFC PATCH 26/28] drm/xe: Add modparam for SVM notifier size Matthew Brost
2024-08-28  2:49 ` [RFC PATCH 27/28] drm/xe: Add modparam for SVM prefault Matthew Brost
2024-08-28  2:49 ` [RFC PATCH 28/28] drm/gpusvm: Ensure all pages migrated upon eviction Matthew Brost
2024-08-28  2:55 ` ✓ CI.Patch_applied: success for Introduce GPU SVM and Xe SVM implementation Patchwork
2024-08-28  2:55 ` ✗ CI.checkpatch: warning " Patchwork
2024-08-28  2:56 ` ✗ CI.KUnit: failure " Patchwork
2024-09-24  9:16 ` [RFC PATCH 00/28] " Simona Vetter
2024-09-24 19:36   ` Matthew Brost
2024-09-25 11:41     ` Simona Vetter

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20240828024901.2582335-24-matthew.brost@intel.com \
    --to=matthew.brost@intel.com \
    --cc=airlied@gmail.com \
    --cc=christian.koenig@amd.com \
    --cc=daniel@ffwll.ch \
    --cc=dri-devel@lists.freedesktop.org \
    --cc=intel-xe@lists.freedesktop.org \
    --cc=matthew.auld@intel.com \
    --cc=thomas.hellstrom@linux.intel.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox