From: Matthew Brost <matthew.brost@intel.com>
To: intel-xe@lists.freedesktop.org, dri-devel@lists.freedesktop.org
Cc: himal.prasad.ghimiray@intel.com, apopple@nvidia.com,
airlied@gmail.com, thomas.hellstrom@linux.intel.com,
simona.vetter@ffwll.ch, felix.kuehling@amd.com, dakr@kernel.org
Subject: [PATCH v6 09/32] drm/xe: Add SVM init / close / fini to faulting VMs
Date: Mon, 24 Feb 2025 20:42:48 -0800 [thread overview]
Message-ID: <20250225044311.3178695-10-matthew.brost@intel.com> (raw)
In-Reply-To: <20250225044311.3178695-1-matthew.brost@intel.com>
Add SVM init / close / fini to faulting VMs. Minimal implementation
acting as a placeholder for follow-on patches.
v2:
- Add close function
v3:
- Better commit message (Thomas)
- Kernel doc (Thomas)
- Update chunk array to be unsigned long (Thomas)
- Use new drm_gpusvm.h header location (Thomas)
- Newlines between functions in xe_svm.h (Thomas)
- Call drm_gpusvm_driver_set_lock in init (Thomas)
v6:
- Only compile if CONFIG_DRM_GPUSVM selected (CI, Lucas)
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
---
drivers/gpu/drm/xe/Makefile | 1 +
drivers/gpu/drm/xe/xe_svm.c | 73 ++++++++++++++++++++++++++++++++
drivers/gpu/drm/xe/xe_svm.h | 35 +++++++++++++++
drivers/gpu/drm/xe/xe_vm.c | 12 ++++++
drivers/gpu/drm/xe/xe_vm_types.h | 7 +++
5 files changed, 128 insertions(+)
create mode 100644 drivers/gpu/drm/xe/xe_svm.c
create mode 100644 drivers/gpu/drm/xe/xe_svm.h
diff --git a/drivers/gpu/drm/xe/Makefile b/drivers/gpu/drm/xe/Makefile
index 5db71b6f0534..db423e50a2a2 100644
--- a/drivers/gpu/drm/xe/Makefile
+++ b/drivers/gpu/drm/xe/Makefile
@@ -123,6 +123,7 @@ xe-y += xe_bb.o \
xe_wopcm.o
xe-$(CONFIG_HMM_MIRROR) += xe_hmm.o
+xe-$(CONFIG_DRM_GPUSVM) += xe_svm.o
# graphics hardware monitoring (HWMON) support
xe-$(CONFIG_HWMON) += xe_hwmon.o
diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c
new file mode 100644
index 000000000000..79da859f02b1
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_svm.c
@@ -0,0 +1,73 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#include "xe_svm.h"
+#include "xe_vm.h"
+#include "xe_vm_types.h"
+
+static void xe_svm_invalidate(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_notifier *notifier,
+ const struct mmu_notifier_range *mmu_range)
+{
+ /* TODO: Implement */
+}
+
+static const struct drm_gpusvm_ops gpusvm_ops = {
+ .invalidate = xe_svm_invalidate,
+};
+
+static const unsigned long fault_chunk_sizes[] = {
+ SZ_2M,
+ SZ_64K,
+ SZ_4K,
+};
+
+/**
+ * xe_svm_init() - SVM initialize
+ * @vm: The VM.
+ *
+ * Initialize SVM state which is embedded within the VM.
+ *
+ * Return: 0 on success, negative error code on error.
+ */
+int xe_svm_init(struct xe_vm *vm)
+{
+ int err;
+
+ err = drm_gpusvm_init(&vm->svm.gpusvm, "Xe SVM", &vm->xe->drm,
+ current->mm, NULL, 0, vm->size,
+ SZ_512M, &gpusvm_ops, fault_chunk_sizes,
+ ARRAY_SIZE(fault_chunk_sizes));
+ if (err)
+ return err;
+
+ drm_gpusvm_driver_set_lock(&vm->svm.gpusvm, &vm->lock);
+
+ return 0;
+}
+
+/**
+ * xe_svm_close() - SVM close
+ * @vm: The VM.
+ *
+ * Close SVM state (i.e., stop and flush all SVM actions).
+ */
+void xe_svm_close(struct xe_vm *vm)
+{
+ xe_assert(vm->xe, xe_vm_is_closed(vm));
+}
+
+/**
+ * xe_svm_fini() - SVM finalize
+ * @vm: The VM.
+ *
+ * Finalize SVM state which is embedded within the VM.
+ */
+void xe_svm_fini(struct xe_vm *vm)
+{
+ xe_assert(vm->xe, xe_vm_is_closed(vm));
+
+ drm_gpusvm_fini(&vm->svm.gpusvm);
+}
diff --git a/drivers/gpu/drm/xe/xe_svm.h b/drivers/gpu/drm/xe/xe_svm.h
new file mode 100644
index 000000000000..ac79b208304d
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_svm.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#ifndef _XE_SVM_H_
+#define _XE_SVM_H_
+
+struct xe_vm;
+
+#if IS_ENABLED(CONFIG_DRM_GPUSVM)
+int xe_svm_init(struct xe_vm *vm);
+
+void xe_svm_fini(struct xe_vm *vm);
+
+void xe_svm_close(struct xe_vm *vm);
+#else
+static inline
+int xe_svm_init(struct xe_vm *vm)
+{
+ return 0;
+}
+
+static inline
+void xe_svm_fini(struct xe_vm *vm)
+{
+}
+
+static inline
+void xe_svm_close(struct xe_vm *vm)
+{
+}
+#endif
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 2ab1fd7fa4a4..1454f98e0cf6 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -35,6 +35,7 @@
#include "xe_pt.h"
#include "xe_pxp.h"
#include "xe_res_cursor.h"
+#include "xe_svm.h"
#include "xe_sync.h"
#include "xe_trace_bo.h"
#include "xe_wa.h"
@@ -1550,6 +1551,12 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
}
}
+ if (flags & XE_VM_FLAG_FAULT_MODE) {
+ err = xe_svm_init(vm);
+ if (err)
+ goto err_close;
+ }
+
if (number_tiles > 1)
vm->composite_fence_ctx = dma_fence_context_alloc(1);
@@ -1595,6 +1602,8 @@ void xe_vm_close_and_put(struct xe_vm *vm)
xe_vm_close(vm);
if (xe_vm_in_preempt_fence_mode(vm))
flush_work(&vm->preempt.rebind_work);
+ if (xe_vm_in_fault_mode(vm))
+ xe_svm_close(vm);
down_write(&vm->lock);
for_each_tile(tile, xe, id) {
@@ -1663,6 +1672,9 @@ void xe_vm_close_and_put(struct xe_vm *vm)
xe_vma_destroy_unlocked(vma);
}
+ if (xe_vm_in_fault_mode(vm))
+ xe_svm_fini(vm);
+
up_write(&vm->lock);
down_write(&xe->usm.lock);
diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h
index 9f125d07d30f..18ec9003a48a 100644
--- a/drivers/gpu/drm/xe/xe_vm_types.h
+++ b/drivers/gpu/drm/xe/xe_vm_types.h
@@ -6,6 +6,7 @@
#ifndef _XE_VM_TYPES_H_
#define _XE_VM_TYPES_H_
+#include <drm/drm_gpusvm.h>
#include <drm/drm_gpuvm.h>
#include <linux/dma-resv.h>
@@ -140,6 +141,12 @@ struct xe_vm {
/** @gpuvm: base GPUVM used to track VMAs */
struct drm_gpuvm gpuvm;
+ /** @svm: Shared virtual memory state */
+ struct {
+ /** @svm.gpusvm: base GPUSVM used to track fault allocations */
+ struct drm_gpusvm gpusvm;
+ } svm;
+
struct xe_device *xe;
/* exec queue used for (un)binding vma's */
--
2.34.1
next prev parent reply other threads:[~2025-02-25 4:42 UTC|newest]
Thread overview: 56+ messages / expand[flat|nested] mbox.gz Atom feed top
2025-02-25 4:42 [PATCH v6 00/32] Introduce GPU SVM and Xe SVM implementation Matthew Brost
2025-02-25 4:42 ` [PATCH v6 01/32] drm/xe: Retry BO allocation Matthew Brost
2025-02-25 4:42 ` [PATCH v6 02/32] mm/migrate: Add migrate_device_pfns Matthew Brost
2025-02-25 4:42 ` [PATCH v6 03/32] mm/migrate: Trylock device page in do_swap_page Matthew Brost
2025-02-25 4:42 ` [PATCH v6 04/32] drm/pagemap: Add DRM pagemap Matthew Brost
2025-02-25 15:06 ` Matthew Auld
2025-02-25 18:16 ` Matthew Brost
2025-02-25 4:42 ` [PATCH v6 05/32] drm/xe/bo: Introduce xe_bo_put_async Matthew Brost
2025-02-25 4:42 ` [PATCH v6 06/32] drm/gpusvm: Add support for GPU Shared Virtual Memory Matthew Brost
2025-02-25 15:14 ` Matthew Auld
2025-02-25 18:16 ` Matthew Brost
2025-02-25 4:42 ` [PATCH v6 07/32] drm/xe: Select DRM_GPUSVM Kconfig Matthew Brost
2025-02-25 4:42 ` [PATCH v6 08/32] drm/xe/uapi: Add DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR Matthew Brost
2025-02-25 4:42 ` Matthew Brost [this message]
2025-02-25 4:42 ` [PATCH v6 10/32] drm/xe: Add dma_addr res cursor Matthew Brost
2025-02-25 4:42 ` [PATCH v6 11/32] drm/xe: Nuke VM's mapping upon close Matthew Brost
2025-02-25 18:05 ` Matthew Auld
2025-02-25 18:14 ` Matthew Brost
2025-02-25 4:42 ` [PATCH v6 12/32] drm/xe: Add SVM range invalidation and page fault Matthew Brost
2025-02-25 4:42 ` [PATCH v6 13/32] drm/gpuvm: Add DRM_GPUVA_OP_DRIVER Matthew Brost
2025-02-25 4:42 ` [PATCH v6 14/32] drm/xe: Add (re)bind to SVM page fault handler Matthew Brost
2025-02-26 17:00 ` Thomas Hellström
2025-02-26 17:18 ` Ghimiray, Himal Prasad
2025-02-25 4:42 ` [PATCH v6 15/32] drm/xe: Add SVM garbage collector Matthew Brost
2025-02-25 4:42 ` [PATCH v6 16/32] drm/xe: Add unbind to " Matthew Brost
2025-02-25 4:42 ` [PATCH v6 17/32] drm/xe: Do not allow CPU address mirror VMA unbind if the GPU has bindings Matthew Brost
2025-02-27 17:01 ` Thomas Hellström
2025-02-25 4:42 ` [PATCH v6 18/32] drm/xe: Enable CPU address mirror uAPI Matthew Brost
2025-02-25 4:42 ` [PATCH v6 19/32] drm/xe/uapi: Add DRM_XE_QUERY_CONFIG_FLAG_HAS_CPU_ADDR_MIRROR Matthew Brost
2025-02-25 4:42 ` [PATCH v6 20/32] drm/xe: Add migrate layer functions for SVM support Matthew Brost
2025-02-25 4:43 ` [PATCH v6 21/32] drm/xe: Add SVM device memory mirroring Matthew Brost
2025-02-25 4:43 ` [PATCH v6 22/32] drm/xe: Add drm_gpusvm_devmem to xe_bo Matthew Brost
2025-02-25 4:43 ` [PATCH v6 23/32] drm/xe: Add drm_pagemap ops to SVM Matthew Brost
2025-02-25 4:43 ` [PATCH v6 24/32] drm/xe: Add GPUSVM device memory copy vfunc functions Matthew Brost
2025-02-25 4:43 ` [PATCH v6 25/32] drm/xe: Add Xe SVM populate_devmem_pfn GPU SVM vfunc Matthew Brost
2025-02-25 4:43 ` [PATCH v6 26/32] drm/xe: Add Xe SVM devmem_release " Matthew Brost
2025-02-25 4:43 ` [PATCH v6 27/32] drm/xe: Add SVM VRAM migration Matthew Brost
2025-02-26 16:47 ` Thomas Hellström
2025-02-26 17:16 ` Ghimiray, Himal Prasad
2025-02-25 4:43 ` [PATCH v6 28/32] drm/xe: Basic SVM BO eviction Matthew Brost
2025-02-25 4:43 ` [PATCH v6 29/32] drm/xe: Add SVM debug Matthew Brost
2025-02-25 4:43 ` [PATCH v6 30/32] drm/xe: Add modparam for SVM notifier size Matthew Brost
2025-02-25 4:43 ` [PATCH v6 31/32] drm/xe: Add always_migrate_to_vram modparam Matthew Brost
2025-02-25 4:43 ` [PATCH v6 32/32] drm/doc: gpusvm: Add GPU SVM documentation Matthew Brost
2025-02-28 2:34 ` Alistair Popple
2025-02-28 4:36 ` Matthew Brost
2025-02-28 5:53 ` Alistair Popple
2025-03-01 0:35 ` Matthew Brost
2025-02-25 4:50 ` ✓ CI.Patch_applied: success for Introduce GPU SVM and Xe SVM implementation (rev6) Patchwork
2025-02-25 4:51 ` ✗ CI.checkpatch: warning " Patchwork
2025-02-25 4:52 ` ✓ CI.KUnit: success " Patchwork
2025-02-25 5:08 ` ✓ CI.Build: " Patchwork
2025-02-25 5:10 ` ✗ CI.Hooks: failure " Patchwork
2025-02-25 5:12 ` ✗ CI.checksparse: warning " Patchwork
2025-02-25 5:32 ` ✓ Xe.CI.BAT: success " Patchwork
2025-02-25 9:55 ` ✗ Xe.CI.Full: failure " Patchwork
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20250225044311.3178695-10-matthew.brost@intel.com \
--to=matthew.brost@intel.com \
--cc=airlied@gmail.com \
--cc=apopple@nvidia.com \
--cc=dakr@kernel.org \
--cc=dri-devel@lists.freedesktop.org \
--cc=felix.kuehling@amd.com \
--cc=himal.prasad.ghimiray@intel.com \
--cc=intel-xe@lists.freedesktop.org \
--cc=simona.vetter@ffwll.ch \
--cc=thomas.hellstrom@linux.intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox