From: Oak Zeng <oak.zeng@intel.com>
To: intel-xe@lists.freedesktop.org
Cc: himal.prasad.ghimiray@intel.com, krishnaiah.bommu@intel.com,
matthew.brost@intel.com, Thomas.Hellstrom@linux.intel.com,
brian.welty@intel.com
Subject: [v2 25/31] drm/xe/svm: Add vm to xe_svm process
Date: Tue, 9 Apr 2024 16:17:36 -0400
Message-ID: <20240409201742.3042626-26-oak.zeng@intel.com>
In-Reply-To: <20240409201742.3042626-1-oak.zeng@intel.com>
One shared virtual address space (xe_svm) works across the CPU
and multiple GPUs under one CPU process. Each xe_svm process
can therefore have multiple gpu vms, for example one gpu vm per
gpu card. Add the gpu vm to the current xe_svm process during
xe_vm creation, to note that this gpu vm participates in the
shared virtual address space of the current CPU process, and
remove the xe_vm from the xe_svm on xe_vm destroy.
FIXME: right now we blindly add every xe_vm to svm. Should we
introduce a uAPI to let the user decide which xe_vm participates
in svm?
Signed-off-by: Oak Zeng <oak.zeng@intel.com>
---
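For illustration only, here is a standalone sketch of the bookkeeping
this patch adds: one svm owns a list of participating vms, a vm is
linked in when it is created and unlinked when it is destroyed, and
the svm itself is torn down once the last vm has left. This is plain
user-space C with made-up struct and function names, not the driver's
types or locking; it only models the list semantics behind
xe_svm_add_vm()/xe_svm_remove_vm().

  /*
   * Standalone sketch, NOT driver code: models the xe_svm <-> xe_vm
   * bookkeeping with made-up types. One svm owns a list of vms; the
   * svm is freed when the last vm is removed.
   */
  #include <stdlib.h>

  struct vm {
          struct vm *next;        /* stand-in for xe_vm::svm_link */
  };

  struct svm {
          struct vm *vm_list;     /* stand-in for xe_svm::vm_list */
  };

  static void svm_add_vm(struct svm *svm, struct vm *vm)
  {
          /* like xe_svm_add_vm(): link the vm into the svm's list */
          vm->next = svm->vm_list;
          svm->vm_list = vm;
  }

  static void svm_remove_vm(struct svm *svm, struct vm *vm)
  {
          struct vm **pp;

          /* like xe_svm_remove_vm(): unlink the vm ... */
          for (pp = &svm->vm_list; *pp; pp = &(*pp)->next) {
                  if (*pp == vm) {
                          *pp = vm->next;
                          break;
                  }
          }

          /* ... and destroy the svm once no vm participates any more */
          if (!svm->vm_list)
                  free(svm);
  }

  int main(void)
  {
          struct svm *svm = calloc(1, sizeof(*svm));
          struct vm vm0 = { NULL }, vm1 = { NULL };

          if (!svm)
                  return 1;

          svm_add_vm(svm, &vm0);
          svm_add_vm(svm, &vm1);
          svm_remove_vm(svm, &vm1);
          svm_remove_vm(svm, &vm0); /* last vm gone: svm is freed here */
          return 0;
  }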
drivers/gpu/drm/xe/xe_svm.c | 45 ++++++++++++++++++++++++++++++++
drivers/gpu/drm/xe/xe_svm.h | 3 +++
drivers/gpu/drm/xe/xe_vm.c | 5 ++++
drivers/gpu/drm/xe/xe_vm_types.h | 2 ++
4 files changed, 55 insertions(+)
diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c
index 416cfc81c053..1f4c2d32121a 100644
--- a/drivers/gpu/drm/xe/xe_svm.c
+++ b/drivers/gpu/drm/xe/xe_svm.c
@@ -8,6 +8,7 @@
#include <linux/kernel.h>
#include <linux/hashtable.h>
#include "xe_svm.h"
+#include "xe_vm_types.h"
#define XE_MAX_SVM_PROCESS 5 /* Support up to 32 (2^5) SVM processes */
DEFINE_HASHTABLE(xe_svm_table, XE_MAX_SVM_PROCESS);
@@ -75,3 +76,47 @@ struct xe_svm *xe_lookup_svm_by_mm(struct mm_struct *mm)
return NULL;
}
+
+/**
+ * xe_svm_add_vm() - add a gpu vm to the current svm process
+ *
+ * @vm: The gpu vm to add to the current svm process.
+ *
+ * One shared virtual address space (xe_svm) works across CPU
+ * and multiple GPUs. So each xe_svm process can have N gpu
+ * vms, for example, one gpu vm per gpu card. This function
+ * adds a gpu vm to the current xe_svm process.
+ */
+void xe_svm_add_vm(struct xe_vm *vm)
+{
+ struct xe_svm *svm;
+
+ svm = xe_lookup_svm_by_mm(current->mm);
+ if (!svm)
+ svm = xe_create_svm();
+
+ mutex_lock(&svm->mutex);
+ list_add(&vm->svm_link, &svm->vm_list);
+ mutex_unlock(&svm->mutex);
+}
+
+/**
+ * xe_svm_remove_vm() - remove a gpu vm from the svm process
+ *
+ * @vm: The gpu vm to remove from the svm process.
+ */
+void xe_svm_remove_vm(struct xe_vm *vm)
+{
+ struct xe_svm *svm;
+
+ svm = xe_lookup_svm_by_mm(current->mm);
+ if (!svm)
+ return;
+
+ mutex_lock(&svm->mutex);
+ list_del(&vm->svm_link);
+ mutex_unlock(&svm->mutex);
+
+ if (list_empty(&svm->vm_list))
+ xe_destroy_svm(svm);
+}
diff --git a/drivers/gpu/drm/xe/xe_svm.h b/drivers/gpu/drm/xe/xe_svm.h
index 066740fb93f5..f601dffe3fc1 100644
--- a/drivers/gpu/drm/xe/xe_svm.h
+++ b/drivers/gpu/drm/xe/xe_svm.h
@@ -11,6 +11,7 @@
#include "xe_device.h"
#include "xe_assert.h"
+struct xe_vm;
/**
* struct xe_svm - data structure to represent a shared
@@ -33,6 +34,8 @@ struct xe_svm {
extern struct xe_svm *xe_create_svm(void);
void xe_destroy_svm(struct xe_svm *svm);
extern struct xe_svm *xe_lookup_svm_by_mm(struct mm_struct *mm);
+void xe_svm_add_vm(struct xe_vm *vm);
+void xe_svm_remove_vm(struct xe_vm *vm);
/**
* xe_mem_region_pfn_to_dpa() - Calculate page's dpa from pfn
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 61d336f24a65..498b36469d00 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -40,6 +40,7 @@
#include "xe_trace.h"
#include "xe_wa.h"
#include "xe_hmm.h"
+#include "xe_svm.h"
static struct drm_gem_object *xe_vm_obj(struct xe_vm *vm)
{
@@ -1347,6 +1348,7 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
INIT_LIST_HEAD(&vm->userptr.repin_list);
INIT_LIST_HEAD(&vm->userptr.invalidated);
INIT_LIST_HEAD(&vm->userptr.fault_invalidated);
+ INIT_LIST_HEAD(&vm->svm_link);
init_rwsem(&vm->userptr.notifier_lock);
spin_lock_init(&vm->userptr.invalidated_lock);
INIT_WORK(&vm->userptr.garbage_collector, vm_userptr_garbage_collector);
@@ -1445,6 +1447,8 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
xe->usm.num_vm_in_non_fault_mode++;
mutex_unlock(&xe->usm.lock);
+ /* FIXME: Should we add vm to svm conditionally? Per uAPI? */
+ xe_svm_add_vm(vm);
trace_xe_vm_create(vm);
return vm;
@@ -1562,6 +1566,7 @@ void xe_vm_close_and_put(struct xe_vm *vm)
for_each_tile(tile, xe, id)
xe_range_fence_tree_fini(&vm->rftree[id]);
+ xe_svm_remove_vm(vm);
xe_vm_put(vm);
}
diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h
index d1f5949d4a3b..eb797195c374 100644
--- a/drivers/gpu/drm/xe/xe_vm_types.h
+++ b/drivers/gpu/drm/xe/xe_vm_types.h
@@ -394,6 +394,8 @@ struct xe_vm {
bool batch_invalidate_tlb;
/** @xef: XE file handle for tracking this VM's drm client */
struct xe_file *xef;
+ /** @svm_link: used to link this vm to xe_svm's vm_list */
+ struct list_head svm_link;
};
#endif
--
2.26.3