From: Nishit Sharma <nishit.sharma@intel.com>
To: igt-dev@lists.freedesktop.org
Subject: [PATCH i-g-t v4 8/8] tests/intel/xe_multi_gpusvm.c: Add SVM multi-GPU migration test
Date: Mon, 10 Nov 2025 04:02:04 +0000
Message-ID: <20251110040220.223836-1-nishit.sharma@intel.com>
In-Reply-To: <20251104153201.677938-1-nishit.sharma@intel.com>
This test allocates a buffer in SVM, accesses it from GPU 1, then GPU 2,
and then the CPU. It verifies that the buffer migrates correctly between
devices and remains accessible from all agents in a multi-GPU environment.
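
One detail worth calling out in the CPU step: memset() fills bytes, not
integers, so writing 10 over sizeof(int) bytes produces the 32-bit
pattern 0x0A0A0A0A that the test asserts. A minimal standalone sketch of
that expectation (plain C, little-endian, independent of the IGT
helpers; the calloc'd buffer is a hypothetical stand-in for the SVM
allocation):

	#include <assert.h>
	#include <stdint.h>
	#include <stdlib.h>
	#include <string.h>

	int main(void)
	{
		/* Stand-in for the SVM buffer; zeroed so the upper four
		 * bytes of the 64-bit read below are well defined. */
		uint64_t *buf = calloc(1, sizeof(*buf));

		assert(buf);

		/* memset() writes the byte 0x0A into each of the first
		 * sizeof(int) bytes; on little-endian targets the 64-bit
		 * load then reads back 0x0A0A0A0A. */
		memset(buf, 10, sizeof(int));
		assert(*buf == 0x0A0A0A0A);

		free(buf);
		return 0;
	}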
---
tests/intel/xe_multi_gpusvm.c | 197 ++++++++++++++++++++++++++++++++++
tests/intel/xe_multisvm.c | 2 +
2 files changed, 199 insertions(+)
diff --git a/tests/intel/xe_multi_gpusvm.c b/tests/intel/xe_multi_gpusvm.c
index a41c8b7ad..28d47f274 100644
--- a/tests/intel/xe_multi_gpusvm.c
+++ b/tests/intel/xe_multi_gpusvm.c
@@ -64,6 +64,11 @@
* Description:
* This test launches simultaneous workloads on both GPUs accessing the
* same SVM buffer, synchronizes with fences, and verifies data integrity
+ *
+ * SUBTEST: migrate-test-multi-gpu
+ * Description:
+ * This test allocates an SVM buffer, accesses it from GPU 1, GPU 2, and the CPU,
+ * and verifies migration and accessibility between devices
*/
#define MAX_XE_REGIONS 8
@@ -1255,6 +1260,181 @@ multigpu_access_test(struct xe_svm_gpu_info *gpu0,
xe_vm_destroy(gpu1->fd, vm[1]);
}
+static void
+multigpu_migrate_test(struct xe_svm_gpu_info *gpu0,
+ struct xe_svm_gpu_info *gpu1,
+ struct drm_xe_engine_class_instance *eci,
+ bool prefetch_req)
+{
+ uint64_t addr;
+ uint32_t vm[2];
+ uint32_t exec_queue[2];
+ uint32_t batch_bo, batch1_bo[2];
+ uint64_t batch_addr, batch1_addr[2];
+ struct drm_xe_sync sync = {};
+ volatile uint64_t *sync_addr;
+ int value = 60;
+ uint64_t *data1;
+ void *copy_dst;
+
+ vm[0] = xe_vm_create(gpu0->fd, DRM_XE_VM_CREATE_FLAG_LR_MODE | DRM_XE_VM_CREATE_FLAG_FAULT_MODE, 0);
+ exec_queue[0] = xe_exec_queue_create(gpu0->fd, vm[0], eci, 0);
+ xe_vm_bind_lr_sync(gpu0->fd, vm[0], 0, 0, 0, 1ull << gpu0->va_bits, DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR);
+
+ vm[1] = xe_vm_create(gpu1->fd, DRM_XE_VM_CREATE_FLAG_LR_MODE | DRM_XE_VM_CREATE_FLAG_FAULT_MODE, 0);
+ exec_queue[1] = xe_exec_queue_create(gpu1->fd, vm[1], eci, 0);
+ xe_vm_bind_lr_sync(gpu1->fd, vm[1], 0, 0, 0, 1ull << gpu1->va_bits, DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR);
+
+ data1 = aligned_alloc(SZ_2M, SZ_4K);
+ igt_assert(data1);
+ addr = to_user_pointer(data1);
+
+ copy_dst = aligned_alloc(SZ_2M, SZ_4K);
+ igt_assert(copy_dst);
+
+ xe_vm_madvise(gpu0->fd, vm[0], addr, SZ_4K, 0,
+ DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC,
+ DRM_XE_PREFERRED_LOC_DEFAULT_SYSTEM, 0, 0);
+
+ store_dword_batch_init(gpu0->fd, vm[0], addr, &batch1_bo[0], &batch1_addr[0], value);
+
+ /* Place the store destination (addr) in GPU0 local memory */
+ xe_vm_madvise(gpu0->fd, vm[0], addr, SZ_4K, 0,
+ DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC,
+ gpu0->fd, 0, gpu0->vram_regions[0]);
+
+ sync_addr = malloc(sizeof(*sync_addr));
+ igt_assert(!!sync_addr);
+ sync.flags = DRM_XE_SYNC_FLAG_SIGNAL;
+ sync.type = DRM_XE_SYNC_TYPE_USER_FENCE;
+ sync.addr = to_user_pointer((uint64_t *)sync_addr);
+ sync.timeline_value = BIND_SYNC_VAL;
+ *sync_addr = 0;
+
+ /* prefetch full buffer for GPU0 */
+ if (prefetch_req) {
+ xe_vm_prefetch_async(gpu0->fd, vm[0], 0, 0, addr, SZ_4K, &sync, 1,
+ DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC);
+ if (*sync_addr != BIND_SYNC_VAL)
+ xe_wait_ufence(gpu0->fd, (uint64_t *)sync_addr, BIND_SYNC_VAL, exec_queue[0],
+ NSEC_PER_SEC * 10);
+ }
+ free((void *)sync_addr);
+
+ sync_addr = (void *)((char *)batch1_addr[0] + SZ_4K);
+ sync.addr = to_user_pointer((uint64_t *)sync_addr);
+ sync.timeline_value = EXEC_SYNC_VAL;
+ *sync_addr = 0;
+
+ /* Execute STORE command on GPU0 */
+ xe_exec_sync(gpu0->fd, exec_queue[0], batch1_addr[0], &sync, 1);
+ if (*sync_addr != EXEC_SYNC_VAL)
+ xe_wait_ufence(gpu0->fd, (uint64_t *)sync_addr, EXEC_SYNC_VAL, exec_queue[0],
+ NSEC_PER_SEC * 10);
+
+ igt_assert_eq(*(uint64_t *)addr, value);
+
+ /* Create a store batch for GPU1 targeting addr, which still holds GPU0's value */
+ store_dword_batch_init(gpu1->fd, vm[1], addr, &batch1_bo[1], &batch1_addr[1], value + 10);
+
+ /* Place addr in GPU1 local memory to force migration from GPU0 */
+ xe_vm_madvise(gpu1->fd, vm[1], addr, SZ_4K, 0,
+ DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC,
+ gpu1->fd, 0, gpu1->vram_regions[0]);
+
+ sync_addr = malloc(sizeof(*sync_addr));
+ igt_assert(!!sync_addr);
+ sync.flags = DRM_XE_SYNC_FLAG_SIGNAL;
+ sync.type = DRM_XE_SYNC_TYPE_USER_FENCE;
+ sync.addr = to_user_pointer((uint64_t *)sync_addr);
+ sync.timeline_value = BIND_SYNC_VAL;
+ *sync_addr = 0;
+
+ /* prefetch full buffer for GPU1 */
+ if (prefetch_req) {
+ xe_vm_prefetch_async(gpu1->fd, vm[1], 0, 0, addr,
+ SZ_4K, &sync, 1,
+ DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC);
+ if (*sync_addr != BIND_SYNC_VAL)
+ xe_wait_ufence(gpu1->fd, (uint64_t *)sync_addr, BIND_SYNC_VAL, exec_queue[1],
+ NSEC_PER_SEC * 10);
+ }
+ free((void *)sync_addr);
+
+ sync_addr = (void *)((char *)batch1_addr[1] + SZ_4K);
+ sync.addr = to_user_pointer((uint64_t *)sync_addr);
+ sync.timeline_value = EXEC_SYNC_VAL;
+ *sync_addr = 0;
+
+ /* Execute STORE command on GPU1 */
+ xe_exec_sync(gpu1->fd, exec_queue[1], batch1_addr[1], &sync, 1);
+ if (*sync_addr != EXEC_SYNC_VAL)
+ xe_wait_ufence(gpu1->fd, (uint64_t *)sync_addr, EXEC_SYNC_VAL, exec_queue[1],
+ NSEC_PER_SEC * 10);
+
+ igt_assert_eq(*(uint64_t *)addr, value + 10);
+
+ /* CPU writes 10: memset() fills bytes, not an integer, so sizeof(int) bytes of 0x0A yield 0x0A0A0A0A */
+ memset((void *)(uintptr_t)addr, 10, sizeof(int));
+ igt_assert_eq(*(uint64_t *)addr, 0x0A0A0A0A);
+
+ /* Create a copy batch for GPU1 using addr, now holding the CPU-written pattern, as source */
+ batch_init(gpu1->fd, vm[1], addr, to_user_pointer(copy_dst),
+ SZ_4K, &batch1_bo[1], &batch1_addr[1]);
+
+ /* Place the copy source (addr) in GPU1 local memory */
+ xe_vm_madvise(gpu1->fd, vm[1], addr, SZ_4K, 0,
+ DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC,
+ gpu1->fd, 0, gpu1->vram_regions[0]);
+
+ sync_addr = malloc(sizeof(*sync_addr));
+ igt_assert(!!sync_addr);
+ sync.flags = DRM_XE_SYNC_FLAG_SIGNAL;
+ sync.type = DRM_XE_SYNC_TYPE_USER_FENCE;
+ sync.addr = to_user_pointer((uint64_t *)sync_addr);
+ sync.timeline_value = BIND_SYNC_VAL;
+ *sync_addr = 0;
+
+ /* prefetch full buffer for GPU1 */
+ if (prefetch_req) {
+ xe_vm_prefetch_async(gpu1->fd, vm[1], 0, 0, addr,
+ SZ_4K, &sync, 1,
+ DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC);
+ if (*sync_addr != BIND_SYNC_VAL)
+ xe_wait_ufence(gpu1->fd, (uint64_t *)sync_addr, BIND_SYNC_VAL, exec_queue[1],
+ NSEC_PER_SEC * 10);
+ }
+ free((void *)sync_addr);
+
+ sync_addr = (void *)((char *)batch1_addr[1] + SZ_4K);
+ sync.addr = to_user_pointer((uint64_t *)sync_addr);
+ sync.timeline_value = EXEC_SYNC_VAL;
+ *sync_addr = 0;
+
+ /* Execute COPY command on GPU1 */
+ xe_exec_sync(gpu1->fd, exec_queue[1], batch1_addr[1], &sync, 1);
+ if (*sync_addr != EXEC_SYNC_VAL)
+ xe_wait_ufence(gpu1->fd, (uint64_t *)sync_addr, EXEC_SYNC_VAL, exec_queue[1],
+ NSEC_PER_SEC * 10);
+
+ igt_assert_eq(*(uint64_t *)copy_dst, 0x0A0A0A0A);
+
+ munmap((void *)batch1_addr[0], BATCH_SIZE(gpu0->fd));
+ munmap((void *)batch1_addr[1], BATCH_SIZE(gpu1->fd));
+ batch_fini(gpu0->fd, vm[0], batch1_bo[0], batch1_addr[0]);
+ batch_fini(gpu1->fd, vm[1], batch1_bo[1], batch1_addr[1]);
+ free(data1);
+ free(copy_dst);
+
+ xe_vm_unbind_lr_sync(gpu0->fd, vm[0], 0, 0, 1ull << gpu0->va_bits);
+ xe_exec_queue_destroy(gpu0->fd, exec_queue[0]);
+ xe_vm_destroy(gpu0->fd, vm[0]);
+
+ xe_vm_unbind_lr_sync(gpu1->fd, vm[1], 0, 0, 1ull << gpu1->va_bits);
+ xe_exec_queue_destroy(gpu1->fd, exec_queue[1]);
+ xe_vm_destroy(gpu1->fd, vm[1]);
+}
+
static void
gpu_atomic_inc(struct xe_svm_gpu_info *src_gpu,
struct xe_svm_gpu_info *dst_gpu,
@@ -1328,6 +1508,18 @@ gpu_access_test(struct xe_svm_gpu_info *src_gpu,
multigpu_access_test(src_gpu, dst_gpu, eci, no_prefetch);
}
+static void
+gpu_migrate_test(struct xe_svm_gpu_info *src_gpu,
+ struct xe_svm_gpu_info *dst_gpu,
+ struct drm_xe_engine_class_instance *eci,
+ bool prefetch_req)
+{
+ igt_assert(src_gpu);
+ igt_assert(dst_gpu);
+
+ multigpu_migrate_test(src_gpu, dst_gpu, eci, prefetch_req);
+}
+
igt_main
{
struct xe_svm_gpu_info gpus[MAX_XE_GPUS];
@@ -1390,6 +1582,11 @@ igt_main
gpu_access_test(&gpus[0], &gpus[1], &eci, 1);
}
+ igt_subtest("migrate-test-multi-gpu") {
+ gpu_migrate_test(&gpus[0], &gpus[1], &eci, 0);
+ gpu_migrate_test(&gpus[0], &gpus[1], &eci, 1);
+ }
+
igt_fixture {
int cnt;
diff --git a/tests/intel/xe_multisvm.c b/tests/intel/xe_multisvm.c
index a57b3d62a..7bb41c62f 100644
--- a/tests/intel/xe_multisvm.c
+++ b/tests/intel/xe_multisvm.c
@@ -47,6 +47,7 @@ struct xe_svm_gpu_info {
int fd;
};
+#if 0
static void xe_vm_bind_lr_sync(int fd, uint32_t vm, uint32_t bo, uint64_t offset,
uint64_t addr, uint64_t size, uint32_t flags)
{
@@ -84,6 +85,7 @@ static void xe_vm_unbind_lr_sync(int fd, uint32_t vm, uint64_t offset,
xe_wait_ufence(fd, (uint64_t *)sync_addr, BIND_SYNC_VAL, 0, NSEC_PER_SEC * 10);
free((void *)sync_addr);
}
+#endif
static void batch_init(int fd, uint32_t vm, uint64_t src_addr, uint64_t dst_addr,
uint64_t copy_size, uint32_t *bo, uint64_t *addr)
--
2.48.1