From: nishit.sharma@intel.com
To: igt-dev@lists.freedesktop.org, nishit.sharma@intel.com
Subject: [PATCH i-g-t v2 3/7] tests/intel/xe_multi_gpusvm.c: Add SVM multi-GPU coherency test
Date: Tue,  4 Nov 2025 15:31:55 +0000
Message-ID: <20251104153201.677938-4-nishit.sharma@intel.com>
In-Reply-To: <20251104153201.677938-1-nishit.sharma@intel.com>

From: Nishit Sharma <nishit.sharma@intel.com>

This test verifies memory coherency in a multi-GPU environment using SVM.
GPU0 writes to a shared buffer, GPU1 reads it back and checks for the
correct data, and the sequence is repeated with the CPU and both GPUs to
ensure consistent memory visibility across all agents. The subtest runs
once with prefetch enabled and once exercising the coherency-fail path,
where both GPUs store back to back to the same address.

Signed-off-by: Nishit Sharma <nishit.sharma@intel.com>
---
 tests/intel/xe_multi_gpusvm.c | 240 ++++++++++++++++++++++++++++++++++-
 1 file changed, 238 insertions(+), 2 deletions(-)

diff --git a/tests/intel/xe_multi_gpusvm.c b/tests/intel/xe_multi_gpusvm.c
index 71bf01ba8..2c64de209 100644
--- a/tests/intel/xe_multi_gpusvm.c
+++ b/tests/intel/xe_multi_gpusvm.c
@@ -34,8 +34,13 @@
  * SUBTEST: atomic-inc-gpu-op
  * Description:
  * 	This test does atomic operation in multi-gpu by executing atomic
- *	operation on GPU1 and then atomic operation on GPU2 using same
- *	adress
+ * 	operation on GPU1 and then an atomic operation on GPU2 using
+ * 	the same address
+ *
+ * SUBTEST: coherency-multi-gpu
+ * Description:
+ * 	This test checks coherency across GPUs: GPU0 writes to a shared
+ * 	buffer, GPU1 reads back and verifies; repeated with CPU and both GPUs
  */
 
 #define MAX_XE_REGIONS	8
@@ -94,6 +99,39 @@ atomic_batch_init(int fd, uint32_t vm, uint64_t src_addr,
 	*addr = batch_addr;
 }
 
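+/*
+ * Build a batch with a single MI_STORE_DWORD_IMM writing 'value' at
+ * src_addr; the batch bo is bound at its own CPU mapping address.
+ */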
+static void
+store_dword_batch_init(int fd, uint32_t vm, uint64_t src_addr,
+		       uint32_t *bo, uint64_t *addr, int value)
+{
+	uint32_t batch_bo_size = BATCH_SIZE(fd);
+	uint32_t batch_bo;
+	uint64_t batch_addr;
+	void *batch;
+	uint32_t *cmd;
+	int i = 0;
+
+	batch_bo = xe_bo_create(fd, vm, batch_bo_size, vram_if_possible(fd, 0), 0);
+	batch = xe_bo_map(fd, batch_bo, batch_bo_size);
+	cmd = (uint32_t *) batch;
+
+	cmd[i++] = MI_STORE_DWORD_IMM_GEN4;
+	cmd[i++] = src_addr;
+	cmd[i++] = src_addr >> 32;
+	cmd[i++] = value;
+	cmd[i++] = MI_BATCH_BUFFER_END;
+
+	batch_addr = to_user_pointer(batch);
+
+	/* Punch a gap in the SVM map where we map the batch_bo */
+	xe_vm_bind_lr_sync(fd, vm, batch_bo, 0, batch_addr, batch_bo_size, 0);
+	*bo = batch_bo;
+	*addr = batch_addr;
+}
+
 static void batch_init(int fd, uint32_t vm, uint64_t src_addr,
 		       uint64_t dst_addr, uint64_t copy_size,
 		       uint32_t *bo, uint64_t *addr)
@@ -265,6 +303,186 @@ gpu_mem_access(struct xe_svm_gpu_info *src_gpu,
 	copy_src_dst(src_gpu, dst_gpu, eci);
 }
 
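+/*
+ * Coherency flow: GPU0 stores a dword to a shared SVM buffer, GPU1 copies
+ * it out and the copy is verified, then the CPU writes the same buffer.
+ * With coh_fail_set, both GPUs store back to back to the same address.
+ */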
+static void
+coherency_test_multigpu(struct xe_svm_gpu_info *gpu0,
+			struct xe_svm_gpu_info *gpu1,
+			struct drm_xe_engine_class_instance *eci,
+			bool coh_fail_set,
+			bool prefetch_req)
+{
+	uint64_t addr;
+	uint32_t vm[2];
+	uint32_t exec_queue[2];
+	uint32_t batch_bo, copy_bo, batch1_bo[2];
+	uint64_t batch_addr, copy_addr, batch1_addr[2];
+	struct drm_xe_sync sync = {};
+	volatile uint64_t *sync_addr;
+	int value = 60;
+	uint64_t *data1;
+	void *copy_dst;
+
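+	/* One LR, fault-mode VM per GPU, mirroring the CPU address space */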
+	vm[0] = xe_vm_create(gpu0->fd, DRM_XE_VM_CREATE_FLAG_LR_MODE | DRM_XE_VM_CREATE_FLAG_FAULT_MODE, 0);
+	exec_queue[0] = xe_exec_queue_create(gpu0->fd, vm[0], eci, 0);
+	xe_vm_bind_lr_sync(gpu0->fd, vm[0], 0, 0, 0, 1ull << gpu0->va_bits, DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR);
+
+	vm[1] = xe_vm_create(gpu1->fd, DRM_XE_VM_CREATE_FLAG_LR_MODE | DRM_XE_VM_CREATE_FLAG_FAULT_MODE, 0);
+	exec_queue[1] = xe_exec_queue_create(gpu1->fd, vm[1], eci, 0);
+	xe_vm_bind_lr_sync(gpu1->fd, vm[1], 0, 0, 0, 1ull << gpu1->va_bits, DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR);
+
+	data1 = aligned_alloc(SZ_2M, SZ_4K);
+	igt_assert(data1);
+	addr = to_user_pointer(data1);
+
+	copy_dst = aligned_alloc(SZ_2M, SZ_4K);
+	igt_assert(copy_dst);
+
+	store_dword_batch_init(gpu0->fd, vm[0], addr, &batch_bo, &batch_addr, value);
+
+	/* Place the shared buffer in GPU0 local memory (VRAM) */
+	xe_vm_madvise(gpu0->fd, vm[0], addr, SZ_4K, 0,
+		      DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC,
+		      gpu0->fd, 0, gpu0->vram_regions[0]);
+
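+	/* CPU-visible user fence used to wait for the async prefetch bind */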
+	sync_addr = malloc(sizeof(*sync_addr));
+	igt_assert(!!sync_addr);
+	sync.flags = DRM_XE_SYNC_FLAG_SIGNAL;
+	sync.type = DRM_XE_SYNC_TYPE_USER_FENCE;
+	sync.addr = to_user_pointer((uint64_t *)sync_addr);
+	sync.timeline_value = BIND_SYNC_VAL;
+	*sync_addr = 0;
+
+	/* Prefetch the whole buffer to GPU0's preferred location */
+	if (prefetch_req) {
+		xe_vm_prefetch_async(gpu0->fd, vm[0], 0, 0, addr, SZ_4K, &sync, 1,
+				     DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC);
+		if (*sync_addr != BIND_SYNC_VAL)
+			xe_wait_ufence(gpu0->fd, (uint64_t *)sync_addr, BIND_SYNC_VAL, exec_queue[0],
+				       NSEC_PER_SEC * 10);
+	}
+	free((void *)sync_addr);
+
+	sync_addr = (void *)((char *)batch_addr + SZ_4K);
+	sync.addr = to_user_pointer((uint64_t *)sync_addr);
+	sync.timeline_value = EXEC_SYNC_VAL;
+	*sync_addr = 0;
+
+	/* Execute STORE command on GPU0 */
+	xe_exec_sync(gpu0->fd, exec_queue[0], batch_addr, &sync, 1);
+	if (*sync_addr != EXEC_SYNC_VAL)
+		xe_wait_ufence(gpu0->fd, (uint64_t *)sync_addr, EXEC_SYNC_VAL, exec_queue[0],
+			       NSEC_PER_SEC * 10);
+
+	igt_assert_eq(*(uint32_t *)addr, value);
+
+	/* Create a batch for GPU1 with addr as source, holding the value written by GPU0 */
+	batch_init(gpu1->fd, vm[1], addr, to_user_pointer(copy_dst),
+		   SZ_4K, &copy_bo, &copy_addr);
+
+	/* Place the shared buffer in GPU1 local memory (VRAM) */
+	xe_vm_madvise(gpu1->fd, vm[1], addr, SZ_4K, 0,
+		      DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC,
+		      gpu1->fd, 0, gpu1->vram_regions[0]);
+
+	sync_addr = malloc(sizeof(*sync_addr));
+	igt_assert(!!sync_addr);
+	sync.flags = DRM_XE_SYNC_FLAG_SIGNAL;
+	sync.type = DRM_XE_SYNC_TYPE_USER_FENCE;
+	sync.addr = to_user_pointer((uint64_t *)sync_addr);
+	sync.timeline_value = BIND_SYNC_VAL;
+	*sync_addr = 0;
+
+	/* Prefetch the whole buffer to GPU1's preferred location */
+	if (prefetch_req) {
+		xe_vm_prefetch_async(gpu1->fd, vm[1], 0, 0, addr,
+				     SZ_4K, &sync, 1,
+				     DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC);
+		if (*sync_addr != BIND_SYNC_VAL)
+			xe_wait_ufence(gpu1->fd, (uint64_t *)sync_addr, BIND_SYNC_VAL, exec_queue[1],
+				       NSEC_PER_SEC * 10);
+	}
+	free((void *)sync_addr);
+
+	sync_addr = (void *)((char *)copy_addr + SZ_4K);
+	sync.addr = to_user_pointer((uint64_t *)sync_addr);
+	sync.timeline_value = EXEC_SYNC_VAL;
+	*sync_addr = 0;
+
+	/* Execute COPY command on GPU1 */
+	xe_exec_sync(gpu1->fd, exec_queue[1], copy_addr, &sync, 1);
+	if (*sync_addr != EXEC_SYNC_VAL)
+		xe_wait_ufence(gpu1->fd, (uint64_t *)sync_addr, EXEC_SYNC_VAL, exec_queue[1],
+			       NSEC_PER_SEC * 10);
+
+	igt_assert_eq(*(uint32_t *)copy_dst, value);
+
+	/* CPU writes 10: memset fills bytes, so 4 bytes become 0x0A0A0A0A */
+	memset((void *)(uintptr_t)addr, 10, sizeof(int));
+	igt_assert_eq(*(uint32_t *)addr, 0x0A0A0A0A);
+
+	/* Coherency fail scenario: back-to-back stores to the same address */
+	store_dword_batch_init(gpu0->fd, vm[0], addr, &batch1_bo[0], &batch1_addr[0], value + 10);
+	store_dword_batch_init(gpu1->fd, vm[1], addr, &batch1_bo[1], &batch1_addr[1], value + 20);
+
+	if (coh_fail_set) {
+		igt_info("coherency fail impl\n");
+
+		sync_addr = (void *)((char *)batch1_addr[0] + SZ_4K);
+		sync.addr = to_user_pointer((uint64_t *)sync_addr);
+		sync.timeline_value = EXEC_SYNC_VAL;
+		*sync_addr = 0;
+
+		/* Execute STORE command on GPU0 */
+		xe_exec_sync(gpu0->fd, exec_queue[0], batch1_addr[0], &sync, 1);
+		if (*sync_addr != EXEC_SYNC_VAL)
+			xe_wait_ufence(gpu0->fd, (uint64_t *)sync_addr, EXEC_SYNC_VAL, exec_queue[0],
+				       NSEC_PER_SEC * 10);
+
+		sync_addr = (void *)((char *)batch1_addr[1] + SZ_4K);
+		sync.addr = to_user_pointer((uint64_t *)sync_addr);
+		sync.timeline_value = EXEC_SYNC_VAL;
+		*sync_addr = 0;
+
+		/* Execute STORE command on GPU1 */
+		xe_exec_sync(gpu1->fd, exec_queue[1], batch1_addr[1], &sync, 1);
+		if (*sync_addr != EXEC_SYNC_VAL)
+			xe_wait_ufence(gpu1->fd, (uint64_t *)sync_addr, EXEC_SYNC_VAL, exec_queue[1],
+				       NSEC_PER_SEC * 10);
+
+		igt_assert_f(*(uint32_t *)addr == (value + 10),
+			     "GPU1 has overwritten value at addr\n");
+	}
+
+	/* CPU writes 11: memset fills bytes, so 4 bytes become 0x0B0B0B0B */
+	memset((void *)(uintptr_t)addr, 11, sizeof(int));
+	igt_assert_eq(*(uint32_t *)addr, 0x0B0B0B0B);
+
+	munmap((void *)batch_addr, BATCH_SIZE(gpu0->fd));
+	batch_fini(gpu0->fd, vm[0], batch_bo, batch_addr);
+	munmap((void *)copy_addr, BATCH_SIZE(gpu1->fd));
+	batch_fini(gpu1->fd, vm[1], copy_bo, copy_addr);
+	munmap((void *)batch1_addr[0], BATCH_SIZE(gpu0->fd));
+	batch_fini(gpu0->fd, vm[0], batch1_bo[0], batch1_addr[0]);
+	munmap((void *)batch1_addr[1], BATCH_SIZE(gpu1->fd));
+	batch_fini(gpu1->fd, vm[1], batch1_bo[1], batch1_addr[1]);
+	free(data1);
+	free(copy_dst);
+
+	xe_vm_unbind_lr_sync(gpu0->fd, vm[0], 0, 0, 1ull << gpu0->va_bits);
+	xe_exec_queue_destroy(gpu0->fd, exec_queue[0]);
+	xe_vm_destroy(gpu0->fd, vm[0]);
+
+	xe_vm_unbind_lr_sync(gpu1->fd, vm[1], 0, 0, 1ull << gpu1->va_bits);
+	xe_exec_queue_destroy(gpu1->fd, exec_queue[1]);
+	xe_vm_destroy(gpu1->fd, vm[1]);
+}
+
 static void
 atomic_inc_op(struct xe_svm_gpu_info *gpu0,
 	      struct xe_svm_gpu_info *gpu1,
@@ -403,6 +621,19 @@ gpu_atomic_inc(struct xe_svm_gpu_info *src_gpu,
 	atomic_inc_op(src_gpu, dst_gpu, eci, prefetch_req);
 }
 
+static void
+gpu_coherency_test(struct xe_svm_gpu_info *src_gpu,
+		   struct xe_svm_gpu_info *dst_gpu,
+		   struct drm_xe_engine_class_instance *eci,
+		   bool coh_fail,
+		   bool prefetch_req)
+{
+	igt_assert(src_gpu);
+	igt_assert(dst_gpu);
+
+	coherency_test_multigpu(src_gpu, dst_gpu, eci, coh_fail, prefetch_req);
+}
+
 igt_main
 {
 	struct xe_svm_gpu_info gpus[MAX_XE_GPUS];
@@ -442,6 +673,11 @@ igt_main
 		gpu_atomic_inc(&gpus[0], &gpus[1], &eci, 0);
 	}
 
+	igt_subtest("coherency-multi-gpu") {
+		gpu_coherency_test(&gpus[0], &gpus[1], &eci, false, true);
+		gpu_coherency_test(&gpus[0], &gpus[1], &eci, true, false);
+	}
+
 	igt_fixture {
 		int cnt;
 
-- 
2.48.1


Thread overview: 21+ messages
2025-11-04 15:31 [PATCH i-g-t v2 0/7] Madvise feature in SVM for Multi-GPU configs nishit.sharma
2025-11-04 15:31 ` [PATCH i-g-t v2 1/7] tests/intel/xe_multi_gpusvm.c: Add SVM multi-GPU cross-GPU memory access test nishit.sharma
2025-11-04 15:31 ` [PATCH i-g-t v2 2/7] tests/intel/xe_multi_gpusvm.c: Add SVM multi-GPU atomic operations nishit.sharma
2025-11-04 15:31 ` nishit.sharma [this message]
2025-11-04 15:31 ` [PATCH i-g-t v2 4/7] tests/intel/xe_multi_gpusvm.c: Add SVM multi-GPU performance test nishit.sharma
2025-11-04 15:31 ` [PATCH i-g-t v2 5/7] tests/intel/xe_multi_gpusvm.c: Add SVM multi-GPU fault handling test nishit.sharma
2025-11-04 15:31 ` [PATCH i-g-t v2 6/7] tests/intel/xe_multi_gpusvm.c: Add SVM multi-GPU simultaneous access test nishit.sharma
2025-11-10  3:50 ` [PATCH i-g-t v4 8/8] tests/intel/xe_multi-gpusvm.c: Add SVM multi-GPU migration test Nishit Sharma
2025-11-10  3:59 ` Nishit Sharma
2025-11-10  4:02 ` Nishit Sharma
2025-11-13 17:00 ` [PATCH i-g-t v7 00/10] Madvise feature in SVM for Multi-GPU configs Nishit Sharma
2025-11-13 17:00   ` [PATCH i-g-t v7 01/10] lib/xe: Add instance parameter to xe_vm_madvise and introduce lr_sync helpers Nishit Sharma
2025-11-13 17:00   ` [PATCH i-g-t v7 02/10] tests/intel/xe_exec_system_allocator: Add parameter in madvise call Nishit Sharma
2025-11-13 17:00   ` [PATCH i-g-t v7 03/10] tests/intel/xe_multi_gpusvm: Add SVM multi-GPU cross-GPU memory access test Nishit Sharma
2025-11-13 17:00   ` [PATCH i-g-t v7 04/10] tests/intel/xe_multi_gpusvm: Add SVM multi-GPU atomic operations Nishit Sharma
2025-11-13 17:00   ` [PATCH i-g-t v7 05/10] tests/intel/xe_multi_gpusvm: Add SVM multi-GPU coherency test Nishit Sharma
2025-11-13 17:00   ` [PATCH i-g-t v7 06/10] tests/intel/xe_multi_gpusvm: Add SVM multi-GPU performance test Nishit Sharma
2025-11-13 17:00   ` [PATCH i-g-t v7 07/10] tests/intel/xe_multi_gpusvm: Add SVM multi-GPU fault handling test Nishit Sharma
2025-11-13 17:00   ` [PATCH i-g-t v7 08/10] tests/intel/xe_multi_gpusvm: Add SVM multi-GPU simultaneous access test Nishit Sharma
2025-11-13 17:00   ` [PATCH i-g-t v7 09/10] tests/intel/xe_multi_gpusvm: Add SVM multi-GPU conflicting madvise test Nishit Sharma
2025-11-13 17:00   ` [PATCH i-g-t v7 10/10] tests/intel/xe_multi-gpusvm: Add SVM multi-GPU migration test Nishit Sharma