From: nishit.sharma@intel.com
To: igt-dev@lists.freedesktop.org
Subject: [PATCH i-g-t v7 09/10] tests/intel/xe_multi_gpusvm: Add SVM multi-GPU conflicting madvise test
Date: Thu, 13 Nov 2025 17:16:20 +0000
Message-ID: <20251113171621.635811-10-nishit.sharma@intel.com>
In-Reply-To: <20251113171621.635811-1-nishit.sharma@intel.com>
From: Nishit Sharma <nishit.sharma@intel.com>
This test issues madvise operations on GPU0 with the preferred location
set to GPU1's VRAM and vice versa. It then checks for migration conflicts
when conflicting memory advice is given for a shared SVM buffer in a
multi-GPU environment.
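For reference, the conflicting-advice pattern under test looks roughly
like the sketch below. This is illustrative only: it reuses the
xe_multigpu_madvise() helper and argument order from earlier in this
series, with vm0/vm1 and exec_queue0/exec_queue1 standing in for the
per-GPU VM and exec queue handles.

    /* One CPU allocation shared by both GPUs through SVM */
    void *data = aligned_alloc(SZ_2M, SZ_4K);
    uint64_t addr = to_user_pointer(data);

    /* GPU0 advises GPU1's VRAM as the preferred location... */
    xe_multigpu_madvise(gpu0->fd, vm0, addr, SZ_4K, 0,
                        DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC,
                        gpu1->fd, 0, gpu1->vram_regions[0], exec_queue0,
                        gpu0->fd, gpu0->vram_regions[0]);

    /* ...while GPU1 advises GPU0's VRAM for the very same range */
    xe_multigpu_madvise(gpu1->fd, vm1, addr, SZ_4K, 0,
                        DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC,
                        gpu0->fd, 0, gpu0->vram_regions[0], exec_queue1,
                        gpu1->fd, gpu1->vram_regions[0]);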
Signed-off-by: Nishit Sharma <nishit.sharma@intel.com>
---
tests/intel/xe_multi_gpusvm.c | 143 ++++++++++++++++++++++++++++++++++
1 file changed, 143 insertions(+)
diff --git a/tests/intel/xe_multi_gpusvm.c b/tests/intel/xe_multi_gpusvm.c
index dc2a8f9c8..afbf010e6 100644
--- a/tests/intel/xe_multi_gpusvm.c
+++ b/tests/intel/xe_multi_gpusvm.c
@@ -59,6 +59,11 @@
* Description:
* This test launches simultaneous workloads on both GPUs accessing the
* same SVM buffer, synchronizes with fences, and verifies data integrity
+ *
+ * SUBTEST: conflicting-madvise-gpu
+ * Description:
+ * This test checks conflicting madvise hints by allocating a shared
+ * buffer, prefetching from both GPUs, and checking for migration conflicts
*/
#define MAX_XE_REGIONS 8
@@ -69,6 +74,8 @@
#define EXEC_SYNC_VAL 0x676767
#define COPY_SIZE SZ_64M
#define ATOMIC_OP_VAL 56
+#define USER_FENCE_VALUE 0xdeadbeefdeadbeefull
+#define FIVE_SEC (5LL * NSEC_PER_SEC)
struct xe_svm_gpu_info {
bool supports_faults;
@@ -136,6 +143,11 @@ static void gpu_simult_test_wrapper(struct xe_svm_gpu_info *src,
struct drm_xe_engine_class_instance *eci,
void *extra_args);
+static void gpu_conflict_test_wrapper(struct xe_svm_gpu_info *src,
+ struct xe_svm_gpu_info *dst,
+ struct drm_xe_engine_class_instance *eci,
+ void *extra_args);
+
static void
create_vm_and_queue(struct xe_svm_gpu_info *gpu, struct drm_xe_engine_class_instance *eci,
uint32_t *vm, uint32_t *exec_queue)
@@ -798,6 +810,116 @@ pagefault_test_multigpu(struct xe_svm_gpu_info *gpu0,
cleanup_vm_and_queue(gpu1, vm[1], exec_queue[1]);
}
+#define XE_BO_FLAG_SYSTEM BIT(1)
+#define XE_BO_FLAG_CPU_ADDR_MIRROR BIT(24)
+
+static void
+conflicting_madvise(struct xe_svm_gpu_info *gpu0,
+ struct xe_svm_gpu_info *gpu1,
+ struct drm_xe_engine_class_instance *eci,
+ bool no_prefetch)
+{
+ uint64_t addr;
+ uint32_t vm[2];
+ uint32_t exec_queue[2];
+ uint32_t batch_bo[2];
+ void *data;
+ uint64_t batch_addr[2];
+ struct drm_xe_sync sync[2] = {};
+ volatile uint64_t *sync_addr[2];
+ int local_fd;
+ uint16_t local_vram;
+
+ create_vm_and_queue(gpu0, eci, &vm[0], &exec_queue[0]);
+ create_vm_and_queue(gpu1, eci, &vm[1], &exec_queue[1]);
+
+ data = aligned_alloc(SZ_2M, SZ_4K);
+ igt_assert(data);
+ addr = to_user_pointer(data);
+
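+ /* Single CPU allocation shared by both GPUs via SVM; start from a
+ * known baseline by preferring default system placement.
+ */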
+ xe_vm_madvise(gpu0->fd, vm[0], addr, SZ_4K, 0,
+ DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC,
+ DRM_XE_PREFERRED_LOC_DEFAULT_SYSTEM, 0, 0);
+
+ store_dword_batch_init(gpu0->fd, vm[0], addr, &batch_bo[0], &batch_addr[0], 10);
+ store_dword_batch_init(gpu1->fd, vm[1], addr, &batch_bo[1], &batch_addr[1], 20);
+
+ /* Conflicting advice: each GPU prefers the other GPU's VRAM for the same range */
+ local_fd = gpu0->fd;
+ local_vram = gpu0->vram_regions[0];
+ xe_multigpu_madvise(gpu0->fd, vm[0], addr, SZ_4K,
+ 0, DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC,
+ gpu1->fd, 0, gpu1->vram_regions[0], exec_queue[0],
+ local_fd, local_vram);
+
+ local_fd = gpu1->fd;
+ local_vram = gpu1->vram_regions[0];
+ xe_multigpu_madvise(gpu1->fd, vm[1], addr, SZ_4K,
+ 0, DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC,
+ gpu0->fd, 0, gpu0->vram_regions[0], exec_queue[1],
+ local_fd, local_vram);
+
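+ /* User fences to signal completion of the two async prefetch operations */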
+ setup_sync(&sync[0], &sync_addr[0], BIND_SYNC_VAL);
+ setup_sync(&sync[1], &sync_addr[1], BIND_SYNC_VAL);
+
+ /* For simultaneous access, wait on both GPUs' bind user fences after the prefetches */
+ if (!no_prefetch) {
+ xe_vm_prefetch_async(gpu0->fd, vm[0], 0, 0, addr,
+ SZ_4K, &sync[0], 1,
+ DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC);
+
+ xe_vm_prefetch_async(gpu1->fd, vm[1], 0, 0, addr,
+ SZ_4K, &sync[1], 1,
+ DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC);
+
+ if (*sync_addr[0] != BIND_SYNC_VAL)
+ xe_wait_ufence(gpu0->fd, (uint64_t *)sync_addr[0], BIND_SYNC_VAL, exec_queue[0],
+ NSEC_PER_SEC * 10);
+ free((void *)sync_addr[0]);
+ if (*sync_addr[1] != BIND_SYNC_VAL)
+ xe_wait_ufence(gpu1->fd, (uint64_t *)sync_addr[1], BIND_SYNC_VAL, exec_queue[1],
+ NSEC_PER_SEC * 10);
+ free((void *)sync_addr[1]);
+ } else {
+ free((void *)sync_addr[0]);
+ free((void *)sync_addr[1]);
+ }
+
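+ /* Re-use the sync slots as exec user fences, placed one page past each batch */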
+ sync_addr[0] = (void *)((char *)batch_addr[0] + SZ_4K);
+ sync[0].addr = to_user_pointer((uint64_t *)sync_addr[0]);
+ sync[0].timeline_value = EXEC_SYNC_VAL;
+
+ sync_addr[1] = (void *)((char *)batch_addr[1] + SZ_4K);
+ sync[1].addr = to_user_pointer((uint64_t *)sync_addr[1]);
+ sync[1].timeline_value = EXEC_SYNC_VAL;
+ *sync_addr[0] = 0;
+ *sync_addr[1] = 0;
+
+ xe_exec_sync(gpu0->fd, exec_queue[0], batch_addr[0], &sync[0], 1);
+ if (*sync_addr[0] != EXEC_SYNC_VAL)
+ xe_wait_ufence(gpu0->fd, (uint64_t *)sync_addr[0], EXEC_SYNC_VAL, exec_queue[0],
+ NSEC_PER_SEC * 10);
+ xe_exec_sync(gpu1->fd, exec_queue[1], batch_addr[1], &sync[1], 1);
+ if (*sync_addr[1] != EXEC_SYNC_VAL)
+ xe_wait_ufence(gpu1->fd, (uint64_t *)sync_addr[1], EXEC_SYNC_VAL, exec_queue[1],
+ NSEC_PER_SEC * 10);
+
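+ /* The ufence waits above serialize the two execs, so GPU1's store (20) lands last */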
+ igt_assert_eq(*(uint32_t *)addr, 20);
+
+ munmap((void *)batch_addr[0], BATCH_SIZE(gpu0->fd));
+ munmap((void *)batch_addr[1], BATCH_SIZE(gpu1->fd));
+ batch_fini(gpu0->fd, vm[0], batch_bo[0], batch_addr[0]);
+ batch_fini(gpu1->fd, vm[1], batch_bo[1], batch_addr[1]);
+ free(data);
+
+ cleanup_vm_and_queue(gpu0, vm[0], exec_queue[0]);
+ cleanup_vm_and_queue(gpu1, vm[1], exec_queue[1]);
+}
+
static void
atomic_inc_op(struct xe_svm_gpu_info *gpu0,
struct xe_svm_gpu_info *gpu1,
@@ -1012,6 +1134,19 @@ multigpu_access_test(struct xe_svm_gpu_info *gpu0,
cleanup_vm_and_queue(gpu1, vm[1], exec_queue[1]);
}
+static void
+gpu_conflict_test_wrapper(struct xe_svm_gpu_info *src,
+ struct xe_svm_gpu_info *dst,
+ struct drm_xe_engine_class_instance *eci,
+ void *extra_args)
+{
+ struct multigpu_ops_args *args = (struct multigpu_ops_args *)extra_args;
+
+ igt_assert(src);
+ igt_assert(dst);
+
+ conflicting_madvise(src, dst, eci, args->prefetch_req);
+}
+
static void
gpu_latency_test_wrapper(struct xe_svm_gpu_info *src,
struct xe_svm_gpu_info *dst,
@@ -1108,6 +1243,14 @@ igt_main
for_each_gpu_pair(gpu_cnt, gpus, &eci, gpu_coherecy_test_wrapper, &coh_args);
}
+ igt_subtest("conflicting-madvise-gpu") {
+ struct multigpu_ops_args conflict_args;
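+
+ /* Exercise both paths: explicit prefetch first, then fault-driven migration */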
+ conflict_args.prefetch_req = 1;
+ for_each_gpu_pair(gpu_cnt, gpus, &eci, gpu_conflict_test_wrapper, &conflict_args);
+ conflict_args.prefetch_req = 0;
+ for_each_gpu_pair(gpu_cnt, gpus, &eci, gpu_conflict_test_wrapper, &conflict_args);
+ }
+
igt_subtest("latency-multi-gpu") {
struct multigpu_ops_args latency_args;
latency_args.prefetch_req = 1;
--
2.48.1