From: nishit.sharma@intel.com
To: igt-dev@lists.freedesktop.org, nishit.sharma@intel.com,
sai.gowtham.ch@intel.com
Subject: [PATCH i-g-t v14 10/11] tests/intel/xe_multigpu_svm: Add SVM multi-GPU conflicting madvise test
Date: Mon, 5 Jan 2026 08:47:49 +0000 [thread overview]
Message-ID: <20260105084750.190346-11-nishit.sharma@intel.com> (raw)
In-Reply-To: <20260105084750.190346-1-nishit.sharma@intel.com>
From: Nishit Sharma <nishit.sharma@intel.com>
This test calls madvise operations on GPU0 with the preferred location set
to GPU1 and vice versa, exercising the case where conflicting memory advice
is given for shared SVM buffers in a multi-GPU environment.
Signed-off-by: Nishit Sharma <nishit.sharma@intel.com>
Reviewed-by: Pravalika Gurram <pravalika.gurram@intel.com>
Acked-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
---
tests/intel/xe_multigpu_svm.c | 74 ++++++++++++++++++++++++-----------
1 file changed, 51 insertions(+), 23 deletions(-)
diff --git a/tests/intel/xe_multigpu_svm.c b/tests/intel/xe_multigpu_svm.c
index 856af1615..832f0a729 100644
--- a/tests/intel/xe_multigpu_svm.c
+++ b/tests/intel/xe_multigpu_svm.c
@@ -113,6 +113,19 @@
* Test concurrent atomic memory operations with prefetch where
* multiple GPUs simultaneously access shared memory to validate
* coherency with memory migration and local VRAM access
+ *
+ * SUBTEST: mgpu-atomic-op-conflict
+ * Description:
+ * Multi-GPU atomic operation with conflicting madvise regions
+ *
+ * SUBTEST: mgpu-coherency-conflict
+ * Description:
+ * Multi-GPU coherency test with conflicting madvise regions
+ *
+ * SUBTEST: mgpu-pagefault-conflict
+ * Description:
+ * Multi-GPU page fault test with conflicting madvise regions
+ *
*/
#define MAX_XE_REGIONS 8
@@ -126,6 +139,9 @@
#define BATCH_VALUE 60
#define NUM_ITER 200
+#define USER_FENCE_VALUE 0xdeadbeefdeadbeefull
+#define FIVE_SEC (5LL * NSEC_PER_SEC)
+
#define MULTIGPU_PREFETCH BIT(1)
#define MULTIGPU_XGPU_ACCESS BIT(2)
#define MULTIGPU_ATOMIC_OP BIT(3)
@@ -135,6 +151,7 @@
#define MULTIGPU_PERF_REM_COPY BIT(7)
#define MULTIGPU_PFAULT_OP BIT(8)
#define MULTIGPU_CONC_ACCESS BIT(9)
+#define MULTIGPU_CONFLICT BIT(10)
#define INIT 2
#define STORE 3
@@ -484,7 +501,8 @@ store_dword_batch_init_1k(int fd, uint32_t vm, uint64_t src_addr,
}
static void
-gpu_madvise_exec_sync(struct xe_svm_gpu_info *gpu, uint32_t vm, uint32_t exec_queue,
+gpu_madvise_exec_sync(struct xe_svm_gpu_info *gpu, struct xe_svm_gpu_info *xgpu,
+ uint32_t vm, uint32_t exec_queue,
uint64_t dst_addr, uint64_t *batch_addr, unsigned int flags,
double *perf)
{
@@ -492,9 +510,15 @@ gpu_madvise_exec_sync(struct xe_svm_gpu_info *gpu, uint32_t vm, uint32_t exec_qu
struct timespec t_start, t_end;
uint64_t *sync_addr;
- xe_multigpu_madvise(gpu->fd, vm, dst_addr, SZ_4K, 0,
- DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC,
- gpu->fd, 0, gpu->vram_regions[0], exec_queue);
+ if (flags & MULTIGPU_CONFLICT) {
+ xe_multigpu_madvise(gpu->fd, vm, dst_addr, SZ_4K, 0,
+ DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC,
+ xgpu->fd, 0, xgpu->vram_regions[0], exec_queue);
+ } else {
+ xe_multigpu_madvise(gpu->fd, vm, dst_addr, SZ_4K, 0,
+ DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC,
+ gpu->fd, 0, gpu->vram_regions[0], exec_queue);
+ }
setup_sync(&sync, &sync_addr, BIND_SYNC_VAL);
xe_multigpu_prefetch(gpu->fd, vm, dst_addr, SZ_4K, &sync,
@@ -646,17 +670,14 @@ atomic_inc_op(struct xe_svm_gpu_info *gpu1,
&batch_bo[0], &batch_addr[0], flags, ATOMIC);
/*GPU1: Madvise and Prefetch Ops */
- gpu_madvise_exec_sync(gpu1, vm[0], exec_queue[0], addr, &batch_addr[0], flags, NULL);
+ gpu_madvise_exec_sync(gpu1, gpu2, vm[0], exec_queue[0], addr, &batch_addr[0], flags, NULL);
- /* GPU2 --> copy from GPU1 */
gpu_batch_create(gpu2, vm[1], exec_queue[1], addr, to_user_pointer(copy_dst),
&batch_bo[1], &batch_addr[1], flags, INIT);
- /*GPU2: Madvise and Prefetch Ops */
- gpu_madvise_exec_sync(gpu2, vm[1], exec_queue[1], to_user_pointer(copy_dst),
+ gpu_madvise_exec_sync(gpu2, gpu1, vm[1], exec_queue[1], to_user_pointer(copy_dst),
&batch_addr[1], flags, NULL);
- /* NOW CPU can read copy_dst (GPU2 ATOMIC op) */
final_value = *(uint32_t *)copy_dst;
igt_assert_eq(final_value, ATOMIC_OP_VAL);
@@ -665,7 +686,8 @@ atomic_inc_op(struct xe_svm_gpu_info *gpu1,
&batch_bo[1], &batch_addr[1], flags, ATOMIC);
/*GPU2: Madvise and Prefetch Ops */
- gpu_madvise_exec_sync(gpu2, vm[1], exec_queue[1], to_user_pointer(copy_dst),
+ gpu_madvise_exec_sync(gpu2, gpu1, vm[1], exec_queue[1],
+ to_user_pointer(copy_dst),
&batch_addr[1], flags, NULL);
/* GPU1 --> copy from GPU2 */
@@ -673,10 +695,11 @@ atomic_inc_op(struct xe_svm_gpu_info *gpu1,
&batch_bo[0], &batch_addr[0], flags, INIT);
/*GPU1: Madvise and Prefetch Ops */
- gpu_madvise_exec_sync(gpu1, vm[0], exec_queue[1], addr, &batch_addr[0], flags, NULL);
+ gpu_madvise_exec_sync(gpu1, gpu2, vm[0], exec_queue[0], addr,
+ &batch_addr[0], flags, NULL);
- /* NOW CPU can read addr (GPU1 ATOMIC op) */
final_value = *(uint32_t *)addr;
+ /* NOW CPU can read copy_dst (GPU1 ATOMIC op) */
igt_assert_eq(final_value, ATOMIC_OP_VAL + 1);
munmap((void *)batch_addr[0], BATCH_SIZE(gpu1->fd));
@@ -724,7 +747,7 @@ coherency_test_multigpu(struct xe_svm_gpu_info *gpu1,
&batch_bo[0], &batch_addr[0], flags, DWORD);
/*GPU1: Madvise and Prefetch Ops */
- gpu_madvise_exec_sync(gpu1, vm[0], exec_queue[0], addr, &batch_addr[0],
+ gpu_madvise_exec_sync(gpu1, gpu2, vm[0], exec_queue[0], addr, &batch_addr[0],
flags, NULL);
/* GPU2 --> copy from GPU1 */
@@ -732,11 +755,11 @@ coherency_test_multigpu(struct xe_svm_gpu_info *gpu1,
&batch_bo[1], &batch_addr[1], flags, INIT);
/*GPU2: Madvise and Prefetch Ops */
- gpu_madvise_exec_sync(gpu2, vm[1], exec_queue[1], to_user_pointer(copy_dst),
+ gpu_madvise_exec_sync(gpu2, gpu1, vm[1], exec_queue[1], to_user_pointer(copy_dst),
&batch_addr[1], flags, NULL);
- /* verifying copy_dst (GPU2 INIT op) have correct value */
final_value = READ_ONCE(*(uint32_t *)copy_dst);
+ /* verifying copy_dst (GPU2 INIT op) have correct value */
igt_assert_eq(final_value, BATCH_VALUE);
if (flags & MULTIGPU_COH_FAIL) {
@@ -792,7 +815,7 @@ coherency_test_multigpu(struct xe_svm_gpu_info *gpu1,
&batch_bo[1], &batch_addr[1], flags, INIT);
/*GPU2: Madvise and Prefetch Ops */
- gpu_madvise_exec_sync(gpu2, vm[1], exec_queue[1], to_user_pointer(result),
+ gpu_madvise_exec_sync(gpu2, gpu1, vm[1], exec_queue[1], to_user_pointer(result),
&batch_addr[1], flags, NULL);
/* Check which write won (or if we got a mix) */
@@ -864,7 +887,7 @@ latency_test_multigpu(struct xe_svm_gpu_info *gpu1,
&batch_bo[0], &batch_addr[0], flags, DWORD);
/*GPU1: Madvise and Prefetch Ops */
- gpu_madvise_exec_sync(gpu1, vm[0], exec_queue[0], addr, &batch_addr[0],
+ gpu_madvise_exec_sync(gpu1, gpu2, vm[0], exec_queue[0], addr, &batch_addr[0],
flags, &gpu1_latency);
gpu1_bw = (SZ_1K / (gpu1_latency / 1e9)) / (1024.0 * 1024.0); //Written 1k
@@ -886,7 +909,7 @@ latency_test_multigpu(struct xe_svm_gpu_info *gpu1,
&batch_bo[1], &batch_addr[1], flags, INIT);
/*GPU2: Madvise and Prefetch Ops */
- gpu_madvise_exec_sync(gpu2, vm[1], exec_queue[1], to_user_pointer(copy_dst),
+ gpu_madvise_exec_sync(gpu2, gpu1, vm[1], exec_queue[1], to_user_pointer(copy_dst),
&batch_addr[1], flags, &gpu2_latency);
gpu2_bw = (SZ_1K / (gpu2_latency / 1e9)) / (1024.0 * 1024.0);
@@ -898,15 +921,15 @@ latency_test_multigpu(struct xe_svm_gpu_info *gpu1,
&batch_bo[0], &batch_addr[0], flags, DWORD);
/*GPU1: Madvise and Prefetch Ops */
- gpu_madvise_exec_sync(gpu1, vm[0], exec_queue[0], addr, &batch_addr[0],
- flags, &gpu1_latency);
+ gpu_madvise_exec_sync(gpu1, gpu2, vm[0], exec_queue[0], addr,
+ &batch_addr[0], flags, &gpu1_latency);
/*GPU2: Copy data from addr (written by GPU1) to its own buffer (copy_dst) */
gpu_batch_create(gpu2, vm[1], exec_queue[1], addr, to_user_pointer(copy_dst),
&batch_bo[1], &batch_addr[1], flags, INIT);
/*GPU2: Madvise and Prefetch Ops */
- gpu_madvise_exec_sync(gpu2, vm[1], exec_queue[1], to_user_pointer(copy_dst),
+ gpu_madvise_exec_sync(gpu2, gpu1, vm[1], exec_queue[1], to_user_pointer(copy_dst),
&batch_addr[1], flags, &gpu2_latency);
gpu2_latency += gpu1_latency;
@@ -1012,7 +1035,8 @@ pagefault_test_multigpu(struct xe_svm_gpu_info *gpu1,
&batch_bo[0], &batch_addr[0], flags, DWORD);
/*GPU1: Madvise and Prefetch Ops */
- gpu_madvise_exec_sync(gpu1, vm[0], exec_queue[0], addr, &batch_addr[0], flags, NULL);
+ gpu_madvise_exec_sync(gpu1, gpu2, vm[0], exec_queue[0], addr, &batch_addr[0],
+ flags, NULL);
pf_count_gpu1_after = xe_gt_stats_get_count(gpu1->fd, eci->gt_id, pf_count_stat);
@@ -1049,7 +1073,8 @@ pagefault_test_multigpu(struct xe_svm_gpu_info *gpu1,
free(sync_addr);
/*GPU2: Madvise and Prefetch Ops */
- gpu_madvise_exec_sync(gpu2, vm[1], exec_queue[1], addr1, &batch_addr[1], flags, NULL);
+ gpu_madvise_exec_sync(gpu2, gpu1, vm[1], exec_queue[1], addr1, &batch_addr[1],
+ flags, NULL);
pf_count_gpu2_after = xe_gt_stats_get_count(gpu2->fd, eci->gt_id, pf_count_stat);
@@ -1349,9 +1374,11 @@ int igt_main()
{ "xgpu-access-prefetch", MULTIGPU_PREFETCH | MULTIGPU_XGPU_ACCESS },
{ "atomic-op-basic", MULTIGPU_ATOMIC_OP },
{ "atomic-op-prefetch", MULTIGPU_PREFETCH | MULTIGPU_ATOMIC_OP },
+ { "atomic-op-conflict", MULTIGPU_CONFLICT | MULTIGPU_ATOMIC_OP },
{ "coherency-basic", MULTIGPU_COH_OP },
{ "coherency-fail-basic", MULTIGPU_COH_OP | MULTIGPU_COH_FAIL },
{ "coherency-prefetch", MULTIGPU_PREFETCH | MULTIGPU_COH_OP },
+ { "coherency-conflict", MULTIGPU_CONFLICT | MULTIGPU_COH_OP },
{ "coherency-fail-prefetch",
MULTIGPU_PREFETCH | MULTIGPU_COH_OP | MULTIGPU_COH_FAIL},
{ "latency-basic", MULTIGPU_PERF_OP },
@@ -1362,6 +1389,7 @@ int igt_main()
MULTIGPU_PREFETCH | MULTIGPU_PERF_OP | MULTIGPU_PERF_REM_COPY },
{ "pagefault-basic", MULTIGPU_PFAULT_OP },
{ "pagefault-prefetch", MULTIGPU_PREFETCH | MULTIGPU_PFAULT_OP },
+ { "pagefault-conflict", MULTIGPU_CONFLICT | MULTIGPU_PFAULT_OP },
{ "concurrent-access-basic", MULTIGPU_CONC_ACCESS },
{ "concurrent-access-prefetch", MULTIGPU_PREFETCH | MULTIGPU_CONC_ACCESS },
{ NULL },
--
2.48.1
next prev parent reply other threads:[~2026-01-05 8:47 UTC|newest]
Thread overview: 15+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-01-05 8:47 [PATCH i-g-t v14 00/11] Madvise feature in SVM for Multi-GPU configs nishit.sharma
2026-01-05 8:47 ` [PATCH i-g-t v14 01/11] drm-uapi/xe: Sync with madvise interface nishit.sharma
2026-04-02 19:23 ` Dixit, Ashutosh
2026-01-05 8:47 ` [PATCH i-g-t v14 02/11] lib/xe: Add instance parameter to xe_vm_madvise nishit.sharma
2026-01-05 8:47 ` [PATCH i-g-t v14 03/11] lib/xe: Add synchronous helpers for VM bind/unbind operations nishit.sharma
2026-01-05 8:47 ` [PATCH i-g-t v14 04/11] tests/intel/xe_multigpu_svm: Add SVM multi-GPU xGPU memory access test nishit.sharma
2026-01-05 8:47 ` [PATCH i-g-t v14 05/11] tests/intel/xe_multigpu_svm: Add SVM multi-GPU atomic operations nishit.sharma
2026-01-05 8:47 ` [PATCH i-g-t v14 06/11] tests/intel/xe_multigpu_svm: Add SVM multi-GPU coherency test nishit.sharma
2026-01-05 8:47 ` [PATCH i-g-t v14 07/11] tests/intel/xe_multigpu_svm: Add SVM multi-GPU performance test nishit.sharma
2026-01-05 8:47 ` [PATCH i-g-t v14 08/11] tests/intel/xe_multigpu_svm: Add SVM multi-GPU fault handling test nishit.sharma
2026-01-05 8:53 ` Ch, Sai Gowtham
2026-01-05 8:47 ` [PATCH i-g-t v14 09/11] tests/intel/xe_multigpu_svm: Add SVM multi-GPU simultaneous access test nishit.sharma
2026-01-05 8:47 ` nishit.sharma [this message]
2026-01-05 8:47 ` [PATCH i-g-t v14 11/11] tests/intel/xe_multigpu_svm: Add SVM multi-GPU migration test nishit.sharma
2026-01-05 14:44 ` ✗ Fi.CI.BUILD: failure for Madvise feature in SVM for Multi-GPU configs Patchwork
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260105084750.190346-11-nishit.sharma@intel.com \
--to=nishit.sharma@intel.com \
--cc=igt-dev@lists.freedesktop.org \
--cc=sai.gowtham.ch@intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox