From: nishit.sharma@intel.com
To: igt-dev@lists.freedesktop.org, thomas.hellstrom@intel.com,
nishit.sharma@intel.com
Subject: [PATCH v7 07/10] tests/intel/xe_multi_gpusvm.c: Add SVM multi-GPU fault handling test
Date: Thu, 13 Nov 2025 16:28:32 +0000
Message-ID: <20251113162834.633575-8-nishit.sharma@intel.com>
In-Reply-To: <20251113162834.633575-1-nishit.sharma@intel.com>
From: Nishit Sharma <nishit.sharma@intel.com>
This test intentionally triggers page faults by accessing SVM regions that
have not been prefetched, for each GPU pair in a multi-GPU environment. The
subtest is run both with and without prefetch so the SVM pagefault counters
reported by the GT stats can be compared.
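For illustration only, the new subtest can be selected on its own with the
standard IGT subtest option; the binary path below assumes the default meson
build layout and is not part of this patch:

  ./build/tests/xe_multi_gpusvm --run-subtest pagefault-multi-gpu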
Signed-off-by: Nishit Sharma <nishit.sharma@intel.com>
---
tests/intel/xe_multi_gpusvm.c | 102 ++++++++++++++++++++++++++++++++++
1 file changed, 102 insertions(+)
diff --git a/tests/intel/xe_multi_gpusvm.c b/tests/intel/xe_multi_gpusvm.c
index 2c8e62e34..6feb543ae 100644
--- a/tests/intel/xe_multi_gpusvm.c
+++ b/tests/intel/xe_multi_gpusvm.c
@@ -15,6 +15,7 @@
#include "time.h"
+#include "xe/xe_gt.h"
#include "xe/xe_ioctl.h"
#include "xe/xe_query.h"
#include "xe/xe_util.h"
@@ -48,6 +49,11 @@
* Description:
* This test measures and compares latency and bandwidth for buffer access
* from CPU, local GPU, and remote GPU
+ *
+ * SUBTEST: pagefault-multi-gpu
+ * Description:
+ * This test intentionally triggers page faults by accessing unmapped SVM
+ * regions from both GPUs
*/
#define MAX_XE_REGIONS 8
@@ -115,6 +121,11 @@ static void gpu_latency_test_wrapper(struct xe_svm_gpu_info *src,
struct drm_xe_engine_class_instance *eci,
void *extra_args);
+static void gpu_fault_test_wrapper(struct xe_svm_gpu_info *src,
+ struct xe_svm_gpu_info *dst,
+ struct drm_xe_engine_class_instance *eci,
+ void *extra_args);
+
static void
create_vm_and_queue(struct xe_svm_gpu_info *gpu, struct drm_xe_engine_class_instance *eci,
uint32_t *vm, uint32_t *exec_queue)
@@ -707,6 +718,76 @@ latency_test_multigpu(struct xe_svm_gpu_info *gpu0,
cleanup_vm_and_queue(gpu1, vm[1], exec_queue[1]);
}
+static void
+pagefault_test_multigpu(struct xe_svm_gpu_info *gpu0,
+ struct xe_svm_gpu_info *gpu1,
+ struct drm_xe_engine_class_instance *eci,
+ bool prefetch_req)
+{
+ uint64_t addr;
+ uint32_t vm[2];
+ uint32_t exec_queue[2];
+ uint32_t batch_bo;
+ uint64_t batch_addr;
+ struct drm_xe_sync sync = {};
+ volatile uint64_t *sync_addr;
+ int value = 60, pf_count_1, pf_count_2;
+ void *data;
+ const char *pf_count_stat = "svm_pagefault_count";
+
+ create_vm_and_queue(gpu0, eci, &vm[0], &exec_queue[0]);
+ create_vm_and_queue(gpu1, eci, &vm[1], &exec_queue[1]);
+
+ data = aligned_alloc(SZ_2M, SZ_4K);
+ igt_assert(data);
+ addr = to_user_pointer(data);
+
+ /* Snapshot the SVM pagefault counter before the GPU touches the buffer */
+ pf_count_1 = xe_gt_stats_get_count(gpu0->fd, eci->gt_id, pf_count_stat);
+
+ /* Batch buffer that stores 'value' to the SVM address from the GPU */
+ store_dword_batch_init(gpu0->fd, vm[0], addr, &batch_bo, &batch_addr, value);
+
+ /* Set gpu0 VRAM as the preferred location for the SVM range */
+ xe_multigpu_madvise(gpu0->fd, vm[0], addr, SZ_4K, 0,
+ DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC,
+ gpu0->fd, 0, gpu0->vram_regions[0], exec_queue[0],
+ 0, 0);
+
+ /* Prefetch the range to gpu0 when requested; without prefetch the GPU
+  * store below is expected to fault the pages in.
+  */
+ setup_sync(&sync, &sync_addr, BIND_SYNC_VAL);
+ xe_multigpu_prefetch(gpu0->fd, vm[0], addr, SZ_4K, &sync,
+ sync_addr, exec_queue[0], prefetch_req);
+
+ /* Place the exec user fence SZ_4K past the batch start, inside the batch mapping */
+ sync_addr = (void *)((char *)batch_addr + SZ_4K);
+ sync.addr = to_user_pointer((uint64_t *)sync_addr);
+ sync.timeline_value = EXEC_SYNC_VAL;
+ *sync_addr = 0;
+
+ /* Execute STORE command on GPU */
+ xe_exec_sync(gpu0->fd, exec_queue[0], batch_addr, &sync, 1);
+ if (*sync_addr != EXEC_SYNC_VAL)
+ xe_wait_ufence(gpu0->fd, (uint64_t *)sync_addr, EXEC_SYNC_VAL, exec_queue[0],
+ NSEC_PER_SEC * 10);
+
+ /* Re-read the counter after the GPU store and flag any change */
+ pf_count_2 = xe_gt_stats_get_count(gpu0->fd, eci->gt_id, pf_count_stat);
+
+ if (pf_count_2 != pf_count_1) {
+ igt_warn("GPU pf: pf_count_2(%d) != pf_count_1(%d) prefetch_req: %d\n",
+ pf_count_2, pf_count_1, prefetch_req);
+ }
+
+ /* The GPU store writes a dword, so check 32 bits of the buffer */
+ igt_assert_eq(*(uint32_t *)addr, value);
+
+ /* memset() writes the byte value 11 (0x0B) into each of sizeof(int) bytes,
+  * so the dword reads back as 0x0B0B0B0B.
+  */
+ memset((void *)(uintptr_t)addr, 11, sizeof(int));
+ igt_assert_eq(*(uint32_t *)addr, 0x0B0B0B0B);
+
+ munmap((void *)batch_addr, BATCH_SIZE(gpu0->fd));
+ batch_fini(gpu0->fd, vm[0], batch_bo, batch_addr);
+ free(data);
+
+ cleanup_vm_and_queue(gpu0, vm[0], exec_queue[0]);
+ cleanup_vm_and_queue(gpu1, vm[1], exec_queue[1]);
+}
+
static void
atomic_inc_op(struct xe_svm_gpu_info *gpu0,
struct xe_svm_gpu_info *gpu1,
@@ -832,6 +913,19 @@ gpu_latency_test_wrapper(struct xe_svm_gpu_info *src,
latency_test_multigpu(src, dst, eci, args->op_mod, args->prefetch_req);
}
+static void
+gpu_fault_test_wrapper(struct xe_svm_gpu_info *src,
+ struct xe_svm_gpu_info *dst,
+ struct drm_xe_engine_class_instance *eci,
+ void *extra_args)
+{
+ struct multigpu_ops_args *args = (struct multigpu_ops_args *)extra_args;
+ igt_assert(src);
+ igt_assert(dst);
+
+ pagefault_test_multigpu(src, dst, eci, args->prefetch_req);
+}
+
igt_main
{
struct xe_svm_gpu_info gpus[MAX_XE_GPUS];
@@ -899,6 +993,14 @@ igt_main
for_each_gpu_pair(gpu_cnt, gpus, &eci, gpu_latency_test_wrapper, &latency_args);
}
+ igt_subtest("pagefault-multi-gpu") {
+ struct multigpu_ops_args fault_args;
+ fault_args.prefetch_req = 1;
+ for_each_gpu_pair(gpu_cnt, gpus, &eci, gpu_fault_test_wrapper, &fault_args);
+ fault_args.prefetch_req = 0;
+ for_each_gpu_pair(gpu_cnt, gpus, &eci, gpu_fault_test_wrapper, &fault_args);
+ }
+
igt_fixture {
int cnt;
--
2.48.1