public inbox for igt-dev@lists.freedesktop.org
 help / color / mirror / Atom feed
* [PATCH i-g-t] tests/intel/xe_compute: Add VM page fault property reporting test
@ 2026-01-08  5:19 nishit.sharma
  0 siblings, 0 replies; 4+ messages in thread
From: nishit.sharma @ 2026-01-08  5:19 UTC (permalink / raw)
  To: jonathan.cavitt, igt-dev, nishit.sharma

From: Nishit Sharma <nishit.sharma@intel.com>

Add new subtest 'vm-get-property-pagefault-reporting' to validate the
DRM_IOCTL_XE_VM_GET_PROPERTY ioctl for querying VM page fault information.

This test exercises the kernel's page fault tracking mechanism for VMs
created with fault-mode enabled. It verifies:
- Initial page fault count is zero for newly created VMs
- Page faults are correctly tracked and reported after triggering
- Reported fault count respects the maximum limit (≤50 faults)
- All reported faults maintain consistent fault address, access type,
  and fault type
- VM isolation is maintained (faults in VM1 don't affect VM2)

Signed-off-by: Nishit Sharma <nishit.sharma@intel.com>
---
 tests/intel/xe_compute.c | 169 +++++++++++++++++++++++++++++++++++++++
 1 file changed, 169 insertions(+)

diff --git a/tests/intel/xe_compute.c b/tests/intel/xe_compute.c
index 310093fc5..57d1bcb08 100644
--- a/tests/intel/xe_compute.c
+++ b/tests/intel/xe_compute.c
@@ -510,6 +510,172 @@ test_compute_square(int fd)
 		      "GPU not supported\n");
 }
 
+static void print_pf(struct xe_vm_fault *fault)
+{
+	igt_debug("FAULT:\n");
+	igt_debug("address = 0x%08x%08x\n",
+		  upper_32_bits(fault->address),
+		  lower_32_bits(fault->address));
+	igt_debug("address precision = %u\n", fault->address_precision);
+	igt_debug("access type = %u\n", fault->access_type);
+	igt_debug("fault type = %u\n", fault->fault_type);
+	igt_debug("fault level = %u\n", fault->fault_level);
+	igt_debug("\n");
+}
+
+/**
+ * SUBTEST: vm-get-property-pagefault-reporting
+ * Description: Test VM page fault reporting via xe_vm_get_property ioctl
+ * Functionality: VM page fault isolation and query
+ * Test category: functionality test
+ */
+static void test_vm_pagefault_reporting(void)
+{
+	struct drm_xe_engine_class_instance *hwe;
+	struct drm_xe_vm_get_property property = {0};
+	struct xe_vm_fault *faults_1, f0, f;
+	uint32_t vm1, vm2;
+	uint32_t exec_queue1;
+	uint64_t addr = 0x1a0000;
+	struct drm_xe_sync sync = {
+		.type = DRM_XE_SYNC_TYPE_USER_FENCE,
+		.flags = DRM_XE_SYNC_FLAG_SIGNAL,
+	};
+	struct drm_xe_exec exec = {};
+	uint64_t *sync_val;
+	int ret;
+	int fd, fault_count, final_fault_count, check_count;
+	uint32_t bo;
+	bool found_compute = false;
+
+	fd = drm_open_driver(DRIVER_XE);
+	xe_device_get(fd);
+
+	igt_require(xe_has_vram(fd)); /* Fault mode typically needs VRAM */
+
+	/* Create fault-mode VM1 */
+	vm1 = xe_vm_create(fd, DRM_XE_VM_CREATE_FLAG_LR_MODE |
+			   DRM_XE_VM_CREATE_FLAG_FAULT_MODE, 0);
+	igt_assert(vm1);
+
+	/* Step 1: Query initial pagefault count (should be 0) */
+	property.vm_id = vm1;
+	property.property = DRM_XE_VM_GET_PROPERTY_FAULTS;
+	property.data = 0;
+	property.size = 0;
+
+	ret = igt_ioctl(fd, DRM_IOCTL_XE_VM_GET_PROPERTY, &property);
+	igt_assert_eq(ret, 0);
+
+	if (property.size > 0) {
+		faults_1 = malloc(property.size);
+		igt_assert(faults_1);
+		property.data = to_user_pointer(faults_1);
+		ret = igt_ioctl(fd, DRM_IOCTL_XE_VM_GET_PROPERTY, &property);
+		igt_assert_eq(ret, 0);
+		free(faults_1);
+	}
+
+	fault_count = property.size / sizeof(struct xe_vm_fault);
+	f0 = faults_1[0];
+
+	for (int i = 0; i < fault_count; i++) {
+		f = faults_1[i];
+		print_pf(&f);
+		igt_assert_eq(f.address, f0.address);
+		igt_assert_eq(f.access_type, f0.access_type);
+		igt_assert_eq(f.fault_type, f0.fault_type);
+	}
+	free(faults_1);
+
+	/* Step 2: Trigger a page fault via compute job */
+	xe_for_each_engine(fd, hwe) {
+		if (hwe->engine_class != DRM_XE_ENGINE_CLASS_COMPUTE)
+			continue;
+		found_compute = true;
+
+		exec_queue1 = xe_exec_queue_create(fd, vm1, hwe, 0);
+		sync_val = mmap(NULL, 4096, PROT_WRITE, MAP_SHARED | MAP_ANON, -1, 0);
+		igt_assert(sync_val != MAP_FAILED);
+		*sync_val = 0;
+		sync.addr = to_user_pointer(sync_val);
+
+		/* Create a faulting job (access unmapped address) */
+		bo = xe_bo_create(fd, vm1, 4096, vram_if_possible(fd, 0), 0);
+		exec.exec_queue_id = exec_queue1;
+		exec.num_batch_buffer = 1;
+		exec.address = addr; /* Unmapped - will fault */
+		exec.syncs = to_user_pointer(&sync);
+
+		/* This exec should trigger page fault */
+		ret = igt_ioctl(fd, DRM_IOCTL_XE_EXEC, &exec);
+
+		/* Wait for fault handling */
+		usleep(100000); /* 100ms for page fault to be recorded */
+
+		gem_close(fd, bo);
+
+		/* Step 3: Query pagefault count after fault */
+		faults_1 = malloc(property.size);
+		igt_assert(faults_1);
+		property.data = to_user_pointer(faults_1);
+		property.size = 0;
+		ret = igt_ioctl(fd, DRM_IOCTL_XE_VM_GET_PROPERTY, &property);
+		igt_assert_eq(ret, 0);
+
+		if (property.size > 0) {
+			faults_1 = malloc(property.size);
+			igt_assert(faults_1);
+			property.data = to_user_pointer(faults_1);
+			ret = igt_ioctl(fd, DRM_IOCTL_XE_VM_GET_PROPERTY, &property);
+			igt_assert_eq(ret, 0);
+
+			final_fault_count = property.size / sizeof(struct xe_vm_fault);
+			check_count = final_fault_count > 50 ? 50 : final_fault_count;
+			f0 = faults_1[0];
+
+			for (int i = 0; i < check_count; i++) {
+				f = faults_1[i];
+				print_pf(&f);
+				igt_assert_eq(f.address, f0.address);
+				igt_assert_eq(f.access_type, f0.access_type);
+				igt_assert_eq(f.fault_type, f0.fault_type);
+			}
+			free(faults_1);
+		}
+
+		munmap(sync_val, 4096);
+		xe_exec_queue_destroy(fd, exec_queue1);
+	}
+	igt_require(found_compute);
+
+	/* Step 4: Create VM2 for isolation test */
+	vm2 = xe_vm_create(fd, DRM_XE_VM_CREATE_FLAG_LR_MODE |
+			   DRM_XE_VM_CREATE_FLAG_FAULT_MODE, 0);
+	igt_assert(vm2);
+
+	property.vm_id = vm2;
+	property.property = DRM_XE_VM_GET_PROPERTY_FAULTS;
+	property.data = 0;
+	property.size = 0;
+	ret = igt_ioctl(fd, DRM_IOCTL_XE_VM_GET_PROPERTY, &property);
+	igt_assert_eq(ret, 0);
+
+	if (property.size > 0) {
+		faults_1 = malloc(property.size);
+		igt_assert(faults_1);
+		property.data = to_user_pointer(faults_1);
+		ret = igt_ioctl(fd, DRM_IOCTL_XE_VM_GET_PROPERTY, &property);
+		igt_assert_eq(ret, 0);
+		free(faults_1);
+	}
+
+	/* Cleanup */
+	xe_vm_destroy(fd, vm1);
+	xe_vm_destroy(fd, vm2);
+	drm_close_driver(fd);
+}
+
 int igt_main()
 {
 	int xe, ccs_mode[4];
@@ -565,6 +731,9 @@ int igt_main()
 		test_eu_busy(5 * LOOP_DURATION_2s);
 	}
 
+	igt_subtest("vm-get-property-pagefault-reporting")
+		test_vm_pagefault_reporting();
+
 	igt_fixture() {
 		if (!sriov_enabled)
 			igt_restore_ccs_mode(ccs_mode, ARRAY_SIZE(ccs_mode));
-- 
2.43.0


^ permalink raw reply related	[flat|nested] 4+ messages in thread

* [PATCH i-g-t] tests/intel/xe_compute: Add VM page fault property reporting test
@ 2026-03-26  6:05 nishit.sharma
  2026-03-26  6:52 ` Dandamudi, Priyanka
  0 siblings, 1 reply; 4+ messages in thread
From: nishit.sharma @ 2026-03-26  6:05 UTC (permalink / raw)
  To: igt-dev, jonathan.cavitt

From: Nishit Sharma <nishit.sharma@intel.com>

Add new subtest 'vm-get-property-pagefault-reporting' to validate the
DRM_IOCTL_XE_VM_GET_PROPERTY ioctl for querying VM page fault information.

This test exercises the kernel's page fault tracking mechanism for VMs
created with fault-mode enabled. It verifies:
- Initial page fault count is zero for newly created VMs
- Page faults are correctly tracked and reported after triggering
- Reported fault count respects the maximum limit (≤50 faults)
- All reported faults maintain consistent fault address, access type,
  and fault type
- VM isolation is maintained (faults in VM1 don't affect VM2)

Signed-off-by: Nishit Sharma <nishit.sharma@intel.com>
---
 tests/intel/xe_compute.c | 167 +++++++++++++++++++++++++++++++++++++++
 1 file changed, 167 insertions(+)

diff --git a/tests/intel/xe_compute.c b/tests/intel/xe_compute.c
index 762e66fcd..673e8ad36 100644
--- a/tests/intel/xe_compute.c
+++ b/tests/intel/xe_compute.c
@@ -25,6 +25,7 @@
 #define LOOP_DURATION_2s	(1000000ull * 2)
 #define DURATION_MARGIN		0.2
 #define MIN_BUSYNESS		95.0
+#define EXEC_SYNC_VAL		0x676767
 
 bool sriov_enabled;
 
@@ -510,6 +511,169 @@ test_compute_square(int fd)
 		      "GPU not supported\n");
 }
 
+static void print_pf(struct xe_vm_fault *fault)
+{
+	igt_debug("FAULT:\n");
+	igt_debug("address = 0x%08x%08x\n",
+		  upper_32_bits(fault->address),
+		  lower_32_bits(fault->address));
+	igt_debug("address precision = %u\n", fault->address_precision);
+	igt_debug("access type = %u\n", fault->access_type);
+	igt_debug("fault type = %u\n", fault->fault_type);
+	igt_debug("fault level = %u\n", fault->fault_level);
+	igt_debug("\n");
+}
+
+/**
+ * SUBTEST: vm-get-property-pagefault-reporting
+ * Description: Test VM page fault reporting via xe_vm_get_property ioctl
+ * Functionality: VM page fault isolation and query
+ * Test category: functionality test
+ */
+static void test_vm_pagefault_reporting(void)
+{
+	struct drm_xe_engine_class_instance *hwe;
+	struct drm_xe_vm_get_property query = {0};
+	struct xe_vm_fault *faults_1, f0, f;
+	uint32_t vm1, vm2;
+	uint32_t exec_queue1;
+	uint64_t addr = 0x1a0000;
+	struct drm_xe_sync sync = {
+		.type = DRM_XE_SYNC_TYPE_USER_FENCE,
+		.flags = DRM_XE_SYNC_FLAG_SIGNAL,
+	};
+	struct drm_xe_exec exec = {};
+	uint64_t *sync_val;
+	int fd, fault_count, final_fault_count, check_count;
+	uint32_t bo;
+	bool found_compute = false;
+
+	fd = drm_open_driver(DRIVER_XE);
+	xe_device_get(fd);
+
+	igt_require(xe_has_vram(fd)); /* Fault mode typically needs VRAM */
+
+	/* Create fault-mode VM1 */
+	vm1 = xe_vm_create(fd, DRM_XE_VM_CREATE_FLAG_LR_MODE |
+			   DRM_XE_VM_CREATE_FLAG_FAULT_MODE, 0);
+	igt_assert(vm1);
+
+	/* Step 1: Query initial pagefault count (should be 0) */
+	query.vm_id = vm1;
+	query.property = DRM_XE_VM_GET_PROPERTY_FAULTS;
+	query.data = 0;
+	query.size = 0;
+
+	xe_vm_get_property(fd, vm1, &query);
+	igt_assert_eq(query.size, 0);
+
+	if (query.size > 0) {
+		faults_1 = malloc(query.size);
+		igt_assert(faults_1);
+		query.data = to_user_pointer(faults_1);
+		xe_vm_get_property(fd, vm1, &query);
+	}
+
+	fault_count = query.size / sizeof(struct xe_vm_fault);
+	f0 = faults_1[0];
+
+	for (int i = 0; i < fault_count; i++) {
+		f = faults_1[i];
+		print_pf(&f);
+		igt_assert_eq(f.address, f0.address);
+		igt_assert_eq(f.access_type, f0.access_type);
+		igt_assert_eq(f.fault_type, f0.fault_type);
+	}
+	free(faults_1);
+
+	/* Step 2: Trigger a page fault via compute job */
+	xe_for_each_engine(fd, hwe) {
+		if (hwe->engine_class != DRM_XE_ENGINE_CLASS_COMPUTE)
+			continue;
+		found_compute = true;
+
+		exec_queue1 = xe_exec_queue_create(fd, vm1, hwe, 0);
+		sync_val = mmap(NULL, 4096, PROT_WRITE, MAP_SHARED | MAP_ANON, -1, 0);
+		igt_assert(sync_val != MAP_FAILED);
+		*sync_val = 0;
+		sync.addr = to_user_pointer(sync_val);
+
+		/* Create a faulting job (access unmapped address) */
+		bo = xe_bo_create(fd, vm1, 4096, vram_if_possible(fd, 0), 0);
+		exec.exec_queue_id = exec_queue1;
+		exec.num_batch_buffer = 1;
+		exec.address = addr; /* Unmapped - will fault */
+		exec.syncs = to_user_pointer(&sync);
+
+		/* This exec should trigger page fault */
+		igt_ioctl(fd, DRM_IOCTL_XE_EXEC, &exec);
+
+		/* Wait for fault handling */
+		usleep(100000); /* 100ms for page fault to be recorded */
+
+		gem_close(fd, bo);
+
+		/* Step 3: Query pagefault count after fault */
+		faults_1 = malloc(query.size);
+		igt_assert(faults_1);
+		query.data = to_user_pointer(faults_1);
+		query.size = 0;
+		xe_vm_get_property(fd, vm1, &query);
+		igt_assert_lt(0, query.size);
+
+		if (query.size > 0) {
+			faults_1 = malloc(query.size);
+			igt_assert(faults_1);
+			query.data = to_user_pointer(faults_1);
+			xe_vm_get_property(fd, vm1, &query);
+			igt_assert_lt(0, query.size);
+
+			final_fault_count = query.size / sizeof(struct xe_vm_fault);
+			check_count = final_fault_count > 50 ? 50 : final_fault_count;
+			f0 = faults_1[0];
+
+			for (int i = 0; i < check_count; i++) {
+				f = faults_1[i];
+				print_pf(&f);
+				igt_assert_eq(f.address, f0.address);
+				igt_assert_eq(f.access_type, f0.access_type);
+				igt_assert_eq(f.fault_type, f0.fault_type);
+			}
+			free(faults_1);
+		}
+
+		munmap(sync_val, 4096);
+		xe_exec_queue_destroy(fd, exec_queue1);
+	}
+	igt_require(found_compute);
+
+	/* Step 4: Create VM2 for isolation test */
+	vm2 = xe_vm_create(fd, DRM_XE_VM_CREATE_FLAG_LR_MODE |
+			   DRM_XE_VM_CREATE_FLAG_FAULT_MODE, 0);
+	igt_assert(vm2);
+
+	query.vm_id = vm2;
+	query.property = DRM_XE_VM_GET_PROPERTY_FAULTS;
+	query.data = 0;
+	query.size = 0;
+	xe_vm_get_property(fd, vm1, &query);
+	igt_assert_eq(query.size, 0);
+
+	if (query.size > 0) {
+		faults_1 = malloc(query.size);
+		igt_assert(faults_1);
+		query.data = to_user_pointer(faults_1);
+		xe_vm_get_property(fd, vm1, &query);
+		igt_assert_lt(0, query.size);
+		free(faults_1);
+	}
+
+	/* Cleanup */
+	xe_vm_destroy(fd, vm1);
+	xe_vm_destroy(fd, vm2);
+	drm_close_driver(fd);
+}
+
 /**
  * SUBTEST: compute-square-userenv
  * Mega feature: Compute
@@ -717,6 +881,9 @@ int igt_main()
 		test_eu_busy(5 * LOOP_DURATION_2s);
 	}
 
+	igt_subtest("vm-get-property-pagefault-reporting")
+		test_vm_pagefault_reporting();
+
 	igt_fixture() {
 		if (!sriov_enabled)
 			igt_restore_ccs_mode(ccs_mode, ARRAY_SIZE(ccs_mode));
-- 
2.43.0


^ permalink raw reply related	[flat|nested] 4+ messages in thread

* RE: [PATCH i-g-t] tests/intel/xe_compute: Add VM page fault property reporting test
  2026-03-26  6:05 [PATCH i-g-t] tests/intel/xe_compute: Add VM page fault property reporting test nishit.sharma
@ 2026-03-26  6:52 ` Dandamudi, Priyanka
  2026-03-26  6:54   ` Dandamudi, Priyanka
  0 siblings, 1 reply; 4+ messages in thread
From: Dandamudi, Priyanka @ 2026-03-26  6:52 UTC (permalink / raw)
  To: Sharma, Nishit, igt-dev@lists.freedesktop.org, Cavitt, Jonathan



> -----Original Message-----
> From: igt-dev <igt-dev-bounces@lists.freedesktop.org> On Behalf Of
> nishit.sharma@intel.com
> Sent: 26 March 2026 11:36 AM
> To: igt-dev@lists.freedesktop.org; Cavitt, Jonathan
> <jonathan.cavitt@intel.com>
> Subject: [PATCH i-g-t] tests/intel/xe_compute: Add VM page fault property
> reporting test
> 
> From: Nishit Sharma <nishit.sharma@intel.com>
> 
> Add new subtest 'vm-get-property-pagefault-reporting' to validate the
> DRM_IOCTL_XE_VM_GET_PROPERTY ioctl for querying VM page fault
> information.
> 
> This test exercises the kernel's page fault tracking mechanism for VMs
> created with fault-mode enabled. It verifies:
> - Initial page fault count is zero for newly created VMs
> - Page faults are correctly tracked and reported after triggering
> - Reported fault count respects the maximum limit (≤50 faults)
> - All reported faults maintain consistent fault address, access type,
>   and fault type
> - VM isolation is maintained (faults in VM1 don't affect VM2)
> 
> Signed-off-by: Nishit Sharma <nishit.sharma@intel.com>
> ---
>  tests/intel/xe_compute.c | 167
> +++++++++++++++++++++++++++++++++++++++
>  1 file changed, 167 insertions(+)
> 
> diff --git a/tests/intel/xe_compute.c b/tests/intel/xe_compute.c index
> 762e66fcd..673e8ad36 100644
> --- a/tests/intel/xe_compute.c
> +++ b/tests/intel/xe_compute.c
> @@ -25,6 +25,7 @@
>  #define LOOP_DURATION_2s	(1000000ull * 2)
>  #define DURATION_MARGIN		0.2
>  #define MIN_BUSYNESS		95.0
> +#define EXEC_SYNC_VAL		0x676767
Defined but never used.
> 

>  bool sriov_enabled;
> 
> @@ -510,6 +511,169 @@ test_compute_square(int fd)
>  		      "GPU not supported\n");
>  }
> 
> +static void print_pf(struct xe_vm_fault *fault) {
> +	igt_debug("FAULT:\n");
> +	igt_debug("address = 0x%08x%08x\n",
> +		  upper_32_bits(fault->address),
> +		  lower_32_bits(fault->address));
> +	igt_debug("address precision = %u\n", fault->address_precision);
> +	igt_debug("access type = %u\n", fault->access_type);
> +	igt_debug("fault type = %u\n", fault->fault_type);
> +	igt_debug("fault level = %u\n", fault->fault_level);
> +	igt_debug("\n");
> +}
> +
> +/**
> + * SUBTEST: vm-get-property-pagefault-reporting
> + * Description: Test VM page fault reporting via xe_vm_get_property
> +ioctl
> + * Functionality: VM page fault isolation and query
> + * Test category: functionality test
> + */
> +static void test_vm_pagefault_reporting(void) {
> +	struct drm_xe_engine_class_instance *hwe;
> +	struct drm_xe_vm_get_property query = {0};
> +	struct xe_vm_fault *faults_1, f0, f;
> +	uint32_t vm1, vm2;
> +	uint32_t exec_queue1;
> +	uint64_t addr = 0x1a0000;
> +	struct drm_xe_sync sync = {
> +		.type = DRM_XE_SYNC_TYPE_USER_FENCE,
> +		.flags = DRM_XE_SYNC_FLAG_SIGNAL,
> +	};
> +	struct drm_xe_exec exec = {};
> +	uint64_t *sync_val;
> +	int fd, fault_count, final_fault_count, check_count;
> +	uint32_t bo;
> +	bool found_compute = false;
> +
> +	fd = drm_open_driver(DRIVER_XE);
> +	xe_device_get(fd);
> +
> +	igt_require(xe_has_vram(fd)); /* Fault mode typically needs VRAM */
> +
> +	/* Create fault-mode VM1 */
> +	vm1 = xe_vm_create(fd, DRM_XE_VM_CREATE_FLAG_LR_MODE |
> +			   DRM_XE_VM_CREATE_FLAG_FAULT_MODE, 0);
> +	igt_assert(vm1);
> +
> +	/* Step 1: Query initial pagefault count (should be 0) */
> +	query.vm_id = vm1;
> +	query.property = DRM_XE_VM_GET_PROPERTY_FAULTS;
> +	query.data = 0;
> +	query.size = 0;
> +
> +	xe_vm_get_property(fd, vm1, &query);
> +	igt_assert_eq(query.size, 0);
> +
> +	if (query.size > 0) {
If the above assert holds, execution can never enter this condition; if it does not hold, the test has already failed at the assert.
Please check this — should the assert above be igt_assert_neq instead?
> +		faults_1 = malloc(query.size);
> +		igt_assert(faults_1);
> +		query.data = to_user_pointer(faults_1);
> +		xe_vm_get_property(fd, vm1, &query);
> +	}
> +
> +	fault_count = query.size / sizeof(struct xe_vm_fault);
> +	f0 = faults_1[0];
> Up to this point the query.size handling is problematic — please check it.
> +	for (int i = 0; i < fault_count; i++) {
> +		f = faults_1[i];
> +		print_pf(&f);
> +		igt_assert_eq(f.address, f0.address);
> +		igt_assert_eq(f.access_type, f0.access_type);
> +		igt_assert_eq(f.fault_type, f0.fault_type);
> +	}
> +	free(faults_1);
> +
> +	/* Step 2: Trigger a page fault via compute job */
> +	xe_for_each_engine(fd, hwe) {
> +		if (hwe->engine_class != DRM_XE_ENGINE_CLASS_COMPUTE)
> +			continue;
> +		found_compute = true;
> +
> +		exec_queue1 = xe_exec_queue_create(fd, vm1, hwe, 0);
> +		sync_val = mmap(NULL, 4096, PROT_WRITE, MAP_SHARED |
> MAP_ANON, -1, 0);
> +		igt_assert(sync_val != MAP_FAILED);
> +		*sync_val = 0;
> +		sync.addr = to_user_pointer(sync_val);
> +
> +		/* Create a faulting job (access unmapped address) */
> +		bo = xe_bo_create(fd, vm1, 4096, vram_if_possible(fd, 0), 0);
> +		exec.exec_queue_id = exec_queue1;
> +		exec.num_batch_buffer = 1;
> +		exec.address = addr; /* Unmapped - will fault */
> +		exec.syncs = to_user_pointer(&sync);
> +
> +		/* This exec should trigger page fault */
> +		igt_ioctl(fd, DRM_IOCTL_XE_EXEC, &exec);
> +
> +		/* Wait for fault handling */
> +		usleep(100000); /* 100ms for page fault to be recorded */
Instead of usleep, you can wait on wait_user_ufence.
> +
> +		gem_close(fd, bo);
> +
> +		/* Step 3: Query pagefault count after fault */
> +		faults_1 = malloc(query.size);
> +		igt_assert(faults_1);
> +		query.data = to_user_pointer(faults_1);
> +		query.size = 0;
> +		xe_vm_get_property(fd, vm1, &query);
> +		igt_assert_lt(0, query.size);
> +
> +		if (query.size > 0) {
This if condition is unnecessary, since the assert above already guarantees query.size > 0; please double-check.
> +			faults_1 = malloc(query.size);
> +			igt_assert(faults_1);
> +			query.data = to_user_pointer(faults_1);
> +			xe_vm_get_property(fd, vm1, &query);
> +			igt_assert_lt(0, query.size);
> +
> +			final_fault_count = query.size / sizeof(struct
> xe_vm_fault);
> +			check_count = final_fault_count > 50 ? 50 :
> final_fault_count;
> +			f0 = faults_1[0];
> +
> +			for (int i = 0; i < check_count; i++) {
> +				f = faults_1[i];
> +				print_pf(&f);
> +				igt_assert_eq(f.address, f0.address);
> +				igt_assert_eq(f.access_type, f0.access_type);
> +				igt_assert_eq(f.fault_type, f0.fault_type);
> +			}
> +			free(faults_1);
> +		}
> +
> +		munmap(sync_val, 4096);
> +		xe_exec_queue_destroy(fd, exec_queue1);
> +	}
> +	igt_require(found_compute);
> +
> +	/* Step 4: Create VM2 for isolation test */
> +	vm2 = xe_vm_create(fd, DRM_XE_VM_CREATE_FLAG_LR_MODE |
> +			   DRM_XE_VM_CREATE_FLAG_FAULT_MODE, 0);
> +	igt_assert(vm2);
> +
> +	query.vm_id = vm2;
> +	query.property = DRM_XE_VM_GET_PROPERTY_FAULTS;
> +	query.data = 0;
> +	query.size = 0;
> +	xe_vm_get_property(fd, vm1, &query);
You are creating vm2 and getting vm1 property.
> +	igt_assert_eq(query.size, 0);
> +
Same problem related to query.size as above.
> +	if (query.size > 0) {
> +		faults_1 = malloc(query.size);
> +		igt_assert(faults_1);
> +		query.data = to_user_pointer(faults_1);
> +		xe_vm_get_property(fd, vm1, &query);
Trying to use vm1 ??
> +		igt_assert_lt(0, query.size);
> +		free(faults_1);
> +	}
> +
> +	/* Cleanup */
> +	xe_vm_destroy(fd, vm1);
> +	xe_vm_destroy(fd, vm2);
> +	drm_close_driver(fd);
> +}
> +
>  /**
>   * SUBTEST: compute-square-userenv
>   * Mega feature: Compute
> @@ -717,6 +881,9 @@ int igt_main()
>  		test_eu_busy(5 * LOOP_DURATION_2s);
>  	}
> 
> +	igt_subtest("vm-get-property-pagefault-reporting")
> +		test_vm_pagefault_reporting();
> +
>  	igt_fixture() {
>  		if (!sriov_enabled)
>  			igt_restore_ccs_mode(ccs_mode,
> ARRAY_SIZE(ccs_mode));
> --
> 2.43.0


^ permalink raw reply	[flat|nested] 4+ messages in thread

* RE: [PATCH i-g-t] tests/intel/xe_compute: Add VM page fault property reporting test
  2026-03-26  6:52 ` Dandamudi, Priyanka
@ 2026-03-26  6:54   ` Dandamudi, Priyanka
  0 siblings, 0 replies; 4+ messages in thread
From: Dandamudi, Priyanka @ 2026-03-26  6:54 UTC (permalink / raw)
  To: igt-dev@lists.freedesktop.org, Cavitt,  Jonathan, Sharma, Nishit



> -----Original Message-----
> From: Dandamudi, Priyanka
> Sent: 26 March 2026 12:23 PM
> To: 'nishit.sharma@intel.com' <nishit.sharma@intel.com>; igt-
> dev@lists.freedesktop.org; Cavitt, Jonathan <jonathan.cavitt@intel.com>
> Subject: RE: [PATCH i-g-t] tests/intel/xe_compute: Add VM page fault
> property reporting test
> 
> 
> 
> > -----Original Message-----
> > From: igt-dev <igt-dev-bounces@lists.freedesktop.org> On Behalf Of
> > nishit.sharma@intel.com
> > Sent: 26 March 2026 11:36 AM
> > To: igt-dev@lists.freedesktop.org; Cavitt, Jonathan
> > <jonathan.cavitt@intel.com>
> > Subject: [PATCH i-g-t] tests/intel/xe_compute: Add VM page fault
> > property reporting test
> >
> > From: Nishit Sharma <nishit.sharma@intel.com>
> >
> > Add new subtest 'vm-get-property-pagefault-reporting' to validate the
> > DRM_IOCTL_XE_VM_GET_PROPERTY ioctl for querying VM page fault
> > information.
> >
> > This test exercises the kernel's page fault tracking mechanism for VMs
> > created with fault-mode enabled. It verifies:
> > - Initial page fault count is zero for newly created VMs
> > - Page faults are correctly tracked and reported after triggering
> > - Reported fault count respects the maximum limit (≤50 faults)
> > - All reported faults maintain consistent fault address, access type,
> >   and fault type
> > - VM isolation is maintained (faults in VM1 don't affect VM2)
> >
> > Signed-off-by: Nishit Sharma <nishit.sharma@intel.com>
> > ---
> >  tests/intel/xe_compute.c | 167
> > +++++++++++++++++++++++++++++++++++++++
> >  1 file changed, 167 insertions(+)
> >
> > diff --git a/tests/intel/xe_compute.c b/tests/intel/xe_compute.c index
> > 762e66fcd..673e8ad36 100644
> > --- a/tests/intel/xe_compute.c
> > +++ b/tests/intel/xe_compute.c
> > @@ -25,6 +25,7 @@
> >  #define LOOP_DURATION_2s	(1000000ull * 2)
> >  #define DURATION_MARGIN		0.2
> >  #define MIN_BUSYNESS		95.0
> > +#define EXEC_SYNC_VAL		0x676767
> Define but never used.
> >
> 
> >  bool sriov_enabled;
> >
> > @@ -510,6 +511,169 @@ test_compute_square(int fd)
> >  		      "GPU not supported\n");
> >  }
> >
> > +static void print_pf(struct xe_vm_fault *fault) {
> > +	igt_debug("FAULT:\n");
> > +	igt_debug("address = 0x%08x%08x\n",
> > +		  upper_32_bits(fault->address),
> > +		  lower_32_bits(fault->address));
> > +	igt_debug("address precision = %u\n", fault->address_precision);
> > +	igt_debug("access type = %u\n", fault->access_type);
> > +	igt_debug("fault type = %u\n", fault->fault_type);
> > +	igt_debug("fault level = %u\n", fault->fault_level);
> > +	igt_debug("\n");
> > +}
> > +
> > +/**
> > + * SUBTEST: vm-get-property-pagefault-reporting
> > + * Description: Test VM page fault reporting via xe_vm_get_property
> > +ioctl
> > + * Functionality: VM page fault isolation and query
> > + * Test category: functionality test
> > + */
> > +static void test_vm_pagefault_reporting(void) {
> > +	struct drm_xe_engine_class_instance *hwe;
> > +	struct drm_xe_vm_get_property query = {0};
> > +	struct xe_vm_fault *faults_1, f0, f;
> > +	uint32_t vm1, vm2;
> > +	uint32_t exec_queue1;
> > +	uint64_t addr = 0x1a0000;
> > +	struct drm_xe_sync sync = {
> > +		.type = DRM_XE_SYNC_TYPE_USER_FENCE,
> > +		.flags = DRM_XE_SYNC_FLAG_SIGNAL,
> > +	};
> > +	struct drm_xe_exec exec = {};
> > +	uint64_t *sync_val;
> > +	int fd, fault_count, final_fault_count, check_count;
> > +	uint32_t bo;
> > +	bool found_compute = false;
> > +
> > +	fd = drm_open_driver(DRIVER_XE);
> > +	xe_device_get(fd);
> > +
> > +	igt_require(xe_has_vram(fd)); /* Fault mode typically needs VRAM */
> > +
> > +	/* Create fault-mode VM1 */
> > +	vm1 = xe_vm_create(fd, DRM_XE_VM_CREATE_FLAG_LR_MODE |
> > +			   DRM_XE_VM_CREATE_FLAG_FAULT_MODE, 0);
> > +	igt_assert(vm1);
> > +
> > +	/* Step 1: Query initial pagefault count (should be 0) */
> > +	query.vm_id = vm1;
> > +	query.property = DRM_XE_VM_GET_PROPERTY_FAULTS;
> > +	query.data = 0;
> > +	query.size = 0;
> > +
> > +	xe_vm_get_property(fd, vm1, &query);
> > +	igt_assert_eq(query.size, 0);
> > +
> > +	if (query.size > 0) {
> If the above assert is true then it would never go into this condition, if not
> the test would fail at the assert.
> Please check this. Is that the above should be igt_assert_neq?
> > +		faults_1 = malloc(query.size);
> > +		igt_assert(faults_1);
> > +		query.data = to_user_pointer(faults_1);
> > +		xe_vm_get_property(fd, vm1, &query);
> > +	}
> > +
> > +	fault_count = query.size / sizeof(struct xe_vm_fault);
> > +	f0 = faults_1[0];
> > +Until, this query.size is creating problem, check this out.
> > +	for (int i = 0; i < fault_count; i++) {
> > +		f = faults_1[i];
> > +		print_pf(&f);
> > +		igt_assert_eq(f.address, f0.address);
> > +		igt_assert_eq(f.access_type, f0.access_type);
> > +		igt_assert_eq(f.fault_type, f0.fault_type);
> > +	}
> > +	free(faults_1);
> > +
> > +	/* Step 2: Trigger a page fault via compute job */
> > +	xe_for_each_engine(fd, hwe) {
> > +		if (hwe->engine_class != DRM_XE_ENGINE_CLASS_COMPUTE)
> > +			continue;
> > +		found_compute = true;
> > +
> > +		exec_queue1 = xe_exec_queue_create(fd, vm1, hwe, 0);
> > +		sync_val = mmap(NULL, 4096, PROT_WRITE, MAP_SHARED |
> > MAP_ANON, -1, 0);
> > +		igt_assert(sync_val != MAP_FAILED);
> > +		*sync_val = 0;
> > +		sync.addr = to_user_pointer(sync_val);
> > +
> > +		/* Create a faulting job (access unmapped address) */
> > +		bo = xe_bo_create(fd, vm1, 4096, vram_if_possible(fd, 0), 0);
> > +		exec.exec_queue_id = exec_queue1;
> > +		exec.num_batch_buffer = 1;
> > +		exec.address = addr; /* Unmapped - will fault */
> > +		exec.syncs = to_user_pointer(&sync);
> > +
> > +		/* This exec should trigger page fault */
> > +		igt_ioctl(fd, DRM_IOCTL_XE_EXEC, &exec);
> > +
> > +		/* Wait for fault handling */
> > +		usleep(100000); /* 100ms for page fault to be recorded */
> Instead of usleep, you can wait on wait_user_ufence.
> > +
> > +		gem_close(fd, bo);
> > +
> > +		/* Step 3: Query pagefault count after fault */
> > +		faults_1 = malloc(query.size);
> > +		igt_assert(faults_1);
> > +		query.data = to_user_pointer(faults_1);
> > +		query.size = 0;
> > +		xe_vm_get_property(fd, vm1, &query);
> > +		igt_assert_lt(0, query.size);
> > +
> > +		if (query.size > 0) {
> No need of this if condition as you are asserting above, check this once.
> > +			faults_1 = malloc(query.size);
> > +			igt_assert(faults_1);
> > +			query.data = to_user_pointer(faults_1);
> > +			xe_vm_get_property(fd, vm1, &query);
> > +			igt_assert_lt(0, query.size);
> > +
> > +			final_fault_count = query.size / sizeof(struct
> > xe_vm_fault);
> > +			check_count = final_fault_count > 50 ? 50 :
> > final_fault_count;
> > +			f0 = faults_1[0];
> > +
> > +			for (int i = 0; i < check_count; i++) {
> > +				f = faults_1[i];
> > +				print_pf(&f);
> > +				igt_assert_eq(f.address, f0.address);
> > +				igt_assert_eq(f.access_type, f0.access_type);
> > +				igt_assert_eq(f.fault_type, f0.fault_type);
> > +			}
> > +			free(faults_1);
> > +		}
> > +
> > +		munmap(sync_val, 4096);
> > +		xe_exec_queue_destroy(fd, exec_queue1);
> > +	}
> > +	igt_require(found_compute);
> > +
> > +	/* Step 4: Create VM2 for isolation test */
> > +	vm2 = xe_vm_create(fd, DRM_XE_VM_CREATE_FLAG_LR_MODE |
> > +			   DRM_XE_VM_CREATE_FLAG_FAULT_MODE, 0);
> > +	igt_assert(vm2);
> > +
> > +	query.vm_id = vm2;
> > +	query.property = DRM_XE_VM_GET_PROPERTY_FAULTS;
> > +	query.data = 0;
> > +	query.size = 0;
> > +	xe_vm_get_property(fd, vm1, &query);
> You are creating vm2 and getting vm1 property.
> > +	igt_assert_eq(query.size, 0);
> > +
> Same problem related to query.size as above.
> > +	if (query.size > 0) {
> > +		faults_1 = malloc(query.size);
> > +		igt_assert(faults_1);
> > +		query.data = to_user_pointer(faults_1);
> > +		xe_vm_get_property(fd, vm1, &query);
> Trying to use vm1 ??
> > +		igt_assert_lt(0, query.size);
> > +		free(faults_1);
> > +	}
> > +
> > +	/* Cleanup */
> > +	xe_vm_destroy(fd, vm1);
> > +	xe_vm_destroy(fd, vm2);
> > +	drm_close_driver(fd);
> > +}
> > +
> >  /**
> >   * SUBTEST: compute-square-userenv
> >   * Mega feature: Compute
> > @@ -717,6 +881,9 @@ int igt_main()
> >  		test_eu_busy(5 * LOOP_DURATION_2s);
> >  	}
> >
> > +	igt_subtest("vm-get-property-pagefault-reporting")
> > +		test_vm_pagefault_reporting();
> > +
> >  	igt_fixture() {
> >  		if (!sriov_enabled)
> >  			igt_restore_ccs_mode(ccs_mode,
> > ARRAY_SIZE(ccs_mode));
> > --
> > 2.43.0


^ permalink raw reply	[flat|nested] 4+ messages in thread

end of thread, other threads:[~2026-03-26  6:54 UTC | newest]

Thread overview: 4+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2026-03-26  6:05 [PATCH i-g-t] tests/intel/xe_compute: Add VM page fault property reporting test nishit.sharma
2026-03-26  6:52 ` Dandamudi, Priyanka
2026-03-26  6:54   ` Dandamudi, Priyanka
  -- strict thread matches above, loose matches on Subject: below --
2026-01-08  5:19 nishit.sharma

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox