igt-dev.lists.freedesktop.org archive mirror
 help / color / mirror / Atom feed
* [PATCH i-g-t] tests/intel/xe_exec_system_allocator: Added 64k alignment support
@ 2025-11-18  5:29 Sobin Thomas
  2025-11-18  5:52 ` Sharma, Nishit
                   ` (4 more replies)
  0 siblings, 5 replies; 6+ messages in thread
From: Sobin Thomas @ 2025-11-18  5:29 UTC (permalink / raw)
  To: igt-dev; +Cc: nishit.sharma, Sobin Thomas

This test previously assumed 4K page alignment only, which causes some
subtests to fail on hardware such as PVC that requires 64K page alignment.
Modify the test to support 64K page alignment.

Signed-off-by: Sobin Thomas <sobin.thomas@intel.com>
---
 tests/intel/xe_exec_system_allocator.c | 42 ++++++++++++++++----------
 1 file changed, 26 insertions(+), 16 deletions(-)

diff --git a/tests/intel/xe_exec_system_allocator.c b/tests/intel/xe_exec_system_allocator.c
index b88967e58..314836ef9 100644
--- a/tests/intel/xe_exec_system_allocator.c
+++ b/tests/intel/xe_exec_system_allocator.c
@@ -311,11 +311,11 @@ static void touch_all_pages(int fd, uint32_t exec_queue, void *ptr,
 	u64 *exec_ufence = NULL;
 	int64_t timeout = FIVE_SEC;
 
-	exec_ufence = mmap(NULL, SZ_4K, PROT_READ |
+	exec_ufence = mmap(NULL, SZ_64K, PROT_READ |
 			   PROT_WRITE, MAP_SHARED |
 			   MAP_ANONYMOUS, -1, 0);
 	igt_assert(exec_ufence != MAP_FAILED);
-	memset(exec_ufence, 5, SZ_4K);
+	memset(exec_ufence, 5, SZ_64K);
 	sync[0].addr = to_user_pointer(exec_ufence);
 
 	for (i = 0; i < n_writes; ++i, addr += stride) {
@@ -368,7 +368,7 @@ static void touch_all_pages(int fd, uint32_t exec_queue, void *ptr,
 		}
 		igt_assert_eq(ret, 0);
 	}
-	munmap(exec_ufence, SZ_4K);
+	munmap(exec_ufence, SZ_64K);
 }
 
 static int va_bits;
@@ -825,11 +825,11 @@ partial(int fd, struct drm_xe_engine_class_instance *eci, unsigned int flags)
 	xe_wait_ufence(fd, &data[0].vm_sync, USER_FENCE_VALUE, 0, FIVE_SEC);
 	data[0].vm_sync = 0;
 
-	exec_ufence = mmap(NULL, SZ_4K, PROT_READ |
+	exec_ufence = mmap(NULL, SZ_64K, PROT_READ |
 			   PROT_WRITE, MAP_SHARED |
 			   MAP_ANONYMOUS, -1, 0);
 	igt_assert(exec_ufence != MAP_FAILED);
-	memset(exec_ufence, 5, SZ_4K);
+	memset(exec_ufence, 5, SZ_64K);
 
 	for (i = 0; i < 2; i++) {
 		uint64_t addr = to_user_pointer(data);
@@ -879,7 +879,7 @@ partial(int fd, struct drm_xe_engine_class_instance *eci, unsigned int flags)
 	}
 
 	xe_exec_queue_destroy(fd, exec_queue);
-	munmap(exec_ufence, SZ_4K);
+	munmap(exec_ufence, SZ_64K);
 	__aligned_free(&alloc);
 	if (new)
 		munmap(new, bo_size / 2);
@@ -1201,6 +1201,7 @@ xe_vm_parse_execute_madvise(int fd, uint32_t vm, struct test_exec_data *data,
 			    struct drm_xe_sync *sync, uint8_t (*pat_value)(int))
 {
 	uint32_t bo_flags, bo = 0;
+	uint64_t split_addr, split_size;
 
 	if (flags & MADVISE_ATOMIC_DEVICE)
 		xe_vm_madvise_atomic_attr(fd, vm, to_user_pointer(data), bo_size,
@@ -1252,16 +1253,22 @@ xe_vm_parse_execute_madvise(int fd, uint32_t vm, struct test_exec_data *data,
 
 	if (flags & MADVISE_SPLIT_VMA) {
 		if (bo_size)
-			bo_size = ALIGN(bo_size, SZ_4K);
+			bo_size = ALIGN(bo_size, SZ_64K);
+
+		split_addr = to_user_pointer(data) + bo_size/2;
+		split_addr = ALIGN(split_addr, SZ_64K);
+		split_size = bo_size / 2;
+		split_size = ALIGN(split_size, SZ_64K);
 
 		bo_flags = DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM;
 		bo = xe_bo_create(fd, vm, bo_size, vram_if_possible(fd, eci->gt_id),
 				  bo_flags);
-		xe_vm_bind_async(fd, vm, 0, bo, 0, to_user_pointer(data) + bo_size / 2,
-				 bo_size / 2, 0, 0);
 
-		__xe_vm_bind_assert(fd, vm, 0, 0, 0, to_user_pointer(data) + bo_size / 2,
-				    bo_size / 2, DRM_XE_VM_BIND_OP_MAP,
+		xe_vm_bind_async(fd, vm, 0, bo, 0, split_addr,
+				 split_size, 0, 0);
+
+		__xe_vm_bind_assert(fd, vm, 0, 0, 0, split_addr,
+				    split_size, DRM_XE_VM_BIND_OP_MAP,
 				    DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR, sync,
 				    1, 0, 0);
 		xe_wait_ufence(fd, &data[0].vm_sync, USER_FENCE_VALUE, 0, FIVE_SEC);
@@ -1269,7 +1276,7 @@ xe_vm_parse_execute_madvise(int fd, uint32_t vm, struct test_exec_data *data,
 		gem_close(fd, bo);
 		bo = 0;
 
-		xe_vm_madvise_atomic_attr(fd, vm, to_user_pointer(data), bo_size / 2,
+		xe_vm_madvise_atomic_attr(fd, vm, split_addr, split_size,
 					  DRM_XE_ATOMIC_GLOBAL);
 	}
 
@@ -1509,6 +1516,7 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
 		}
 	}
 
+
 	for (i = 0; i < n_exec_queues; i++)
 		exec_queues[i] = xe_exec_queue_create(fd, vm, eci, 0);
 
@@ -1520,6 +1528,7 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
 	data[0].vm_sync = 0;
 
 	addr = to_user_pointer(data);
+	addr = ALIGN(addr, SZ_64K);
 
 	if (flags & MADVISE_OP)
 		xe_vm_parse_execute_madvise(fd, vm, data, bo_size, eci, addr, flags, sync,
@@ -1550,14 +1559,15 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
 		igt_assert(exec_ufence != MAP_FAILED);
 		memset(exec_ufence, 5, SZ_4K);
 	}
+	aligned_alloc_type = __aligned_alloc(SZ_64K, SZ_64K);
 
-	aligned_alloc_type = __aligned_alloc(SZ_4K, SZ_4K);
 	bind_ufence = aligned_alloc_type.ptr;
 	igt_assert(bind_ufence);
 	__aligned_partial_free(&aligned_alloc_type);
-	bind_sync = xe_bo_create(fd, vm, SZ_4K, system_memory(fd),
+
+	bind_sync = xe_bo_create(fd, vm, SZ_64K, system_memory(fd),
 				 bo_flags);
-	bind_ufence = xe_bo_map_fixed(fd, bind_sync, SZ_4K,
+	bind_ufence = xe_bo_map_fixed(fd, bind_sync, SZ_64K,
 				      to_user_pointer(bind_ufence));
 
 	if (!(flags & FAULT) && flags & PREFETCH) {
@@ -1580,7 +1590,7 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
 		if (exec_ufence) {
 			xe_vm_prefetch_async(fd, vm, 0, 0,
 					     to_user_pointer(exec_ufence),
-					     SZ_4K, sync, 1, 0);
+					     SZ_64K, sync, 1, 0);
 			xe_wait_ufence(fd, bind_ufence, USER_FENCE_VALUE, 0,
 				       FIVE_SEC);
 			bind_ufence[0] = 0;
-- 
2.51.0


^ permalink raw reply related	[flat|nested] 6+ messages in thread

end of thread, other threads:[~2025-11-18  8:01 UTC | newest]

Thread overview: 6+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2025-11-18  5:29 [PATCH i-g-t] tests/intel/xe_exec_system_allocator: Added 64k alignment support Sobin Thomas
2025-11-18  5:52 ` Sharma, Nishit
2025-11-18  6:25 ` ✓ Xe.CI.BAT: success for " Patchwork
2025-11-18  6:30 ` ✓ i915.CI.BAT: " Patchwork
2025-11-18  7:43 ` ✗ Xe.CI.Full: failure " Patchwork
2025-11-18  8:01 ` ✗ i915.CI.Full: " Patchwork

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).