public inbox for igt-dev@lists.freedesktop.org
 help / color / mirror / Atom feed
From: nishit.sharma@intel.com
To: igt-dev@lists.freedesktop.org, nishit.sharma@intel.com,
	thomas.hellstrom@intel.com
Subject: [PATCH i-g-t] tests/intel/xe_svm_usrptr_madvise: Unify batch buffer address and sync
Date: Thu,  5 Mar 2026 16:54:27 +0000	[thread overview]
Message-ID: <20260305165427.65184-1-nishit.sharma@intel.com> (raw)

From: Nishit Sharma <nishit.sharma@intel.com>

Refactor the SVM userptr copy test and batch buffer setup to
support both Ponte Vecchio (PVC) and BMG platforms.
The following changes are made:

The batch buffer address is now assigned conditionally on the
platform. The synchronization logic for batch execution is unified
into helper functions that select the appropriate sync address and
value for the hardware in use. These changes are needed because
the platforms differ in how batch buffers and synchronization
must be managed.

Signed-off-by: Nishit Sharma <nishit.sharma@intel.com>
---
 tests/intel/xe_svm_usrptr_madvise.c | 74 +++++++++++++++++++++--------
 1 file changed, 53 insertions(+), 21 deletions(-)

diff --git a/tests/intel/xe_svm_usrptr_madvise.c b/tests/intel/xe_svm_usrptr_madvise.c
index bfa5864e4..f54bd3f8a 100644
--- a/tests/intel/xe_svm_usrptr_madvise.c
+++ b/tests/intel/xe_svm_usrptr_madvise.c
@@ -91,14 +91,16 @@ setup_sync(struct drm_xe_sync *sync, uint64_t **sync_addr,
 	**sync_addr = 0;
 }
 
+#define BATCH_VA_BASE 0x5000000
 static void
 gpu_batch_init(int fd, uint32_t vm, uint64_t src_addr,
 	       uint64_t dst_addr, uint64_t copy_size,
-	       uint32_t *bo, uint64_t *addr)
+	       uint32_t *bo, uint64_t *addr, void **batch_map)
 {
 	uint32_t width = copy_size / 256;
 	uint32_t height = 1;
-	uint32_t batch_bo_size = BATCH_SIZE(fd);
+	uint64_t alignment = xe_get_default_alignment(fd);
+	uint32_t batch_bo_size = ALIGN(BATCH_SIZE(fd), alignment);
 	uint32_t batch_bo;
 	uint64_t batch_addr;
 	void *batch;
@@ -127,40 +129,69 @@ gpu_batch_init(int fd, uint32_t vm, uint64_t src_addr,
 	cmd[i++] = MI_BATCH_BUFFER_END;
 	cmd[i++] = MI_BATCH_BUFFER_END;
 
-	batch_addr = to_user_pointer(batch);
+	if (IS_PONTEVECCHIO(dev_id))
+		batch_addr = BATCH_VA_BASE;
+	else
+		batch_addr = to_user_pointer(batch);
 
 	/* Punch a gap in the SVM map where we map the batch_bo */
 	xe_vm_bind_lr_sync(fd, vm, batch_bo, 0, batch_addr, batch_bo_size, 0);
 	*bo = batch_bo;
 	*addr = batch_addr;
+	*batch_map = batch;
 }
 
 static void
 gpu_copy_batch_create(int fd, uint32_t vm, uint32_t exec_queue,
 		      uint64_t src_addr, uint64_t dst_addr,
-		      uint32_t *batch_bo, uint64_t *batch_addr)
+		      uint32_t *batch_bo, uint64_t *batch_addr, void **batch_map)
+{
+	gpu_batch_init(fd, vm, src_addr, dst_addr, SZ_16K, batch_bo, batch_addr, batch_map);
+}
+
+static void
+xe_sync_exec(int fd, uint32_t exec_queue, uint64_t *batch_addr,
+	     struct drm_xe_sync *sync, void *sync_addr_ptr,
+	     uint64_t sync_addr_val, bool is_pvc)
+{
+	sync->addr = sync_addr_val;
+	sync->timeline_value = EXEC_SYNC_VAL;
+	WRITE_ONCE(*(uint64_t *)sync_addr_ptr, 0);
+	xe_exec_sync(fd, exec_queue, *batch_addr, sync, 1);
+	if (READ_ONCE(*(uint64_t *)sync_addr_ptr) != EXEC_SYNC_VAL)
+		xe_wait_ufence(fd, (uint64_t *)sync_addr_ptr, EXEC_SYNC_VAL,
+			       exec_queue, NSEC_PER_SEC * 10);
+}
+
+static void
+xe_sync_setup(int fd, uint32_t exec_queue, uint64_t *batch_addr,
+	      void *batch_map, struct drm_xe_sync *sync, bool is_pvc)
 {
-	gpu_batch_init(fd, vm, src_addr, dst_addr, SZ_4K, batch_bo, batch_addr);
+	if (is_pvc) {
+		uint64_t *sync_addr_cpu = (uint64_t *)((char *)batch_map + SZ_4K);
+
+		xe_sync_exec(fd, exec_queue, batch_addr, sync, sync_addr_cpu,
+			     *batch_addr + SZ_4K, true);
+	} else {
+		uint64_t *sync_addr = (uint64_t *)((char *)from_user_pointer(*batch_addr) + SZ_4K);
+
+		xe_sync_exec(fd, exec_queue, batch_addr, sync, sync_addr,
+			     to_user_pointer((uint64_t *)sync_addr), false);
+	}
 }
 
 static void
 gpu_exec_sync(int fd, uint32_t vm, uint32_t exec_queue,
-	      uint64_t *batch_addr)
+	      uint64_t *batch_addr, void *batch_map)
 {
 	struct drm_xe_sync sync = {};
 	uint64_t *sync_addr;
+	uint16_t dev_id = intel_get_drm_devid(fd);
 
 	setup_sync(&sync, &sync_addr, BIND_SYNC_VAL);
 
-	sync_addr = (uint64_t *)((char *)from_user_pointer(*batch_addr) + SZ_4K);
-	sync.addr = to_user_pointer((uint64_t *)sync_addr);
-	sync.timeline_value = EXEC_SYNC_VAL;
-	WRITE_ONCE(*sync_addr, 0);
-
-	xe_exec_sync(fd, exec_queue, *batch_addr, &sync, 1);
-	if (READ_ONCE(*sync_addr) != EXEC_SYNC_VAL)
-		xe_wait_ufence(fd, (uint64_t *)sync_addr, EXEC_SYNC_VAL, exec_queue,
-			       NSEC_PER_SEC * 10);
+	xe_sync_setup(fd, exec_queue, batch_addr, batch_map, &sync,
+		      IS_PONTEVECCHIO(dev_id));
 }
 
 static void test_svm_userptr_copy(int fd)
@@ -169,6 +200,7 @@ static void test_svm_userptr_copy(int fd)
 	uint8_t *svm_ptr, *userptr_ptr, *bo_map;
 	uint32_t bo, batch_bo;
 	uint64_t bo_gpu_va, userptr_gpu_va, batch_addr;
+	void *batch_map;
 
 	struct drm_xe_engine_class_instance eci = { .engine_class = DRM_XE_ENGINE_CLASS_COPY };
 	uint32_t vm, exec_queue;
@@ -202,17 +234,17 @@ static void test_svm_userptr_copy(int fd)
 		      DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE, 0, 0);
 
 	gpu_copy_batch_create(fd, vm, exec_queue, to_user_pointer(svm_ptr),
-			      to_user_pointer(userptr_ptr), &batch_bo, &batch_addr);
-	gpu_exec_sync(fd, vm, exec_queue, &batch_addr);
+			      to_user_pointer(userptr_ptr), &batch_bo, &batch_addr, &batch_map);
+	gpu_exec_sync(fd, vm, exec_queue, &batch_addr, batch_map);
 
 	gpu_copy_batch_create(fd, vm, exec_queue, userptr_gpu_va, bo_gpu_va,
-			      &batch_bo, &batch_addr);
-	gpu_exec_sync(fd, vm, exec_queue, &batch_addr);
+			      &batch_bo, &batch_addr, &batch_map);
+	gpu_exec_sync(fd, vm, exec_queue, &batch_addr, batch_map);
 
-	igt_assert(memcmp(svm_ptr, userptr_ptr, SZ_4K) == 0);
+	igt_assert(memcmp(svm_ptr, userptr_ptr, 64) == 0);
 
 	bo_map = xe_bo_map(fd, bo, size);
-	igt_assert(memcmp(bo_map, svm_ptr, SZ_4K) == 0);
+	igt_assert(memcmp(bo_map, svm_ptr, 64) == 0);
 
 	xe_vm_bind_lr_sync(fd, vm, 0, 0, batch_addr, BATCH_SIZE(fd),
 			   DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR);
-- 
2.48.1


             reply	other threads:[~2026-03-05 16:54 UTC|newest]

Thread overview: 4+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2026-03-05 16:54 nishit.sharma [this message]
2026-03-06  0:22 ` ✓ i915.CI.BAT: success for tests/intel/xe_svm_usrptr_madvise: Unify batch buffer address and sync Patchwork
2026-03-06  0:50 ` ✓ Xe.CI.BAT: " Patchwork
2026-03-06 19:22 ` ✗ Xe.CI.FULL: failure " Patchwork

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20260305165427.65184-1-nishit.sharma@intel.com \
    --to=nishit.sharma@intel.com \
    --cc=igt-dev@lists.freedesktop.org \
    --cc=thomas.hellstrom@intel.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox