Igt-dev Archive on lore.kernel.org
* [PATCH v5 0/2] RFC: drm-uapi/xe: add exec_queue_id member to drm_xe_wait_user_fence structure
@ 2023-12-12  5:10 Bommu Krishnaiah
  2023-12-12  5:10 ` [PATCH v5 1/2] " Bommu Krishnaiah
                   ` (2 more replies)
  0 siblings, 3 replies; 7+ messages in thread
From: Bommu Krishnaiah @ 2023-12-12  5:10 UTC (permalink / raw)
  To: igt-dev; +Cc: Bommu Krishnaiah

Remove the num_engines/instances members from the drm_xe_wait_user_fence structure
and add an exec_queue_id member.

Add an exec_queue-reset-wait subtest to exercise the behaviour when an exec_queue reset happens.

About the test:
Don't wait till timeout on the user fence when an exec_queue reset is detected,
and return a proper error code.

With this change I am able to see that the exec_queue reset happened and xe_wait_user_fence_ioctl returned EIO.
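
For illustration only (not part of this series), a minimal sketch of how user
space is expected to fill the reworked ioctl, assuming the usual IGT helpers
and an exec_queue id obtained from exec queue create:

	struct drm_xe_wait_user_fence wait = {
		.addr = to_user_pointer(&data->exec_sync),
		.op = DRM_XE_UFENCE_WAIT_OP_EQ,
		.value = USER_FENCE_VALUE,
		.mask = DRM_XE_UFENCE_WAIT_MASK_U64,
		.timeout = MS_TO_NS(1000),
		/* queue id the work was submitted on; lets the kernel flag a reset */
		.exec_queue_id = exec_queue,
	};

	/* EIO: the exec_queue was reset; ETIME: the fence never signalled */
	if (igt_ioctl(fd, DRM_IOCTL_XE_WAIT_USER_FENCE, &wait) && errno == EIO)
		igt_info("exec_queue reset detected\n");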

Test results:
The below IGT tests are passing:
xe_exec_balancer.c
xe_exec_compute_mode.c
xe_exec_fault_mode.c
xe_exec_reset.c
xe_exec_threads.c
xe_waitfence.c


Bommu Krishnaiah (2):
  drm-uapi/xe: add exec_queue_id member to drm_xe_wait_user_fence
    structure
  drm-uapi/xe: Don't wait on user_fence during exec queue reset

 include/drm-uapi/xe_drm.h          |  28 ++++----
 lib/xe/xe_ioctl.c                  |  29 ++++----
 lib/xe/xe_ioctl.h                  |  11 ++-
 tests/intel/xe_evict.c             |   4 +-
 tests/intel/xe_exec_balancer.c     |  15 ++--
 tests/intel/xe_exec_compute_mode.c |  18 ++---
 tests/intel/xe_exec_fault_mode.c   |  21 +++---
 tests/intel/xe_exec_reset.c        |   6 +-
 tests/intel/xe_exec_threads.c      |  15 ++--
 tests/intel/xe_waitfence.c         | 111 +++++++++++++++++++++++++----
 10 files changed, 169 insertions(+), 89 deletions(-)

-- 
2.25.1

^ permalink raw reply	[flat|nested] 7+ messages in thread

* [PATCH v5 1/2] drm-uapi/xe: add exec_queue_id member to drm_xe_wait_user_fence structure
  2023-12-12  5:10 [PATCH v5 0/2] RFC: drm-uapi/xe: add exec_queue_id member to drm_xe_wait_user_fence structure Bommu Krishnaiah
@ 2023-12-12  5:10 ` Bommu Krishnaiah
  2023-12-12  5:39   ` Rodrigo Vivi
  2023-12-12  5:10 ` [PATCH v5 2/2] drm-uapi/xe: Don't wait on user_fence during exec queue reset Bommu Krishnaiah
  2023-12-12  5:35 ` ✗ Fi.CI.BUILD: failure for RFC: drm-uapi/xe: add exec_queue_id member to drm_xe_wait_user_fence structure (rev7) Patchwork
  2 siblings, 1 reply; 7+ messages in thread
From: Bommu Krishnaiah @ 2023-12-12  5:10 UTC (permalink / raw)
  To: igt-dev; +Cc: Bommu Krishnaiah, Rodrigo Vivi

Remove the num_engines/instances members from the drm_xe_wait_user_fence structure
and add an exec_queue_id member.

Right now this is only checking if the engine list is sane and nothing
else. In the end every operation with this IOCTL is a soft check.
So, let's formalize that and only use this IOCTL to wait on the fence.

The exec_queue_id member will help user space to get a proper error code
from the kernel when an exec_queue reset occurs.
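
For illustration (not part of the diff below), a call site changes from keying
the wait on an engine class instance (or NULL for a soft wait) to keying it on
the exec_queue the work was submitted on:

	/* before */
	xe_wait_ufence(fd, &data[i].exec_sync, USER_FENCE_VALUE, NULL, ONE_SEC);

	/* after */
	xe_wait_ufence(fd, &data[i].exec_sync, USER_FENCE_VALUE,
		       exec_queues[i % n_exec_queues], ONE_SEC);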

Signed-off-by: Bommu Krishnaiah <krishnaiah.bommu@intel.com>
Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Cc: Francois Dugast <francois.dugast@intel.com>
---
 include/drm-uapi/xe_drm.h          | 28 ++++++++++++++--------------
 lib/xe/xe_ioctl.c                  | 29 +++++++++++++----------------
 lib/xe/xe_ioctl.h                  | 11 ++++-------
 tests/intel/xe_evict.c             |  4 ++--
 tests/intel/xe_exec_balancer.c     | 15 ++++++++-------
 tests/intel/xe_exec_compute_mode.c | 18 +++++++++---------
 tests/intel/xe_exec_fault_mode.c   | 21 ++++++++++++---------
 tests/intel/xe_exec_reset.c        |  6 +++---
 tests/intel/xe_exec_threads.c      | 15 ++++++++-------
 tests/intel/xe_waitfence.c         | 29 ++++++++++++++---------------
 10 files changed, 87 insertions(+), 89 deletions(-)

diff --git a/include/drm-uapi/xe_drm.h b/include/drm-uapi/xe_drm.h
index 590f7b7af..fd06e4920 100644
--- a/include/drm-uapi/xe_drm.h
+++ b/include/drm-uapi/xe_drm.h
@@ -129,7 +129,6 @@ struct xe_user_extension {
  * It is returned as part of the @drm_xe_engine, but it also is used as
  * the input of engine selection for both @drm_xe_exec_queue_create and
  * @drm_xe_query_engine_cycles
- *
  */
 struct drm_xe_engine_class_instance {
 #define DRM_XE_ENGINE_CLASS_RENDER		0
@@ -143,9 +142,11 @@ struct drm_xe_engine_class_instance {
 	 */
 #define DRM_XE_ENGINE_CLASS_VM_BIND_ASYNC	5
 #define DRM_XE_ENGINE_CLASS_VM_BIND_SYNC	6
+	/** @engine_class: engine class id */
 	__u16 engine_class;
-
+	/** @engine_instance: engine instance id */
 	__u16 engine_instance;
+	/** @gt_id: Unique ID of this GT within the PCI Device */
 	__u16 gt_id;
 	/** @pad: MBZ */
 	__u16 pad;
@@ -736,6 +737,12 @@ struct drm_xe_vm_bind_op {
 	 *
 	 * Note: For userptr and externally imported dma-buf the kernel expects
 	 * either 1WAY or 2WAY for the @pat_index.
+	 *
+	 * For DRM_XE_VM_BIND_FLAG_NULL bindings there are no KMD restrictions
+	 * on the @pat_index. For such mappings there is no actual memory being
+	 * mapped (the address in the PTE is invalid), so the various PAT memory
+	 * attributes likely do not apply.  Simply leaving as zero is one
+	 * option (still a valid pat_index).
 	 */
 	__u16 pat_index;
 
@@ -1024,8 +1031,7 @@ struct drm_xe_wait_user_fence {
 	/** @op: wait operation (type of comparison) */
 	__u16 op;
 
-#define DRM_XE_UFENCE_WAIT_FLAG_SOFT_OP	(1 << 0)	/* e.g. Wait on VM bind */
-#define DRM_XE_UFENCE_WAIT_FLAG_ABSTIME	(1 << 1)
+#define DRM_XE_UFENCE_WAIT_FLAG_ABSTIME	(1 << 0)
 	/** @flags: wait flags */
 	__u16 flags;
 
@@ -1058,17 +1064,11 @@ struct drm_xe_wait_user_fence {
 	 */
 	__s64 timeout;
 
-	/**
-	 * @num_engines: number of engine instances to wait on, must be zero
-	 * when DRM_XE_UFENCE_WAIT_FLAG_SOFT_OP set
-	 */
-	__u64 num_engines;
+	/** @exec_queue_id: exec_queue_id returned from xe_exec_queue_create_ioctl */
+	__u32 exec_queue_id;
 
-	/**
-	 * @instances: user pointer to array of drm_xe_engine_class_instance to
-	 * wait on, must be NULL when DRM_XE_UFENCE_WAIT_FLAG_SOFT_OP set
-	 */
-	__u64 instances;
+	/** @pad2: MBZ */
+	__u32 pad2;
 
 	/** @reserved: Reserved */
 	__u64 reserved[2];
diff --git a/lib/xe/xe_ioctl.c b/lib/xe/xe_ioctl.c
index cdad3d7e2..8f466318d 100644
--- a/lib/xe/xe_ioctl.c
+++ b/lib/xe/xe_ioctl.c
@@ -481,7 +481,7 @@ void xe_exec_wait(int fd, uint32_t exec_queue, uint64_t addr)
  * @fd: xe device fd
  * @addr: address of value to compare
  * @value: expected value (equal) in @address
- * @eci: engine class instance
+ * @exec_queue: exec_queue id
  * @timeout: pointer to time to wait in nanoseconds
  *
  * Function compares @value with memory pointed by @addr until they are equal.
@@ -490,17 +490,15 @@ void xe_exec_wait(int fd, uint32_t exec_queue, uint64_t addr)
  * signalled. Returns 0 on success, -errno of ioctl on error.
  */
 int __xe_wait_ufence(int fd, uint64_t *addr, uint64_t value,
-		     struct drm_xe_engine_class_instance *eci,
-		     int64_t *timeout)
+		     uint32_t exec_queue, int64_t *timeout)
 {
 	struct drm_xe_wait_user_fence wait = {
 		.addr = to_user_pointer(addr),
 		.op = DRM_XE_UFENCE_WAIT_OP_EQ,
-		.flags = !eci ? DRM_XE_UFENCE_WAIT_FLAG_SOFT_OP : 0,
+		.flags = 0,
 		.value = value,
 		.mask = DRM_XE_UFENCE_WAIT_MASK_U64,
-		.num_engines = eci ? 1 :0,
-		.instances = eci ? to_user_pointer(eci) : 0,
+		.exec_queue_id = exec_queue,
 	};
 
 	igt_assert(timeout);
@@ -518,7 +516,7 @@ int __xe_wait_ufence(int fd, uint64_t *addr, uint64_t value,
  * @fd: xe device fd
  * @addr: address of value to compare
  * @value: expected value (equal) in @address
- * @eci: engine class instance
+ * @exec_queue: exec_queue id
  * @timeout: time to wait in nanoseconds
  *
  * Function compares @value with memory pointed by @addr until they are equal.
@@ -527,10 +525,9 @@ int __xe_wait_ufence(int fd, uint64_t *addr, uint64_t value,
  * Returns elapsed time in nanoseconds if user fence was signalled.
  */
 int64_t xe_wait_ufence(int fd, uint64_t *addr, uint64_t value,
-		       struct drm_xe_engine_class_instance *eci,
-		       int64_t timeout)
+		       uint32_t exec_queue, int64_t timeout)
 {
-	igt_assert_eq(__xe_wait_ufence(fd, addr, value, eci, &timeout), 0);
+	igt_assert_eq(__xe_wait_ufence(fd, addr, value, exec_queue, &timeout), 0);
 	return timeout;
 }
 
@@ -539,8 +536,9 @@ int64_t xe_wait_ufence(int fd, uint64_t *addr, uint64_t value,
  * @fd: xe device fd
  * @addr: address of value to compare
  * @value: expected value (equal) in @address
- * @eci: engine class instance
+ * @exec_queue: exec_queue id
  * @timeout: absolute time when wait expire
+ * @flag: wait flag
  *
  * Function compares @value with memory pointed by @addr until they are equal.
  * Asserts that ioctl returned without error.
@@ -548,18 +546,17 @@ int64_t xe_wait_ufence(int fd, uint64_t *addr, uint64_t value,
  * Returns elapsed time in nanoseconds if user fence was signalled.
  */
 int64_t xe_wait_ufence_abstime(int fd, uint64_t *addr, uint64_t value,
-			       struct drm_xe_engine_class_instance *eci,
-			       int64_t timeout)
+			       uint32_t exec_queue, int64_t timeout,
+			       uint16_t flag)
 {
 	struct drm_xe_wait_user_fence wait = {
 		.addr = to_user_pointer(addr),
 		.op = DRM_XE_UFENCE_WAIT_OP_EQ,
-		.flags = !eci ? DRM_XE_UFENCE_WAIT_FLAG_SOFT_OP | DRM_XE_UFENCE_WAIT_FLAG_ABSTIME : 0,
+		.flags = flag,
 		.value = value,
 		.mask = DRM_XE_UFENCE_WAIT_MASK_U64,
 		.timeout = timeout,
-		.num_engines = eci ? 1 : 0,
-		.instances = eci ? to_user_pointer(eci) : 0,
+		.exec_queue_id = exec_queue,
 	};
 	struct timespec ts;
 
diff --git a/lib/xe/xe_ioctl.h b/lib/xe/xe_ioctl.h
index c8d46fb29..05480e40b 100644
--- a/lib/xe/xe_ioctl.h
+++ b/lib/xe/xe_ioctl.h
@@ -92,14 +92,11 @@ void xe_exec_sync(int fd, uint32_t exec_queue, uint64_t addr,
 		  struct drm_xe_sync *sync, uint32_t num_syncs);
 void xe_exec_wait(int fd, uint32_t exec_queue, uint64_t addr);
 int __xe_wait_ufence(int fd, uint64_t *addr, uint64_t value,
-		     struct drm_xe_engine_class_instance *eci,
-		     int64_t *timeout);
+		     uint32_t exec_queue, int64_t *timeout);
 int64_t xe_wait_ufence(int fd, uint64_t *addr, uint64_t value,
-		       struct drm_xe_engine_class_instance *eci,
-		       int64_t timeout);
-int64_t xe_wait_ufence_abstime(int fd, uint64_t *addr, uint64_t value,
-			       struct drm_xe_engine_class_instance *eci,
-			       int64_t timeout);
+		       uint32_t exec_queue, int64_t timeout);
+int64_t xe_wait_ufence_abstime(int fd, uint64_t *addr, uint64_t value,
+			       uint32_t exec_queue, int64_t timeout, uint16_t flag);
 void xe_force_gt_reset(int fd, int gt);
 
 #endif /* XE_IOCTL_H */
diff --git a/tests/intel/xe_evict.c b/tests/intel/xe_evict.c
index 89dc46fae..0ac83a3f7 100644
--- a/tests/intel/xe_evict.c
+++ b/tests/intel/xe_evict.c
@@ -317,7 +317,7 @@ test_evict_cm(int fd, struct drm_xe_engine_class_instance *eci,
 			}
 #define TWENTY_SEC	MS_TO_NS(20000)
 			xe_wait_ufence(fd, &data[i].vm_sync, USER_FENCE_VALUE,
-				       NULL, TWENTY_SEC);
+				       bind_exec_queues[0], TWENTY_SEC);
 		}
 		sync[0].addr = addr + (char *)&data[i].exec_sync -
 			(char *)data;
@@ -352,7 +352,7 @@ test_evict_cm(int fd, struct drm_xe_engine_class_instance *eci,
 		data = xe_bo_map(fd, __bo,
 				 ALIGN(sizeof(*data) * n_execs, 0x1000));
 		xe_wait_ufence(fd, &data[i].exec_sync, USER_FENCE_VALUE,
-			       NULL, TWENTY_SEC);
+			       exec_queues[i % n_exec_queues], TWENTY_SEC);
 		igt_assert_eq(data[i].data, 0xc0ffee);
 	}
 	munmap(data, ALIGN(sizeof(*data) * n_execs, 0x1000));
diff --git a/tests/intel/xe_exec_balancer.c b/tests/intel/xe_exec_balancer.c
index 79ff65e89..5dded3ce4 100644
--- a/tests/intel/xe_exec_balancer.c
+++ b/tests/intel/xe_exec_balancer.c
@@ -483,7 +483,7 @@ test_cm(int fd, int gt, int class, int n_exec_queues, int n_execs,
 					 bo_size, sync, 1);
 
 #define ONE_SEC	MS_TO_NS(1000)
-	xe_wait_ufence(fd, &data[0].vm_sync, USER_FENCE_VALUE, NULL, ONE_SEC);
+	xe_wait_ufence(fd, &data[0].vm_sync, USER_FENCE_VALUE, 0, ONE_SEC);
 	data[0].vm_sync = 0;
 
 	for (i = 0; i < n_execs; i++) {
@@ -514,7 +514,7 @@ test_cm(int fd, int gt, int class, int n_exec_queues, int n_execs,
 
 		if (flags & REBIND && i + 1 != n_execs) {
 			xe_wait_ufence(fd, &data[i].exec_sync, USER_FENCE_VALUE,
-				       NULL, ONE_SEC);
+				       exec_queues[e], ONE_SEC);
 			xe_vm_unbind_async(fd, vm, 0, 0, addr, bo_size, NULL,
 					   0);
 
@@ -529,7 +529,7 @@ test_cm(int fd, int gt, int class, int n_exec_queues, int n_execs,
 							 addr, bo_size, sync,
 							 1);
 			xe_wait_ufence(fd, &data[0].vm_sync, USER_FENCE_VALUE,
-				       NULL, ONE_SEC);
+				       0, ONE_SEC);
 			data[0].vm_sync = 0;
 		}
 
@@ -542,7 +542,8 @@ test_cm(int fd, int gt, int class, int n_exec_queues, int n_execs,
 				 * an invalidate.
 				 */
 				xe_wait_ufence(fd, &data[i].exec_sync,
-					       USER_FENCE_VALUE, NULL, ONE_SEC);
+					       USER_FENCE_VALUE, exec_queues[e],
+					       ONE_SEC);
 				igt_assert_eq(data[i].data, 0xc0ffee);
 			} else if (i * 2 != n_execs) {
 				/*
@@ -571,8 +572,8 @@ test_cm(int fd, int gt, int class, int n_exec_queues, int n_execs,
 
 	j = flags & INVALIDATE && n_execs ? n_execs - 1 : 0;
 	for (i = j; i < n_execs; i++)
-		xe_wait_ufence(fd, &data[i].exec_sync, USER_FENCE_VALUE, NULL,
-			       ONE_SEC);
+		xe_wait_ufence(fd, &data[i].exec_sync, USER_FENCE_VALUE,
+			       exec_queues[i % n_exec_queues], ONE_SEC);
 
 	/* Wait for all execs to complete */
 	if (flags & INVALIDATE)
@@ -580,7 +581,7 @@ test_cm(int fd, int gt, int class, int n_exec_queues, int n_execs,
 
 	sync[0].addr = to_user_pointer(&data[0].vm_sync);
 	xe_vm_unbind_async(fd, vm, 0, 0, addr, bo_size, sync, 1);
-	xe_wait_ufence(fd, &data[0].vm_sync, USER_FENCE_VALUE, NULL, ONE_SEC);
+	xe_wait_ufence(fd, &data[0].vm_sync, USER_FENCE_VALUE, 0, ONE_SEC);
 
 	for (i = (flags & INVALIDATE && n_execs) ? n_execs - 1 : 0;
 	     i < n_execs; i++)
diff --git a/tests/intel/xe_exec_compute_mode.c b/tests/intel/xe_exec_compute_mode.c
index 7d3004d65..6123d2b29 100644
--- a/tests/intel/xe_exec_compute_mode.c
+++ b/tests/intel/xe_exec_compute_mode.c
@@ -171,8 +171,8 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
 
 	fence_timeout = igt_run_in_simulation() ? HUNDRED_SEC : ONE_SEC;
 
-	xe_wait_ufence(fd, &data[0].vm_sync, USER_FENCE_VALUE, NULL,
-		       fence_timeout);
+	xe_wait_ufence(fd, &data[0].vm_sync, USER_FENCE_VALUE,
+		       bind_exec_queues[0], fence_timeout);
 	data[0].vm_sync = 0;
 
 	for (i = 0; i < n_execs; i++) {
@@ -198,7 +198,7 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
 
 		if (flags & REBIND && i + 1 != n_execs) {
 			xe_wait_ufence(fd, &data[i].exec_sync, USER_FENCE_VALUE,
-				       NULL, fence_timeout);
+				       exec_queues[e], fence_timeout);
 			xe_vm_unbind_async(fd, vm, bind_exec_queues[e], 0,
 					   addr, bo_size, NULL, 0);
 
@@ -214,7 +214,7 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
 							 addr, bo_size, sync,
 							 1);
 			xe_wait_ufence(fd, &data[0].vm_sync, USER_FENCE_VALUE,
-				       NULL, fence_timeout);
+				       bind_exec_queues[e], fence_timeout);
 			data[0].vm_sync = 0;
 		}
 
@@ -227,7 +227,7 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
 				 * an invalidate.
 				 */
 				xe_wait_ufence(fd, &data[i].exec_sync,
-					       USER_FENCE_VALUE, NULL,
+					       USER_FENCE_VALUE, exec_queues[e],
 					       fence_timeout);
 				igt_assert_eq(data[i].data, 0xc0ffee);
 			} else if (i * 2 != n_execs) {
@@ -257,8 +257,8 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
 
 	j = flags & INVALIDATE ? n_execs - 1 : 0;
 	for (i = j; i < n_execs; i++)
-		xe_wait_ufence(fd, &data[i].exec_sync, USER_FENCE_VALUE, NULL,
-			       fence_timeout);
+		xe_wait_ufence(fd, &data[i].exec_sync, USER_FENCE_VALUE,
+			       exec_queues[i % n_exec_queues], fence_timeout);
 
 	/* Wait for all execs to complete */
 	if (flags & INVALIDATE)
@@ -267,8 +267,8 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
 	sync[0].addr = to_user_pointer(&data[0].vm_sync);
 	xe_vm_unbind_async(fd, vm, bind_exec_queues[0], 0, addr, bo_size,
 			   sync, 1);
-	xe_wait_ufence(fd, &data[0].vm_sync, USER_FENCE_VALUE, NULL,
-		       fence_timeout);
+	xe_wait_ufence(fd, &data[0].vm_sync, USER_FENCE_VALUE,
+		       bind_exec_queues[0], fence_timeout);
 
 	for (i = j; i < n_execs; i++)
 		igt_assert_eq(data[i].data, 0xc0ffee);
diff --git a/tests/intel/xe_exec_fault_mode.c b/tests/intel/xe_exec_fault_mode.c
index ee7cbb604..3dda33469 100644
--- a/tests/intel/xe_exec_fault_mode.c
+++ b/tests/intel/xe_exec_fault_mode.c
@@ -195,15 +195,16 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
 	}
 
 #define ONE_SEC	MS_TO_NS(1000)
-	xe_wait_ufence(fd, &data[0].vm_sync, USER_FENCE_VALUE, NULL, ONE_SEC);
+	xe_wait_ufence(fd, &data[0].vm_sync, USER_FENCE_VALUE,
+		       bind_exec_queues[0], ONE_SEC);
 	data[0].vm_sync = 0;
 
 	if (flags & PREFETCH) {
 		/* Should move to system memory */
 		xe_vm_prefetch_async(fd, vm, bind_exec_queues[0], 0, addr,
 				     bo_size, sync, 1, 0);
-		xe_wait_ufence(fd, &data[0].vm_sync, USER_FENCE_VALUE, NULL,
-			       ONE_SEC);
+		xe_wait_ufence(fd, &data[0].vm_sync, USER_FENCE_VALUE,
+			       bind_exec_queues[0], ONE_SEC);
 		data[0].vm_sync = 0;
 	}
 
@@ -230,7 +231,7 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
 
 		if (flags & REBIND && i + 1 != n_execs) {
 			xe_wait_ufence(fd, &data[i].exec_sync, USER_FENCE_VALUE,
-				       NULL, ONE_SEC);
+				       exec_queues[e], ONE_SEC);
 			xe_vm_unbind_async(fd, vm, bind_exec_queues[e], 0,
 					   addr, bo_size, NULL, 0);
 
@@ -246,7 +247,7 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
 							 addr, bo_size, sync,
 							 1);
 			xe_wait_ufence(fd, &data[0].vm_sync, USER_FENCE_VALUE,
-				       NULL, ONE_SEC);
+				       bind_exec_queues[e], ONE_SEC);
 			data[0].vm_sync = 0;
 		}
 
@@ -259,7 +260,8 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
 				 * an invalidate.
 				 */
 				xe_wait_ufence(fd, &data[i].exec_sync,
-					       USER_FENCE_VALUE, NULL, ONE_SEC);
+					       USER_FENCE_VALUE, exec_queues[e],
+					       ONE_SEC);
 				igt_assert_eq(data[i].data, 0xc0ffee);
 			} else if (i * 2 != n_execs) {
 				/*
@@ -289,14 +291,15 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
 	if (!(flags & INVALID_FAULT)) {
 		j = flags & INVALIDATE ? n_execs - 1 : 0;
 		for (i = j; i < n_execs; i++)
-			xe_wait_ufence(fd, &data[i].exec_sync,
-				       USER_FENCE_VALUE, NULL, ONE_SEC);
+			xe_wait_ufence(fd, &data[i].exec_sync, USER_FENCE_VALUE,
+				       exec_queues[i % n_exec_queues], ONE_SEC);
 	}
 
 	sync[0].addr = to_user_pointer(&data[0].vm_sync);
 	xe_vm_unbind_async(fd, vm, bind_exec_queues[0], 0, addr, bo_size,
 			   sync, 1);
-	xe_wait_ufence(fd, &data[0].vm_sync, USER_FENCE_VALUE, NULL, ONE_SEC);
+	xe_wait_ufence(fd, &data[0].vm_sync, USER_FENCE_VALUE,
+		       bind_exec_queues[0], ONE_SEC);
 
 	if (!(flags & INVALID_FAULT)) {
 		for (i = j; i < n_execs; i++)
diff --git a/tests/intel/xe_exec_reset.c b/tests/intel/xe_exec_reset.c
index 094b34896..8e6c2e2e4 100644
--- a/tests/intel/xe_exec_reset.c
+++ b/tests/intel/xe_exec_reset.c
@@ -564,7 +564,7 @@ test_compute_mode(int fd, struct drm_xe_engine_class_instance *eci,
 	xe_vm_bind_async(fd, vm, 0, bo, 0, addr, bo_size, sync, 1);
 
 #define THREE_SEC	MS_TO_NS(3000)
-	xe_wait_ufence(fd, &data[0].vm_sync, USER_FENCE_VALUE, NULL, THREE_SEC);
+	xe_wait_ufence(fd, &data[0].vm_sync, USER_FENCE_VALUE, 0, THREE_SEC);
 	data[0].vm_sync = 0;
 
 	for (i = 0; i < n_execs; i++) {
@@ -621,7 +621,7 @@ test_compute_mode(int fd, struct drm_xe_engine_class_instance *eci,
 		int err;
 
 		err = __xe_wait_ufence(fd, &data[i].exec_sync, USER_FENCE_VALUE,
-				       NULL, &timeout);
+				       exec_queues[i % n_exec_queues], &timeout);
 		if (flags & GT_RESET)
 			/* exec races with reset: may timeout or complete */
 			igt_assert(err == -ETIME || !err);
@@ -631,7 +631,7 @@ test_compute_mode(int fd, struct drm_xe_engine_class_instance *eci,
 
 	sync[0].addr = to_user_pointer(&data[0].vm_sync);
 	xe_vm_unbind_async(fd, vm, 0, 0, addr, bo_size, sync, 1);
-	xe_wait_ufence(fd, &data[0].vm_sync, USER_FENCE_VALUE, NULL, THREE_SEC);
+	xe_wait_ufence(fd, &data[0].vm_sync, USER_FENCE_VALUE, 0, THREE_SEC);
 
 	if (!(flags & GT_RESET)) {
 		for (i = 1; i < n_execs; i++)
diff --git a/tests/intel/xe_exec_threads.c b/tests/intel/xe_exec_threads.c
index fcb926698..ca2dd421e 100644
--- a/tests/intel/xe_exec_threads.c
+++ b/tests/intel/xe_exec_threads.c
@@ -331,7 +331,7 @@ test_compute_mode(int fd, uint32_t vm, uint64_t addr, uint64_t userptr,
 
 	fence_timeout = igt_run_in_simulation() ? THIRTY_SEC : THREE_SEC;
 
-	xe_wait_ufence(fd, &data[0].vm_sync, USER_FENCE_VALUE, NULL, fence_timeout);
+	xe_wait_ufence(fd, &data[0].vm_sync, USER_FENCE_VALUE, 0, fence_timeout);
 	data[0].vm_sync = 0;
 
 	for (i = 0; i < n_execs; i++) {
@@ -359,7 +359,7 @@ test_compute_mode(int fd, uint32_t vm, uint64_t addr, uint64_t userptr,
 			for (j = i - 0x20; j <= i; ++j)
 				xe_wait_ufence(fd, &data[j].exec_sync,
 					       USER_FENCE_VALUE,
-					       NULL, fence_timeout);
+					       exec_queues[e], fence_timeout);
 			xe_vm_unbind_async(fd, vm, 0, 0, addr, bo_size,
 					   NULL, 0);
 
@@ -374,7 +374,7 @@ test_compute_mode(int fd, uint32_t vm, uint64_t addr, uint64_t userptr,
 							 addr, bo_size, sync,
 							 1);
 			xe_wait_ufence(fd, &data[0].vm_sync, USER_FENCE_VALUE,
-				       NULL, fence_timeout);
+				       0, fence_timeout);
 			data[0].vm_sync = 0;
 		}
 
@@ -389,7 +389,8 @@ test_compute_mode(int fd, uint32_t vm, uint64_t addr, uint64_t userptr,
 				for (j = i == 0x20 ? 0 : i - 0x1f; j <= i; ++j)
 					xe_wait_ufence(fd, &data[j].exec_sync,
 						       USER_FENCE_VALUE,
-						       NULL, fence_timeout);
+						       exec_queues[e],
+						       fence_timeout);
 				igt_assert_eq(data[i].data, 0xc0ffee);
 			} else if (i * 2 != n_execs) {
 				/*
@@ -421,8 +422,8 @@ test_compute_mode(int fd, uint32_t vm, uint64_t addr, uint64_t userptr,
 	j = flags & INVALIDATE ?
 		(flags & RACE ? n_execs / 2 + 1 : n_execs - 1) : 0;
 	for (i = j; i < n_execs; i++)
-		xe_wait_ufence(fd, &data[i].exec_sync, USER_FENCE_VALUE, NULL,
-			       fence_timeout);
+		xe_wait_ufence(fd, &data[i].exec_sync, USER_FENCE_VALUE,
+			       exec_queues[i % n_exec_queues], fence_timeout);
 
 	/* Wait for all execs to complete */
 	if (flags & INVALIDATE)
@@ -430,7 +431,7 @@ test_compute_mode(int fd, uint32_t vm, uint64_t addr, uint64_t userptr,
 
 	sync[0].addr = to_user_pointer(&data[0].vm_sync);
 	xe_vm_unbind_async(fd, vm, 0, 0, addr, bo_size, sync, 1);
-	xe_wait_ufence(fd, &data[0].vm_sync, USER_FENCE_VALUE, NULL, fence_timeout);
+	xe_wait_ufence(fd, &data[0].vm_sync, USER_FENCE_VALUE, 0, fence_timeout);
 
 	for (i = j; i < n_execs; i++)
 		igt_assert_eq(data[i].data, 0xc0ffee);
diff --git a/tests/intel/xe_waitfence.c b/tests/intel/xe_waitfence.c
index 3be987954..c272d8a83 100644
--- a/tests/intel/xe_waitfence.c
+++ b/tests/intel/xe_waitfence.c
@@ -37,22 +37,20 @@ static void do_bind(int fd, uint32_t vm, uint32_t bo, uint64_t offset,
 }
 
 static int64_t wait_with_eci_abstime(int fd, uint64_t *addr, uint64_t value,
-				     struct drm_xe_engine_class_instance *eci,
-				     int64_t timeout)
+				     uint32_t exec_queue, int64_t timeout,
+				     uint16_t flag)
 {
 	struct drm_xe_wait_user_fence wait = {
 		.addr = to_user_pointer(addr),
 		.op = DRM_XE_UFENCE_WAIT_OP_EQ,
-		.flags = !eci ? 0 : DRM_XE_UFENCE_WAIT_FLAG_ABSTIME,
+		.flags = flag,
 		.value = value,
 		.mask = DRM_XE_UFENCE_WAIT_MASK_U64,
 		.timeout = timeout,
-		.num_engines = eci ? 1 : 0,
-		.instances = eci ? to_user_pointer(eci) : 0,
+		.exec_queue_id = exec_queue,
 	};
 	struct timespec ts;
 
-	igt_assert(eci);
 	igt_assert_eq(igt_ioctl(fd, DRM_IOCTL_XE_WAIT_USER_FENCE, &wait), 0);
 	igt_assert_eq(clock_gettime(CLOCK_MONOTONIC, &ts), 0);
 
@@ -82,7 +80,7 @@ enum waittype {
 static void
 waitfence(int fd, enum waittype wt)
 {
-	struct drm_xe_engine *engine = NULL;
+	uint32_t exec_queue;
 	struct timespec ts;
 	int64_t current, signalled;
 	uint32_t bo_1;
@@ -111,15 +109,17 @@ waitfence(int fd, enum waittype wt)
 	do_bind(fd, vm, bo_7, 0, 0xeffff0000, 0x10000, 7);
 
 	if (wt == RELTIME) {
-		timeout = xe_wait_ufence(fd, &wait_fence, 7, NULL, MS_TO_NS(10));
+		timeout = xe_wait_ufence(fd, &wait_fence, 7, 0, MS_TO_NS(10));
 		igt_debug("wait type: RELTIME - timeout: %ld, timeout left: %ld\n",
 			  MS_TO_NS(10), timeout);
 	} else if (wt == ENGINE) {
-		engine = xe_engine(fd, 1);
+		exec_queue = xe_exec_queue_create_class(fd, vm, DRM_XE_ENGINE_CLASS_COPY);
 		clock_gettime(CLOCK_MONOTONIC, &ts);
 		current = ts.tv_sec * 1e9 + ts.tv_nsec;
 		timeout = current + MS_TO_NS(10);
-		signalled = wait_with_eci_abstime(fd, &wait_fence, 7, &engine->instance, timeout);
+		signalled = wait_with_eci_abstime(fd, &wait_fence, 7,
+						  exec_queue, timeout,
+						  DRM_XE_UFENCE_WAIT_FLAG_ABSTIME);
 		igt_debug("wait type: ENGINE ABSTIME - timeout: %" PRId64
 			  ", signalled: %" PRId64
 			  ", elapsed: %" PRId64 "\n",
@@ -128,7 +128,8 @@ waitfence(int fd, enum waittype wt)
 		clock_gettime(CLOCK_MONOTONIC, &ts);
 		current = ts.tv_sec * 1e9 + ts.tv_nsec;
 		timeout = current + MS_TO_NS(10);
-		signalled = xe_wait_ufence_abstime(fd, &wait_fence, 7, NULL, timeout);
+		signalled = xe_wait_ufence_abstime(fd, &wait_fence, 7, 0,
+						   timeout, 0);
 		igt_debug("wait type: ABSTIME - timeout: %" PRId64
 			  ", signalled: %" PRId64
 			  ", elapsed: %" PRId64 "\n",
@@ -191,8 +192,7 @@ invalid_ops(int fd)
 		.value = 1,
 		.mask = DRM_XE_UFENCE_WAIT_MASK_U64,
 		.timeout = 1,
-		.num_engines = 0,
-		.instances = 0,
+		.exec_queue_id = 0,
 	};
 
 	uint32_t vm = xe_vm_create(fd, DRM_XE_VM_CREATE_FLAG_ASYNC_DEFAULT, 0);
@@ -216,8 +216,7 @@ invalid_engine(int fd)
 		.value = 1,
 		.mask = DRM_XE_UFENCE_WAIT_MASK_U64,
 		.timeout = -1,
-		.num_engines = 1,
-		.instances = 0,
+		.exec_queue_id = 0,
 	};
 
 	uint32_t vm = xe_vm_create(fd, DRM_XE_VM_CREATE_FLAG_ASYNC_DEFAULT, 0);
-- 
2.25.1

^ permalink raw reply related	[flat|nested] 7+ messages in thread

* [PATCH v5 2/2] drm-uapi/xe: Don't wait on user_fence during exec queue reset
  2023-12-12  5:10 [PATCH v5 0/2] RFC: drm-uapi/xe: add exec_queue_id member to drm_xe_wait_user_fence structure Bommu Krishnaiah
  2023-12-12  5:10 ` [PATCH v5 1/2] " Bommu Krishnaiah
@ 2023-12-12  5:10 ` Bommu Krishnaiah
  2023-12-12 12:07   ` Francois Dugast
  2023-12-12  5:35 ` ✗ Fi.CI.BUILD: failure for RFC: drm-uapi/xe: add exec_queue_id member to drm_xe_wait_user_fence structure (rev7) Patchwork
  2 siblings, 1 reply; 7+ messages in thread
From: Bommu Krishnaiah @ 2023-12-12  5:10 UTC (permalink / raw)
  To: igt-dev; +Cc: Bommu Krishnaiah, Rodrigo Vivi

Don't wait till timeout on the user fence when an exec_queue reset is
detected, and return a proper error code.
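
A minimal sketch (using the helpers reworked in patch 1/2, not part of this
diff) of what callers can now do once the queue id is passed to the wait:

	int64_t timeout = MS_TO_NS(1000);
	int err;

	err = __xe_wait_ufence(fd, &data[i].exec_sync, USER_FENCE_VALUE,
			       exec_queues[i % n_exec_queues], &timeout);
	if (err == -EIO)
		igt_info("exec_queue was reset, no point waiting further\n");
	else
		igt_assert_eq(err, 0);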

Signed-off-by: Bommu Krishnaiah <krishnaiah.bommu@intel.com>
Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Cc: Francois Dugast <francois.dugast@intel.com>
---
 tests/intel/xe_waitfence.c | 82 ++++++++++++++++++++++++++++++++++++++
 1 file changed, 82 insertions(+)

diff --git a/tests/intel/xe_waitfence.c b/tests/intel/xe_waitfence.c
index c272d8a83..2eb3a28b2 100644
--- a/tests/intel/xe_waitfence.c
+++ b/tests/intel/xe_waitfence.c
@@ -153,6 +153,9 @@ waitfence(int fd, enum waittype wt)
  *
  * SUBTEST: invalid-engine
  * Description: Check query with invalid engine info returns expected error code
+ *
+ * SUBTEST: exec_queue-reset-wait
+ * Description: Don't wait till timeout on user fence when exec_queue reset is detected and return a proper error
  */
 
 static void
@@ -228,6 +231,82 @@ invalid_engine(int fd)
 	do_ioctl_err(fd, DRM_IOCTL_XE_WAIT_USER_FENCE, &wait, EFAULT);
 }
 
+static void
+exec_queue_reset_wait(int fd)
+{
+	uint32_t bo, b;
+	uint64_t batch_offset;
+	uint64_t batch_addr;
+	uint64_t sdi_offset;
+	uint64_t sdi_addr;
+	uint64_t addr = 0x1a0000;
+
+	struct {
+		uint32_t batch[16];
+		uint64_t pad;
+		uint64_t vm_sync;
+		uint64_t exec_sync;
+		uint32_t data;
+	} *data;
+
+#define USER_FENCE_VALUE        0xdeadbeefdeadbeefull
+	struct drm_xe_sync sync[1] = {
+		{ .flags = DRM_XE_SYNC_TYPE_USER_FENCE | DRM_XE_SYNC_FLAG_SIGNAL,
+			.timeline_value = USER_FENCE_VALUE },
+	};
+
+	struct drm_xe_exec exec = {
+		.num_batch_buffer = 1,
+		.num_syncs = 1,
+		.syncs = to_user_pointer(sync),
+	};
+
+	uint32_t vm = xe_vm_create(fd, DRM_XE_VM_CREATE_FLAG_ASYNC_DEFAULT, 0);
+	uint32_t exec_queue = xe_exec_queue_create_class(fd, vm, DRM_XE_ENGINE_CLASS_COPY);
+	struct drm_xe_wait_user_fence wait = {
+		.op = DRM_XE_UFENCE_WAIT_OP_EQ,
+		.flags = 0,
+		.value = 0xc0ffee,
+		.mask = DRM_XE_UFENCE_WAIT_MASK_U64,
+		.timeout = -1,
+		.exec_queue_id = exec_queue,
+	};
+
+	bo = xe_bo_create(fd, vm, 0x40000, vram_if_possible(fd, 0), 0);
+	data = xe_bo_map(fd, bo, 0x40000);
+
+	batch_offset = (char *)&data[0].batch - (char *)data;
+	batch_addr = addr + batch_offset;
+	sdi_offset = (char *)&data[0].data - (char *)data;
+	sdi_addr = addr + sdi_offset;
+
+	b = 0;
+	data[0].batch[b++] = MI_STORE_DWORD_IMM_GEN4;
+	data[0].batch[b++] = sdi_addr;
+	data[0].batch[b++] = sdi_addr >> 32;
+	data[0].batch[b++] = 0xc0ffee;
+	data[0].batch[b++] = MI_BATCH_BUFFER_END;
+	igt_assert(b <= ARRAY_SIZE(data[0].batch));
+
+	wait.addr = to_user_pointer(&data[0].exec_sync);
+	exec.exec_queue_id = exec_queue;
+	exec.address = batch_addr;
+
+	xe_exec(fd, &exec);
+
+	/*
+	 * Don't do the GPU mapping (vm_bind) for the object, so that an exec_queue
+	 * reset will happen and xe_wait_ufence will return EIO instead of ETIME
+	 */
+	do_ioctl_err(fd, DRM_IOCTL_XE_WAIT_USER_FENCE, &wait, EIO);
+
+	xe_exec_queue_destroy(fd, exec_queue);
+
+	if (bo) {
+		munmap(data, 0x40000);
+		gem_close(fd, bo);
+	}
+}
 
 igt_main
 {
@@ -254,6 +333,9 @@ igt_main
 	igt_subtest("invalid-engine")
 		invalid_engine(fd);
 
+	igt_subtest("exec_queue-reset-wait")
+		exec_queue_reset_wait(fd);
+
 	igt_fixture
 		drm_close_driver(fd);
 }
-- 
2.25.1

^ permalink raw reply related	[flat|nested] 7+ messages in thread

* ✗ Fi.CI.BUILD: failure for RFC: drm-uapi/xe: add exec_queue_id member to drm_xe_wait_user_fence structure (rev7)
  2023-12-12  5:10 [PATCH v5 0/2] RFC: drm-uapi/xe: add exec_queue_id member to drm_xe_wait_user_fence structure Bommu Krishnaiah
  2023-12-12  5:10 ` [PATCH v5 1/2] " Bommu Krishnaiah
  2023-12-12  5:10 ` [PATCH v5 2/2] drm-uapi/xe: Don't wait on user_fence during exec queue reset Bommu Krishnaiah
@ 2023-12-12  5:35 ` Patchwork
  2 siblings, 0 replies; 7+ messages in thread
From: Patchwork @ 2023-12-12  5:35 UTC (permalink / raw)
  To: Bommu Krishnaiah; +Cc: igt-dev

== Series Details ==

Series: RFC: drm-uapi/xe: add exec_queue_id member to drm_xe_wait_user_fence structure (rev7)
URL   : https://patchwork.freedesktop.org/series/127364/
State : failure

== Summary ==

IGT patchset build failed on latest successful build
0b796be8ce05cb2070ce5136d248f438c962d11e tests/intel: Add Xe peer2peer test

Tail of build.log:
[621/1665] Compiling C object 'tests/59830eb@@kms_chamelium_edid@exe/chamelium_kms_chamelium_helper.c.o'.
[622/1665] Compiling C object 'tests/59830eb@@kms_chamelium_audio@exe/chamelium_kms_chamelium_helper.c.o'.
[623/1665] Compiling C object 'tests/59830eb@@kms_psr@exe/intel_kms_psr.c.o'.
[624/1665] Compiling C object 'tests/v3d/cad21b8@@v3d_mmap@exe/v3d_mmap.c.o'.
[625/1665] Compiling C object 'tests/v3d/cad21b8@@v3d_get_param@exe/v3d_get_param.c.o'.
[626/1665] Compiling C object 'tests/59830eb@@i915_pm_rpm@exe/intel_i915_pm_rpm.c.o'.
[627/1665] Compiling C object 'tests/vc4/e4667e8@@vc4_create_bo@exe/vc4_create_bo.c.o'.
[628/1665] Compiling C object 'tests/vc4/e4667e8@@vc4_lookup_fail@exe/vc4_lookup_fail.c.o'.
[629/1665] Compiling C object 'tests/59830eb@@gem_sync@exe/intel_gem_sync.c.o'.
[630/1665] Compiling C object 'tests/59830eb@@msm_shrink@exe/msm_msm_shrink.c.o'.
[631/1665] Compiling C object 'lib/76b5a35@@igt-igt_kms_c@sta/igt_kms.c.o'.
[632/1665] Compiling C object 'tests/59830eb@@kms_chamelium_hpd@exe/chamelium_kms_chamelium_helper.c.o'.
[633/1665] Compiling C object 'tests/v3d/cad21b8@@v3d_get_bo_offset@exe/v3d_get_bo_offset.c.o'.
[634/1665] Compiling C object 'tests/vc4/e4667e8@@vc4_mmap@exe/vc4_mmap.c.o'.
[635/1665] Compiling C object 'tests/v3d/cad21b8@@v3d_wait_bo@exe/v3d_wait_bo.c.o'.
[636/1665] Compiling C object 'tests/vc4/e4667e8@@vc4_dmabuf_poll@exe/vc4_dmabuf_poll.c.o'.
[637/1665] Compiling C object 'tests/59830eb@@kms_chamelium_frames@exe/chamelium_kms_chamelium_helper.c.o'.
[638/1665] Compiling C object 'tests/59830eb@@xe_pm@exe/intel_xe_pm.c.o'.
[639/1665] Compiling C object 'tests/59830eb@@xe_exec_balancer@exe/intel_xe_exec_balancer.c.o'.
[640/1665] Compiling C object 'tests/59830eb@@xe_exec_reset@exe/intel_xe_exec_reset.c.o'.
[641/1665] Compiling C object 'tests/59830eb@@kms_chamelium_color@exe/kms_color_helper.c.o'.
[642/1665] Compiling C object 'tests/59830eb@@gem_softpin@exe/intel_gem_softpin.c.o'.
[643/1665] Compiling C object 'tests/59830eb@@xe_query@exe/intel_xe_query.c.o'.
[644/1665] Compiling C object 'tests/vc4/e4667e8@@vc4_label_bo@exe/vc4_label_bo.c.o'.
[645/1665] Compiling C object 'tests/vc4/e4667e8@@vc4_purgeable_bo@exe/vc4_purgeable_bo.c.o'.
[646/1665] Compiling C object 'tests/59830eb@@kms_chamelium_color@exe/chamelium_kms_chamelium_color.c.o'.
[647/1665] Compiling C object 'tests/v3d/cad21b8@@v3d_perfmon@exe/v3d_perfmon.c.o'.
[648/1665] Compiling C object 'tests/v3d/cad21b8@@v3d_job_submission@exe/v3d_job_submission.c.o'.
[649/1665] Compiling C object 'tests/59830eb@@xe_evict@exe/intel_xe_evict.c.o'.
[650/1665] Compiling C object 'tests/v3d/cad21b8@@v3d_submit_csd@exe/v3d_submit_csd.c.o'.
[651/1665] Compiling C object 'tests/59830eb@@kms_psr2_sf@exe/intel_kms_psr2_sf.c.o'.
[652/1665] Compiling C object 'tests/59830eb@@kms_chamelium_hpd@exe/chamelium_kms_chamelium_hpd.c.o'.
[653/1665] Compiling C object 'tests/59830eb@@kms_chamelium_audio@exe/chamelium_kms_chamelium_audio.c.o'.
[654/1665] Compiling C object 'tests/v3d/cad21b8@@v3d_submit_cl@exe/v3d_submit_cl.c.o'.
[655/1665] Compiling C object 'tests/59830eb@@xe_pat@exe/intel_xe_pat.c.o'.
[656/1665] Compiling C object 'tests/59830eb@@kms_chamelium_edid@exe/chamelium_kms_chamelium_edid.c.o'.
[657/1665] Compiling C object 'tests/59830eb@@xe_exec_threads@exe/intel_xe_exec_threads.c.o'.
[658/1665] Compiling C object 'tests/59830eb@@kms_pm_rpm@exe/intel_kms_pm_rpm.c.o'.
[659/1665] Compiling C object 'tests/59830eb@@i915_query@exe/intel_i915_query.c.o'.
[660/1665] Compiling C object 'tests/59830eb@@xe_intel_bb@exe/intel_xe_intel_bb.c.o'.
[661/1665] Compiling C object 'tests/59830eb@@kms_chamelium_frames@exe/chamelium_kms_chamelium_frames.c.o'.
[662/1665] Compiling C object 'tests/59830eb@@gem_exec_balancer@exe/intel_gem_exec_balancer.c.o'.
[663/1665] Compiling C object 'tests/59830eb@@gem_exec_fence@exe/intel_gem_exec_fence.c.o'.
[664/1665] Compiling C object 'tests/59830eb@@xe_vm@exe/intel_xe_vm.c.o'.
[665/1665] Generating i915-perf-equations with a custom command.
[666/1665] Compiling C object 'tests/59830eb@@kms_frontbuffer_tracking@exe/intel_kms_frontbuffer_tracking.c.o'.
[667/1665] Compiling C object 'tests/59830eb@@perf_pmu@exe/intel_perf_pmu.c.o'.
[668/1665] Compiling C object 'tests/59830eb@@gem_exec_schedule@exe/intel_gem_exec_schedule.c.o'.
[669/1665] Compiling C object 'tests/59830eb@@perf@exe/intel_perf.c.o'.
ninja: build stopped: subcommand failed.


^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [PATCH v5 1/2] drm-uapi/xe: add exec_queue_id member to drm_xe_wait_user_fence structure
  2023-12-12  5:10 ` [PATCH v5 1/2] " Bommu Krishnaiah
@ 2023-12-12  5:39   ` Rodrigo Vivi
  0 siblings, 0 replies; 7+ messages in thread
From: Rodrigo Vivi @ 2023-12-12  5:39 UTC (permalink / raw)
  To: Bommu Krishnaiah; +Cc: igt-dev

On Tue, Dec 12, 2023 at 10:40:39AM +0530, Bommu Krishnaiah wrote:
> Remove the num_engines/instances members from the drm_xe_wait_user_fence structure
> and add an exec_queue_id member.
> 
> Right now this is only checking if the engine list is sane and nothing
> else. In the end every operation with this IOCTL is a soft check.
> So, let's formalize that and only use this IOCTL to wait on the fence.
> 
> exec_queue_id member will help to user space to get proper error code
> from kernel while in exec_queue reset

Thanks for the explanation on i % n_exec_queues

Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>

> [rest of the quoted patch snipped, identical to the patch above]
>  		.value = 1,
>  		.mask = DRM_XE_UFENCE_WAIT_MASK_U64,
>  		.timeout = 1,
> -		.num_engines = 0,
> -		.instances = 0,
> +		.exec_queue_id = 0,
>  	};
>  
>  	uint32_t vm = xe_vm_create(fd, DRM_XE_VM_CREATE_FLAG_ASYNC_DEFAULT, 0);
> @@ -216,8 +216,7 @@ invalid_engine(int fd)
>  		.value = 1,
>  		.mask = DRM_XE_UFENCE_WAIT_MASK_U64,
>  		.timeout = -1,
> -		.num_engines = 1,
> -		.instances = 0,
> +		.exec_queue_id = 0,
>  	};
>  
>  	uint32_t vm = xe_vm_create(fd, DRM_XE_VM_CREATE_FLAG_ASYNC_DEFAULT, 0);
> -- 
> 2.25.1
> 
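For reference, this is how callers end up using the reworked helpers after this change. A minimal sketch, not taken from the patch: fd, data and exec_queue are assumed to come from a test set up like the ones in the hunks above, and the helper signatures are assumed from those hunks.

	int64_t timeout = MS_TO_NS(3000);
	int err;

	/* VM bind fence: not tied to an exec queue, so pass 0 */
	xe_wait_ufence(fd, &data[0].vm_sync, USER_FENCE_VALUE, 0, timeout);

	/* Exec fence: pass the exec_queue_id that will signal it */
	err = __xe_wait_ufence(fd, &data[0].exec_sync, USER_FENCE_VALUE,
			       exec_queue, &timeout);
	/* 0 on success, -ETIME on timeout (and, with patch 2/2 applied,
	 * -EIO if the exec queue was reset) */
	igt_assert_eq(err, 0);

The exec_queue_id argument replaces the old num_engines/instances pair from the previous uAPI.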

^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [PATCH v5 2/2] drm-uapi/xe: Don't wait on user_fence during exec queue reset
  2023-12-12  5:10 ` [PATCH v5 2/2] drm-uapi/xe: Don't wait on user_fence during exec queue reset Bommu Krishnaiah
@ 2023-12-12 12:07   ` Francois Dugast
  2023-12-12 12:16     ` Bommu, Krishnaiah
  0 siblings, 1 reply; 7+ messages in thread
From: Francois Dugast @ 2023-12-12 12:07 UTC (permalink / raw)
  To: Bommu Krishnaiah; +Cc: igt-dev, Rodrigo Vivi

On Tue, Dec 12, 2023 at 10:40:40AM +0530, Bommu Krishnaiah wrote:
> Don't wait till timeout on user fence when exec_queue reset is detected
> and return a proper error code
> 
> Signed-off-by: Bommu Krishnaiah <krishnaiah.bommu@intel.com>
> Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
> Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
> Cc: Francois Dugast <francois.dugast@intel.com>
> ---
>  tests/intel/xe_waitfence.c | 82 ++++++++++++++++++++++++++++++++++++++
>  1 file changed, 82 insertions(+)
> 
> diff --git a/tests/intel/xe_waitfence.c b/tests/intel/xe_waitfence.c
> index c272d8a83..2eb3a28b2 100644
> --- a/tests/intel/xe_waitfence.c
> +++ b/tests/intel/xe_waitfence.c
> @@ -153,6 +153,9 @@ waitfence(int fd, enum waittype wt)
>   *
>   * SUBTEST: invalid-engine
>   * Description: Check query with invalid engine info returns expected error code
> + *
> + * SUBTEST: exec_queue-reset-wait
> + * Description: Don’t wait till timeout on user fence when exec_queue reset is detected and return a proper error
>   */
>  
>  static void
> @@ -228,6 +231,82 @@ invalid_engine(int fd)
>  	do_ioctl_err(fd, DRM_IOCTL_XE_WAIT_USER_FENCE, &wait, EFAULT);
>  }
>  
> +static void
> +exec_queue_reset_wait(int fd)
> +{
> +	uint32_t bo, b;
> +	uint64_t batch_offset;
> +	uint64_t batch_addr;
> +	uint64_t sdi_offset;
> +	uint64_t sdi_addr;
> +	uint64_t addr = 0x1a0000;
> +
> +	struct {
> +		uint32_t batch[16];
> +		uint64_t pad;
> +		uint64_t vm_sync;
> +		uint64_t exec_sync;
> +		uint32_t data;
> +	} *data;
> +
> +#define USER_FENCE_VALUE        0xdeadbeefdeadbeefull
> +	struct drm_xe_sync sync[1] = {
> +		{ .flags = DRM_XE_SYNC_TYPE_USER_FENCE | DRM_XE_SYNC_FLAG_SIGNAL,
> +			.timeline_value = USER_FENCE_VALUE },
> +	};
> +
> +	struct drm_xe_exec exec = {
> +		.num_batch_buffer = 1,
> +		.num_syncs = 1,
> +		.syncs = to_user_pointer(sync),
> +	};
> +
> +	uint32_t vm = xe_vm_create(fd, DRM_XE_VM_CREATE_FLAG_ASYNC_DEFAULT, 0);
> +	uint32_t exec_queue = xe_exec_queue_create_class(fd, vm, DRM_XE_ENGINE_CLASS_COPY);
> +	struct drm_xe_wait_user_fence1 wait = {

Hi Bommu,

I assume this is a typo and should be like this, right?
s/drm_xe_wait_user_fence1/drm_xe_wait_user_fence/

Francois
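
With that fix applied, the declaration would presumably read as below; this is just the initializer quoted underneath with the struct tag corrected, and exec_queue comes from the xe_exec_queue_create_class() call earlier in the test.

	struct drm_xe_wait_user_fence wait = {
		.op = DRM_XE_UFENCE_WAIT_OP_EQ,
		.flags = 0,
		.value = 0xc0ffee,
		.mask = DRM_XE_UFENCE_WAIT_MASK_U64,
		.timeout = -1,
		.exec_queue_id = exec_queue,
	};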

> +		.op = DRM_XE_UFENCE_WAIT_OP_EQ,
> +		.flags = 0,
> +		.value = 0xc0ffee,
> +		.mask = DRM_XE_UFENCE_WAIT_MASK_U64,
> +		.timeout = -1,
> +		.exec_queue_id = exec_queue,
> +	};
> +
> +	bo = xe_bo_create(fd, vm, 0x40000, vram_if_possible(fd, 0), 0);
> +	data = xe_bo_map(fd, bo, 0x40000);
> +
> +	batch_offset = (char *)&data[0].batch - (char *)data;
> +	batch_addr = addr + batch_offset;
> +	sdi_offset = (char *)&data[0].data - (char *)data;
> +	sdi_addr = addr + sdi_offset;
> +
> +	b = 0;
> +	data[0].batch[b++] = MI_STORE_DWORD_IMM_GEN4;
> +	data[0].batch[b++] = sdi_addr;
> +	data[0].batch[b++] = sdi_addr >> 32;
> +	data[0].batch[b++] = 0xc0ffee;
> +	data[0].batch[b++] = MI_BATCH_BUFFER_END;
> +	igt_assert(b <= ARRAY_SIZE(data[0].batch));
> +
> +	wait.addr = to_user_pointer(&data[0].exec_sync);
> +	exec.exec_queue_id = exec_queue;
> +	exec.address = batch_addr;
> +
> +	xe_exec(fd, &exec);
> +
> +	/*
> +	 * Don't do the GPU mapping (vm_bind) for the object, so that an
> +	 * exec_queue reset happens and xe_wait_ufence returns EIO, not ETIME.
> +	 */
> +	do_ioctl_err(fd, DRM_IOCTL_XE_WAIT_USER_FENCE, &wait, EIO);
> +
> +	xe_exec_queue_destroy(fd, exec_queue);
> +
> +	if (bo) {
> +		munmap(data, 0x40000);
> +		gem_close(fd, bo);
> +	}
> +}
>  
>  igt_main
>  {
> @@ -254,6 +333,9 @@ igt_main
>  	igt_subtest("invalid-engine")
>  		invalid_engine(fd);
>  
> +	igt_subtest("exec_queue-reset-wait")
> +		exec_queue_reset_wait(fd);
> +
>  	igt_fixture
>  		drm_close_driver(fd);
>  }
> -- 
> 2.25.1
> 
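For context, a minimal sketch of what the new subtest relies on (the calls and messages below are illustrative only; fd and wait are assumed to be set up as in the test above): a reset exec queue makes the wait ioctl fail with EIO straight away, while an ordinary unsignalled fence still fails with ETIME once the timeout expires.

	if (igt_ioctl(fd, DRM_IOCTL_XE_WAIT_USER_FENCE, &wait) < 0) {
		if (errno == EIO)
			igt_info("exec_queue reset detected, fence abandoned\n");
		else if (errno == ETIME)
			igt_info("fence did not signal before the timeout\n");
	}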

^ permalink raw reply	[flat|nested] 7+ messages in thread

* RE: [PATCH v5 2/2] drm-uapi/xe: Don't wait on user_fence during exec queue reset
  2023-12-12 12:07   ` Francois Dugast
@ 2023-12-12 12:16     ` Bommu, Krishnaiah
  0 siblings, 0 replies; 7+ messages in thread
From: Bommu, Krishnaiah @ 2023-12-12 12:16 UTC (permalink / raw)
  To: Dugast, Francois; +Cc: igt-dev@lists.freedesktop.org, Vivi, Rodrigo



> -----Original Message-----
> From: Dugast, Francois <francois.dugast@intel.com>
> Sent: Tuesday, December 12, 2023 5:37 PM
> To: Bommu, Krishnaiah <krishnaiah.bommu@intel.com>
> Cc: igt-dev@lists.freedesktop.org; Vivi, Rodrigo <rodrigo.vivi@intel.com>
> Subject: Re: [PATCH v5 2/2] drm-uapi/xe: Don't wait on user_fence during exec
> queue reset
> 
> On Tue, Dec 12, 2023 at 10:40:40AM +0530, Bommu Krishnaiah wrote:
> > Don't wait till timeout on user fence when exec_queue reset is
> > detected and return a proper error code
> >
> > Signed-off-by: Bommu Krishnaiah <krishnaiah.bommu@intel.com>
> > Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
> > Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
> > Cc: Francois Dugast <francois.dugast@intel.com>
> > ---
> >  tests/intel/xe_waitfence.c | 82
> > ++++++++++++++++++++++++++++++++++++++
> >  1 file changed, 82 insertions(+)
> >
> > diff --git a/tests/intel/xe_waitfence.c b/tests/intel/xe_waitfence.c
> > index c272d8a83..2eb3a28b2 100644
> > --- a/tests/intel/xe_waitfence.c
> > +++ b/tests/intel/xe_waitfence.c
> > @@ -153,6 +153,9 @@ waitfence(int fd, enum waittype wt)
> >   *
> >   * SUBTEST: invalid-engine
> >   * Description: Check query with invalid engine info returns expected
> > error code
> > + *
> > + * SUBTEST: exec_queue-reset-wait
> > + * Description: Don’t wait till timeout on user fence when exec_queue
> > + reset is detected and return a proper error
> >   */
> >
> >  static void
> > @@ -228,6 +231,82 @@ invalid_engine(int fd)
> >  	do_ioctl_err(fd, DRM_IOCTL_XE_WAIT_USER_FENCE, &wait, EFAULT);
> }
> >
> > +static void
> > +exec_queue_reset_wait(int fd)
> > +{
> > +	uint32_t bo, b;
> > +	uint64_t batch_offset;
> > +	uint64_t batch_addr;
> > +	uint64_t sdi_offset;
> > +	uint64_t sdi_addr;
> > +	uint64_t addr = 0x1a0000;
> > +
> > +	struct {
> > +		uint32_t batch[16];
> > +		uint64_t pad;
> > +		uint64_t vm_sync;
> > +		uint64_t exec_sync;
> > +		uint32_t data;
> > +	} *data;
> > +
> > +#define USER_FENCE_VALUE        0xdeadbeefdeadbeefull
> > +	struct drm_xe_sync sync[1] = {
> > +		{ .flags = DRM_XE_SYNC_TYPE_USER_FENCE |
> DRM_XE_SYNC_FLAG_SIGNAL,
> > +			.timeline_value = USER_FENCE_VALUE },
> > +	};
> > +
> > +	struct drm_xe_exec exec = {
> > +		.num_batch_buffer = 1,
> > +		.num_syncs = 1,
> > +		.syncs = to_user_pointer(sync),
> > +	};
> > +
> > +	uint32_t vm = xe_vm_create(fd,
> DRM_XE_VM_CREATE_FLAG_ASYNC_DEFAULT, 0);
> > +	uint32_t exec_queue = xe_exec_queue_create_class(fd, vm,
> DRM_XE_ENGINE_CLASS_COPY);
> > +	struct drm_xe_wait_user_fence1 wait = {
> 
> Hi Bommu,
> 
> I assume this is a typo and should be like this, right?
> s/drm_xe_wait_user_fence1/drm_xe_wait_user_fence/
> 
> Francois
Yes, it is a typo, thank you for pointing it out.
Regards,
Krishna
> 
> > +		.op = DRM_XE_UFENCE_WAIT_OP_EQ,
> > +		.flags = 0,
> > +		.value = 0xc0ffee,
> > +		.mask = DRM_XE_UFENCE_WAIT_MASK_U64,
> > +		.timeout = -1,
> > +		.exec_queue_id = exec_queue,
> > +	};
> > +
> > +	bo = xe_bo_create(fd, vm, 0x40000, vram_if_possible(fd, 0), 0);
> > +	data = xe_bo_map(fd, bo, 0x40000);
> > +
> > +	batch_offset = (char *)&data[0].batch - (char *)data;
> > +	batch_addr = addr + batch_offset;
> > +	sdi_offset = (char *)&data[0].data - (char *)data;
> > +	sdi_addr = addr + sdi_offset;
> > +
> > +	b = 0;
> > +	data[0].batch[b++] = MI_STORE_DWORD_IMM_GEN4;
> > +	data[0].batch[b++] = sdi_addr;
> > +	data[0].batch[b++] = sdi_addr >> 32;
> > +	data[0].batch[b++] = 0xc0ffee;
> > +	data[0].batch[b++] = MI_BATCH_BUFFER_END;
> > +	igt_assert(b <= ARRAY_SIZE(data[0].batch));
> > +
> > +	wait.addr = to_user_pointer(&data[0].exec_sync);
> > +	exec.exec_queue_id = exec_queue;
> > +	exec.address = batch_addr;
> > +
> > +	xe_exec(fd, &exec);
> > +
> > +	/*
> > +	 * Don't do the GPU mapping (vm_bind) for the object, so that an
> > +	 * exec_queue reset happens and xe_wait_ufence returns EIO, not ETIME.
> > +	 */
> > +	do_ioctl_err(fd, DRM_IOCTL_XE_WAIT_USER_FENCE, &wait, EIO);
> > +
> > +	xe_exec_queue_destroy(fd, exec_queue);
> > +
> > +	if (bo) {
> > +		munmap(data, 0x40000);
> > +		gem_close(fd, bo);
> > +	}
> > +}
> >
> >  igt_main
> >  {
> > @@ -254,6 +333,9 @@ igt_main
> >  	igt_subtest("invalid-engine")
> >  		invalid_engine(fd);
> >
> > +	igt_subtest("exec_queue-reset-wait")
> > +		exec_queue_reset_wait(fd);
> > +
> >  	igt_fixture
> >  		drm_close_driver(fd);
> >  }
> > --
> > 2.25.1
> >

^ permalink raw reply	[flat|nested] 7+ messages in thread

Thread overview: 7+ messages
2023-12-12  5:10 [PATCH v5 0/2] RFC: drm-uapi/xe: add exec_queue_id member to drm_xe_wait_user_fence structure Bommu Krishnaiah
2023-12-12  5:10 ` [PATCH v5 1/2] " Bommu Krishnaiah
2023-12-12  5:39   ` Rodrigo Vivi
2023-12-12  5:10 ` [PATCH v5 2/2] drm-uapi/xe: Don't wait on user_fence during exec queue reset Bommu Krishnaiah
2023-12-12 12:07   ` Francois Dugast
2023-12-12 12:16     ` Bommu, Krishnaiah
2023-12-12  5:35 ` ✗ Fi.CI.BUILD: failure for RFC: drm-uapi/xe: add exec_queue_id member to drm_xe_wait_user_fence structure (rev7) Patchwork
