Igt-dev Archive on lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH i-g-t] tests/intel/xe_compute_preempt: Add mtp disabled preempt test
@ 2024-02-18 13:18 janga.rahul.kumar
  2024-02-18 14:05 ` ✓ CI.xeBAT: success for " Patchwork
                   ` (2 more replies)
  0 siblings, 3 replies; 4+ messages in thread
From: janga.rahul.kumar @ 2024-02-18 13:18 UTC (permalink / raw)
  To: igt-dev, ramadevi.gandi, janga.rahul.kumar; +Cc: nirmoy.das

From: Janga Rahul Kumar <janga.rahul.kumar@intel.com>

Check preemption scenario with Mid thread preemption disabled.

Cc: Nirmoy Das <nirmoy.das@intel.com>
Signed-off-by: Janga Rahul Kumar <janga.rahul.kumar@intel.com>
---
 lib/intel_compute.c              | 148 ++++++++++++++++++++-----------
 lib/intel_compute.h              |   2 +-
 tests/intel/xe_compute_preempt.c |  16 +++-
 3 files changed, 111 insertions(+), 55 deletions(-)

diff --git a/lib/intel_compute.c b/lib/intel_compute.c
index eab407a0d..753add674 100644
--- a/lib/intel_compute.c
+++ b/lib/intel_compute.c
@@ -1162,7 +1162,8 @@ static void xe2lpg_compute_exec_compute(uint32_t *addr_bo_buffer_batch,
 					uint64_t addr_state_contect_data_base,
 					uint64_t offset_indirect_data_start,
 					uint64_t kernel_start_pointer,
-					uint64_t sip_start_pointer)
+					uint64_t sip_start_pointer,
+					bool	 thread_preemption)
 {
 	int b = 0;
 
@@ -1195,6 +1196,13 @@ static void xe2lpg_compute_exec_compute(uint32_t *addr_bo_buffer_batch,
 	addr_bo_buffer_batch[b++] = 0x03808800;
 	addr_bo_buffer_batch[b++] = 0x00000000;
 	addr_bo_buffer_batch[b++] = 0x00000000;
+
+	if (!thread_preemption) {
+		addr_bo_buffer_batch[b++] = MI_LOAD_REGISTER_IMM(1);
+		addr_bo_buffer_batch[b++] = 0x0001a580;
+		addr_bo_buffer_batch[b++] = 0x0000d401;
+	}
+
 	addr_bo_buffer_batch[b++] = STATE_BASE_ADDRESS | 0x14;
 	addr_bo_buffer_batch[b++] = (addr_general_state_base & 0xffffffff) | 0x21;
 	addr_bo_buffer_batch[b++] = addr_general_state_base >> 32;
@@ -1251,7 +1259,12 @@ static void xe2lpg_compute_exec_compute(uint32_t *addr_bo_buffer_batch,
 
 	addr_bo_buffer_batch[b++] = kernel_start_pointer;
 	addr_bo_buffer_batch[b++] = 0x00000000;
-	addr_bo_buffer_batch[b++] = 0x00100000; // Enable Thread Preemption BitField:20
+
+	if (thread_preemption)
+		addr_bo_buffer_batch[b++] = 0x00100000; // Enable Thread Preemption BitField:20
+	else
+		addr_bo_buffer_batch[b++] = 0x00000000; // Disable Thread Preemption BitField:20
+
 	addr_bo_buffer_batch[b++] = 0x00000000;
 	addr_bo_buffer_batch[b++] = 0x00000000;
 	addr_bo_buffer_batch[b++] = 0x0c000020;
@@ -1369,7 +1382,9 @@ static void xe2lpg_compute_exec(int fd, const unsigned char *kernel,
 				  ADDR_INSTRUCTION_STATE_BASE,
 				  XE2_ADDR_STATE_CONTEXT_DATA_BASE,
 				  OFFSET_INDIRECT_DATA_START,
-				  OFFSET_KERNEL, 0);
+				  OFFSET_KERNEL,
+				  0,
+				  false);
 
 	bo_execenv_exec(&execenv, ADDR_BATCH);
 
@@ -1520,13 +1535,17 @@ bool xe_run_intel_compute_kernel_on_engine(int fd,
  * @short_kernel_size: size of @short_kernel
  * @sip_kernel: WMTP sip kernel which does save restore during preemption
  * @sip_kernel_size: size of @sip_kernel
+ * @thread_preemption: flag to enable/disable thread level preemption
+ * @multi_short: submit multiple short kernels
  */
 static void xe2lpg_compute_preempt_exec(int fd, const unsigned char *long_kernel,
 					unsigned int long_kernel_size,
 					const unsigned char *short_kernel,
 					unsigned int short_kernel_size,
 					const unsigned char *sip_kernel,
-					unsigned int sip_kernel_size)
+					unsigned int sip_kernel_size,
+					bool thread_preemption,
+					bool multi_short)
 {
 #define XE2_BO_PREEMPT_DICT_ENTRIES 11
 	struct bo_dict_entry bo_dict_long[XE2_BO_PREEMPT_DICT_ENTRIES] = {
@@ -1560,41 +1579,67 @@ static void xe2lpg_compute_preempt_exec(int fd, const unsigned char *long_kernel
 		  .name = "sip kernel"},
 	};
 
-	struct bo_dict_entry bo_dict_short[XE2_BO_PREEMPT_DICT_ENTRIES];
-	struct bo_execenv execenv_short, execenv_long;
+	int n_short = (multi_short) ? 3 : 1;
+	struct bo_dict_entry bo_dict_short[n_short][XE2_BO_PREEMPT_DICT_ENTRIES];
+	struct bo_execenv execenv_short[n_short], execenv_long;
+	struct drm_xe_sync sync_short[n_short];
 	float *dinput;
+	memset(sync_short, 0, sizeof(sync_short));
+
 	struct drm_xe_sync sync_long = {
 		.type = DRM_XE_SYNC_TYPE_SYNCOBJ,
 		.flags = DRM_XE_SYNC_FLAG_SIGNAL,
 		.handle = syncobj_create(fd, 0),
 	};
-	struct drm_xe_sync sync_short = {
-		.type = DRM_XE_SYNC_TYPE_SYNCOBJ,
-		.flags = DRM_XE_SYNC_FLAG_SIGNAL,
-		.handle = syncobj_create(fd, 0),
-	};
+
 	unsigned int long_kernel_loop_count = 1000000;
 
-	for (int i = 0; i < XE2_BO_PREEMPT_DICT_ENTRIES; ++i)
-		bo_dict_short[i] = bo_dict_long[i];
+	for (int j = 0; j < n_short; j++) {
+		for (int i = 0; i < XE2_BO_PREEMPT_DICT_ENTRIES; ++i)
+			bo_dict_short[j][i] = bo_dict_long[i];
+
+		bo_execenv_create(fd, &execenv_short[j], NULL);
+
+		bo_dict_short[j][0].size = ALIGN(short_kernel_size, 0x1000);
+		bo_dict_short[j][10].size = ALIGN(sip_kernel_size, 0x1000);
+
+		bo_execenv_bind(&execenv_short[j], bo_dict_short[j], XE2_BO_PREEMPT_DICT_ENTRIES);
+
+		memcpy(bo_dict_short[j][0].data, short_kernel, short_kernel_size);
+		memcpy(bo_dict_short[j][10].data, sip_kernel, sip_kernel_size);
+
+		create_dynamic_state(bo_dict_short[j][1].data, OFFSET_KERNEL);
+		xehp_create_surface_state(bo_dict_short[j][2].data, ADDR_INPUT, ADDR_OUTPUT);
+		xehp_create_indirect_data(bo_dict_short[j][3].data, ADDR_INPUT, ADDR_OUTPUT);
+		xehp_create_surface_state(bo_dict_short[j][7].data, ADDR_INPUT, ADDR_OUTPUT);
+
+		dinput = (float *)bo_dict_short[j][4].data;
+
+		for (int i = 0; i < SIZE_DATA; i++)
+			((float *)dinput)[i] = rand() / (float)RAND_MAX;
+
+		xe2lpg_compute_exec_compute(bo_dict_short[j][8].data, ADDR_GENERAL_STATE_BASE,
+						ADDR_SURFACE_STATE_BASE, ADDR_DYNAMIC_STATE_BASE,
+						ADDR_INSTRUCTION_STATE_BASE, XE2_ADDR_STATE_CONTEXT_DATA_BASE,
+						OFFSET_INDIRECT_DATA_START, OFFSET_KERNEL, OFFSET_STATE_SIP,
+						thread_preemption);
+
+		sync_short[j].type = DRM_XE_SYNC_TYPE_SYNCOBJ;
+		sync_short[j].flags = DRM_XE_SYNC_FLAG_SIGNAL;
+		sync_short[j].handle = syncobj_create(fd, 0);
+	}
 
-	bo_execenv_create(fd, &execenv_short, NULL);
 	bo_execenv_create(fd, &execenv_long, NULL);
 
 	bo_dict_long[0].size = ALIGN(long_kernel_size, 0x1000);
-	bo_dict_short[0].size = ALIGN(short_kernel_size, 0x1000);
 
 	bo_dict_long[10].size = ALIGN(sip_kernel_size, 0x1000);
-	bo_dict_short[10].size = ALIGN(sip_kernel_size, 0x1000);
 
 	bo_execenv_bind(&execenv_long, bo_dict_long, XE2_BO_PREEMPT_DICT_ENTRIES);
-	bo_execenv_bind(&execenv_short, bo_dict_short, XE2_BO_PREEMPT_DICT_ENTRIES);
 
 	memcpy(bo_dict_long[0].data, long_kernel, long_kernel_size);
-	memcpy(bo_dict_short[0].data, short_kernel, short_kernel_size);
 
 	memcpy(bo_dict_long[10].data, sip_kernel, sip_kernel_size);
-	memcpy(bo_dict_short[10].data, sip_kernel, sip_kernel_size);
 
 	create_dynamic_state(bo_dict_long[1].data, OFFSET_KERNEL);
 	xehp_create_surface_state(bo_dict_long[2].data, ADDR_INPUT, ADDR_OUTPUT);
@@ -1602,10 +1647,6 @@ static void xe2lpg_compute_preempt_exec(int fd, const unsigned char *long_kernel
 					    long_kernel_loop_count);
 	xehp_create_surface_state(bo_dict_long[7].data, ADDR_INPUT, ADDR_OUTPUT);
 
-	create_dynamic_state(bo_dict_short[1].data, OFFSET_KERNEL);
-	xehp_create_surface_state(bo_dict_short[2].data, ADDR_INPUT, ADDR_OUTPUT);
-	xehp_create_indirect_data(bo_dict_short[3].data, ADDR_INPUT, ADDR_OUTPUT);
-	xehp_create_surface_state(bo_dict_short[7].data, ADDR_INPUT, ADDR_OUTPUT);
 
 	dinput = (float *)bo_dict_long[4].data;
 	srand(time(NULL));
@@ -1613,40 +1654,37 @@ static void xe2lpg_compute_preempt_exec(int fd, const unsigned char *long_kernel
 	for (int i = 0; i < SIZE_DATA; i++)
 		((float *)dinput)[i] = rand() / (float)RAND_MAX;
 
-	dinput = (float *)bo_dict_short[4].data;
-
-	for (int i = 0; i < SIZE_DATA; i++)
-		((float *)dinput)[i] = rand() / (float)RAND_MAX;
 
 	xe2lpg_compute_exec_compute(bo_dict_long[8].data, ADDR_GENERAL_STATE_BASE,
 				    ADDR_SURFACE_STATE_BASE, ADDR_DYNAMIC_STATE_BASE,
 				    ADDR_INSTRUCTION_STATE_BASE, XE2_ADDR_STATE_CONTEXT_DATA_BASE,
-				    OFFSET_INDIRECT_DATA_START, OFFSET_KERNEL, OFFSET_STATE_SIP);
-
-	xe2lpg_compute_exec_compute(bo_dict_short[8].data, ADDR_GENERAL_STATE_BASE,
-				    ADDR_SURFACE_STATE_BASE, ADDR_DYNAMIC_STATE_BASE,
-				    ADDR_INSTRUCTION_STATE_BASE, XE2_ADDR_STATE_CONTEXT_DATA_BASE,
-				    OFFSET_INDIRECT_DATA_START, OFFSET_KERNEL, OFFSET_STATE_SIP);
+				    OFFSET_INDIRECT_DATA_START, OFFSET_KERNEL, OFFSET_STATE_SIP,
+				    thread_preemption);
 
 	xe_exec_sync(fd, execenv_long.exec_queue, ADDR_BATCH, &sync_long, 1);
 
-	xe_exec_sync(fd, execenv_short.exec_queue, ADDR_BATCH, &sync_short, 1);
+	for (int j = 0; j < n_short; j++) {
+		xe_exec_sync(fd, execenv_short[j].exec_queue, ADDR_BATCH, &sync_short[j], 1);
+
+		igt_assert(syncobj_wait(fd, &sync_short[j].handle, 1, INT64_MAX, 0, NULL));
+		syncobj_destroy(fd, sync_short[j].handle);
+	}
 
-	igt_assert(syncobj_wait(fd, &sync_short.handle, 1, INT64_MAX, 0, NULL));
-	syncobj_destroy(fd, sync_short.handle);
 
 	igt_assert(syncobj_wait(fd, &sync_long.handle, 1, INT64_MAX, 0, NULL));
 	syncobj_destroy(fd, sync_long.handle);
 
-	for (int i = 0; i < SIZE_DATA; i++) {
-		float f1, f2;
+	for (int j = 0; j < n_short; j++) {
+		for (int i = 0; i < SIZE_DATA; i++) {
+			float f1, f2;
 
-		f1 = ((float *) bo_dict_short[5].data)[i];
-		f2 = ((float *) bo_dict_short[4].data)[i];
+			f1 = ((float *) bo_dict_short[j][5].data)[i];
+			f2 = ((float *) bo_dict_short[j][4].data)[i];
 
-		if (f1 != f2 * f2)
-			igt_debug("[%4d] f1: %f != %f\n", i, f1, f2 * f2);
-		igt_assert(f1 == f2 * f2);
+			if (f1 != f2 * f2)
+				igt_debug("[%4d] f1: %f != %f\n", i, f1, f2 * f2);
+			igt_assert(f1 == f2 * f2);
+		}
 	}
 
 	for (int i = 0; i < SIZE_DATA; i++) {
@@ -1659,10 +1697,12 @@ static void xe2lpg_compute_preempt_exec(int fd, const unsigned char *long_kernel
 		igt_assert(f1 == long_kernel_loop_count);
 	}
 
-	bo_execenv_unbind(&execenv_short, bo_dict_short, XE2_BO_PREEMPT_DICT_ENTRIES);
-	bo_execenv_unbind(&execenv_long, bo_dict_long, XE2_BO_PREEMPT_DICT_ENTRIES);
+	for (int j = 0; j < n_short; j++) {
+		bo_execenv_unbind(&execenv_short[j], bo_dict_short[j], XE2_BO_PREEMPT_DICT_ENTRIES);
+		bo_execenv_destroy(&execenv_short[j]);
+	}
 
-	bo_execenv_destroy(&execenv_short);
+	bo_execenv_unbind(&execenv_long, bo_dict_long, XE2_BO_PREEMPT_DICT_ENTRIES);
 	bo_execenv_destroy(&execenv_long);
 }
 
@@ -1673,7 +1713,9 @@ static const struct {
 			     const unsigned char *short_kernel,
 			     unsigned int short_kernel_size,
 			     const unsigned char *sip_kernel,
-			     unsigned int sip_kernel_size);
+			     unsigned int sip_kernel_size,
+			     bool thread_preemption,
+			     bool multi_short);
 	uint32_t compat;
 } intel_compute_preempt_batches[] = {
 	{
@@ -1683,7 +1725,7 @@ static const struct {
 	},
 };
 
-static bool __run_intel_compute_kernel_preempt(int fd)
+static bool __run_intel_compute_kernel_preempt(int fd, bool thread_preemption, bool multi_short)
 {
 	unsigned int ip_ver = intel_graphics_ver(intel_get_drm_devid(fd));
 	unsigned int batch;
@@ -1720,7 +1762,9 @@ static bool __run_intel_compute_kernel_preempt(int fd)
 							  kernels->long_kernel_size,
 							  kernels->kernel, kernels->size,
 							  kernels->sip_kernel,
-							  kernels->sip_kernel_size);
+							  kernels->sip_kernel_size,
+							  thread_preemption,
+							  multi_short);
 
 	return true;
 }
@@ -1729,10 +1773,12 @@ static bool __run_intel_compute_kernel_preempt(int fd)
  * exercise preemption scenario.
  *
  * @fd: file descriptor of the opened DRM Xe device
+ * @thread_preemption: flag to enable/disable thread level preemption
+ * @multi_short: submit multiple short kernels
  *
  * Returns true on success, false otherwise.
  */
-bool run_intel_compute_kernel_preempt(int fd)
+bool run_intel_compute_kernel_preempt(int fd, bool thread_preemption, bool multi_short)
 {
-	return __run_intel_compute_kernel_preempt(fd);
+	return __run_intel_compute_kernel_preempt(fd, thread_preemption, multi_short);
 }
diff --git a/lib/intel_compute.h b/lib/intel_compute.h
index a02688ad4..b5932ac2b 100644
--- a/lib/intel_compute.h
+++ b/lib/intel_compute.h
@@ -37,5 +37,5 @@ extern const struct intel_compute_kernels intel_compute_square_kernels[];
 
 bool run_intel_compute_kernel(int fd);
 bool xe_run_intel_compute_kernel_on_engine(int fd, struct drm_xe_engine_class_instance *eci);
-bool run_intel_compute_kernel_preempt(int fd);
+bool run_intel_compute_kernel_preempt(int fd, bool thread_preemption, bool multi_short);
 #endif	/* INTEL_COMPUTE_H */
diff --git a/tests/intel/xe_compute_preempt.c b/tests/intel/xe_compute_preempt.c
index 31703638e..4d0feb22b 100644
--- a/tests/intel/xe_compute_preempt.c
+++ b/tests/intel/xe_compute_preempt.c
@@ -22,11 +22,18 @@
  * Description:
  *      Exercise compute walker mid thread preemption scenario
  * Functionality: compute openCL kernel
+ *
+ * SUBTEST: compute-preempt-mtp-disabled
+ * GPU requirement: LNL
+ * Description:
+ *      Exercise compute preemption with Mid thread preemption disabled
+ * Functionality: compute openCL kernel
  */
+
 static void
-test_compute_preempt(int fd)
+test_compute_preempt(int fd, bool thread_preemption, bool multi_short)
 {
-	igt_require_f(run_intel_compute_kernel_preempt(fd), "GPU not supported\n");
+	igt_require_f(run_intel_compute_kernel_preempt(fd, thread_preemption, multi_short), "GPU not supported\n");
 }
 
 igt_main
@@ -37,7 +44,10 @@ igt_main
 		xe = drm_open_driver(DRIVER_XE);
 
 	igt_subtest("compute-preempt")
-		test_compute_preempt(xe);
+		test_compute_preempt(xe, true, 0);
+
+	igt_subtest("compute-preempt-mtp-disabled")
+		test_compute_preempt(xe, false, 1);
 
 	igt_fixture
 		drm_close_driver(xe);
-- 
2.25.1


^ permalink raw reply related	[flat|nested] 4+ messages in thread

* ✓ CI.xeBAT: success for tests/intel/xe_compute_preempt: Add mtp disabled preempt test
  2024-02-18 13:18 [PATCH i-g-t] tests/intel/xe_compute_preempt: Add mtp disabled preempt test janga.rahul.kumar
@ 2024-02-18 14:05 ` Patchwork
  2024-02-18 14:29 ` ✗ Fi.CI.BAT: failure " Patchwork
  2024-02-20 20:27 ` [PATCH i-g-t] " Nirmoy Das
  2 siblings, 0 replies; 4+ messages in thread
From: Patchwork @ 2024-02-18 14:05 UTC (permalink / raw)
  To: janga.rahul.kumar; +Cc: igt-dev

[-- Attachment #1: Type: text/plain, Size: 990 bytes --]

== Series Details ==

Series: tests/intel/xe_compute_preempt: Add mtp disabled preempt test
URL   : https://patchwork.freedesktop.org/series/130050/
State : success

== Summary ==

CI Bug Log - changes from XEIGT_7716_BAT -> XEIGTPW_10693_BAT
====================================================

Summary
-------

  **SUCCESS**

  No regressions found.

  

Participating hosts (4 -> 4)
------------------------------

  No changes in participating hosts


Changes
-------

  No changes found


Build changes
-------------

  * IGT: IGT_7716 -> IGTPW_10693
  * Linux: xe-789-55724f2a1075fcace79fd49ea0c2b7b5600d7ba8 -> xe-794-b09fd1bd9f8b4a8e5cd8c1de8dbba4d4eb15b4e9

  IGTPW_10693: 10693
  IGT_7716: 7716
  xe-789-55724f2a1075fcace79fd49ea0c2b7b5600d7ba8: 55724f2a1075fcace79fd49ea0c2b7b5600d7ba8
  xe-794-b09fd1bd9f8b4a8e5cd8c1de8dbba4d4eb15b4e9: b09fd1bd9f8b4a8e5cd8c1de8dbba4d4eb15b4e9

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_10693/index.html

[-- Attachment #2: Type: text/html, Size: 1549 bytes --]

^ permalink raw reply	[flat|nested] 4+ messages in thread

* ✗ Fi.CI.BAT: failure for tests/intel/xe_compute_preempt: Add mtp disabled preempt test
  2024-02-18 13:18 [PATCH i-g-t] tests/intel/xe_compute_preempt: Add mtp disabled preempt test janga.rahul.kumar
  2024-02-18 14:05 ` ✓ CI.xeBAT: success for " Patchwork
@ 2024-02-18 14:29 ` Patchwork
  2024-02-20 20:27 ` [PATCH i-g-t] " Nirmoy Das
  2 siblings, 0 replies; 4+ messages in thread
From: Patchwork @ 2024-02-18 14:29 UTC (permalink / raw)
  To: janga.rahul.kumar; +Cc: igt-dev

[-- Attachment #1: Type: text/plain, Size: 4977 bytes --]

== Series Details ==

Series: tests/intel/xe_compute_preempt: Add mtp disabled preempt test
URL   : https://patchwork.freedesktop.org/series/130050/
State : failure

== Summary ==

CI Bug Log - changes from CI_DRM_14287 -> IGTPW_10693
====================================================

Summary
-------

  **FAILURE**

  Serious unknown changes coming with IGTPW_10693 absolutely need to be
  verified manually.
  
  If you think the reported changes have nothing to do with the changes
  introduced in IGTPW_10693, please notify your bug team (I915-ci-infra@lists.freedesktop.org) to allow them
  to document this new failure mode, which will reduce false positives in CI.

  External URL: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_10693/index.html

Participating hosts (36 -> 34)
------------------------------

  Additional (1): bat-kbl-2 
  Missing    (3): fi-glk-j4005 bat-atsm-1 fi-snb-2520m 

Possible new issues
-------------------

  Here are the unknown changes that may have been introduced in IGTPW_10693:

### IGT changes ###

#### Possible regressions ####

  * igt@i915_selftest@live@coherency:
    - bat-adlm-1:         [PASS][1] -> [INCOMPLETE][2]
   [1]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_14287/bat-adlm-1/igt@i915_selftest@live@coherency.html
   [2]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_10693/bat-adlm-1/igt@i915_selftest@live@coherency.html

  
Known issues
------------

  Here are the changes found in IGTPW_10693 that come from known issues:

### IGT changes ###

#### Issues hit ####

  * igt@debugfs_test@basic-hwmon:
    - bat-jsl-1:          NOTRUN -> [SKIP][3] ([i915#9318])
   [3]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_10693/bat-jsl-1/igt@debugfs_test@basic-hwmon.html

  * igt@fbdev@info:
    - bat-kbl-2:          NOTRUN -> [SKIP][4] ([fdo#109271] / [i915#1849])
   [4]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_10693/bat-kbl-2/igt@fbdev@info.html

  * igt@gem_huc_copy@huc-copy:
    - bat-jsl-1:          NOTRUN -> [SKIP][5] ([i915#2190])
   [5]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_10693/bat-jsl-1/igt@gem_huc_copy@huc-copy.html

  * igt@gem_lmem_swapping@parallel-random-engines:
    - bat-kbl-2:          NOTRUN -> [SKIP][6] ([fdo#109271]) +35 other tests skip
   [6]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_10693/bat-kbl-2/igt@gem_lmem_swapping@parallel-random-engines.html

  * igt@gem_lmem_swapping@verify-random:
    - bat-jsl-1:          NOTRUN -> [SKIP][7] ([i915#4613]) +3 other tests skip
   [7]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_10693/bat-jsl-1/igt@gem_lmem_swapping@verify-random.html

  * igt@kms_cursor_legacy@basic-busy-flip-before-cursor-legacy:
    - bat-jsl-1:          NOTRUN -> [SKIP][8] ([i915#4103]) +1 other test skip
   [8]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_10693/bat-jsl-1/igt@kms_cursor_legacy@basic-busy-flip-before-cursor-legacy.html

  * igt@kms_dsc@dsc-basic:
    - bat-jsl-1:          NOTRUN -> [SKIP][9] ([i915#3555] / [i915#9886])
   [9]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_10693/bat-jsl-1/igt@kms_dsc@dsc-basic.html

  * igt@kms_force_connector_basic@force-load-detect:
    - bat-jsl-1:          NOTRUN -> [SKIP][10] ([fdo#109285])
   [10]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_10693/bat-jsl-1/igt@kms_force_connector_basic@force-load-detect.html

  * igt@kms_setmode@basic-clone-single-crtc:
    - bat-jsl-1:          NOTRUN -> [SKIP][11] ([i915#3555])
   [11]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_10693/bat-jsl-1/igt@kms_setmode@basic-clone-single-crtc.html

  
  {name}: This element is suppressed. This means it is ignored when computing
          the status of the difference (SUCCESS, WARNING, or FAILURE).

  [fdo#109271]: https://bugs.freedesktop.org/show_bug.cgi?id=109271
  [fdo#109285]: https://bugs.freedesktop.org/show_bug.cgi?id=109285
  [i915#10194]: https://gitlab.freedesktop.org/drm/intel/issues/10194
  [i915#10213]: https://gitlab.freedesktop.org/drm/intel/issues/10213
  [i915#10215]: https://gitlab.freedesktop.org/drm/intel/issues/10215
  [i915#1849]: https://gitlab.freedesktop.org/drm/intel/issues/1849
  [i915#2190]: https://gitlab.freedesktop.org/drm/intel/issues/2190
  [i915#3555]: https://gitlab.freedesktop.org/drm/intel/issues/3555
  [i915#4103]: https://gitlab.freedesktop.org/drm/intel/issues/4103
  [i915#4613]: https://gitlab.freedesktop.org/drm/intel/issues/4613
  [i915#9318]: https://gitlab.freedesktop.org/drm/intel/issues/9318
  [i915#9886]: https://gitlab.freedesktop.org/drm/intel/issues/9886


Build changes
-------------

  * CI: CI-20190529 -> None
  * IGT: IGT_7716 -> IGTPW_10693

  CI-20190529: 20190529
  CI_DRM_14287: b09fd1bd9f8b4a8e5cd8c1de8dbba4d4eb15b4e9 @ git://anongit.freedesktop.org/gfx-ci/linux
  IGTPW_10693: 10693
  IGT_7716: 7716


Testlist changes
----------------

+igt@xe_compute_preempt@compute-preempt-mtp-disabled

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_10693/index.html

[-- Attachment #2: Type: text/html, Size: 5762 bytes --]

^ permalink raw reply	[flat|nested] 4+ messages in thread

* Re: [PATCH i-g-t] tests/intel/xe_compute_preempt: Add mtp disabled preempt test
  2024-02-18 13:18 [PATCH i-g-t] tests/intel/xe_compute_preempt: Add mtp disabled preempt test janga.rahul.kumar
  2024-02-18 14:05 ` ✓ CI.xeBAT: success for " Patchwork
  2024-02-18 14:29 ` ✗ Fi.CI.BAT: failure " Patchwork
@ 2024-02-20 20:27 ` Nirmoy Das
  2 siblings, 0 replies; 4+ messages in thread
From: Nirmoy Das @ 2024-02-20 20:27 UTC (permalink / raw)
  To: janga.rahul.kumar, igt-dev, ramadevi.gandi; +Cc: nirmoy.das


On 2/18/2024 2:18 PM, janga.rahul.kumar@intel.com wrote:
> From: Janga Rahul Kumar <janga.rahul.kumar@intel.com>
>
> Check preemption scenario with Mid thread preemption disabled.

The compute square kernel will only be preempted once all of its running
threads have finished. With the current kernel

this test won't work — it will time out. We need a better strategy to
handle this case.


Regards,

Nirmoy

>
> Cc: Nirmoy Das <nirmoy.das@intel.com>
> Signed-off-by: Janga Rahul Kumar <janga.rahul.kumar@intel.com>
> ---
>   lib/intel_compute.c              | 148 ++++++++++++++++++++-----------
>   lib/intel_compute.h              |   2 +-
>   tests/intel/xe_compute_preempt.c |  16 +++-
>   3 files changed, 111 insertions(+), 55 deletions(-)
>
> diff --git a/lib/intel_compute.c b/lib/intel_compute.c
> index eab407a0d..753add674 100644
> --- a/lib/intel_compute.c
> +++ b/lib/intel_compute.c
> @@ -1162,7 +1162,8 @@ static void xe2lpg_compute_exec_compute(uint32_t *addr_bo_buffer_batch,
>   					uint64_t addr_state_contect_data_base,
>   					uint64_t offset_indirect_data_start,
>   					uint64_t kernel_start_pointer,
> -					uint64_t sip_start_pointer)
> +					uint64_t sip_start_pointer,
> +					bool	 thread_preemption)
>   {
>   	int b = 0;
>   
> @@ -1195,6 +1196,13 @@ static void xe2lpg_compute_exec_compute(uint32_t *addr_bo_buffer_batch,
>   	addr_bo_buffer_batch[b++] = 0x03808800;
>   	addr_bo_buffer_batch[b++] = 0x00000000;
>   	addr_bo_buffer_batch[b++] = 0x00000000;
> +
> +	if (!thread_preemption) {
> +		addr_bo_buffer_batch[b++] = MI_LOAD_REGISTER_IMM(1);
> +		addr_bo_buffer_batch[b++] = 0x0001a580;
> +		addr_bo_buffer_batch[b++] = 0x0000d401;
> +	}
> +
>   	addr_bo_buffer_batch[b++] = STATE_BASE_ADDRESS | 0x14;
>   	addr_bo_buffer_batch[b++] = (addr_general_state_base & 0xffffffff) | 0x21;
>   	addr_bo_buffer_batch[b++] = addr_general_state_base >> 32;
> @@ -1251,7 +1259,12 @@ static void xe2lpg_compute_exec_compute(uint32_t *addr_bo_buffer_batch,
>   
>   	addr_bo_buffer_batch[b++] = kernel_start_pointer;
>   	addr_bo_buffer_batch[b++] = 0x00000000;
> -	addr_bo_buffer_batch[b++] = 0x00100000; // Enable Thread Preemption BitField:20
> +
> +	if (thread_preemption)
> +		addr_bo_buffer_batch[b++] = 0x00100000; // Enable Thread Preemption BitField:20
> +	else
> +		addr_bo_buffer_batch[b++] = 0x00000000; // Disable Thread Preemption BitField:20
> +
>   	addr_bo_buffer_batch[b++] = 0x00000000;
>   	addr_bo_buffer_batch[b++] = 0x00000000;
>   	addr_bo_buffer_batch[b++] = 0x0c000020;
> @@ -1369,7 +1382,9 @@ static void xe2lpg_compute_exec(int fd, const unsigned char *kernel,
>   				  ADDR_INSTRUCTION_STATE_BASE,
>   				  XE2_ADDR_STATE_CONTEXT_DATA_BASE,
>   				  OFFSET_INDIRECT_DATA_START,
> -				  OFFSET_KERNEL, 0);
> +				  OFFSET_KERNEL,
> +				  0,
> +				  false);
>   
>   	bo_execenv_exec(&execenv, ADDR_BATCH);
>   
> @@ -1520,13 +1535,17 @@ bool xe_run_intel_compute_kernel_on_engine(int fd,
>    * @short_kernel_size: size of @short_kernel
>    * @sip_kernel: WMTP sip kernel which does save restore during preemption
>    * @sip_kernel_size: size of @sip_kernel
> + * @thread_preemption: flag to enable/disable thread level preemption
> + * @multi_short: submit multiple short kernels
>    */
>   static void xe2lpg_compute_preempt_exec(int fd, const unsigned char *long_kernel,
>   					unsigned int long_kernel_size,
>   					const unsigned char *short_kernel,
>   					unsigned int short_kernel_size,
>   					const unsigned char *sip_kernel,
> -					unsigned int sip_kernel_size)
> +					unsigned int sip_kernel_size,
> +					bool thread_preemption,
> +					bool multi_short)
>   {
>   #define XE2_BO_PREEMPT_DICT_ENTRIES 11
>   	struct bo_dict_entry bo_dict_long[XE2_BO_PREEMPT_DICT_ENTRIES] = {
> @@ -1560,41 +1579,67 @@ static void xe2lpg_compute_preempt_exec(int fd, const unsigned char *long_kernel
>   		  .name = "sip kernel"},
>   	};
>   
> -	struct bo_dict_entry bo_dict_short[XE2_BO_PREEMPT_DICT_ENTRIES];
> -	struct bo_execenv execenv_short, execenv_long;
> +	int n_short = (multi_short) ? 3 : 1;
> +	struct bo_dict_entry bo_dict_short[n_short][XE2_BO_PREEMPT_DICT_ENTRIES];
> +	struct bo_execenv execenv_short[n_short], execenv_long;
> +	struct drm_xe_sync sync_short[n_short];
>   	float *dinput;
> +	memset(sync_short, 0, sizeof(sync_short));
> +
>   	struct drm_xe_sync sync_long = {
>   		.type = DRM_XE_SYNC_TYPE_SYNCOBJ,
>   		.flags = DRM_XE_SYNC_FLAG_SIGNAL,
>   		.handle = syncobj_create(fd, 0),
>   	};
> -	struct drm_xe_sync sync_short = {
> -		.type = DRM_XE_SYNC_TYPE_SYNCOBJ,
> -		.flags = DRM_XE_SYNC_FLAG_SIGNAL,
> -		.handle = syncobj_create(fd, 0),
> -	};
> +
>   	unsigned int long_kernel_loop_count = 1000000;
>   
> -	for (int i = 0; i < XE2_BO_PREEMPT_DICT_ENTRIES; ++i)
> -		bo_dict_short[i] = bo_dict_long[i];
> +	for (int j = 0; j < n_short; j++) {
> +		for (int i = 0; i < XE2_BO_PREEMPT_DICT_ENTRIES; ++i)
> +			bo_dict_short[j][i] = bo_dict_long[i];
> +
> +		bo_execenv_create(fd, &execenv_short[j], NULL);
> +
> +		bo_dict_short[j][0].size = ALIGN(short_kernel_size, 0x1000);
> +		bo_dict_short[j][10].size = ALIGN(sip_kernel_size, 0x1000);
> +
> +		bo_execenv_bind(&execenv_short[j], bo_dict_short[j], XE2_BO_PREEMPT_DICT_ENTRIES);
> +
> +		memcpy(bo_dict_short[j][0].data, short_kernel, short_kernel_size);
> +		memcpy(bo_dict_short[j][10].data, sip_kernel, sip_kernel_size);
> +
> +		create_dynamic_state(bo_dict_short[j][1].data, OFFSET_KERNEL);
> +		xehp_create_surface_state(bo_dict_short[j][2].data, ADDR_INPUT, ADDR_OUTPUT);
> +		xehp_create_indirect_data(bo_dict_short[j][3].data, ADDR_INPUT, ADDR_OUTPUT);
> +		xehp_create_surface_state(bo_dict_short[j][7].data, ADDR_INPUT, ADDR_OUTPUT);
> +
> +		dinput = (float *)bo_dict_short[j][4].data;
> +
> +		for (int i = 0; i < SIZE_DATA; i++)
> +			((float *)dinput)[i] = rand() / (float)RAND_MAX;
> +
> +		xe2lpg_compute_exec_compute(bo_dict_short[j][8].data, ADDR_GENERAL_STATE_BASE,
> +						ADDR_SURFACE_STATE_BASE, ADDR_DYNAMIC_STATE_BASE,
> +						ADDR_INSTRUCTION_STATE_BASE, XE2_ADDR_STATE_CONTEXT_DATA_BASE,
> +						OFFSET_INDIRECT_DATA_START, OFFSET_KERNEL, OFFSET_STATE_SIP,
> +						thread_preemption);
> +
> +		sync_short[j].type = DRM_XE_SYNC_TYPE_SYNCOBJ;
> +		sync_short[j].flags = DRM_XE_SYNC_FLAG_SIGNAL;
> +		sync_short[j].handle = syncobj_create(fd, 0);
> +	}
>   
> -	bo_execenv_create(fd, &execenv_short, NULL);
>   	bo_execenv_create(fd, &execenv_long, NULL);
>   
>   	bo_dict_long[0].size = ALIGN(long_kernel_size, 0x1000);
> -	bo_dict_short[0].size = ALIGN(short_kernel_size, 0x1000);
>   
>   	bo_dict_long[10].size = ALIGN(sip_kernel_size, 0x1000);
> -	bo_dict_short[10].size = ALIGN(sip_kernel_size, 0x1000);
>   
>   	bo_execenv_bind(&execenv_long, bo_dict_long, XE2_BO_PREEMPT_DICT_ENTRIES);
> -	bo_execenv_bind(&execenv_short, bo_dict_short, XE2_BO_PREEMPT_DICT_ENTRIES);
>   
>   	memcpy(bo_dict_long[0].data, long_kernel, long_kernel_size);
> -	memcpy(bo_dict_short[0].data, short_kernel, short_kernel_size);
>   
>   	memcpy(bo_dict_long[10].data, sip_kernel, sip_kernel_size);
> -	memcpy(bo_dict_short[10].data, sip_kernel, sip_kernel_size);
>   
>   	create_dynamic_state(bo_dict_long[1].data, OFFSET_KERNEL);
>   	xehp_create_surface_state(bo_dict_long[2].data, ADDR_INPUT, ADDR_OUTPUT);
> @@ -1602,10 +1647,6 @@ static void xe2lpg_compute_preempt_exec(int fd, const unsigned char *long_kernel
>   					    long_kernel_loop_count);
>   	xehp_create_surface_state(bo_dict_long[7].data, ADDR_INPUT, ADDR_OUTPUT);
>   
> -	create_dynamic_state(bo_dict_short[1].data, OFFSET_KERNEL);
> -	xehp_create_surface_state(bo_dict_short[2].data, ADDR_INPUT, ADDR_OUTPUT);
> -	xehp_create_indirect_data(bo_dict_short[3].data, ADDR_INPUT, ADDR_OUTPUT);
> -	xehp_create_surface_state(bo_dict_short[7].data, ADDR_INPUT, ADDR_OUTPUT);
>   
>   	dinput = (float *)bo_dict_long[4].data;
>   	srand(time(NULL));
> @@ -1613,40 +1654,37 @@ static void xe2lpg_compute_preempt_exec(int fd, const unsigned char *long_kernel
>   	for (int i = 0; i < SIZE_DATA; i++)
>   		((float *)dinput)[i] = rand() / (float)RAND_MAX;
>   
> -	dinput = (float *)bo_dict_short[4].data;
> -
> -	for (int i = 0; i < SIZE_DATA; i++)
> -		((float *)dinput)[i] = rand() / (float)RAND_MAX;
>   
>   	xe2lpg_compute_exec_compute(bo_dict_long[8].data, ADDR_GENERAL_STATE_BASE,
>   				    ADDR_SURFACE_STATE_BASE, ADDR_DYNAMIC_STATE_BASE,
>   				    ADDR_INSTRUCTION_STATE_BASE, XE2_ADDR_STATE_CONTEXT_DATA_BASE,
> -				    OFFSET_INDIRECT_DATA_START, OFFSET_KERNEL, OFFSET_STATE_SIP);
> -
> -	xe2lpg_compute_exec_compute(bo_dict_short[8].data, ADDR_GENERAL_STATE_BASE,
> -				    ADDR_SURFACE_STATE_BASE, ADDR_DYNAMIC_STATE_BASE,
> -				    ADDR_INSTRUCTION_STATE_BASE, XE2_ADDR_STATE_CONTEXT_DATA_BASE,
> -				    OFFSET_INDIRECT_DATA_START, OFFSET_KERNEL, OFFSET_STATE_SIP);
> +				    OFFSET_INDIRECT_DATA_START, OFFSET_KERNEL, OFFSET_STATE_SIP,
> +				    thread_preemption);
>   
>   	xe_exec_sync(fd, execenv_long.exec_queue, ADDR_BATCH, &sync_long, 1);
>   
> -	xe_exec_sync(fd, execenv_short.exec_queue, ADDR_BATCH, &sync_short, 1);
> +	for (int j = 0; j < n_short; j++) {
> +		xe_exec_sync(fd, execenv_short[j].exec_queue, ADDR_BATCH, &sync_short[j], 1);
> +
> +		igt_assert(syncobj_wait(fd, &sync_short[j].handle, 1, INT64_MAX, 0, NULL));
> +		syncobj_destroy(fd, sync_short[j].handle);
> +	}
>   
> -	igt_assert(syncobj_wait(fd, &sync_short.handle, 1, INT64_MAX, 0, NULL));
> -	syncobj_destroy(fd, sync_short.handle);
>   
>   	igt_assert(syncobj_wait(fd, &sync_long.handle, 1, INT64_MAX, 0, NULL));
>   	syncobj_destroy(fd, sync_long.handle);
>   
> -	for (int i = 0; i < SIZE_DATA; i++) {
> -		float f1, f2;
> +	for (int j = 0; j < n_short; j++) {
> +		for (int i = 0; i < SIZE_DATA; i++) {
> +			float f1, f2;
>   
> -		f1 = ((float *) bo_dict_short[5].data)[i];
> -		f2 = ((float *) bo_dict_short[4].data)[i];
> +			f1 = ((float *) bo_dict_short[j][5].data)[i];
> +			f2 = ((float *) bo_dict_short[j][4].data)[i];
>   
> -		if (f1 != f2 * f2)
> -			igt_debug("[%4d] f1: %f != %f\n", i, f1, f2 * f2);
> -		igt_assert(f1 == f2 * f2);
> +			if (f1 != f2 * f2)
> +				igt_debug("[%4d] f1: %f != %f\n", i, f1, f2 * f2);
> +			igt_assert(f1 == f2 * f2);
> +		}
>   	}
>   
>   	for (int i = 0; i < SIZE_DATA; i++) {
> @@ -1659,10 +1697,12 @@ static void xe2lpg_compute_preempt_exec(int fd, const unsigned char *long_kernel
>   		igt_assert(f1 == long_kernel_loop_count);
>   	}
>   
> -	bo_execenv_unbind(&execenv_short, bo_dict_short, XE2_BO_PREEMPT_DICT_ENTRIES);
> -	bo_execenv_unbind(&execenv_long, bo_dict_long, XE2_BO_PREEMPT_DICT_ENTRIES);
> +	for (int j = 0; j < n_short; j++) {
> +		bo_execenv_unbind(&execenv_short[j], bo_dict_short[j], XE2_BO_PREEMPT_DICT_ENTRIES);
> +		bo_execenv_destroy(&execenv_short[j]);
> +	}
>   
> -	bo_execenv_destroy(&execenv_short);
> +	bo_execenv_unbind(&execenv_long, bo_dict_long, XE2_BO_PREEMPT_DICT_ENTRIES);
>   	bo_execenv_destroy(&execenv_long);
>   }
>   
> @@ -1673,7 +1713,9 @@ static const struct {
>   			     const unsigned char *short_kernel,
>   			     unsigned int short_kernel_size,
>   			     const unsigned char *sip_kernel,
> -			     unsigned int sip_kernel_size);
> +			     unsigned int sip_kernel_size,
> +			     bool thread_preemption,
> +			     bool multi_short);
>   	uint32_t compat;
>   } intel_compute_preempt_batches[] = {
>   	{
> @@ -1683,7 +1725,7 @@ static const struct {
>   	},
>   };
>   
> -static bool __run_intel_compute_kernel_preempt(int fd)
> +static bool __run_intel_compute_kernel_preempt(int fd, bool thread_preemption, bool multi_short)
>   {
>   	unsigned int ip_ver = intel_graphics_ver(intel_get_drm_devid(fd));
>   	unsigned int batch;
> @@ -1720,7 +1762,9 @@ static bool __run_intel_compute_kernel_preempt(int fd)
>   							  kernels->long_kernel_size,
>   							  kernels->kernel, kernels->size,
>   							  kernels->sip_kernel,
> -							  kernels->sip_kernel_size);
> +							  kernels->sip_kernel_size,
> +							  thread_preemption,
> +							  multi_short);
>   
>   	return true;
>   }
> @@ -1729,10 +1773,12 @@ static bool __run_intel_compute_kernel_preempt(int fd)
>    * exercise preemption scenario.
>    *
>    * @fd: file descriptor of the opened DRM Xe device
> + * @thread_preemption: flag to enable/disable thread level preemption
> + * @multi_short: submit multiple short kernels
>    *
>    * Returns true on success, false otherwise.
>    */
> -bool run_intel_compute_kernel_preempt(int fd)
> +bool run_intel_compute_kernel_preempt(int fd, bool thread_preemption, bool multi_short)
>   {
> -	return __run_intel_compute_kernel_preempt(fd);
> +	return __run_intel_compute_kernel_preempt(fd, thread_preemption, multi_short);
>   }
> diff --git a/lib/intel_compute.h b/lib/intel_compute.h
> index a02688ad4..b5932ac2b 100644
> --- a/lib/intel_compute.h
> +++ b/lib/intel_compute.h
> @@ -37,5 +37,5 @@ extern const struct intel_compute_kernels intel_compute_square_kernels[];
>   
>   bool run_intel_compute_kernel(int fd);
>   bool xe_run_intel_compute_kernel_on_engine(int fd, struct drm_xe_engine_class_instance *eci);
> -bool run_intel_compute_kernel_preempt(int fd);
> +bool run_intel_compute_kernel_preempt(int fd, bool thread_preemption, bool multi_short);
>   #endif	/* INTEL_COMPUTE_H */
> diff --git a/tests/intel/xe_compute_preempt.c b/tests/intel/xe_compute_preempt.c
> index 31703638e..4d0feb22b 100644
> --- a/tests/intel/xe_compute_preempt.c
> +++ b/tests/intel/xe_compute_preempt.c
> @@ -22,11 +22,18 @@
>    * Description:
>    *      Exercise compute walker mid thread preemption scenario
>    * Functionality: compute openCL kernel
> + *
> + * SUBTEST: compute-preempt-mtp-disabled
> + * GPU requirement: LNL
> + * Description:
> + *      Exercise compute preemption with Mid thread preemption disabled
> + * Functionality: compute openCL kernel
>    */
> +
>   static void
> -test_compute_preempt(int fd)
> +test_compute_preempt(int fd, bool thread_preemption, bool multi_short)
>   {
> -	igt_require_f(run_intel_compute_kernel_preempt(fd), "GPU not supported\n");
> +	igt_require_f(run_intel_compute_kernel_preempt(fd, thread_preemption, multi_short), "GPU not supported\n");
>   }
>   
>   igt_main
> @@ -37,7 +44,10 @@ igt_main
>   		xe = drm_open_driver(DRIVER_XE);
>   
>   	igt_subtest("compute-preempt")
> -		test_compute_preempt(xe);
> +		test_compute_preempt(xe, true, 0);
> +
> +	igt_subtest("compute-preempt-mtp-disabled")
> +		test_compute_preempt(xe, false, 1);
>   
>   	igt_fixture
>   		drm_close_driver(xe);

^ permalink raw reply	[flat|nested] 4+ messages in thread

end of thread, other threads:[~2024-02-20 20:28 UTC | newest]

Thread overview: 4+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2024-02-18 13:18 [PATCH i-g-t] tests/intel/xe_compute_preempt: Add mtp disabled preempt test janga.rahul.kumar
2024-02-18 14:05 ` ✓ CI.xeBAT: success for " Patchwork
2024-02-18 14:29 ` ✗ Fi.CI.BAT: failure " Patchwork
2024-02-20 20:27 ` [PATCH i-g-t] " Nirmoy Das

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox