Igt-dev Archive on lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH i-g-t v3 1/3] tests/intel: Add xe_exec_atomic test
@ 2024-01-17 13:26 Nirmoy Das
  2024-01-17 13:26 ` [PATCH i-g-t v3] tests/xe_spin_batch: Add spin-fixed-duration-with-preempter Nirmoy Das
                   ` (2 more replies)
  0 siblings, 3 replies; 5+ messages in thread
From: Nirmoy Das @ 2024-01-17 13:26 UTC (permalink / raw)
  To: igt-dev; +Cc: Nirmoy Das

Implement xe_exec_atomic test which will validate
MI_ATOMIC(increment, decrement) operations for all
engines and all memory regions. The test is skipped
on system memory for PVC, as this is known to be
unsupported.

Signed-off-by: Nirmoy Das <nirmoy.das@intel.com>
Reviewed-by: Janga Rahul Kumar <janga.rahul.kumar@intel.com>
---
 lib/intel_reg.h              |   1 +
 tests/intel/xe_exec_atomic.c | 174 +++++++++++++++++++++++++++++++++++
 tests/meson.build            |   1 +
 3 files changed, 176 insertions(+)
 create mode 100644 tests/intel/xe_exec_atomic.c

diff --git a/lib/intel_reg.h b/lib/intel_reg.h
index a8190d683..26833c66f 100644
--- a/lib/intel_reg.h
+++ b/lib/intel_reg.h
@@ -2647,6 +2647,7 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 
 /* Atomics */
 #define   MI_ATOMIC_INC                 (0x5 << 8)
+#define   MI_ATOMIC_DEC                 (0x6 << 8)
 #define   MI_ATOMIC_ADD                 (0x7 << 8)
 
 /* Batch */
diff --git a/tests/intel/xe_exec_atomic.c b/tests/intel/xe_exec_atomic.c
new file mode 100644
index 000000000..a21def999
--- /dev/null
+++ b/tests/intel/xe_exec_atomic.c
@@ -0,0 +1,174 @@
+/* SPDX-License-Identifier: MIT */
+/*
+* Copyright © 2024 Intel Corporation
+*
+* Authors:
+*    Nirmoy Das <nirmoy.das@intel.com>
+*/
+
+#include "igt.h"
+#include "lib/igt_syncobj.h"
+#include "xe/xe_ioctl.h"
+#include "xe/xe_query.h"
+#include "xe_drm.h"
+
+/**
+ * TEST: Tests to verify atomic functionality.
+ * Category: Software building block
+ * Sub-category: HW
+ * Functionality: intel-bb
+ * Test category: functionality test
+ */
+
+struct data {
+	uint32_t batch[16];
+	uint64_t pad;
+	uint32_t data;
+	uint64_t addr;
+};
+
+static void atomic_batch(struct data *data, uint64_t addr, int ops)
+{
+	int b;
+	uint64_t batch_offset = (char *)&(data->batch) - (char *)data;
+	uint64_t batch_addr = addr + batch_offset;
+	uint64_t sdi_offset = (char *)&(data->data) - (char *)data;
+	uint64_t sdi_addr = addr + sdi_offset;
+
+	b = 0;
+	data->batch[b++] = MI_ATOMIC | ops;
+	data->batch[b++] = sdi_addr;
+	data->batch[b++] = sdi_addr >> 32;
+	data->batch[b++] = MI_BATCH_BUFFER_END;
+	igt_assert(b <= ARRAY_SIZE(data->batch));
+
+	data->addr = batch_addr;
+}
+
+/**
+ * SUBTEST: basic-inc-all
+ * Description: Test to verify atomic increment on all available engines and memory types.
+ * SUBTEST: basic-dec-all
+ * Description: Test to verify atomic decrement on all available engines and memory types.
+ */
+static void basic_inst(int fd, int inst_type, struct drm_xe_engine_class_instance *eci,
+		       uint32_t placement)
+{
+	struct drm_xe_sync sync = {
+		.type = DRM_XE_SYNC_TYPE_SYNCOBJ,
+		.flags = DRM_XE_SYNC_FLAG_SIGNAL,
+	};
+	struct drm_xe_exec exec = {
+		.num_batch_buffer = 1,
+		.num_syncs = 1,
+		.syncs = to_user_pointer(&sync),
+	};
+	struct data *data;
+	uint32_t vm;
+	uint32_t exec_queue;
+	uint32_t bind_engine;
+	uint32_t syncobj;
+	size_t bo_size;
+	int value = 0x123456, match;
+	uint64_t addr = 0x100000;
+	uint32_t bo = 0;
+
+	syncobj = syncobj_create(fd, 0);
+	sync.handle = syncobj;
+
+	vm = xe_vm_create(fd, 0, 0);
+	bo_size = sizeof(*data);
+	bo_size = ALIGN(bo_size + xe_cs_prefetch_size(fd),
+			xe_get_default_alignment(fd));
+
+	bo = xe_bo_create(fd, vm, bo_size, placement,
+			  I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS);
+
+	exec_queue = xe_exec_queue_create(fd, vm, eci, 0);
+	bind_engine = xe_bind_exec_queue_create(fd, vm, 0);
+	xe_vm_bind_async(fd, vm, bind_engine, bo, 0, addr, bo_size, &sync, 1);
+	data = xe_bo_mmap_ext(fd, bo, bo_size, PROT_READ|PROT_WRITE);
+	data->data = value;
+
+	atomic_batch(data, addr, inst_type);
+
+	exec.exec_queue_id = exec_queue;
+	exec.address = data->addr;
+	sync.flags &= DRM_XE_SYNC_FLAG_SIGNAL;
+	xe_exec(fd, &exec);
+
+	igt_assert(syncobj_wait(fd, &syncobj, 1, INT64_MAX, 0, NULL));
+	munmap(data, bo_size);
+	data = xe_bo_mmap_ext(fd, bo, bo_size, PROT_READ|PROT_WRITE);
+	match = (inst_type == MI_ATOMIC_INC) ? ++value : --value;
+	igt_assert_eq(data->data, match);
+
+	syncobj_destroy(fd, syncobj);
+	munmap(data, bo_size);
+	gem_close(fd, bo);
+
+	xe_exec_queue_destroy(fd, exec_queue);
+	xe_vm_destroy(fd, vm);
+}
+
+static bool has_atomics(int fd, uint32_t region)
+{
+	/* System memory atomics on PVC doesn't work */
+	if (region == system_memory(fd) &&
+	    IS_PONTEVECCHIO(intel_get_drm_devid(fd)))
+		return false;
+
+	return true;
+}
+
+igt_main
+{
+	struct drm_xe_engine_class_instance *hwe;
+	int fd;
+
+	igt_fixture {
+		fd = drm_open_driver(DRIVER_XE);
+		xe_device_get(fd);
+	}
+
+	igt_subtest_with_dynamic("basic-dec-all") {
+		xe_for_each_engine(fd, hwe) {
+			uint64_t memreg = all_memory_regions(fd), region;
+
+			xe_for_each_mem_region(fd, memreg, region) {
+
+				igt_dynamic_f("Engine-%s-Instance-%d-Tile-%d-%s-memory",
+					      xe_engine_class_string(hwe->engine_class),
+					      hwe->engine_instance,
+					      hwe->gt_id, xe_region_name(region)) {
+
+					igt_skip_on(!has_atomics(fd, region));
+					basic_inst(fd, MI_ATOMIC_DEC, hwe, region);
+				}
+			}
+		}
+	}
+
+	igt_subtest_with_dynamic("basic-inc-all") {
+		xe_for_each_engine(fd, hwe) {
+			uint64_t memreg = all_memory_regions(fd), region;
+
+			xe_for_each_mem_region(fd, memreg, region) {
+
+				igt_dynamic_f("Engine-%s-Instance-%d-Tile-%d-%s-memory",
+					      xe_engine_class_string(hwe->engine_class),
+					      hwe->engine_instance,
+					      hwe->gt_id, xe_region_name(region)) {
+
+					igt_skip_on(!has_atomics(fd, region));
+					basic_inst(fd, MI_ATOMIC_INC, hwe, region);
+				}
+			}
+		}
+	}
+
+	igt_fixture {
+		xe_device_put(fd);
+		close(fd);
+	}
+}
diff --git a/tests/meson.build b/tests/meson.build
index a6a8498e2..c5ca951ef 100644
--- a/tests/meson.build
+++ b/tests/meson.build
@@ -284,6 +284,7 @@ intel_xe_progs = [
 	'xe_drm_fdinfo',
 	'xe_evict',
 	'xe_evict_ccs',
+	'xe_exec_atomic',
 	'xe_exec_balancer',
 	'xe_exec_basic',
 	'xe_exec_compute_mode',
-- 
2.42.0



^ permalink raw reply related	[flat|nested] 5+ messages in thread

* [PATCH i-g-t v3] tests/xe_spin_batch: Add spin-fixed-duration-with-preempter
  2024-01-17 13:26 [PATCH i-g-t v3 1/3] tests/intel: Add xe_exec_atomic test Nirmoy Das
@ 2024-01-17 13:26 ` Nirmoy Das
  2024-01-17 13:43   ` Nirmoy Das
  2024-01-17 13:26 ` [PATCH i-g-t v3 2/3] lib/xe/xe_query: Fix compilation -Wshadow warning Nirmoy Das
  2024-01-17 13:26 ` [PATCH i-g-t v3 3/3] tests/intel-ci: Add mi_atomic test to fast-feedback Nirmoy Das
  2 siblings, 1 reply; 5+ messages in thread
From: Nirmoy Das @ 2024-01-17 13:26 UTC (permalink / raw)
  To: igt-dev; +Cc: Nirmoy Das

Add a variation of spin-fixed-duration where the spinner
gets preempted by a short-duration, high-priority task.

This validates preemption in GPU and xe_spin.

v2-3: rebase

Cc: Zbigniew Kempczyński <zbigniew.kempczynski@intel.com>
Cc: Janga Rahul Kumar <janga.rahul.kumar@intel.com>
Signed-off-by: Nirmoy Das <nirmoy.das@intel.com>
Reviewed-by: Janga Rahul Kumar <janga.rahul.kumar@intel.com>
---
 tests/intel/xe_spin_batch.c | 127 ++++++++++++++++++++++++++++++++++--
 1 file changed, 123 insertions(+), 4 deletions(-)

diff --git a/tests/intel/xe_spin_batch.c b/tests/intel/xe_spin_batch.c
index c75709c4e..e8dca7826 100644
--- a/tests/intel/xe_spin_batch.c
+++ b/tests/intel/xe_spin_batch.c
@@ -136,12 +136,103 @@ static void spin_all(int fd, int gt, int class)
 	xe_vm_destroy(fd, vm);
 }
 
+struct data {
+	uint32_t batch[16];
+	uint64_t pad;
+	uint32_t data;
+	uint64_t addr;
+};
+
+static void store_dword_batch(struct data *data, uint64_t addr, int value)
+{
+	int b;
+	uint64_t batch_offset = (char *)&(data->batch) - (char *)data;
+	uint64_t batch_addr = addr + batch_offset;
+	uint64_t sdi_offset = (char *)&(data->data) - (char *)data;
+	uint64_t sdi_addr = addr + sdi_offset;
+
+	b = 0;
+	data->batch[b++] = MI_STORE_DWORD_IMM_GEN4;
+	data->batch[b++] = sdi_addr;
+	data->batch[b++] = sdi_addr >> 32;
+	data->batch[b++] = value;
+	data->batch[b++] = MI_BATCH_BUFFER_END;
+	igt_assert(b <= ARRAY_SIZE(data->batch));
+
+	data->addr = batch_addr;
+}
+
+static void preempter(int fd, struct drm_xe_engine_class_instance *hwe)
+{
+	struct drm_xe_sync sync = {
+		.flags = DRM_XE_SYNC_TYPE_SYNCOBJ | DRM_XE_SYNC_FLAG_SIGNAL
+	};
+	struct drm_xe_exec exec = {
+		.num_batch_buffer = 1,
+		.num_syncs = 1,
+		.syncs = to_user_pointer(&sync),
+	};
+	struct drm_xe_ext_set_property ext = {
+		.base.next_extension = 0,
+		.base.name = DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY,
+		.property = DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY,
+		.value = 2, /* High priority */
+	};
+	struct data *data;
+	uint32_t vm;
+	uint32_t exec_queue;
+	uint32_t syncobj;
+	size_t bo_size;
+	int value = 0x123456;
+	uint64_t addr = 0x100000;
+	uint32_t bo = 0;
+
+	syncobj = syncobj_create(fd, 0);
+	sync.handle = syncobj;
+
+	vm = xe_vm_create(fd, DRM_XE_VM_CREATE_FLAG_ASYNC_DEFAULT, 0);
+	bo_size = sizeof(*data);
+	bo_size = ALIGN(bo_size + xe_cs_prefetch_size(fd),
+			xe_get_default_alignment(fd));
+
+	bo = xe_bo_create(fd, vm, bo_size,
+			  vram_if_possible(fd, hwe->gt_id),
+			  DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
+
+	xe_vm_bind_async(fd, vm, hwe->gt_id, bo, 0, addr, bo_size, &sync, 1);
+	data = xe_bo_map(fd, bo, bo_size);
+	store_dword_batch(data, addr, value);
+
+	exec_queue = xe_exec_queue_create(fd, vm, hwe, to_user_pointer(&ext));
+	exec.exec_queue_id = exec_queue;
+	exec.address = data->addr;
+	sync.flags &= DRM_XE_SYNC_FLAG_SIGNAL;
+	xe_exec(fd, &exec);
+
+	igt_assert(syncobj_wait(fd, &syncobj, 1, INT64_MAX, 0, NULL));
+	igt_assert_eq(data->data, value);
+
+	syncobj_destroy(fd, syncobj);
+	munmap(data, bo_size);
+	gem_close(fd, bo);
+
+	xe_exec_queue_destroy(fd, exec_queue);
+	xe_vm_destroy(fd, vm);
+}
+
+#define SPIN_FIX_DURATION_NORMAL		0
+#define SPIN_FIX_DURATION_PREEMPT		1
 /**
  * SUBTEST: spin-fixed-duration
  * Description: Basic test which validates the functionality of xe_spin with fixed duration.
  * Run type: FULL
  */
-static void xe_spin_fixed_duration(int fd)
+/**
+ * SUBTEST: spin-fixed-duration-with-preempter
+ * Description: Basic test which validates the functionality of xe_spin preemption which gets preempted with a short duration high-priority task.
+ * Run type: FULL
+ */
+static void xe_spin_fixed_duration(int fd, int gt, int class, int flags)
 {
 	struct drm_xe_sync sync = {
 		.handle = syncobj_create(fd, 0),
@@ -153,12 +244,20 @@ static void xe_spin_fixed_duration(int fd)
 		.num_syncs = 1,
 		.syncs = to_user_pointer(&sync),
 	};
+	struct drm_xe_ext_set_property ext_prio = {
+		.base.next_extension = 0,
+		.base.name = DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY,
+		.property = DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY,
+		.value = 0, /* Low priority */
+	};
+	struct drm_xe_engine_class_instance *hwe = NULL, *_hwe;
 	const uint64_t duration_ns = NSEC_PER_SEC / 10; /* 100ms */
 	uint64_t spin_addr;
 	uint64_t ahnd;
 	uint32_t exec_queue;
 	uint32_t vm;
 	uint32_t bo;
+	uint64_t ext = 0;
 	size_t bo_size;
 	struct xe_spin *spin;
 	struct timespec tv;
@@ -166,8 +265,18 @@ static void xe_spin_fixed_duration(int fd)
 	igt_stats_t stats;
 	int i;
 
+	if (flags & SPIN_FIX_DURATION_PREEMPT)
+		ext = to_user_pointer(&ext_prio);
+
+	xe_for_each_engine(fd, _hwe)
+		if (_hwe->engine_class == class && _hwe->gt_id == gt)
+			hwe = _hwe;
+
+	if (!hwe)
+		return;
+
 	vm = xe_vm_create(fd, 0, 0);
-	exec_queue = xe_exec_queue_create_class(fd, vm, DRM_XE_ENGINE_CLASS_COPY);
+	exec_queue = xe_exec_queue_create(fd, vm, hwe, ext);
 	ahnd = intel_allocator_open(fd, 0, INTEL_ALLOCATOR_RELOC);
 	bo_size = ALIGN(sizeof(*spin) + xe_cs_prefetch_size(fd), xe_get_default_alignment(fd));
 	bo = xe_bo_create(fd, vm, bo_size, vram_if_possible(fd, 0), 0);
@@ -187,13 +296,17 @@ static void xe_spin_fixed_duration(int fd)
 		igt_gettime(&tv);
 		xe_exec(fd, &exec);
 		xe_spin_wait_started(spin);
+		if (flags & SPIN_FIX_DURATION_PREEMPT)
+			preempter(fd, hwe);
+
 		igt_assert(syncobj_wait(fd, &sync.handle, 1, INT64_MAX, 0, NULL));
 		igt_stats_push_float(&stats, igt_nsec_elapsed(&tv) * 1e-6);
 		syncobj_reset(fd, &sync.handle, 1);
 		igt_debug("i=%d %.2fms\n", i, stats.values_f[i]);
 	}
 	elapsed_ms = igt_stats_get_median(&stats);
-	igt_info("%.0fms spin took %.2fms (median)\n", duration_ns * 1e-6, elapsed_ms);
+	igt_info("%s: %.0fms spin took %.2fms (median)\n", xe_engine_class_string(hwe->engine_class),
+		 duration_ns * 1e-6, elapsed_ms);
 	igt_assert(elapsed_ms < duration_ns * 1.5e-6 && elapsed_ms > duration_ns * 0.5e-6);
 
 	xe_vm_unbind_sync(fd, vm, 0, spin_addr, bo_size);
@@ -231,7 +344,13 @@ igt_main
 	}
 
 	igt_subtest("spin-fixed-duration")
-		xe_spin_fixed_duration(fd);
+		xe_spin_fixed_duration(fd, 0, DRM_XE_ENGINE_CLASS_COPY, SPIN_FIX_DURATION_NORMAL);
+
+
+	igt_subtest("spin-fixed-duration-with-preempter")
+		xe_for_each_gt(fd, gt)
+			xe_for_each_engine_class(class)
+				xe_spin_fixed_duration(fd, gt, class, SPIN_FIX_DURATION_PREEMPT);
 
 	igt_fixture
 		drm_close_driver(fd);
-- 
2.42.0



^ permalink raw reply related	[flat|nested] 5+ messages in thread

* [PATCH i-g-t v3 2/3] lib/xe/xe_query: Fix compilation -Wshadow warning
  2024-01-17 13:26 [PATCH i-g-t v3 1/3] tests/intel: Add xe_exec_atomic test Nirmoy Das
  2024-01-17 13:26 ` [PATCH i-g-t v3] tests/xe_spin_batch: Add spin-fixed-duration-with-preempter Nirmoy Das
@ 2024-01-17 13:26 ` Nirmoy Das
  2024-01-17 13:26 ` [PATCH i-g-t v3 3/3] tests/intel-ci: Add mi_atomic test to fast-feedback Nirmoy Das
  2 siblings, 0 replies; 5+ messages in thread
From: Nirmoy Das @ 2024-01-17 13:26 UTC (permalink / raw)
  To: igt-dev; +Cc: Nirmoy Das

Fix -Wshadow warning when xe_for_each_engine() and
xe_for_each_mem_region() are used together.

Signed-off-by: Nirmoy Das <nirmoy.das@intel.com>
Reviewed-by: Janga Rahul Kumar <janga.rahul.kumar@intel.com>
---
 lib/xe/xe_query.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/lib/xe/xe_query.h b/lib/xe/xe_query.h
index 1e3a7bdb9..032db1308 100644
--- a/lib/xe/xe_query.h
+++ b/lib/xe/xe_query.h
@@ -67,8 +67,8 @@ struct xe_device {
 	for (__gt = 0; __gt < xe_number_gt(__fd); ++__gt)
 
 #define xe_for_each_mem_region(__fd, __memreg, __r) \
-	for (uint64_t __i = 0; __i < igt_fls(__memreg); __i++) \
-		for_if(__r = (__memreg & (1ull << __i)))
+	for (uint64_t __j = 0; __j < igt_fls(__memreg); __j++) \
+		for_if(__r = (__memreg & (1ull << __j)))
 
 #define XE_IS_CLASS_SYSMEM(__region) ((__region)->mem_class == DRM_XE_MEM_REGION_CLASS_SYSMEM)
 #define XE_IS_CLASS_VRAM(__region) ((__region)->mem_class == DRM_XE_MEM_REGION_CLASS_VRAM)
-- 
2.42.0



^ permalink raw reply related	[flat|nested] 5+ messages in thread

* [PATCH i-g-t v3 3/3] tests/intel-ci: Add mi_atomic test to fast-feedback
  2024-01-17 13:26 [PATCH i-g-t v3 1/3] tests/intel: Add xe_exec_atomic test Nirmoy Das
  2024-01-17 13:26 ` [PATCH i-g-t v3] tests/xe_spin_batch: Add spin-fixed-duration-with-preempter Nirmoy Das
  2024-01-17 13:26 ` [PATCH i-g-t v3 2/3] lib/xe/xe_query: Fix compilation -Wshadow warning Nirmoy Das
@ 2024-01-17 13:26 ` Nirmoy Das
  2 siblings, 0 replies; 5+ messages in thread
From: Nirmoy Das @ 2024-01-17 13:26 UTC (permalink / raw)
  To: igt-dev; +Cc: Nirmoy Das

Add this basic functionality test to the fast-feedback list.

Signed-off-by: Nirmoy Das <nirmoy.das@intel.com>
Acked-by: Kamil Konieczny <kamil.konieczny@linux.intel.com>
---
 tests/intel-ci/xe-fast-feedback.testlist | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/tests/intel-ci/xe-fast-feedback.testlist b/tests/intel-ci/xe-fast-feedback.testlist
index bef5b0b8a..da79c76b4 100644
--- a/tests/intel-ci/xe-fast-feedback.testlist
+++ b/tests/intel-ci/xe-fast-feedback.testlist
@@ -29,6 +29,8 @@ igt@xe_evict@evict-small-multi-vm-cm
 igt@xe_evict@evict-threads-small
 igt@xe_evict_ccs@evict-overcommit-simple
 igt@xe_evict_ccs@evict-overcommit-parallel-nofree-samefd
+igt@xe_exec_atomic@basic-inc-all
+igt@xe_exec_atomic@basic-dec-all
 igt@xe_exec_balancer@twice-virtual-basic
 igt@xe_exec_balancer@no-exec-virtual-basic
 igt@xe_exec_balancer@twice-cm-virtual-basic
-- 
2.42.0



^ permalink raw reply related	[flat|nested] 5+ messages in thread

* Re: [PATCH i-g-t v3] tests/xe_spin_batch: Add spin-fixed-duration-with-preempter
  2024-01-17 13:26 ` [PATCH i-g-t v3] tests/xe_spin_batch: Add spin-fixed-duration-with-preempter Nirmoy Das
@ 2024-01-17 13:43   ` Nirmoy Das
  0 siblings, 0 replies; 5+ messages in thread
From: Nirmoy Das @ 2024-01-17 13:43 UTC (permalink / raw)
  To: igt-dev

Please ignore this series. I included this older patch.


On 1/17/2024 2:26 PM, Nirmoy Das wrote:
> Add a variation of spin-fixed-duration where the spinner
> gets preempted by a short-duration, high-priority task.
>
> This validates preemption in GPU and xe_spin.
>
> v2-3: rebase
>
> Cc: Zbigniew Kempczyński <zbigniew.kempczynski@intel.com>
> Cc: Janga Rahul Kumar <janga.rahul.kumar@intel.com>
> Signed-off-by: Nirmoy Das <nirmoy.das@intel.com>
> Reviewed-by: Janga Rahul Kumar <janga.rahul.kumar@intel.com>
> ---
>   tests/intel/xe_spin_batch.c | 127 ++++++++++++++++++++++++++++++++++--
>   1 file changed, 123 insertions(+), 4 deletions(-)
>
> diff --git a/tests/intel/xe_spin_batch.c b/tests/intel/xe_spin_batch.c
> index c75709c4e..e8dca7826 100644
> --- a/tests/intel/xe_spin_batch.c
> +++ b/tests/intel/xe_spin_batch.c
> @@ -136,12 +136,103 @@ static void spin_all(int fd, int gt, int class)
>   	xe_vm_destroy(fd, vm);
>   }
>   
> +struct data {
> +	uint32_t batch[16];
> +	uint64_t pad;
> +	uint32_t data;
> +	uint64_t addr;
> +};
> +
> +static void store_dword_batch(struct data *data, uint64_t addr, int value)
> +{
> +	int b;
> +	uint64_t batch_offset = (char *)&(data->batch) - (char *)data;
> +	uint64_t batch_addr = addr + batch_offset;
> +	uint64_t sdi_offset = (char *)&(data->data) - (char *)data;
> +	uint64_t sdi_addr = addr + sdi_offset;
> +
> +	b = 0;
> +	data->batch[b++] = MI_STORE_DWORD_IMM_GEN4;
> +	data->batch[b++] = sdi_addr;
> +	data->batch[b++] = sdi_addr >> 32;
> +	data->batch[b++] = value;
> +	data->batch[b++] = MI_BATCH_BUFFER_END;
> +	igt_assert(b <= ARRAY_SIZE(data->batch));
> +
> +	data->addr = batch_addr;
> +}
> +
> +static void preempter(int fd, struct drm_xe_engine_class_instance *hwe)
> +{
> +	struct drm_xe_sync sync = {
> +		.flags = DRM_XE_SYNC_TYPE_SYNCOBJ | DRM_XE_SYNC_FLAG_SIGNAL
> +	};
> +	struct drm_xe_exec exec = {
> +		.num_batch_buffer = 1,
> +		.num_syncs = 1,
> +		.syncs = to_user_pointer(&sync),
> +	};
> +	struct drm_xe_ext_set_property ext = {
> +		.base.next_extension = 0,
> +		.base.name = DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY,
> +		.property = DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY,
> +		.value = 2, /* High priority */
> +	};
> +	struct data *data;
> +	uint32_t vm;
> +	uint32_t exec_queue;
> +	uint32_t syncobj;
> +	size_t bo_size;
> +	int value = 0x123456;
> +	uint64_t addr = 0x100000;
> +	uint32_t bo = 0;
> +
> +	syncobj = syncobj_create(fd, 0);
> +	sync.handle = syncobj;
> +
> +	vm = xe_vm_create(fd, DRM_XE_VM_CREATE_FLAG_ASYNC_DEFAULT, 0);
> +	bo_size = sizeof(*data);
> +	bo_size = ALIGN(bo_size + xe_cs_prefetch_size(fd),
> +			xe_get_default_alignment(fd));
> +
> +	bo = xe_bo_create(fd, vm, bo_size,
> +			  vram_if_possible(fd, hwe->gt_id),
> +			  DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
> +
> +	xe_vm_bind_async(fd, vm, hwe->gt_id, bo, 0, addr, bo_size, &sync, 1);
> +	data = xe_bo_map(fd, bo, bo_size);
> +	store_dword_batch(data, addr, value);
> +
> +	exec_queue = xe_exec_queue_create(fd, vm, hwe, to_user_pointer(&ext));
> +	exec.exec_queue_id = exec_queue;
> +	exec.address = data->addr;
> +	sync.flags &= DRM_XE_SYNC_FLAG_SIGNAL;
> +	xe_exec(fd, &exec);
> +
> +	igt_assert(syncobj_wait(fd, &syncobj, 1, INT64_MAX, 0, NULL));
> +	igt_assert_eq(data->data, value);
> +
> +	syncobj_destroy(fd, syncobj);
> +	munmap(data, bo_size);
> +	gem_close(fd, bo);
> +
> +	xe_exec_queue_destroy(fd, exec_queue);
> +	xe_vm_destroy(fd, vm);
> +}
> +
> +#define SPIN_FIX_DURATION_NORMAL		0
> +#define SPIN_FIX_DURATION_PREEMPT		1
>   /**
>    * SUBTEST: spin-fixed-duration
>    * Description: Basic test which validates the functionality of xe_spin with fixed duration.
>    * Run type: FULL
>    */
> -static void xe_spin_fixed_duration(int fd)
> +/**
> + * SUBTEST: spin-fixed-duration-with-preempter
> + * Description: Basic test which validates the functionality of xe_spin preemption which gets preempted with a short duration high-priority task.
> + * Run type: FULL
> + */
> +static void xe_spin_fixed_duration(int fd, int gt, int class, int flags)
>   {
>   	struct drm_xe_sync sync = {
>   		.handle = syncobj_create(fd, 0),
> @@ -153,12 +244,20 @@ static void xe_spin_fixed_duration(int fd)
>   		.num_syncs = 1,
>   		.syncs = to_user_pointer(&sync),
>   	};
> +	struct drm_xe_ext_set_property ext_prio = {
> +		.base.next_extension = 0,
> +		.base.name = DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY,
> +		.property = DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY,
> +		.value = 0, /* Low priority */
> +	};
> +	struct drm_xe_engine_class_instance *hwe = NULL, *_hwe;
>   	const uint64_t duration_ns = NSEC_PER_SEC / 10; /* 100ms */
>   	uint64_t spin_addr;
>   	uint64_t ahnd;
>   	uint32_t exec_queue;
>   	uint32_t vm;
>   	uint32_t bo;
> +	uint64_t ext = 0;
>   	size_t bo_size;
>   	struct xe_spin *spin;
>   	struct timespec tv;
> @@ -166,8 +265,18 @@ static void xe_spin_fixed_duration(int fd)
>   	igt_stats_t stats;
>   	int i;
>   
> +	if (flags & SPIN_FIX_DURATION_PREEMPT)
> +		ext = to_user_pointer(&ext_prio);
> +
> +	xe_for_each_engine(fd, _hwe)
> +		if (_hwe->engine_class == class && _hwe->gt_id == gt)
> +			hwe = _hwe;
> +
> +	if (!hwe)
> +		return;
> +
>   	vm = xe_vm_create(fd, 0, 0);
> -	exec_queue = xe_exec_queue_create_class(fd, vm, DRM_XE_ENGINE_CLASS_COPY);
> +	exec_queue = xe_exec_queue_create(fd, vm, hwe, ext);
>   	ahnd = intel_allocator_open(fd, 0, INTEL_ALLOCATOR_RELOC);
>   	bo_size = ALIGN(sizeof(*spin) + xe_cs_prefetch_size(fd), xe_get_default_alignment(fd));
>   	bo = xe_bo_create(fd, vm, bo_size, vram_if_possible(fd, 0), 0);
> @@ -187,13 +296,17 @@ static void xe_spin_fixed_duration(int fd)
>   		igt_gettime(&tv);
>   		xe_exec(fd, &exec);
>   		xe_spin_wait_started(spin);
> +		if (flags & SPIN_FIX_DURATION_PREEMPT)
> +			preempter(fd, hwe);
> +
>   		igt_assert(syncobj_wait(fd, &sync.handle, 1, INT64_MAX, 0, NULL));
>   		igt_stats_push_float(&stats, igt_nsec_elapsed(&tv) * 1e-6);
>   		syncobj_reset(fd, &sync.handle, 1);
>   		igt_debug("i=%d %.2fms\n", i, stats.values_f[i]);
>   	}
>   	elapsed_ms = igt_stats_get_median(&stats);
> -	igt_info("%.0fms spin took %.2fms (median)\n", duration_ns * 1e-6, elapsed_ms);
> +	igt_info("%s: %.0fms spin took %.2fms (median)\n", xe_engine_class_string(hwe->engine_class),
> +		 duration_ns * 1e-6, elapsed_ms);
>   	igt_assert(elapsed_ms < duration_ns * 1.5e-6 && elapsed_ms > duration_ns * 0.5e-6);
>   
>   	xe_vm_unbind_sync(fd, vm, 0, spin_addr, bo_size);
> @@ -231,7 +344,13 @@ igt_main
>   	}
>   
>   	igt_subtest("spin-fixed-duration")
> -		xe_spin_fixed_duration(fd);
> +		xe_spin_fixed_duration(fd, 0, DRM_XE_ENGINE_CLASS_COPY, SPIN_FIX_DURATION_NORMAL);
> +
> +
> +	igt_subtest("spin-fixed-duration-with-preempter")
> +		xe_for_each_gt(fd, gt)
> +			xe_for_each_engine_class(class)
> +				xe_spin_fixed_duration(fd, gt, class, SPIN_FIX_DURATION_PREEMPT);
>   
>   	igt_fixture
>   		drm_close_driver(fd);


^ permalink raw reply	[flat|nested] 5+ messages in thread

end of thread, other threads:[~2024-01-17 13:43 UTC | newest]

Thread overview: 5+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2024-01-17 13:26 [PATCH i-g-t v3 1/3] tests/intel: Add xe_exec_atomic test Nirmoy Das
2024-01-17 13:26 ` [PATCH i-g-t v3] tests/xe_spin_batch: Add spin-fixed-duration-with-preempter Nirmoy Das
2024-01-17 13:43   ` Nirmoy Das
2024-01-17 13:26 ` [PATCH i-g-t v3 2/3] lib/xe/xe_query: Fix compilation -Wshadow warning Nirmoy Das
2024-01-17 13:26 ` [PATCH i-g-t v3 3/3] tests/intel-ci: Add mi_atomic test to fast-feedback Nirmoy Das

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox