Igt-dev Archive on lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH i-g-t v2 0/4] PSR testing improvement by checking sink status
@ 2024-09-10 13:12 Jouni Högander
  2024-09-10 13:12 ` [PATCH i-g-t v2 1/4] tests/intel/xe_drm_fdinfo: Implement virtual/parallel exec queues Jouni Högander
                   ` (2 more replies)
  0 siblings, 3 replies; 5+ messages in thread
From: Jouni Högander @ 2024-09-10 13:12 UTC (permalink / raw)
  To: igt-dev; +Cc: ramanaidu.naladala, Jouni Högander

Currently we are not checking panel status in our PSR testcases. This
might lead to a situation where the panel is not even aware of PSR being
used. Also, possible errors detected by the sink are ignored by our
testcases. This patch set tries to address this.

v2: split patch 2.

Dominik Karol Piątkowski (1):
  lib/intel_batchbuffer: Introduce
    intel_bb_create_with_context_in_region

Jouni Högander (2):
  lib/igt_psr: Add mechanism to check sink status as well
  tests/intel/kms_psr*: Add psr_sink_error_check to PSR tests

Lucas De Marchi (1):
  tests/intel/xe_drm_fdinfo: Implement virtual/parallel exec queues

 lib/igt_psr.c                     |  52 ++++++++++++-
 lib/igt_psr.h                     |   1 +
 lib/intel_batchbuffer.c           |  74 ++++++++++++++----
 lib/intel_batchbuffer.h           |   5 +-
 tests/intel/kms_psr.c             |   2 +
 tests/intel/kms_psr2_sf.c         |   2 +
 tests/intel/kms_psr2_su.c         |   2 +
 tests/intel/kms_psr_stress_test.c |   1 +
 tests/intel/xe_drm_fdinfo.c       | 124 +++++++++++++++++++++++++++++-
 tests/intel/xe_pat.c              |   4 +-
 10 files changed, 245 insertions(+), 22 deletions(-)

-- 
2.34.1


^ permalink raw reply	[flat|nested] 5+ messages in thread

* [PATCH i-g-t v2 1/4] tests/intel/xe_drm_fdinfo: Implement virtual/parallel exec queues
  2024-09-10 13:12 [PATCH i-g-t v2 0/4] PSR testing improvement by checking sink status Jouni Högander
@ 2024-09-10 13:12 ` Jouni Högander
  2024-09-10 13:18   ` Hogander, Jouni
  2024-09-10 13:12 ` [PATCH i-g-t v2 2/4] lib/intel_batchbuffer: Introduce intel_bb_create_with_context_in_region Jouni Högander
  2024-09-10 13:12 ` [PATCH i-g-t v2 3/4] lib/igt_psr: Add mechanism to check sink status as well Jouni Högander
  2 siblings, 1 reply; 5+ messages in thread
From: Jouni Högander @ 2024-09-10 13:12 UTC (permalink / raw)
  To: igt-dev
  Cc: ramanaidu.naladala, Lucas De Marchi, Umesh Nerlige Ramappa,
	Matthew Brost

From: Lucas De Marchi <lucas.demarchi@intel.com>

Implement a similar function to utilization_single(), but also taking
virtual/parallel into account. I chose some different variable names
to make it more obvious what exactly it is testing and integrated
with the xe_gt_fill_engines_by_class() function recently added.
A possible refactor in the future is to make the other tests use
this function and remove utilization_single().

Based on previous patch by Umesh.

Cc: Umesh Nerlige Ramappa <umesh.nerlige.ramappa@intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Link: https://lore.kernel.org/r/20240904225746.2857448-3-lucas.demarchi@intel.com
Signed-off-by: Lucas De Marchi <lucas.demarchi@intel.com>
---
 tests/intel/xe_drm_fdinfo.c | 124 +++++++++++++++++++++++++++++++++++-
 1 file changed, 123 insertions(+), 1 deletion(-)

diff --git a/tests/intel/xe_drm_fdinfo.c b/tests/intel/xe_drm_fdinfo.c
index 8acb95040..747b6155c 100644
--- a/tests/intel/xe_drm_fdinfo.c
+++ b/tests/intel/xe_drm_fdinfo.c
@@ -12,6 +12,8 @@
 #include "xe/xe_ioctl.h"
 #include "xe/xe_query.h"
 #include "xe/xe_spin.h"
+#include "xe/xe_util.h"
+
 /**
  * TEST: xe drm fdinfo
  * Description: Read and verify drm client memory consumption and engine utilization using fdinfo
@@ -67,6 +69,8 @@ IGT_TEST_DESCRIPTION("Read and verify drm client memory consumption and engine u
 #define TEST_BUSY		(1 << 0)
 #define TEST_TRAILING_IDLE	(1 << 1)
 #define TEST_ISOLATION		(1 << 2)
+#define TEST_VIRTUAL		(1 << 3)
+#define TEST_PARALLEL		(1 << 4)
 
 enum expected_load {
 	EXPECTED_LOAD_IDLE,
@@ -715,10 +719,102 @@ utilization_all_full_load(int fd)
 
 	xe_vm_destroy(fd, vm);
 }
+
+/**
+ * SUBTEST: %s-utilization-single-idle
+ * Description: Check that each engine shows no load
+ *
+ * SUBTEST: %s-utilization-single-full-load
+ * Description: Check that each engine shows full load
+ *
+ * SUBTEST: %s-utilization-single-full-load-isolation
+ * Description: Check that each engine load does not spill over to other drm clients
+ *
+ * arg[1]:
+ *
+ * @virtual:			virtual
+ * @parallel:			parallel
+ */
+static void
+utilization_multi(int fd, int gt, int class, unsigned int flags)
+{
+	struct pceu_cycles pceu[2][DRM_XE_ENGINE_CLASS_COMPUTE + 1];
+	struct pceu_cycles pceu_spill[2][DRM_XE_ENGINE_CLASS_COMPUTE + 1];
+	struct drm_xe_engine_class_instance eci[XE_MAX_ENGINE_INSTANCE];
+	struct spin_ctx *ctx = NULL;
+	enum expected_load expected_load;
+	int fd_spill, num_placements;
+	uint32_t vm;
+	bool virtual = flags & TEST_VIRTUAL;
+	bool parallel = flags & TEST_PARALLEL;
+	uint16_t width;
+
+	igt_assert(virtual ^ parallel);
+
+	num_placements = xe_gt_fill_engines_by_class(fd, gt, class, eci);
+	if (num_placements < 2)
+		return;
+
+	if (parallel) {
+		width = num_placements;
+		num_placements = 1;
+	} else {
+		width = 1;
+	}
+
+	if (flags & TEST_ISOLATION)
+		fd_spill = drm_reopen_driver(fd);
+
+	vm = xe_vm_create(fd, 0, 0);
+	if (flags & TEST_BUSY) {
+		ctx = spin_ctx_init(fd, eci, vm, width, num_placements);
+		spin_sync_start(fd, ctx);
+	}
+
+	read_engine_cycles(fd, pceu[0]);
+	if (flags & TEST_ISOLATION)
+		read_engine_cycles(fd_spill, pceu_spill[0]);
+
+	usleep(batch_duration_usec);
+	if (flags & TEST_TRAILING_IDLE)
+		spin_sync_end(fd, ctx);
+
+	read_engine_cycles(fd, pceu[1]);
+	if (flags & TEST_ISOLATION)
+		read_engine_cycles(fd_spill, pceu_spill[1]);
+
+	expected_load = flags & TEST_BUSY ?
+	       EXPECTED_LOAD_FULL : EXPECTED_LOAD_IDLE;
+	check_results(pceu[0], pceu[1], class, width, expected_load);
+
+	if (flags & TEST_ISOLATION) {
+		/*
+		 * Load from one client shouldn't spill on another,
+		 * so check for idle
+		 */
+		check_results(pceu_spill[0], pceu_spill[1], class, width,
+			      EXPECTED_LOAD_IDLE);
+		close(fd_spill);
+	}
+
+	spin_sync_end(fd, ctx);
+	spin_ctx_destroy(fd, ctx);
+
+	xe_vm_destroy(fd, vm);
+}
+
 igt_main
 {
+	const struct section {
+		const char *name;
+		unsigned int flags;
+	} sections[] = {
+		{ .name = "virtual", .flags = TEST_VIRTUAL },
+		{ .name = "parallel", .flags = TEST_PARALLEL },
+		{ }
+	};
 	struct drm_xe_engine_class_instance *hwe;
-	int xe;
+	int xe, gt, class;
 
 	igt_fixture {
 		struct drm_client_fdinfo info = { };
@@ -775,6 +871,32 @@ igt_main
 	igt_subtest("utilization-all-full-load")
 		utilization_all_full_load(xe);
 
+
+	for (const struct section *s = sections; s->name; s++) {
+		igt_subtest_f("%s-utilization-single-idle", s->name)
+			xe_for_each_gt(xe, gt)
+				xe_for_each_engine_class(class)
+					utilization_multi(xe, gt, class, s->flags);
+
+		igt_subtest_f("%s-utilization-single-full-load", s->name)
+			xe_for_each_gt(xe, gt)
+				xe_for_each_engine_class(class)
+					utilization_multi(xe, gt, class,
+							  s->flags |
+							  TEST_BUSY |
+							  TEST_TRAILING_IDLE);
+
+		igt_subtest_f("%s-utilization-single-full-load-isolation",
+			      s->name)
+			xe_for_each_gt(xe, gt)
+				xe_for_each_engine_class(class)
+					utilization_multi(xe, gt, class,
+							  s->flags |
+							  TEST_BUSY |
+							  TEST_TRAILING_IDLE |
+							  TEST_ISOLATION);
+	}
+
 	igt_fixture {
 		drm_close_driver(xe);
 	}
-- 
2.34.1


^ permalink raw reply related	[flat|nested] 5+ messages in thread

* [PATCH i-g-t v2 2/4] lib/intel_batchbuffer: Introduce intel_bb_create_with_context_in_region
  2024-09-10 13:12 [PATCH i-g-t v2 0/4] PSR testing improvement by checking sink status Jouni Högander
  2024-09-10 13:12 ` [PATCH i-g-t v2 1/4] tests/intel/xe_drm_fdinfo: Implement virtual/parallel exec queues Jouni Högander
@ 2024-09-10 13:12 ` Jouni Högander
  2024-09-10 13:12 ` [PATCH i-g-t v2 3/4] lib/igt_psr: Add mechanism to check sink status as well Jouni Högander
  2 siblings, 0 replies; 5+ messages in thread
From: Jouni Högander @ 2024-09-10 13:12 UTC (permalink / raw)
  To: igt-dev
  Cc: ramanaidu.naladala, Dominik Karol Piątkowski,
	Zbigniew Kempczyński

From: Dominik Karol Piątkowski <dominik.karol.piatkowski@intel.com>

This patch extends __intel_bb_create to take memory region as argument,
making it possible to create batchbuffer in given memory region.
Existing helper functions preserve original behavior.

To make use of this extension, intel_bb_create_with_context_in_region
is introduced, that creates bb with given context in given memory region.

v2:
 - Support both i915 and xe in intel_bb_create_with_context_in_region
 - Extend intel_bb_create_full to use region argument
v3:
 - Introduce is_i915 variable to avoid calling is_i915_device() twice
 - Squash "Fix igt_require in intel_bb_create_no_relocs"
   gem_uses_full_ppgtt() calls gem_gtt_type(), that expects i915 drm
   file descriptor. Wrap the igt_require in is_i915_device() check
   to fix the issue.

Signed-off-by: Dominik Karol Piątkowski <dominik.karol.piatkowski@intel.com>
Reviewed-by: Zbigniew Kempczyński <zbigniew.kempczynski@intel.com>
Link: https://lore.kernel.org/r/20240904084249.4365-2-dominik.karol.piatkowski@intel.com
---
 lib/intel_batchbuffer.c | 74 +++++++++++++++++++++++++++++++----------
 lib/intel_batchbuffer.h |  5 ++-
 tests/intel/xe_pat.c    |  4 +--
 3 files changed, 63 insertions(+), 20 deletions(-)

diff --git a/lib/intel_batchbuffer.c b/lib/intel_batchbuffer.c
index f91091bc4..299e08926 100644
--- a/lib/intel_batchbuffer.c
+++ b/lib/intel_batchbuffer.c
@@ -850,6 +850,7 @@ static inline uint64_t __intel_bb_get_offset(struct intel_bb *ibb,
  * @size: size of the batchbuffer
  * @do_relocs: use relocations or allocator
  * @allocator_type: allocator type, must be INTEL_ALLOCATOR_NONE for relocations
+ * @region: memory region
  *
  * intel-bb assumes it will work in one of two modes - with relocations or
  * with using allocator (currently RELOC and SIMPLE are implemented).
@@ -893,7 +894,7 @@ static struct intel_bb *
 __intel_bb_create(int fd, uint32_t ctx, uint32_t vm, const intel_ctx_cfg_t *cfg,
 		  uint32_t size, bool do_relocs,
 		  uint64_t start, uint64_t end, uint64_t alignment,
-		  uint8_t allocator_type, enum allocator_strategy strategy)
+		  uint8_t allocator_type, enum allocator_strategy strategy, uint64_t region)
 {
 	struct drm_i915_gem_exec_object2 *object;
 	struct intel_bb *ibb = calloc(1, sizeof(*ibb));
@@ -922,7 +923,7 @@ __intel_bb_create(int fd, uint32_t ctx, uint32_t vm, const intel_ctx_cfg_t *cfg,
 
 		ibb->alignment = alignment;
 		ibb->gtt_size = gem_aperture_size(fd);
-		ibb->handle = gem_create(fd, size);
+		ibb->handle = gem_create_in_memory_regions(fd, size, region);
 
 		if (!ibb->uses_full_ppgtt)
 			do_relocs = true;
@@ -954,7 +955,7 @@ __intel_bb_create(int fd, uint32_t ctx, uint32_t vm, const intel_ctx_cfg_t *cfg,
 
 		ibb->alignment = alignment;
 		size = ALIGN(size + xe_cs_prefetch_size(fd), ibb->alignment);
-		ibb->handle = xe_bo_create(fd, 0, size, vram_if_possible(fd, 0),
+		ibb->handle = xe_bo_create(fd, 0, size, region,
 					   DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
 
 		/* Limit to 48-bit due to MI_* address limitation */
@@ -1027,12 +1028,13 @@ __intel_bb_create(int fd, uint32_t ctx, uint32_t vm, const intel_ctx_cfg_t *cfg,
  * @alignment: alignment to use for allocator, zero for default
  * @allocator_type: allocator type, SIMPLE, RELOC, ...
  * @strategy: allocation strategy
+ * @region: memory region
  *
  * Creates bb with context passed in @ctx, size in @size and allocator type
- * in @allocator_type. Relocations are set to false because IGT allocator
- * is used in that case. VM range is passed to allocator (@start and @end)
- * and allocation @strategy (suggestion to allocator about address allocation
- * preferences).
+ * in @allocator_type, in memory region passed in @region. Relocations are set
+ * to false because IGT allocator is used in that case. VM range is passed
+ * to allocator (@start and @end) and allocation @strategy (suggestion
+ * to allocator about address allocation preferences).
  *
  * Returns:
  *
@@ -1042,10 +1044,10 @@ struct intel_bb *intel_bb_create_full(int fd, uint32_t ctx, uint32_t vm,
 				      const intel_ctx_cfg_t *cfg, uint32_t size,
 				      uint64_t start, uint64_t end,
 				      uint64_t alignment, uint8_t allocator_type,
-				      enum allocator_strategy strategy)
+				      enum allocator_strategy strategy, uint64_t region)
 {
 	return __intel_bb_create(fd, ctx, vm, cfg, size, false, start, end,
-				 alignment, allocator_type, strategy);
+				 alignment, allocator_type, strategy, region);
 }
 
 /**
@@ -1071,7 +1073,8 @@ struct intel_bb *intel_bb_create_with_allocator(int fd, uint32_t ctx, uint32_t v
 						uint8_t allocator_type)
 {
 	return __intel_bb_create(fd, ctx, vm, cfg, size, false, 0, 0, 0,
-				 allocator_type, ALLOC_STRATEGY_HIGH_TO_LOW);
+				 allocator_type, ALLOC_STRATEGY_HIGH_TO_LOW,
+				 is_i915_device(fd) ? REGION_SMEM : vram_if_possible(fd, 0));
 }
 
 static bool aux_needs_softpin(int fd)
@@ -1106,12 +1109,14 @@ static bool has_ctx_cfg(struct intel_bb *ibb)
  */
 struct intel_bb *intel_bb_create(int fd, uint32_t size)
 {
-	bool relocs = is_i915_device(fd) && gem_has_relocations(fd);
+	bool is_i915 = is_i915_device(fd);
+	bool relocs = is_i915 && gem_has_relocations(fd);
 
 	return __intel_bb_create(fd, 0, 0, NULL, size,
 				 relocs && !aux_needs_softpin(fd), 0, 0, 0,
 				 INTEL_ALLOCATOR_SIMPLE,
-				 ALLOC_STRATEGY_HIGH_TO_LOW);
+				 ALLOC_STRATEGY_HIGH_TO_LOW,
+				 is_i915 ? REGION_SMEM : vram_if_possible(fd, 0));
 }
 
 /**
@@ -1132,13 +1137,42 @@ struct intel_bb *intel_bb_create(int fd, uint32_t size)
 struct intel_bb *
 intel_bb_create_with_context(int fd, uint32_t ctx, uint32_t vm,
 			     const intel_ctx_cfg_t *cfg, uint32_t size)
+{
+	bool is_i915 = is_i915_device(fd);
+	bool relocs = is_i915 && gem_has_relocations(fd);
+
+	return __intel_bb_create(fd, ctx, vm, cfg, size,
+				 relocs && !aux_needs_softpin(fd), 0, 0, 0,
+				 INTEL_ALLOCATOR_SIMPLE,
+				 ALLOC_STRATEGY_HIGH_TO_LOW,
+				 is_i915 ? REGION_SMEM : vram_if_possible(fd, 0));
+}
+
+/**
+ * intel_bb_create_with_context_in_region:
+ * @fd: drm fd - i915 or xe
+ * @ctx: for i915 context id, for xe engine id
+ * @vm: for xe vm_id, unused for i915
+ * @cfg: intel_ctx configuration, NULL for default context or legacy mode
+ * @size: size of the batchbuffer
+ * @region: memory region
+ *
+ * Creates bb with context passed in @ctx in memory region passed in @memory.
+ *
+ * Returns:
+ *
+ * Pointer the intel_bb, asserts on failure.
+ */
+struct intel_bb *
+intel_bb_create_with_context_in_region(int fd, uint32_t ctx, uint32_t vm,
+				       const intel_ctx_cfg_t *cfg, uint32_t size, uint64_t region)
 {
 	bool relocs = is_i915_device(fd) && gem_has_relocations(fd);
 
 	return __intel_bb_create(fd, ctx, vm, cfg, size,
 				 relocs && !aux_needs_softpin(fd), 0, 0, 0,
 				 INTEL_ALLOCATOR_SIMPLE,
-				 ALLOC_STRATEGY_HIGH_TO_LOW);
+				 ALLOC_STRATEGY_HIGH_TO_LOW, region);
 }
 
 /**
@@ -1158,7 +1192,8 @@ struct intel_bb *intel_bb_create_with_relocs(int fd, uint32_t size)
 	igt_require(is_i915_device(fd) && gem_has_relocations(fd));
 
 	return __intel_bb_create(fd, 0, 0, NULL, size, true, 0, 0, 0,
-				 INTEL_ALLOCATOR_NONE, ALLOC_STRATEGY_NONE);
+				 INTEL_ALLOCATOR_NONE, ALLOC_STRATEGY_NONE,
+				 REGION_SMEM);
 }
 
 /**
@@ -1183,7 +1218,8 @@ intel_bb_create_with_relocs_and_context(int fd, uint32_t ctx,
 	igt_require(is_i915_device(fd) && gem_has_relocations(fd));
 
 	return __intel_bb_create(fd, ctx, 0, cfg, size, true, 0, 0, 0,
-				 INTEL_ALLOCATOR_NONE, ALLOC_STRATEGY_NONE);
+				 INTEL_ALLOCATOR_NONE, ALLOC_STRATEGY_NONE,
+				 REGION_SMEM);
 }
 
 /**
@@ -1200,11 +1236,15 @@ intel_bb_create_with_relocs_and_context(int fd, uint32_t ctx,
  */
 struct intel_bb *intel_bb_create_no_relocs(int fd, uint32_t size)
 {
-	igt_require(gem_uses_full_ppgtt(fd));
+	bool is_i915 = is_i915_device(fd);
+
+	if (is_i915)
+		igt_require(gem_uses_full_ppgtt(fd));
 
 	return __intel_bb_create(fd, 0, 0, NULL, size, false, 0, 0, 0,
 				 INTEL_ALLOCATOR_SIMPLE,
-				 ALLOC_STRATEGY_HIGH_TO_LOW);
+				 ALLOC_STRATEGY_HIGH_TO_LOW,
+				 is_i915 ? REGION_SMEM : vram_if_possible(fd, 0));
 }
 
 static void __intel_bb_destroy_relocations(struct intel_bb *ibb)
diff --git a/lib/intel_batchbuffer.h b/lib/intel_batchbuffer.h
index cb32206e5..64121011c 100644
--- a/lib/intel_batchbuffer.h
+++ b/lib/intel_batchbuffer.h
@@ -309,7 +309,7 @@ struct intel_bb *
 intel_bb_create_full(int fd, uint32_t ctx, uint32_t vm,
 		     const intel_ctx_cfg_t *cfg, uint32_t size, uint64_t start,
 		     uint64_t end, uint64_t alignment, uint8_t allocator_type,
-		     enum allocator_strategy strategy);
+		     enum allocator_strategy strategy, uint64_t region);
 struct intel_bb *
 intel_bb_create_with_allocator(int fd, uint32_t ctx, uint32_t vm,
 			       const intel_ctx_cfg_t *cfg, uint32_t size,
@@ -318,6 +318,9 @@ struct intel_bb *intel_bb_create(int fd, uint32_t size);
 struct intel_bb *
 intel_bb_create_with_context(int fd, uint32_t ctx, uint32_t vm,
 			     const intel_ctx_cfg_t *cfg, uint32_t size);
+struct intel_bb *
+intel_bb_create_with_context_in_region(int fd, uint32_t ctx, uint32_t vm,
+				       const intel_ctx_cfg_t *cfg, uint32_t size, uint64_t region);
 struct intel_bb *intel_bb_create_with_relocs(int fd, uint32_t size);
 struct intel_bb *
 intel_bb_create_with_relocs_and_context(int fd, uint32_t ctx,
diff --git a/tests/intel/xe_pat.c b/tests/intel/xe_pat.c
index 153d9ce1d..b0b3ad8a7 100644
--- a/tests/intel/xe_pat.c
+++ b/tests/intel/xe_pat.c
@@ -384,7 +384,7 @@ static void pat_index_render(struct xe_pat_param *p)
 	ibb = intel_bb_create_full(fd, 0, 0, NULL, xe_get_default_alignment(fd),
 				   0, 0, p->size->alignment,
 				   INTEL_ALLOCATOR_SIMPLE,
-				   ALLOC_STRATEGY_HIGH_TO_LOW);
+				   ALLOC_STRATEGY_HIGH_TO_LOW, vram_if_possible(fd, 0));
 
 	size = width * height * bpp / 8;
 	stride = width * 4;
@@ -479,7 +479,7 @@ static void pat_index_dw(struct xe_pat_param *p)
 	ibb = intel_bb_create_full(fd, ctx, vm, NULL, xe_get_default_alignment(fd),
 				   0, 0, p->size->alignment,
 				   INTEL_ALLOCATOR_SIMPLE,
-				   ALLOC_STRATEGY_LOW_TO_HIGH);
+				   ALLOC_STRATEGY_LOW_TO_HIGH, vram_if_possible(fd, 0));
 
 	size = width * height * bpp / 8;
 	stride = width * 4;
-- 
2.34.1


^ permalink raw reply related	[flat|nested] 5+ messages in thread

* [PATCH i-g-t v2 3/4] lib/igt_psr: Add mechanism to check sink status as well
  2024-09-10 13:12 [PATCH i-g-t v2 0/4] PSR testing improvement by checking sink status Jouni Högander
  2024-09-10 13:12 ` [PATCH i-g-t v2 1/4] tests/intel/xe_drm_fdinfo: Implement virtual/parallel exec queues Jouni Högander
  2024-09-10 13:12 ` [PATCH i-g-t v2 2/4] lib/intel_batchbuffer: Introduce intel_bb_create_with_context_in_region Jouni Högander
@ 2024-09-10 13:12 ` Jouni Högander
  2 siblings, 0 replies; 5+ messages in thread
From: Jouni Högander @ 2024-09-10 13:12 UTC (permalink / raw)
  To: igt-dev; +Cc: ramanaidu.naladala, Jouni Högander

We have seen PSR testcases pass even though the panel is not even aware of
PSR being used. This can happen because we are currently not checking sink
PSR status at all. Also, the sink might have detected errors, but our
testcases currently don't care about that.

Close the gap described above by adding a new interface to check sink error
status. Also add a sink status check as part of the psr_is_active check.

Signed-off-by: Jouni Högander <jouni.hogander@intel.com>
Reviewed-by: Naladala Ramanaidu <ramanaidu.naladala@intel.com>
---
 lib/igt_psr.c | 52 ++++++++++++++++++++++++++++++++++++++++++++++++++-
 lib/igt_psr.h |  1 +
 2 files changed, 52 insertions(+), 1 deletion(-)

diff --git a/lib/igt_psr.c b/lib/igt_psr.c
index e3e7577eb..47517b5dc 100644
--- a/lib/igt_psr.c
+++ b/lib/igt_psr.c
@@ -52,6 +52,19 @@ bool selective_fetch_check(int debugfs_fd, igt_output_t *output)
 
 	return strstr(buf, "PSR2 selective fetch: enabled");
 }
+static bool psr_active_sink_check(int debugfs_fd, igt_output_t *output)
+{
+	char debugfs_file[128] = {0};
+	char buf[PSR_STATUS_MAX_LEN];
+	int ret;
+
+	sprintf(debugfs_file, "%s/i915_psr_sink_status", output->name);
+	ret = igt_debugfs_simple_read(debugfs_fd, debugfs_file, buf,
+				      sizeof(buf));
+	igt_assert_f(ret >= 1, "Failed to read sink status\n");
+
+	return strstr(buf, "0x2 [active, display from RFB]");
+}
 
 /*
  * Checks if Early Transport is enabled in PSR status by reading the debugfs.
@@ -72,6 +85,7 @@ static bool psr_active_check(int debugfs_fd, enum psr_mode mode, igt_output_t *o
 	char buf[PSR_STATUS_MAX_LEN];
 	drmModeConnector *c;
 	const char *state;
+	bool active;
 	int ret;
 
 	if (mode == PR_MODE || mode == PR_MODE_SEL_FETCH) {
@@ -100,7 +114,11 @@ static bool psr_active_check(int debugfs_fd, enum psr_mode mode, igt_output_t *o
 
 	igt_skip_on(strstr(buf, "PSR sink not reliable: yes"));
 
-	return strstr(buf, state);
+	active = strstr(buf, state);
+	if (active && output)
+		active = psr_active_sink_check(debugfs_fd, output);
+
+	return active;
 }
 
 /*
@@ -297,6 +315,38 @@ bool psr_sink_support(int device, int debugfs_fd, enum psr_mode mode, igt_output
 	}
 }
 
+/**
+ * psr_sink_error_check
+ * Check and assert on PSR errors detected by panel
+ *
+ * Returns:
+ * None
+ */
+void psr_sink_error_check(int debugfs_fd, enum psr_mode mode, igt_output_t *output)
+{
+	char *line;
+	char debugfs_file[128] = {0};
+	char buf[PSR_STATUS_MAX_LEN];
+	int ret;
+
+	sprintf(debugfs_file, "%s/i915_psr_sink_status", output->name);
+	ret = igt_debugfs_simple_read(debugfs_fd, debugfs_file, buf,
+				      sizeof(buf));
+	igt_assert_f(ret >= 1, "Failed to read sink status\n");
+
+	line = strstr(buf, "error status: 0x0");
+
+	/*
+	 * On certain PSR1 panels we are seeing "PSR VSC SDP
+	 * uncorrectable error" bit set even it is applicable for PSR1
+	 * only
+	 */
+	if (!line && mode == PSR_MODE_1)
+		line = strstr(buf, "Sink PSR error status: 0x4");
+
+	igt_assert_f(line, "Sink detected PSR error(s):\n%s\n", buf);
+}
+
 #define PSR2_SU_BLOCK_STR_LOOKUP "PSR2 SU blocks:\n0\t"
 
 /* Return the the last or last but one su blocks */
diff --git a/lib/igt_psr.h b/lib/igt_psr.h
index a7ebd0739..7639f8d46 100644
--- a/lib/igt_psr.h
+++ b/lib/igt_psr.h
@@ -56,6 +56,7 @@ bool psr_long_wait_update(int debugfs_fd, enum psr_mode mode, igt_output_t *outp
 bool psr_enable(int device, int debugfs_fd, enum psr_mode, igt_output_t *output);
 bool psr_disable(int device, int debugfs_fd, igt_output_t *output);
 bool psr_sink_support(int device, int debugfs_fd, enum psr_mode mode, igt_output_t *output);
+void psr_sink_error_check(int debugfs_fd, enum psr_mode mode, igt_output_t *output);
 bool psr2_wait_su(int debugfs_fd, uint16_t *num_su_blocks);
 void psr_print_debugfs(int debugfs_fd);
 enum psr_mode psr_get_mode(int debugfs_fd, igt_output_t *output);
-- 
2.34.1


^ permalink raw reply related	[flat|nested] 5+ messages in thread

* Re: [PATCH i-g-t v2 1/4] tests/intel/xe_drm_fdinfo: Implement virtual/parallel exec queues
  2024-09-10 13:12 ` [PATCH i-g-t v2 1/4] tests/intel/xe_drm_fdinfo: Implement virtual/parallel exec queues Jouni Högander
@ 2024-09-10 13:18   ` Hogander, Jouni
  0 siblings, 0 replies; 5+ messages in thread
From: Hogander, Jouni @ 2024-09-10 13:18 UTC (permalink / raw)
  To: igt-dev@lists.freedesktop.org
  Cc: Brost, Matthew, Naladala, Ramanaidu, Nerlige Ramappa, Umesh,
	De Marchi, Lucas

Hello,

I made a mistake when sending these patches. Please ignore this and
"lib/intel_batchbuffer: Introduce
intel_bb_create_with_context_in_region" sent by me.

Sorry for the inconvenience,

Jouni Högander

On Tue, 2024-09-10 at 16:12 +0300, Jouni Högander wrote:
> From: Lucas De Marchi <lucas.demarchi@intel.com>
> 
> Implement a similar function to utilization_single(), but also taking
> virtual/parallel into account. I chose some different variable names
> to make it more obvious what exactly it is testing and integrated
> with the xe_gt_fill_engines_by_class() function recently added.
> A possible refactor in the future is to make the other tests use
> this function and remove utilization_single().
> 
> Based on previous patch by Umesh.
> 
> Cc: Umesh Nerlige Ramappa <umesh.nerlige.ramappa@intel.com>
> Reviewed-by: Matthew Brost <matthew.brost@intel.com>
> Link:
> https://lore.kernel.org/r/20240904225746.2857448-3-lucas.demarchi@intel.com
> Signed-off-by: Lucas De Marchi <lucas.demarchi@intel.com>
> ---
>  tests/intel/xe_drm_fdinfo.c | 124
> +++++++++++++++++++++++++++++++++++-
>  1 file changed, 123 insertions(+), 1 deletion(-)
> 
> diff --git a/tests/intel/xe_drm_fdinfo.c
> b/tests/intel/xe_drm_fdinfo.c
> index 8acb95040..747b6155c 100644
> --- a/tests/intel/xe_drm_fdinfo.c
> +++ b/tests/intel/xe_drm_fdinfo.c
> @@ -12,6 +12,8 @@
>  #include "xe/xe_ioctl.h"
>  #include "xe/xe_query.h"
>  #include "xe/xe_spin.h"
> +#include "xe/xe_util.h"
> +
>  /**
>   * TEST: xe drm fdinfo
>   * Description: Read and verify drm client memory consumption and
> engine utilization using fdinfo
> @@ -67,6 +69,8 @@ IGT_TEST_DESCRIPTION("Read and verify drm client
> memory consumption and engine u
>  #define TEST_BUSY              (1 << 0)
>  #define TEST_TRAILING_IDLE     (1 << 1)
>  #define TEST_ISOLATION         (1 << 2)
> +#define TEST_VIRTUAL           (1 << 3)
> +#define TEST_PARALLEL          (1 << 4)
>  
>  enum expected_load {
>         EXPECTED_LOAD_IDLE,
> @@ -715,10 +719,102 @@ utilization_all_full_load(int fd)
>  
>         xe_vm_destroy(fd, vm);
>  }
> +
> +/**
> + * SUBTEST: %s-utilization-single-idle
> + * Description: Check that each engine shows no load
> + *
> + * SUBTEST: %s-utilization-single-full-load
> + * Description: Check that each engine shows full load
> + *
> + * SUBTEST: %s-utilization-single-full-load-isolation
> + * Description: Check that each engine load does not spill over to
> other drm clients
> + *
> + * arg[1]:
> + *
> + * @virtual:                   virtual
> + * @parallel:                  parallel
> + */
> +static void
> +utilization_multi(int fd, int gt, int class, unsigned int flags)
> +{
> +       struct pceu_cycles pceu[2][DRM_XE_ENGINE_CLASS_COMPUTE + 1];
> +       struct pceu_cycles pceu_spill[2][DRM_XE_ENGINE_CLASS_COMPUTE
> + 1];
> +       struct drm_xe_engine_class_instance
> eci[XE_MAX_ENGINE_INSTANCE];
> +       struct spin_ctx *ctx = NULL;
> +       enum expected_load expected_load;
> +       int fd_spill, num_placements;
> +       uint32_t vm;
> +       bool virtual = flags & TEST_VIRTUAL;
> +       bool parallel = flags & TEST_PARALLEL;
> +       uint16_t width;
> +
> +       igt_assert(virtual ^ parallel);
> +
> +       num_placements = xe_gt_fill_engines_by_class(fd, gt, class,
> eci);
> +       if (num_placements < 2)
> +               return;
> +
> +       if (parallel) {
> +               width = num_placements;
> +               num_placements = 1;
> +       } else {
> +               width = 1;
> +       }
> +
> +       if (flags & TEST_ISOLATION)
> +               fd_spill = drm_reopen_driver(fd);
> +
> +       vm = xe_vm_create(fd, 0, 0);
> +       if (flags & TEST_BUSY) {
> +               ctx = spin_ctx_init(fd, eci, vm, width,
> num_placements);
> +               spin_sync_start(fd, ctx);
> +       }
> +
> +       read_engine_cycles(fd, pceu[0]);
> +       if (flags & TEST_ISOLATION)
> +               read_engine_cycles(fd_spill, pceu_spill[0]);
> +
> +       usleep(batch_duration_usec);
> +       if (flags & TEST_TRAILING_IDLE)
> +               spin_sync_end(fd, ctx);
> +
> +       read_engine_cycles(fd, pceu[1]);
> +       if (flags & TEST_ISOLATION)
> +               read_engine_cycles(fd_spill, pceu_spill[1]);
> +
> +       expected_load = flags & TEST_BUSY ?
> +              EXPECTED_LOAD_FULL : EXPECTED_LOAD_IDLE;
> +       check_results(pceu[0], pceu[1], class, width, expected_load);
> +
> +       if (flags & TEST_ISOLATION) {
> +               /*
> +                * Load from one client shouldn't spill on another,
> +                * so check for idle
> +                */
> +               check_results(pceu_spill[0], pceu_spill[1], class,
> width,
> +                             EXPECTED_LOAD_IDLE);
> +               close(fd_spill);
> +       }
> +
> +       spin_sync_end(fd, ctx);
> +       spin_ctx_destroy(fd, ctx);
> +
> +       xe_vm_destroy(fd, vm);
> +}
> +
>  igt_main
>  {
> +       const struct section {
> +               const char *name;
> +               unsigned int flags;
> +       } sections[] = {
> +               { .name = "virtual", .flags = TEST_VIRTUAL },
> +               { .name = "parallel", .flags = TEST_PARALLEL },
> +               { }
> +       };
>         struct drm_xe_engine_class_instance *hwe;
> -       int xe;
> +       int xe, gt, class;
>  
>         igt_fixture {
>                 struct drm_client_fdinfo info = { };
> @@ -775,6 +871,32 @@ igt_main
>         igt_subtest("utilization-all-full-load")
>                 utilization_all_full_load(xe);
>  
> +
> +       for (const struct section *s = sections; s->name; s++) {
> +               igt_subtest_f("%s-utilization-single-idle", s->name)
> +                       xe_for_each_gt(xe, gt)
> +                               xe_for_each_engine_class(class)
> +                                       utilization_multi(xe, gt,
> class, s->flags);
> +
> +               igt_subtest_f("%s-utilization-single-full-load", s-
> >name)
> +                       xe_for_each_gt(xe, gt)
> +                               xe_for_each_engine_class(class)
> +                                       utilization_multi(xe, gt,
> class,
> +                                                         s->flags |
> +                                                         TEST_BUSY |
> +                                                        
> TEST_TRAILING_IDLE);
> +
> +               igt_subtest_f("%s-utilization-single-full-load-
> isolation",
> +                             s->name)
> +                       xe_for_each_gt(xe, gt)
> +                               xe_for_each_engine_class(class)
> +                                       utilization_multi(xe, gt,
> class,
> +                                                         s->flags |
> +                                                         TEST_BUSY |
> +                                                        
> TEST_TRAILING_IDLE |
> +                                                        
> TEST_ISOLATION);
> +       }
> +
>         igt_fixture {
>                 drm_close_driver(xe);
>         }


^ permalink raw reply	[flat|nested] 5+ messages in thread

end of thread, other threads:[~2024-09-10 13:18 UTC | newest]

Thread overview: 5+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2024-09-10 13:12 [PATCH i-g-t v2 0/4] PSR testing improvement by checking sink status Jouni Högander
2024-09-10 13:12 ` [PATCH i-g-t v2 1/4] tests/intel/xe_drm_fdinfo: Implement virtual/parallel exec queues Jouni Högander
2024-09-10 13:18   ` Hogander, Jouni
2024-09-10 13:12 ` [PATCH i-g-t v2 2/4] lib/intel_batchbuffer: Introduce intel_bb_create_with_context_in_region Jouni Högander
2024-09-10 13:12 ` [PATCH i-g-t v2 3/4] lib/igt_psr: Add mechanism to check sink status as well Jouni Högander

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox