Intel-GFX Archive on lore.kernel.org
 help / color / mirror / Atom feed
* [Intel-gfx] [CI] drm/i915/gt: Refactor hangcheck selftest to use igt_spinner
@ 2023-08-19 22:50 Andi Shyti
  2023-08-19 23:19 ` [Intel-gfx] ✗ Fi.CI.CHECKPATCH: warning for " Patchwork
                   ` (10 more replies)
  0 siblings, 11 replies; 14+ messages in thread
From: Andi Shyti @ 2023-08-19 22:50 UTC (permalink / raw)
  To: Jonathan Cavitt; +Cc: intel-gfx

From: Jonathan Cavitt <jonathan.cavitt@intel.com>

The hangcheck live selftest contains duplicate declarations of some
functions that already exist in igt_spinner.c, such as the creation and
deconstruction of a spinning batch buffer (spinner) that hangs an engine.
It's undesirable to have such code duplicated, as the requirements for
the spinner may change with hardware updates, necessitating both
execution paths be updated.  To avoid this, have the hangcheck live
selftest use the declaration from igt_spinner.  This eliminates the need
for the declarations in the selftest itself, as well as the associated
local helper structures, so we can erase those.

Suggested-by: Matt Roper <matthew.d.roper@intel.com>
Signed-off-by: Jonathan Cavitt <jonathan.cavitt@intel.com>
---
 drivers/gpu/drm/i915/gt/selftest_hangcheck.c | 457 ++++++-------------
 drivers/gpu/drm/i915/selftests/igt_spinner.c |  15 +-
 drivers/gpu/drm/i915/selftests/igt_spinner.h |   9 +
 3 files changed, 155 insertions(+), 326 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
index 0dd4d00ee894e..36376a4ade8e4 100644
--- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
+++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
@@ -29,281 +29,40 @@
 
 #define IGT_IDLE_TIMEOUT 50 /* ms; time to wait after flushing between tests */
 
-struct hang {
-	struct intel_gt *gt;
-	struct drm_i915_gem_object *hws;
-	struct drm_i915_gem_object *obj;
-	struct i915_gem_context *ctx;
-	u32 *seqno;
-	u32 *batch;
-};
-
-static int hang_init(struct hang *h, struct intel_gt *gt)
-{
-	void *vaddr;
-	int err;
-
-	memset(h, 0, sizeof(*h));
-	h->gt = gt;
-
-	h->ctx = kernel_context(gt->i915, NULL);
-	if (IS_ERR(h->ctx))
-		return PTR_ERR(h->ctx);
-
-	GEM_BUG_ON(i915_gem_context_is_bannable(h->ctx));
-
-	h->hws = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
-	if (IS_ERR(h->hws)) {
-		err = PTR_ERR(h->hws);
-		goto err_ctx;
-	}
-
-	h->obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
-	if (IS_ERR(h->obj)) {
-		err = PTR_ERR(h->obj);
-		goto err_hws;
-	}
-
-	i915_gem_object_set_cache_coherency(h->hws, I915_CACHE_LLC);
-	vaddr = i915_gem_object_pin_map_unlocked(h->hws, I915_MAP_WB);
-	if (IS_ERR(vaddr)) {
-		err = PTR_ERR(vaddr);
-		goto err_obj;
-	}
-	h->seqno = memset(vaddr, 0xff, PAGE_SIZE);
-
-	vaddr = i915_gem_object_pin_map_unlocked(h->obj,
-						 intel_gt_coherent_map_type(gt, h->obj, false));
-	if (IS_ERR(vaddr)) {
-		err = PTR_ERR(vaddr);
-		goto err_unpin_hws;
-	}
-	h->batch = vaddr;
-
-	return 0;
-
-err_unpin_hws:
-	i915_gem_object_unpin_map(h->hws);
-err_obj:
-	i915_gem_object_put(h->obj);
-err_hws:
-	i915_gem_object_put(h->hws);
-err_ctx:
-	kernel_context_close(h->ctx);
-	return err;
-}
-
-static u64 hws_address(const struct i915_vma *hws,
-		       const struct i915_request *rq)
-{
-	return i915_vma_offset(hws) +
-	       offset_in_page(sizeof(u32) * rq->fence.context);
-}
-
-static struct i915_request *
-hang_create_request(struct hang *h, struct intel_engine_cs *engine)
-{
-	struct intel_gt *gt = h->gt;
-	struct i915_address_space *vm = i915_gem_context_get_eb_vm(h->ctx);
-	struct drm_i915_gem_object *obj;
-	struct i915_request *rq = NULL;
-	struct i915_vma *hws, *vma;
-	unsigned int flags;
-	void *vaddr;
-	u32 *batch;
-	int err;
-
-	obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
-	if (IS_ERR(obj)) {
-		i915_vm_put(vm);
-		return ERR_CAST(obj);
-	}
-
-	vaddr = i915_gem_object_pin_map_unlocked(obj, intel_gt_coherent_map_type(gt, obj, false));
-	if (IS_ERR(vaddr)) {
-		i915_gem_object_put(obj);
-		i915_vm_put(vm);
-		return ERR_CAST(vaddr);
-	}
-
-	i915_gem_object_unpin_map(h->obj);
-	i915_gem_object_put(h->obj);
-
-	h->obj = obj;
-	h->batch = vaddr;
-
-	vma = i915_vma_instance(h->obj, vm, NULL);
-	if (IS_ERR(vma)) {
-		i915_vm_put(vm);
-		return ERR_CAST(vma);
-	}
-
-	hws = i915_vma_instance(h->hws, vm, NULL);
-	if (IS_ERR(hws)) {
-		i915_vm_put(vm);
-		return ERR_CAST(hws);
-	}
-
-	err = i915_vma_pin(vma, 0, 0, PIN_USER);
-	if (err) {
-		i915_vm_put(vm);
-		return ERR_PTR(err);
-	}
-
-	err = i915_vma_pin(hws, 0, 0, PIN_USER);
-	if (err)
-		goto unpin_vma;
-
-	rq = igt_request_alloc(h->ctx, engine);
-	if (IS_ERR(rq)) {
-		err = PTR_ERR(rq);
-		goto unpin_hws;
-	}
-
-	err = igt_vma_move_to_active_unlocked(vma, rq, 0);
-	if (err)
-		goto cancel_rq;
-
-	err = igt_vma_move_to_active_unlocked(hws, rq, 0);
-	if (err)
-		goto cancel_rq;
-
-	batch = h->batch;
-	if (GRAPHICS_VER(gt->i915) >= 8) {
-		*batch++ = MI_STORE_DWORD_IMM_GEN4;
-		*batch++ = lower_32_bits(hws_address(hws, rq));
-		*batch++ = upper_32_bits(hws_address(hws, rq));
-		*batch++ = rq->fence.seqno;
-		*batch++ = MI_NOOP;
-
-		memset(batch, 0, 1024);
-		batch += 1024 / sizeof(*batch);
-
-		*batch++ = MI_NOOP;
-		*batch++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
-		*batch++ = lower_32_bits(i915_vma_offset(vma));
-		*batch++ = upper_32_bits(i915_vma_offset(vma));
-	} else if (GRAPHICS_VER(gt->i915) >= 6) {
-		*batch++ = MI_STORE_DWORD_IMM_GEN4;
-		*batch++ = 0;
-		*batch++ = lower_32_bits(hws_address(hws, rq));
-		*batch++ = rq->fence.seqno;
-		*batch++ = MI_NOOP;
-
-		memset(batch, 0, 1024);
-		batch += 1024 / sizeof(*batch);
-
-		*batch++ = MI_NOOP;
-		*batch++ = MI_BATCH_BUFFER_START | 1 << 8;
-		*batch++ = lower_32_bits(i915_vma_offset(vma));
-	} else if (GRAPHICS_VER(gt->i915) >= 4) {
-		*batch++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
-		*batch++ = 0;
-		*batch++ = lower_32_bits(hws_address(hws, rq));
-		*batch++ = rq->fence.seqno;
-		*batch++ = MI_NOOP;
-
-		memset(batch, 0, 1024);
-		batch += 1024 / sizeof(*batch);
-
-		*batch++ = MI_NOOP;
-		*batch++ = MI_BATCH_BUFFER_START | 2 << 6;
-		*batch++ = lower_32_bits(i915_vma_offset(vma));
-	} else {
-		*batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
-		*batch++ = lower_32_bits(hws_address(hws, rq));
-		*batch++ = rq->fence.seqno;
-		*batch++ = MI_NOOP;
-
-		memset(batch, 0, 1024);
-		batch += 1024 / sizeof(*batch);
-
-		*batch++ = MI_NOOP;
-		*batch++ = MI_BATCH_BUFFER_START | 2 << 6;
-		*batch++ = lower_32_bits(i915_vma_offset(vma));
-	}
-	*batch++ = MI_BATCH_BUFFER_END; /* not reached */
-	intel_gt_chipset_flush(engine->gt);
-
-	if (rq->engine->emit_init_breadcrumb) {
-		err = rq->engine->emit_init_breadcrumb(rq);
-		if (err)
-			goto cancel_rq;
-	}
-
-	flags = 0;
-	if (GRAPHICS_VER(gt->i915) <= 5)
-		flags |= I915_DISPATCH_SECURE;
-
-	err = rq->engine->emit_bb_start(rq, i915_vma_offset(vma), PAGE_SIZE, flags);
-
-cancel_rq:
-	if (err) {
-		i915_request_set_error_once(rq, err);
-		i915_request_add(rq);
-	}
-unpin_hws:
-	i915_vma_unpin(hws);
-unpin_vma:
-	i915_vma_unpin(vma);
-	i915_vm_put(vm);
-	return err ? ERR_PTR(err) : rq;
-}
-
-static u32 hws_seqno(const struct hang *h, const struct i915_request *rq)
-{
-	return READ_ONCE(h->seqno[rq->fence.context % (PAGE_SIZE/sizeof(u32))]);
-}
-
-static void hang_fini(struct hang *h)
-{
-	*h->batch = MI_BATCH_BUFFER_END;
-	intel_gt_chipset_flush(h->gt);
-
-	i915_gem_object_unpin_map(h->obj);
-	i915_gem_object_put(h->obj);
-
-	i915_gem_object_unpin_map(h->hws);
-	i915_gem_object_put(h->hws);
-
-	kernel_context_close(h->ctx);
-
-	igt_flush_test(h->gt->i915);
-}
-
-static bool wait_until_running(struct hang *h, struct i915_request *rq)
-{
-	return !(wait_for_us(i915_seqno_passed(hws_seqno(h, rq),
-					       rq->fence.seqno),
-			     10) &&
-		 wait_for(i915_seqno_passed(hws_seqno(h, rq),
-					    rq->fence.seqno),
-			  1000));
-}
-
 static int igt_hang_sanitycheck(void *arg)
 {
 	struct intel_gt *gt = arg;
 	struct i915_request *rq;
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
-	struct hang h;
+	struct igt_spinner spin;
 	int err;
 
 	/* Basic check that we can execute our hanging batch */
 
-	err = hang_init(&h, gt);
+	err = igt_spinner_init(&spin, gt);
 	if (err)
 		return err;
 
 	for_each_engine(engine, gt, id) {
 		struct intel_wedge_me w;
+		struct intel_context *ce;
 		long timeout;
 
 		if (!intel_engine_can_store_dword(engine))
 			continue;
 
-		rq = hang_create_request(&h, engine);
+		ce = intel_context_create(engine);
+		if (IS_ERR(ce)) {
+			err = PTR_ERR(ce);
+			pr_err("Failed to create context for %s, err=%d\n",
+			       engine->name, err);
+			goto fini;
+		}
+
+		rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
+		intel_context_put(ce);
+
 		if (IS_ERR(rq)) {
 			err = PTR_ERR(rq);
 			pr_err("Failed to create request for %s, err=%d\n",
@@ -312,10 +71,7 @@ static int igt_hang_sanitycheck(void *arg)
 		}
 
 		i915_request_get(rq);
-
-		*h.batch = MI_BATCH_BUFFER_END;
-		intel_gt_chipset_flush(engine->gt);
-
+		igt_spinner_end(&spin);
 		i915_request_add(rq);
 
 		timeout = 0;
@@ -336,7 +92,7 @@ static int igt_hang_sanitycheck(void *arg)
 	}
 
 fini:
-	hang_fini(&h);
+	igt_spinner_fini(&spin);
 	return err;
 }
 
@@ -686,7 +442,7 @@ static int __igt_reset_engine(struct intel_gt *gt, bool active)
 	struct i915_gpu_error *global = &gt->i915->gpu_error;
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
-	struct hang h;
+	struct igt_spinner spin;
 	int err = 0;
 
 	/* Check that we can issue an engine reset on an idle engine (no-op) */
@@ -695,7 +451,7 @@ static int __igt_reset_engine(struct intel_gt *gt, bool active)
 		return 0;
 
 	if (active) {
-		err = hang_init(&h, gt);
+		err = igt_spinner_init(&spin, gt);
 		if (err)
 			return err;
 	}
@@ -739,7 +495,17 @@ static int __igt_reset_engine(struct intel_gt *gt, bool active)
 			}
 
 			if (active) {
-				rq = hang_create_request(&h, engine);
+				struct intel_context *ce = intel_context_create(engine);
+				if (IS_ERR(ce)) {
+					err = PTR_ERR(ce);
+					pr_err("[%s] Create context failed: %d!\n",
+					       engine->name, err);
+					goto restore;
+				}
+
+				rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
+				intel_context_put(ce);
+
 				if (IS_ERR(rq)) {
 					err = PTR_ERR(rq);
 					pr_err("[%s] Create hang request failed: %d!\n",
@@ -750,11 +516,11 @@ static int __igt_reset_engine(struct intel_gt *gt, bool active)
 				i915_request_get(rq);
 				i915_request_add(rq);
 
-				if (!wait_until_running(&h, rq)) {
+				if (!igt_wait_for_spinner(&spin, rq)) {
 					struct drm_printer p = drm_info_printer(gt->i915->drm.dev);
 
 					pr_err("%s: Failed to start request %llx, at %x\n",
-					       __func__, rq->fence.seqno, hws_seqno(&h, rq));
+					       __func__, rq->fence.seqno, hws_seqno(&spin, rq));
 					intel_engine_dump(engine, &p,
 							  "%s\n", engine->name);
 
@@ -835,7 +601,7 @@ static int __igt_reset_engine(struct intel_gt *gt, bool active)
 	}
 
 	if (active)
-		hang_fini(&h);
+		igt_spinner_fini(&spin);
 
 	return err;
 }
@@ -967,7 +733,7 @@ static int __igt_reset_engines(struct intel_gt *gt,
 	struct intel_engine_cs *engine, *other;
 	struct active_engine *threads;
 	enum intel_engine_id id, tmp;
-	struct hang h;
+	struct igt_spinner spin;
 	int err = 0;
 
 	/* Check that issuing a reset on one engine does not interfere
@@ -978,12 +744,9 @@ static int __igt_reset_engines(struct intel_gt *gt,
 		return 0;
 
 	if (flags & TEST_ACTIVE) {
-		err = hang_init(&h, gt);
+		err = igt_spinner_init(&spin, gt);
 		if (err)
 			return err;
-
-		if (flags & TEST_PRIORITY)
-			h.ctx->sched.priority = 1024;
 	}
 
 	threads = kmalloc_array(I915_NUM_ENGINES, sizeof(*threads), GFP_KERNEL);
@@ -1057,7 +820,20 @@ static int __igt_reset_engines(struct intel_gt *gt,
 			}
 
 			if (flags & TEST_ACTIVE) {
-				rq = hang_create_request(&h, engine);
+				struct intel_context *ce = intel_context_create(engine);
+				if (IS_ERR(ce)) {
+					err = PTR_ERR(ce);
+					pr_err("[%s] Create context failed: %d!\n",
+					       engine->name, err);
+					goto restore;
+				}
+
+				if (flags & TEST_PRIORITY)
+					ce->gem_context->sched.priority = 1024;
+
+				rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
+				intel_context_put(ce);
+
 				if (IS_ERR(rq)) {
 					err = PTR_ERR(rq);
 					pr_err("[%s] Create hang request failed: %d!\n",
@@ -1068,11 +844,11 @@ static int __igt_reset_engines(struct intel_gt *gt,
 				i915_request_get(rq);
 				i915_request_add(rq);
 
-				if (!wait_until_running(&h, rq)) {
+				if (!igt_wait_for_spinner(&spin, rq)) {
 					struct drm_printer p = drm_info_printer(gt->i915->drm.dev);
 
 					pr_err("%s: Failed to start request %llx, at %x\n",
-					       __func__, rq->fence.seqno, hws_seqno(&h, rq));
+					       __func__, rq->fence.seqno, hws_seqno(&spin, rq));
 					intel_engine_dump(engine, &p,
 							  "%s\n", engine->name);
 
@@ -1240,7 +1016,7 @@ static int __igt_reset_engines(struct intel_gt *gt,
 		err = -EIO;
 
 	if (flags & TEST_ACTIVE)
-		hang_fini(&h);
+		igt_spinner_fini(&spin);
 
 	return err;
 }
@@ -1299,7 +1075,8 @@ static int igt_reset_wait(void *arg)
 	struct intel_engine_cs *engine;
 	struct i915_request *rq;
 	unsigned int reset_count;
-	struct hang h;
+	struct igt_spinner spin;
+	struct intel_context *ce;
 	long timeout;
 	int err;
 
@@ -1312,13 +1089,22 @@ static int igt_reset_wait(void *arg)
 
 	igt_global_reset_lock(gt);
 
-	err = hang_init(&h, gt);
+	err = igt_spinner_init(&spin, gt);
 	if (err) {
-		pr_err("[%s] Hang init failed: %d!\n", engine->name, err);
+		pr_err("[%s] Spinner init failed: %d!\n", engine->name, err);
 		goto unlock;
 	}
 
-	rq = hang_create_request(&h, engine);
+	ce = intel_context_create(engine);
+	if (IS_ERR(ce)) {
+		err = PTR_ERR(ce);
+		pr_err("[%s] Create context failed: %d!\n", engine->name, err);
+		goto fini;
+	}
+
+	rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
+	intel_context_put(ce);
+
 	if (IS_ERR(rq)) {
 		err = PTR_ERR(rq);
 		pr_err("[%s] Create hang request failed: %d!\n", engine->name, err);
@@ -1328,11 +1114,11 @@ static int igt_reset_wait(void *arg)
 	i915_request_get(rq);
 	i915_request_add(rq);
 
-	if (!wait_until_running(&h, rq)) {
+	if (!igt_wait_for_spinner(&spin, rq)) {
 		struct drm_printer p = drm_info_printer(gt->i915->drm.dev);
 
 		pr_err("%s: Failed to start request %llx, at %x\n",
-		       __func__, rq->fence.seqno, hws_seqno(&h, rq));
+		       __func__, rq->fence.seqno, hws_seqno(&spin, rq));
 		intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name);
 
 		intel_gt_set_wedged(gt);
@@ -1360,7 +1146,7 @@ static int igt_reset_wait(void *arg)
 out_rq:
 	i915_request_put(rq);
 fini:
-	hang_fini(&h);
+	igt_spinner_fini(&spin);
 unlock:
 	igt_global_reset_unlock(gt);
 
@@ -1433,7 +1219,8 @@ static int __igt_reset_evict_vma(struct intel_gt *gt,
 	struct task_struct *tsk = NULL;
 	struct i915_request *rq;
 	struct evict_vma arg;
-	struct hang h;
+	struct igt_spinner spin;
+	struct intel_context *ce;
 	unsigned int pin_flags;
 	int err;
 
@@ -1447,9 +1234,9 @@ static int __igt_reset_evict_vma(struct intel_gt *gt,
 
 	/* Check that we can recover an unbind stuck on a hanging request */
 
-	err = hang_init(&h, gt);
+	err = igt_spinner_init(&spin, gt);
 	if (err) {
-		pr_err("[%s] Hang init failed: %d!\n", engine->name, err);
+		pr_err("[%s] Spinner init failed: %d!\n", engine->name, err);
 		return err;
 	}
 
@@ -1475,7 +1262,16 @@ static int __igt_reset_evict_vma(struct intel_gt *gt,
 		goto out_obj;
 	}
 
-	rq = hang_create_request(&h, engine);
+	ce = intel_context_create(engine);
+	if (IS_ERR(ce)) {
+		err = PTR_ERR(ce);
+		pr_err("[%s] Create context failed: %d!\n", engine->name, err);
+		goto out_obj;
+	}
+
+	rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
+	intel_context_put(ce);
+
 	if (IS_ERR(rq)) {
 		err = PTR_ERR(rq);
 		pr_err("[%s] Create hang request failed: %d!\n", engine->name, err);
@@ -1517,11 +1313,11 @@ static int __igt_reset_evict_vma(struct intel_gt *gt,
 	if (err)
 		goto out_rq;
 
-	if (!wait_until_running(&h, rq)) {
+	if (!igt_wait_for_spinner(&spin, rq)) {
 		struct drm_printer p = drm_info_printer(gt->i915->drm.dev);
 
 		pr_err("%s: Failed to start request %llx, at %x\n",
-		       __func__, rq->fence.seqno, hws_seqno(&h, rq));
+		       __func__, rq->fence.seqno, hws_seqno(&spin, rq));
 		intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name);
 
 		intel_gt_set_wedged(gt);
@@ -1571,7 +1367,7 @@ static int __igt_reset_evict_vma(struct intel_gt *gt,
 out_obj:
 	i915_gem_object_put(obj);
 fini:
-	hang_fini(&h);
+	igt_spinner_fini(&spin);
 	if (intel_gt_is_wedged(gt))
 		return -EIO;
 
@@ -1638,20 +1434,21 @@ static int igt_reset_queue(void *arg)
 	struct i915_gpu_error *global = &gt->i915->gpu_error;
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
-	struct hang h;
+	struct igt_spinner spin;
 	int err;
 
 	/* Check that we replay pending requests following a hang */
 
 	igt_global_reset_lock(gt);
 
-	err = hang_init(&h, gt);
+	err = igt_spinner_init(&spin, gt);
 	if (err)
 		goto unlock;
 
 	for_each_engine(engine, gt, id) {
 		struct intel_selftest_saved_policy saved;
 		struct i915_request *prev;
+		struct intel_context *ce;
 		IGT_TIMEOUT(end_time);
 		unsigned int count;
 		bool using_guc = intel_engine_uses_guc(engine);
@@ -1668,7 +1465,16 @@ static int igt_reset_queue(void *arg)
 			}
 		}
 
-		prev = hang_create_request(&h, engine);
+		ce = intel_context_create(engine);
+		if (IS_ERR(ce)) {
+			err = PTR_ERR(ce);
+			pr_err("[%s] Create 'prev' context failed: %d!\n", engine->name, err);
+			goto restore;
+		}
+
+		prev = igt_spinner_create_request(&spin, ce, MI_NOOP);
+		intel_context_put(ce);
+
 		if (IS_ERR(prev)) {
 			err = PTR_ERR(prev);
 			pr_err("[%s] Create 'prev' hang request failed: %d!\n", engine->name, err);
@@ -1682,8 +1488,17 @@ static int igt_reset_queue(void *arg)
 		do {
 			struct i915_request *rq;
 			unsigned int reset_count;
+
+			ce = intel_context_create(engine);
+			if (IS_ERR(ce)) {
+				err = PTR_ERR(ce);
+				pr_err("[%s] Create context failed: %d!\n", engine->name, err);
+				goto restore;
+			}
+
+			rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
+			intel_context_put(ce);
 
-			rq = hang_create_request(&h, engine);
 			if (IS_ERR(rq)) {
 				err = PTR_ERR(rq);
 				pr_err("[%s] Create hang request failed: %d!\n", engine->name, err);
@@ -1715,12 +1530,12 @@ static int igt_reset_queue(void *arg)
 				goto restore;
 			}
 
-			if (!wait_until_running(&h, prev)) {
+			if (!igt_wait_for_spinner(&spin, prev)) {
 				struct drm_printer p = drm_info_printer(gt->i915->drm.dev);
 
 				pr_err("%s(%s): Failed to start request %llx, at %x\n",
 				       __func__, engine->name,
-				       prev->fence.seqno, hws_seqno(&h, prev));
+				       prev->fence.seqno, hws_seqno(&spin, prev));
 				intel_engine_dump(engine, &p,
 						  "%s\n", engine->name);
 
@@ -1768,9 +1583,7 @@ static int igt_reset_queue(void *arg)
 		pr_info("%s: Completed %d queued resets\n",
 			engine->name, count);
 
-		*h.batch = MI_BATCH_BUFFER_END;
-		intel_gt_chipset_flush(engine->gt);
-
+		igt_spinner_end(&spin);
 		i915_request_put(prev);
 
 restore:
@@ -1794,7 +1607,7 @@ static int igt_reset_queue(void *arg)
 	}
 
 fini:
-	hang_fini(&h);
+	igt_spinner_fini(&spin);
 unlock:
 	igt_global_reset_unlock(gt);
 
@@ -1809,7 +1622,8 @@ static int igt_handle_error(void *arg)
 	struct intel_gt *gt = arg;
 	struct i915_gpu_error *global = &gt->i915->gpu_error;
 	struct intel_engine_cs *engine;
-	struct hang h;
+	struct igt_spinner spin;
+	struct intel_context *ce;
 	struct i915_request *rq;
 	struct i915_gpu_coredump *error;
 	int err;
@@ -1824,13 +1638,22 @@ static int igt_handle_error(void *arg)
 	if (!engine || !intel_engine_can_store_dword(engine))
 		return 0;
 
-	err = hang_init(&h, gt);
+	err = igt_spinner_init(&spin, gt);
 	if (err) {
-		pr_err("[%s] Hang init failed: %d!\n", engine->name, err);
+		pr_err("[%s] Spinner init failed: %d!\n", engine->name, err);
 		return err;
 	}
 
-	rq = hang_create_request(&h, engine);
+	ce = intel_context_create(engine);
+	if (IS_ERR(ce)) {
+		err = PTR_ERR(ce);
+		pr_err("[%s] Create context failed: %d!\n", engine->name, err);
+		goto err_fini;
+	}
+
+	rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
+	intel_context_put(ce);
+
 	if (IS_ERR(rq)) {
 		err = PTR_ERR(rq);
 		pr_err("[%s] Create hang request failed: %d!\n", engine->name, err);
@@ -1840,11 +1663,11 @@ static int igt_handle_error(void *arg)
 	i915_request_get(rq);
 	i915_request_add(rq);
 
-	if (!wait_until_running(&h, rq)) {
+	if (!igt_wait_for_spinner(&spin, rq)) {
 		struct drm_printer p = drm_info_printer(gt->i915->drm.dev);
 
 		pr_err("%s: Failed to start request %llx, at %x\n",
-		       __func__, rq->fence.seqno, hws_seqno(&h, rq));
+		       __func__, rq->fence.seqno, hws_seqno(&spin, rq));
 		intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name);
 
 		intel_gt_set_wedged(gt);
@@ -1869,7 +1692,7 @@ static int igt_handle_error(void *arg)
 err_request:
 	i915_request_put(rq);
 err_fini:
-	hang_fini(&h);
+	igt_spinner_fini(&spin);
 	return err;
 }
 
@@ -1910,20 +1733,30 @@ static int igt_atomic_reset_engine(struct intel_engine_cs *engine,
 				   const struct igt_atomic_section *p)
 {
 	struct i915_request *rq;
-	struct hang h;
+	struct igt_spinner spin;
+	struct intel_context *ce;
 	int err;
 
 	err = __igt_atomic_reset_engine(engine, p, "idle");
 	if (err)
 		return err;
 
-	err = hang_init(&h, engine->gt);
+	err = igt_spinner_init(&spin, engine->gt);
 	if (err) {
 		pr_err("[%s] Hang init failed: %d!\n", engine->name, err);
 		return err;
 	}
 
-	rq = hang_create_request(&h, engine);
+	ce = intel_context_create(engine);
+	if (IS_ERR(ce)) {
+		err = PTR_ERR(ce);
+		pr_err("[%s] Create context failed: %d!\n", engine->name, err);
+		goto out;
+	}
+
+	rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
+	intel_context_put(ce);
+
 	if (IS_ERR(rq)) {
 		err = PTR_ERR(rq);
 		pr_err("[%s] Create hang request failed: %d!\n", engine->name, err);
@@ -1933,12 +1766,12 @@ static int igt_atomic_reset_engine(struct intel_engine_cs *engine,
 	i915_request_get(rq);
 	i915_request_add(rq);
 
-	if (wait_until_running(&h, rq)) {
+	if (igt_wait_for_spinner(&spin, rq)) {
 		err = __igt_atomic_reset_engine(engine, p, "active");
 	} else {
 		pr_err("%s(%s): Failed to start request %llx, at %x\n",
 		       __func__, engine->name,
-		       rq->fence.seqno, hws_seqno(&h, rq));
+		       rq->fence.seqno, hws_seqno(&spin, rq));
 		intel_gt_set_wedged(engine->gt);
 		err = -EIO;
 	}
@@ -1954,7 +1787,7 @@ static int igt_atomic_reset_engine(struct intel_engine_cs *engine,
 
 	i915_request_put(rq);
 out:
-	hang_fini(&h);
+	igt_spinner_fini(&spin);
 	return err;
 }
 
diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.c b/drivers/gpu/drm/i915/selftests/igt_spinner.c
index 8c3e1f20e5a15..fc4f33076ec7b 100644
--- a/drivers/gpu/drm/i915/selftests/igt_spinner.c
+++ b/drivers/gpu/drm/i915/selftests/igt_spinner.c
@@ -108,15 +108,10 @@ int igt_spinner_pin(struct igt_spinner *spin,
 	return 0;
 }
 
-static unsigned int seqno_offset(u64 fence)
-{
-	return offset_in_page(sizeof(u32) * fence);
-}
-
 static u64 hws_address(const struct i915_vma *hws,
 		       const struct i915_request *rq)
 {
-	return i915_vma_offset(hws) + seqno_offset(rq->fence.context);
+	return i915_vma_offset(hws) + offset_in_page(sizeof(u32) * rq->fence.context);
 }
 
 struct i915_request *
@@ -216,14 +211,6 @@ igt_spinner_create_request(struct igt_spinner *spin,
 	return err ? ERR_PTR(err) : rq;
 }
 
-static u32
-hws_seqno(const struct igt_spinner *spin, const struct i915_request *rq)
-{
-	u32 *seqno = spin->seqno + seqno_offset(rq->fence.context);
-
-	return READ_ONCE(*seqno);
-}
-
 void igt_spinner_end(struct igt_spinner *spin)
 {
 	if (!spin->batch)
diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.h b/drivers/gpu/drm/i915/selftests/igt_spinner.h
index fbe5b1625b05e..faff1008999a5 100644
--- a/drivers/gpu/drm/i915/selftests/igt_spinner.h
+++ b/drivers/gpu/drm/i915/selftests/igt_spinner.h
@@ -40,4 +40,13 @@ void igt_spinner_end(struct igt_spinner *spin);
 
 bool igt_wait_for_spinner(struct igt_spinner *spin, struct i915_request *rq);
 
+static inline u32
+hws_seqno(const struct igt_spinner *spin, const struct i915_request *rq)
+{
+	u32 *seqno = spin->seqno + offset_in_page(sizeof(u32) * rq->fence.context);
+
+	return READ_ONCE(*seqno);
+}
+
+
 #endif
-- 
2.40.1


^ permalink raw reply related	[flat|nested] 14+ messages in thread
* [Intel-gfx] [PATCH 1/3] drm/i915: Create a blitter context for GGTT updates
@ 2023-08-18 19:42 Andi Shyti
  2023-08-18 19:42 ` [Intel-gfx] [CI] drm/i915/gt: Refactor hangcheck selftest to use igt_spinner Andi Shyti
  0 siblings, 1 reply; 14+ messages in thread
From: Andi Shyti @ 2023-08-18 19:42 UTC (permalink / raw)
  To: Jonathan Cavitt; +Cc: intel-gfx

From: Nirmoy Das <nirmoy.das@intel.com>

Create a separate blitter context if a platform requires
GGTT updates using MI_UPDATE_GTT blitter command.

Subsequent patch will introduce methods to update
GGTT using this blitter context and MI_UPDATE_GTT blitter
command.

Signed-off-by: Nirmoy Das <nirmoy.das@intel.com>
---
 drivers/gpu/drm/i915/gt/intel_engine.h       |  4 ++
 drivers/gpu/drm/i915/gt/intel_engine_cs.c    | 44 +++++++++++++++++++-
 drivers/gpu/drm/i915/gt/intel_engine_types.h |  3 ++
 drivers/gpu/drm/i915/gt/intel_gtt.c          |  4 ++
 drivers/gpu/drm/i915/gt/intel_gtt.h          |  2 +
 5 files changed, 56 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h
index b58c30ac8ef02..ee36db2fdaa7c 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine.h
@@ -170,6 +170,8 @@ intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value)
 #define I915_GEM_HWS_SEQNO		0x40
 #define I915_GEM_HWS_SEQNO_ADDR		(I915_GEM_HWS_SEQNO * sizeof(u32))
 #define I915_GEM_HWS_MIGRATE		(0x42 * sizeof(u32))
+#define I915_GEM_HWS_GGTT_BLIT		0x46
+#define I915_GEM_HWS_GGTT_BLIT_ADDR	(I915_GEM_HWS_GGTT_BLIT * sizeof(u32))
 #define I915_GEM_HWS_PXP		0x60
 #define I915_GEM_HWS_PXP_ADDR		(I915_GEM_HWS_PXP * sizeof(u32))
 #define I915_GEM_HWS_GSC		0x62
@@ -356,4 +358,6 @@ u64 intel_clamp_preempt_timeout_ms(struct intel_engine_cs *engine, u64 value);
 u64 intel_clamp_stop_timeout_ms(struct intel_engine_cs *engine, u64 value);
 u64 intel_clamp_timeslice_duration_ms(struct intel_engine_cs *engine, u64 value);
 
+void intel_engine_blitter_context_set_ready(struct intel_gt *gt, bool ready);
+bool intel_engine_blitter_context_ready(struct intel_gt *gt);
 #endif /* _INTEL_RINGBUFFER_H_ */
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index ee15486fed0da..9871ee5ab754b 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -27,6 +27,7 @@
 #include "intel_gt_mcr.h"
 #include "intel_gt_pm.h"
 #include "intel_gt_requests.h"
+#include "intel_gtt.h"
 #include "intel_lrc.h"
 #include "intel_lrc_reg.h"
 #include "intel_reset.h"
@@ -1419,6 +1420,34 @@ void intel_engine_destroy_pinned_context(struct intel_context *ce)
 	intel_context_put(ce);
 }
 
+void intel_engine_blitter_context_set_ready(struct intel_gt *gt, bool ready)
+{
+	struct intel_engine_cs *engine = gt->engine[BCS0];
+
+	if (engine && engine->blitter_context)
+		atomic_set(&engine->blitter_context_ready, ready ? 1 : 0);
+}
+
+bool intel_engine_blitter_context_ready(struct intel_gt *gt)
+{
+	struct intel_engine_cs *engine = gt->engine[BCS0];
+
+	if (engine)
+		return atomic_read(&engine->blitter_context_ready) == 1;
+
+	return false;
+}
+
+static struct intel_context *
+create_ggtt_blitter_context(struct intel_engine_cs *engine)
+{
+	static struct lock_class_key kernel;
+
+	/* MI_UPDATE_GTT can insert up to 512 PTE entries so get a bigger ring */
+	return intel_engine_create_pinned_context(engine, engine->gt->vm, SZ_512K,
+						  I915_GEM_HWS_GGTT_BLIT_ADDR,
+						  &kernel, "ggtt_blitter_context");
+}
 static struct intel_context *
 create_kernel_context(struct intel_engine_cs *engine)
 {
@@ -1442,7 +1471,7 @@ create_kernel_context(struct intel_engine_cs *engine)
  */
 static int engine_init_common(struct intel_engine_cs *engine)
 {
-	struct intel_context *ce;
+	struct intel_context *ce, *bce = NULL;
 	int ret;
 
 	engine->set_default_submission(engine);
@@ -1458,6 +1487,15 @@ static int engine_init_common(struct intel_engine_cs *engine)
 	ce = create_kernel_context(engine);
 	if (IS_ERR(ce))
 		return PTR_ERR(ce);
+	/*
+	 * Create a separate pinned context for GGTT update using blitter
+	 * if a platform requires such a service.
+	 */
+	if (i915_ggtt_require_blitter(engine->i915) && engine->id == BCS0) {
+		bce = create_ggtt_blitter_context(engine);
+		if (IS_ERR(bce))
+			return PTR_ERR(bce);
+	}
 
 	ret = measure_breadcrumb_dw(ce);
 	if (ret < 0)
@@ -1465,6 +1503,7 @@ static int engine_init_common(struct intel_engine_cs *engine)
 
 	engine->emit_fini_breadcrumb_dw = ret;
 	engine->kernel_context = ce;
+	engine->blitter_context = bce;
 
 	return 0;
 
@@ -1537,6 +1576,9 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
 
 	if (engine->kernel_context)
 		intel_engine_destroy_pinned_context(engine->kernel_context);
+	if (engine->blitter_context)
+		intel_engine_destroy_pinned_context(engine->blitter_context);
+
 
 	GEM_BUG_ON(!llist_empty(&engine->barrier_tasks));
 	cleanup_status_page(engine);
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index e99a6fa03d453..62095c0d8783d 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -415,6 +415,9 @@ struct intel_engine_cs {
 	struct llist_head barrier_tasks;
 
 	struct intel_context *kernel_context; /* pinned */
+	struct intel_context *blitter_context; /* pinned, only for BCS0 */
+	/* mark the blitter engine's availability status */
+	atomic_t blitter_context_ready;
 
 	/**
 	 * pinned_contexts_list: List of pinned contexts. This list is only
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.c b/drivers/gpu/drm/i915/gt/intel_gtt.c
index 13944a14ea2d1..9c77c97670fea 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.c
@@ -21,6 +21,10 @@
 #include "intel_gt_regs.h"
 #include "intel_gtt.h"
 
+bool i915_ggtt_require_blitter(struct drm_i915_private *i915)
+{
+	return IS_METEORLAKE(i915);
+}
 
 static bool intel_ggtt_update_needs_vtd_wa(struct drm_i915_private *i915)
 {
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.h b/drivers/gpu/drm/i915/gt/intel_gtt.h
index 4d6296cdbcfdd..9710eb031fb2c 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.h
@@ -688,4 +688,6 @@ static inline struct sgt_dma {
 	return (struct sgt_dma){ sg, addr, addr + sg_dma_len(sg) };
 }
 
+bool i915_ggtt_require_blitter(struct drm_i915_private *i915);
+
 #endif
-- 
2.40.1


^ permalink raw reply related	[flat|nested] 14+ messages in thread

end of thread, other threads:[~2023-08-22 17:59 UTC | newest]

Thread overview: 14+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2023-08-19 22:50 [Intel-gfx] [CI] drm/i915/gt: Refactor hangcheck selftest to use igt_spinner Andi Shyti
2023-08-19 23:19 ` [Intel-gfx] ✗ Fi.CI.CHECKPATCH: warning for " Patchwork
2023-08-19 23:19 ` [Intel-gfx] ✗ Fi.CI.SPARSE: " Patchwork
2023-08-19 23:34 ` [Intel-gfx] ✗ Fi.CI.BAT: failure " Patchwork
2023-08-20 11:15 ` [Intel-gfx] ✗ Fi.CI.CHECKPATCH: warning for drm/i915/gt: Refactor hangcheck selftest to use igt_spinner (rev2) Patchwork
2023-08-20 11:15 ` [Intel-gfx] ✗ Fi.CI.SPARSE: " Patchwork
2023-08-20 11:33 ` [Intel-gfx] ✗ Fi.CI.BAT: failure " Patchwork
2023-08-21 20:48 ` [Intel-gfx] ✗ Fi.CI.CHECKPATCH: warning for drm/i915/gt: Refactor hangcheck selftest to use igt_spinner (rev3) Patchwork
2023-08-21 20:48 ` [Intel-gfx] ✗ Fi.CI.SPARSE: " Patchwork
2023-08-21 21:07 ` [Intel-gfx] ✗ Fi.CI.BAT: failure " Patchwork
2023-08-21 21:42 ` [Intel-gfx] [CI] drm/i915/gt: Refactor hangcheck selftest to use igt_spinner Cavitt, Jonathan
2023-08-22 12:38   ` Andi Shyti
2023-08-22 17:59 ` John Harrison
  -- strict thread matches above, loose matches on Subject: below --
2023-08-18 19:42 [Intel-gfx] [PATCH 1/3] drm/i915: Create a blitter context for GGTT updates Andi Shyti
2023-08-18 19:42 ` [Intel-gfx] [CI] drm/i915/gt: Refactor hangcheck selftest to use igt_spinner Andi Shyti

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox