* [PATCH 1/2] drm/i915/execlists: Delay updating ring register state after resume
@ 2018-09-13 17:32 Chris Wilson
From: Chris Wilson @ 2018-09-13 17:32 UTC (permalink / raw)
  To: intel-gfx

Now that we reload both RING_HEAD and RING_TAIL when rebinding the
context, we do not need to scrub those registers immediately on resume.

v2: Handle the perma-pinned contexts.
v3: Set RING_TAIL on context-pin so that we always have a known state in
the context image for the ring registers, and so that all parties have
similar code (ripe for refactoring).
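
In the execlists logical ring context image, the register state is laid out
as load-register style pairs: a dword carrying the register offset followed
by a dword carrying its value, which is why the HEAD and TAIL values land at
CTX_RING_HEAD + 1 and CTX_RING_TAIL + 1 in the diff below. The standalone
sketch here models only that indexing; the fake_* names are invented for
illustration and are not driver symbols.

/*
 * Standalone model of the (offset, value) pair layout used by the context
 * image.  FAKE_CTX_RING_HEAD/FAKE_CTX_RING_TAIL and their indices are
 * invented for this sketch; only the "+ 1" indexing mirrors the patch.
 */
#include <stdint.h>
#include <stdio.h>

#define FAKE_CTX_RING_HEAD 0x04	/* index of the register-offset dword */
#define FAKE_CTX_RING_TAIL 0x06	/* the value lives at index + 1 */

static void fake_context_pin(uint32_t *regs, uint32_t head, uint32_t tail)
{
	/* Like the pin path after this patch: write both HEAD and TAIL so
	 * the image always carries a known ring state. */
	regs[FAKE_CTX_RING_HEAD + 1] = head;
	regs[FAKE_CTX_RING_TAIL + 1] = tail;
}

int main(void)
{
	uint32_t regs[16] = { 0 };

	fake_context_pin(regs, 0x0, 0x40);
	printf("CTX_RING_HEAD value=%u CTX_RING_TAIL value=%u\n",
	       (unsigned)regs[FAKE_CTX_RING_HEAD + 1],
	       (unsigned)regs[FAKE_CTX_RING_TAIL + 1]);
	return 0;
}

With both slots written at pin time, the context image always carries a
self-consistent head/tail for the ring, which is what lets the resume path
below drop the explicit scrub.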

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
---
 drivers/gpu/drm/i915/intel_lrc.c | 33 +++++++++++++++-----------------
 1 file changed, 15 insertions(+), 18 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 9b1f0e5211a0..d7fcbba8e982 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -1338,11 +1338,13 @@ __execlists_context_pin(struct intel_engine_cs *engine,
 
 	intel_lr_context_descriptor_update(ctx, engine, ce);
 
+	GEM_BUG_ON(!intel_ring_offset_valid(ce->ring, ce->ring->head));
+
 	ce->lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
 	ce->lrc_reg_state[CTX_RING_BUFFER_START+1] =
 		i915_ggtt_offset(ce->ring->vma);
-	GEM_BUG_ON(!intel_ring_offset_valid(ce->ring, ce->ring->head));
-	ce->lrc_reg_state[CTX_RING_HEAD+1] = ce->ring->head;
+	ce->lrc_reg_state[CTX_RING_HEAD + 1] = ce->ring->head;
+	ce->lrc_reg_state[CTX_RING_TAIL + 1] = ce->ring->tail;
 
 	ce->state->obj->pin_global++;
 	i915_gem_context_get(ctx);
@@ -2841,13 +2843,14 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
 	return ret;
 }
 
-void intel_lr_context_resume(struct drm_i915_private *dev_priv)
+void intel_lr_context_resume(struct drm_i915_private *i915)
 {
 	struct intel_engine_cs *engine;
 	struct i915_gem_context *ctx;
 	enum intel_engine_id id;
 
-	/* Because we emit WA_TAIL_DWORDS there may be a disparity
+	/*
+	 * Because we emit WA_TAIL_DWORDS there may be a disparity
 	 * between our bookkeeping in ce->ring->head and ce->ring->tail and
 	 * that stored in context. As we only write new commands from
 	 * ce->ring->tail onwards, everything before that is junk. If the GPU
@@ -2857,28 +2860,22 @@ void intel_lr_context_resume(struct drm_i915_private *dev_priv)
 	 * So to avoid that we reset the context images upon resume. For
 	 * simplicity, we just zero everything out.
 	 */
-	list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
-		for_each_engine(engine, dev_priv, id) {
+	list_for_each_entry(ctx, &i915->contexts.list, link) {
+		for_each_engine(engine, i915, id) {
 			struct intel_context *ce =
 				to_intel_context(ctx, engine);
-			u32 *reg;
 
 			if (!ce->state)
 				continue;
 
-			reg = i915_gem_object_pin_map(ce->state->obj,
-						      I915_MAP_WB);
-			if (WARN_ON(IS_ERR(reg)))
-				continue;
-
-			reg += LRC_STATE_PN * PAGE_SIZE / sizeof(*reg);
-			reg[CTX_RING_HEAD+1] = 0;
-			reg[CTX_RING_TAIL+1] = 0;
+			intel_ring_reset(ce->ring, 0);
 
-			ce->state->obj->mm.dirty = true;
-			i915_gem_object_unpin_map(ce->state->obj);
+			if (ce->pin_count) { /* otherwise done in context_pin */
+				u32 *regs = ce->lrc_reg_state;
 
-			intel_ring_reset(ce->ring, 0);
+				regs[CTX_RING_HEAD + 1] = ce->ring->head;
+				regs[CTX_RING_TAIL + 1] = ce->ring->tail;
+			}
 		}
 	}
 }
-- 
2.19.0
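
The resume path after this patch reduces to: walk every context on every
engine, reset the software ring bookkeeping to offset zero, and refresh the
HEAD/TAIL slots in place only for contexts that are currently pinned (their
lrc_reg_state is already mapped); everything else is rewritten on the next
context pin. A simplified standalone model of that flow, assuming the same
pair layout as above (the fake_* types and functions are invented for this
sketch and are not driver symbols):

/*
 * Simplified model of intel_lr_context_resume() after the patch.  Only the
 * control flow mirrors the diff above; everything else is invented.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define FAKE_CTX_RING_HEAD 0x04	/* value slot is at index + 1 */
#define FAKE_CTX_RING_TAIL 0x06

struct fake_ring {
	uint32_t head;
	uint32_t tail;
};

struct fake_context {
	struct fake_ring ring;
	uint32_t *lrc_reg_state;	/* mapped register state, valid while pinned */
	unsigned int pin_count;
	bool has_state;
};

static void fake_ring_reset(struct fake_ring *ring, uint32_t offset)
{
	/* After resume everything before the new tail is junk, so start over. */
	ring->head = offset;
	ring->tail = offset;
}

static void fake_lr_context_resume(struct fake_context *contexts, size_t count)
{
	for (size_t i = 0; i < count; i++) {
		struct fake_context *ce = &contexts[i];

		if (!ce->has_state)
			continue;

		fake_ring_reset(&ce->ring, 0);

		/*
		 * Pinned contexts keep their image mapped, so refresh the
		 * HEAD/TAIL slots in place; everyone else is rewritten on
		 * the next context pin.
		 */
		if (ce->pin_count) {
			uint32_t *regs = ce->lrc_reg_state;

			regs[FAKE_CTX_RING_HEAD + 1] = ce->ring.head;
			regs[FAKE_CTX_RING_TAIL + 1] = ce->ring.tail;
		}
	}
}

int main(void)
{
	uint32_t regs[16] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	struct fake_context ctx = {
		.ring = { .head = 0x40, .tail = 0x80 },
		.lrc_reg_state = regs,
		.pin_count = 1,
		.has_state = true,
	};

	fake_lr_context_resume(&ctx, 1);
	printf("head slot=%u tail slot=%u\n",
	       (unsigned)regs[FAKE_CTX_RING_HEAD + 1],
	       (unsigned)regs[FAKE_CTX_RING_TAIL + 1]);
	return 0;
}

Compared with the code being removed, this avoids pinning and mapping each
context object with i915_gem_object_pin_map() just to scrub two dwords:
only images already mapped through a pinned context are touched, and
unpinned contexts are simply deferred to the next context pin.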

* [PATCH 1/2] drm/i915/execlists: Delay updating ring register state after resume
@ 2018-08-16 11:11 Chris Wilson
From: Chris Wilson @ 2018-08-16 11:11 UTC (permalink / raw)
  To: intel-gfx

Now that we reload both RING_HEAD and RING_TAIL when rebinding the
context, we do not need to scrub those registers immediately on resume.

v2: Handle the perma-pinned contexts.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
---
 drivers/gpu/drm/i915/intel_lrc.c | 29 +++++++++++------------------
 1 file changed, 11 insertions(+), 18 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 841895cfb05f..3632521a5fb2 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -2775,13 +2775,14 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
 	return ret;
 }
 
-void intel_lr_context_resume(struct drm_i915_private *dev_priv)
+void intel_lr_context_resume(struct drm_i915_private *i915)
 {
 	struct intel_engine_cs *engine;
 	struct i915_gem_context *ctx;
 	enum intel_engine_id id;
 
-	/* Because we emit WA_TAIL_DWORDS there may be a disparity
+	/*
+	 * Because we emit WA_TAIL_DWORDS there may be a disparity
 	 * between our bookkeeping in ce->ring->head and ce->ring->tail and
 	 * that stored in context. As we only write new commands from
 	 * ce->ring->tail onwards, everything before that is junk. If the GPU
@@ -2791,28 +2792,20 @@ void intel_lr_context_resume(struct drm_i915_private *dev_priv)
 	 * So to avoid that we reset the context images upon resume. For
 	 * simplicity, we just zero everything out.
 	 */
-	list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
-		for_each_engine(engine, dev_priv, id) {
+	list_for_each_entry(ctx, &i915->contexts.list, link) {
+		for_each_engine(engine, i915, id) {
 			struct intel_context *ce =
 				to_intel_context(ctx, engine);
-			u32 *reg;
-
-			if (!ce->state)
-				continue;
 
-			reg = i915_gem_object_pin_map(ce->state->obj,
-						      I915_MAP_WB);
-			if (WARN_ON(IS_ERR(reg)))
+			if (!ce->ring)
 				continue;
 
-			reg += LRC_STATE_PN * PAGE_SIZE / sizeof(*reg);
-			reg[CTX_RING_HEAD+1] = 0;
-			reg[CTX_RING_TAIL+1] = 0;
-
-			ce->state->obj->mm.dirty = true;
-			i915_gem_object_unpin_map(ce->state->obj);
-
 			intel_ring_reset(ce->ring, 0);
+
+			if (ce->pin_count) { /* otherwise done in context_pin */
+				ce->lrc_reg_state[CTX_RING_HEAD+1] = 0;
+				ce->lrc_reg_state[CTX_RING_TAIL+1] = 0;
+			}
 		}
 	}
 }
-- 
2.18.0
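
Compared with the v3 revision at the top of the page, this earlier v2 zeroes
the HEAD/TAIL slots of pinned contexts directly, while v3 stores
ce->ring->head and ce->ring->tail instead. Since intel_ring_reset(ce->ring, 0)
has just reset that bookkeeping to offset zero, both variants write the same
values into the image; v3 merely keeps the resume and context-pin paths
sourcing the same fields. A tiny standalone check of that equivalence (the
fake_ring struct below is invented for illustration):

#include <assert.h>
#include <stdint.h>

struct fake_ring { uint32_t head, tail; };

int main(void)
{
	struct fake_ring ring = { .head = 0x1230, .tail = 0x1270 };
	uint32_t v2_slots[2], v3_slots[2];

	/* Stand-in for intel_ring_reset(ce->ring, 0). */
	ring.head = 0;
	ring.tail = 0;

	/* v2: zero the slots explicitly. */
	v2_slots[0] = 0;
	v2_slots[1] = 0;

	/* v3: store the freshly reset bookkeeping values instead. */
	v3_slots[0] = ring.head;
	v3_slots[1] = ring.tail;

	assert(v2_slots[0] == v3_slots[0]);
	assert(v2_slots[1] == v3_slots[1]);
	return 0;
}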


Thread overview: 15+ messages
2018-09-13 17:32 [PATCH 1/2] drm/i915/execlists: Delay updating ring register state after resume Chris Wilson
2018-09-13 17:32 ` [PATCH 2/2] drm/i915/execlists: Use coherent writes into the context image Chris Wilson
2018-09-13 19:33   ` [PATCH v3] " Chris Wilson
2018-09-14  8:14     ` Tvrtko Ursulin
2018-09-14  8:21       ` Chris Wilson
2018-09-14  9:12         ` Tvrtko Ursulin
2018-09-14  9:17           ` Chris Wilson
2018-09-14  9:25             ` Tvrtko Ursulin
2018-09-13 17:50 ` [PATCH 1/2] drm/i915/execlists: Delay updating ring register state after resume Tvrtko Ursulin
2018-09-13 17:53 ` ✗ Fi.CI.SPARSE: warning for series starting with [1/2] " Patchwork
2018-09-13 18:12 ` ✗ Fi.CI.BAT: failure " Patchwork
2018-09-13 20:17 ` ✗ Fi.CI.SPARSE: warning for series starting with [1/2] drm/i915/execlists: Delay updating ring register state after resume (rev2) Patchwork
2018-09-13 20:35 ` ✓ Fi.CI.BAT: success " Patchwork
2018-09-13 23:02 ` ✓ Fi.CI.IGT: " Patchwork
  -- strict thread matches above, loose matches on Subject: below --
2018-08-16 11:11 [PATCH 1/2] drm/i915/execlists: Delay updating ring register state after resume Chris Wilson
