public inbox for intel-gfx@lists.freedesktop.org
 help / color / mirror / Atom feed
* [PATCH v2 1/5] drm/i915: Make i915_engine_info pretty printer to standalone
@ 2017-10-09 11:02 Chris Wilson
  2017-10-09 11:02 ` [PATCH v2 2/5] drm/i915/selftests: Pretty print engine state when requests fail to start Chris Wilson
                   ` (5 more replies)
  0 siblings, 6 replies; 11+ messages in thread
From: Chris Wilson @ 2017-10-09 11:02 UTC (permalink / raw)
  To: intel-gfx; +Cc: Mika Kuoppala

We can use drm_printer to hide the differences between printk and
seq_printf, and so make the i915_engine_info pretty printer able to be
called from different contexts and not just debugfs. For instance, I
want to use the pretty printer to debug kselftests.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@intel.com>
---
 drivers/gpu/drm/i915/i915_debugfs.c     | 148 +----------------------------
 drivers/gpu/drm/i915/intel_engine_cs.c  | 160 ++++++++++++++++++++++++++++++++
 drivers/gpu/drm/i915/intel_ringbuffer.h |   4 +
 3 files changed, 168 insertions(+), 144 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index f7817c667958..9ec2bcd9a695 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -3292,9 +3292,9 @@ static int i915_display_info(struct seq_file *m, void *unused)
 static int i915_engine_info(struct seq_file *m, void *unused)
 {
 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
-	struct i915_gpu_error *error = &dev_priv->gpu_error;
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
+	struct drm_printer p;
 
 	intel_runtime_pm_get(dev_priv);
 
@@ -3303,149 +3303,9 @@ static int i915_engine_info(struct seq_file *m, void *unused)
 	seq_printf(m, "Global active requests: %d\n",
 		   dev_priv->gt.active_requests);
 
-	for_each_engine(engine, dev_priv, id) {
-		struct intel_breadcrumbs *b = &engine->breadcrumbs;
-		struct drm_i915_gem_request *rq;
-		struct rb_node *rb;
-		u64 addr;
-
-		seq_printf(m, "%s\n", engine->name);
-		seq_printf(m, "\tcurrent seqno %x, last %x, hangcheck %x [%d ms], inflight %d\n",
-			   intel_engine_get_seqno(engine),
-			   intel_engine_last_submit(engine),
-			   engine->hangcheck.seqno,
-			   jiffies_to_msecs(jiffies - engine->hangcheck.action_timestamp),
-			   engine->timeline->inflight_seqnos);
-		seq_printf(m, "\tReset count: %d\n",
-			   i915_reset_engine_count(error, engine));
-
-		rcu_read_lock();
-
-		seq_printf(m, "\tRequests:\n");
-
-		rq = list_first_entry(&engine->timeline->requests,
-				      struct drm_i915_gem_request, link);
-		if (&rq->link != &engine->timeline->requests)
-			print_request(m, rq, "\t\tfirst  ");
-
-		rq = list_last_entry(&engine->timeline->requests,
-				     struct drm_i915_gem_request, link);
-		if (&rq->link != &engine->timeline->requests)
-			print_request(m, rq, "\t\tlast   ");
-
-		rq = i915_gem_find_active_request(engine);
-		if (rq) {
-			print_request(m, rq, "\t\tactive ");
-			seq_printf(m,
-				   "\t\t[head %04x, postfix %04x, tail %04x, batch 0x%08x_%08x]\n",
-				   rq->head, rq->postfix, rq->tail,
-				   rq->batch ? upper_32_bits(rq->batch->node.start) : ~0u,
-				   rq->batch ? lower_32_bits(rq->batch->node.start) : ~0u);
-		}
-
-		seq_printf(m, "\tRING_START: 0x%08x [0x%08x]\n",
-			   I915_READ(RING_START(engine->mmio_base)),
-			   rq ? i915_ggtt_offset(rq->ring->vma) : 0);
-		seq_printf(m, "\tRING_HEAD:  0x%08x [0x%08x]\n",
-			   I915_READ(RING_HEAD(engine->mmio_base)) & HEAD_ADDR,
-			   rq ? rq->ring->head : 0);
-		seq_printf(m, "\tRING_TAIL:  0x%08x [0x%08x]\n",
-			   I915_READ(RING_TAIL(engine->mmio_base)) & TAIL_ADDR,
-			   rq ? rq->ring->tail : 0);
-		seq_printf(m, "\tRING_CTL:   0x%08x [%s]\n",
-			   I915_READ(RING_CTL(engine->mmio_base)),
-			   I915_READ(RING_CTL(engine->mmio_base)) & (RING_WAIT | RING_WAIT_SEMAPHORE) ? "waiting" : "");
-
-		rcu_read_unlock();
-
-		addr = intel_engine_get_active_head(engine);
-		seq_printf(m, "\tACTHD:  0x%08x_%08x\n",
-			   upper_32_bits(addr), lower_32_bits(addr));
-		addr = intel_engine_get_last_batch_head(engine);
-		seq_printf(m, "\tBBADDR: 0x%08x_%08x\n",
-			   upper_32_bits(addr), lower_32_bits(addr));
-
-		if (i915_modparams.enable_execlists) {
-			const u32 *hws = &engine->status_page.page_addr[I915_HWS_CSB_BUF0_INDEX];
-			struct intel_engine_execlists * const execlists = &engine->execlists;
-			u32 ptr, read, write;
-			unsigned int idx;
-
-			seq_printf(m, "\tExeclist status: 0x%08x %08x\n",
-				   I915_READ(RING_EXECLIST_STATUS_LO(engine)),
-				   I915_READ(RING_EXECLIST_STATUS_HI(engine)));
-
-			ptr = I915_READ(RING_CONTEXT_STATUS_PTR(engine));
-			read = GEN8_CSB_READ_PTR(ptr);
-			write = GEN8_CSB_WRITE_PTR(ptr);
-			seq_printf(m, "\tExeclist CSB read %d [%d cached], write %d [%d from hws], interrupt posted? %s\n",
-				   read, execlists->csb_head,
-				   write,
-				   intel_read_status_page(engine, intel_hws_csb_write_index(engine->i915)),
-				   yesno(test_bit(ENGINE_IRQ_EXECLIST,
-						  &engine->irq_posted)));
-			if (read >= GEN8_CSB_ENTRIES)
-				read = 0;
-			if (write >= GEN8_CSB_ENTRIES)
-				write = 0;
-			if (read > write)
-				write += GEN8_CSB_ENTRIES;
-			while (read < write) {
-				idx = ++read % GEN8_CSB_ENTRIES;
-				seq_printf(m, "\tExeclist CSB[%d]: 0x%08x [0x%08x in hwsp], context: %d [%d in hwsp]\n",
-					   idx,
-					   I915_READ(RING_CONTEXT_STATUS_BUF_LO(engine, idx)),
-					   hws[idx * 2],
-					   I915_READ(RING_CONTEXT_STATUS_BUF_HI(engine, idx)),
-					   hws[idx * 2 + 1]);
-			}
-
-			rcu_read_lock();
-			for (idx = 0; idx < execlists_num_ports(execlists); idx++) {
-				unsigned int count;
-
-				rq = port_unpack(&execlists->port[idx], &count);
-				if (rq) {
-					seq_printf(m, "\t\tELSP[%d] count=%d, ",
-						   idx, count);
-					print_request(m, rq, "rq: ");
-				} else {
-					seq_printf(m, "\t\tELSP[%d] idle\n",
-						   idx);
-				}
-			}
-			rcu_read_unlock();
-
-			spin_lock_irq(&engine->timeline->lock);
-			for (rb = execlists->first; rb; rb = rb_next(rb)) {
-				struct i915_priolist *p =
-					rb_entry(rb, typeof(*p), node);
-
-				list_for_each_entry(rq, &p->requests,
-						    priotree.link)
-					print_request(m, rq, "\t\tQ ");
-			}
-			spin_unlock_irq(&engine->timeline->lock);
-		} else if (INTEL_GEN(dev_priv) > 6) {
-			seq_printf(m, "\tPP_DIR_BASE: 0x%08x\n",
-				   I915_READ(RING_PP_DIR_BASE(engine)));
-			seq_printf(m, "\tPP_DIR_BASE_READ: 0x%08x\n",
-				   I915_READ(RING_PP_DIR_BASE_READ(engine)));
-			seq_printf(m, "\tPP_DIR_DCLV: 0x%08x\n",
-				   I915_READ(RING_PP_DIR_DCLV(engine)));
-		}
-
-		spin_lock_irq(&b->rb_lock);
-		for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
-			struct intel_wait *w = rb_entry(rb, typeof(*w), node);
-
-			seq_printf(m, "\t%s [%d] waiting for %x\n",
-				   w->tsk->comm, w->tsk->pid, w->seqno);
-		}
-		spin_unlock_irq(&b->rb_lock);
-
-		seq_puts(m, "\n");
-	}
+	p = drm_seq_file_printer(m);
+	for_each_engine(engine, dev_priv, id)
+		intel_engine_dump(engine, &p);
 
 	intel_runtime_pm_put(dev_priv);
 
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 807a7aafc089..a59b2a30ff5a 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -22,6 +22,8 @@
  *
  */
 
+#include <drm/drm_print.h>
+
 #include "i915_drv.h"
 #include "intel_ringbuffer.h"
 #include "intel_lrc.h"
@@ -1616,6 +1618,164 @@ bool intel_engine_can_store_dword(struct intel_engine_cs *engine)
 	}
 }
 
+static void print_request(struct drm_printer *m,
+			  struct drm_i915_gem_request *rq,
+			  const char *prefix)
+{
+	drm_printf(m, "%s%x [%x:%x] prio=%d @ %dms: %s\n", prefix,
+		   rq->global_seqno, rq->ctx->hw_id, rq->fence.seqno,
+		   rq->priotree.priority,
+		   jiffies_to_msecs(jiffies - rq->emitted_jiffies),
+		   rq->timeline->common->name);
+}
+
+void intel_engine_dump(struct intel_engine_cs *engine, struct drm_printer *m)
+{
+	struct intel_breadcrumbs *b = &engine->breadcrumbs;
+	struct i915_gpu_error *error = &engine->i915->gpu_error;
+	struct drm_i915_private *dev_priv = engine->i915;
+	struct drm_i915_gem_request *rq;
+	struct rb_node *rb;
+	u64 addr;
+
+	drm_printf(m, "%s\n", engine->name);
+	drm_printf(m, "\tcurrent seqno %x, last %x, hangcheck %x [%d ms], inflight %d\n",
+		   intel_engine_get_seqno(engine),
+		   intel_engine_last_submit(engine),
+		   engine->hangcheck.seqno,
+		   jiffies_to_msecs(jiffies - engine->hangcheck.action_timestamp),
+		   engine->timeline->inflight_seqnos);
+	drm_printf(m, "\tReset count: %d\n",
+		   i915_reset_engine_count(error, engine));
+
+	rcu_read_lock();
+
+	drm_printf(m, "\tRequests:\n");
+
+	rq = list_first_entry(&engine->timeline->requests,
+			      struct drm_i915_gem_request, link);
+	if (&rq->link != &engine->timeline->requests)
+		print_request(m, rq, "\t\tfirst  ");
+
+	rq = list_last_entry(&engine->timeline->requests,
+			     struct drm_i915_gem_request, link);
+	if (&rq->link != &engine->timeline->requests)
+		print_request(m, rq, "\t\tlast   ");
+
+	rq = i915_gem_find_active_request(engine);
+	if (rq) {
+		print_request(m, rq, "\t\tactive ");
+		drm_printf(m,
+			   "\t\t[head %04x, postfix %04x, tail %04x, batch 0x%08x_%08x]\n",
+			   rq->head, rq->postfix, rq->tail,
+			   rq->batch ? upper_32_bits(rq->batch->node.start) : ~0u,
+			   rq->batch ? lower_32_bits(rq->batch->node.start) : ~0u);
+	}
+
+	drm_printf(m, "\tRING_START: 0x%08x [0x%08x]\n",
+		   I915_READ(RING_START(engine->mmio_base)),
+		   rq ? i915_ggtt_offset(rq->ring->vma) : 0);
+	drm_printf(m, "\tRING_HEAD:  0x%08x [0x%08x]\n",
+		   I915_READ(RING_HEAD(engine->mmio_base)) & HEAD_ADDR,
+		   rq ? rq->ring->head : 0);
+	drm_printf(m, "\tRING_TAIL:  0x%08x [0x%08x]\n",
+		   I915_READ(RING_TAIL(engine->mmio_base)) & TAIL_ADDR,
+		   rq ? rq->ring->tail : 0);
+	drm_printf(m, "\tRING_CTL:   0x%08x [%s]\n",
+		   I915_READ(RING_CTL(engine->mmio_base)),
+		   I915_READ(RING_CTL(engine->mmio_base)) & (RING_WAIT | RING_WAIT_SEMAPHORE) ? "waiting" : "");
+
+	rcu_read_unlock();
+
+	addr = intel_engine_get_active_head(engine);
+	drm_printf(m, "\tACTHD:  0x%08x_%08x\n",
+		   upper_32_bits(addr), lower_32_bits(addr));
+	addr = intel_engine_get_last_batch_head(engine);
+	drm_printf(m, "\tBBADDR: 0x%08x_%08x\n",
+		   upper_32_bits(addr), lower_32_bits(addr));
+
+	if (i915_modparams.enable_execlists) {
+		const u32 *hws = &engine->status_page.page_addr[I915_HWS_CSB_BUF0_INDEX];
+		struct intel_engine_execlists * const execlists = &engine->execlists;
+		u32 ptr, read, write;
+		unsigned int idx;
+
+		drm_printf(m, "\tExeclist status: 0x%08x %08x\n",
+			   I915_READ(RING_EXECLIST_STATUS_LO(engine)),
+			   I915_READ(RING_EXECLIST_STATUS_HI(engine)));
+
+		ptr = I915_READ(RING_CONTEXT_STATUS_PTR(engine));
+		read = GEN8_CSB_READ_PTR(ptr);
+		write = GEN8_CSB_WRITE_PTR(ptr);
+		drm_printf(m, "\tExeclist CSB read %d [%d cached], write %d [%d from hws], interrupt posted? %s\n",
+			   read, execlists->csb_head,
+			   write,
+			   intel_read_status_page(engine, intel_hws_csb_write_index(engine->i915)),
+			   yesno(test_bit(ENGINE_IRQ_EXECLIST,
+					  &engine->irq_posted)));
+		if (read >= GEN8_CSB_ENTRIES)
+			read = 0;
+		if (write >= GEN8_CSB_ENTRIES)
+			write = 0;
+		if (read > write)
+			write += GEN8_CSB_ENTRIES;
+		while (read < write) {
+			idx = ++read % GEN8_CSB_ENTRIES;
+			drm_printf(m, "\tExeclist CSB[%d]: 0x%08x [0x%08x in hwsp], context: %d [%d in hwsp]\n",
+				   idx,
+				   I915_READ(RING_CONTEXT_STATUS_BUF_LO(engine, idx)),
+				   hws[idx * 2],
+				   I915_READ(RING_CONTEXT_STATUS_BUF_HI(engine, idx)),
+				   hws[idx * 2 + 1]);
+		}
+
+		rcu_read_lock();
+		for (idx = 0; idx < execlists_num_ports(execlists); idx++) {
+			unsigned int count;
+
+			rq = port_unpack(&execlists->port[idx], &count);
+			if (rq) {
+				drm_printf(m, "\t\tELSP[%d] count=%d, ",
+					   idx, count);
+				print_request(m, rq, "rq: ");
+			} else {
+				drm_printf(m, "\t\tELSP[%d] idle\n",
+					   idx);
+			}
+		}
+		rcu_read_unlock();
+
+		spin_lock_irq(&engine->timeline->lock);
+		for (rb = execlists->first; rb; rb = rb_next(rb)) {
+			struct i915_priolist *p =
+				rb_entry(rb, typeof(*p), node);
+
+			list_for_each_entry(rq, &p->requests,
+					    priotree.link)
+				print_request(m, rq, "\t\tQ ");
+		}
+		spin_unlock_irq(&engine->timeline->lock);
+	} else if (INTEL_GEN(dev_priv) > 6) {
+		drm_printf(m, "\tPP_DIR_BASE: 0x%08x\n",
+			   I915_READ(RING_PP_DIR_BASE(engine)));
+		drm_printf(m, "\tPP_DIR_BASE_READ: 0x%08x\n",
+			   I915_READ(RING_PP_DIR_BASE_READ(engine)));
+		drm_printf(m, "\tPP_DIR_DCLV: 0x%08x\n",
+			   I915_READ(RING_PP_DIR_DCLV(engine)));
+	}
+
+	spin_lock_irq(&b->rb_lock);
+	for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
+		struct intel_wait *w = rb_entry(rb, typeof(*w), node);
+
+		drm_printf(m, "\t%s [%d] waiting for %x\n",
+			   w->tsk->comm, w->tsk->pid, w->seqno);
+	}
+	spin_unlock_irq(&b->rb_lock);
+
+	drm_printf(m, "\n");
+}
+
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
 #include "selftests/mock_engine.c"
 #endif
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 0fedda17488c..17186f067408 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -7,6 +7,8 @@
 #include "i915_gem_timeline.h"
 #include "i915_selftest.h"
 
+struct drm_printer;
+
 #define I915_CMD_HASH_ORDER 9
 
 /* Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill,
@@ -839,4 +841,6 @@ void intel_engines_reset_default_submission(struct drm_i915_private *i915);
 
 bool intel_engine_can_store_dword(struct intel_engine_cs *engine);
 
+void intel_engine_dump(struct intel_engine_cs *engine, struct drm_printer *p);
+
 #endif /* _INTEL_RINGBUFFER_H_ */
-- 
2.14.2

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 11+ messages in thread

* [PATCH v2 2/5] drm/i915/selftests: Pretty print engine state when requests fail to start
  2017-10-09 11:02 [PATCH v2 1/5] drm/i915: Make i915_engine_info pretty printer to standalone Chris Wilson
@ 2017-10-09 11:02 ` Chris Wilson
  2017-10-09 11:50   ` Mika Kuoppala
  2017-10-09 11:02 ` [PATCH v2 3/5] drm/i915: Hold forcewake for the duration of reset+restart Chris Wilson
                   ` (4 subsequent siblings)
  5 siblings, 1 reply; 11+ messages in thread
From: Chris Wilson @ 2017-10-09 11:02 UTC (permalink / raw)
  To: intel-gfx; +Cc: Mika Kuoppala

During hangcheck testing, we try to execute requests following the GPU
reset, and in particular want to try and debug when those fail.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@intel.com>
---
 drivers/gpu/drm/i915/selftests/intel_hangcheck.c | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
index 08159b268893..7e1bdd88eda3 100644
--- a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
@@ -624,8 +624,11 @@ static int igt_wait_reset(void *arg)
 	__i915_add_request(rq, true);
 
 	if (!wait_for_hang(&h, rq)) {
+		struct drm_printer p = drm_info_printer(i915->drm.dev);
+
 		pr_err("Failed to start request %x, at %x\n",
 		       rq->fence.seqno, hws_seqno(&h, rq));
+		intel_engine_dump(rq->engine, &p);
 
 		i915_reset(i915, 0);
 		i915_gem_set_wedged(i915);
@@ -716,8 +719,12 @@ static int igt_reset_queue(void *arg)
 			__i915_add_request(rq, true);
 
 			if (!wait_for_hang(&h, prev)) {
+				struct drm_printer p = drm_info_printer(i915->drm.dev);
+
 				pr_err("Failed to start request %x, at %x\n",
 				       prev->fence.seqno, hws_seqno(&h, prev));
+				intel_engine_dump(rq->engine, &p);
+
 				i915_gem_request_put(rq);
 				i915_gem_request_put(prev);
 
@@ -818,8 +825,11 @@ static int igt_handle_error(void *arg)
 	__i915_add_request(rq, true);
 
 	if (!wait_for_hang(&h, rq)) {
+		struct drm_printer p = drm_info_printer(i915->drm.dev);
+
 		pr_err("Failed to start request %x, at %x\n",
 		       rq->fence.seqno, hws_seqno(&h, rq));
+		intel_engine_dump(rq->engine, &p);
 
 		i915_reset(i915, 0);
 		i915_gem_set_wedged(i915);
-- 
2.14.2

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 11+ messages in thread

* [PATCH v2 3/5] drm/i915: Hold forcewake for the duration of reset+restart
  2017-10-09 11:02 [PATCH v2 1/5] drm/i915: Make i915_engine_info pretty printer to standalone Chris Wilson
  2017-10-09 11:02 ` [PATCH v2 2/5] drm/i915/selftests: Pretty print engine state when requests fail to start Chris Wilson
@ 2017-10-09 11:02 ` Chris Wilson
  2017-10-09 11:32   ` Mika Kuoppala
  2017-10-09 11:03 ` [PATCH v2 4/5] drm/i915/selftests: Hold the rpm wakeref for the reset tests Chris Wilson
                   ` (3 subsequent siblings)
  5 siblings, 1 reply; 11+ messages in thread
From: Chris Wilson @ 2017-10-09 11:02 UTC (permalink / raw)
  To: intel-gfx

Resetting the engine requires us to hold the forcewake wakeref to
prevent RC6 trying to happen in the middle of the reset sequence. The
consequence of an unwanted RC6 event in the middle is that random state
is then saved to the powercontext and restored later, which may
overwrite the mmio state we need to preserve (e.g. PD_DIR_BASE in the
legacy ringbuffer reset_ring_common()).

This was noticed in the live_hangcheck selftests when Haswell would
sporadically fail to restart during igt_reset_queue().

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
---
 drivers/gpu/drm/i915/i915_gem.c | 17 +++++++++++++++--
 1 file changed, 15 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 82a10036fb38..eba23c239aae 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2832,7 +2832,17 @@ i915_gem_reset_prepare_engine(struct intel_engine_cs *engine)
 {
 	struct drm_i915_gem_request *request = NULL;
 
-	/* Prevent the signaler thread from updating the request
+	/*
+	 * During the reset sequence, we must prevent the engine from
+	 * entering RC6. As the context state is undefined until we restart
+	 * the engine, if it does enter RC6 during the reset, the state
+	 * written to the powercontext is undefined and so we may lose
+	 * GPU state upon resume, i.e. fail to restart after a reset.
+	 */
+	intel_uncore_forcewake_get(engine->i915, FORCEWAKE_ALL);
+
+	/*
+	 * Prevent the signaler thread from updating the request
 	 * state (by calling dma_fence_signal) as we are processing
 	 * the reset. The write from the GPU of the seqno is
 	 * asynchronous and the signaler thread may see a different
@@ -2843,7 +2853,8 @@ i915_gem_reset_prepare_engine(struct intel_engine_cs *engine)
 	 */
 	kthread_park(engine->breadcrumbs.signaler);
 
-	/* Prevent request submission to the hardware until we have
+	/*
+	 * Prevent request submission to the hardware until we have
 	 * completed the reset in i915_gem_reset_finish(). If a request
 	 * is completed by one engine, it may then queue a request
 	 * to a second via its engine->irq_tasklet *just* as we are
@@ -3033,6 +3044,8 @@ void i915_gem_reset_finish_engine(struct intel_engine_cs *engine)
 {
 	tasklet_enable(&engine->execlists.irq_tasklet);
 	kthread_unpark(engine->breadcrumbs.signaler);
+
+	intel_uncore_forcewake_put(engine->i915, FORCEWAKE_ALL);
 }
 
 void i915_gem_reset_finish(struct drm_i915_private *dev_priv)
-- 
2.14.2

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 11+ messages in thread

* [PATCH v2 4/5] drm/i915/selftests: Hold the rpm wakeref for the reset tests
  2017-10-09 11:02 [PATCH v2 1/5] drm/i915: Make i915_engine_info pretty printer to standalone Chris Wilson
  2017-10-09 11:02 ` [PATCH v2 2/5] drm/i915/selftests: Pretty print engine state when requests fail to start Chris Wilson
  2017-10-09 11:02 ` [PATCH v2 3/5] drm/i915: Hold forcewake for the duration of reset+restart Chris Wilson
@ 2017-10-09 11:03 ` Chris Wilson
  2017-10-09 11:50   ` Mika Kuoppala
  2017-10-09 11:03 ` [PATCH v2 5/5] drm/i915: Provide an assert for when we expect forcewake to be held Chris Wilson
                   ` (2 subsequent siblings)
  5 siblings, 1 reply; 11+ messages in thread
From: Chris Wilson @ 2017-10-09 11:03 UTC (permalink / raw)
  To: intel-gfx

The lowlevel reset functions expect the caller to be holding the rpm
wakeref for the device access across the reset. We were not explicitly
doing this in the selftest, so for simplicity acquire the wakeref for
the duration of all subtests.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
---
 drivers/gpu/drm/i915/selftests/intel_hangcheck.c | 9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
index 7e1bdd88eda3..71ce06680d66 100644
--- a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
@@ -878,9 +878,16 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915)
 		SUBTEST(igt_reset_queue),
 		SUBTEST(igt_handle_error),
 	};
+	int err;
 
 	if (!intel_has_gpu_reset(i915))
 		return 0;
 
-	return i915_subtests(tests, i915);
+	intel_runtime_pm_get(i915);
+
+	err = i915_subtests(tests, i915);
+
+	intel_runtime_pm_put(i915);
+
+	return err;
 }
-- 
2.14.2

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 11+ messages in thread

* [PATCH v2 5/5] drm/i915: Provide an assert for when we expect forcewake to be held
  2017-10-09 11:02 [PATCH v2 1/5] drm/i915: Make i915_engine_info pretty printer to standalone Chris Wilson
                   ` (2 preceding siblings ...)
  2017-10-09 11:03 ` [PATCH v2 4/5] drm/i915/selftests: Hold the rpm wakeref for the reset tests Chris Wilson
@ 2017-10-09 11:03 ` Chris Wilson
  2017-10-09 11:48 ` [PATCH v2 1/5] drm/i915: Make i915_engine_info pretty printer to standalone Mika Kuoppala
  2017-10-09 13:41 ` ✗ Fi.CI.BAT: warning for series starting with [v2,1/5] " Patchwork
  5 siblings, 0 replies; 11+ messages in thread
From: Chris Wilson @ 2017-10-09 11:03 UTC (permalink / raw)
  To: intel-gfx

Add assert_forcewakes_active() (the complementary function to
assert_forcewakes_inactive) that documents the requirement of a
function for its callers to be holding the forcewake ref (i.e. the
function is part of a sequence over which RC6 must be prevented).

One such example is during ringbuffer reset, where RC6 must be held
across the whole reinitialisation sequence.

v2: Include debug information in the WARN so we know which fw domain is
missing.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com> #v1
---
 drivers/gpu/drm/i915/intel_ringbuffer.c | 11 ++++++++++-
 drivers/gpu/drm/i915/intel_uncore.c     | 18 +++++++++++++++++-
 drivers/gpu/drm/i915/intel_uncore.h     |  2 ++
 3 files changed, 29 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 05c08b0bc172..4285f09ff8b8 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -579,7 +579,16 @@ static int init_ring_common(struct intel_engine_cs *engine)
 static void reset_ring_common(struct intel_engine_cs *engine,
 			      struct drm_i915_gem_request *request)
 {
-	/* Try to restore the logical GPU state to match the continuation
+	/*
+	 * RC6 must be prevented until the reset is complete and the engine
+	 * reinitialised. If it occurs in the middle of this sequence, the
+	 * state written to/loaded from the power context is ill-defined (e.g.
+	 * the PP_BASE_DIR may be lost).
+	 */
+	assert_forcewakes_active(engine->i915, FORCEWAKE_ALL);
+
+	/*
+	 * Try to restore the logical GPU state to match the continuation
 	 * of the request queue. If we skip the context/PD restore, then
 	 * the next request may try to execute assuming that its context
 	 * is valid and loaded on the GPU and so may try to access invalid
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index b3c3f94fc7e4..983617b5b338 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -626,7 +626,23 @@ void assert_forcewakes_inactive(struct drm_i915_private *dev_priv)
 	if (!dev_priv->uncore.funcs.force_wake_get)
 		return;
 
-	WARN_ON(dev_priv->uncore.fw_domains_active);
+	WARN(dev_priv->uncore.fw_domains_active,
+	     "Expected all fw_domains to be inactive, but %08x are still on\n",
+	     dev_priv->uncore.fw_domains_active);
+}
+
+void assert_forcewakes_active(struct drm_i915_private *dev_priv,
+			      enum forcewake_domains fw_domains)
+{
+	if (!dev_priv->uncore.funcs.force_wake_get)
+		return;
+
+	assert_rpm_wakelock_held(dev_priv);
+
+	fw_domains &= dev_priv->uncore.fw_domains;
+	WARN(fw_domains & ~dev_priv->uncore.fw_domains_active,
+	     "Expected %08x fw_domains to be active, but %08x are off\n",
+	     fw_domains, fw_domains & ~dev_priv->uncore.fw_domains_active);
 }
 
 /* We give fast paths for the really cool registers */
diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h
index 66eae2ce2f29..582771251b57 100644
--- a/drivers/gpu/drm/i915/intel_uncore.h
+++ b/drivers/gpu/drm/i915/intel_uncore.h
@@ -137,6 +137,8 @@ void intel_uncore_resume_early(struct drm_i915_private *dev_priv);
 
 u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv);
 void assert_forcewakes_inactive(struct drm_i915_private *dev_priv);
+void assert_forcewakes_active(struct drm_i915_private *dev_priv,
+			      enum forcewake_domains fw_domains);
 const char *intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id);
 
 enum forcewake_domains
-- 
2.14.2

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 11+ messages in thread

* Re: [PATCH v2 3/5] drm/i915: Hold forcewake for the duration of reset+restart
  2017-10-09 11:02 ` [PATCH v2 3/5] drm/i915: Hold forcewake for the duration of reset+restart Chris Wilson
@ 2017-10-09 11:32   ` Mika Kuoppala
  2017-10-09 11:37     ` Chris Wilson
  0 siblings, 1 reply; 11+ messages in thread
From: Mika Kuoppala @ 2017-10-09 11:32 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx

Chris Wilson <chris@chris-wilson.co.uk> writes:

> Resetting the engine requires us to hold the forcewake wakeref to
> prevent RC6 trying to happen in the middle of the reset sequence. The
> consequence of an unwanted RC6 event in the middle is that random state
> is then saved to the powercontext and restored later, which may
> overwrite the mmio state we need to preserve (e.g. PD_DIR_BASE in the
> legacy ringbuffer reset_ring_common()).
>
> This was noticed in the live_hangcheck selftests when Haswell would
> sporadically fail to restart during igt_reset_queue().
>
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
> ---
>  drivers/gpu/drm/i915/i915_gem.c | 17 +++++++++++++++--
>  1 file changed, 15 insertions(+), 2 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> index 82a10036fb38..eba23c239aae 100644
> --- a/drivers/gpu/drm/i915/i915_gem.c
> +++ b/drivers/gpu/drm/i915/i915_gem.c
> @@ -2832,7 +2832,17 @@ i915_gem_reset_prepare_engine(struct intel_engine_cs *engine)
>  {
>  	struct drm_i915_gem_request *request = NULL;
>  
> -	/* Prevent the signaler thread from updating the request
> +	/*
> +	 * During the reset sequence, we must prevent the engine from
> +	 * entering RC6. As the context state is undefined until we restart
> +	 * the engine, if it does enter RC6 during the reset, the state
> +	 * written to the powercontext is undefined and so we may lose
> +	 * GPU state upon resume, i.e. fail to restart after a reset.
> +	 */
> +	intel_uncore_forcewake_get(engine->i915, FORCEWAKE_ALL);

We do nested get when actually issuing the hw commands. I would
still keep them there and consider changing them to asserts
some day.

Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>

> +
> +	/*
> +	 * Prevent the signaler thread from updating the request
>  	 * state (by calling dma_fence_signal) as we are processing
>  	 * the reset. The write from the GPU of the seqno is
>  	 * asynchronous and the signaler thread may see a different
> @@ -2843,7 +2853,8 @@ i915_gem_reset_prepare_engine(struct intel_engine_cs *engine)
>  	 */
>  	kthread_park(engine->breadcrumbs.signaler);
>  
> -	/* Prevent request submission to the hardware until we have
> +	/*
> +	 * Prevent request submission to the hardware until we have
>  	 * completed the reset in i915_gem_reset_finish(). If a request
>  	 * is completed by one engine, it may then queue a request
>  	 * to a second via its engine->irq_tasklet *just* as we are
> @@ -3033,6 +3044,8 @@ void i915_gem_reset_finish_engine(struct intel_engine_cs *engine)
>  {
>  	tasklet_enable(&engine->execlists.irq_tasklet);
>  	kthread_unpark(engine->breadcrumbs.signaler);
> +
> +	intel_uncore_forcewake_put(engine->i915, FORCEWAKE_ALL);
>  }
>  
>  void i915_gem_reset_finish(struct drm_i915_private *dev_priv)
> -- 
> 2.14.2
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH v2 3/5] drm/i915: Hold forcewake for the duration of reset+restart
  2017-10-09 11:32   ` Mika Kuoppala
@ 2017-10-09 11:37     ` Chris Wilson
  0 siblings, 0 replies; 11+ messages in thread
From: Chris Wilson @ 2017-10-09 11:37 UTC (permalink / raw)
  To: Mika Kuoppala, intel-gfx

Quoting Mika Kuoppala (2017-10-09 12:32:16)
> Chris Wilson <chris@chris-wilson.co.uk> writes:
> 
> > Resetting the engine requires us to hold the forcewake wakeref to
> > prevent RC6 trying to happen in the middle of the reset sequence. The
> > consequence of an unwanted RC6 event in the middle is that random state
> > is then saved to the powercontext and restored later, which may
> > overwrite the mmio state we need to preserve (e.g. PD_DIR_BASE in the
> > legacy ringbuffer reset_ring_common()).
> >
> > This was noticed in the live_hangcheck selftests when Haswell would
> > sporadically fail to restart during igt_reset_queue().
> >
> > Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> > Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
> > ---
> >  drivers/gpu/drm/i915/i915_gem.c | 17 +++++++++++++++--
> >  1 file changed, 15 insertions(+), 2 deletions(-)
> >
> > diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> > index 82a10036fb38..eba23c239aae 100644
> > --- a/drivers/gpu/drm/i915/i915_gem.c
> > +++ b/drivers/gpu/drm/i915/i915_gem.c
> > @@ -2832,7 +2832,17 @@ i915_gem_reset_prepare_engine(struct intel_engine_cs *engine)
> >  {
> >       struct drm_i915_gem_request *request = NULL;
> >  
> > -     /* Prevent the signaler thread from updating the request
> > +     /*
> > +      * During the reset sequence, we must prevent the engine from
> > +      * entering RC6. As the context state is undefined until we restart
> > +      * the engine, if it does enter RC6 during the reset, the state
> > +      * written to the powercontext is undefined and so we may lose
> > +      * GPU state upon resume, i.e. fail to restart after a reset.
> > +      */
> > +     intel_uncore_forcewake_get(engine->i915, FORCEWAKE_ALL);
> 
> We do nested get when actually issuing the hw commands. I would
> still keep them there and consider changing them to asserts
> some day.

Yup, our security blanket for init_hw() is many layers deep. T'is a job
for a rainy tomorrow.
-Chris
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH v2 1/5] drm/i915: Make i915_engine_info pretty printer to standalone
  2017-10-09 11:02 [PATCH v2 1/5] drm/i915: Make i915_engine_info pretty printer to standalone Chris Wilson
                   ` (3 preceding siblings ...)
  2017-10-09 11:03 ` [PATCH v2 5/5] drm/i915: Provide an assert for when we expect forcewake to be held Chris Wilson
@ 2017-10-09 11:48 ` Mika Kuoppala
  2017-10-09 13:41 ` ✗ Fi.CI.BAT: warning for series starting with [v2,1/5] " Patchwork
  5 siblings, 0 replies; 11+ messages in thread
From: Mika Kuoppala @ 2017-10-09 11:48 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx

Chris Wilson <chris@chris-wilson.co.uk> writes:

> We can use drm_printer to hide the differences between printk and
> seq_printf, and so make the i915_engine_info pretty printer able to be
> called from different contexts and not just debugfs. For instance, I
> want to use the pretty printer to debug kselftests.
>
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Mika Kuoppala <mika.kuoppala@intel.com>

With this, triaging all kinds of engine problems is easier
and perhaps GEM_ENGINE_BUG_ON or similar might also emerge.

Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>

> ---
>  drivers/gpu/drm/i915/i915_debugfs.c     | 148 +----------------------------
>  drivers/gpu/drm/i915/intel_engine_cs.c  | 160 ++++++++++++++++++++++++++++++++
>  drivers/gpu/drm/i915/intel_ringbuffer.h |   4 +
>  3 files changed, 168 insertions(+), 144 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
> index f7817c667958..9ec2bcd9a695 100644
> --- a/drivers/gpu/drm/i915/i915_debugfs.c
> +++ b/drivers/gpu/drm/i915/i915_debugfs.c
> @@ -3292,9 +3292,9 @@ static int i915_display_info(struct seq_file *m, void *unused)
>  static int i915_engine_info(struct seq_file *m, void *unused)
>  {
>  	struct drm_i915_private *dev_priv = node_to_i915(m->private);
> -	struct i915_gpu_error *error = &dev_priv->gpu_error;
>  	struct intel_engine_cs *engine;
>  	enum intel_engine_id id;
> +	struct drm_printer p;
>  
>  	intel_runtime_pm_get(dev_priv);
>  
> @@ -3303,149 +3303,9 @@ static int i915_engine_info(struct seq_file *m, void *unused)
>  	seq_printf(m, "Global active requests: %d\n",
>  		   dev_priv->gt.active_requests);
>  
> -	for_each_engine(engine, dev_priv, id) {
> -		struct intel_breadcrumbs *b = &engine->breadcrumbs;
> -		struct drm_i915_gem_request *rq;
> -		struct rb_node *rb;
> -		u64 addr;
> -
> -		seq_printf(m, "%s\n", engine->name);
> -		seq_printf(m, "\tcurrent seqno %x, last %x, hangcheck %x [%d ms], inflight %d\n",
> -			   intel_engine_get_seqno(engine),
> -			   intel_engine_last_submit(engine),
> -			   engine->hangcheck.seqno,
> -			   jiffies_to_msecs(jiffies - engine->hangcheck.action_timestamp),
> -			   engine->timeline->inflight_seqnos);
> -		seq_printf(m, "\tReset count: %d\n",
> -			   i915_reset_engine_count(error, engine));
> -
> -		rcu_read_lock();
> -
> -		seq_printf(m, "\tRequests:\n");
> -
> -		rq = list_first_entry(&engine->timeline->requests,
> -				      struct drm_i915_gem_request, link);
> -		if (&rq->link != &engine->timeline->requests)
> -			print_request(m, rq, "\t\tfirst  ");
> -
> -		rq = list_last_entry(&engine->timeline->requests,
> -				     struct drm_i915_gem_request, link);
> -		if (&rq->link != &engine->timeline->requests)
> -			print_request(m, rq, "\t\tlast   ");
> -
> -		rq = i915_gem_find_active_request(engine);
> -		if (rq) {
> -			print_request(m, rq, "\t\tactive ");
> -			seq_printf(m,
> -				   "\t\t[head %04x, postfix %04x, tail %04x, batch 0x%08x_%08x]\n",
> -				   rq->head, rq->postfix, rq->tail,
> -				   rq->batch ? upper_32_bits(rq->batch->node.start) : ~0u,
> -				   rq->batch ? lower_32_bits(rq->batch->node.start) : ~0u);
> -		}
> -
> -		seq_printf(m, "\tRING_START: 0x%08x [0x%08x]\n",
> -			   I915_READ(RING_START(engine->mmio_base)),
> -			   rq ? i915_ggtt_offset(rq->ring->vma) : 0);
> -		seq_printf(m, "\tRING_HEAD:  0x%08x [0x%08x]\n",
> -			   I915_READ(RING_HEAD(engine->mmio_base)) & HEAD_ADDR,
> -			   rq ? rq->ring->head : 0);
> -		seq_printf(m, "\tRING_TAIL:  0x%08x [0x%08x]\n",
> -			   I915_READ(RING_TAIL(engine->mmio_base)) & TAIL_ADDR,
> -			   rq ? rq->ring->tail : 0);
> -		seq_printf(m, "\tRING_CTL:   0x%08x [%s]\n",
> -			   I915_READ(RING_CTL(engine->mmio_base)),
> -			   I915_READ(RING_CTL(engine->mmio_base)) & (RING_WAIT | RING_WAIT_SEMAPHORE) ? "waiting" : "");
> -
> -		rcu_read_unlock();
> -
> -		addr = intel_engine_get_active_head(engine);
> -		seq_printf(m, "\tACTHD:  0x%08x_%08x\n",
> -			   upper_32_bits(addr), lower_32_bits(addr));
> -		addr = intel_engine_get_last_batch_head(engine);
> -		seq_printf(m, "\tBBADDR: 0x%08x_%08x\n",
> -			   upper_32_bits(addr), lower_32_bits(addr));
> -
> -		if (i915_modparams.enable_execlists) {
> -			const u32 *hws = &engine->status_page.page_addr[I915_HWS_CSB_BUF0_INDEX];
> -			struct intel_engine_execlists * const execlists = &engine->execlists;
> -			u32 ptr, read, write;
> -			unsigned int idx;
> -
> -			seq_printf(m, "\tExeclist status: 0x%08x %08x\n",
> -				   I915_READ(RING_EXECLIST_STATUS_LO(engine)),
> -				   I915_READ(RING_EXECLIST_STATUS_HI(engine)));
> -
> -			ptr = I915_READ(RING_CONTEXT_STATUS_PTR(engine));
> -			read = GEN8_CSB_READ_PTR(ptr);
> -			write = GEN8_CSB_WRITE_PTR(ptr);
> -			seq_printf(m, "\tExeclist CSB read %d [%d cached], write %d [%d from hws], interrupt posted? %s\n",
> -				   read, execlists->csb_head,
> -				   write,
> -				   intel_read_status_page(engine, intel_hws_csb_write_index(engine->i915)),
> -				   yesno(test_bit(ENGINE_IRQ_EXECLIST,
> -						  &engine->irq_posted)));
> -			if (read >= GEN8_CSB_ENTRIES)
> -				read = 0;
> -			if (write >= GEN8_CSB_ENTRIES)
> -				write = 0;
> -			if (read > write)
> -				write += GEN8_CSB_ENTRIES;
> -			while (read < write) {
> -				idx = ++read % GEN8_CSB_ENTRIES;
> -				seq_printf(m, "\tExeclist CSB[%d]: 0x%08x [0x%08x in hwsp], context: %d [%d in hwsp]\n",
> -					   idx,
> -					   I915_READ(RING_CONTEXT_STATUS_BUF_LO(engine, idx)),
> -					   hws[idx * 2],
> -					   I915_READ(RING_CONTEXT_STATUS_BUF_HI(engine, idx)),
> -					   hws[idx * 2 + 1]);
> -			}
> -
> -			rcu_read_lock();
> -			for (idx = 0; idx < execlists_num_ports(execlists); idx++) {
> -				unsigned int count;
> -
> -				rq = port_unpack(&execlists->port[idx], &count);
> -				if (rq) {
> -					seq_printf(m, "\t\tELSP[%d] count=%d, ",
> -						   idx, count);
> -					print_request(m, rq, "rq: ");
> -				} else {
> -					seq_printf(m, "\t\tELSP[%d] idle\n",
> -						   idx);
> -				}
> -			}
> -			rcu_read_unlock();
> -
> -			spin_lock_irq(&engine->timeline->lock);
> -			for (rb = execlists->first; rb; rb = rb_next(rb)) {
> -				struct i915_priolist *p =
> -					rb_entry(rb, typeof(*p), node);
> -
> -				list_for_each_entry(rq, &p->requests,
> -						    priotree.link)
> -					print_request(m, rq, "\t\tQ ");
> -			}
> -			spin_unlock_irq(&engine->timeline->lock);
> -		} else if (INTEL_GEN(dev_priv) > 6) {
> -			seq_printf(m, "\tPP_DIR_BASE: 0x%08x\n",
> -				   I915_READ(RING_PP_DIR_BASE(engine)));
> -			seq_printf(m, "\tPP_DIR_BASE_READ: 0x%08x\n",
> -				   I915_READ(RING_PP_DIR_BASE_READ(engine)));
> -			seq_printf(m, "\tPP_DIR_DCLV: 0x%08x\n",
> -				   I915_READ(RING_PP_DIR_DCLV(engine)));
> -		}
> -
> -		spin_lock_irq(&b->rb_lock);
> -		for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
> -			struct intel_wait *w = rb_entry(rb, typeof(*w), node);
> -
> -			seq_printf(m, "\t%s [%d] waiting for %x\n",
> -				   w->tsk->comm, w->tsk->pid, w->seqno);
> -		}
> -		spin_unlock_irq(&b->rb_lock);
> -
> -		seq_puts(m, "\n");
> -	}
> +	p = drm_seq_file_printer(m);
> +	for_each_engine(engine, dev_priv, id)
> +		intel_engine_dump(engine, &p);
>  
>  	intel_runtime_pm_put(dev_priv);
>  
> diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
> index 807a7aafc089..a59b2a30ff5a 100644
> --- a/drivers/gpu/drm/i915/intel_engine_cs.c
> +++ b/drivers/gpu/drm/i915/intel_engine_cs.c
> @@ -22,6 +22,8 @@
>   *
>   */
>  
> +#include <drm/drm_print.h>
> +
>  #include "i915_drv.h"
>  #include "intel_ringbuffer.h"
>  #include "intel_lrc.h"
> @@ -1616,6 +1618,164 @@ bool intel_engine_can_store_dword(struct intel_engine_cs *engine)
>  	}
>  }
>  
> +static void print_request(struct drm_printer *m,
> +			  struct drm_i915_gem_request *rq,
> +			  const char *prefix)
> +{
> +	drm_printf(m, "%s%x [%x:%x] prio=%d @ %dms: %s\n", prefix,
> +		   rq->global_seqno, rq->ctx->hw_id, rq->fence.seqno,
> +		   rq->priotree.priority,
> +		   jiffies_to_msecs(jiffies - rq->emitted_jiffies),
> +		   rq->timeline->common->name);
> +}
> +
> +void intel_engine_dump(struct intel_engine_cs *engine, struct drm_printer *m)
> +{
> +	struct intel_breadcrumbs *b = &engine->breadcrumbs;
> +	struct i915_gpu_error *error = &engine->i915->gpu_error;
> +	struct drm_i915_private *dev_priv = engine->i915;
> +	struct drm_i915_gem_request *rq;
> +	struct rb_node *rb;
> +	u64 addr;
> +
> +	drm_printf(m, "%s\n", engine->name);
> +	drm_printf(m, "\tcurrent seqno %x, last %x, hangcheck %x [%d ms], inflight %d\n",
> +		   intel_engine_get_seqno(engine),
> +		   intel_engine_last_submit(engine),
> +		   engine->hangcheck.seqno,
> +		   jiffies_to_msecs(jiffies - engine->hangcheck.action_timestamp),
> +		   engine->timeline->inflight_seqnos);
> +	drm_printf(m, "\tReset count: %d\n",
> +		   i915_reset_engine_count(error, engine));
> +
> +	rcu_read_lock();
> +
> +	drm_printf(m, "\tRequests:\n");
> +
> +	rq = list_first_entry(&engine->timeline->requests,
> +			      struct drm_i915_gem_request, link);
> +	if (&rq->link != &engine->timeline->requests)
> +		print_request(m, rq, "\t\tfirst  ");
> +
> +	rq = list_last_entry(&engine->timeline->requests,
> +			     struct drm_i915_gem_request, link);
> +	if (&rq->link != &engine->timeline->requests)
> +		print_request(m, rq, "\t\tlast   ");
> +
> +	rq = i915_gem_find_active_request(engine);
> +	if (rq) {
> +		print_request(m, rq, "\t\tactive ");
> +		drm_printf(m,
> +			   "\t\t[head %04x, postfix %04x, tail %04x, batch 0x%08x_%08x]\n",
> +			   rq->head, rq->postfix, rq->tail,
> +			   rq->batch ? upper_32_bits(rq->batch->node.start) : ~0u,
> +			   rq->batch ? lower_32_bits(rq->batch->node.start) : ~0u);
> +	}
> +
> +	drm_printf(m, "\tRING_START: 0x%08x [0x%08x]\n",
> +		   I915_READ(RING_START(engine->mmio_base)),
> +		   rq ? i915_ggtt_offset(rq->ring->vma) : 0);
> +	drm_printf(m, "\tRING_HEAD:  0x%08x [0x%08x]\n",
> +		   I915_READ(RING_HEAD(engine->mmio_base)) & HEAD_ADDR,
> +		   rq ? rq->ring->head : 0);
> +	drm_printf(m, "\tRING_TAIL:  0x%08x [0x%08x]\n",
> +		   I915_READ(RING_TAIL(engine->mmio_base)) & TAIL_ADDR,
> +		   rq ? rq->ring->tail : 0);
> +	drm_printf(m, "\tRING_CTL:   0x%08x [%s]\n",
> +		   I915_READ(RING_CTL(engine->mmio_base)),
> +		   I915_READ(RING_CTL(engine->mmio_base)) & (RING_WAIT | RING_WAIT_SEMAPHORE) ? "waiting" : "");
> +
> +	rcu_read_unlock();
> +
> +	addr = intel_engine_get_active_head(engine);
> +	drm_printf(m, "\tACTHD:  0x%08x_%08x\n",
> +		   upper_32_bits(addr), lower_32_bits(addr));
> +	addr = intel_engine_get_last_batch_head(engine);
> +	drm_printf(m, "\tBBADDR: 0x%08x_%08x\n",
> +		   upper_32_bits(addr), lower_32_bits(addr));
> +
> +	if (i915_modparams.enable_execlists) {
> +		const u32 *hws = &engine->status_page.page_addr[I915_HWS_CSB_BUF0_INDEX];
> +		struct intel_engine_execlists * const execlists = &engine->execlists;
> +		u32 ptr, read, write;
> +		unsigned int idx;
> +
> +		drm_printf(m, "\tExeclist status: 0x%08x %08x\n",
> +			   I915_READ(RING_EXECLIST_STATUS_LO(engine)),
> +			   I915_READ(RING_EXECLIST_STATUS_HI(engine)));
> +
> +		ptr = I915_READ(RING_CONTEXT_STATUS_PTR(engine));
> +		read = GEN8_CSB_READ_PTR(ptr);
> +		write = GEN8_CSB_WRITE_PTR(ptr);
> +		drm_printf(m, "\tExeclist CSB read %d [%d cached], write %d [%d from hws], interrupt posted? %s\n",
> +			   read, execlists->csb_head,
> +			   write,
> +			   intel_read_status_page(engine, intel_hws_csb_write_index(engine->i915)),
> +			   yesno(test_bit(ENGINE_IRQ_EXECLIST,
> +					  &engine->irq_posted)));
> +		if (read >= GEN8_CSB_ENTRIES)
> +			read = 0;
> +		if (write >= GEN8_CSB_ENTRIES)
> +			write = 0;
> +		if (read > write)
> +			write += GEN8_CSB_ENTRIES;
> +		while (read < write) {
> +			idx = ++read % GEN8_CSB_ENTRIES;
> +			drm_printf(m, "\tExeclist CSB[%d]: 0x%08x [0x%08x in hwsp], context: %d [%d in hwsp]\n",
> +				   idx,
> +				   I915_READ(RING_CONTEXT_STATUS_BUF_LO(engine, idx)),
> +				   hws[idx * 2],
> +				   I915_READ(RING_CONTEXT_STATUS_BUF_HI(engine, idx)),
> +				   hws[idx * 2 + 1]);
> +		}
> +
> +		rcu_read_lock();
> +		for (idx = 0; idx < execlists_num_ports(execlists); idx++) {
> +			unsigned int count;
> +
> +			rq = port_unpack(&execlists->port[idx], &count);
> +			if (rq) {
> +				drm_printf(m, "\t\tELSP[%d] count=%d, ",
> +					   idx, count);
> +				print_request(m, rq, "rq: ");
> +			} else {
> +				drm_printf(m, "\t\tELSP[%d] idle\n",
> +					   idx);
> +			}
> +		}
> +		rcu_read_unlock();
> +
> +		spin_lock_irq(&engine->timeline->lock);
> +		for (rb = execlists->first; rb; rb = rb_next(rb)) {
> +			struct i915_priolist *p =
> +				rb_entry(rb, typeof(*p), node);
> +
> +			list_for_each_entry(rq, &p->requests,
> +					    priotree.link)
> +				print_request(m, rq, "\t\tQ ");
> +		}
> +		spin_unlock_irq(&engine->timeline->lock);
> +	} else if (INTEL_GEN(dev_priv) > 6) {
> +		drm_printf(m, "\tPP_DIR_BASE: 0x%08x\n",
> +			   I915_READ(RING_PP_DIR_BASE(engine)));
> +		drm_printf(m, "\tPP_DIR_BASE_READ: 0x%08x\n",
> +			   I915_READ(RING_PP_DIR_BASE_READ(engine)));
> +		drm_printf(m, "\tPP_DIR_DCLV: 0x%08x\n",
> +			   I915_READ(RING_PP_DIR_DCLV(engine)));
> +	}
> +
> +	spin_lock_irq(&b->rb_lock);
> +	for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
> +		struct intel_wait *w = rb_entry(rb, typeof(*w), node);
> +
> +		drm_printf(m, "\t%s [%d] waiting for %x\n",
> +			   w->tsk->comm, w->tsk->pid, w->seqno);
> +	}
> +	spin_unlock_irq(&b->rb_lock);
> +
> +	drm_printf(m, "\n");
> +}
> +
>  #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
>  #include "selftests/mock_engine.c"
>  #endif
> diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
> index 0fedda17488c..17186f067408 100644
> --- a/drivers/gpu/drm/i915/intel_ringbuffer.h
> +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
> @@ -7,6 +7,8 @@
>  #include "i915_gem_timeline.h"
>  #include "i915_selftest.h"
>  
> +struct drm_printer;
> +
>  #define I915_CMD_HASH_ORDER 9
>  
>  /* Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill,
> @@ -839,4 +841,6 @@ void intel_engines_reset_default_submission(struct drm_i915_private *i915);
>  
>  bool intel_engine_can_store_dword(struct intel_engine_cs *engine);
>  
> +void intel_engine_dump(struct intel_engine_cs *engine, struct drm_printer *p);
> +
>  #endif /* _INTEL_RINGBUFFER_H_ */
> -- 
> 2.14.2
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH v2 4/5] drm/i915/selftests: Hold the rpm wakeref for the reset tests
  2017-10-09 11:03 ` [PATCH v2 4/5] drm/i915/selftests: Hold the rpm wakeref for the reset tests Chris Wilson
@ 2017-10-09 11:50   ` Mika Kuoppala
  0 siblings, 0 replies; 11+ messages in thread
From: Mika Kuoppala @ 2017-10-09 11:50 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx

Chris Wilson <chris@chris-wilson.co.uk> writes:

> The lowlevel reset functions expect the caller to be holding the rpm
> wakeref for the device access across the reset. We were not explicitly
> doing this in the sefltest, so for simplicity acquire the wakeref for
> the duration of all subtests.
>
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>

Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>

> ---
>  drivers/gpu/drm/i915/selftests/intel_hangcheck.c | 9 ++++++++-
>  1 file changed, 8 insertions(+), 1 deletion(-)
>
> diff --git a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
> index 7e1bdd88eda3..71ce06680d66 100644
> --- a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
> +++ b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
> @@ -878,9 +878,16 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915)
>  		SUBTEST(igt_reset_queue),
>  		SUBTEST(igt_handle_error),
>  	};
> +	int err;
>  
>  	if (!intel_has_gpu_reset(i915))
>  		return 0;
>  
> -	return i915_subtests(tests, i915);
> +	intel_runtime_pm_get(i915);
> +
> +	err = i915_subtests(tests, i915);
> +
> +	intel_runtime_pm_put(i915);
> +
> +	return err;
>  }
> -- 
> 2.14.2
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH v2 2/5] drm/i915/selftests: Pretty print engine state when requests fail to start
  2017-10-09 11:02 ` [PATCH v2 2/5] drm/i915/selftests: Pretty print engine state when requests fail to start Chris Wilson
@ 2017-10-09 11:50   ` Mika Kuoppala
  0 siblings, 0 replies; 11+ messages in thread
From: Mika Kuoppala @ 2017-10-09 11:50 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx

Chris Wilson <chris@chris-wilson.co.uk> writes:

> During hangcheck testing, we try to execute requests following the GPU
> reset, and in particular want to try and debug when those fail.
>
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Mika Kuoppala <mika.kuoppala@intel.com>

Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>

> ---
>  drivers/gpu/drm/i915/selftests/intel_hangcheck.c | 10 ++++++++++
>  1 file changed, 10 insertions(+)
>
> diff --git a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
> index 08159b268893..7e1bdd88eda3 100644
> --- a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
> +++ b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
> @@ -624,8 +624,11 @@ static int igt_wait_reset(void *arg)
>  	__i915_add_request(rq, true);
>  
>  	if (!wait_for_hang(&h, rq)) {
> +		struct drm_printer p = drm_info_printer(i915->drm.dev);
> +
>  		pr_err("Failed to start request %x, at %x\n",
>  		       rq->fence.seqno, hws_seqno(&h, rq));
> +		intel_engine_dump(rq->engine, &p);
>  
>  		i915_reset(i915, 0);
>  		i915_gem_set_wedged(i915);
> @@ -716,8 +719,12 @@ static int igt_reset_queue(void *arg)
>  			__i915_add_request(rq, true);
>  
>  			if (!wait_for_hang(&h, prev)) {
> +				struct drm_printer p = drm_info_printer(i915->drm.dev);
> +
>  				pr_err("Failed to start request %x, at %x\n",
>  				       prev->fence.seqno, hws_seqno(&h, prev));
> +				intel_engine_dump(rq->engine, &p);
> +
>  				i915_gem_request_put(rq);
>  				i915_gem_request_put(prev);
>  
> @@ -818,8 +825,11 @@ static int igt_handle_error(void *arg)
>  	__i915_add_request(rq, true);
>  
>  	if (!wait_for_hang(&h, rq)) {
> +		struct drm_printer p = drm_info_printer(i915->drm.dev);
> +
>  		pr_err("Failed to start request %x, at %x\n",
>  		       rq->fence.seqno, hws_seqno(&h, rq));
> +		intel_engine_dump(rq->engine, &p);
>  
>  		i915_reset(i915, 0);
>  		i915_gem_set_wedged(i915);
> -- 
> 2.14.2
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 11+ messages in thread

* ✗ Fi.CI.BAT: warning for series starting with [v2,1/5] drm/i915: Make i915_engine_info pretty printer to standalone
  2017-10-09 11:02 [PATCH v2 1/5] drm/i915: Make i915_engine_info pretty printer to standalone Chris Wilson
                   ` (4 preceding siblings ...)
  2017-10-09 11:48 ` [PATCH v2 1/5] drm/i915: Make i915_engine_info pretty printer to standalone Mika Kuoppala
@ 2017-10-09 13:41 ` Patchwork
  5 siblings, 0 replies; 11+ messages in thread
From: Patchwork @ 2017-10-09 13:41 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx

== Series Details ==

Series: series starting with [v2,1/5] drm/i915: Make i915_engine_info pretty printer to standalone
URL   : https://patchwork.freedesktop.org/series/31586/
State : warning

== Summary ==

Series 31586v1 series starting with [v2,1/5] drm/i915: Make i915_engine_info pretty printer to standalone
https://patchwork.freedesktop.org/api/1.0/series/31586/revisions/1/mbox/

Test chamelium:
        Subgroup dp-crc-fast:
                pass       -> FAIL       (fi-kbl-7500u) fdo#102514
        Subgroup common-hpd-after-suspend:
                dmesg-warn -> PASS       (fi-kbl-7500u) fdo#102505
Test kms_pipe_crc_basic:
        Subgroup suspend-read-crc-pipe-a:
                pass       -> DMESG-WARN (fi-cfl-s)
        Subgroup suspend-read-crc-pipe-b:
                pass       -> DMESG-WARN (fi-byt-n2820) fdo#101705

fdo#102514 https://bugs.freedesktop.org/show_bug.cgi?id=102514
fdo#102505 https://bugs.freedesktop.org/show_bug.cgi?id=102505
fdo#101705 https://bugs.freedesktop.org/show_bug.cgi?id=101705

fi-bdw-5557u     total:289  pass:268  dwarn:0   dfail:0   fail:0   skip:21  time:451s
fi-bdw-gvtdvm    total:289  pass:265  dwarn:0   dfail:0   fail:0   skip:24  time:473s
fi-blb-e6850     total:289  pass:223  dwarn:1   dfail:0   fail:0   skip:65  time:389s
fi-bsw-n3050     total:289  pass:243  dwarn:0   dfail:0   fail:0   skip:46  time:572s
fi-bwr-2160      total:289  pass:183  dwarn:0   dfail:0   fail:0   skip:106 time:284s
fi-bxt-dsi       total:289  pass:259  dwarn:0   dfail:0   fail:0   skip:30  time:522s
fi-bxt-j4205     total:289  pass:260  dwarn:0   dfail:0   fail:0   skip:29  time:520s
fi-byt-j1900     total:289  pass:253  dwarn:1   dfail:0   fail:0   skip:35  time:530s
fi-byt-n2820     total:289  pass:249  dwarn:1   dfail:0   fail:0   skip:39  time:525s
fi-cfl-s         total:289  pass:255  dwarn:2   dfail:0   fail:0   skip:32  time:565s
fi-elk-e7500     total:289  pass:229  dwarn:0   dfail:0   fail:0   skip:60  time:433s
fi-glk-1         total:289  pass:261  dwarn:0   dfail:0   fail:0   skip:28  time:604s
fi-hsw-4770      total:289  pass:262  dwarn:0   dfail:0   fail:0   skip:27  time:433s
fi-hsw-4770r     total:289  pass:262  dwarn:0   dfail:0   fail:0   skip:27  time:418s
fi-ivb-3520m     total:289  pass:260  dwarn:0   dfail:0   fail:0   skip:29  time:507s
fi-ivb-3770      total:289  pass:260  dwarn:0   dfail:0   fail:0   skip:29  time:475s
fi-kbl-7500u     total:289  pass:264  dwarn:0   dfail:0   fail:1   skip:24  time:493s
fi-kbl-7560u     total:289  pass:270  dwarn:0   dfail:0   fail:0   skip:19  time:582s
fi-kbl-7567u     total:289  pass:265  dwarn:4   dfail:0   fail:0   skip:20  time:491s
fi-kbl-r         total:289  pass:262  dwarn:0   dfail:0   fail:0   skip:27  time:592s
fi-pnv-d510      total:289  pass:222  dwarn:1   dfail:0   fail:0   skip:66  time:655s
fi-skl-6260u     total:289  pass:269  dwarn:0   dfail:0   fail:0   skip:20  time:473s
fi-skl-6700hq    total:289  pass:263  dwarn:0   dfail:0   fail:0   skip:26  time:667s
fi-skl-6700k     total:289  pass:265  dwarn:0   dfail:0   fail:0   skip:24  time:528s
fi-skl-6770hq    total:289  pass:269  dwarn:0   dfail:0   fail:0   skip:20  time:525s
fi-skl-gvtdvm    total:289  pass:266  dwarn:0   dfail:0   fail:0   skip:23  time:475s
fi-snb-2520m     total:289  pass:250  dwarn:0   dfail:0   fail:0   skip:39  time:579s
fi-snb-2600      total:289  pass:249  dwarn:0   dfail:0   fail:0   skip:40  time:429s

7579653b5bfdee2b6a20aab20c36c516cbe3812f drm-tip: 2017y-10m-09d-12h-55m-47s UTC integration manifest
1ce22dda4764 drm/i915: Provide an assert for when we expect forcewake to be held
8db8c9aa3577 drm/i915/selftests: Hold the rpm wakeref for the reset tests
945d30722e0c drm/i915: Hold forcewake for the duration of reset+restart
dd1f47582977 drm/i915/selftests: Pretty print engine state when requests fail to start
2edb96588140 drm/i915: Make i915_engine_info pretty printer to standalone

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_5951/
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 11+ messages in thread

end of thread, other threads:[~2017-10-09 13:41 UTC | newest]

Thread overview: 11+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2017-10-09 11:02 [PATCH v2 1/5] drm/i915: Make i915_engine_info pretty printer to standalone Chris Wilson
2017-10-09 11:02 ` [PATCH v2 2/5] drm/i915/selftests: Pretty print engine state when requests fail to start Chris Wilson
2017-10-09 11:50   ` Mika Kuoppala
2017-10-09 11:02 ` [PATCH v2 3/5] drm/i915: Hold forcewake for the duration of reset+restart Chris Wilson
2017-10-09 11:32   ` Mika Kuoppala
2017-10-09 11:37     ` Chris Wilson
2017-10-09 11:03 ` [PATCH v2 4/5] drm/i915/selftests: Hold the rpm wakeref for the reset tests Chris Wilson
2017-10-09 11:50   ` Mika Kuoppala
2017-10-09 11:03 ` [PATCH v2 5/5] drm/i915: Provide an assert for when we expect forcewake to be held Chris Wilson
2017-10-09 11:48 ` [PATCH v2 1/5] drm/i915: Make i915_engine_info pretty printer to standalone Mika Kuoppala
2017-10-09 13:41 ` ✗ Fi.CI.BAT: warning for series starting with [v2,1/5] " Patchwork

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox