From: John.C.Harrison@Intel.com
To: Intel-GFX@Lists.FreeDesktop.Org
Subject: [PATCH v6 33/34] drm/i915: Added debug state dump facilities to scheduler
Date: Wed, 20 Apr 2016 18:13:51 +0100 [thread overview]
Message-ID: <1461172435-4256-34-git-send-email-John.C.Harrison@Intel.com> (raw)
In-Reply-To: <1461172435-4256-1-git-send-email-John.C.Harrison@Intel.com>
From: John Harrison <John.C.Harrison@Intel.com>
When debugging batch buffer submission issues, it is useful to be able
to see what the current state of the scheduler is. This change adds
functions for decoding the internal scheduler state and reporting it.
v3: Updated a debug message with the new state_str() function.
v4: Wrapped some long lines to keep the style checker happy. Removed
the fence/sync code as that will now be part of a separate patch series.
v5: Removed forward declarations and white space. Added documentation.
[Joonas Lahtinen]
Also squashed in later patch to add seqno information from the start.
It was only being added in a separate patch due to historical reasons
which have since gone away.
v6: Updated to newer nightly (lots of ring -> engine renaming).
Added 'for_each_scheduler_node()' helper macro. Updated to use
'to_i915()' instead of dev_private. Converted all enum labels to
uppercase. Moved the enum to string conversion function to debugfs.c
rather than scheduler.c [review feedback from Joonas Lahtinen]
For: VIZ-1587
Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
---
drivers/gpu/drm/i915/i915_debugfs.c | 2 +-
drivers/gpu/drm/i915/i915_scheduler.c | 270 +++++++++++++++++++++++++++++++++-
drivers/gpu/drm/i915/i915_scheduler.h | 15 ++
3 files changed, 284 insertions(+), 3 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 1d04cde..9ac486f 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -3609,7 +3609,7 @@ static int i915_drrs_status(struct seq_file *m, void *unused)
return 0;
}
-static const char *i915_scheduler_queue_status_str(
+const char *i915_scheduler_queue_status_str(
enum i915_scheduler_queue_status status)
{
static char str[50];
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index fd53833..92ca786 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -48,6 +48,85 @@ bool i915_scheduler_is_enabled(struct drm_device *dev)
return dev_priv->scheduler != NULL;
}
+const char *i915_qe_state_str(struct i915_scheduler_queue_entry *node)
+{
+ static char str[50];
+ char *ptr = str;
+
+ *(ptr++) = node->bumped ? 'B' : '-';
+ *(ptr++) = i915_gem_request_completed(node->params.request) ? 'C' : '-';
+
+ *ptr = 0;
+
+ return str;
+}
+
+char i915_scheduler_queue_status_chr(enum i915_scheduler_queue_status status)
+{
+ switch (status) {
+ case I915_SQS_NONE:
+ return 'N';
+
+ case I915_SQS_QUEUED:
+ return 'Q';
+
+ case I915_SQS_POPPED:
+ return 'X';
+
+ case I915_SQS_FLYING:
+ return 'F';
+
+ case I915_SQS_COMPLETE:
+ return 'C';
+
+ case I915_SQS_DEAD:
+ return 'D';
+
+ default:
+ break;
+ }
+
+ return '?';
+}
+
+const char *i915_scheduler_flag_str(uint32_t flags)
+{
+ static char str[100];
+ char *ptr = str;
+
+ *ptr = 0;
+
+#define TEST_FLAG(flag, msg) \
+ do { \
+ if (flags & (flag)) { \
+ strcpy(ptr, msg); \
+ ptr += strlen(ptr); \
+ flags &= ~(flag); \
+ } \
+ } while (0)
+
+ TEST_FLAG(I915_SF_INTERRUPTS_ENABLED, "IntOn|");
+ TEST_FLAG(I915_SF_SUBMITTING, "Submitting|");
+ TEST_FLAG(I915_SF_DUMP_FORCE, "DumpForce|");
+ TEST_FLAG(I915_SF_DUMP_DETAILS, "DumpDetails|");
+ TEST_FLAG(I915_SF_DUMP_DEPENDENCIES, "DumpDeps|");
+ TEST_FLAG(I915_SF_DUMP_SEQNO, "DumpSeqno|");
+
+#undef TEST_FLAG
+
+ if (flags) {
+ sprintf(ptr, "Unknown_0x%X|", flags);
+ ptr += strlen(ptr);
+ }
+
+ if (ptr == str)
+ strcpy(str, "-");
+ else
+ ptr[-1] = 0;
+
+ return str;
+}
+
/**
* i915_scheduler_init - Initialise the scheduler.
* @dev: DRM device
@@ -1107,6 +1186,193 @@ err_unref:
return true;
}
+static int i915_scheduler_dump_locked(struct intel_engine_cs *engine,
+ const char *msg)
+{
+ struct drm_i915_private *dev_priv;
+ struct i915_scheduler *scheduler;
+ struct i915_scheduler_queue_entry *node;
+ int flying = 0, queued = 0, complete = 0, other = 0;
+ static int old_flying = -1, old_queued = -1, old_complete = -1;
+ bool b_dump;
+ char brkt[2] = { '<', '>' };
+
+ if (!engine)
+ return -EINVAL;
+
+ dev_priv = to_i915(engine->dev);
+ scheduler = dev_priv->scheduler;
+
+ for_each_scheduler_node(node, engine->id) {
+ if (I915_SQS_IS_QUEUED(node))
+ queued++;
+ else if (I915_SQS_IS_FLYING(node))
+ flying++;
+ else if (I915_SQS_IS_COMPLETE(node))
+ complete++;
+ else
+ other++;
+ }
+
+ b_dump = (flying != old_flying) ||
+ (queued != old_queued) ||
+ (complete != old_complete);
+ if (scheduler->flags[engine->id] & I915_SF_DUMP_FORCE) {
+ if (!b_dump) {
+ b_dump = true;
+ brkt[0] = '{';
+ brkt[1] = '}';
+ }
+
+ scheduler->flags[engine->id] &= ~I915_SF_DUMP_FORCE;
+ }
+
+ if (b_dump) {
+ old_flying = flying;
+ old_queued = queued;
+ old_complete = complete;
+ DRM_DEBUG_DRIVER("<%s> Q:%02d, F:%02d, C:%02d, O:%02d, "
+ "Flags = %s, Next = %d:%d %c%s%c\n",
+ engine->name, queued, flying, complete, other,
+ i915_scheduler_flag_str(scheduler->flags[engine->id]),
+ dev_priv->request_uniq, dev_priv->next_seqno,
+ brkt[0], msg, brkt[1]);
+ } else {
+ /*DRM_DEBUG_DRIVER("<%s> Q:%02d, F:%02d, C:%02d, O:%02d"
+ ", Flags = %s, Next = %d:%d [%s]\n",
+ engine->name,
+ queued, flying, complete, other,
+ i915_scheduler_flag_str(scheduler->flags[engine->id]),
+ dev_priv->request_uniq, dev_priv->next_seqno, msg); */
+
+ return 0;
+ }
+
+ if (scheduler->flags[engine->id] & I915_SF_DUMP_SEQNO) {
+ uint32_t seqno;
+
+ seqno = engine->get_seqno(engine);
+
+ DRM_DEBUG_DRIVER("<%s> Seqno = %d\n", engine->name, seqno);
+ }
+
+ if (scheduler->flags[engine->id] & I915_SF_DUMP_DETAILS) {
+ int i, deps;
+ uint32_t count, counts[I915_SQS_MAX];
+
+ memset(counts, 0x00, sizeof(counts));
+
+ for_each_scheduler_node(node, engine->id) {
+ if (node->status < I915_SQS_MAX) {
+ count = counts[node->status]++;
+ } else {
+ DRM_DEBUG_DRIVER("<%s> Unknown status: %d!\n",
+ engine->name, node->status);
+ count = -1;
+ }
+
+ deps = 0;
+ for (i = 0; i < node->num_deps; i++)
+ if (i915_scheduler_is_dependency_valid(node, i))
+ deps++;
+
+ DRM_DEBUG_DRIVER("<%s> %c:%02d> uniq = %d, seqno"
+ " = %d/%s, deps = %d / %d, %s [pri = "
+ "%4d]\n", engine->name,
+ i915_scheduler_queue_status_chr(node->status),
+ count,
+ node->params.request->uniq,
+ node->params.request->seqno,
+ node->params.engine->name,
+ deps, node->num_deps,
+ i915_qe_state_str(node),
+ node->priority);
+
+ if ((scheduler->flags[engine->id] & I915_SF_DUMP_DEPENDENCIES)
+ == 0)
+ continue;
+
+ for (i = 0; i < node->num_deps; i++)
+ if (node->dep_list[i])
+ DRM_DEBUG_DRIVER("<%s> |-%c:"
+ "%02d%c uniq = %d, seqno = %d/%s, %s [pri = %4d]\n",
+ engine->name,
+ i915_scheduler_queue_status_chr(node->dep_list[i]->status),
+ i,
+ i915_scheduler_is_dependency_valid(node, i)
+ ? '>' : '#',
+ node->dep_list[i]->params.request->uniq,
+ node->dep_list[i]->params.request->seqno,
+ node->dep_list[i]->params.engine->name,
+ i915_qe_state_str(node->dep_list[i]),
+ node->dep_list[i]->priority);
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * i915_scheduler_dump - dump the scheduler's internal state to the debug log.
+ * @engine: Engine to dump info for
+ * @msg: A reason why it is being dumped
+ * For debugging purposes, it can be very useful to see the internal state of
+ * the scheduler for a given engine.
+ */
+int i915_scheduler_dump(struct intel_engine_cs *engine, const char *msg)
+{
+ struct drm_i915_private *dev_priv = to_i915(engine->dev);
+ struct i915_scheduler *scheduler = dev_priv->scheduler;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&scheduler->lock, flags);
+ ret = i915_scheduler_dump_locked(engine, msg);
+ spin_unlock_irqrestore(&scheduler->lock, flags);
+
+ return ret;
+}
+
+static int i915_scheduler_dump_all_locked(struct drm_device *dev,
+ const char *msg)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct i915_scheduler *scheduler = dev_priv->scheduler;
+ struct intel_engine_cs *engine;
+ int r, ret = 0;
+
+ for_each_engine(engine, dev_priv) {
+ scheduler->flags[engine->id] |= I915_SF_DUMP_FORCE |
+ I915_SF_DUMP_DETAILS |
+ I915_SF_DUMP_SEQNO |
+ I915_SF_DUMP_DEPENDENCIES;
+ r = i915_scheduler_dump_locked(engine, msg);
+ if (ret == 0)
+ ret = r;
+ }
+
+ return ret;
+}
+
+/**
+ * i915_scheduler_dump_all - dump the scheduler's internal state to the debug
+ * log.
+ * @dev: DRM device
+ * @msg: A reason why it is being dumped
+ * For debugging purposes, it can be very useful to see the internal state of
+ * the scheduler.
+ */
+int i915_scheduler_dump_all(struct drm_device *dev, const char *msg)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct i915_scheduler *scheduler = dev_priv->scheduler;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&scheduler->lock, flags);
+ ret = i915_scheduler_dump_all_locked(dev, msg);
+ spin_unlock_irqrestore(&scheduler->lock, flags);
+
+ return ret;
+}
+
/**
* i915_scheduler_query_stats - return various scheduler statistics
* @engine: Engine to report on
@@ -1394,10 +1660,10 @@ void i915_scheduler_closefile(struct drm_device *dev, struct drm_file *file)
continue;
if (!I915_SQS_IS_COMPLETE(node))
- DRM_DEBUG_DRIVER("Closing file handle with outstanding work: %d:%d/%d on %s\n",
+ DRM_DEBUG_DRIVER("Closing file handle with outstanding work: %d:%d/%s on %s\n",
node->params.request->uniq,
node->params.request->seqno,
- node->status,
+ i915_qe_state_str(node),
engine->name);
i915_scheduler_file_queue_dec(node->params.file);
diff --git a/drivers/gpu/drm/i915/i915_scheduler.h b/drivers/gpu/drm/i915/i915_scheduler.h
index 0df16e7..7c0edf5 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.h
+++ b/drivers/gpu/drm/i915/i915_scheduler.h
@@ -41,6 +41,9 @@ enum i915_scheduler_queue_status {
/* Limit value for use with arrays/loops */
I915_SQS_MAX
};
+char i915_scheduler_queue_status_chr(enum i915_scheduler_queue_status status);
+const char *i915_scheduler_queue_status_str(
+ enum i915_scheduler_queue_status status);
#define I915_SQS_IS_QUEUED(node) (((node)->status == I915_SQS_QUEUED))
#define I915_SQS_IS_FLYING(node) (((node)->status == I915_SQS_FLYING))
@@ -74,6 +77,7 @@ struct i915_scheduler_queue_entry {
/* List of all scheduler queue entry nodes */
struct list_head link;
};
+const char *i915_qe_state_str(struct i915_scheduler_queue_entry *node);
struct i915_scheduler_node_states {
uint32_t flying;
@@ -132,9 +136,17 @@ struct i915_scheduler {
/* Flag bits for i915_scheduler::flags */
enum {
+ /* Internal state */
I915_SF_INTERRUPTS_ENABLED = (1 << 0),
I915_SF_SUBMITTING = (1 << 1),
+
+ /* Dump/debug flags */
+ I915_SF_DUMP_FORCE = (1 << 8),
+ I915_SF_DUMP_DETAILS = (1 << 9),
+ I915_SF_DUMP_DEPENDENCIES = (1 << 10),
+ I915_SF_DUMP_SEQNO = (1 << 11),
};
+const char *i915_scheduler_flag_str(uint32_t flags);
bool i915_scheduler_is_enabled(struct drm_device *dev);
int i915_scheduler_init(struct drm_device *dev);
@@ -150,6 +162,9 @@ void i915_scheduler_work_handler(struct work_struct *work);
int i915_scheduler_flush(struct intel_engine_cs *engine, bool is_locked);
int i915_scheduler_flush_stamp(struct intel_engine_cs *engine,
unsigned long stamp, bool is_locked);
+int i915_scheduler_dump(struct intel_engine_cs *engine,
+ const char *msg);
+int i915_scheduler_dump_all(struct drm_device *dev, const char *msg);
bool i915_scheduler_is_mutex_required(struct drm_i915_gem_request *req);
bool i915_scheduler_is_request_batch_buffer(struct drm_i915_gem_request *req);
int i915_scheduler_query_stats(struct intel_engine_cs *engine,
--
1.9.1
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
next prev parent reply other threads:[~2016-04-20 17:14 UTC|newest]
Thread overview: 50+ messages / expand[flat|nested] mbox.gz Atom feed top
2016-04-20 17:13 [PATCH v6 00/34] GPU scheduler for i915 driver John.C.Harrison
2016-04-20 17:13 ` [PATCH v6 01/34] drm/i915: Add total count to context status debugfs output John.C.Harrison
2016-04-20 17:13 ` [PATCH v6 02/34] drm/i915: Prelude to splitting i915_gem_do_execbuffer in two John.C.Harrison
2016-04-20 17:13 ` [PATCH v6 03/34] drm/i915: Split i915_dem_do_execbuffer() in half John.C.Harrison
2016-04-20 17:13 ` [PATCH v6 04/34] drm/i915: Cache request pointer in *_submission_final() John.C.Harrison
2016-04-20 17:13 ` [PATCH v6 05/34] drm/i915: Re-instate request->uniq because it is extremely useful John.C.Harrison
2016-04-20 17:13 ` [PATCH v6 06/34] drm/i915: Start of GPU scheduler John.C.Harrison
2016-06-10 16:24 ` Tvrtko Ursulin
2016-04-20 17:13 ` [PATCH v6 07/34] drm/i915: Disable hardware semaphores when GPU scheduler is enabled John.C.Harrison
2016-04-20 17:13 ` [PATCH v6 08/34] drm/i915: Force MMIO flips when scheduler enabled John.C.Harrison
2016-04-20 17:13 ` [PATCH v6 09/34] drm/i915: Added scheduler hook when closing DRM file handles John.C.Harrison
2016-04-20 17:13 ` [PATCH v6 10/34] drm/i915: Added scheduler hook into i915_gem_request_notify() John.C.Harrison
2016-04-20 17:13 ` [PATCH v6 11/34] drm/i915: Added deferred work handler for scheduler John.C.Harrison
2016-06-10 16:29 ` Tvrtko Ursulin
2016-04-20 17:13 ` [PATCH v6 12/34] drm/i915: Redirect execbuffer_final() via scheduler John.C.Harrison
2016-04-20 17:13 ` [PATCH v6 13/34] drm/i915: Keep the reserved space mechanism happy John.C.Harrison
2016-04-20 17:13 ` [PATCH v6 14/34] drm/i915: Added tracking/locking of batch buffer objects John.C.Harrison
2016-04-20 17:13 ` [PATCH v6 15/34] drm/i915: Hook scheduler node clean up into retire requests John.C.Harrison
2016-04-20 17:13 ` [PATCH v6 16/34] drm/i915: Added scheduler support to __wait_request() calls John.C.Harrison
2016-04-20 17:13 ` [PATCH v6 17/34] drm/i915: Added scheduler support to page fault handler John.C.Harrison
2016-04-20 17:13 ` [PATCH v6 18/34] drm/i915: Added scheduler flush calls to ring throttle and idle functions John.C.Harrison
2016-04-20 17:13 ` [PATCH v6 19/34] drm/i915: Add scheduler hook to GPU reset John.C.Harrison
2016-04-20 17:13 ` [PATCH v6 20/34] drm/i915: Added a module parameter to allow the scheduler to be disabled John.C.Harrison
2016-04-20 17:13 ` [PATCH v6 21/34] drm/i915: Support for 'unflushed' ring idle John.C.Harrison
2016-04-20 17:13 ` [PATCH v6 22/34] drm/i915: Defer seqno allocation until actual hardware submission time John.C.Harrison
2016-04-20 17:13 ` [PATCH v6 23/34] drm/i915: Added trace points to scheduler John.C.Harrison
2016-04-20 17:13 ` [PATCH v6 24/34] drm/i915: Added scheduler queue throttling by DRM file handle John.C.Harrison
2016-05-06 13:19 ` John.C.Harrison
2016-04-20 17:13 ` [PATCH v6 25/34] drm/i915: Added debugfs interface to scheduler tuning parameters John.C.Harrison
2016-04-20 17:13 ` [PATCH v6 26/34] drm/i915: Add early exit to execbuff_final() if insufficient ring space John.C.Harrison
2016-04-20 17:13 ` [PATCH v6 27/34] drm/i915: Added scheduler statistic reporting to debugfs John.C.Harrison
2016-05-06 13:21 ` John.C.Harrison
2016-04-20 17:13 ` [PATCH v6 28/34] drm/i915: Add scheduler support functions for TDR John.C.Harrison
2016-04-20 17:13 ` [PATCH v6 29/34] drm/i915: Enable GPU scheduler by default John.C.Harrison
2016-04-20 17:13 ` [PATCH v6 30/34] drm/i915: Add scheduling priority to per-context parameters John.C.Harrison
2016-04-20 17:13 ` [PATCH v6 31/34] drm/i915: Add support for retro-actively banning batch buffers John.C.Harrison
2016-04-20 17:13 ` [PATCH v6 32/34] drm/i915: Allow scheduler to manage inter-ring object synchronisation John.C.Harrison
2016-04-20 17:13 ` John.C.Harrison [this message]
2016-04-20 17:13 ` [PATCH v6 34/34] drm/i915: Scheduler state dump via debugfs John.C.Harrison
2016-04-20 17:13 ` [PATCH 1/1] drm/i915: Add wrapper for context priority interface John.C.Harrison
2016-04-20 17:13 ` [PATCH 1/2] igt/gem_ctx_param_basic: Updated to support scheduler " John.C.Harrison
2016-04-20 17:13 ` [PATCH 2/2] igt/gem_scheduler: Add gem_scheduler test John.C.Harrison
2016-04-21 9:43 ` ✓ Fi.CI.BAT: success for GPU scheduler for i915 driver (rev2) Patchwork
2016-04-22 15:37 ` [PATCH v6 00/34] GPU scheduler for i915 driver John Harrison
2016-04-23 9:57 ` ✗ Fi.CI.BAT: failure for GPU scheduler for i915 driver (rev2) Patchwork
2016-04-25 9:54 ` [PATCH v6 00/34] GPU scheduler for i915 driver Chris Wilson
2016-04-25 11:55 ` John Harrison
2016-04-26 13:20 ` Daniel Vetter
2016-05-05 11:54 ` John Harrison
2016-05-09 9:49 ` ✗ Fi.CI.BAT: warning for GPU scheduler for i915 driver (rev4) Patchwork
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1461172435-4256-34-git-send-email-John.C.Harrison@Intel.com \
--to=john.c.harrison@intel.com \
--cc=Intel-GFX@Lists.FreeDesktop.Org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox