From: John.C.Harrison@Intel.com
To: Intel-GFX@Lists.FreeDesktop.Org
Subject: [PATCH 32/39] drm/i915: Added scheduler statistic reporting to debugfs
Date: Mon, 23 Nov 2015 11:39:27 +0000 [thread overview]
Message-ID: <1448278774-31376-33-git-send-email-John.C.Harrison@Intel.com> (raw)
In-Reply-To: <1448278774-31376-1-git-send-email-John.C.Harrison@Intel.com>
From: John Harrison <John.C.Harrison@Intel.com>
It is useful to know what the scheduler is doing for both debugging
and performance analysis purposes. This change adds a bunch of
counters and such that keep track of various scheduler operations
(batches submitted, completed, flush requests, etc.). The data can
then be read in userland via the debugfs mechanism.
v2: Updated to match changes to scheduler implementation.
Change-Id: I3266c631cd70c9eeb2c235f88f493e60462f85d7
For: VIZ-1587
Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
---
drivers/gpu/drm/i915/i915_debugfs.c | 77 ++++++++++++++++++++++++++
drivers/gpu/drm/i915/i915_gem_execbuffer.c | 11 +++-
drivers/gpu/drm/i915/i915_scheduler.c | 87 +++++++++++++++++++++++++++---
drivers/gpu/drm/i915/i915_scheduler.h | 36 +++++++++++++
4 files changed, 203 insertions(+), 8 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 8f1c10c..9e7d67d 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -3603,6 +3603,82 @@ static int i915_drrs_status(struct seq_file *m, void *unused)
return 0;
}
+static int i915_scheduler_info(struct seq_file *m, void *unused)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_scheduler *scheduler = dev_priv->scheduler;
+ struct i915_scheduler_stats *stats = scheduler->stats;
+ struct i915_scheduler_stats_nodes node_stats[I915_NUM_RINGS];
+ struct intel_engine_cs *ring;
+ char str[50 * (I915_NUM_RINGS + 1)], name[50], *ptr;
+ int ret, i, r;
+
+ ret = mutex_lock_interruptible(&dev->mode_config.mutex);
+ if (ret)
+ return ret;
+
+#define PRINT_VAR(name, fmt, var) \
+ do { \
+ sprintf(str, "%-22s", name); \
+ ptr = str + strlen(str); \
+ for_each_ring(ring, dev_priv, r) { \
+ sprintf(ptr, " %10" fmt, var); \
+ ptr += strlen(ptr); \
+ } \
+ seq_printf(m, "%s\n", str); \
+ } while (0)
+
+ PRINT_VAR("Ring name:", "s", dev_priv->ring[r].name);
+ PRINT_VAR(" Ring seqno", "d", ring->get_seqno(ring, false));
+ seq_putc(m, '\n');
+
+ seq_puts(m, "Batch submissions:\n");
+ PRINT_VAR(" Queued", "u", stats[r].queued);
+ PRINT_VAR(" Submitted", "u", stats[r].submitted);
+ PRINT_VAR(" Completed", "u", stats[r].completed);
+ PRINT_VAR(" Expired", "u", stats[r].expired);
+ seq_putc(m, '\n');
+
+ seq_puts(m, "Flush counts:\n");
+ PRINT_VAR(" By object", "u", stats[r].flush_obj);
+ PRINT_VAR(" By request", "u", stats[r].flush_req);
+ PRINT_VAR(" By stamp", "u", stats[r].flush_stamp);
+ PRINT_VAR(" Blanket", "u", stats[r].flush_all);
+ PRINT_VAR(" Entries bumped", "u", stats[r].flush_bump);
+ PRINT_VAR(" Entries submitted", "u", stats[r].flush_submit);
+ seq_putc(m, '\n');
+
+ seq_puts(m, "Miscellaneous:\n");
+ PRINT_VAR(" ExecEarly retry", "u", stats[r].exec_early);
+ PRINT_VAR(" ExecFinal requeue", "u", stats[r].exec_again);
+ PRINT_VAR(" ExecFinal killed", "u", stats[r].exec_dead);
+ PRINT_VAR(" Fence wait", "u", stats[r].fence_wait);
+ PRINT_VAR(" Fence wait again", "u", stats[r].fence_again);
+ PRINT_VAR(" Fence wait ignore", "u", stats[r].fence_ignore);
+ PRINT_VAR(" Fence supplied", "u", stats[r].fence_got);
+ PRINT_VAR(" Hung flying", "u", stats[r].kill_flying);
+ PRINT_VAR(" Hung queued", "u", stats[r].kill_queued);
+ seq_putc(m, '\n');
+
+ seq_puts(m, "Queue contents:\n");
+ for_each_ring(ring, dev_priv, i)
+ i915_scheduler_query_stats(ring, node_stats + ring->id);
+
+ for (i = 0; i < (i915_sqs_MAX + 1); i++) {
+ sprintf(name, " %s", i915_scheduler_queue_status_str(i));
+ PRINT_VAR(name, "d", node_stats[r].counts[i]);
+ }
+ seq_putc(m, '\n');
+
+#undef PRINT_VAR
+
+ mutex_unlock(&dev->mode_config.mutex);
+
+ return 0;
+}
+
struct pipe_crc_info {
const char *name;
struct drm_device *dev;
@@ -5571,6 +5647,7 @@ static const struct drm_info_list i915_debugfs_list[] = {
{"i915_semaphore_status", i915_semaphore_status, 0},
{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
{"i915_dp_mst_info", i915_dp_mst_info, 0},
+ {"i915_scheduler_info", i915_scheduler_info, 0},
{"i915_wa_registers", i915_wa_registers, 0},
{"i915_ddb_info", i915_ddb_info, 0},
{"i915_sseu_status", i915_sseu_status, 0},
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index ca57147..dfd9c29 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1422,13 +1422,20 @@ static int i915_early_fence_wait(struct intel_engine_cs *ring, int fence_fd)
}
if (!sync_fence_is_signaled(fence)) {
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ struct i915_scheduler *scheduler = dev_priv->scheduler;
+
/*
* Wait forever for the fence to be signalled. This is safe
* because the the mutex lock has not yet been acquired and
* the wait is interruptible.
*/
- if (!i915_safe_to_ignore_fence(ring, fence))
+ if (i915_safe_to_ignore_fence(ring, fence))
+ scheduler->stats[ring->id].fence_ignore++;
+ else {
+ scheduler->stats[ring->id].fence_wait++;
ret = sync_fence_wait(fence, -1);
+ }
}
sync_fence_put(fence);
@@ -1844,6 +1851,8 @@ err:
mutex_unlock(&dev->struct_mutex);
pre_mutex_err:
+ dev_priv->scheduler->stats[ring->id].exec_early++;
+
return ret;
}
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index 5eec217..2acc798 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -121,6 +121,9 @@ const char *i915_scheduler_queue_status_str(
case i915_sqs_dead:
return "Dead";
+ case i915_sqs_MAX:
+ return "Invalid";
+
default:
break;
}
@@ -210,9 +213,14 @@ int i915_scheduler_queue_execbuffer(struct i915_scheduler_queue_entry *qe)
BUG_ON(!scheduler);
+ if (qe->params.fence_wait)
+ scheduler->stats[ring->id].fence_got++;
+
if (i915.scheduler_override & i915_so_direct_submit) {
int ret;
+ scheduler->stats[qe->params.ring->id].queued++;
+
trace_i915_scheduler_queue(qe->params.ring, qe);
WARN_ON(qe->params.fence_wait &&
@@ -222,6 +230,7 @@ int i915_scheduler_queue_execbuffer(struct i915_scheduler_queue_entry *qe)
scheduler->flags[qe->params.ring->id] |= i915_sf_submitting;
ret = dev_priv->gt.execbuf_final(&qe->params);
+ scheduler->stats[qe->params.ring->id].submitted++;
scheduler->flags[qe->params.ring->id] &= ~i915_sf_submitting;
/*
@@ -255,6 +264,8 @@ int i915_scheduler_queue_execbuffer(struct i915_scheduler_queue_entry *qe)
if (qe->params.fence_wait)
sync_fence_put(qe->params.fence_wait);
+ scheduler->stats[qe->params.ring->id].expired++;
+
return 0;
}
@@ -368,6 +379,8 @@ int i915_scheduler_queue_execbuffer(struct i915_scheduler_queue_entry *qe)
not_flying = i915_scheduler_count_flying(scheduler, ring) <
scheduler->min_flying;
+ scheduler->stats[ring->id].queued++;
+
trace_i915_scheduler_queue(ring, node);
trace_i915_scheduler_node_state_change(ring, node);
@@ -472,7 +485,8 @@ static void i915_scheduler_node_requeue(struct i915_scheduler_queue_entry *node)
/* Give up on a popped node completely. For example, because it is causing the
* ring to hang or is using some resource that no longer exists. */
-static void i915_scheduler_node_kill(struct i915_scheduler_queue_entry *node)
+static void i915_scheduler_node_kill(struct i915_scheduler *scheduler,
+ struct i915_scheduler_queue_entry *node)
{
BUG_ON(!node);
BUG_ON(!I915_SQS_IS_FLYING(node));
@@ -480,6 +494,8 @@ static void i915_scheduler_node_kill(struct i915_scheduler_queue_entry *node)
node->status = i915_sqs_dead;
trace_i915_scheduler_unfly(node->params.ring, node);
trace_i915_scheduler_node_state_change(node->params.ring, node);
+
+ scheduler->stats[node->params.ring->id].kill_flying++;
}
/*
@@ -509,10 +525,13 @@ bool i915_scheduler_notify_request(struct drm_i915_gem_request *req)
WARN_ON(!I915_SQS_IS_FLYING(node));
/* Node was in flight so mark it as complete. */
- if (req->cancelled)
+ if (req->cancelled) {
node->status = i915_sqs_dead;
- else
+ scheduler->stats[req->ring->id].kill_flying++;
+ } else {
node->status = i915_sqs_complete;
+ scheduler->stats[req->ring->id].completed++;
+ }
trace_i915_scheduler_node_state_change(req->ring, node);
@@ -637,6 +656,7 @@ static int i915_scheduler_remove(struct intel_engine_cs *ring)
list_del(&node->link);
list_add(&node->link, &remove);
+ scheduler->stats[ring->id].expired++;
/* Strip the dependency info while the mutex is still locked */
i915_scheduler_remove_dependent(scheduler, node);
@@ -879,6 +899,35 @@ static int i915_scheduler_dump_locked(struct intel_engine_cs *ring, const char *
return 0;
}
+int i915_scheduler_query_stats(struct intel_engine_cs *ring,
+ struct i915_scheduler_stats_nodes *stats)
+{
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ struct i915_scheduler *scheduler = dev_priv->scheduler;
+ struct i915_scheduler_queue_entry *node;
+ unsigned long flags;
+
+ memset(stats, 0x00, sizeof(*stats));
+
+ spin_lock_irqsave(&scheduler->lock, flags);
+
+ list_for_each_entry(node, &scheduler->node_queue[ring->id], link) {
+ if (node->status >= i915_sqs_MAX) {
+ DRM_DEBUG_DRIVER("Invalid node state: %d! [uniq = %d, seqno = %d]\n",
+ node->status, node->params.request->uniq, node->params.request->seqno);
+
+ stats->counts[i915_sqs_MAX]++;
+ continue;
+ }
+
+ stats->counts[node->status]++;
+ }
+
+ spin_unlock_irqrestore(&scheduler->lock, flags);
+
+ return 0;
+}
+
int i915_scheduler_flush_request(struct drm_i915_gem_request *req,
bool is_locked)
{
@@ -915,16 +964,21 @@ int i915_scheduler_flush_request(struct drm_i915_gem_request *req,
spin_lock_irqsave(&scheduler->lock, flags);
+ scheduler->stats[ring_id].flush_req++;
+
i915_scheduler_priority_bump_clear(scheduler);
flush_count = i915_scheduler_priority_bump(scheduler,
req->scheduler_qe, scheduler->priority_level_max);
+ scheduler->stats[ring_id].flush_bump += flush_count;
spin_unlock_irqrestore(&scheduler->lock, flags);
if (flush_count) {
DRM_DEBUG_DRIVER("<%s> Bumped %d entries\n", req->ring->name, flush_count);
flush_count = i915_scheduler_submit_max_priority(req->ring, is_locked);
+ if (flush_count > 0)
+ scheduler->stats[ring_id].flush_submit += flush_count;
}
return flush_count;
@@ -956,6 +1010,7 @@ int i915_scheduler_flush_stamp(struct intel_engine_cs *ring,
}
spin_lock_irqsave(&scheduler->lock, flags);
+ scheduler->stats[ring->id].flush_stamp++;
i915_scheduler_priority_bump_clear(scheduler);
list_for_each_entry(node, &scheduler->node_queue[ring->id], link) {
if (!I915_SQS_IS_QUEUED(node))
@@ -966,12 +1021,15 @@ int i915_scheduler_flush_stamp(struct intel_engine_cs *ring,
flush_count = i915_scheduler_priority_bump(scheduler,
node, scheduler->priority_level_max);
+ scheduler->stats[ring->id].flush_bump += flush_count;
}
spin_unlock_irqrestore(&scheduler->lock, flags);
if (flush_count) {
DRM_DEBUG_DRIVER("<%s> Bumped %d entries\n", ring->name, flush_count);
flush_count = i915_scheduler_submit_max_priority(ring, is_locked);
+ if (flush_count > 0)
+ scheduler->stats[ring->id].flush_submit += flush_count;
}
return flush_count;
@@ -998,6 +1056,8 @@ int i915_scheduler_flush(struct intel_engine_cs *ring, bool is_locked)
BUG_ON(is_locked && (scheduler->flags[ring->id] & i915_sf_submitting));
+ scheduler->stats[ring->id].flush_all++;
+
do {
found = false;
spin_lock_irqsave(&scheduler->lock, flags);
@@ -1012,6 +1072,7 @@ int i915_scheduler_flush(struct intel_engine_cs *ring, bool is_locked)
if (found) {
ret = i915_scheduler_submit(ring, is_locked);
+ scheduler->stats[ring->id].flush_submit++;
if (ret < 0)
return ret;
@@ -1149,15 +1210,20 @@ static void i915_scheduler_wait_fence_signaled(struct sync_fence *fence,
static bool i915_scheduler_async_fence_wait(struct drm_device *dev,
struct i915_scheduler_queue_entry *node)
{
+ struct drm_i915_private *dev_priv = node->params.ring->dev->dev_private;
+ struct i915_scheduler *scheduler = dev_priv->scheduler;
struct i915_sync_fence_waiter *fence_waiter;
struct sync_fence *fence = node->params.fence_wait;
int signaled;
bool success = true;
- if ((node->flags & i915_qef_fence_waiting) == 0)
+ if ((node->flags & i915_qef_fence_waiting) == 0) {
node->flags |= i915_qef_fence_waiting;
- else
+ scheduler->stats[node->params.ring->id].fence_wait++;
+ } else {
+ scheduler->stats[node->params.ring->id].fence_again++;
return true;
+ }
if (fence == NULL)
return false;
@@ -1222,8 +1288,10 @@ static int i915_scheduler_pop_from_queue_locked(struct intel_engine_cs *ring,
else
signalled = true;
- if (!signalled)
+ if (!signalled) {
signalled = i915_safe_to_ignore_fence(ring, node->params.fence_wait);
+ scheduler->stats[node->params.ring->id].fence_ignore++;
+ }
has_local = false;
has_remote = false;
@@ -1360,6 +1428,8 @@ static int i915_scheduler_submit(struct intel_engine_cs *ring, bool was_locked)
* list. So add it back in and mark it as in flight. */
i915_scheduler_fly_node(node);
+ scheduler->stats[ring->id].submitted++;
+
scheduler->flags[ring->id] |= i915_sf_submitting;
spin_unlock_irqrestore(&scheduler->lock, flags);
ret = dev_priv->gt.execbuf_final(&node->params);
@@ -1378,6 +1448,7 @@ static int i915_scheduler_submit(struct intel_engine_cs *ring, bool was_locked)
case ENOENT:
/* Fatal errors. Kill the node. */
requeue = -1;
+ scheduler->stats[ring->id].exec_dead++;
break;
case EAGAIN:
@@ -1387,12 +1458,14 @@ static int i915_scheduler_submit(struct intel_engine_cs *ring, bool was_locked)
case ERESTARTSYS:
case EINTR:
/* Supposedly recoverable errors. */
+ scheduler->stats[ring->id].exec_again++;
break;
default:
DRM_DEBUG_DRIVER("<%s> Got unexpected error from execfinal(): %d!\n",
ring->name, ret);
/* Assume it is recoverable and hope for the best. */
+ scheduler->stats[ring->id].exec_again++;
break;
}
@@ -1408,7 +1481,7 @@ static int i915_scheduler_submit(struct intel_engine_cs *ring, bool was_locked)
* later. */
break;
} else if (requeue == -1)
- i915_scheduler_node_kill(node);
+ i915_scheduler_node_kill(scheduler, node);
}
/* Keep launching until the sky is sufficiently full. */
diff --git a/drivers/gpu/drm/i915/i915_scheduler.h b/drivers/gpu/drm/i915/i915_scheduler.h
index b5518b5..ea70a3f 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.h
+++ b/drivers/gpu/drm/i915/i915_scheduler.h
@@ -75,6 +75,37 @@ struct i915_scheduler_queue_entry {
};
const char *i915_qe_state_str(struct i915_scheduler_queue_entry *node);
+struct i915_scheduler_stats_nodes {
+ uint32_t counts[i915_sqs_MAX + 1];
+};
+
+struct i915_scheduler_stats {
+ /* Batch buffer counts: */
+ uint32_t queued;
+ uint32_t submitted;
+ uint32_t completed;
+ uint32_t expired;
+
+ /* Other stuff: */
+ uint32_t flush_obj;
+ uint32_t flush_req;
+ uint32_t flush_stamp;
+ uint32_t flush_all;
+ uint32_t flush_bump;
+ uint32_t flush_submit;
+
+ uint32_t exec_early;
+ uint32_t exec_again;
+ uint32_t exec_dead;
+ uint32_t kill_flying;
+ uint32_t kill_queued;
+
+ uint32_t fence_wait;
+ uint32_t fence_again;
+ uint32_t fence_ignore;
+ uint32_t fence_got;
+};
+
struct i915_scheduler {
struct list_head node_queue[I915_NUM_RINGS];
uint32_t flags[I915_NUM_RINGS];
@@ -86,6 +117,9 @@ struct i915_scheduler {
int32_t priority_level_preempt;
uint32_t min_flying;
uint32_t file_queue_max;
+
+ /* Statistics: */
+ struct i915_scheduler_stats stats[I915_NUM_RINGS];
};
/* Flag bits for i915_scheduler::flags */
@@ -126,6 +160,8 @@ int i915_scheduler_dump(struct intel_engine_cs *ring,
int i915_scheduler_dump_all(struct drm_device *dev, const char *msg);
bool i915_scheduler_is_request_tracked(struct drm_i915_gem_request *req,
bool *completed, bool *busy);
+int i915_scheduler_query_stats(struct intel_engine_cs *ring,
+ struct i915_scheduler_stats_nodes *stats);
bool i915_scheduler_file_queue_is_full(struct drm_file *file);
#endif /* _I915_SCHEDULER_H_ */
--
1.9.1
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
http://lists.freedesktop.org/mailman/listinfo/intel-gfx
next prev parent reply other threads:[~2015-11-23 11:40 UTC|newest]
Thread overview: 143+ messages / expand[flat|nested] mbox.gz Atom feed top
2015-11-23 11:38 [PATCH 00/39] GPU scheduler for i915 driver John.C.Harrison
2015-11-23 11:38 ` [PATCH 01/39] drm/i915: Add total count to context status debugfs output John.C.Harrison
2016-01-08 9:50 ` Joonas Lahtinen
2015-11-23 11:38 ` [PATCH 02/39] drm/i915: Updating assorted register and status page definitions John.C.Harrison
2016-01-08 12:26 ` Joonas Lahtinen
2016-01-11 7:47 ` Daniel Vetter
2015-11-23 11:38 ` [PATCH 03/39] drm/i915: Explicit power enable during deferred context initialisation John.C.Harrison
2016-01-08 12:35 ` Joonas Lahtinen
2015-11-23 11:38 ` [PATCH 04/39] drm/i915: Prelude to splitting i915_gem_do_execbuffer in two John.C.Harrison
2015-11-23 11:39 ` [PATCH 05/39] drm/i915: Split i915_dem_do_execbuffer() in half John.C.Harrison
2015-12-11 13:15 ` [PATCH 05/40] " John.C.Harrison
2015-11-23 11:39 ` [PATCH 06/39] drm/i915: Re-instate request->uniq because it is extremely useful John.C.Harrison
2015-11-23 11:39 ` [PATCH 07/39] drm/i915: Start of GPU scheduler John.C.Harrison
2015-12-11 13:16 ` [PATCH 08/40] " John.C.Harrison
2015-11-23 11:39 ` [PATCH 08/39] drm/i915: Prepare retire_requests to handle out-of-order seqnos John.C.Harrison
2015-11-23 11:39 ` [PATCH 09/39] drm/i915: Disable hardware semaphores when GPU scheduler is enabled John.C.Harrison
2015-11-23 11:39 ` [PATCH 10/39] drm/i915: Force MMIO flips when scheduler enabled John.C.Harrison
2015-11-23 11:39 ` [PATCH 11/39] drm/i915: Added scheduler hook when closing DRM file handles John.C.Harrison
2015-12-11 13:19 ` [PATCH 12/40] " John.C.Harrison
2015-11-23 11:39 ` [PATCH 12/39] drm/i915: Added scheduler hook into i915_gem_request_notify() John.C.Harrison
2015-11-23 11:39 ` [PATCH 13/39] drm/i915: Added deferred work handler for scheduler John.C.Harrison
2015-11-23 11:39 ` [PATCH 14/39] drm/i915: Redirect execbuffer_final() via scheduler John.C.Harrison
2015-11-23 11:39 ` [PATCH 15/39] drm/i915: Keep the reserved space mechanism happy John.C.Harrison
2015-12-11 13:19 ` [PATCH 16/40] " John.C.Harrison
2015-11-23 11:39 ` [PATCH 16/39] drm/i915: Added tracking/locking of batch buffer objects John.C.Harrison
2015-12-11 13:19 ` [PATCH 17/40] " John.C.Harrison
2015-11-23 11:39 ` [PATCH 17/39] drm/i915: Hook scheduler node clean up into retire requests John.C.Harrison
2015-12-11 13:19 ` [PATCH 18/40] " John.C.Harrison
2015-11-23 11:39 ` [PATCH 18/39] drm/i915: Added scheduler support to __wait_request() calls John.C.Harrison
2015-12-11 13:20 ` [PATCH 19/40] " John.C.Harrison
2015-11-23 11:39 ` [PATCH 19/39] drm/i915: Added scheduler support to page fault handler John.C.Harrison
2015-11-23 11:39 ` [PATCH 20/39] drm/i915: Added scheduler flush calls to ring throttle and idle functions John.C.Harrison
2015-12-11 13:20 ` [PATCH 21/40] " John.C.Harrison
2015-11-23 11:39 ` [PATCH 21/39] drm/i915: Added a module parameter for allowing scheduler overrides John.C.Harrison
2015-11-23 11:39 ` [PATCH 22/39] drm/i915: Support for 'unflushed' ring idle John.C.Harrison
2015-11-23 11:39 ` [PATCH 23/39] drm/i915: Defer seqno allocation until actual hardware submission time John.C.Harrison
2015-12-11 13:20 ` [PATCH 24/40] " John.C.Harrison
2015-11-23 11:39 ` [PATCH 24/39] drm/i915: Added immediate submission override to scheduler John.C.Harrison
2015-11-23 11:39 ` [PATCH 25/39] drm/i915: Add sync wait support " John.C.Harrison
2015-11-23 11:39 ` [PATCH 26/39] drm/i915: Connecting execbuff fences " John.C.Harrison
2015-11-23 11:39 ` [PATCH 27/39] drm/i915: Added trace points " John.C.Harrison
2015-12-11 13:20 ` [PATCH 28/40] " John.C.Harrison
2015-11-23 11:39 ` [PATCH 28/39] drm/i915: Added scheduler queue throttling by DRM file handle John.C.Harrison
2015-12-11 13:21 ` [PATCH 29/40] " John.C.Harrison
2015-11-23 11:39 ` [PATCH 29/39] drm/i915: Added debugfs interface to scheduler tuning parameters John.C.Harrison
2015-11-23 11:39 ` [PATCH 30/39] drm/i915: Added debug state dump facilities to scheduler John.C.Harrison
2015-12-11 13:21 ` [PATCH 31/40] " John.C.Harrison
2015-11-23 11:39 ` [PATCH 31/39] drm/i915: Add early exit to execbuff_final() if insufficient ring space John.C.Harrison
2015-12-11 13:21 ` [PATCH 32/40] " John.C.Harrison
2015-11-23 11:39 ` John.C.Harrison [this message]
2015-12-11 13:21 ` [PATCH 33/40] drm/i915: Added scheduler statistic reporting to debugfs John.C.Harrison
2015-11-23 11:39 ` [PATCH 33/39] drm/i915: Added seqno values to scheduler status dump John.C.Harrison
2015-11-23 11:39 ` [PATCH 34/39] drm/i915: Add scheduler support functions for TDR John.C.Harrison
2015-11-23 11:39 ` [PATCH 35/39] drm/i915: GPU priority bumping to prevent starvation John.C.Harrison
2015-11-23 11:39 ` [PATCH 36/39] drm/i915: Scheduler state dump via debugfs John.C.Harrison
2015-11-23 11:39 ` [PATCH 37/39] drm/i915: Enable GPU scheduler by default John.C.Harrison
2015-11-23 11:39 ` [PATCH 38/39] drm/i915: Add scheduling priority to per-context parameters John.C.Harrison
2015-11-23 11:39 ` [PATCH 39/39] drm/i915: Allow scheduler to manage inter-ring object synchronisation John.C.Harrison
2015-12-11 13:16 ` [PATCH 06/40] drm/i915: Cache request pointer in *_submission_final() John.C.Harrison
2015-12-11 13:23 ` [PATCH 00/40] GPU scheduler for i915 driver John.C.Harrison
2016-01-11 18:42 ` [PATCH v4 00/38] " John.C.Harrison
2016-01-11 18:42 ` [PATCH v4 01/38] drm/i915: Add total count to context status debugfs output John.C.Harrison
2016-01-11 18:42 ` [PATCH v4 02/38] drm/i915: Explicit power enable during deferred context initialisation John.C.Harrison
2016-01-12 0:20 ` Chris Wilson
2016-01-12 11:11 ` John Harrison
2016-01-12 11:28 ` Chris Wilson
2016-01-12 11:50 ` John Harrison
2016-01-12 14:04 ` Daniel Vetter
2016-01-12 14:21 ` John Harrison
2016-01-12 15:35 ` Daniel Vetter
2016-01-12 15:59 ` Imre Deak
2016-01-12 16:11 ` Daniel Vetter
2016-01-12 16:59 ` Chris Wilson
2016-01-11 18:42 ` [PATCH v4 03/38] drm/i915: Prelude to splitting i915_gem_do_execbuffer in two John.C.Harrison
2016-02-04 17:01 ` Jesse Barnes
2016-02-12 16:18 ` John Harrison
2016-01-11 18:42 ` [PATCH v4 04/38] drm/i915: Split i915_dem_do_execbuffer() in half John.C.Harrison
2016-01-11 22:03 ` Chris Wilson
2016-02-04 17:08 ` Jesse Barnes
2016-01-11 18:42 ` [PATCH v4 05/38] drm/i915: Cache request pointer in *_submission_final() John.C.Harrison
2016-02-04 17:09 ` Jesse Barnes
2016-01-11 18:42 ` [PATCH v4 06/38] drm/i915: Re-instate request->uniq because it is extremely useful John.C.Harrison
2016-01-11 22:04 ` Chris Wilson
2016-01-12 11:16 ` John Harrison
2016-01-11 18:42 ` [PATCH v4 07/38] drm/i915: Start of GPU scheduler John.C.Harrison
2016-01-20 13:18 ` Joonas Lahtinen
2016-02-18 14:22 ` John Harrison
2016-02-19 10:13 ` Joonas Lahtinen
2016-01-11 18:42 ` [PATCH v4 08/38] drm/i915: Prepare retire_requests to handle out-of-order seqnos John.C.Harrison
2016-01-11 22:10 ` Chris Wilson
2016-02-04 17:14 ` Jesse Barnes
2016-01-11 18:42 ` [PATCH v4 09/38] drm/i915: Disable hardware semaphores when GPU scheduler is enabled John.C.Harrison
2016-01-11 18:42 ` [PATCH v4 10/38] drm/i915: Force MMIO flips when scheduler enabled John.C.Harrison
2016-01-11 22:16 ` Chris Wilson
2016-01-12 11:19 ` John Harrison
2016-01-12 14:07 ` Daniel Vetter
2016-01-12 21:53 ` Chris Wilson
2016-01-13 12:37 ` John Harrison
2016-01-13 13:14 ` Chris Wilson
2016-01-11 18:42 ` [PATCH v4 11/38] drm/i915: Added scheduler hook when closing DRM file handles John.C.Harrison
2016-01-11 18:42 ` [PATCH v4 12/38] drm/i915: Added scheduler hook into i915_gem_request_notify() John.C.Harrison
2016-01-11 22:14 ` Chris Wilson
2016-01-12 11:25 ` John Harrison
2016-01-11 18:42 ` [PATCH v4 13/38] drm/i915: Added deferred work handler for scheduler John.C.Harrison
2016-01-11 18:42 ` [PATCH v4 14/38] drm/i915: Redirect execbuffer_final() via scheduler John.C.Harrison
2016-01-11 18:42 ` [PATCH v4 15/38] drm/i915: Keep the reserved space mechanism happy John.C.Harrison
2016-01-11 18:42 ` [PATCH v4 16/38] drm/i915: Added tracking/locking of batch buffer objects John.C.Harrison
2016-01-11 18:42 ` [PATCH v4 17/38] drm/i915: Hook scheduler node clean up into retire requests John.C.Harrison
2016-01-11 18:42 ` [PATCH v4 18/38] drm/i915: Added scheduler support to __wait_request() calls John.C.Harrison
2016-01-11 23:14 ` Chris Wilson
2016-01-12 11:28 ` John Harrison
2016-01-11 18:42 ` [PATCH v4 19/38] drm/i915: Added scheduler support to page fault handler John.C.Harrison
2016-01-11 18:42 ` [PATCH v4 20/38] drm/i915: Added scheduler flush calls to ring throttle and idle functions John.C.Harrison
2016-01-11 22:20 ` Chris Wilson
2016-01-11 18:42 ` [PATCH v4 21/38] drm/i915: Added a module parameter for allowing scheduler overrides John.C.Harrison
2016-01-11 22:24 ` Chris Wilson
2016-01-12 11:34 ` John Harrison
2016-01-12 11:55 ` Chris Wilson
2016-01-11 18:42 ` [PATCH v4 22/38] drm/i915: Support for 'unflushed' ring idle John.C.Harrison
2016-01-11 18:42 ` [PATCH v4 23/38] drm/i915: Defer seqno allocation until actual hardware submission time John.C.Harrison
2016-01-11 18:42 ` [PATCH v4 24/38] drm/i915: Added immediate submission override to scheduler John.C.Harrison
2016-01-11 18:42 ` [PATCH v4 25/38] drm/i915: Added trace points " John.C.Harrison
2016-01-11 18:42 ` [PATCH v4 26/38] drm/i915: Added scheduler queue throttling by DRM file handle John.C.Harrison
2016-01-11 18:42 ` [PATCH v4 27/38] drm/i915: Added debugfs interface to scheduler tuning parameters John.C.Harrison
2016-01-11 18:42 ` [PATCH v4 28/38] drm/i915: Added debug state dump facilities to scheduler John.C.Harrison
2016-01-11 18:42 ` [PATCH v4 29/38] drm/i915: Add early exit to execbuff_final() if insufficient ring space John.C.Harrison
2016-01-11 18:42 ` [PATCH v4 30/38] drm/i915: Added scheduler statistic reporting to debugfs John.C.Harrison
2016-01-11 18:43 ` [PATCH v4 31/38] drm/i915: Added seqno values to scheduler status dump John.C.Harrison
2016-01-11 18:43 ` [PATCH v4 32/38] drm/i915: Add scheduler support functions for TDR John.C.Harrison
2016-01-11 18:43 ` [PATCH v4 33/38] drm/i915: GPU priority bumping to prevent starvation John.C.Harrison
2016-01-11 18:43 ` [PATCH v4 34/38] drm/i915: Scheduler state dump via debugfs John.C.Harrison
2016-01-11 18:43 ` [PATCH v4 35/38] drm/i915: Enable GPU scheduler by default John.C.Harrison
2016-01-11 18:43 ` [PATCH v4 36/38] drm/i915: Add scheduling priority to per-context parameters John.C.Harrison
2016-01-11 18:43 ` [PATCH v4 37/38] drm/i915: Add support for retro-actively banning batch buffers John.C.Harrison
2016-01-11 18:43 ` [PATCH v4 38/38] drm/i915: Allow scheduler to manage inter-ring object synchronisation John.C.Harrison
2016-01-11 22:07 ` Chris Wilson
2016-01-12 11:38 ` John Harrison
2016-01-11 18:43 ` [PATCH] igt/gem_ctx_param_basic: Updated to support scheduler priority interface John.C.Harrison
2016-01-11 23:52 ` [PATCH v4 00/38] GPU scheduler for i915 driver Chris Wilson
2016-01-12 4:37 ` Tian, Kevin
2016-01-12 11:43 ` John Harrison
2016-01-12 13:49 ` Dave Gordon
2016-01-13 2:33 ` Tian, Kevin
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1448278774-31376-33-git-send-email-John.C.Harrison@Intel.com \
--to=john.c.harrison@intel.com \
--cc=Intel-GFX@Lists.FreeDesktop.Org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).