From: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
To: Intel-gfx@lists.freedesktop.org
Subject: [RFC 2/6] drm/i915: Track per-context engine busyness
Date: Fri, 10 May 2019 14:22:36 +0100 [thread overview]
Message-ID: <20190510132240.11029-3-tvrtko.ursulin@linux.intel.com> (raw)
In-Reply-To: <20190510132240.11029-1-tvrtko.ursulin@linux.intel.com>
From: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Some customers want to know how much of the GPU time their clients are
using in order to make dynamic load balancing decisions.
With the hooks already in place which track the overall engine busyness,
we can extend that slightly to split that time between contexts.
v2: Fix accounting for tail updates.
v3: Rebase.
v4: Mark currently running contexts as active on stats enable.
v5: Include some headers to fix the build.
v6: Added fine grained lock.
v7: Convert to seqlock. (Chris Wilson)
v8: Rebase and tidy with helpers.
Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
drivers/gpu/drm/i915/gt/intel_context.c | 21 +++++++
drivers/gpu/drm/i915/gt/intel_context.h | 9 +++
drivers/gpu/drm/i915/gt/intel_context_types.h | 9 +++
drivers/gpu/drm/i915/gt/intel_engine_cs.c | 8 +++
drivers/gpu/drm/i915/gt/intel_lrc.c | 62 ++++++++++++++++---
5 files changed, 99 insertions(+), 10 deletions(-)
diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
index 5b31e1e05ddd..9adf63ff02e0 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.c
+++ b/drivers/gpu/drm/i915/gt/intel_context.c
@@ -125,6 +125,8 @@ intel_context_init(struct intel_context *ce,
i915_active_request_init(&ce->active_tracker,
NULL, intel_context_retire);
+
+ seqlock_init(&ce->stats.lock);
}
static void i915_global_context_shrink(void)
@@ -177,3 +179,22 @@ struct i915_request *intel_context_create_request(struct intel_context *ce)
return rq;
}
+
+ktime_t intel_context_get_busy_time(struct intel_context *ce)
+{
+ unsigned int seq;
+ ktime_t total;
+
+ do {
+ seq = read_seqbegin(&ce->stats.lock);
+
+ total = ce->stats.total;
+
+ if (ce->stats.active)
+ total = ktime_add(total,
+ ktime_sub(ktime_get(),
+ ce->stats.start));
+ } while (read_seqretry(&ce->stats.lock, seq));
+
+ return total;
+}
diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h
index 63392c88cd98..657dcdce7152 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.h
+++ b/drivers/gpu/drm/i915/gt/intel_context.h
@@ -127,4 +127,13 @@ static inline void intel_context_timeline_unlock(struct intel_context *ce)
struct i915_request *intel_context_create_request(struct intel_context *ce);
+static inline void
+__intel_context_stats_start(struct intel_context_stats *stats, ktime_t now)
+{
+ stats->start = now;
+ stats->active = true;
+}
+
+ktime_t intel_context_get_busy_time(struct intel_context *ce);
+
#endif /* __INTEL_CONTEXT_H__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h
index 963a312430e6..b33770f396e2 100644
--- a/drivers/gpu/drm/i915/gt/intel_context_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_context_types.h
@@ -11,6 +11,7 @@
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/types.h>
+#include <linux/seqlock.h>
#include "i915_active_types.h"
#include "intel_engine_types.h"
@@ -65,6 +66,14 @@ struct intel_context {
/** sseu: Control eu/slice partitioning */
struct intel_sseu sseu;
+
+ /** stats: Context GPU engine busyness tracking. */
+ struct intel_context_stats {
+ seqlock_t lock;
+ bool active;
+ ktime_t start;
+ ktime_t total;
+ } stats;
};
#endif /* __INTEL_CONTEXT_TYPES__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index 4c3753c1b573..c97269e1beb3 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -1559,6 +1559,14 @@ int intel_enable_engine_stats(struct intel_engine_cs *engine)
engine->stats.enabled_at = ktime_get();
+ /* Mark currently running context as active. */
+ if (port_isset(port)) {
+ struct i915_request *rq = port_request(port);
+
+ __intel_context_stats_start(&rq->hw_context->stats,
+ engine->stats.enabled_at);
+ }
+
/* XXX submission method oblivious? */
while (num_ports-- && port_isset(port)) {
engine->stats.active++;
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 170e394206ca..3a96bddf9474 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -454,18 +454,48 @@ execlists_context_status_change(struct i915_request *rq, unsigned long status)
}
static inline void
-intel_engine_context_in(struct intel_engine_cs *engine)
+intel_context_stats_start(struct intel_context_stats *stats, ktime_t now)
{
+ write_seqlock(&stats->lock);
+ __intel_context_stats_start(stats, now);
+ write_sequnlock(&stats->lock);
+}
+
+static inline void
+intel_context_stats_stop(struct intel_context_stats *stats, ktime_t now)
+{
+ write_seqlock(&stats->lock);
+ GEM_BUG_ON(!stats->start);
+ stats->total = ktime_add(stats->total, ktime_sub(now, stats->start));
+ stats->active = false;
+ write_sequnlock(&stats->lock);
+}
+
+static inline void
+intel_context_in(struct intel_context *ce, bool submit)
+{
+ struct intel_engine_cs *engine = ce->engine;
unsigned long flags;
+ ktime_t now;
if (READ_ONCE(engine->stats.enabled) == 0)
return;
write_seqlock_irqsave(&engine->stats.lock, flags);
+ if (submit) {
+ now = ktime_get();
+ intel_context_stats_start(&ce->stats, now);
+ } else {
+ now = 0;
+ }
+
if (engine->stats.enabled > 0) {
- if (engine->stats.active++ == 0)
- engine->stats.start = ktime_get();
+ if (engine->stats.active++ == 0) {
+ if (!now)
+ now = ktime_get();
+ engine->stats.start = now;
+ }
GEM_BUG_ON(engine->stats.active == 0);
}
@@ -473,8 +503,9 @@ intel_engine_context_in(struct intel_engine_cs *engine)
}
static inline void
-intel_engine_context_out(struct intel_engine_cs *engine)
+intel_context_out(struct intel_context *ce)
{
+ struct intel_engine_cs *engine = ce->engine;
unsigned long flags;
if (READ_ONCE(engine->stats.enabled) == 0)
@@ -483,14 +514,25 @@ intel_engine_context_out(struct intel_engine_cs *engine)
write_seqlock_irqsave(&engine->stats.lock, flags);
if (engine->stats.enabled > 0) {
+ struct execlist_port *next_port = &engine->execlists.port[1];
+ ktime_t now = ktime_get();
ktime_t last;
+ intel_context_stats_stop(&ce->stats, now);
+
+ if (port_isset(next_port)) {
+ struct i915_request *next_rq = port_request(next_port);
+
+ intel_context_stats_start(&next_rq->hw_context->stats,
+ now);
+ }
+
if (engine->stats.active && --engine->stats.active == 0) {
/*
* Decrement the active context count and in case GPU
* is now idle add up to the running total.
*/
- last = ktime_sub(ktime_get(), engine->stats.start);
+ last = ktime_sub(now, engine->stats.start);
engine->stats.total = ktime_add(engine->stats.total,
last);
@@ -500,7 +542,7 @@ intel_engine_context_out(struct intel_engine_cs *engine)
* the first event in which case we account from the
* time stats gathering was turned on.
*/
- last = ktime_sub(ktime_get(), engine->stats.enabled_at);
+ last = ktime_sub(now, engine->stats.enabled_at);
engine->stats.total = ktime_add(engine->stats.total,
last);
@@ -524,12 +566,12 @@ execlists_user_end(struct intel_engine_execlists *execlists)
}
static inline void
-execlists_context_schedule_in(struct i915_request *rq)
+execlists_context_schedule_in(struct i915_request *rq, unsigned int port)
{
GEM_BUG_ON(rq->hw_context->active);
execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_IN);
- intel_engine_context_in(rq->engine);
+ intel_context_in(rq->hw_context, port == 0);
rq->hw_context->active = rq->engine;
}
@@ -537,7 +579,7 @@ static inline void
execlists_context_schedule_out(struct i915_request *rq, unsigned long status)
{
rq->hw_context->active = NULL;
- intel_engine_context_out(rq->engine);
+ intel_context_out(rq->hw_context);
execlists_context_status_change(rq, status);
trace_i915_request_out(rq);
}
@@ -610,7 +652,7 @@ static void execlists_submit_ports(struct intel_engine_cs *engine)
if (rq) {
GEM_BUG_ON(count > !n);
if (!count++)
- execlists_context_schedule_in(rq);
+ execlists_context_schedule_in(rq, n);
port_set(&port[n], port_pack(rq, count));
desc = execlists_update_context(rq);
GEM_DEBUG_EXEC(port[n].context_id = upper_32_bits(desc));
--
2.19.1
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
next prev parent reply other threads:[~2019-05-10 13:22 UTC|newest]
Thread overview: 14+ messages / expand[flat|nested] mbox.gz Atom feed top
2019-05-10 13:22 [RFC 0/6] Per context and per client GPU busyness tracking Tvrtko Ursulin
2019-05-10 13:22 ` [RFC 1/6] drm/i915: Move intel_engine_context_in/out into intel_lrc.c Tvrtko Ursulin
2019-05-10 13:22 ` Tvrtko Ursulin [this message]
2019-05-10 14:22 ` [RFC 2/6] drm/i915: Track per-context engine busyness Chris Wilson
2019-05-10 13:22 ` [RFC 3/6] drm/i915: Expose list of clients in sysfs Tvrtko Ursulin
2019-05-10 14:12 ` Chris Wilson
2019-05-10 13:22 ` [RFC 4/6] drm/i915: Update client name on context create Tvrtko Ursulin
2019-05-10 13:22 ` [RFC 5/6] drm/i915: Expose per-engine client busyness Tvrtko Ursulin
2019-05-10 13:57 ` Chris Wilson
2019-05-10 14:02 ` Tvrtko Ursulin
2019-05-10 13:22 ` [RFC 6/6] drm/i915: Add sysfs toggle to enable per-client engine stats Tvrtko Ursulin
2019-05-10 14:13 ` ✗ Fi.CI.CHECKPATCH: warning for Per context and per client GPU busyness tracking Patchwork
2019-05-10 15:06 ` ✓ Fi.CI.BAT: success " Patchwork
2019-05-10 16:48 ` ✓ Fi.CI.IGT: " Patchwork
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20190510132240.11029-3-tvrtko.ursulin@linux.intel.com \
--to=tvrtko.ursulin@linux.intel.com \
--cc=Intel-gfx@lists.freedesktop.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox