From: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
To: Intel-gfx@lists.freedesktop.org
Subject: [Intel-gfx] [RFC 6/8] drm/i915: Track all user contexts per client
Date: Thu, 19 Dec 2019 18:00:17 +0000 [thread overview]
Message-ID: <20191219180019.25562-7-tvrtko.ursulin@linux.intel.com> (raw)
In-Reply-To: <20191219180019.25562-1-tvrtko.ursulin@linux.intel.com>
From: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
We soon want to start answering questions like how much GPU time is still
being used by contexts belonging to a client which has exited.
To enable this we start tracking all contexts belonging to a client on a
separate list, plus we make contexts take a reference on their client's
file_priv.
Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
drivers/gpu/drm/i915/gem/i915_gem_context.c | 23 ++++++++++++++++++-
.../gpu/drm/i915/gem/i915_gem_context_types.h | 6 +++++
drivers/gpu/drm/i915/i915_drv.h | 21 +++++++++++++++++
drivers/gpu/drm/i915/i915_gem.c | 6 +++--
4 files changed, 53 insertions(+), 3 deletions(-)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index dc3a7856ae22..6586edcf4ffb 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -266,8 +266,18 @@ static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx)
static void i915_gem_context_free(struct i915_gem_context *ctx)
{
+ struct i915_drm_client *client = ctx->client;
+
GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
+ if (client) {
+ spin_lock(&client->ctx_lock);
+ list_del_rcu(&ctx->client_link);
+ spin_unlock(&client->ctx_lock);
+
+ i915_gem_client_put(client);
+ }
+
spin_lock(&ctx->i915->gem.contexts.lock);
list_del(&ctx->link);
spin_unlock(&ctx->i915->gem.contexts.lock);
@@ -790,6 +800,7 @@ static int gem_context_register(struct i915_gem_context *ctx,
struct drm_i915_file_private *fpriv,
u32 *id)
{
+ struct i915_drm_client *client = &fpriv->client;
struct i915_address_space *vm;
int ret;
@@ -807,9 +818,19 @@ static int gem_context_register(struct i915_gem_context *ctx,
/* And finally expose ourselves to userspace via the idr */
ret = xa_alloc(&fpriv->context_xa, id, ctx, xa_limit_32b, GFP_KERNEL);
- if (ret)
+ if (ret) {
put_pid(fetch_and_zero(&ctx->pid));
+ goto out;
+ }
+
+ ctx->client = client;
+ i915_gem_client_get(client);
+ spin_lock(&client->ctx_lock);
+ list_add_tail_rcu(&ctx->client_link, &client->ctx_list);
+ spin_unlock(&client->ctx_lock);
+
+out:
return ret;
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
index 69df5459c350..090ef10fdc5d 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
@@ -104,6 +104,12 @@ struct i915_gem_context {
struct list_head link;
struct llist_node free_link;
+ /** client: struct i915_drm_client */
+ struct i915_drm_client *client;
+
+ /** link: &fpriv.context_list */
+ struct list_head client_link;
+
/**
* @ref: reference count
*
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 514d7d630fce..8ffd638a071f 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -234,6 +234,9 @@ struct drm_i915_file_private {
char *name;
bool closed;
+ spinlock_t ctx_lock;
+ struct list_head ctx_list;
+
struct kobject *root;
struct {
struct device_attribute pid;
@@ -1909,6 +1912,24 @@ void __i915_gem_unregister_client(struct i915_drm_client *client);
int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file);
void i915_gem_release(struct drm_device *dev, struct drm_file *file);
+static inline void i915_gem_client_get(struct i915_drm_client *client)
+{
+ struct drm_i915_file_private *fpriv =
+ container_of(client, typeof(*fpriv), client);
+
+ kref_get(&fpriv->kref);
+}
+
+void __i915_gem_release(struct kref *kref);
+
+static inline void i915_gem_client_put(struct i915_drm_client *client)
+{
+ struct drm_i915_file_private *fpriv =
+ container_of(client, typeof(*fpriv), client);
+
+ kref_put(&fpriv->kref, __i915_gem_release);
+}
+
int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index f953d4e20e33..564e21902dff 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1612,7 +1612,7 @@ void __i915_gem_unregister_client(struct i915_drm_client *client)
kfree(fetch_and_zero(&client->name));
}
-static void gem_release(struct kref *kref)
+void __i915_gem_release(struct kref *kref)
{
struct drm_i915_file_private *fpriv =
container_of(kref, typeof(*fpriv), kref);
@@ -1639,7 +1639,7 @@ void i915_gem_release(struct drm_device *dev, struct drm_file *file)
GEM_BUG_ON(client->closed);
client->closed = true;
- kref_put(&file_priv->kref, gem_release);
+ i915_gem_client_put(client);
}
int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file)
@@ -1667,6 +1667,8 @@ int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file)
goto err_add;
kref_init(&file_priv->kref);
+ spin_lock_init(&client->ctx_lock);
+ INIT_LIST_HEAD(&client->ctx_list);
file->driver_priv = file_priv;
file_priv->i915 = i915;
--
2.20.1
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
next prev parent reply other threads:[~2019-12-19 18:00 UTC|newest]
Thread overview: 25+ messages / expand[flat|nested] mbox.gz Atom feed top
2019-12-19 18:00 [Intel-gfx] [RFC 0/8] Per client engine busyness Tvrtko Ursulin
2019-12-19 18:00 ` [Intel-gfx] [RFC 1/8] drm/i915: Switch context id allocation directoy to xarray Tvrtko Ursulin
2019-12-19 19:55 ` Chris Wilson
2019-12-19 18:00 ` [Intel-gfx] [RFC 2/8] drm/i915: Reference count struct drm_i915_file_private Tvrtko Ursulin
2019-12-19 20:43 ` Chris Wilson
2019-12-20 7:55 ` Tvrtko Ursulin
2019-12-19 18:00 ` [Intel-gfx] [RFC 3/8] drm/i915: Expose list of clients in sysfs Tvrtko Ursulin
2019-12-19 20:48 ` Chris Wilson
2019-12-20 7:56 ` Tvrtko Ursulin
2019-12-20 10:08 ` Chris Wilson
2019-12-19 18:00 ` [Intel-gfx] [RFC 4/8] drm/i915: Update client name on context create Tvrtko Ursulin
2019-12-19 18:00 ` [Intel-gfx] [RFC 5/8] drm/i915: Track per-context engine busyness Tvrtko Ursulin
2019-12-19 20:51 ` Chris Wilson
2019-12-20 7:58 ` Tvrtko Ursulin
2019-12-19 18:00 ` Tvrtko Ursulin [this message]
2019-12-19 18:00 ` [Intel-gfx] [RFC 7/8] drm/i915: Contexts can use struct pid stored in the client Tvrtko Ursulin
2019-12-19 20:57 ` Chris Wilson
2019-12-20 8:00 ` Tvrtko Ursulin
2019-12-19 18:00 ` [Intel-gfx] [RFC 8/8] drm/i915: Expose per-engine client busyness Tvrtko Ursulin
2019-12-19 21:04 ` Chris Wilson
2019-12-20 8:07 ` Tvrtko Ursulin
2019-12-19 21:23 ` Chris Wilson
2019-12-19 23:16 ` Chris Wilson
2019-12-19 18:41 ` [Intel-gfx] ✗ Fi.CI.CHECKPATCH: warning for Per client engine busyness (rev2) Patchwork
2019-12-19 19:17 ` [Intel-gfx] ✗ Fi.CI.BAT: failure " Patchwork
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20191219180019.25562-7-tvrtko.ursulin@linux.intel.com \
--to=tvrtko.ursulin@linux.intel.com \
--cc=Intel-gfx@lists.freedesktop.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox