From: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
To: Intel-gfx@lists.freedesktop.org
Cc: kui.wen@intel.com
Subject: [Intel-gfx] [RFC 5/8] drm/i915: Contexts can use struct pid stored in the client
Date: Fri, 10 Jan 2020 13:30:46 +0000 [thread overview]
Message-ID: <20200110133049.2705-6-tvrtko.ursulin@linux.intel.com> (raw)
In-Reply-To: <20200110133049.2705-1-tvrtko.ursulin@linux.intel.com>
From: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Now that contexts hold a reference-counted pointer to their parent client, we can
remove the separate struct pid reference owned by contexts in favour of the
one already held by the client.
Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
drivers/gpu/drm/i915/gem/i915_gem_context.c | 13 ++++---------
drivers/gpu/drm/i915/gem/i915_gem_context_types.h | 10 ----------
drivers/gpu/drm/i915/i915_debugfs.c | 7 ++++---
drivers/gpu/drm/i915/i915_gpu_error.c | 8 ++++----
4 files changed, 12 insertions(+), 26 deletions(-)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index ba8ccc754f20..758cebb99ba4 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -323,7 +323,6 @@ static void i915_gem_context_free(struct i915_gem_context *ctx)
if (ctx->timeline)
intel_timeline_put(ctx->timeline);
- put_pid(ctx->pid);
mutex_destroy(&ctx->mutex);
kfree_rcu(ctx, rcu);
@@ -794,24 +793,20 @@ static int gem_context_register(struct i915_gem_context *ctx,
WRITE_ONCE(vm->file, fpriv); /* XXX */
mutex_unlock(&ctx->mutex);
- ctx->pid = get_task_pid(current, PIDTYPE_PID);
snprintf(ctx->name, sizeof(ctx->name), "%s[%d]",
- current->comm, pid_nr(ctx->pid));
+ current->comm, pid_nr(client->pid));
/* And finally expose ourselves to userspace via the idr */
ret = xa_alloc(&fpriv->context_xa, id, ctx, xa_limit_32b, GFP_KERNEL);
- if (ret) {
- put_pid(fetch_and_zero(&ctx->pid));
- goto out;
- }
+ if (ret)
+ return ret;
ctx->client = client = i915_drm_client_get(fpriv->client);
spin_lock(&client->ctx_lock);
list_add_tail_rcu(&ctx->client_link, &client->ctx_list);
spin_unlock(&client->ctx_lock);
-out:
- return ret;
+ return 0;
}
int i915_gem_context_open(struct drm_i915_private *i915,
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
index 879824159646..23421377a43d 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
@@ -90,16 +90,6 @@ struct i915_gem_context {
*/
struct i915_address_space __rcu *vm;
- /**
- * @pid: process id of creator
- *
- * Note that who created the context may not be the principle user,
- * as the context may be shared across a local socket. However,
- * that should only affect the default context, all contexts created
- * explicitly by the client are expected to be isolated.
- */
- struct pid *pid;
-
/** link: place with &drm_i915_private.context_list */
struct list_head link;
struct llist_node free_link;
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 8f01c2bc7355..bc533501b4e0 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -346,7 +346,8 @@ static void print_context_stats(struct seq_file *m,
rcu_read_unlock();
rcu_read_lock();
- task = pid_task(ctx->pid ?: file->pid, PIDTYPE_PID);
+ task = pid_task(ctx->client->pid ?: file->pid,
+ PIDTYPE_PID);
snprintf(name, sizeof(name), "%s",
task ? task->comm : "<unknown>");
rcu_read_unlock();
@@ -1492,10 +1493,10 @@ static int i915_context_status(struct seq_file *m, void *unused)
spin_unlock(&i915->gem.contexts.lock);
seq_puts(m, "HW context ");
- if (ctx->pid) {
+ if (ctx->client->pid) {
struct task_struct *task;
- task = get_pid_task(ctx->pid, PIDTYPE_PID);
+ task = get_pid_task(ctx->client->pid, PIDTYPE_PID);
if (task) {
seq_printf(m, "(%s [%d]) ",
task->comm, task->pid);
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index fda0977d2059..9240327bdb7d 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -1235,8 +1235,8 @@ static void record_request(const struct i915_request *request,
erq->pid = 0;
rcu_read_lock();
ctx = rcu_dereference(request->context->gem_context);
- if (ctx)
- erq->pid = pid_nr(ctx->pid);
+ if (ctx && ctx->client->pid)
+ erq->pid = pid_nr(ctx->client->pid);
rcu_read_unlock();
}
@@ -1313,11 +1313,11 @@ static bool record_context(struct drm_i915_error_context *e,
if (ctx && !kref_get_unless_zero(&ctx->ref))
ctx = NULL;
rcu_read_unlock();
- if (!ctx)
+ if (!ctx || !ctx->client->pid)
return false;
rcu_read_lock();
- task = pid_task(ctx->pid, PIDTYPE_PID);
+ task = pid_task(ctx->client->pid, PIDTYPE_PID);
if (task) {
strcpy(e->comm, task->comm);
e->pid = task->pid;
--
2.20.1
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
next prev parent reply other threads:[~2020-01-10 13:31 UTC|newest]
Thread overview: 21+ messages / expand[flat|nested] mbox.gz Atom feed top
2020-01-10 13:30 [Intel-gfx] [RFC 0/8] Per client engine busyness Tvrtko Ursulin
2020-01-10 13:30 ` [Intel-gfx] [RFC 1/8] drm/i915: Expose list of clients in sysfs Tvrtko Ursulin
2020-01-10 13:36 ` Chris Wilson
2020-01-10 13:30 ` [Intel-gfx] [RFC 2/8] drm/i915: Update client name on context create Tvrtko Ursulin
2020-01-10 13:39 ` Chris Wilson
2020-01-10 13:30 ` [Intel-gfx] [RFC 3/8] drm/i915: Track per-context engine busyness Tvrtko Ursulin
2020-01-10 13:46 ` Chris Wilson
2020-01-10 13:30 ` [Intel-gfx] [RFC 4/8] drm/i915: Track all user contexts per client Tvrtko Ursulin
2020-01-10 13:30 ` Tvrtko Ursulin [this message]
2020-01-10 13:42 ` [Intel-gfx] [RFC 5/8] drm/i915: Contexts can use struct pid stored in the client Chris Wilson
2020-01-30 16:11 ` Tvrtko Ursulin
2020-01-10 13:30 ` [Intel-gfx] [RFC 6/8] drm/i915: Expose per-engine client busyness Tvrtko Ursulin
2020-01-10 13:58 ` Chris Wilson
2020-01-10 14:09 ` Tvrtko Ursulin
2020-01-10 14:12 ` Chris Wilson
2020-01-10 13:30 ` [Intel-gfx] [RFC 7/8] drm/i915: Track hw reported context runtime Tvrtko Ursulin
2020-01-10 14:03 ` Chris Wilson
2020-01-10 13:30 ` [Intel-gfx] [RFC 8/8] drm/i915: Fallback to hw context runtime when sw tracking is not available Tvrtko Ursulin
2020-01-10 16:26 ` [Intel-gfx] ✗ Fi.CI.CHECKPATCH: warning for Per client engine busyness (rev3) Patchwork
2020-01-10 16:51 ` [Intel-gfx] ✓ Fi.CI.BAT: success " Patchwork
2020-01-14 4:39 ` [Intel-gfx] ✗ Fi.CI.IGT: failure " Patchwork
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20200110133049.2705-6-tvrtko.ursulin@linux.intel.com \
--to=tvrtko.ursulin@linux.intel.com \
--cc=Intel-gfx@lists.freedesktop.org \
--cc=kui.wen@intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox