From: "Iddamsetty, Aravind" <aravind.iddamsetty@intel.com>
To: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>,
<Intel-gfx@lists.freedesktop.org>,
<dri-devel@lists.freedesktop.org>
Subject: Re: [Intel-gfx] [PATCH 1/5] drm/i915: Add ability for tracking buffer objects per client
Date: Thu, 3 Aug 2023 11:00:13 +0530
Message-ID: <1e9f6ec7-afbb-1708-fbc1-8ce4c2ec511d@intel.com>
In-Reply-To: <20230727101352.1899466-2-tvrtko.ursulin@linux.intel.com>
On 27-07-2023 15:43, Tvrtko Ursulin wrote:
> From: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
>
> In order to show per-client memory usage, let's add some infrastructure
> which enables tracking buffer objects owned by clients.
>
> We add a per-client list protected by a new per-client lock, and to
> support delayed destruction (post client exit) we make tracked objects
> hold references to the owning client.
>
> Also, object memory region teardown is moved to the existing RCU free
> callback to allow safe dereference from the fdinfo RCU read section.
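
For other readers following the lifetime argument above, here is a rough
sketch of the fdinfo read side this enables. This is illustrative only --
the real printing code lands in patch 5/5, and the helper name and output
format below are made up:

static void show_client_objects(struct drm_printer *p,
                                struct i915_drm_client *client)
{
        struct drm_i915_gem_object *obj;

        rcu_read_lock();
        list_for_each_entry_rcu(obj, &client->objects_list, client_link) {
                /*
                 * Objects are freed via an RCU callback
                 * (__i915_gem_free_object_rcu()), and with this patch
                 * obj->mm.placements is freed from there too, so this
                 * memory stays valid for the duration of the read
                 * section even if the object is concurrently being
                 * destroyed.
                 */
                if (!kref_get_unless_zero(&obj->base.refcount))
                        continue;

                drm_printf(p, "size: %zu\n", obj->base.size);

                i915_gem_object_put(obj);
        }
        rcu_read_unlock();
}
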
This is the same as the earlier series, which I had reviewed but forgot
to give an r-b on. Sorry for the delay.
Reviewed-by: Aravind Iddamsetty <aravind.iddamsetty@intel.com>
Thanks,
Aravind.
>
> Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
> ---
> drivers/gpu/drm/i915/gem/i915_gem_object.c | 13 +++++--
> .../gpu/drm/i915/gem/i915_gem_object_types.h | 12 +++++++
> drivers/gpu/drm/i915/i915_drm_client.c | 36 +++++++++++++++++++
> drivers/gpu/drm/i915/i915_drm_client.h | 33 ++++++++++++++++++
> 4 files changed, 91 insertions(+), 3 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
> index 97ac6fb37958..3dc4fbb67d2b 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
> @@ -105,6 +105,10 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
>
> INIT_LIST_HEAD(&obj->mm.link);
>
> +#ifdef CONFIG_PROC_FS
> + INIT_LIST_HEAD(&obj->client_link);
> +#endif
> +
> INIT_LIST_HEAD(&obj->lut_list);
> spin_lock_init(&obj->lut_lock);
>
> @@ -292,6 +296,10 @@ void __i915_gem_free_object_rcu(struct rcu_head *head)
> container_of(head, typeof(*obj), rcu);
> struct drm_i915_private *i915 = to_i915(obj->base.dev);
>
> + /* We need to keep this alive for RCU read access from fdinfo. */
> + if (obj->mm.n_placements > 1)
> + kfree(obj->mm.placements);
> +
> i915_gem_object_free(obj);
>
> GEM_BUG_ON(!atomic_read(&i915->mm.free_count));
> @@ -388,9 +396,6 @@ void __i915_gem_free_object(struct drm_i915_gem_object *obj)
> if (obj->ops->release)
> obj->ops->release(obj);
>
> - if (obj->mm.n_placements > 1)
> - kfree(obj->mm.placements);
> -
> if (obj->shares_resv_from)
> i915_vm_resv_put(obj->shares_resv_from);
>
> @@ -441,6 +446,8 @@ static void i915_gem_free_object(struct drm_gem_object *gem_obj)
>
> GEM_BUG_ON(i915_gem_object_is_framebuffer(obj));
>
> + i915_drm_client_remove_object(obj);
> +
> /*
> * Before we free the object, make sure any pure RCU-only
> * read-side critical sections are complete, e.g.
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
> index e72c57716bee..8de2b91b3edf 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
> @@ -300,6 +300,18 @@ struct drm_i915_gem_object {
> */
> struct i915_address_space *shares_resv_from;
>
> +#ifdef CONFIG_PROC_FS
> + /**
> + * @client: @i915_drm_client which created the object
> + */
> + struct i915_drm_client *client;
> +
> + /**
> + * @client_link: Link into @i915_drm_client.objects_list
> + */
> + struct list_head client_link;
> +#endif
> +
> union {
> struct rcu_head rcu;
> struct llist_node freed;
> diff --git a/drivers/gpu/drm/i915/i915_drm_client.c b/drivers/gpu/drm/i915/i915_drm_client.c
> index 2a44b3876cb5..2e5e69edc0f9 100644
> --- a/drivers/gpu/drm/i915/i915_drm_client.c
> +++ b/drivers/gpu/drm/i915/i915_drm_client.c
> @@ -28,6 +28,10 @@ struct i915_drm_client *i915_drm_client_alloc(void)
> kref_init(&client->kref);
> spin_lock_init(&client->ctx_lock);
> INIT_LIST_HEAD(&client->ctx_list);
> +#ifdef CONFIG_PROC_FS
> + spin_lock_init(&client->objects_lock);
> + INIT_LIST_HEAD(&client->objects_list);
> +#endif
>
> return client;
> }
> @@ -108,4 +112,36 @@ void i915_drm_client_fdinfo(struct drm_printer *p, struct drm_file *file)
> for (i = 0; i < ARRAY_SIZE(uabi_class_names); i++)
> show_client_class(p, i915, file_priv->client, i);
> }
> +
> +void i915_drm_client_add_object(struct i915_drm_client *client,
> + struct drm_i915_gem_object *obj)
> +{
> + unsigned long flags;
> +
> + GEM_WARN_ON(obj->client);
> + GEM_WARN_ON(!list_empty(&obj->client_link));
> +
> + spin_lock_irqsave(&client->objects_lock, flags);
> + obj->client = i915_drm_client_get(client);
> + list_add_tail_rcu(&obj->client_link, &client->objects_list);
> + spin_unlock_irqrestore(&client->objects_lock, flags);
> +}
> +
> +bool i915_drm_client_remove_object(struct drm_i915_gem_object *obj)
> +{
> + struct i915_drm_client *client = fetch_and_zero(&obj->client);
> + unsigned long flags;
> +
> + /* Object may not be associated with a client. */
> + if (!client)
> + return false;
> +
> + spin_lock_irqsave(&client->objects_lock, flags);
> + list_del_rcu(&obj->client_link);
> + spin_unlock_irqrestore(&client->objects_lock, flags);
> +
> + i915_drm_client_put(client);
> +
> + return true;
> +}
> #endif
> diff --git a/drivers/gpu/drm/i915/i915_drm_client.h b/drivers/gpu/drm/i915/i915_drm_client.h
> index 67816c912bca..5f58fdf7dcb8 100644
> --- a/drivers/gpu/drm/i915/i915_drm_client.h
> +++ b/drivers/gpu/drm/i915/i915_drm_client.h
> @@ -12,6 +12,9 @@
>
> #include <uapi/drm/i915_drm.h>
>
> +#include "i915_file_private.h"
> +#include "gem/i915_gem_object_types.h"
> +
> #define I915_LAST_UABI_ENGINE_CLASS I915_ENGINE_CLASS_COMPUTE
>
> struct drm_file;
> @@ -25,6 +28,20 @@ struct i915_drm_client {
> spinlock_t ctx_lock; /* For add/remove from ctx_list. */
> struct list_head ctx_list; /* List of contexts belonging to client. */
>
> +#ifdef CONFIG_PROC_FS
> + /**
> + * @objects_lock: lock protecting @objects_list
> + */
> + spinlock_t objects_lock;
> +
> + /**
> + * @objects_list: list of objects created by this client
> + *
> + * Protected by @objects_lock.
> + */
> + struct list_head objects_list;
> +#endif
> +
> /**
> * @past_runtime: Accumulation of pphwsp runtimes from closed contexts.
> */
> @@ -49,4 +66,20 @@ struct i915_drm_client *i915_drm_client_alloc(void);
>
> void i915_drm_client_fdinfo(struct drm_printer *p, struct drm_file *file);
>
> +#ifdef CONFIG_PROC_FS
> +void i915_drm_client_add_object(struct i915_drm_client *client,
> + struct drm_i915_gem_object *obj);
> +bool i915_drm_client_remove_object(struct drm_i915_gem_object *obj);
> +#else
> +static inline void i915_drm_client_add_object(struct i915_drm_client *client,
> + struct drm_i915_gem_object *obj)
> +{
> +}
> +
> +static inline bool i915_drm_client_remove_object(struct drm_i915_gem_object *obj)
> +{
> + return false;
> +}
> +#endif
> +
> #endif /* !__I915_DRM_CLIENT_H__ */
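
One usage note for completeness: the teardown side is wired up in this
patch (i915_gem_free_object() calls i915_drm_client_remove_object()),
while the creation-side hookup is not shown here. A hypothetical call
site, once the owning client is known, would be along the lines of:

	/* E.g. somewhere in the object creation path: */
	if (file_priv->client)
		i915_drm_client_add_object(file_priv->client, obj);

which takes the client reference that remove_object() later drops.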