From: Tejas Upadhyay <tejas.upadhyay@intel.com>
To: intel-xe@lists.freedesktop.org
Subject: [Intel-xe] [PATCH V2 3/7] drm/xe: Add tracking support for bos per client
Date: Tue, 12 Sep 2023 17:22:35 +0530 [thread overview]
Message-ID: <20230912115239.3554341-4-tejas.upadhyay@intel.com> (raw)
In-Reply-To: <20230912115239.3554341-1-tejas.upadhyay@intel.com>
In order to show per-client memory consumption, we need
tracking APIs that are invoked at every bo allocation
and removal. Add those APIs here so tracking calls can be
placed wherever applicable.
V2:
- make xe_drm_client_remove_bo return void - Himal
Signed-off-by: Tejas Upadhyay <tejas.upadhyay@intel.com>
---
drivers/gpu/drm/xe/xe_bo.c | 7 ++++
drivers/gpu/drm/xe/xe_bo_types.h | 10 ++++++
drivers/gpu/drm/xe/xe_drm_client.c | 52 ++++++++++++++++++++++++++++++
drivers/gpu/drm/xe/xe_drm_client.h | 26 +++++++++++++++
4 files changed, 95 insertions(+)
diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c
index 25fdc04627ca..c90a9615ab98 100644
--- a/drivers/gpu/drm/xe/xe_bo.c
+++ b/drivers/gpu/drm/xe/xe_bo.c
@@ -16,6 +16,7 @@
#include "xe_device.h"
#include "xe_dma_buf.h"
+#include "xe_drm_client.h"
#include "xe_ggtt.h"
#include "xe_gt.h"
#include "xe_map.h"
@@ -1051,6 +1052,11 @@ static void xe_ttm_bo_destroy(struct ttm_buffer_object *ttm_bo)
if (bo->vm && xe_bo_is_user(bo))
xe_vm_put(bo->vm);
+#ifdef CONFIG_PROC_FS
+ if (bo->client)
+ xe_drm_client_remove_bo(bo);
+#endif
+
kfree(bo);
}
@@ -1228,6 +1232,9 @@ struct xe_bo *__xe_bo_create_locked(struct xe_device *xe, struct xe_bo *bo,
bo->ttm.priority = DRM_XE_VMA_PRIORITY_NORMAL;
INIT_LIST_HEAD(&bo->vmas);
INIT_LIST_HEAD(&bo->pinned_link);
+#ifdef CONFIG_PROC_FS
+ INIT_LIST_HEAD(&bo->client_link);
+#endif
drm_gem_private_object_init(&xe->drm, &bo->ttm.base, size);
diff --git a/drivers/gpu/drm/xe/xe_bo_types.h b/drivers/gpu/drm/xe/xe_bo_types.h
index f6ee920303af..1aef413b6f8a 100644
--- a/drivers/gpu/drm/xe/xe_bo_types.h
+++ b/drivers/gpu/drm/xe/xe_bo_types.h
@@ -45,6 +45,16 @@ struct xe_bo {
struct ttm_bo_kmap_obj kmap;
/** @pinned_link: link to present / evicted list of pinned BO */
struct list_head pinned_link;
+#ifdef CONFIG_PROC_FS
+ /**
+ * @client: @xe_drm_client which created the bo
+ */
+ struct xe_drm_client *client;
+ /**
+ * @client_link: Link into @xe_drm_client.objects_list
+ */
+ struct list_head client_link;
+#endif
/** @props: BO user controlled properties */
struct {
/** @preferred_mem: preferred memory class for this BO */
diff --git a/drivers/gpu/drm/xe/xe_drm_client.c b/drivers/gpu/drm/xe/xe_drm_client.c
index b5dc024b5dd0..f10b13079074 100644
--- a/drivers/gpu/drm/xe/xe_drm_client.c
+++ b/drivers/gpu/drm/xe/xe_drm_client.c
@@ -8,8 +8,10 @@
#include <linux/slab.h>
#include <linux/types.h>
+#include "xe_bo_types.h"
#include "xe_device_types.h"
#include "xe_drm_client.h"
+#include "xe_trace.h"
/**
* xe_drm_client_alloc() - Allocate drm client
@@ -31,6 +34,10 @@ struct xe_drm_client *xe_drm_client_alloc(void)
kref_init(&client->kref);
+#ifdef CONFIG_PROC_FS
+ spin_lock_init(&client->bos_lock);
+ INIT_LIST_HEAD(&client->bos_list);
+#endif
return client;
}
@@ -52,6 +59,51 @@ void __xe_drm_client_free(struct kref *kref)
}
#ifdef CONFIG_PROC_FS
+/**
+ * xe_drm_client_add_bo() - Add BO for tracking client mem usage
+ * @client: The drm client ptr
+ * @bo: The xe BO ptr
+ *
+ * Add a BO to its creating drm client's tracking list by calling this function.
+ * This helps in tracking per-client memory usage.
+ *
+ * Return: void
+ */
+void xe_drm_client_add_bo(struct xe_drm_client *client,
+ struct xe_bo *bo)
+{
+ unsigned long flags;
+
+ XE_WARN_ON(bo->client);
+ XE_WARN_ON(!list_empty(&bo->client_link));
+
+ spin_lock_irqsave(&client->bos_lock, flags);
+ bo->client = xe_drm_client_get(client);
+ list_add_tail_rcu(&bo->client_link, &client->bos_list);
+ spin_unlock_irqrestore(&client->bos_lock, flags);
+}
+
+/**
+ * xe_drm_client_remove_bo() - Remove BO for tracking client mem usage
+ * @bo: The xe BO ptr
+ *
+ * Remove a BO from its drm client's tracking list by calling this function.
+ * This helps in tracking per-client memory usage.
+ *
+ * Return: void
+ */
+void xe_drm_client_remove_bo(struct xe_bo *bo)
+{
+ struct xe_drm_client *client = bo->client;
+ unsigned long flags;
+
+ spin_lock_irqsave(&client->bos_lock, flags);
+ list_del_rcu(&bo->client_link);
+ spin_unlock_irqrestore(&client->bos_lock, flags);
+
+ xe_drm_client_put(client);
+}
+
/**
* xe_drm_client_fdinfo() - Callback for fdinfo interface
* @p: The drm_printer ptr
diff --git a/drivers/gpu/drm/xe/xe_drm_client.h b/drivers/gpu/drm/xe/xe_drm_client.h
index dbe3a083c9df..744efb008861 100644
--- a/drivers/gpu/drm/xe/xe_drm_client.h
+++ b/drivers/gpu/drm/xe/xe_drm_client.h
@@ -15,10 +15,23 @@
struct drm_file;
struct drm_printer;
+struct xe_bo;
struct xe_drm_client {
struct kref kref;
unsigned int id;
+#ifdef CONFIG_PROC_FS
+ /**
+ * @bos_lock: lock protecting @bos_list
+ */
+ spinlock_t bos_lock;
+ /**
+ * @bos_list: list of bos created by this client
+ *
+ * Protected by @bos_lock.
+ */
+ struct list_head bos_list;
+#endif
};
static inline struct xe_drm_client *
@@ -41,5 +54,18 @@ xe_drm_client_get(struct xe_drm_client *client);
static inline void xe_drm_client_put(struct xe_drm_client *client);
#ifdef CONFIG_PROC_FS
void xe_drm_client_fdinfo(struct drm_printer *p, struct drm_file *file);
+void xe_drm_client_add_bo(struct xe_drm_client *client,
+ struct xe_bo *bo);
+void xe_drm_client_remove_bo(struct xe_bo *bo);
+#else
+static inline void xe_drm_client_add_bo(struct xe_drm_client *client,
+ struct xe_bo *bo)
+{
+}
+
+static inline void xe_drm_client_remove_bo(struct xe_bo *bo)
+{
+}
#endif
+
#endif
--
2.25.1
next prev parent reply other threads:[~2023-09-12 11:45 UTC|newest]
Thread overview: 18+ messages / expand[flat|nested] mbox.gz Atom feed top
2023-09-12 11:52 [Intel-xe] [PATCH V2 0/7] drm/xe: fdinfo memory stats Tejas Upadhyay
2023-09-12 11:52 ` [Intel-xe] [PATCH V2 1/7] drm/xe: Add drm-client infrastructure Tejas Upadhyay
2023-09-12 11:52 ` [Intel-xe] [PATCH V2 2/7] drm/xe: Interface xe drm client with fdinfo interface Tejas Upadhyay
2023-09-12 11:52 ` Tejas Upadhyay [this message]
2023-09-13 8:39 ` [Intel-xe] [PATCH V2 3/7] drm/xe: Add tracking support for bos per client Ghimiray, Himal Prasad
2023-09-12 11:52 ` [Intel-xe] [PATCH V2 4/7] drm/xe: Record each drm client with its VM Tejas Upadhyay
2023-09-13 8:58 ` Ghimiray, Himal Prasad
2023-09-12 11:52 ` [Intel-xe] [PATCH V2 5/7] drm/xe: Track page table memory usage for client Tejas Upadhyay
2023-09-13 8:38 ` Ghimiray, Himal Prasad
2023-09-12 11:52 ` [Intel-xe] [PATCH V2 6/7] drm/xe: Account ring buffer and context state storage Tejas Upadhyay
2023-09-12 11:52 ` [Intel-xe] [PATCH V2 7/7] drm/xe: Implement fdinfo memory stats printing Tejas Upadhyay
2023-09-12 12:06 ` [Intel-xe] ✓ CI.Patch_applied: success for drm/xe: fdinfo memory stats (rev2) Patchwork
2023-09-12 12:07 ` [Intel-xe] ✗ CI.checkpatch: warning " Patchwork
2023-09-12 12:08 ` [Intel-xe] ✓ CI.KUnit: success " Patchwork
2023-09-12 12:15 ` [Intel-xe] ✓ CI.Build: " Patchwork
2023-09-12 12:15 ` [Intel-xe] ✓ CI.Hooks: " Patchwork
2023-09-12 12:17 ` [Intel-xe] ✓ CI.checksparse: " Patchwork
2023-09-12 12:49 ` [Intel-xe] ✓ CI.BAT: " Patchwork
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20230912115239.3554341-4-tejas.upadhyay@intel.com \
--to=tejas.upadhyay@intel.com \
--cc=intel-xe@lists.freedesktop.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox