Intel-XE Archive on lore.kernel.org
 help / color / mirror / Atom feed
From: "Ghimiray, Himal Prasad" <himal.prasad.ghimiray@intel.com>
To: "Upadhyay, Tejas" <tejas.upadhyay@intel.com>,
	"intel-xe@lists.freedesktop.org" <intel-xe@lists.freedesktop.org>
Subject: Re: [Intel-xe] [PATCH 1/7] drm/xe: Add drm-client infrastructure
Date: Tue, 12 Sep 2023 13:54:06 +0530	[thread overview]
Message-ID: <ef994ecc-15d2-493d-85db-b8aaa3072fca@intel.com> (raw)
In-Reply-To: <SJ1PR11MB620440975315F5D0E8D3C03981EEA@SJ1PR11MB6204.namprd11.prod.outlook.com>

[-- Attachment #1: Type: text/plain, Size: 8787 bytes --]


On 07-09-2023 14:27, Upadhyay, Tejas wrote:
>
> *From:*Ghimiray, Himal Prasad <himal.prasad.ghimiray@intel.com>
> *Sent:* Wednesday, September 6, 2023 9:45 AM
> *To:* Upadhyay, Tejas <tejas.upadhyay@intel.com>; 
> intel-xe@lists.freedesktop.org
> *Cc:* Iddamsetty, Aravind <aravind.iddamsetty@intel.com>
> *Subject:* Re: [PATCH 1/7] drm/xe: Add drm-client infrastructure
>
> Hi Tejas,
>
> On 31-08-2023 14:35, Tejas Upadhyay wrote:
>
>     Add drm-client infrastructure to record stats of consumption
>
>     done by individual drm client.
>
>     Signed-off-by: Tejas Upadhyay <tejas.upadhyay@intel.com>
>
>     ---
>
>       drivers/gpu/drm/xe/Makefile          |  1 +
>
>       drivers/gpu/drm/xe/xe_device.c       | 15 +++++++-
>
>       drivers/gpu/drm/xe/xe_device_types.h |  6 ++++
>
>       drivers/gpu/drm/xe/xe_drm_client.c   | 52 ++++++++++++++++++++++++++++
>
>       drivers/gpu/drm/xe/xe_drm_client.h   | 43 +++++++++++++++++++++++
>
>       5 files changed, 116 insertions(+), 1 deletion(-)
>
>       create mode 100644 drivers/gpu/drm/xe/xe_drm_client.c
>
>       create mode 100644 drivers/gpu/drm/xe/xe_drm_client.h
>
>     diff --git a/drivers/gpu/drm/xe/Makefile b/drivers/gpu/drm/xe/Makefile
>
>     index 9d2311f8141f..f9c25cb2f890 100644
>
>     --- a/drivers/gpu/drm/xe/Makefile
>
>     +++ b/drivers/gpu/drm/xe/Makefile
>
>     @@ -51,6 +51,7 @@ xe-y += xe_bb.o \
>
>              xe_device.o \
>
>              xe_device_sysfs.o \
>
>              xe_dma_buf.o \
>
>     +       xe_drm_client.o \
>
>              xe_exec.o \
>
>              xe_execlist.o \
>
>              xe_exec_queue.o \
>
>     diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
>
>     index 109aeb25d19c..cf59c7b74eaf 100644
>
>     --- a/drivers/gpu/drm/xe/xe_device.c
>
>     +++ b/drivers/gpu/drm/xe/xe_device.c
>
>     @@ -18,6 +18,7 @@
>
>       #include "xe_debugfs.h"
>
>       #include "xe_display.h"
>
>       #include "xe_dma_buf.h"
>
>     +#include "xe_drm_client.h"
>
>       #include "xe_drv.h"
>
>       #include "xe_exec_queue.h"
>
>       #include "xe_exec.h"
>
>     @@ -43,13 +44,24 @@ struct lockdep_map xe_device_mem_access_lockdep_map = {
>
>       
>
>       static int xe_file_open(struct drm_device *dev, struct drm_file *file)
>
>       {
>
>     +       struct xe_device *xe = to_xe_device(dev);
>
> Don't see xe_device being used in this patch. Better to make this 
> assignment in the patch where xe will be used to reference the ttm 
> device. [Patch 7]
>
>     +       struct xe_drm_client *client;
>
>              struct xe_file *xef;
>
>     +       int ret = -ENOMEM;
>
>       
>
>              xef = kzalloc(sizeof(*xef), GFP_KERNEL);
>
>              if (!xef)
>
>     -               return -ENOMEM;
>
>     +               return ret;
>
>     +
>
>     +       client = xe_drm_client_alloc();
>
>     +       if (!client) {
>
>     +               kfree(xef);
>
>     +               return ret;
>
>     +       }
>
>       
>
>              xef->drm = file;
>
>     +       xef->client = client;
>
>     +       xef->xe = xe;
>
>       
>
>              mutex_init(&xef->vm.lock);
>
>              xa_init_flags(&xef->vm.xa, XA_FLAGS_ALLOC1);
>
>     @@ -89,6 +101,7 @@ static void xe_file_close(struct drm_device *dev, struct drm_file *file)
>
>              xa_destroy(&xef->vm.xa);
>
>              mutex_destroy(&xef->vm.lock);
>
>       
>
>     +       xe_drm_client_put(xef->client);
>
>              kfree(xef);
>
>       }
>
>       
>
>     diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h
>
>     index 750e1f0d3339..d210a535c703 100644
>
>     --- a/drivers/gpu/drm/xe/xe_device_types.h
>
>     +++ b/drivers/gpu/drm/xe/xe_device_types.h
>
>     @@ -462,6 +462,9 @@ struct xe_device {
>
>        * struct xe_file - file handle for XE driver
>
>        */
>
>       struct xe_file {
>
>     +       /** @xe: xe DEVICE **/
>
>     +       struct xe_device *xe;
>
>     +
>
> same comment as above.
>
>              /** @drm: base DRM file */
>
>              struct drm_file *drm;
>
>       
>
>     @@ -480,6 +483,9 @@ struct xe_file {
>
>                      /** @lock: protects file engine state */
>
>                      struct mutex lock;
>
>              } exec_queue;
>
>     +
>
>     +       /** @client: drm client */
>
>     +       struct xe_drm_client *client;
>
>       };
>
>       
>
>       #endif
>
>     diff --git a/drivers/gpu/drm/xe/xe_drm_client.c b/drivers/gpu/drm/xe/xe_drm_client.c
>
>     new file mode 100644
>
>     index 000000000000..ea7993338076
>
>     --- /dev/null
>
>     +++ b/drivers/gpu/drm/xe/xe_drm_client.c
>
>     @@ -0,0 +1,52 @@
>
>     +// SPDX-License-Identifier: MIT
>
>     +/*
>
>     + * Copyright © 2023 Intel Corporation
>
>     + */
>
>     +
>
>     +#include <drm/drm_print.h>
>
>     +#include <linux/kernel.h>
>
>     +#include <linux/slab.h>
>
>     +#include <linux/types.h>
>
>     +
>
>     +#include "xe_device_types.h"
>
>     +#include "xe_drm_client.h"
>
>     +
>
>     +/**
>
>     + * xe_drm_client_alloc() - Allocate drm client
>
>     + * @void: No arg
>
>     + *
>
>     + * Allocate drm client struct to track client memory against
>
>     + * same till client life. Call this API whenever new client
>
>     + * has opened xe device.
>
>     + *
>
>     + * Return: pointer to client struct or NULL if can't allocate
>
>     + */
>
>     +struct xe_drm_client *xe_drm_client_alloc(void)
>
>     +{
>
>     +       struct xe_drm_client *client;
>
>     +
>
>     +       client = kzalloc(sizeof(*client), GFP_KERNEL);
>
>     +       if (!client)
>
>     +               return NULL;
>
>     +
>
>     +       kref_init(&client->kref);
>
>     +
>
>     +       return client;
>
>     +}
>
>     +
>
>     +/**
>
>     + * __xe_drm_client_free() - Free client struct
>
>     + * @kref: The reference
>
>     + *
>
>     + * This frees client struct. Call this API when xe device is closed
>
>     + * by drm client.
>
>     + *
>
>     + * Return: void
>
>     + */
>
>     +void __xe_drm_client_free(struct kref *kref)
>
>     +{
>
>     +       struct xe_drm_client *client =
>
>     +               container_of(kref, typeof(*client), kref);
>
>     +
>
>     +       kfree(client);
>
>     +}
>
>     diff --git a/drivers/gpu/drm/xe/xe_drm_client.h b/drivers/gpu/drm/xe/xe_drm_client.h
>
>     new file mode 100644
>
>     index 000000000000..be097cdf5d12
>
>     --- /dev/null
>
>     +++ b/drivers/gpu/drm/xe/xe_drm_client.h
>
>     @@ -0,0 +1,43 @@
>
>     +/* SPDX-License-Identifier: MIT */
>
>     +/*
>
>     + * Copyright © 2023 Intel Corporation
>
>     + */
>
>     +
>
>     +#ifndef _XE_DRM_CLIENT_H_
>
>     +#define _XE_DRM_CLIENT_H_
>
>     +
>
>     +#include <linux/kref.h>
>
>     +#include <linux/list.h>
>
>     +#include <linux/pid.h>
>
>     +#include <linux/rcupdate.h>
>
>     +#include <linux/sched.h>
>
>     +#include <linux/spinlock.h>
>
>     +
>
>     +struct drm_file;
>
>     +struct drm_printer;
>
>     +
>
>     +struct xe_drm_client {
>
>     +       struct kref kref;
>
>     +       unsigned int id;
>
>     +};
>
>     +
>
>     +       static inline struct xe_drm_client *
>
>     +xe_drm_client_get(struct xe_drm_client *client)
>
>     +{
>
>     +       kref_get(&client->kref);
>
>     +       return client;
>
>     +}
>
>     +
>
>     +void __xe_drm_client_free(struct kref *kref);
>
>     +
>
>     +static inline void xe_drm_client_put(struct xe_drm_client *client)
>
>     +{
>
>     +       kref_put(&client->kref, __xe_drm_client_free);
>
>     +}
>
>     +
>
>     +struct xe_drm_client *xe_drm_client_alloc(void);
>
>     +static inline struct xe_drm_client *
>
>     +xe_drm_client_get(struct xe_drm_client *client);
>
> kref_get/xe_drm_client_get can be moved to the patch where we are 
> introducing xe_drm_client_add_bo to maintain refcount.
>
> Sure. Let me check. But as we introduce infra related to the client in 
> this patch, I thought of keeping all basic APIs and xe->xef as part of 
> this patch. Let me know if it does not make sense.
>
Hmm. Makes sense. RB from my end

Reviewed-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>

> Thanks,
>
> Tejas
>
> BR
>
> Himal
>
>     +static inline void xe_drm_client_put(struct xe_drm_client *client);
>
>     +
>
>     +#endif
>

[-- Attachment #2: Type: text/html, Size: 20269 bytes --]

  reply	other threads:[~2023-09-12  8:25 UTC|newest]

Thread overview: 34+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2023-08-31  9:05 [Intel-xe] [PATCH 0/7] drm/xe: fdinfo memory stats Tejas Upadhyay
2023-08-31  9:05 ` [Intel-xe] [PATCH 1/7] drm/xe: Add drm-client infrastructure Tejas Upadhyay
2023-09-06  4:14   ` Ghimiray, Himal Prasad
2023-09-07  8:57     ` Upadhyay, Tejas
2023-09-12  8:24       ` Ghimiray, Himal Prasad [this message]
2023-08-31  9:05 ` [Intel-xe] [PATCH 2/7] drm/xe: Interface xe drm client with fdinfo interface Tejas Upadhyay
2023-09-06  4:21   ` Ghimiray, Himal Prasad
2023-08-31  9:05 ` [Intel-xe] [PATCH 3/7] drm/xe: Add tracking support for bos per client Tejas Upadhyay
2023-09-06  9:38   ` Ghimiray, Himal Prasad
2023-09-07  8:52     ` Upadhyay, Tejas
2023-08-31  9:05 ` [Intel-xe] [PATCH 4/7] drm/xe: Record each drm client with its VM Tejas Upadhyay
2023-09-06  9:47   ` Ghimiray, Himal Prasad
2023-09-07  8:49     ` Upadhyay, Tejas
2023-09-07  9:28       ` Ghimiray, Himal Prasad
2023-09-07  9:44         ` Upadhyay, Tejas
2023-08-31  9:05 ` [Intel-xe] [PATCH 5/7] drm/xe: Track page table memory usage for client Tejas Upadhyay
2023-09-06  9:54   ` Ghimiray, Himal Prasad
2023-09-07  8:40     ` Upadhyay, Tejas
2023-08-31  9:05 ` [Intel-xe] [PATCH 6/7] drm/xe: Account ring buffer and context state storage Tejas Upadhyay
2023-09-06 11:19   ` Ghimiray, Himal Prasad
2023-08-31  9:05 ` [Intel-xe] [PATCH 7/7] drm/xe: Implement fdinfo memory stats printing Tejas Upadhyay
2023-08-31 12:19   ` Upadhyay, Tejas
2023-09-12  6:22   ` Ghimiray, Himal Prasad
2023-09-12 11:14     ` Jani Nikula
2023-08-31  9:21 ` [Intel-xe] ✓ CI.Patch_applied: success for drm/xe: fdinfo memory stats Patchwork
2023-08-31  9:21 ` [Intel-xe] ✗ CI.checkpatch: warning " Patchwork
2023-08-31  9:22 ` [Intel-xe] ✓ CI.KUnit: success " Patchwork
2023-08-31  9:29 ` [Intel-xe] ✓ CI.Build: " Patchwork
2023-08-31  9:30 ` [Intel-xe] ✗ CI.Hooks: failure " Patchwork
2023-08-31  9:30 ` [Intel-xe] ✗ CI.checksparse: warning " Patchwork
2023-08-31 10:04 ` [Intel-xe] ✓ CI.BAT: success " Patchwork
2023-08-31 11:23 ` Patchwork
2023-09-07  9:22 ` [Intel-xe] [PATCH 0/7] " Ghimiray, Himal Prasad
2023-09-07  9:35   ` Upadhyay, Tejas

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=ef994ecc-15d2-493d-85db-b8aaa3072fca@intel.com \
    --to=himal.prasad.ghimiray@intel.com \
    --cc=intel-xe@lists.freedesktop.org \
    --cc=tejas.upadhyay@intel.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox