* [PATCH] drm/panthor: assign unique names to queues
From: Chia-I Wu @ 2025-08-28 20:05 UTC
  To: Boris Brezillon, Steven Price, Liviu Dudau, Maarten Lankhorst,
	Maxime Ripard, Thomas Zimmermann, David Airlie, Simona Vetter,
	dri-devel, linux-kernel

Userspace relies on the ring field of gpu_scheduler tracepoints to
identify a drm_gpu_scheduler.  The value of the ring field is taken from
sched->name.

Because we typically have multiple schedulers running in parallel in
each process, assign unique names to schedulers such that userspace can
distinguish them.
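
With unique names, trace output along these lines (illustrative only,
not the exact tracepoint format; gid 3 is just an example) lets
userspace tell the schedulers apart instead of seeing "panthor-queue"
for every queue:

  drm_run_job: ... ring=panthor-queue-3-0 ...
  drm_run_job: ... ring=panthor-queue-3-1 ...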

Signed-off-by: Chia-I Wu <olvaffe@gmail.com>
---
 drivers/gpu/drm/panthor/panthor_sched.c | 32 ++++++++++++++++++-------
 1 file changed, 23 insertions(+), 9 deletions(-)

diff --git a/drivers/gpu/drm/panthor/panthor_sched.c b/drivers/gpu/drm/panthor/panthor_sched.c
index ba5dc3e443d9c..26616b6cb110d 100644
--- a/drivers/gpu/drm/panthor/panthor_sched.c
+++ b/drivers/gpu/drm/panthor/panthor_sched.c
@@ -360,6 +360,9 @@ struct panthor_queue {
 	/** @entity: DRM scheduling entity used for this queue. */
 	struct drm_sched_entity entity;
 
+	/** @name: DRM scheduler name for this queue. */
+	char name[32];
+
 	/**
 	 * @remaining_time: Time remaining before the job timeout expires.
 	 *
@@ -3308,9 +3311,10 @@ static u32 calc_profiling_ringbuf_num_slots(struct panthor_device *ptdev,
 
 static struct panthor_queue *
 group_create_queue(struct panthor_group *group,
-		   const struct drm_panthor_queue_create *args)
+		   const struct drm_panthor_queue_create *args, u32 gid,
+		   u32 qid)
 {
-	const struct drm_sched_init_args sched_args = {
+	struct drm_sched_init_args sched_args = {
 		.ops = &panthor_queue_sched_ops,
 		.submit_wq = group->ptdev->scheduler->wq,
 		.num_rqs = 1,
@@ -3323,7 +3327,7 @@ group_create_queue(struct panthor_group *group,
 		.credit_limit = args->ringbuf_size / sizeof(u64),
 		.timeout = msecs_to_jiffies(JOB_TIMEOUT_MS),
 		.timeout_wq = group->ptdev->reset.wq,
-		.name = "panthor-queue",
+		.name = NULL, /* will point to queue->name */
 		.dev = group->ptdev->base.dev,
 	};
 	struct drm_gpu_scheduler *drm_sched;
@@ -3398,6 +3402,11 @@ group_create_queue(struct panthor_group *group,
 	if (ret)
 		goto err_free_queue;
 
+	/* assign a unique name */
+	snprintf(queue->name, sizeof(queue->name), "panthor-queue-%d-%d", gid,
+		 qid);
+	sched_args.name = queue->name;
+
 	ret = drm_sched_init(&queue->scheduler, &sched_args);
 	if (ret)
 		goto err_free_queue;
@@ -3540,12 +3549,18 @@ int panthor_group_create(struct panthor_file *pfile,
 	memset(group->syncobjs->kmap, 0,
 	       group_args->queues.count * sizeof(struct panthor_syncobj_64b));
 
+	ret = xa_alloc(&gpool->xa, &gid, group,
+		       XA_LIMIT(1, MAX_GROUPS_PER_POOL), GFP_KERNEL);
+	if (ret)
+		goto err_put_group;
+
 	for (i = 0; i < group_args->queues.count; i++) {
-		group->queues[i] = group_create_queue(group, &queue_args[i]);
+		group->queues[i] =
+			group_create_queue(group, &queue_args[i], gid, i);
 		if (IS_ERR(group->queues[i])) {
 			ret = PTR_ERR(group->queues[i]);
 			group->queues[i] = NULL;
-			goto err_put_group;
+			goto err_erase_gid;
 		}
 
 		group->queue_count++;
@@ -3553,10 +3568,6 @@ int panthor_group_create(struct panthor_file *pfile,
 
 	group->idle_queues = GENMASK(group->queue_count - 1, 0);
 
-	ret = xa_alloc(&gpool->xa, &gid, group, XA_LIMIT(1, MAX_GROUPS_PER_POOL), GFP_KERNEL);
-	if (ret)
-		goto err_put_group;
-
 	mutex_lock(&sched->reset.lock);
 	if (atomic_read(&sched->reset.in_progress)) {
 		panthor_group_stop(group);
@@ -3575,6 +3586,9 @@ int panthor_group_create(struct panthor_file *pfile,
 
 	return gid;
 
+err_erase_gid:
+	xa_erase(&gpool->xa, gid);
+
 err_put_group:
 	group_put(group);
 	return ret;
-- 
2.51.0.318.gd7df087d1a-goog



* Re: [PATCH] drm/panthor: assign unique names to queues
From: Boris Brezillon @ 2025-08-29  8:00 UTC
  To: Chia-I Wu
  Cc: Steven Price, Liviu Dudau, Maarten Lankhorst, Maxime Ripard,
	Thomas Zimmermann, David Airlie, Simona Vetter, dri-devel,
	linux-kernel

On Thu, 28 Aug 2025 13:05:32 -0700
Chia-I Wu <olvaffe@gmail.com> wrote:

> Userspace relies on the ring field of gpu_scheduler tracepoints to
> identify a drm_gpu_scheduler.  The value of the ring field is taken from
> sched->name.
> 
> Because we typically have multiple schedulers running in parallel in
> each process, assign unique names to schedulers such that userspace can
> distinguish them.
> 
> Signed-off-by: Chia-I Wu <olvaffe@gmail.com>

Reviewed-by: Boris Brezillon <boris.brezillon@collabora.com>

Two minor comments below.

> ---
>  drivers/gpu/drm/panthor/panthor_sched.c | 32 ++++++++++++++++++-------
>  1 file changed, 23 insertions(+), 9 deletions(-)
> 
> diff --git a/drivers/gpu/drm/panthor/panthor_sched.c b/drivers/gpu/drm/panthor/panthor_sched.c
> index ba5dc3e443d9c..26616b6cb110d 100644
> --- a/drivers/gpu/drm/panthor/panthor_sched.c
> +++ b/drivers/gpu/drm/panthor/panthor_sched.c
> @@ -360,6 +360,9 @@ struct panthor_queue {
>  	/** @entity: DRM scheduling entity used for this queue. */
>  	struct drm_sched_entity entity;
>  
> +	/** @name: DRM scheduler name for this queue. */
> +	char name[32];
> +
>  	/**
>  	 * @remaining_time: Time remaining before the job timeout expires.
>  	 *
> @@ -3308,9 +3311,10 @@ static u32 calc_profiling_ringbuf_num_slots(struct panthor_device *ptdev,
>  
>  static struct panthor_queue *
>  group_create_queue(struct panthor_group *group,
> -		   const struct drm_panthor_queue_create *args)
> +		   const struct drm_panthor_queue_create *args, u32 gid,
> +		   u32 qid)
>  {
> -	const struct drm_sched_init_args sched_args = {
> +	struct drm_sched_init_args sched_args = {
>  		.ops = &panthor_queue_sched_ops,
>  		.submit_wq = group->ptdev->scheduler->wq,
>  		.num_rqs = 1,
> @@ -3323,7 +3327,7 @@ group_create_queue(struct panthor_group *group,
>  		.credit_limit = args->ringbuf_size / sizeof(u64),
>  		.timeout = msecs_to_jiffies(JOB_TIMEOUT_MS),
>  		.timeout_wq = group->ptdev->reset.wq,
> -		.name = "panthor-queue",
> +		.name = NULL, /* will point to queue->name */
>  		.dev = group->ptdev->base.dev,
>  	};
>  	struct drm_gpu_scheduler *drm_sched;
> @@ -3398,6 +3402,11 @@ group_create_queue(struct panthor_group *group,
>  	if (ret)
>  		goto err_free_queue;
>  
> +	/* assign a unique name */
> +	snprintf(queue->name, sizeof(queue->name), "panthor-queue-%d-%d", gid,
> +		 qid);
> +	sched_args.name = queue->name;

Should we plan ahead and have the pid in the name too?

> +
>  	ret = drm_sched_init(&queue->scheduler, &sched_args);
>  	if (ret)
>  		goto err_free_queue;
> @@ -3540,12 +3549,18 @@ int panthor_group_create(struct panthor_file *pfile,
>  	memset(group->syncobjs->kmap, 0,
>  	       group_args->queues.count * sizeof(struct panthor_syncobj_64b));
>  
> +	ret = xa_alloc(&gpool->xa, &gid, group,
> +		       XA_LIMIT(1, MAX_GROUPS_PER_POOL), GFP_KERNEL);
> +	if (ret)
> +		goto err_put_group;
> +
>  	for (i = 0; i < group_args->queues.count; i++) {
> -		group->queues[i] = group_create_queue(group, &queue_args[i]);
> +		group->queues[i] =
> +			group_create_queue(group, &queue_args[i], gid, i);

nit: the limit is 100 chars now, so I think we can have it on a single
line.
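
i.e.

	group->queues[i] = group_create_queue(group, &queue_args[i], gid, i);
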

>  		if (IS_ERR(group->queues[i])) {
>  			ret = PTR_ERR(group->queues[i]);
>  			group->queues[i] = NULL;
> -			goto err_put_group;
> +			goto err_erase_gid;
>  		}
>  
>  		group->queue_count++;
> @@ -3553,10 +3568,6 @@ int panthor_group_create(struct panthor_file *pfile,
>  
>  	group->idle_queues = GENMASK(group->queue_count - 1, 0);
>  
> -	ret = xa_alloc(&gpool->xa, &gid, group, XA_LIMIT(1, MAX_GROUPS_PER_POOL), GFP_KERNEL);
> -	if (ret)
> -		goto err_put_group;
> -
>  	mutex_lock(&sched->reset.lock);
>  	if (atomic_read(&sched->reset.in_progress)) {
>  		panthor_group_stop(group);
> @@ -3575,6 +3586,9 @@ int panthor_group_create(struct panthor_file *pfile,
>  
>  	return gid;
>  
> +err_erase_gid:
> +	xa_erase(&gpool->xa, gid);
> +
>  err_put_group:
>  	group_put(group);
>  	return ret;



* Re: [PATCH] drm/panthor: assign unique names to queues
From: Steven Price @ 2025-08-29  9:20 UTC
  To: Chia-I Wu, Boris Brezillon, Liviu Dudau, Maarten Lankhorst,
	Maxime Ripard, Thomas Zimmermann, David Airlie, Simona Vetter,
	dri-devel, linux-kernel

On 28/08/2025 21:05, Chia-I Wu wrote:
> Userspace relies on the ring field of gpu_scheduler tracepoints to
> identify a drm_gpu_scheduler.  The value of the ring field is taken from
> sched->name.
> 
> Because we typically have multiple schedulers running in parallel in
> each process, assign unique names to schedulers such that userspace can
> distinguish them.
> 
> Signed-off-by: Chia-I Wu <olvaffe@gmail.com>

One nit below, but otherwise:

Reviewed-by: Steven Price <steven.price@arm.com>

> ---
>  drivers/gpu/drm/panthor/panthor_sched.c | 32 ++++++++++++++++++-------
>  1 file changed, 23 insertions(+), 9 deletions(-)
> 
> diff --git a/drivers/gpu/drm/panthor/panthor_sched.c b/drivers/gpu/drm/panthor/panthor_sched.c
> index ba5dc3e443d9c..26616b6cb110d 100644
> --- a/drivers/gpu/drm/panthor/panthor_sched.c
> +++ b/drivers/gpu/drm/panthor/panthor_sched.c
> @@ -360,6 +360,9 @@ struct panthor_queue {
>  	/** @entity: DRM scheduling entity used for this queue. */
>  	struct drm_sched_entity entity;
>  
> +	/** @name: DRM scheduler name for this queue. */
> +	char name[32];
> +
>  	/**
>  	 * @remaining_time: Time remaining before the job timeout expires.
>  	 *
> @@ -3308,9 +3311,10 @@ static u32 calc_profiling_ringbuf_num_slots(struct panthor_device *ptdev,
>  
>  static struct panthor_queue *
>  group_create_queue(struct panthor_group *group,
> -		   const struct drm_panthor_queue_create *args)
> +		   const struct drm_panthor_queue_create *args, u32 gid,
> +		   u32 qid)
>  {
> -	const struct drm_sched_init_args sched_args = {
> +	struct drm_sched_init_args sched_args = {
>  		.ops = &panthor_queue_sched_ops,
>  		.submit_wq = group->ptdev->scheduler->wq,
>  		.num_rqs = 1,
> @@ -3323,7 +3327,7 @@ group_create_queue(struct panthor_group *group,
>  		.credit_limit = args->ringbuf_size / sizeof(u64),
>  		.timeout = msecs_to_jiffies(JOB_TIMEOUT_MS),
>  		.timeout_wq = group->ptdev->reset.wq,
> -		.name = "panthor-queue",
> +		.name = NULL, /* will point to queue->name */

NIT: There's no need to explicitly assign NULL here - members omitted
from a designated initializer are zero-initialized anyway, so the
field can simply be left out.

Thanks,
Steve

>  		.dev = group->ptdev->base.dev,
>  	};
>  	struct drm_gpu_scheduler *drm_sched;
> @@ -3398,6 +3402,11 @@ group_create_queue(struct panthor_group *group,
>  	if (ret)
>  		goto err_free_queue;
>  
> +	/* assign a unique name */
> +	snprintf(queue->name, sizeof(queue->name), "panthor-queue-%d-%d", gid,
> +		 qid);
> +	sched_args.name = queue->name;
> +
>  	ret = drm_sched_init(&queue->scheduler, &sched_args);
>  	if (ret)
>  		goto err_free_queue;
> @@ -3540,12 +3549,18 @@ int panthor_group_create(struct panthor_file *pfile,
>  	memset(group->syncobjs->kmap, 0,
>  	       group_args->queues.count * sizeof(struct panthor_syncobj_64b));
>  
> +	ret = xa_alloc(&gpool->xa, &gid, group,
> +		       XA_LIMIT(1, MAX_GROUPS_PER_POOL), GFP_KERNEL);
> +	if (ret)
> +		goto err_put_group;
> +
>  	for (i = 0; i < group_args->queues.count; i++) {
> -		group->queues[i] = group_create_queue(group, &queue_args[i]);
> +		group->queues[i] =
> +			group_create_queue(group, &queue_args[i], gid, i);
>  		if (IS_ERR(group->queues[i])) {
>  			ret = PTR_ERR(group->queues[i]);
>  			group->queues[i] = NULL;
> -			goto err_put_group;
> +			goto err_erase_gid;
>  		}
>  
>  		group->queue_count++;
> @@ -3553,10 +3568,6 @@ int panthor_group_create(struct panthor_file *pfile,
>  
>  	group->idle_queues = GENMASK(group->queue_count - 1, 0);
>  
> -	ret = xa_alloc(&gpool->xa, &gid, group, XA_LIMIT(1, MAX_GROUPS_PER_POOL), GFP_KERNEL);
> -	if (ret)
> -		goto err_put_group;
> -
>  	mutex_lock(&sched->reset.lock);
>  	if (atomic_read(&sched->reset.in_progress)) {
>  		panthor_group_stop(group);
> @@ -3575,6 +3586,9 @@ int panthor_group_create(struct panthor_file *pfile,
>  
>  	return gid;
>  
> +err_erase_gid:
> +	xa_erase(&gpool->xa, gid);
> +
>  err_put_group:
>  	group_put(group);
>  	return ret;



* Re: [PATCH] drm/panthor: assign unique names to queues
From: Chia-I Wu @ 2025-08-29 22:43 UTC
  To: Boris Brezillon
  Cc: Steven Price, Liviu Dudau, Maarten Lankhorst, Maxime Ripard,
	Thomas Zimmermann, David Airlie, Simona Vetter, dri-devel,
	linux-kernel

On Fri, Aug 29, 2025 at 1:00 AM Boris Brezillon
<boris.brezillon@collabora.com> wrote:
>
> On Thu, 28 Aug 2025 13:05:32 -0700
> Chia-I Wu <olvaffe@gmail.com> wrote:
>
> > Userspace relies on the ring field of gpu_scheduler tracepoints to
> > identify a drm_gpu_scheduler.  The value of the ring field is taken from
> > sched->name.
> >
> > Because we typically have multiple schedulers running in parallel in
> > each process, assign unique names to schedulers such that userspace can
> > distinguish them.
> >
> > Signed-off-by: Chia-I Wu <olvaffe@gmail.com>
>
> Reviewed-by: Boris Brezillon <boris.brezillon@collabora.com>
>
> Two minor comments below.
>
> > ---
> >  drivers/gpu/drm/panthor/panthor_sched.c | 32 ++++++++++++++++++-------
> >  1 file changed, 23 insertions(+), 9 deletions(-)
> >
> > diff --git a/drivers/gpu/drm/panthor/panthor_sched.c b/drivers/gpu/drm/panthor/panthor_sched.c
> > index ba5dc3e443d9c..26616b6cb110d 100644
> > --- a/drivers/gpu/drm/panthor/panthor_sched.c
> > +++ b/drivers/gpu/drm/panthor/panthor_sched.c
> > @@ -360,6 +360,9 @@ struct panthor_queue {
> >       /** @entity: DRM scheduling entity used for this queue. */
> >       struct drm_sched_entity entity;
> >
> > +     /** @name: DRM scheduler name for this queue. */
> > +     char name[32];
> > +
> >       /**
> >        * @remaining_time: Time remaining before the job timeout expires.
> >        *
> > @@ -3308,9 +3311,10 @@ static u32 calc_profiling_ringbuf_num_slots(struct panthor_device *ptdev,
> >
> >  static struct panthor_queue *
> >  group_create_queue(struct panthor_group *group,
> > -                const struct drm_panthor_queue_create *args)
> > +                const struct drm_panthor_queue_create *args, u32 gid,
> > +                u32 qid)
> >  {
> > -     const struct drm_sched_init_args sched_args = {
> > +     struct drm_sched_init_args sched_args = {
> >               .ops = &panthor_queue_sched_ops,
> >               .submit_wq = group->ptdev->scheduler->wq,
> >               .num_rqs = 1,
> > @@ -3323,7 +3327,7 @@ group_create_queue(struct panthor_group *group,
> >               .credit_limit = args->ringbuf_size / sizeof(u64),
> >               .timeout = msecs_to_jiffies(JOB_TIMEOUT_MS),
> >               .timeout_wq = group->ptdev->reset.wq,
> > -             .name = "panthor-queue",
> > +             .name = NULL, /* will point to queue->name */
> >               .dev = group->ptdev->base.dev,
> >       };
> >       struct drm_gpu_scheduler *drm_sched;
> > @@ -3398,6 +3402,11 @@ group_create_queue(struct panthor_group *group,
> >       if (ret)
> >               goto err_free_queue;
> >
> > +     /* assign a unique name */
> > +     snprintf(queue->name, sizeof(queue->name), "panthor-queue-%d-%d", gid,
> > +              qid);
> > +     sched_args.name = queue->name;
>
> Should we plan ahead and have the pid in the name too?
Yes!  I intended for the name to be unique, but incorrectly assumed
that the group pool was global.

Since group pools are per-file, I will include file->client_id in the
names in v2.
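
Something along these lines, I guess (just a sketch, assuming the
drm_file's u64 client_id gets plumbed down to group_create_queue()):

  snprintf(queue->name, sizeof(queue->name), "panthor-queue-%llu-%u-%u",
           client_id, gid, qid);

sizeof(queue->name) being 32 may be too small with a full u64 in the
mix, so the buffer probably needs to grow as well.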

>
> > +
> >       ret = drm_sched_init(&queue->scheduler, &sched_args);
> >       if (ret)
> >               goto err_free_queue;
> > @@ -3540,12 +3549,18 @@ int panthor_group_create(struct panthor_file *pfile,
> >       memset(group->syncobjs->kmap, 0,
> >              group_args->queues.count * sizeof(struct panthor_syncobj_64b));
> >
> > +     ret = xa_alloc(&gpool->xa, &gid, group,
> > +                    XA_LIMIT(1, MAX_GROUPS_PER_POOL), GFP_KERNEL);
> > +     if (ret)
> > +             goto err_put_group;
> > +
> >       for (i = 0; i < group_args->queues.count; i++) {
> > -             group->queues[i] = group_create_queue(group, &queue_args[i]);
> > +             group->queues[i] =
> > +                     group_create_queue(group, &queue_args[i], gid, i);
>
> nit: the limit is 100 chars now, so I think we can have it on a single
> line.
>
> >               if (IS_ERR(group->queues[i])) {
> >                       ret = PTR_ERR(group->queues[i]);
> >                       group->queues[i] = NULL;
> > -                     goto err_put_group;
> > +                     goto err_erase_gid;
> >               }
> >
> >               group->queue_count++;
> > @@ -3553,10 +3568,6 @@ int panthor_group_create(struct panthor_file *pfile,
> >
> >       group->idle_queues = GENMASK(group->queue_count - 1, 0);
> >
> > -     ret = xa_alloc(&gpool->xa, &gid, group, XA_LIMIT(1, MAX_GROUPS_PER_POOL), GFP_KERNEL);
> > -     if (ret)
> > -             goto err_put_group;
> > -
> >       mutex_lock(&sched->reset.lock);
> >       if (atomic_read(&sched->reset.in_progress)) {
> >               panthor_group_stop(group);
> > @@ -3575,6 +3586,9 @@ int panthor_group_create(struct panthor_file *pfile,
> >
> >       return gid;
> >
> > +err_erase_gid:
> > +     xa_erase(&gpool->xa, gid);
> > +
> >  err_put_group:
> >       group_put(group);
> >       return ret;
>

