From: Matthew Brost <matthew.brost@intel.com>
To: Rodrigo Vivi <rodrigo.vivi@intel.com>
Cc: intel-xe@lists.freedesktop.org
Subject: Re: [PATCH 13/13] fixup! drm/xe: Introduce a new DRM driver for Intel GPUs
Date: Mon, 11 Dec 2023 21:25:07 +0000 [thread overview]
Message-ID: <ZXd+M+PhvXJYSSfv@DUT025-TGLU.fm.intel.com> (raw)
In-Reply-To: <20231211191306.1069362-13-rodrigo.vivi@intel.com>
On Mon, Dec 11, 2023 at 02:13:06PM -0500, Rodrigo Vivi wrote:
> Adjust to recent drm-scheduler changes that already landed in drm-next
>
> Cc: Matthew Brost <matthew.brost@intel.com>
> Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
> ---
> drivers/gpu/drm/xe/xe_exec_queue.c | 6 +++---
> drivers/gpu/drm/xe/xe_execlist.c | 3 +--
> drivers/gpu/drm/xe/xe_execlist_types.h | 4 ++--
> drivers/gpu/drm/xe/xe_gpu_scheduler.h | 2 +-
> drivers/gpu/drm/xe/xe_gpu_scheduler_types.h | 6 ++++++
> drivers/gpu/drm/xe/xe_guc_submit.c | 10 +++++-----
> drivers/gpu/drm/xe/xe_migrate.c | 2 +-
> drivers/gpu/drm/xe/xe_sched_job.c | 2 +-
> 8 files changed, 20 insertions(+), 15 deletions(-)
>
> diff --git a/drivers/gpu/drm/xe/xe_exec_queue.c b/drivers/gpu/drm/xe/xe_exec_queue.c
> index 91d67f4da2cc..3cf7f5d3d5e3 100644
> --- a/drivers/gpu/drm/xe/xe_exec_queue.c
> +++ b/drivers/gpu/drm/xe/xe_exec_queue.c
> @@ -222,14 +222,14 @@ struct xe_exec_queue *xe_exec_queue_lookup(struct xe_file *xef, u32 id)
> enum drm_sched_priority
> xe_exec_queue_device_get_max_priority(struct xe_device *xe)
> {
> - return capable(CAP_SYS_NICE) ? DRM_SCHED_PRIORITY_HIGH :
> - DRM_SCHED_PRIORITY_NORMAL;
> + return capable(CAP_SYS_NICE) ? XE_SCHED_PRIORITY_HIGH :
> + XE_SCHED_PRIORITY_NORMAL;
> }
>
> static int exec_queue_set_priority(struct xe_device *xe, struct xe_exec_queue *q,
> u64 value, bool create)
> {
> - if (XE_IOCTL_DBG(xe, value > DRM_SCHED_PRIORITY_HIGH))
> + if (XE_IOCTL_DBG(xe, value > XE_SCHED_PRIORITY_HIGH))
> return -EINVAL;
>
> if (XE_IOCTL_DBG(xe, value > xe_exec_queue_device_get_max_priority(xe)))
> diff --git a/drivers/gpu/drm/xe/xe_execlist.c b/drivers/gpu/drm/xe/xe_execlist.c
> index e8754adfc52a..e6a94d884115 100644
> --- a/drivers/gpu/drm/xe/xe_execlist.c
> +++ b/drivers/gpu/drm/xe/xe_execlist.c
> @@ -344,8 +344,7 @@ static int execlist_exec_queue_init(struct xe_exec_queue *q)
> goto err_free;
>
> sched = &exl->sched;
> - err = drm_sched_entity_init(&exl->entity, DRM_SCHED_PRIORITY_MIN,
> - &sched, 1, NULL);
> + err = drm_sched_entity_init(&exl->entity, 0, &sched, 1, NULL);
> if (err)
> goto err_sched;
>
> diff --git a/drivers/gpu/drm/xe/xe_execlist_types.h b/drivers/gpu/drm/xe/xe_execlist_types.h
> index ee1fccd4ee8b..38bb6f0709c6 100644
> --- a/drivers/gpu/drm/xe/xe_execlist_types.h
> +++ b/drivers/gpu/drm/xe/xe_execlist_types.h
> @@ -10,7 +10,7 @@
> #include <linux/spinlock.h>
> #include <linux/workqueue.h>
>
> -#include <drm/gpu_scheduler.h>
> +#include <xe_gpu_scheduler_types.h>
>
> struct xe_hw_engine;
> struct xe_execlist_exec_queue;
> @@ -20,7 +20,7 @@ struct xe_execlist_port {
>
> spinlock_t lock;
>
> - struct list_head active[DRM_SCHED_PRIORITY_COUNT];
> + struct list_head active[XE_SCHED_PRIORITY_COUNT];
>
> u32 last_ctx_id;
>
> diff --git a/drivers/gpu/drm/xe/xe_gpu_scheduler.h b/drivers/gpu/drm/xe/xe_gpu_scheduler.h
> index ea785bcd3eb2..10c6bb9c9386 100644
> --- a/drivers/gpu/drm/xe/xe_gpu_scheduler.h
> +++ b/drivers/gpu/drm/xe/xe_gpu_scheduler.h
> @@ -63,7 +63,7 @@ static inline int
> xe_sched_entity_init(struct xe_sched_entity *entity,
> struct xe_gpu_scheduler *sched)
> {
> - return drm_sched_entity_init(entity, DRM_SCHED_PRIORITY_MIN,
> + return drm_sched_entity_init(entity, 0,
> (struct drm_gpu_scheduler **)&sched,
> 1, NULL);
> }
> diff --git a/drivers/gpu/drm/xe/xe_gpu_scheduler_types.h b/drivers/gpu/drm/xe/xe_gpu_scheduler_types.h
> index 86133835d4d1..941a360af2f5 100644
> --- a/drivers/gpu/drm/xe/xe_gpu_scheduler_types.h
> +++ b/drivers/gpu/drm/xe/xe_gpu_scheduler_types.h
> @@ -8,6 +8,12 @@
>
> #include <drm/gpu_scheduler.h>
>
> +#define XE_SCHED_PRIORITY_LOW 0
> +#define XE_SCHED_PRIORITY_NORMAL 1
> +#define XE_SCHED_PRIORITY_HIGH 2
> +#define XE_SCHED_PRIORITY_KERNEL 3
> +#define XE_SCHED_PRIORITY_COUNT 3
XE_SCHED_PRIORITY_COUNT should be 4 — XE_SCHED_PRIORITY_KERNEL is 3, so there
are four priority levels, and with COUNT == 3 the active[] array in
xe_execlist_port is one entry too small (out-of-bounds indexing for kernel
priority).
Maybe I screwed that up when I sent this over?
Matt
> +
> /**
> * struct xe_sched_msg - an in-band (relative to GPU scheduler run queue)
> * message
> diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
> index 46b132ee1d3a..bfa8fb710cdf 100644
> --- a/drivers/gpu/drm/xe/xe_guc_submit.c
> +++ b/drivers/gpu/drm/xe/xe_guc_submit.c
> @@ -411,10 +411,10 @@ MAKE_EXEC_QUEUE_POLICY_ADD(priority, SCHEDULING_PRIORITY)
> #undef MAKE_EXEC_QUEUE_POLICY_ADD
>
> static const int xe_sched_prio_to_guc[] = {
> - [DRM_SCHED_PRIORITY_MIN] = GUC_CLIENT_PRIORITY_NORMAL,
> - [DRM_SCHED_PRIORITY_NORMAL] = GUC_CLIENT_PRIORITY_KMD_NORMAL,
> - [DRM_SCHED_PRIORITY_HIGH] = GUC_CLIENT_PRIORITY_HIGH,
> - [DRM_SCHED_PRIORITY_KERNEL] = GUC_CLIENT_PRIORITY_KMD_HIGH,
> + [XE_SCHED_PRIORITY_LOW] = GUC_CLIENT_PRIORITY_NORMAL,
> + [XE_SCHED_PRIORITY_NORMAL] = GUC_CLIENT_PRIORITY_KMD_NORMAL,
> + [XE_SCHED_PRIORITY_HIGH] = GUC_CLIENT_PRIORITY_HIGH,
> + [XE_SCHED_PRIORITY_KERNEL] = GUC_CLIENT_PRIORITY_KMD_HIGH,
> };
>
> static void init_policies(struct xe_guc *guc, struct xe_exec_queue *q)
> @@ -1227,7 +1227,7 @@ static int guc_exec_queue_init(struct xe_exec_queue *q)
> err = xe_sched_entity_init(&ge->entity, sched);
> if (err)
> goto err_sched;
> - q->priority = DRM_SCHED_PRIORITY_NORMAL;
> + q->priority = XE_SCHED_PRIORITY_NORMAL;
>
> if (xe_exec_queue_is_lr(q))
> INIT_WORK(&q->guc->lr_tdr, xe_guc_exec_queue_lr_cleanup);
> diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c
> index be2a92dee52c..1d22aac57478 100644
> --- a/drivers/gpu/drm/xe/xe_migrate.c
> +++ b/drivers/gpu/drm/xe/xe_migrate.c
> @@ -398,7 +398,7 @@ struct xe_migrate *xe_migrate_init(struct xe_tile *tile)
> return ERR_CAST(m->q);
> }
> if (xe->info.has_usm)
> - m->q->priority = DRM_SCHED_PRIORITY_KERNEL;
> + m->q->priority = XE_SCHED_PRIORITY_KERNEL;
>
> mutex_init(&m->job_mutex);
>
> diff --git a/drivers/gpu/drm/xe/xe_sched_job.c b/drivers/gpu/drm/xe/xe_sched_job.c
> index b467d5bfa4ac..a9c7ae815bec 100644
> --- a/drivers/gpu/drm/xe/xe_sched_job.c
> +++ b/drivers/gpu/drm/xe/xe_sched_job.c
> @@ -104,7 +104,7 @@ struct xe_sched_job *xe_sched_job_create(struct xe_exec_queue *q,
> kref_init(&job->refcount);
> xe_exec_queue_get(job->q);
>
> - err = drm_sched_job_init(&job->drm, q->entity, NULL);
> + err = drm_sched_job_init(&job->drm, q->entity, 1, NULL);
> if (err)
> goto err_free;
>
> --
> 2.43.0
>
next prev parent reply other threads:[~2023-12-11 21:25 UTC|newest]
Thread overview: 24+ messages / expand[flat|nested] mbox.gz Atom feed top
2023-12-11 19:12 [PATCH 01/13] drm/sched: Rename drm_sched_get_cleanup_job to be more descriptive Rodrigo Vivi
2023-12-11 19:12 ` [PATCH 02/13] drm/sched: Move free worker re-queuing out of the if block Rodrigo Vivi
2023-12-11 19:12 ` [PATCH 03/13] drm/sched: Rename drm_sched_free_job_queue to be more descriptive Rodrigo Vivi
2023-12-11 19:12 ` [PATCH 04/13] drm/sched: Rename drm_sched_run_job_queue_if_ready and clarify kerneldoc Rodrigo Vivi
2023-12-11 19:12 ` [PATCH 05/13] drm/sched: Drop suffix from drm_sched_wakeup_if_can_queue Rodrigo Vivi
2023-12-11 19:12 ` [PATCH 06/13] drm/sched: Don't disturb the entity when in RR-mode scheduling Rodrigo Vivi
2023-12-11 19:13 ` [PATCH 07/13] drm/sched: Qualify drm_sched_wakeup() by drm_sched_entity_is_ready() Rodrigo Vivi
2023-12-11 19:13 ` [PATCH 08/13] drm/sched: implement dynamic job-flow control Rodrigo Vivi
2023-12-11 19:13 ` [PATCH 09/13] drm/sched: Fix bounds limiting when given a malformed entity Rodrigo Vivi
2023-12-11 19:13 ` [PATCH 10/13] drm/sched: Rename priority MIN to LOW Rodrigo Vivi
2023-12-11 19:13 ` [PATCH 11/13] drm/sched: Reverse run-queue priority enumeration Rodrigo Vivi
2023-12-11 19:13 ` [PATCH 12/13] drm/sched: Partial revert of "Qualify drm_sched_wakeup() by drm_sched_entity_is_ready()" Rodrigo Vivi
2023-12-11 19:13 ` [PATCH 13/13] fixup! drm/xe: Introduce a new DRM driver for Intel GPUs Rodrigo Vivi
2023-12-11 21:25 ` Matthew Brost [this message]
2023-12-11 20:17 ` ✓ CI.Patch_applied: success for series starting with [01/13] drm/sched: Rename drm_sched_get_cleanup_job to be more descriptive Patchwork
2023-12-11 20:17 ` ✗ CI.checkpatch: warning " Patchwork
2023-12-11 20:18 ` ✓ CI.KUnit: success " Patchwork
2023-12-11 20:26 ` ✓ CI.Build: " Patchwork
2023-12-11 20:26 ` ✓ CI.Hooks: " Patchwork
2023-12-11 20:27 ` ✓ CI.checksparse: " Patchwork
2023-12-11 21:03 ` ✓ CI.BAT: " Patchwork
-- strict thread matches above, loose matches on Subject: below --
2023-12-12 0:10 [PATCH 01/13] " Rodrigo Vivi
2023-12-12 0:10 ` [PATCH 13/13] fixup! drm/xe: Introduce a new DRM driver for Intel GPUs Rodrigo Vivi
2023-12-12 0:55 ` Matthew Brost
2023-12-08 20:27 [PATCH 01/13] drm/sched: Rename drm_sched_get_cleanup_job to be more descriptive Rodrigo Vivi
2023-12-08 20:27 ` [PATCH 13/13] fixup! drm/xe: Introduce a new DRM driver for Intel GPUs Rodrigo Vivi
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=ZXd+M+PhvXJYSSfv@DUT025-TGLU.fm.intel.com \
--to=matthew.brost@intel.com \
--cc=intel-xe@lists.freedesktop.org \
--cc=rodrigo.vivi@intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox