From: "Thomas Hellström" <thomas.hellstrom@linux.intel.com>
To: Matthew Brost <matthew.brost@intel.com>, intel-xe@lists.freedesktop.org
Cc: francois.dugast@intel.com, michal.mrozek@intel.com
Subject: Re: [PATCH v2 4/7] drm/xe: Skip exec queue schedule toggle if queue is idle during suspend
Date: Mon, 15 Dec 2025 13:08:39 +0100 [thread overview]
Message-ID: <29b910869c16f7e22ac2481a64227289a550adb6.camel@linux.intel.com> (raw)
In-Reply-To: <20251212182847.1683222-5-matthew.brost@intel.com>
On Fri, 2025-12-12 at 10:28 -0800, Matthew Brost wrote:
> If an exec queue is idle, there is no need to issue a schedule disable
> to the GuC when suspending the queue’s execution. Opportunistically
> skip this step if the queue is idle and not a parallel queue. Parallel
> queues must have their scheduling state flipped in the GuC due to
> limitations in how submission is implemented in run_job().
>
> Also if all pagefault queues can skip the schedule disable during a
> switch to dma-fence mode, do not schedule a resume for the pagefault
> queues after the next submission.
>
> v2:
> - Don't touch the LRC tail if queue is suspended but enabled in
>   run_job (CI)
>
> Signed-off-by: Matthew Brost <matthew.brost@intel.com>
I'm not fully up-to-date with the GuC scheduling code, but the changes
look sane to me.
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
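For my own notes, the fast path this adds boils down to roughly the
sketch below (my paraphrase of the patch, not new code; locking and the
other state checks are elided):

	/* suspend message: queue is idle and not parallel, so skip the GuC
	 * schedule-disable round trip and signal the suspend fence directly */
	if (idle_skip_suspend && q->guc->suspend_pending) {
		set_exec_queue_idle_skip_suspend(q);
		set_exec_queue_suspended(q);
		suspend_fence_signal(q);
	}

	/* resume message: GuC scheduling was never disabled, so if the queue
	 * is still enabled just bump the ring tail and send SCHED_CONTEXT via
	 * sched_context() instead of a full enable_scheduling() cycle */
	if (exec_queue_idle_skip_suspend(q) && exec_queue_enabled(q)) {
		clear_exec_queue_idle_skip_suspend(q);
		sched_context(q);
	}

If that reading is off, please correct me.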
> ---
> drivers/gpu/drm/xe/xe_exec_queue.h      | 17 ++++++++
> drivers/gpu/drm/xe/xe_guc_submit.c      | 55 +++++++++++++++++++++++--
> drivers/gpu/drm/xe/xe_hw_engine_group.c |  2 +-
> 3 files changed, 70 insertions(+), 4 deletions(-)
>
> diff --git a/drivers/gpu/drm/xe/xe_exec_queue.h b/drivers/gpu/drm/xe/xe_exec_queue.h
> index 10abed98fb6b..b5ad975d7e97 100644
> --- a/drivers/gpu/drm/xe/xe_exec_queue.h
> +++ b/drivers/gpu/drm/xe/xe_exec_queue.h
> @@ -162,4 +162,21 @@ int xe_exec_queue_contexts_hwsp_rebase(struct xe_exec_queue *q, void *scratch);
>
> struct xe_lrc *xe_exec_queue_lrc(struct xe_exec_queue *q);
>
> +/**
> + * xe_exec_queue_idle_skip_suspend() - Can exec queue skip suspend
> + * @q: The exec_queue
> + *
> + * If an exec queue is not parallel and is idle, the suspend steps can be
> + * skipped in the submission backend, immediately signaling the suspend
> + * fence. Parallel queues cannot skip this step due to limitations in the
> + * submission backend.
> + *
> + * Return: True if exec queue is idle and can skip suspend steps, False
> + * otherwise
> + */
> +static inline bool xe_exec_queue_idle_skip_suspend(struct xe_exec_queue *q)
> +{
> + return !xe_exec_queue_is_parallel(q) && xe_exec_queue_is_idle(q);
> +}
> +
> #endif
> diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
> index 18cac5594d6a..8bab816da7fd 100644
> --- a/drivers/gpu/drm/xe/xe_guc_submit.c
> +++ b/drivers/gpu/drm/xe/xe_guc_submit.c
> @@ -75,6 +75,7 @@ exec_queue_to_guc(struct xe_exec_queue *q)
> #define EXEC_QUEUE_STATE_EXTRA_REF (1 << 11)
> #define EXEC_QUEUE_STATE_PENDING_RESUME (1 << 12)
> #define EXEC_QUEUE_STATE_PENDING_TDR_EXIT (1 << 13)
> +#define EXEC_QUEUE_STATE_IDLE_SKIP_SUSPEND (1 << 14)
>
> static bool exec_queue_registered(struct xe_exec_queue *q)
> {
> @@ -266,6 +267,21 @@ static void clear_exec_queue_pending_tdr_exit(struct xe_exec_queue *q)
> atomic_and(~EXEC_QUEUE_STATE_PENDING_TDR_EXIT, &q->guc->state);
> }
>
> +static bool exec_queue_idle_skip_suspend(struct xe_exec_queue *q)
> +{
> + return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_IDLE_SKIP_SUSPEND;
> +}
> +
> +static void set_exec_queue_idle_skip_suspend(struct xe_exec_queue *q)
> +{
> + atomic_or(EXEC_QUEUE_STATE_IDLE_SKIP_SUSPEND, &q->guc->state);
> +}
> +
> +static void clear_exec_queue_idle_skip_suspend(struct xe_exec_queue *q)
> +{
> + atomic_and(~EXEC_QUEUE_STATE_IDLE_SKIP_SUSPEND, &q->guc->state);
> +}
> +
> static bool exec_queue_killed_or_banned_or_wedged(struct xe_exec_queue *q)
> {
> return (atomic_read(&q->guc->state) &
> @@ -1118,7 +1134,7 @@ static void submit_exec_queue(struct xe_exec_queue *q, struct xe_sched_job *job)
> if (!job->restore_replay || job->last_replay) {
> if (xe_exec_queue_is_parallel(q))
> wq_item_append(q);
> - else
> + else if (!exec_queue_idle_skip_suspend(q))
> xe_lrc_set_ring_tail(lrc, lrc->ring.tail);
> job->last_replay = false;
> }
> @@ -1906,9 +1922,10 @@ static void __guc_exec_queue_process_msg_suspend(struct xe_sched_msg *msg)
> {
> struct xe_exec_queue *q = msg->private_data;
> struct xe_guc *guc = exec_queue_to_guc(q);
> + bool idle_skip_suspend = xe_exec_queue_idle_skip_suspend(q);
>
> - if (guc_exec_queue_allowed_to_change_state(q) && !exec_queue_suspended(q) &&
> -     exec_queue_enabled(q)) {
> + if (!idle_skip_suspend && guc_exec_queue_allowed_to_change_state(q) &&
> +     !exec_queue_suspended(q) && exec_queue_enabled(q)) {
> wait_event(guc->ct.wq, vf_recovery(guc) ||
> ((q->guc->resume_time != RESUME_PENDING ||
> xe_guc_read_stopped(guc)) &&
> !exec_queue_pending_disable(q)));
> @@ -1927,11 +1944,33 @@ static void __guc_exec_queue_process_msg_suspend(struct xe_sched_msg *msg)
> disable_scheduling(q, false);
> }
> } else if (q->guc->suspend_pending) {
> + if (idle_skip_suspend)
> + set_exec_queue_idle_skip_suspend(q);
> set_exec_queue_suspended(q);
> suspend_fence_signal(q);
> }
> }
>
> +static void sched_context(struct xe_exec_queue *q)
> +{
> + struct xe_guc *guc = exec_queue_to_guc(q);
> + struct xe_lrc *lrc = q->lrc[0];
> + u32 action [] = {
> + XE_GUC_ACTION_SCHED_CONTEXT,
> + q->guc->id,
> + };
> +
> + xe_gt_assert(guc_to_gt(guc), !xe_exec_queue_is_parallel(q));
> + xe_gt_assert(guc_to_gt(guc), !exec_queue_destroyed(q));
> + xe_gt_assert(guc_to_gt(guc), exec_queue_registered(q));
> + xe_gt_assert(guc_to_gt(guc), !exec_queue_pending_disable(q));
> +
> + trace_xe_exec_queue_submit(q);
> +
> + xe_lrc_set_ring_tail(lrc, lrc->ring.tail);
> + xe_guc_ct_send(&guc->ct, action, ARRAY_SIZE(action), 0, 0);
> +}
> +
> static void __guc_exec_queue_process_msg_resume(struct xe_sched_msg *msg)
> {
> struct xe_exec_queue *q = msg->private_data;
> @@ -1939,12 +1978,22 @@ static void __guc_exec_queue_process_msg_resume(struct xe_sched_msg *msg)
> if (guc_exec_queue_allowed_to_change_state(q)) {
> clear_exec_queue_suspended(q);
> if (!exec_queue_enabled(q)) {
> + if (exec_queue_idle_skip_suspend(q)) {
> + struct xe_lrc *lrc = q->lrc[0];
> +
> + clear_exec_queue_idle_skip_suspend(q);
> + xe_lrc_set_ring_tail(lrc, lrc->ring.tail);
> + }
> q->guc->resume_time = RESUME_PENDING;
> set_exec_queue_pending_resume(q);
> enable_scheduling(q);
> + } else if (exec_queue_idle_skip_suspend(q)) {
> + clear_exec_queue_idle_skip_suspend(q);
> + sched_context(q);
> }
> } else {
> clear_exec_queue_suspended(q);
> + clear_exec_queue_idle_skip_suspend(q);
> }
> }
>
> diff --git a/drivers/gpu/drm/xe/xe_hw_engine_group.c b/drivers/gpu/drm/xe/xe_hw_engine_group.c
> index 290205a266b8..4d9263a1a208 100644
> --- a/drivers/gpu/drm/xe/xe_hw_engine_group.c
> +++ b/drivers/gpu/drm/xe/xe_hw_engine_group.c
> @@ -205,7 +205,7 @@ static int xe_hw_engine_group_suspend_faulting_lr_jobs(struct xe_hw_engine_group
> continue;
>
> xe_gt_stats_incr(q->gt, XE_GT_STATS_ID_HW_ENGINE_GROUP_SUSPEND_LR_QUEUE_COUNT, 1);
> - need_resume = true;
> + need_resume |= !xe_exec_queue_idle_skip_suspend(q);
> q->ops->suspend(q);
> }
>
Thread overview: 24+ messages
2025-12-12 18:28 [PATCH v2 0/7] Fix performance when pagefaults and 3d/display share resources Matthew Brost
2025-12-12 18:28 ` [PATCH v2 1/7] drm/xe: Adjust long-running workload timeslices to reasonable values Matthew Brost
2025-12-15 10:08 ` Thomas Hellström
2025-12-15 21:48 ` Matthew Brost
2025-12-12 18:28 ` [PATCH v2 2/7] drm/xe: Use usleep_range for accurate long-running workload timeslicing Matthew Brost
2025-12-15 10:10 ` Thomas Hellström
2025-12-12 18:28 ` [PATCH v2 3/7] drm/xe: Add debugfs knobs to control long running " Matthew Brost
2025-12-15 10:11 ` Thomas Hellström
2025-12-12 18:28 ` [PATCH v2 4/7] drm/xe: Skip exec queue schedule toggle if queue is idle during suspend Matthew Brost
2025-12-15 12:08 ` Thomas Hellström [this message]
2025-12-12 18:28 ` [PATCH v2 5/7] drm/xe: Wait on in-syncs when swicthing to dma-fence mode Matthew Brost
2025-12-15 10:32 ` Thomas Hellström
2025-12-15 21:46 ` Matthew Brost
2025-12-15 21:48 ` Thomas Hellström
2025-12-16 1:12 ` Matthew Brost
2025-12-12 18:28 ` [PATCH v2 6/7] drm/xe: Add GT stats ktime helpers Matthew Brost
2025-12-15 10:17 ` Thomas Hellström
2025-12-12 18:28 ` [PATCH v2 7/7] drm/xe: Add more GT stats around pagefault mode switch flows Matthew Brost
2025-12-15 11:00 ` Thomas Hellström
2025-12-15 13:05 ` Francois Dugast
2025-12-12 22:37 ` ✗ CI.checkpatch: warning for Fix performance when pagefaults and 3d/display share resources (rev2) Patchwork
2025-12-12 22:38 ` ✓ CI.KUnit: success " Patchwork
2025-12-12 23:33 ` ✓ Xe.CI.BAT: " Patchwork
2025-12-13 19:27 ` ✗ Xe.CI.Full: failure " Patchwork