From: Matthew Brost <matthew.brost@intel.com>
To: Niranjana Vishwanathapura <niranjana.vishwanathapura@intel.com>
Cc: <intel-xe@lists.freedesktop.org>,
	<dri-devel@lists.freedesktop.org>, <christian.koenig@amd.com>,
	<pstanner@redhat.com>, <dakr@kernel.org>
Subject: Re: [PATCH v3 6/7] drm/xe: Remove special casing for LR queues in submission
Date: Tue, 18 Nov 2025 10:03:23 -0800
Message-ID: <aRy063wOoY14qb5i@lstrano-desk.jf.intel.com>
In-Reply-To: <aRwWC46dgXKjRGVW@nvishwa1-desk>

On Mon, Nov 17, 2025 at 10:45:31PM -0800, Niranjana Vishwanathapura wrote:
> On Thu, Oct 16, 2025 at 01:48:25PM -0700, Matthew Brost wrote:
> > Now that LR jobs are tracked by the DRM scheduler, there's no longer a
> > need to special-case LR queues. This change removes all LR
> > queue-specific handling, including dedicated TDR logic, reference
> > counting schemes, and other related mechanisms.
> > 
> > Signed-off-by: Matthew Brost <matthew.brost@intel.com>
> > ---
> > drivers/gpu/drm/xe/xe_guc_exec_queue_types.h |   2 -
> > drivers/gpu/drm/xe/xe_guc_submit.c           | 129 +------------------
> > 2 files changed, 7 insertions(+), 124 deletions(-)
> > 
> > diff --git a/drivers/gpu/drm/xe/xe_guc_exec_queue_types.h b/drivers/gpu/drm/xe/xe_guc_exec_queue_types.h
> > index a3b034e4b205..fd0915ed8eb1 100644
> > --- a/drivers/gpu/drm/xe/xe_guc_exec_queue_types.h
> > +++ b/drivers/gpu/drm/xe/xe_guc_exec_queue_types.h
> > @@ -33,8 +33,6 @@ struct xe_guc_exec_queue {
> > 	 */
> > #define MAX_STATIC_MSG_TYPE	3
> > 	struct xe_sched_msg static_msgs[MAX_STATIC_MSG_TYPE];
> > -	/** @lr_tdr: long running TDR worker */
> > -	struct work_struct lr_tdr;
> > 	/** @destroy_async: do final destroy async from this worker */
> > 	struct work_struct destroy_async;
> > 	/** @resume_time: time of last resume */
> > diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
> > index ab0f1a2d4871..bb1f2929441c 100644
> > --- a/drivers/gpu/drm/xe/xe_guc_submit.c
> > +++ b/drivers/gpu/drm/xe/xe_guc_submit.c
> > @@ -674,14 +674,6 @@ static void register_exec_queue(struct xe_exec_queue *q, int ctx_type)
> > 		parallel_write(xe, map, wq_desc.wq_status, WQ_STATUS_ACTIVE);
> > 	}
> > 
> > -	/*
> > -	 * We must keep a reference for LR engines if engine is registered with
> > -	 * the GuC as jobs signal immediately and can't destroy an engine if the
> > -	 * GuC has a reference to it.
> > -	 */
> > -	if (xe_exec_queue_is_lr(q))
> > -		xe_exec_queue_get(q);
> > -
> > 	set_exec_queue_registered(q);
> > 	trace_xe_exec_queue_register(q);
> > 	if (xe_exec_queue_is_parallel(q))
> > @@ -854,7 +846,7 @@ guc_exec_queue_run_job(struct drm_sched_job *drm_job)
> > 	struct xe_sched_job *job = to_xe_sched_job(drm_job);
> > 	struct xe_exec_queue *q = job->q;
> > 	struct xe_guc *guc = exec_queue_to_guc(q);
> > -	bool lr = xe_exec_queue_is_lr(q), killed_or_banned_or_wedged =
> > +	bool killed_or_banned_or_wedged =
> > 		exec_queue_killed_or_banned_or_wedged(q);
> > 
> > 	xe_gt_assert(guc_to_gt(guc), !(exec_queue_destroyed(q) || exec_queue_pending_disable(q)) ||
> > @@ -871,15 +863,6 @@ guc_exec_queue_run_job(struct drm_sched_job *drm_job)
> > 		job->skip_emit = false;
> > 	}
> > 
> > -	/*
> > -	 * We don't care about job-fence ordering in LR VMs because these fences
> > -	 * are never exported; they are used solely to keep jobs on the pending
> > -	 * list. Once a queue enters an error state, there's no need to track
> > -	 * them.
> > -	 */
> > -	if (killed_or_banned_or_wedged && lr)
> > -		xe_sched_job_set_error(job, -ECANCELED);
> > -
> 
> Why is this piece of code being removed?
> 

The TDR now always runs for LR jobs, and that path errors out the job.
Prior to this change, the LR cleanup function only ran once.
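
For reference, the removed LR cleanup worker below did exactly this via
drm_sched_for_each_pending_job(); the TDR can now do the equivalent once
the queue is banned/killed (illustrative sketch only, the exact TDR code
may differ):

	struct drm_sched_job *job;

	/* Error out every pending job once the queue is in an error state */
	drm_sched_for_each_pending_job(job, &sched->base, NULL)
		xe_sched_job_set_error(to_xe_sched_job(job), -ECANCELED);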

> > 	return job->fence;
> > }
> > 
> > @@ -923,8 +906,7 @@ static void disable_scheduling_deregister(struct xe_guc *guc,
> > 		xe_gt_warn(q->gt, "Pending enable/disable failed to respond\n");
> > 		xe_sched_submission_start(sched);
> > 		xe_gt_reset_async(q->gt);
> > -		if (!xe_exec_queue_is_lr(q))
> > -			xe_sched_tdr_queue_imm(sched);
> > +		xe_sched_tdr_queue_imm(sched);
> > 		return;
> > 	}
> > 
> > @@ -950,10 +932,7 @@ static void xe_guc_exec_queue_trigger_cleanup(struct xe_exec_queue *q)
> > 	/** to wakeup xe_wait_user_fence ioctl if exec queue is reset */
> > 	wake_up_all(&xe->ufence_wq);
> > 
> > -	if (xe_exec_queue_is_lr(q))
> > -		queue_work(guc_to_gt(guc)->ordered_wq, &q->guc->lr_tdr);
> > -	else
> > -		xe_sched_tdr_queue_imm(&q->guc->sched);
> > +	xe_sched_tdr_queue_imm(&q->guc->sched);
> > }
> > 
> > /**
> > @@ -1009,78 +988,6 @@ static bool guc_submit_hint_wedged(struct xe_guc *guc)
> > 	return true;
> > }
> > 
> > -static void xe_guc_exec_queue_lr_cleanup(struct work_struct *w)
> > -{
> > -	struct xe_guc_exec_queue *ge =
> > -		container_of(w, struct xe_guc_exec_queue, lr_tdr);
> > -	struct xe_exec_queue *q = ge->q;
> > -	struct xe_guc *guc = exec_queue_to_guc(q);
> > -	struct xe_gpu_scheduler *sched = &ge->sched;
> > -	struct drm_sched_job *job;
> > -	bool wedged = false;
> > -
> > -	xe_gt_assert(guc_to_gt(guc), xe_exec_queue_is_lr(q));
> > -
> > -	if (vf_recovery(guc))
> > -		return;
> > -
> > -	trace_xe_exec_queue_lr_cleanup(q);
> 
> Remove the trace event in xe_trace.h as well?
> 

Yes, will do.
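
Something like this follow-up hunk, assuming the event is defined with
the usual DEFINE_EVENT() pattern in xe_trace.h:

	-DEFINE_EVENT(xe_exec_queue, xe_exec_queue_lr_cleanup,
	-	     TP_PROTO(struct xe_exec_queue *q),
	-	     TP_ARGS(q)
	-);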

Matt

> Niranjana
> 
> > -
> > -	if (!exec_queue_killed(q))
> > -		wedged = guc_submit_hint_wedged(exec_queue_to_guc(q));
> > -
> > -	/* Kill the run_job / process_msg entry points */
> > -	xe_sched_submission_stop(sched);
> > -
> > -	/*
> > -	 * Engine state now mostly stable, disable scheduling / deregister if
> > -	 * needed. This cleanup routine might be called multiple times, where
> > -	 * the actual async engine deregister drops the final engine ref.
> > -	 * Calling disable_scheduling_deregister will mark the engine as
> > -	 * destroyed and fire off the CT requests to disable scheduling /
> > -	 * deregister, which we only want to do once. We also don't want to mark
> > -	 * the engine as pending_disable again as this may race with the
> > -	 * xe_guc_deregister_done_handler() which treats it as an unexpected
> > -	 * state.
> > -	 */
> > -	if (!wedged && exec_queue_registered(q) && !exec_queue_destroyed(q)) {
> > -		struct xe_guc *guc = exec_queue_to_guc(q);
> > -		int ret;
> > -
> > -		set_exec_queue_banned(q);
> > -		disable_scheduling_deregister(guc, q);
> > -
> > -		/*
> > -		 * Must wait for scheduling to be disabled before signalling
> > -		 * any fences, if GT broken the GT reset code should signal us.
> > -		 */
> > -		ret = wait_event_timeout(guc->ct.wq,
> > -					 !exec_queue_pending_disable(q) ||
> > -					 xe_guc_read_stopped(guc) ||
> > -					 vf_recovery(guc), HZ * 5);
> > -		if (vf_recovery(guc))
> > -			return;
> > -
> > -		if (!ret) {
> > -			xe_gt_warn(q->gt, "Schedule disable failed to respond, guc_id=%d\n",
> > -				   q->guc->id);
> > -			xe_devcoredump(q, NULL, "Schedule disable failed to respond, guc_id=%d\n",
> > -				       q->guc->id);
> > -			xe_sched_submission_start(sched);
> > -			xe_gt_reset_async(q->gt);
> > -			return;
> > -		}
> > -	}
> > -
> > -	if (!exec_queue_killed(q) && !xe_lrc_ring_is_idle(q->lrc[0]))
> > -		xe_devcoredump(q, NULL, "LR job cleanup, guc_id=%d", q->guc->id);
> > -
> > -	drm_sched_for_each_pending_job(job, &sched->base, NULL)
> > -		xe_sched_job_set_error(to_xe_sched_job(job), -ECANCELED);
> > -
> > -	xe_sched_submission_start(sched);
> > -}
> > -
> > #define ADJUST_FIVE_PERCENT(__t)	mul_u64_u32_div(__t, 105, 100)
> > 
> > static bool check_timeout(struct xe_exec_queue *q, struct xe_sched_job *job)
> > @@ -1150,8 +1057,7 @@ static void enable_scheduling(struct xe_exec_queue *q)
> > 		xe_gt_warn(guc_to_gt(guc), "Schedule enable failed to respond");
> > 		set_exec_queue_banned(q);
> > 		xe_gt_reset_async(q->gt);
> > -		if (!xe_exec_queue_is_lr(q))
> > -			xe_sched_tdr_queue_imm(&q->guc->sched);
> > +		xe_sched_tdr_queue_imm(&q->guc->sched);
> > 	}
> > }
> > 
> > @@ -1189,8 +1095,6 @@ guc_exec_queue_timedout_job(struct drm_sched_job *drm_job)
> > 	pid_t pid = -1;
> > 	bool wedged = false, skip_timeout_check;
> > 
> > -	xe_gt_assert(guc_to_gt(guc), !xe_exec_queue_is_lr(q));
> > -
> > 	/*
> > 	 * TDR has fired before free job worker. Common if exec queue
> > 	 * immediately closed after last fence signaled. Add back to pending
> > @@ -1395,8 +1299,6 @@ static void __guc_exec_queue_destroy_async(struct work_struct *w)
> > 	xe_pm_runtime_get(guc_to_xe(guc));
> > 	trace_xe_exec_queue_destroy(q);
> > 
> > -	if (xe_exec_queue_is_lr(q))
> > -		cancel_work_sync(&ge->lr_tdr);
> > 	/* Confirm no work left behind accessing device structures */
> > 	cancel_delayed_work_sync(&ge->sched.base.work_tdr);
> > 
> > @@ -1629,9 +1531,6 @@ static int guc_exec_queue_init(struct xe_exec_queue *q)
> > 	if (err)
> > 		goto err_sched;
> > 
> > -	if (xe_exec_queue_is_lr(q))
> > -		INIT_WORK(&q->guc->lr_tdr, xe_guc_exec_queue_lr_cleanup);
> > -
> > 	mutex_lock(&guc->submission_state.lock);
> > 
> > 	err = alloc_guc_id(guc, q);
> > @@ -1885,9 +1784,7 @@ static void guc_exec_queue_stop(struct xe_guc *guc, struct xe_exec_queue *q)
> > 
> > 	/* Clean up lost G2H + reset engine state */
> > 	if (exec_queue_registered(q)) {
> > -		if (xe_exec_queue_is_lr(q))
> > -			xe_exec_queue_put(q);
> > -		else if (exec_queue_destroyed(q))
> > +		if (exec_queue_destroyed(q))
> > 			__guc_exec_queue_destroy(guc, q);
> > 	}
> > 	if (q->guc->suspend_pending) {
> > @@ -1917,9 +1814,6 @@ static void guc_exec_queue_stop(struct xe_guc *guc, struct xe_exec_queue *q)
> > 				trace_xe_sched_job_ban(job);
> > 				ban = true;
> > 			}
> > -		} else if (xe_exec_queue_is_lr(q) &&
> > -			   !xe_lrc_ring_is_idle(q->lrc[0])) {
> > -			ban = true;
> > 		}
> > 
> > 		if (ban) {
> > @@ -2002,8 +1896,6 @@ static void guc_exec_queue_revert_pending_state_change(struct xe_guc *guc,
> > 	if (pending_enable && !pending_resume &&
> > 	    !exec_queue_pending_tdr_exit(q)) {
> > 		clear_exec_queue_registered(q);
> > -		if (xe_exec_queue_is_lr(q))
> > -			xe_exec_queue_put(q);
> > 		xe_gt_dbg(guc_to_gt(guc), "Replay REGISTER - guc_id=%d",
> > 			  q->guc->id);
> > 	}
> > @@ -2060,10 +1952,7 @@ static void guc_exec_queue_pause(struct xe_guc *guc, struct xe_exec_queue *q)
> > 
> > 	/* Stop scheduling + flush any DRM scheduler operations */
> > 	xe_sched_submission_stop(sched);
> > -	if (xe_exec_queue_is_lr(q))
> > -		cancel_work_sync(&q->guc->lr_tdr);
> > -	else
> > -		cancel_delayed_work_sync(&sched->base.work_tdr);
> > +	cancel_delayed_work_sync(&sched->base.work_tdr);
> > 
> > 	guc_exec_queue_revert_pending_state_change(guc, q);
> > 
> > @@ -2435,11 +2324,7 @@ static void handle_deregister_done(struct xe_guc *guc, struct xe_exec_queue *q)
> > 	trace_xe_exec_queue_deregister_done(q);
> > 
> > 	clear_exec_queue_registered(q);
> > -
> > -	if (xe_exec_queue_is_lr(q))
> > -		xe_exec_queue_put(q);
> > -	else
> > -		__guc_exec_queue_destroy(guc, q);
> > +	__guc_exec_queue_destroy(guc, q);
> > }
> > 
> > int xe_guc_deregister_done_handler(struct xe_guc *guc, u32 *msg, u32 len)
> > -- 
> > 2.34.1
> > 


Thread overview: 31+ messages
2025-10-16 20:48 [PATCH v3 0/7] Fix DRM scheduler layering violations in Xe Matthew Brost
2025-10-16 20:48 ` [PATCH v3 1/7] drm/sched: Add pending job list iterator Matthew Brost
2025-11-15  1:25   ` Niranjana Vishwanathapura
2025-11-18 17:52     ` Matthew Brost
2025-11-18 21:12       ` Niranjana Vishwanathapura
2025-10-16 20:48 ` [PATCH v3 2/7] drm/sched: Add several job helpers to avoid drivers touching scheduler state Matthew Brost
2025-11-17 19:57   ` Niranjana Vishwanathapura
2025-11-18 17:45     ` Matthew Brost
2025-10-16 20:48 ` [PATCH v3 3/7] drm/xe: Add dedicated message lock Matthew Brost
2025-11-17 19:58   ` Niranjana Vishwanathapura
2025-11-18 17:53     ` Matthew Brost
2025-10-16 20:48 ` [PATCH v3 4/7] drm/xe: Stop abusing DRM scheduler internals Matthew Brost
2025-11-18  6:39   ` Niranjana Vishwanathapura
2025-11-18 17:59     ` Matthew Brost
2025-11-18 21:17       ` Niranjana Vishwanathapura
2025-11-18 22:54         ` Matthew Brost
2025-10-16 20:48 ` [PATCH v3 5/7] drm/xe: Do not deregister queues in TDR Matthew Brost
2025-11-18  6:41   ` Niranjana Vishwanathapura
2025-11-18 18:02     ` Matthew Brost
2025-11-18 21:19       ` Niranjana Vishwanathapura
2025-11-18 22:59         ` Matthew Brost
2025-10-16 20:48 ` [PATCH v3 6/7] drm/xe: Remove special casing for LR queues in submission Matthew Brost
2025-11-18  6:45   ` Niranjana Vishwanathapura
2025-11-18 18:03     ` Matthew Brost [this message]
2025-10-16 20:48 ` [PATCH v3 7/7] drm/xe: Only toggle scheduling in TDR if GuC is running Matthew Brost
2025-11-15  1:01   ` Niranjana Vishwanathapura
2025-11-18 18:06     ` Matthew Brost
2025-10-16 20:55 ` ✗ CI.checkpatch: warning for Fix DRM scheduler layering violations in Xe (rev3) Patchwork
2025-10-16 20:56 ` ✓ CI.KUnit: success " Patchwork
2025-10-16 21:36 ` ✓ Xe.CI.BAT: " Patchwork
2025-10-17 18:43 ` ✗ Xe.CI.Full: failure " Patchwork
