From: Raag Jadav <raag.jadav@intel.com>
To: intel-xe@lists.freedesktop.org
Cc: matthew.brost@intel.com, rodrigo.vivi@intel.com,
thomas.hellstrom@linux.intel.com, riana.tauro@intel.com,
michal.wajdeczko@intel.com, matthew.d.roper@intel.com,
michal.winiarski@intel.com, matthew.auld@intel.com,
maarten@lankhorst.se, jani.nikula@intel.com,
lukasz.laguna@intel.com, zhanjun.dong@intel.com, lukas@wunner.de,
Raag Jadav <raag.jadav@intel.com>
Subject: [PATCH v5 2/9] drm/xe/guc_submit: Introduce guc_exec_queue_reinit()
Date: Mon, 6 Apr 2026 19:37:15 +0530
Message-ID: <20260406140722.154445-3-raag.jadav@intel.com>
In-Reply-To: <20260406140722.154445-1-raag.jadav@intel.com>

In preparation for use cases that require re-initializing GuC submission
after a PCIe FLR, introduce a guc_exec_queue_reinit() helper. It restores
the scheduler job timeout and clears stale GuC state, so that exec queues
which might have been killed before the PCIe FLR can be used again.
Signed-off-by: Raag Jadav <raag.jadav@intel.com>
---
v4: Tear down exec queues instead of mangling the scheduler pending list (Matthew Brost)
v5: Re-initialize kernel queues through the submission backend (Matthew Brost)
---
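
A minimal usage sketch (not part of this patch) of how a PCIe FLR recovery
path might drive the new ->reinit hook. Names prefixed "example_" are
hypothetical; per patch 7/9 of this series, the actual caller is the
xe_exec_queue_reinit() wrapper introduced there.

/*
 * Illustrative sketch only: example_queue_xa is a hypothetical xarray
 * that an (equally hypothetical) FLR path uses to track exec queues.
 */
static void example_restore_queues_after_flr(struct xarray *example_queue_xa)
{
	struct xe_exec_queue *q;
	unsigned long idx;

	xa_for_each(example_queue_xa, idx, q) {
		/*
		 * ->reinit restores the scheduler job timeout and clears
		 * stale GuC state so a queue killed before the FLR can
		 * submit again.
		 */
		if (q->ops->reinit)
			q->ops->reinit(q);
	}
}
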
 drivers/gpu/drm/xe/xe_exec_queue_types.h |  2 ++
 drivers/gpu/drm/xe/xe_gpu_scheduler.h    |  5 +++++
 drivers/gpu/drm/xe/xe_guc_submit.c       | 11 +++++++++++
 3 files changed, 18 insertions(+)

diff --git a/drivers/gpu/drm/xe/xe_exec_queue_types.h b/drivers/gpu/drm/xe/xe_exec_queue_types.h
index 8ce78e0b1d50..926d419310b6 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue_types.h
+++ b/drivers/gpu/drm/xe/xe_exec_queue_types.h
@@ -274,6 +274,8 @@ struct xe_exec_queue {
 struct xe_exec_queue_ops {
 	/** @init: Initialize exec queue for submission backend */
 	int (*init)(struct xe_exec_queue *q);
+	/** @reinit: Re-initialize exec queue for submission backend */
+	void (*reinit)(struct xe_exec_queue *q);
 	/** @kill: Kill inflight submissions for backend */
 	void (*kill)(struct xe_exec_queue *q);
 	/** @fini: Undoes the init() for submission backend */
diff --git a/drivers/gpu/drm/xe/xe_gpu_scheduler.h b/drivers/gpu/drm/xe/xe_gpu_scheduler.h
index 664c2db56af3..1e079ca3891c 100644
--- a/drivers/gpu/drm/xe/xe_gpu_scheduler.h
+++ b/drivers/gpu/drm/xe/xe_gpu_scheduler.h
@@ -51,6 +51,11 @@ static inline void xe_sched_tdr_queue_imm(struct xe_gpu_scheduler *sched)
 	drm_sched_tdr_queue_imm(&sched->base);
 }
 
+static inline void xe_sched_update_timeout(struct xe_gpu_scheduler *sched, long timeout)
+{
+	sched->base.timeout = timeout;
+}
+
 static inline void xe_sched_resubmit_jobs(struct xe_gpu_scheduler *sched)
 {
 	struct drm_sched_job *s_job;
diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
index 10556156eaad..f15192b83738 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit.c
+++ b/drivers/gpu/drm/xe/xe_guc_submit.c
@@ -2053,6 +2053,16 @@ static int guc_exec_queue_init(struct xe_exec_queue *q)
 	return err;
 }
 
+static void guc_exec_queue_reinit(struct xe_exec_queue *q)
+{
+	struct xe_gpu_scheduler *sched = &q->guc->sched;
+	long timeout = (q->vm && xe_vm_in_lr_mode(q->vm)) ? MAX_SCHEDULE_TIMEOUT :
+			msecs_to_jiffies(q->sched_props.job_timeout_ms);
+
+	xe_sched_update_timeout(sched, timeout);
+	atomic_set(&q->guc->state, 0);
+}
+
 static void guc_exec_queue_kill(struct xe_exec_queue *q)
 {
 	trace_xe_exec_queue_kill(q);
@@ -2295,6 +2305,7 @@ static bool guc_exec_queue_active(struct xe_exec_queue *q)
  */
 static const struct xe_exec_queue_ops guc_exec_queue_ops = {
 	.init = guc_exec_queue_init,
+	.reinit = guc_exec_queue_reinit,
 	.kill = guc_exec_queue_kill,
 	.fini = guc_exec_queue_fini,
 	.destroy = guc_exec_queue_destroy,
--
2.43.0