From: "Lis, Tomasz" <tomasz.lis@intel.com>
To: Matthew Brost <matthew.brost@intel.com>,
<intel-xe@lists.freedesktop.org>
Subject: Re: [PATCH v3 28/36] drm/xe/vf: Replay GuC submission state on pause / unpause
Date: Wed, 1 Oct 2025 16:37:36 +0200
Message-ID: <bd5f4907-5ff3-407a-b385-86533e4e58af@intel.com>
In-Reply-To: <20250929025542.1486303-29-matthew.brost@intel.com>
On 9/29/2025 4:55 AM, Matthew Brost wrote:
> Fix up the GuC submission pause / unpause functions to properly replay
> any state that could be lost during VF post-migration recovery.
>
> v3:
> - Add helpers for revert / replay (Tomasz)
> - Add comment around WQ NOPs (Tomasz)
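
For the record, my understanding of the recovery ordering with this
patch in place -- a sketch only, and where exactly the final unpause is
invoked from the kickstart path is my assumption, not something this
diff shows:

    /* Illustrative VF recovery sequence, per GT (not actual code) */
    xe_guc_submit_pause(guc);          /* revert pending state, NOP WQ,
                                          rewind ring tails */
    /* ... GGTT / CTB / TLB fixups ... */
    xe_guc_ct_restart(&gt->uc.guc.ct);           /* rearm */
    xe_guc_submit_unpause_prepare(&gt->uc.guc);  /* re-emit pending jobs */
    xe_guc_submit_unpause(&gt->uc.guc);          /* resubmit + replay msgs */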
Reviewed-by: Tomasz Lis <tomasz.lis@intel.com>
-Tomasz
> Signed-off-by: Matthew Brost <matthew.brost@intel.com>
> ---
> drivers/gpu/drm/xe/xe_gpu_scheduler.c | 14 ++
> drivers/gpu/drm/xe/xe_gpu_scheduler.h | 2 +
> drivers/gpu/drm/xe/xe_gt_sriov_vf.c | 1 +
> drivers/gpu/drm/xe/xe_guc_exec_queue_types.h | 15 ++
> drivers/gpu/drm/xe/xe_guc_submit.c | 242 +++++++++++++++++--
> drivers/gpu/drm/xe/xe_guc_submit.h | 1 +
> drivers/gpu/drm/xe/xe_sched_job_types.h | 4 +
> 7 files changed, 264 insertions(+), 15 deletions(-)
>
> diff --git a/drivers/gpu/drm/xe/xe_gpu_scheduler.c b/drivers/gpu/drm/xe/xe_gpu_scheduler.c
> index 455ccaf17314..af300adc7e1a 100644
> --- a/drivers/gpu/drm/xe/xe_gpu_scheduler.c
> +++ b/drivers/gpu/drm/xe/xe_gpu_scheduler.c
> @@ -135,3 +135,17 @@ void xe_sched_add_msg_locked(struct xe_gpu_scheduler *sched,
> list_add_tail(&msg->link, &sched->msgs);
> xe_sched_process_msg_queue(sched);
> }
> +
> +/**
> + * xe_sched_add_msg_head() - Xe GPU scheduler add message to head of list
> + * @sched: Xe GPU scheduler
> + * @msg: Message to add
> + */
> +void xe_sched_add_msg_head(struct xe_gpu_scheduler *sched,
> + struct xe_sched_msg *msg)
> +{
> + lockdep_assert_held(&sched->base.job_list_lock);
> +
> + list_add(&msg->link, &sched->msgs);
> + xe_sched_process_msg_queue(sched);
> +}
> diff --git a/drivers/gpu/drm/xe/xe_gpu_scheduler.h b/drivers/gpu/drm/xe/xe_gpu_scheduler.h
> index e548b2aed95a..010003a6103a 100644
> --- a/drivers/gpu/drm/xe/xe_gpu_scheduler.h
> +++ b/drivers/gpu/drm/xe/xe_gpu_scheduler.h
> @@ -29,6 +29,8 @@ void xe_sched_add_msg(struct xe_gpu_scheduler *sched,
> struct xe_sched_msg *msg);
> void xe_sched_add_msg_locked(struct xe_gpu_scheduler *sched,
> struct xe_sched_msg *msg);
> +void xe_sched_add_msg_head(struct xe_gpu_scheduler *sched,
> + struct xe_sched_msg *msg);
>
> static inline void xe_sched_msg_lock(struct xe_gpu_scheduler *sched)
> {
> diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
> index 9f33561b91c6..0d94867dce8e 100644
> --- a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
> +++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
> @@ -1217,6 +1217,7 @@ static int vf_post_migration_fixups(struct xe_gt *gt)
> static void vf_post_migration_rearm(struct xe_gt *gt)
> {
> xe_guc_ct_restart(>->uc.guc.ct);
> + xe_guc_submit_unpause_prepare(>->uc.guc);
> }
>
> static void vf_post_migration_kickstart(struct xe_gt *gt)
> diff --git a/drivers/gpu/drm/xe/xe_guc_exec_queue_types.h b/drivers/gpu/drm/xe/xe_guc_exec_queue_types.h
> index c30c0e3ccbbb..a3b034e4b205 100644
> --- a/drivers/gpu/drm/xe/xe_guc_exec_queue_types.h
> +++ b/drivers/gpu/drm/xe/xe_guc_exec_queue_types.h
> @@ -51,6 +51,21 @@ struct xe_guc_exec_queue {
> wait_queue_head_t suspend_wait;
> /** @suspend_pending: a suspend of the exec_queue is pending */
> bool suspend_pending;
> + /**
> + * @needs_cleanup: Needs a cleanup message during VF post migration
> + * recovery.
> + */
> + bool needs_cleanup;
> + /**
> + * @needs_suspend: Needs a suspend message during VF post migration
> + * recovery.
> + */
> + bool needs_suspend;
> + /**
> + * @needs_resume: Needs a resume message during VF post migration
> + * recovery.
> + */
> + bool needs_resume;
> };
>
> #endif
> diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
> index 99ea9b3507cd..497a736c23c3 100644
> --- a/drivers/gpu/drm/xe/xe_guc_submit.c
> +++ b/drivers/gpu/drm/xe/xe_guc_submit.c
> @@ -424,6 +424,11 @@ static void set_exec_queue_destroyed(struct xe_exec_queue *q)
> atomic_or(EXEC_QUEUE_STATE_DESTROYED, &q->guc->state);
> }
>
> +static void clear_exec_queue_destroyed(struct xe_exec_queue *q)
> +{
> + atomic_and(~EXEC_QUEUE_STATE_DESTROYED, &q->guc->state);
> +}
> +
> static bool exec_queue_banned(struct xe_exec_queue *q)
> {
> return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_BANNED;
> @@ -504,7 +509,12 @@ static void set_exec_queue_extra_ref(struct xe_exec_queue *q)
> atomic_or(EXEC_QUEUE_STATE_EXTRA_REF, &q->guc->state);
> }
>
> -static bool __maybe_unused exec_queue_pending_resume(struct xe_exec_queue *q)
> +static void clear_exec_queue_extra_ref(struct xe_exec_queue *q)
> +{
> + atomic_and(~EXEC_QUEUE_STATE_EXTRA_REF, &q->guc->state);
> +}
> +
> +static bool exec_queue_pending_resume(struct xe_exec_queue *q)
> {
> return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_PENDING_RESUME;
> }
> @@ -519,7 +529,7 @@ static void clear_exec_queue_pending_resume(struct xe_exec_queue *q)
> atomic_and(~EXEC_QUEUE_STATE_PENDING_RESUME, &q->guc->state);
> }
>
> -static bool __maybe_unused exec_queue_pending_tdr_exit(struct xe_exec_queue *q)
> +static bool exec_queue_pending_tdr_exit(struct xe_exec_queue *q)
> {
> return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_PENDING_TDR_EXIT;
> }
> @@ -1079,7 +1089,7 @@ static void wq_item_append(struct xe_exec_queue *q)
> }
>
> #define RESUME_PENDING ~0x0ull
> -static void submit_exec_queue(struct xe_exec_queue *q)
> +static void submit_exec_queue(struct xe_exec_queue *q, struct xe_sched_job *job)
> {
> struct xe_guc *guc = exec_queue_to_guc(q);
> struct xe_lrc *lrc = q->lrc[0];
> @@ -1091,10 +1101,13 @@ static void submit_exec_queue(struct xe_exec_queue *q)
>
> xe_gt_assert(guc_to_gt(guc), exec_queue_registered(q));
>
> - if (xe_exec_queue_is_parallel(q))
> - wq_item_append(q);
> - else
> - xe_lrc_set_ring_tail(lrc, lrc->ring.tail);
> + if (!job->skip_emit || job->last_replay) {
> + if (xe_exec_queue_is_parallel(q))
> + wq_item_append(q);
> + else
> + xe_lrc_set_ring_tail(lrc, lrc->ring.tail);
> + job->last_replay = false;
> + }
>
> if (exec_queue_suspended(q) && !xe_exec_queue_is_parallel(q))
> return;
> @@ -1147,8 +1160,10 @@ guc_exec_queue_run_job(struct drm_sched_job *drm_job)
> if (!killed_or_banned_or_wedged && !xe_sched_job_is_error(job)) {
> if (!exec_queue_registered(q))
> register_exec_queue(q, GUC_CONTEXT_NORMAL);
> - q->ring_ops->emit_job(job);
> - submit_exec_queue(q);
> + if (!job->skip_emit)
> + q->ring_ops->emit_job(job);
> + submit_exec_queue(q, job);
> + job->skip_emit = false;
> }
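
The skip_emit / last_replay handshake took me a second read. Condensing
the two hunks above into one place, this is how I read the replay path
(illustrative, simplified from the diff):

    if (!job->skip_emit)
            q->ring_ops->emit_job(job);  /* normal path emits here */
    /* in submit_exec_queue(): advance the ring tail / WQ for normal
     * jobs, or exactly once for the final job of a replay batch */
    if (!job->skip_emit || job->last_replay) {
            if (xe_exec_queue_is_parallel(q))
                    wq_item_append(q);
            else
                    xe_lrc_set_ring_tail(lrc, lrc->ring.tail);
            job->last_replay = false;
    }
    job->skip_emit = false;              /* one-shot replay flag */

So replayed jobs were already re-emitted in unpause_prepare with
fixed-up GGTT addresses, and only the last one moves the tail past the
whole batch. Looks correct to me.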
>
> /*
> @@ -1865,6 +1880,7 @@ static void __guc_exec_queue_process_msg_resume(struct xe_sched_msg *msg)
> #define RESUME 4
> #define OPCODE_MASK 0xf
> #define MSG_LOCKED BIT(8)
> +#define MSG_HEAD BIT(9)
>
> static void guc_exec_queue_process_msg(struct xe_sched_msg *msg)
> {
> @@ -1989,12 +2005,24 @@ static void guc_exec_queue_add_msg(struct xe_exec_queue *q, struct xe_sched_msg
> msg->private_data = q;
>
> trace_xe_sched_msg_add(msg);
> - if (opcode & MSG_LOCKED)
> + if (opcode & MSG_HEAD)
> + xe_sched_add_msg_head(&q->guc->sched, msg);
> + else if (opcode & MSG_LOCKED)
> xe_sched_add_msg_locked(&q->guc->sched, msg);
> else
> xe_sched_add_msg(&q->guc->sched, msg);
> }
>
> +static void guc_exec_queue_try_add_msg_head(struct xe_exec_queue *q,
> + struct xe_sched_msg *msg,
> + u32 opcode)
> +{
> + if (!list_empty(&msg->link))
> + return;
> +
> + guc_exec_queue_add_msg(q, msg, opcode | MSG_LOCKED | MSG_HEAD);
> +}
> +
> static bool guc_exec_queue_try_add_msg(struct xe_exec_queue *q,
> struct xe_sched_msg *msg,
> u32 opcode)
> @@ -2278,6 +2306,105 @@ void xe_guc_submit_stop(struct xe_guc *guc)
>
> }
>
> +static void guc_exec_queue_revert_pending_state_change(struct xe_exec_queue *q)
> +{
> + bool pending_enable, pending_disable, pending_resume;
> +
> + pending_enable = exec_queue_pending_enable(q);
> + pending_resume = exec_queue_pending_resume(q);
> +
> + if (pending_enable && pending_resume)
> + q->guc->needs_resume = true;
> +
> + if (pending_enable && !pending_resume &&
> + !exec_queue_pending_tdr_exit(q)) {
> + clear_exec_queue_registered(q);
> + if (xe_exec_queue_is_lr(q))
> + xe_exec_queue_put(q);
> + }
> +
> + if (pending_enable) {
> + clear_exec_queue_enabled(q);
> + clear_exec_queue_pending_resume(q);
> + clear_exec_queue_pending_tdr_exit(q);
> + clear_exec_queue_pending_enable(q);
> + }
> +
> + if (exec_queue_destroyed(q) && exec_queue_registered(q)) {
> + clear_exec_queue_destroyed(q);
> + if (exec_queue_extra_ref(q))
> + xe_exec_queue_put(q);
> + else
> + q->guc->needs_cleanup = true;
> + clear_exec_queue_extra_ref(q);
> + }
> +
> + pending_disable = exec_queue_pending_disable(q);
> +
> + if (pending_disable && exec_queue_suspended(q)) {
> + clear_exec_queue_suspended(q);
> + q->guc->needs_suspend = true;
> + }
> +
> + if (pending_disable) {
> + if (!pending_enable)
> + set_exec_queue_enabled(q);
> + clear_exec_queue_pending_disable(q);
> + clear_exec_queue_check_timeout(q);
> + }
> +
> + q->guc->resume_time = 0;
> +}
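
A rough decision table for the revert, as I read it (summary only, not
normative):

    pending state at pause          action now                replay on unpause
    ------------------------------  ------------------------  ------------------
    enable + resume                 set needs_resume          RESUME msg at head
    enable, no resume/tdr-exit      clear registered (+put    re-register in
                                    for LR queues)            run_job
    destroyed + registered,         drop the extra ref        --
      with extra ref
    destroyed + registered,         set needs_cleanup         CLEANUP msg
      no extra ref
    disable + suspended             set needs_suspend         SUSPEND msg at head
    disable                         restore enabled, clear    --
                                    pending/check_timeout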
> +
> +/*
> + * This function is quite complex, but it is the only real way to ensure
> + * no state is lost during VF resume flows. It scans the queue state, makes
> + * adjustments as needed, and queues jobs / messages to be replayed upon
> + * unpause.
> + */
> +static void guc_exec_queue_pause(struct xe_guc *guc, struct xe_exec_queue *q)
> +{
> + struct xe_gpu_scheduler *sched = &q->guc->sched;
> + struct xe_sched_job *job;
> + int i;
> +
> + lockdep_assert_held(&guc->submission_state.lock);
> +
> + /* Stop scheduling + flush any DRM scheduler operations */
> + xe_sched_submission_stop(sched);
> + if (xe_exec_queue_is_lr(q))
> + cancel_work_sync(&q->guc->lr_tdr);
> + else
> + cancel_delayed_work_sync(&sched->base.work_tdr);
> +
> + guc_exec_queue_revert_pending_state_change(q);
> +
> + if (xe_exec_queue_is_parallel(q)) {
> + struct xe_device *xe = guc_to_xe(guc);
> + struct iosys_map map = xe_lrc_parallel_map(q->lrc[0]);
> +
> + /*
> + * NOP existing WQ commands that may contain stale GGTT
> + * addresses. These will be replayed upon unpause. The hardware
> + * seems to get confused if the WQ head/tail pointers are
> + * adjusted.
> + */
> + for (i = 0; i < WQ_SIZE / sizeof(u32); ++i)
> + parallel_write(xe, map, wq[i],
> + FIELD_PREP(WQ_TYPE_MASK, WQ_TYPE_NOOP) |
> + FIELD_PREP(WQ_LEN_MASK, 0));
> + }
> +
> + job = xe_sched_first_pending_job(sched);
> + if (job) {
> + /*
> + * Adjust the software tail so replayed jobs overwrite their
> + * previous positions in the ring buffer with new GGTT addresses.
> + */
> + for (i = 0; i < q->width; ++i)
> + q->lrc[i]->ring.tail = job->ptrs[i].head;
> + }
> +}
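
The tail rewind at the end benefits from a picture; my mental model,
assuming job->ptrs[i].head holds the ring position saved at original
submission:

    /*
     * Before pause:   |--consumed--|jobA|jobB|   <- lrc->ring.tail
     * After pause:    ring.tail = jobA's saved head (job->ptrs[i].head)
     * After prepare:  jobA..jobB re-emitted over the same span with
     *                 post-migration GGTT addresses; the HW tail only
     *                 moves once last_replay flushes it.
     */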
> +
> /**
> * xe_guc_submit_pause - Stop further runs of submission tasks on given GuC.
> * @guc: the &xe_guc struct instance whose scheduler is to be disabled
> @@ -2287,8 +2414,12 @@ void xe_guc_submit_pause(struct xe_guc *guc)
> struct xe_exec_queue *q;
> unsigned long index;
>
> + xe_gt_assert(guc_to_gt(guc), vf_recovery(guc));
> +
> + mutex_lock(&guc->submission_state.lock);
> xa_for_each(&guc->submission_state.exec_queue_lookup, index, q)
> - xe_sched_submission_stop_async(&q->guc->sched);
> + guc_exec_queue_pause(guc, q);
> + mutex_unlock(&guc->submission_state.lock);
> }
>
> static void guc_exec_queue_start(struct xe_exec_queue *q)
> @@ -2337,11 +2468,92 @@ int xe_guc_submit_start(struct xe_guc *guc)
> return 0;
> }
>
> -static void guc_exec_queue_unpause(struct xe_exec_queue *q)
> +static void guc_exec_queue_unpause_prepare(struct xe_guc *guc,
> + struct xe_exec_queue *q)
> {
> struct xe_gpu_scheduler *sched = &q->guc->sched;
> + struct drm_sched_job *s_job;
> + struct xe_sched_job *job = NULL;
> +
> + list_for_each_entry(s_job, &sched->base.pending_list, list) {
> + job = to_xe_sched_job(s_job);
> +
> + q->ring_ops->emit_job(job);
> + job->skip_emit = true;
> + }
>
> + if (job)
> + job->last_replay = true;
> +}
> +
> +/**
> + * xe_guc_submit_unpause_prepare - Prepare to unpause submission tasks on the given GuC.
> + * @guc: the &xe_guc struct instance whose scheduler is to be prepared for unpause
> + */
> +void xe_guc_submit_unpause_prepare(struct xe_guc *guc)
> +{
> + struct xe_exec_queue *q;
> + unsigned long index;
> +
> + xe_gt_assert(guc_to_gt(guc), vf_recovery(guc));
> +
> + mutex_lock(&guc->submission_state.lock);
> + xa_for_each(&guc->submission_state.exec_queue_lookup, index, q)
> + guc_exec_queue_unpause_prepare(guc, q);
> + mutex_unlock(&guc->submission_state.lock);
> +}
> +
> +static void guc_exec_queue_replay_pending_state_change(struct xe_exec_queue *q)
> +{
> + struct xe_gpu_scheduler *sched = &q->guc->sched;
> + struct xe_sched_msg *msg;
> +
> + if (q->guc->needs_cleanup) {
> + msg = q->guc->static_msgs + STATIC_MSG_CLEANUP;
> +
> + guc_exec_queue_add_msg(q, msg, CLEANUP);
> + q->guc->needs_cleanup = false;
> + }
> +
> + if (q->guc->needs_suspend) {
> + msg = q->guc->static_msgs + STATIC_MSG_SUSPEND;
> +
> + xe_sched_msg_lock(sched);
> + guc_exec_queue_try_add_msg_head(q, msg, SUSPEND);
> + xe_sched_msg_unlock(sched);
> +
> + q->guc->needs_suspend = false;
> + }
> +
> + /*
> + * The resume must be in the message queue before the suspend, as it
> + * is not possible for a resume to be issued while a suspend is
> + * pending, but the inverse is possible.
> + */
> + if (q->guc->needs_resume) {
> + msg = q->guc->static_msgs + STATIC_MSG_RESUME;
> +
> + xe_sched_msg_lock(sched);
> + guc_exec_queue_try_add_msg_head(q, msg, RESUME);
> + xe_sched_msg_unlock(sched);
> +
> + q->guc->needs_resume = false;
> + }
> +}
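
On the ordering comment above: since xe_sched_add_msg_head() is a plain
list_add(), i.e. insertion at the head of the list, queuing SUSPEND
first and RESUME second gives exactly the required order:

    /* add SUSPEND:  msgs = [SUSPEND, <pre-pause msgs>]          */
    /* add RESUME:   msgs = [RESUME, SUSPEND, <pre-pause msgs>]  */

so the RESUME is processed before the SUSPEND, matching the constraint
that a resume cannot be issued while a suspend is pending.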
> +
> +static void guc_exec_queue_unpause(struct xe_guc *guc, struct xe_exec_queue *q)
> +{
> + struct xe_gpu_scheduler *sched = &q->guc->sched;
> + bool needs_tdr = exec_queue_killed_or_banned_or_wedged(q);
> +
> + lockdep_assert_held(&guc->submission_state.lock);
> +
> + xe_sched_resubmit_jobs(sched);
> + guc_exec_queue_replay_pending_state_change(q);
> xe_sched_submission_start(sched);
> + if (needs_tdr)
> + xe_guc_exec_queue_trigger_cleanup(q);
> + xe_sched_submission_resume_tdr(sched);
> }
>
> /**
> @@ -2353,10 +2565,10 @@ void xe_guc_submit_unpause(struct xe_guc *guc)
> struct xe_exec_queue *q;
> unsigned long index;
>
> + mutex_lock(&guc->submission_state.lock);
> xa_for_each(&guc->submission_state.exec_queue_lookup, index, q)
> - guc_exec_queue_unpause(q);
> -
> - wake_up_all(&guc->ct.wq);
> + guc_exec_queue_unpause(guc, q);
> + mutex_unlock(&guc->submission_state.lock);
> }
>
> /**
> diff --git a/drivers/gpu/drm/xe/xe_guc_submit.h b/drivers/gpu/drm/xe/xe_guc_submit.h
> index fe82c317048e..b49a2748ec46 100644
> --- a/drivers/gpu/drm/xe/xe_guc_submit.h
> +++ b/drivers/gpu/drm/xe/xe_guc_submit.h
> @@ -22,6 +22,7 @@ void xe_guc_submit_stop(struct xe_guc *guc);
> int xe_guc_submit_start(struct xe_guc *guc);
> void xe_guc_submit_pause(struct xe_guc *guc);
> void xe_guc_submit_unpause(struct xe_guc *guc);
> +void xe_guc_submit_unpause_prepare(struct xe_guc *guc);
> void xe_guc_submit_pause_abort(struct xe_guc *guc);
> void xe_guc_submit_wedge(struct xe_guc *guc);
>
> diff --git a/drivers/gpu/drm/xe/xe_sched_job_types.h b/drivers/gpu/drm/xe/xe_sched_job_types.h
> index 7ce58765a34a..13e7a12b03ad 100644
> --- a/drivers/gpu/drm/xe/xe_sched_job_types.h
> +++ b/drivers/gpu/drm/xe/xe_sched_job_types.h
> @@ -63,6 +63,10 @@ struct xe_sched_job {
> bool ring_ops_flush_tlb;
> /** @ggtt: mapped in ggtt. */
> bool ggtt;
> + /** @skip_emit: skip emitting the job */
> + bool skip_emit;
> + /** @last_replay: last job being replayed */
> + bool last_replay;
> /** @ptrs: per instance pointers. */
> struct xe_job_ptrs ptrs[];
> };