* [PATCH v9 01/34] drm/xe: Add NULL checks to scratch LRC allocation
2025-10-08 18:04 [PATCH v9 00/34] VF migration redesign Matthew Brost
@ 2025-10-08 18:04 ` Matthew Brost
2025-10-08 18:04 ` [PATCH v9 02/34] drm/xe: Save off position in ring in which a job was programmed Matthew Brost
` (36 subsequent siblings)
37 siblings, 0 replies; 42+ messages in thread
From: Matthew Brost @ 2025-10-08 18:04 UTC (permalink / raw)
To: intel-xe
kmalloc can fail; the returned value must have a NULL check. This check should
be placed immediately after the kmalloc call for clarity.
v5:
- Assert state->buffer in setup_bo if buffer is iomem (Tomasz)
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Tomasz Lis <tomasz.lis@intel.com>
---
drivers/gpu/drm/xe/xe_lrc.c | 13 +++++++++----
1 file changed, 9 insertions(+), 4 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_lrc.c b/drivers/gpu/drm/xe/xe_lrc.c
index af09f70f6e78..2c6eae2de1f2 100644
--- a/drivers/gpu/drm/xe/xe_lrc.c
+++ b/drivers/gpu/drm/xe/xe_lrc.c
@@ -1214,8 +1214,7 @@ static int setup_bo(struct bo_setup_state *state)
ssize_t remain;
if (state->lrc->bo->vmap.is_iomem) {
- if (!state->buffer)
- return -ENOMEM;
+ xe_gt_assert(state->hwe->gt, state->buffer);
state->ptr = state->buffer;
} else {
state->ptr = state->lrc->bo->vmap.vaddr + state->offset;
@@ -1303,8 +1302,11 @@ static int setup_wa_bb(struct xe_lrc *lrc, struct xe_hw_engine *hwe)
u32 *buf = NULL;
int ret;
- if (lrc->bo->vmap.is_iomem)
+ if (lrc->bo->vmap.is_iomem) {
buf = kmalloc(LRC_WA_BB_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ }
ret = xe_lrc_setup_wa_bb_with_scratch(lrc, hwe, buf);
@@ -1347,8 +1349,11 @@ setup_indirect_ctx(struct xe_lrc *lrc, struct xe_hw_engine *hwe)
if (xe_gt_WARN_ON(lrc->gt, !state.funcs))
return 0;
- if (lrc->bo->vmap.is_iomem)
+ if (lrc->bo->vmap.is_iomem) {
state.buffer = kmalloc(state.max_size, GFP_KERNEL);
+ if (!state.buffer)
+ return -ENOMEM;
+ }
ret = setup_bo(&state);
if (ret) {
--
2.34.1
^ permalink raw reply related [flat|nested] 42+ messages in thread* [PATCH v9 02/34] drm/xe: Save off position in ring in which a job was programmed
2025-10-08 18:04 [PATCH v9 00/34] VF migration redesign Matthew Brost
2025-10-08 18:04 ` [PATCH v9 01/34] drm/xe: Add NULL checks to scratch LRC allocation Matthew Brost
@ 2025-10-08 18:04 ` Matthew Brost
2025-10-08 18:04 ` [PATCH v9 03/34] drm/xe/guc: Track pending-enable source in submission state Matthew Brost
` (35 subsequent siblings)
37 siblings, 0 replies; 42+ messages in thread
From: Matthew Brost @ 2025-10-08 18:04 UTC (permalink / raw)
To: intel-xe
VF post-migration recovery needs to modify the ring with updated GGTT
addresses for pending jobs. Save off the position in the ring at which a job
was programmed to facilitate this.
v4:
- s/VF resume/VF post-migration recovery (Tomasz)
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Tomasz Lis <tomasz.lis@intel.com>
---
drivers/gpu/drm/xe/xe_ring_ops.c | 23 +++++++++++++++++++----
drivers/gpu/drm/xe/xe_sched_job_types.h | 5 +++++
2 files changed, 24 insertions(+), 4 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_ring_ops.c b/drivers/gpu/drm/xe/xe_ring_ops.c
index d71837773d6c..ac0c6dcffe15 100644
--- a/drivers/gpu/drm/xe/xe_ring_ops.c
+++ b/drivers/gpu/drm/xe/xe_ring_ops.c
@@ -245,12 +245,14 @@ static int emit_copy_timestamp(struct xe_lrc *lrc, u32 *dw, int i)
/* for engines that don't require any special HW handling (no EUs, no aux inval, etc) */
static void __emit_job_gen12_simple(struct xe_sched_job *job, struct xe_lrc *lrc,
- u64 batch_addr, u32 seqno)
+ u64 batch_addr, u32 *head, u32 seqno)
{
u32 dw[MAX_JOB_SIZE_DW], i = 0;
u32 ppgtt_flag = get_ppgtt_flag(job);
struct xe_gt *gt = job->q->gt;
+ *head = lrc->ring.tail;
+
i = emit_copy_timestamp(lrc, dw, i);
if (job->ring_ops_flush_tlb) {
@@ -296,7 +298,7 @@ static bool has_aux_ccs(struct xe_device *xe)
}
static void __emit_job_gen12_video(struct xe_sched_job *job, struct xe_lrc *lrc,
- u64 batch_addr, u32 seqno)
+ u64 batch_addr, u32 *head, u32 seqno)
{
u32 dw[MAX_JOB_SIZE_DW], i = 0;
u32 ppgtt_flag = get_ppgtt_flag(job);
@@ -304,6 +306,8 @@ static void __emit_job_gen12_video(struct xe_sched_job *job, struct xe_lrc *lrc,
struct xe_device *xe = gt_to_xe(gt);
bool decode = job->q->class == XE_ENGINE_CLASS_VIDEO_DECODE;
+ *head = lrc->ring.tail;
+
i = emit_copy_timestamp(lrc, dw, i);
dw[i++] = preparser_disable(true);
@@ -346,7 +350,8 @@ static void __emit_job_gen12_video(struct xe_sched_job *job, struct xe_lrc *lrc,
static void __emit_job_gen12_render_compute(struct xe_sched_job *job,
struct xe_lrc *lrc,
- u64 batch_addr, u32 seqno)
+ u64 batch_addr, u32 *head,
+ u32 seqno)
{
u32 dw[MAX_JOB_SIZE_DW], i = 0;
u32 ppgtt_flag = get_ppgtt_flag(job);
@@ -355,6 +360,8 @@ static void __emit_job_gen12_render_compute(struct xe_sched_job *job,
bool lacks_render = !(gt->info.engine_mask & XE_HW_ENGINE_RCS_MASK);
u32 mask_flags = 0;
+ *head = lrc->ring.tail;
+
i = emit_copy_timestamp(lrc, dw, i);
dw[i++] = preparser_disable(true);
@@ -396,11 +403,14 @@ static void __emit_job_gen12_render_compute(struct xe_sched_job *job,
}
static void emit_migration_job_gen12(struct xe_sched_job *job,
- struct xe_lrc *lrc, u32 seqno)
+ struct xe_lrc *lrc, u32 *head,
+ u32 seqno)
{
u32 saddr = xe_lrc_start_seqno_ggtt_addr(lrc);
u32 dw[MAX_JOB_SIZE_DW], i = 0;
+ *head = lrc->ring.tail;
+
i = emit_copy_timestamp(lrc, dw, i);
i = emit_store_imm_ggtt(saddr, seqno, dw, i);
@@ -434,6 +444,7 @@ static void emit_job_gen12_gsc(struct xe_sched_job *job)
__emit_job_gen12_simple(job, job->q->lrc[0],
job->ptrs[0].batch_addr,
+ &job->ptrs[0].head,
xe_sched_job_lrc_seqno(job));
}
@@ -443,6 +454,7 @@ static void emit_job_gen12_copy(struct xe_sched_job *job)
if (xe_sched_job_is_migration(job->q)) {
emit_migration_job_gen12(job, job->q->lrc[0],
+ &job->ptrs[0].head,
xe_sched_job_lrc_seqno(job));
return;
}
@@ -450,6 +462,7 @@ static void emit_job_gen12_copy(struct xe_sched_job *job)
for (i = 0; i < job->q->width; ++i)
__emit_job_gen12_simple(job, job->q->lrc[i],
job->ptrs[i].batch_addr,
+ &job->ptrs[i].head,
xe_sched_job_lrc_seqno(job));
}
@@ -461,6 +474,7 @@ static void emit_job_gen12_video(struct xe_sched_job *job)
for (i = 0; i < job->q->width; ++i)
__emit_job_gen12_video(job, job->q->lrc[i],
job->ptrs[i].batch_addr,
+ &job->ptrs[i].head,
xe_sched_job_lrc_seqno(job));
}
@@ -471,6 +485,7 @@ static void emit_job_gen12_render_compute(struct xe_sched_job *job)
for (i = 0; i < job->q->width; ++i)
__emit_job_gen12_render_compute(job, job->q->lrc[i],
job->ptrs[i].batch_addr,
+ &job->ptrs[i].head,
xe_sched_job_lrc_seqno(job));
}
diff --git a/drivers/gpu/drm/xe/xe_sched_job_types.h b/drivers/gpu/drm/xe/xe_sched_job_types.h
index dbf260dded8d..7ce58765a34a 100644
--- a/drivers/gpu/drm/xe/xe_sched_job_types.h
+++ b/drivers/gpu/drm/xe/xe_sched_job_types.h
@@ -24,6 +24,11 @@ struct xe_job_ptrs {
struct dma_fence_chain *chain_fence;
/** @batch_addr: Batch buffer address. */
u64 batch_addr;
+ /**
+ * @head: The tail pointer of the LRC (so head pointer of job) when the
+ * job was submitted
+ */
+ u32 head;
};
/**
--
2.34.1
^ permalink raw reply related [flat|nested] 42+ messages in thread* [PATCH v9 03/34] drm/xe/guc: Track pending-enable source in submission state
2025-10-08 18:04 [PATCH v9 00/34] VF migration redesign Matthew Brost
2025-10-08 18:04 ` [PATCH v9 01/34] drm/xe: Add NULL checks to scratch LRC allocation Matthew Brost
2025-10-08 18:04 ` [PATCH v9 02/34] drm/xe: Save off position in ring in which a job was programmed Matthew Brost
@ 2025-10-08 18:04 ` Matthew Brost
2025-10-08 18:04 ` [PATCH v9 04/34] drm/xe: Track LR jobs in DRM scheduler pending list Matthew Brost
` (34 subsequent siblings)
37 siblings, 0 replies; 42+ messages in thread
From: Matthew Brost @ 2025-10-08 18:04 UTC (permalink / raw)
To: intel-xe
Add explicit tracking in the GuC submission state to record the source
of a pending enable (TDR vs. queue resume path vs. submission).
Disambiguating the origin lets the GuC submission state machine apply
the correct recovery/replay behavior.
This helps VF restore: when the device comes back, the state machine knows
whether the pending enable stems from timeout recovery, from a queue resume
sequence, or submission and can gate sequencing and fixups accordingly.
v4:
- Clarify commit message (Tomasz)
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Tomasz Lis <tomasz.lis@intel.com>
---
drivers/gpu/drm/xe/xe_guc_submit.c | 36 ++++++++++++++++++++++++++++++
1 file changed, 36 insertions(+)
diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
index 16f78376f196..13746f32b231 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit.c
+++ b/drivers/gpu/drm/xe/xe_guc_submit.c
@@ -69,6 +69,8 @@ exec_queue_to_guc(struct xe_exec_queue *q)
#define EXEC_QUEUE_STATE_BANNED (1 << 9)
#define EXEC_QUEUE_STATE_CHECK_TIMEOUT (1 << 10)
#define EXEC_QUEUE_STATE_EXTRA_REF (1 << 11)
+#define EXEC_QUEUE_STATE_PENDING_RESUME (1 << 12)
+#define EXEC_QUEUE_STATE_PENDING_TDR_EXIT (1 << 13)
static bool exec_queue_registered(struct xe_exec_queue *q)
{
@@ -220,6 +222,36 @@ static void set_exec_queue_extra_ref(struct xe_exec_queue *q)
atomic_or(EXEC_QUEUE_STATE_EXTRA_REF, &q->guc->state);
}
+static bool __maybe_unused exec_queue_pending_resume(struct xe_exec_queue *q)
+{
+ return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_PENDING_RESUME;
+}
+
+static void set_exec_queue_pending_resume(struct xe_exec_queue *q)
+{
+ atomic_or(EXEC_QUEUE_STATE_PENDING_RESUME, &q->guc->state);
+}
+
+static void clear_exec_queue_pending_resume(struct xe_exec_queue *q)
+{
+ atomic_and(~EXEC_QUEUE_STATE_PENDING_RESUME, &q->guc->state);
+}
+
+static bool __maybe_unused exec_queue_pending_tdr_exit(struct xe_exec_queue *q)
+{
+ return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_PENDING_TDR_EXIT;
+}
+
+static void set_exec_queue_pending_tdr_exit(struct xe_exec_queue *q)
+{
+ atomic_or(EXEC_QUEUE_STATE_PENDING_TDR_EXIT, &q->guc->state);
+}
+
+static void clear_exec_queue_pending_tdr_exit(struct xe_exec_queue *q)
+{
+ atomic_and(~EXEC_QUEUE_STATE_PENDING_TDR_EXIT, &q->guc->state);
+}
+
static bool exec_queue_killed_or_banned_or_wedged(struct xe_exec_queue *q)
{
return (atomic_read(&q->guc->state) &
@@ -1334,6 +1366,7 @@ guc_exec_queue_timedout_job(struct drm_sched_job *drm_job)
return DRM_GPU_SCHED_STAT_RESET;
sched_enable:
+ set_exec_queue_pending_tdr_exit(q);
enable_scheduling(q);
rearm:
/*
@@ -1493,6 +1526,7 @@ static void __guc_exec_queue_process_msg_resume(struct xe_sched_msg *msg)
clear_exec_queue_suspended(q);
if (!exec_queue_enabled(q)) {
q->guc->resume_time = RESUME_PENDING;
+ set_exec_queue_pending_resume(q);
enable_scheduling(q);
}
} else {
@@ -2065,6 +2099,8 @@ static void handle_sched_done(struct xe_guc *guc, struct xe_exec_queue *q,
xe_gt_assert(guc_to_gt(guc), exec_queue_pending_enable(q));
q->guc->resume_time = ktime_get();
+ clear_exec_queue_pending_resume(q);
+ clear_exec_queue_pending_tdr_exit(q);
clear_exec_queue_pending_enable(q);
smp_wmb();
wake_up_all(&guc->ct.wq);
--
2.34.1
^ permalink raw reply related [flat|nested] 42+ messages in thread* [PATCH v9 04/34] drm/xe: Track LR jobs in DRM scheduler pending list
2025-10-08 18:04 [PATCH v9 00/34] VF migration redesign Matthew Brost
` (2 preceding siblings ...)
2025-10-08 18:04 ` [PATCH v9 03/34] drm/xe/guc: Track pending-enable source in submission state Matthew Brost
@ 2025-10-08 18:04 ` Matthew Brost
2025-10-08 18:04 ` [PATCH v9 05/34] drm/xe: Return first unsignaled job first pending job helper Matthew Brost
` (33 subsequent siblings)
37 siblings, 0 replies; 42+ messages in thread
From: Matthew Brost @ 2025-10-08 18:04 UTC (permalink / raw)
To: intel-xe
VF migration requires jobs to remain pending so they can be replayed
after the VF comes back. Previously, LR job fences were intentionally
signaled immediately after submission to avoid the risk of exporting
them, as these fences do not naturally signal in a timely manner and
could break dma-fence contracts. A side effect of this approach was that
LR jobs were never added to the DRM scheduler’s pending list, preventing
them from being tracked for later resubmission.
We now avoid signaling LR job fences and ensure they are never exported;
Xe already guards against exporting these internal fences. With that
guarantee in place, we can safely track LR jobs in the scheduler’s
pending list so they are eligible for resubmission during VF
post-migration recovery (and similar recovery paths).
An added benefit is that LR queues now gain the DRM scheduler’s built-in
flow control over ring usage rather than rejecting new jobs in the exec
IOCTL if the ring is full.
v2:
- Ensure DRM scheduler TDR doesn't run for LR jobs
- Stack variable for killed_or_banned_or_wedged
v4:
- Clarify commit message (Tomasz)
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Tomasz Lis <tomasz.lis@intel.com>
---
drivers/gpu/drm/xe/xe_exec.c | 12 ++-------
drivers/gpu/drm/xe/xe_exec_queue.c | 19 -------------
drivers/gpu/drm/xe/xe_exec_queue.h | 2 --
drivers/gpu/drm/xe/xe_guc_submit.c | 43 ++++++++++++++++++++----------
4 files changed, 31 insertions(+), 45 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_exec.c b/drivers/gpu/drm/xe/xe_exec.c
index 83897950f0da..0dc27476832b 100644
--- a/drivers/gpu/drm/xe/xe_exec.c
+++ b/drivers/gpu/drm/xe/xe_exec.c
@@ -124,7 +124,7 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
struct xe_validation_ctx ctx;
struct xe_sched_job *job;
struct xe_vm *vm;
- bool write_locked, skip_retry = false;
+ bool write_locked;
int err = 0;
struct xe_hw_engine_group *group;
enum xe_hw_engine_group_execution_mode mode, previous_mode;
@@ -266,12 +266,6 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
goto err_exec;
}
- if (xe_exec_queue_is_lr(q) && xe_exec_queue_ring_full(q)) {
- err = -EWOULDBLOCK; /* Aliased to -EAGAIN */
- skip_retry = true;
- goto err_exec;
- }
-
if (xe_exec_queue_uses_pxp(q)) {
err = xe_vm_validate_protected(q->vm);
if (err)
@@ -328,8 +322,6 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
xe_sched_job_init_user_fence(job, &syncs[i]);
}
- if (xe_exec_queue_is_lr(q))
- q->ring_ops->emit_job(job);
if (!xe_vm_in_lr_mode(vm))
xe_exec_queue_last_fence_set(q, vm, &job->drm.s_fence->finished);
xe_sched_job_push(job);
@@ -355,7 +347,7 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
xe_validation_ctx_fini(&ctx);
err_unlock_list:
up_read(&vm->lock);
- if (err == -EAGAIN && !skip_retry)
+ if (err == -EAGAIN)
goto retry;
err_hw_exec_mode:
if (mode == EXEC_MODE_DMA_FENCE)
diff --git a/drivers/gpu/drm/xe/xe_exec_queue.c b/drivers/gpu/drm/xe/xe_exec_queue.c
index df82463b19f6..7621089a47fe 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue.c
+++ b/drivers/gpu/drm/xe/xe_exec_queue.c
@@ -850,25 +850,6 @@ bool xe_exec_queue_is_lr(struct xe_exec_queue *q)
!(q->flags & EXEC_QUEUE_FLAG_VM);
}
-static s32 xe_exec_queue_num_job_inflight(struct xe_exec_queue *q)
-{
- return q->lrc[0]->fence_ctx.next_seqno - xe_lrc_seqno(q->lrc[0]) - 1;
-}
-
-/**
- * xe_exec_queue_ring_full() - Whether an exec_queue's ring is full
- * @q: The exec_queue
- *
- * Return: True if the exec_queue's ring is full, false otherwise.
- */
-bool xe_exec_queue_ring_full(struct xe_exec_queue *q)
-{
- struct xe_lrc *lrc = q->lrc[0];
- s32 max_job = lrc->ring.size / MAX_JOB_SIZE_BYTES;
-
- return xe_exec_queue_num_job_inflight(q) >= max_job;
-}
-
/**
* xe_exec_queue_is_idle() - Whether an exec_queue is idle.
* @q: The exec_queue
diff --git a/drivers/gpu/drm/xe/xe_exec_queue.h b/drivers/gpu/drm/xe/xe_exec_queue.h
index 8821ceb838d0..a4dfbe858bda 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue.h
+++ b/drivers/gpu/drm/xe/xe_exec_queue.h
@@ -64,8 +64,6 @@ static inline bool xe_exec_queue_uses_pxp(struct xe_exec_queue *q)
bool xe_exec_queue_is_lr(struct xe_exec_queue *q);
-bool xe_exec_queue_ring_full(struct xe_exec_queue *q);
-
bool xe_exec_queue_is_idle(struct xe_exec_queue *q);
void xe_exec_queue_kill(struct xe_exec_queue *q);
diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
index 13746f32b231..3a534d93505f 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit.c
+++ b/drivers/gpu/drm/xe/xe_guc_submit.c
@@ -851,30 +851,31 @@ guc_exec_queue_run_job(struct drm_sched_job *drm_job)
struct xe_sched_job *job = to_xe_sched_job(drm_job);
struct xe_exec_queue *q = job->q;
struct xe_guc *guc = exec_queue_to_guc(q);
- struct dma_fence *fence = NULL;
- bool lr = xe_exec_queue_is_lr(q);
+ bool lr = xe_exec_queue_is_lr(q), killed_or_banned_or_wedged =
+ exec_queue_killed_or_banned_or_wedged(q);
xe_gt_assert(guc_to_gt(guc), !(exec_queue_destroyed(q) || exec_queue_pending_disable(q)) ||
exec_queue_banned(q) || exec_queue_suspended(q));
trace_xe_sched_job_run(job);
- if (!exec_queue_killed_or_banned_or_wedged(q) && !xe_sched_job_is_error(job)) {
+ if (!killed_or_banned_or_wedged && !xe_sched_job_is_error(job)) {
if (!exec_queue_registered(q))
register_exec_queue(q, GUC_CONTEXT_NORMAL);
- if (!lr) /* LR jobs are emitted in the exec IOCTL */
- q->ring_ops->emit_job(job);
+ q->ring_ops->emit_job(job);
submit_exec_queue(q);
}
- if (lr) {
- xe_sched_job_set_error(job, -EOPNOTSUPP);
- dma_fence_put(job->fence); /* Drop ref from xe_sched_job_arm */
- } else {
- fence = job->fence;
- }
+ /*
+ * We don't care about job-fence ordering in LR VMs because these fences
+ * are never exported; they are used solely to keep jobs on the pending
+ * list. Once a queue enters an error state, there's no need to track
+ * them.
+ */
+ if (killed_or_banned_or_wedged && lr)
+ xe_sched_job_set_error(job, -ECANCELED);
- return fence;
+ return job->fence;
}
static void guc_exec_queue_free_job(struct drm_sched_job *drm_job)
@@ -916,7 +917,8 @@ static void disable_scheduling_deregister(struct xe_guc *guc,
xe_gt_warn(q->gt, "Pending enable/disable failed to respond\n");
xe_sched_submission_start(sched);
xe_gt_reset_async(q->gt);
- xe_sched_tdr_queue_imm(sched);
+ if (!xe_exec_queue_is_lr(q))
+ xe_sched_tdr_queue_imm(sched);
return;
}
@@ -1008,6 +1010,7 @@ static void xe_guc_exec_queue_lr_cleanup(struct work_struct *w)
struct xe_exec_queue *q = ge->q;
struct xe_guc *guc = exec_queue_to_guc(q);
struct xe_gpu_scheduler *sched = &ge->sched;
+ struct xe_sched_job *job;
bool wedged = false;
xe_gt_assert(guc_to_gt(guc), xe_exec_queue_is_lr(q));
@@ -1058,7 +1061,16 @@ static void xe_guc_exec_queue_lr_cleanup(struct work_struct *w)
if (!exec_queue_killed(q) && !xe_lrc_ring_is_idle(q->lrc[0]))
xe_devcoredump(q, NULL, "LR job cleanup, guc_id=%d", q->guc->id);
+ xe_hw_fence_irq_stop(q->fence_irq);
+
xe_sched_submission_start(sched);
+
+ spin_lock(&sched->base.job_list_lock);
+ list_for_each_entry(job, &sched->base.pending_list, drm.list)
+ xe_sched_job_set_error(job, -ECANCELED);
+ spin_unlock(&sched->base.job_list_lock);
+
+ xe_hw_fence_irq_start(q->fence_irq);
}
#define ADJUST_FIVE_PERCENT(__t) mul_u64_u32_div(__t, 105, 100)
@@ -1129,7 +1141,8 @@ static void enable_scheduling(struct xe_exec_queue *q)
xe_gt_warn(guc_to_gt(guc), "Schedule enable failed to respond");
set_exec_queue_banned(q);
xe_gt_reset_async(q->gt);
- xe_sched_tdr_queue_imm(&q->guc->sched);
+ if (!xe_exec_queue_is_lr(q))
+ xe_sched_tdr_queue_imm(&q->guc->sched);
}
}
@@ -1187,6 +1200,8 @@ guc_exec_queue_timedout_job(struct drm_sched_job *drm_job)
int i = 0;
bool wedged = false, skip_timeout_check;
+ xe_gt_assert(guc_to_gt(guc), !xe_exec_queue_is_lr(q));
+
/*
* TDR has fired before free job worker. Common if exec queue
* immediately closed after last fence signaled. Add back to pending
--
2.34.1
^ permalink raw reply related [flat|nested] 42+ messages in thread* [PATCH v9 05/34] drm/xe: Return first unsignaled job first pending job helper
2025-10-08 18:04 [PATCH v9 00/34] VF migration redesign Matthew Brost
` (3 preceding siblings ...)
2025-10-08 18:04 ` [PATCH v9 04/34] drm/xe: Track LR jobs in DRM scheduler pending list Matthew Brost
@ 2025-10-08 18:04 ` Matthew Brost
2025-10-08 18:04 ` [PATCH v9 06/34] drm/xe: Don't change LRC ring head on job resubmission Matthew Brost
` (32 subsequent siblings)
37 siblings, 0 replies; 42+ messages in thread
From: Matthew Brost @ 2025-10-08 18:04 UTC (permalink / raw)
To: intel-xe
In all cases where the first pending job helper is called, we only want
to retrieve the first unsignaled pending job, as this helper is used
exclusively in recovery flows. It is possible for signaled jobs to
remain in the pending list as the scheduler is stopped, so those should
be skipped.
Also, add kernel documentation to clarify this behavior.
v8:
- Split out into own patch (Auld)
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
---
drivers/gpu/drm/xe/xe_gpu_scheduler.h | 21 +++++++++++++++++----
1 file changed, 17 insertions(+), 4 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_gpu_scheduler.h b/drivers/gpu/drm/xe/xe_gpu_scheduler.h
index e548b2aed95a..3a9ff78d9346 100644
--- a/drivers/gpu/drm/xe/xe_gpu_scheduler.h
+++ b/drivers/gpu/drm/xe/xe_gpu_scheduler.h
@@ -77,17 +77,30 @@ static inline void xe_sched_add_pending_job(struct xe_gpu_scheduler *sched,
spin_unlock(&sched->base.job_list_lock);
}
+/**
+ * xe_sched_first_pending_job() - Find first pending job which is unsignaled
+ * @sched: Xe GPU scheduler
+ *
+ * Return first unsignaled job in pending list or NULL
+ */
static inline
struct xe_sched_job *xe_sched_first_pending_job(struct xe_gpu_scheduler *sched)
{
- struct xe_sched_job *job;
+ struct xe_sched_job *job, *r_job = NULL;
spin_lock(&sched->base.job_list_lock);
- job = list_first_entry_or_null(&sched->base.pending_list,
- struct xe_sched_job, drm.list);
+ list_for_each_entry(job, &sched->base.pending_list, drm.list) {
+ struct drm_sched_fence *s_fence = job->drm.s_fence;
+ struct dma_fence *hw_fence = s_fence->parent;
+
+ if (hw_fence && !dma_fence_is_signaled(hw_fence)) {
+ r_job = job;
+ break;
+ }
+ }
spin_unlock(&sched->base.job_list_lock);
- return job;
+ return r_job;
}
static inline int
--
2.34.1
^ permalink raw reply related [flat|nested] 42+ messages in thread* [PATCH v9 06/34] drm/xe: Don't change LRC ring head on job resubmission
2025-10-08 18:04 [PATCH v9 00/34] VF migration redesign Matthew Brost
` (4 preceding siblings ...)
2025-10-08 18:04 ` [PATCH v9 05/34] drm/xe: Return first unsignaled job first pending job helper Matthew Brost
@ 2025-10-08 18:04 ` Matthew Brost
2025-10-08 18:04 ` [PATCH v9 07/34] drm/xe: Make LRC W/A scratch buffer usage consistent Matthew Brost
` (31 subsequent siblings)
37 siblings, 0 replies; 42+ messages in thread
From: Matthew Brost @ 2025-10-08 18:04 UTC (permalink / raw)
To: intel-xe
Now that we save the job's head during submission, it's no longer
necessary to adjust the LRC ring head during resubmission. Instead, a
software-based adjustment of the tail will overwrite the old jobs in
place. For some odd reason, adjusting the LRC ring head didn't work on
parallel queues, which was causing issues in our CI.
v5:
- Add comment in guc_exec_queue_start explaining why the function works
(Auld)
v7:
- Only adjust first state on first unsignaled job (Auld)
v8:
- Break unsignaled job handling to separate patch (Auld)
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Tomasz Lis <tomasz.lis@intel.com>
---
drivers/gpu/drm/xe/xe_guc_submit.c | 18 ++++++++++++++++--
1 file changed, 16 insertions(+), 2 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
index 3a534d93505f..d123bdb63369 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit.c
+++ b/drivers/gpu/drm/xe/xe_guc_submit.c
@@ -2008,11 +2008,25 @@ static void guc_exec_queue_start(struct xe_exec_queue *q)
struct xe_gpu_scheduler *sched = &q->guc->sched;
if (!exec_queue_killed_or_banned_or_wedged(q)) {
+ struct xe_sched_job *job = xe_sched_first_pending_job(sched);
int i;
trace_xe_exec_queue_resubmit(q);
- for (i = 0; i < q->width; ++i)
- xe_lrc_set_ring_head(q->lrc[i], q->lrc[i]->ring.tail);
+ if (job) {
+ for (i = 0; i < q->width; ++i) {
+ /*
+ * The GuC context is unregistered at this point
+ * time, adjusting software ring tail ensures
+ * jobs are rewritten in original placement,
+ * adjusting LRC tail ensures the newly loaded
+ * GuC / contexts only view the LRC tail
+ * increasing as jobs are written out.
+ */
+ q->lrc[i]->ring.tail = job->ptrs[i].head;
+ xe_lrc_set_ring_tail(q->lrc[i],
+ xe_lrc_ring_head(q->lrc[i]));
+ }
+ }
xe_sched_resubmit_jobs(sched);
}
--
2.34.1
^ permalink raw reply related [flat|nested] 42+ messages in thread* [PATCH v9 07/34] drm/xe: Make LRC W/A scratch buffer usage consistent
2025-10-08 18:04 [PATCH v9 00/34] VF migration redesign Matthew Brost
` (5 preceding siblings ...)
2025-10-08 18:04 ` [PATCH v9 06/34] drm/xe: Don't change LRC ring head on job resubmission Matthew Brost
@ 2025-10-08 18:04 ` Matthew Brost
2025-10-08 18:04 ` [PATCH v9 08/34] drm/xe/vf: Add xe_gt_recovery_pending helper Matthew Brost
` (30 subsequent siblings)
37 siblings, 0 replies; 42+ messages in thread
From: Matthew Brost @ 2025-10-08 18:04 UTC (permalink / raw)
To: intel-xe
The LRC W/A currently checks for LRC being iomem in some places, while
in others it checks if the scratch buffer is non-NULL. This
inconsistency causes issues with the VF post-migration recovery code,
which blindly passes in a scratch buffer.
This patch standardizes the check by consistently verifying whether the
LRC is iomem to determine if the scratch buffer should be used.
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Tomasz Lis <tomasz.lis@intel.com>
Reviewed-by: Lucas De Marchi <lucas.demarchi@intel.com>
---
drivers/gpu/drm/xe/xe_lrc.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/gpu/drm/xe/xe_lrc.c b/drivers/gpu/drm/xe/xe_lrc.c
index 2c6eae2de1f2..b5083c99dd50 100644
--- a/drivers/gpu/drm/xe/xe_lrc.c
+++ b/drivers/gpu/drm/xe/xe_lrc.c
@@ -1247,7 +1247,7 @@ static int setup_bo(struct bo_setup_state *state)
static void finish_bo(struct bo_setup_state *state)
{
- if (!state->buffer)
+ if (!state->lrc->bo->vmap.is_iomem)
return;
xe_map_memcpy_to(gt_to_xe(state->lrc->gt), &state->lrc->bo->vmap,
--
2.34.1
^ permalink raw reply related [flat|nested] 42+ messages in thread* [PATCH v9 08/34] drm/xe/vf: Add xe_gt_recovery_pending helper
2025-10-08 18:04 [PATCH v9 00/34] VF migration redesign Matthew Brost
` (6 preceding siblings ...)
2025-10-08 18:04 ` [PATCH v9 07/34] drm/xe: Make LRC W/A scratch buffer usage consistent Matthew Brost
@ 2025-10-08 18:04 ` Matthew Brost
2025-10-08 18:04 ` [PATCH v9 09/34] drm/xe/vf: Make VF recovery run on per-GT worker Matthew Brost
` (29 subsequent siblings)
37 siblings, 0 replies; 42+ messages in thread
From: Matthew Brost @ 2025-10-08 18:04 UTC (permalink / raw)
To: intel-xe
Add xe_gt_recovery_pending helper.
This helper serves as the singular point to determine whether a GT
recovery is currently in progress. Expected callers include the GuC CT
layer and the GuC submission layer. Atomically visable as soon as vCPU
are unhalted until VF recovery completes.
v3:
- Add GT layer xe_gt_recovery_inprogress (Michal)
- Don't blow up in memirq not enabled (CI)
- Add __memirq_received with clear argument (Michal)
- xe_memirq_sw_int_0_irq_pending rename (Michal)
- Use offset in xe_memirq_sw_int_0_irq_pending (Michal)
v4:
- Refactor xe_gt_recovery_inprogress logic around memirq (Michal)
v5:
- s/inprogress/pending (Michal)
v7:
- Fix typos, adjust comment (Michal)
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Michal Wajdeczko <michal.wajdeczko@intel.com>
Reviewed-by: Tomasz Lis <tomasz.lis@intel.com>
---
drivers/gpu/drm/xe/xe_gt.h | 13 ++++++
drivers/gpu/drm/xe/xe_gt_sriov_vf.c | 28 +++++++++++++
drivers/gpu/drm/xe/xe_gt_sriov_vf.h | 2 +
drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h | 10 +++++
drivers/gpu/drm/xe/xe_memirq.c | 48 +++++++++++++++++++++--
drivers/gpu/drm/xe/xe_memirq.h | 2 +
6 files changed, 99 insertions(+), 4 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_gt.h b/drivers/gpu/drm/xe/xe_gt.h
index 41880979f4de..5df2ffe3ff83 100644
--- a/drivers/gpu/drm/xe/xe_gt.h
+++ b/drivers/gpu/drm/xe/xe_gt.h
@@ -12,6 +12,7 @@
#include "xe_device.h"
#include "xe_device_types.h"
+#include "xe_gt_sriov_vf.h"
#include "xe_hw_engine.h"
#define for_each_hw_engine(hwe__, gt__, id__) \
@@ -124,4 +125,16 @@ static inline bool xe_gt_is_usm_hwe(struct xe_gt *gt, struct xe_hw_engine *hwe)
hwe->instance == gt->usm.reserved_bcs_instance;
}
+/**
+ * xe_gt_recovery_pending() - GT recovery pending
+ * @gt: the &xe_gt
+ *
+ * Return: True if GT recovery is pending, False otherwise
+ */
+static inline bool xe_gt_recovery_pending(struct xe_gt *gt)
+{
+ return IS_SRIOV_VF(gt_to_xe(gt)) &&
+ xe_gt_sriov_vf_recovery_pending(gt);
+}
+
#endif
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
index 0461d5513487..43cb5fd7b222 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
@@ -26,6 +26,7 @@
#include "xe_guc_hxg_helpers.h"
#include "xe_guc_relay.h"
#include "xe_lrc.h"
+#include "xe_memirq.h"
#include "xe_mmio.h"
#include "xe_sriov.h"
#include "xe_sriov_vf.h"
@@ -776,6 +777,7 @@ void xe_gt_sriov_vf_migrated_event_handler(struct xe_gt *gt)
struct xe_device *xe = gt_to_xe(gt);
xe_gt_assert(gt, IS_SRIOV_VF(xe));
+ xe_gt_assert(gt, xe_gt_sriov_vf_recovery_pending(gt));
set_bit(gt->info.id, &xe->sriov.vf.migration.gt_flags);
/*
@@ -1118,3 +1120,29 @@ void xe_gt_sriov_vf_print_version(struct xe_gt *gt, struct drm_printer *p)
drm_printf(p, "\thandshake:\t%u.%u\n",
pf_version->major, pf_version->minor);
}
+
+/**
+ * xe_gt_sriov_vf_recovery_pending() - VF post migration recovery pending
+ * @gt: the &xe_gt
+ *
+ * The return value of this function must be immediately visible upon vCPU
+ * unhalt and must persist until RESFIX_DONE is issued. This guarantee is
+ * currently implemented only for platforms that support memirq. If non-memirq
+ * platforms begin to support VF migration, this function will need to be
+ * updated accordingly.
+ *
+ * Return: True if VF post migration recovery is pending, False otherwise
+ */
+bool xe_gt_sriov_vf_recovery_pending(struct xe_gt *gt)
+{
+ struct xe_memirq *memirq = >_to_tile(gt)->memirq;
+
+ xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
+
+ /* early detection until recovery starts */
+ if (xe_device_uses_memirq(gt_to_xe(gt)) &&
+ xe_memirq_guc_sw_int_0_irq_pending(memirq, >->uc.guc))
+ return true;
+
+ return READ_ONCE(gt->sriov.vf.migration.recovery_inprogress);
+}
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf.h b/drivers/gpu/drm/xe/xe_gt_sriov_vf.h
index 0af1dc769fe0..b91ae857e983 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_vf.h
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf.h
@@ -25,6 +25,8 @@ void xe_gt_sriov_vf_default_lrcs_hwsp_rebase(struct xe_gt *gt);
int xe_gt_sriov_vf_notify_resfix_done(struct xe_gt *gt);
void xe_gt_sriov_vf_migrated_event_handler(struct xe_gt *gt);
+bool xe_gt_sriov_vf_recovery_pending(struct xe_gt *gt);
+
u32 xe_gt_sriov_vf_gmdid(struct xe_gt *gt);
u16 xe_gt_sriov_vf_guc_ids(struct xe_gt *gt);
u64 xe_gt_sriov_vf_lmem(struct xe_gt *gt);
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h b/drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h
index 298dedf4b009..1dfef60ec044 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h
@@ -46,6 +46,14 @@ struct xe_gt_sriov_vf_runtime {
} *regs;
};
+/**
+ * struct xe_gt_sriov_vf_migration - VF migration data.
+ */
+struct xe_gt_sriov_vf_migration {
+ /** @recovery_inprogress: VF post migration recovery in progress */
+ bool recovery_inprogress;
+};
+
/**
* struct xe_gt_sriov_vf - GT level VF virtualization data.
*/
@@ -58,6 +66,8 @@ struct xe_gt_sriov_vf {
struct xe_gt_sriov_vf_selfconfig self_config;
/** @runtime: runtime data retrieved from the PF. */
struct xe_gt_sriov_vf_runtime runtime;
+ /** @migration: migration data for the VF. */
+ struct xe_gt_sriov_vf_migration migration;
};
#endif
diff --git a/drivers/gpu/drm/xe/xe_memirq.c b/drivers/gpu/drm/xe/xe_memirq.c
index 0affede05820..2ef9d9aab264 100644
--- a/drivers/gpu/drm/xe/xe_memirq.c
+++ b/drivers/gpu/drm/xe/xe_memirq.c
@@ -397,8 +397,9 @@ void xe_memirq_postinstall(struct xe_memirq *memirq)
memirq_set_enable(memirq, true);
}
-static bool memirq_received(struct xe_memirq *memirq, struct iosys_map *vector,
- u16 offset, const char *name)
+static bool __memirq_received(struct xe_memirq *memirq,
+ struct iosys_map *vector, u16 offset,
+ const char *name, bool clear)
{
u8 value;
@@ -408,12 +409,26 @@ static bool memirq_received(struct xe_memirq *memirq, struct iosys_map *vector,
memirq_err_ratelimited(memirq,
"Unexpected memirq value %#x from %s at %u\n",
value, name, offset);
- iosys_map_wr(vector, offset, u8, 0x00);
+ if (clear)
+ iosys_map_wr(vector, offset, u8, 0x00);
}
return value;
}
+static bool memirq_received_noclear(struct xe_memirq *memirq,
+ struct iosys_map *vector,
+ u16 offset, const char *name)
+{
+ return __memirq_received(memirq, vector, offset, name, false);
+}
+
+static bool memirq_received(struct xe_memirq *memirq, struct iosys_map *vector,
+ u16 offset, const char *name)
+{
+ return __memirq_received(memirq, vector, offset, name, true);
+}
+
static void memirq_dispatch_engine(struct xe_memirq *memirq, struct iosys_map *status,
struct xe_hw_engine *hwe)
{
@@ -433,8 +448,16 @@ static void memirq_dispatch_guc(struct xe_memirq *memirq, struct iosys_map *stat
if (memirq_received(memirq, status, ilog2(GUC_INTR_GUC2HOST), name))
xe_guc_irq_handler(guc, GUC_INTR_GUC2HOST);
- if (memirq_received(memirq, status, ilog2(GUC_INTR_SW_INT_0), name))
+ /*
+ * This is a software interrupt that must be cleared after it's consumed
+ * to avoid race conditions where xe_gt_sriov_vf_recovery_pending()
+ * returns false.
+ */
+ if (memirq_received_noclear(memirq, status, ilog2(GUC_INTR_SW_INT_0),
+ name)) {
xe_guc_irq_handler(guc, GUC_INTR_SW_INT_0);
+ iosys_map_wr(status, ilog2(GUC_INTR_SW_INT_0), u8, 0x00);
+ }
}
/**
@@ -459,6 +482,23 @@ void xe_memirq_hwe_handler(struct xe_memirq *memirq, struct xe_hw_engine *hwe)
}
}
+/**
+ * xe_memirq_guc_sw_int_0_irq_pending() - SW_INT_0 IRQ is pending
+ * @memirq: the &xe_memirq
+ * @guc: the &xe_guc to check for IRQ
+ *
+ * Return: True if SW_INT_0 IRQ is pending on @guc, False otherwise
+ */
+bool xe_memirq_guc_sw_int_0_irq_pending(struct xe_memirq *memirq, struct xe_guc *guc)
+{
+ struct xe_gt *gt = guc_to_gt(guc);
+ u32 offset = xe_gt_is_media_type(gt) ? ilog2(INTR_MGUC) : ilog2(INTR_GUC);
+ struct iosys_map map = IOSYS_MAP_INIT_OFFSET(&memirq->status, offset * SZ_16);
+
+ return memirq_received_noclear(memirq, &map, ilog2(GUC_INTR_SW_INT_0),
+ guc_name(guc));
+}
+
/**
* xe_memirq_handler - The `Memory Based Interrupts`_ Handler.
* @memirq: the &xe_memirq
diff --git a/drivers/gpu/drm/xe/xe_memirq.h b/drivers/gpu/drm/xe/xe_memirq.h
index 06130650e9d6..e25d2234ab87 100644
--- a/drivers/gpu/drm/xe/xe_memirq.h
+++ b/drivers/gpu/drm/xe/xe_memirq.h
@@ -25,4 +25,6 @@ void xe_memirq_handler(struct xe_memirq *memirq);
int xe_memirq_init_guc(struct xe_memirq *memirq, struct xe_guc *guc);
+bool xe_memirq_guc_sw_int_0_irq_pending(struct xe_memirq *memirq, struct xe_guc *guc);
+
#endif
--
2.34.1
^ permalink raw reply related [flat|nested] 42+ messages in thread* [PATCH v9 09/34] drm/xe/vf: Make VF recovery run on per-GT worker
2025-10-08 18:04 [PATCH v9 00/34] VF migration redesign Matthew Brost
` (7 preceding siblings ...)
2025-10-08 18:04 ` [PATCH v9 08/34] drm/xe/vf: Add xe_gt_recovery_pending helper Matthew Brost
@ 2025-10-08 18:04 ` Matthew Brost
2025-10-08 18:04 ` [PATCH v9 10/34] drm/xe/vf: Abort H2G sends during VF post-migration recovery Matthew Brost
` (28 subsequent siblings)
37 siblings, 0 replies; 42+ messages in thread
From: Matthew Brost @ 2025-10-08 18:04 UTC (permalink / raw)
To: intel-xe
VF recovery is a per-GT operation, so it makes sense to isolate it to a
per-GT queue. Scheduling this operation on the same worker as the GT
reset and TDR not only aligns with this design but also helps avoid race
conditions, as those operations can also modify the queue state.
v2:
- Fix lockdep splat (Adam)
- Use xe_sriov_vf_migration_supported helper
v3:
- Drop xe_gt_sriov_ prefix for private functions (Michal)
- Drop message in xe_gt_sriov_vf_migration_init_early (Michal)
- Logic rework in vf_post_migration_notify_resfix_done (Michal)
- Rework init sequence layering (Michal)
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Tomasz Lis <tomasz.lis@intel.com>
---
drivers/gpu/drm/xe/xe_gt.c | 6 +
drivers/gpu/drm/xe/xe_gt_sriov_vf.c | 178 +++++++++++++++-
drivers/gpu/drm/xe/xe_gt_sriov_vf.h | 3 +-
drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h | 7 +
drivers/gpu/drm/xe/xe_sriov_vf.c | 240 ----------------------
drivers/gpu/drm/xe/xe_sriov_vf.h | 1 -
drivers/gpu/drm/xe/xe_sriov_vf_types.h | 4 -
7 files changed, 181 insertions(+), 258 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c
index 8fc3a6929d6c..2bdc1cdf24a5 100644
--- a/drivers/gpu/drm/xe/xe_gt.c
+++ b/drivers/gpu/drm/xe/xe_gt.c
@@ -388,6 +388,12 @@ int xe_gt_init_early(struct xe_gt *gt)
return err;
}
+ if (IS_SRIOV_VF(gt_to_xe(gt))) {
+ err = xe_gt_sriov_vf_init_early(gt);
+ if (err)
+ return err;
+ }
+
xe_reg_sr_init(&gt->reg_sr, "GT", gt_to_xe(gt));
err = xe_wa_gt_init(gt);
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
index 43cb5fd7b222..1a2cf58f5dfd 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
@@ -25,11 +25,15 @@
#include "xe_guc.h"
#include "xe_guc_hxg_helpers.h"
#include "xe_guc_relay.h"
+#include "xe_guc_submit.h"
+#include "xe_irq.h"
#include "xe_lrc.h"
#include "xe_memirq.h"
#include "xe_mmio.h"
+#include "xe_pm.h"
#include "xe_sriov.h"
#include "xe_sriov_vf.h"
+#include "xe_tile_sriov_vf.h"
#include "xe_uc_fw.h"
#include "xe_wopcm.h"
@@ -308,13 +312,13 @@ static int guc_action_vf_notify_resfix_done(struct xe_guc *guc)
}
/**
- * xe_gt_sriov_vf_notify_resfix_done - Notify GuC about resource fixups apply completed.
+ * vf_notify_resfix_done - Notify GuC about resource fixups apply completed.
* @gt: the &xe_gt struct instance linked to target GuC
*
* Returns: 0 if the operation completed successfully, or a negative error
* code otherwise.
*/
-int xe_gt_sriov_vf_notify_resfix_done(struct xe_gt *gt)
+static int vf_notify_resfix_done(struct xe_gt *gt)
{
struct xe_guc *guc = &gt->uc.guc;
int err;
@@ -756,7 +760,7 @@ int xe_gt_sriov_vf_connect(struct xe_gt *gt)
* xe_gt_sriov_vf_default_lrcs_hwsp_rebase - Update GGTT references in HWSP of default LRCs.
* @gt: the &xe_gt struct instance
*/
-void xe_gt_sriov_vf_default_lrcs_hwsp_rebase(struct xe_gt *gt)
+static void xe_gt_sriov_vf_default_lrcs_hwsp_rebase(struct xe_gt *gt)
{
struct xe_hw_engine *hwe;
enum xe_hw_engine_id id;
@@ -765,6 +769,26 @@ void xe_gt_sriov_vf_default_lrcs_hwsp_rebase(struct xe_gt *gt)
xe_default_lrc_update_memirq_regs_with_address(hwe);
}
+static void vf_start_migration_recovery(struct xe_gt *gt)
+{
+ bool started;
+
+ xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
+
+ spin_lock(&gt->sriov.vf.migration.lock);
+
+ if (!gt->sriov.vf.migration.recovery_queued) {
+ gt->sriov.vf.migration.recovery_queued = true;
+ WRITE_ONCE(gt->sriov.vf.migration.recovery_inprogress, true);
+
+ started = queue_work(gt->ordered_wq, &gt->sriov.vf.migration.worker);
+ xe_gt_sriov_info(gt, "VF migration recovery %s\n", started ?
+ "scheduled" : "already in progress");
+ }
+
+ spin_unlock(&gt->sriov.vf.migration.lock);
+}
+
/**
* xe_gt_sriov_vf_migrated_event_handler - Start a VF migration recovery,
* or just mark that a GuC is ready for it.
@@ -779,15 +803,8 @@ void xe_gt_sriov_vf_migrated_event_handler(struct xe_gt *gt)
xe_gt_assert(gt, IS_SRIOV_VF(xe));
xe_gt_assert(gt, xe_gt_sriov_vf_recovery_pending(gt));
- set_bit(gt->info.id, &xe->sriov.vf.migration.gt_flags);
- /*
- * We need to be certain that if all flags were set, at least one
- * thread will notice that and schedule the recovery.
- */
- smp_mb__after_atomic();
-
xe_gt_sriov_info(gt, "ready for recovery after migration\n");
- xe_sriov_vf_start_migration_recovery(xe);
+ vf_start_migration_recovery(gt);
}
static bool vf_is_negotiated(struct xe_gt *gt, u16 major, u16 minor)
@@ -1121,6 +1138,145 @@ void xe_gt_sriov_vf_print_version(struct xe_gt *gt, struct drm_printer *p)
pf_version->major, pf_version->minor);
}
+static void vf_post_migration_shutdown(struct xe_gt *gt)
+{
+ int ret = 0;
+
+ spin_lock_irq(&gt->sriov.vf.migration.lock);
+ gt->sriov.vf.migration.recovery_queued = false;
+ spin_unlock_irq(&gt->sriov.vf.migration.lock);
+
+ xe_guc_submit_pause(&gt->uc.guc);
+ ret |= xe_guc_submit_reset_block(&gt->uc.guc);
+
+ if (ret)
+ xe_gt_sriov_info(gt, "migration recovery encountered ongoing reset\n");
+}
+
+static size_t post_migration_scratch_size(struct xe_device *xe)
+{
+ return max(xe_lrc_reg_size(xe), LRC_WA_BB_SIZE);
+}
+
+static int vf_post_migration_fixups(struct xe_gt *gt)
+{
+ s64 shift;
+ void *buf;
+ int err;
+
+ buf = kmalloc(post_migration_scratch_size(gt_to_xe(gt)), GFP_ATOMIC);
+ if (!buf)
+ return -ENOMEM;
+
+ err = xe_gt_sriov_vf_query_config(gt);
+ if (err)
+ goto out;
+
+ shift = xe_gt_sriov_vf_ggtt_shift(gt);
+ if (shift) {
+ xe_tile_sriov_vf_fixup_ggtt_nodes(gt_to_tile(gt), shift);
+ xe_gt_sriov_vf_default_lrcs_hwsp_rebase(gt);
+ err = xe_guc_contexts_hwsp_rebase(&gt->uc.guc, buf);
+ if (err)
+ goto out;
+ }
+
+out:
+ kfree(buf);
+ return err;
+}
+
+static void vf_post_migration_kickstart(struct xe_gt *gt)
+{
+ /*
+ * Make sure interrupts on the new HW are properly set. The GuC IRQ
+ * must be working at this point, since the recovery has started,
+ * but the rest was not enabled using the procedure from spec.
+ */
+ xe_irq_resume(gt_to_xe(gt));
+
+ xe_guc_submit_reset_unblock(&gt->uc.guc);
+ xe_guc_submit_unpause(&gt->uc.guc);
+}
+
+static int vf_post_migration_notify_resfix_done(struct xe_gt *gt)
+{
+ bool skip_resfix = false;
+
+ spin_lock_irq(&gt->sriov.vf.migration.lock);
+ if (gt->sriov.vf.migration.recovery_queued) {
+ skip_resfix = true;
+ xe_gt_sriov_dbg(gt, "another recovery imminent, resfix skipped\n");
+ } else {
+ WRITE_ONCE(gt->sriov.vf.migration.recovery_inprogress, false);
+ }
+ spin_unlock_irq(&gt->sriov.vf.migration.lock);
+
+ if (skip_resfix)
+ return -EAGAIN;
+
+ return vf_notify_resfix_done(gt);
+}
+
+static void vf_post_migration_recovery(struct xe_gt *gt)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+ int err;
+
+ xe_gt_sriov_dbg(gt, "migration recovery in progress\n");
+
+ xe_pm_runtime_get(xe);
+ vf_post_migration_shutdown(gt);
+
+ if (!xe_sriov_vf_migration_supported(xe)) {
+ xe_gt_sriov_err(gt, "migration is not supported\n");
+ err = -ENOTRECOVERABLE;
+ goto fail;
+ }
+
+ err = vf_post_migration_fixups(gt);
+ if (err)
+ goto fail;
+
+ vf_post_migration_kickstart(gt);
+ err = vf_post_migration_notify_resfix_done(gt);
+ if (err && err != -EAGAIN)
+ goto fail;
+
+ xe_pm_runtime_put(xe);
+ xe_gt_sriov_notice(gt, "migration recovery ended\n");
+ return;
+fail:
+ xe_pm_runtime_put(xe);
+ xe_gt_sriov_err(gt, "migration recovery failed (%pe)\n", ERR_PTR(err));
+ xe_device_declare_wedged(xe);
+}
+
+static void migration_worker_func(struct work_struct *w)
+{
+ struct xe_gt *gt = container_of(w, struct xe_gt,
+ sriov.vf.migration.worker);
+
+ vf_post_migration_recovery(gt);
+}
+
+/**
+ * xe_gt_sriov_vf_init_early() - GT VF init early
+ * @gt: the &xe_gt
+ *
+ * Return: 0 on success, errno on failure
+ */
+int xe_gt_sriov_vf_init_early(struct xe_gt *gt)
+{
+ if (!xe_sriov_vf_migration_supported(gt_to_xe(gt)))
+ return 0;
+
+ spin_lock_init(&gt->sriov.vf.migration.lock);
+ INIT_WORK(&gt->sriov.vf.migration.worker, migration_worker_func);
+
+ return 0;
+}
+
/**
* xe_gt_sriov_vf_recovery_pending() - VF post migration recovery pending
* @gt: the &xe_gt
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf.h b/drivers/gpu/drm/xe/xe_gt_sriov_vf.h
index b91ae857e983..0adebf8aa419 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_vf.h
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf.h
@@ -21,10 +21,9 @@ void xe_gt_sriov_vf_guc_versions(struct xe_gt *gt,
int xe_gt_sriov_vf_query_config(struct xe_gt *gt);
int xe_gt_sriov_vf_connect(struct xe_gt *gt);
int xe_gt_sriov_vf_query_runtime(struct xe_gt *gt);
-void xe_gt_sriov_vf_default_lrcs_hwsp_rebase(struct xe_gt *gt);
-int xe_gt_sriov_vf_notify_resfix_done(struct xe_gt *gt);
void xe_gt_sriov_vf_migrated_event_handler(struct xe_gt *gt);
+int xe_gt_sriov_vf_init_early(struct xe_gt *gt);
bool xe_gt_sriov_vf_recovery_pending(struct xe_gt *gt);
u32 xe_gt_sriov_vf_gmdid(struct xe_gt *gt);
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h b/drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h
index 1dfef60ec044..b2c8e8c89c30 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h
@@ -7,6 +7,7 @@
#define _XE_GT_SRIOV_VF_TYPES_H_
#include <linux/types.h>
+#include <linux/workqueue.h>
#include "xe_uc_fw_types.h"
/**
@@ -50,6 +51,12 @@ struct xe_gt_sriov_vf_runtime {
* xe_gt_sriov_vf_migration - VF migration data.
*/
struct xe_gt_sriov_vf_migration {
+ /** @worker: VF migration recovery worker */
+ struct work_struct worker;
+ /** @lock: Protects recovery_queued */
+ spinlock_t lock;
+ /** @recovery_queued: VF post migration recovery in queued */
+ bool recovery_queued;
/** @recovery_inprogress: VF post migration recovery in progress */
bool recovery_inprogress;
};
diff --git a/drivers/gpu/drm/xe/xe_sriov_vf.c b/drivers/gpu/drm/xe/xe_sriov_vf.c
index c1830ec8f0fd..911d5720917b 100644
--- a/drivers/gpu/drm/xe/xe_sriov_vf.c
+++ b/drivers/gpu/drm/xe/xe_sriov_vf.c
@@ -6,21 +6,12 @@
#include <drm/drm_debugfs.h>
#include <drm/drm_managed.h>
-#include "xe_assert.h"
-#include "xe_device.h"
#include "xe_gt.h"
-#include "xe_gt_sriov_printk.h"
#include "xe_gt_sriov_vf.h"
#include "xe_guc.h"
-#include "xe_guc_submit.h"
-#include "xe_irq.h"
-#include "xe_lrc.h"
-#include "xe_pm.h"
-#include "xe_sriov.h"
#include "xe_sriov_printk.h"
#include "xe_sriov_vf.h"
#include "xe_sriov_vf_ccs.h"
-#include "xe_tile_sriov_vf.h"
/**
* DOC: VF restore procedure in PF KMD and VF KMD
@@ -158,8 +149,6 @@ static void vf_disable_migration(struct xe_device *xe, const char *fmt, ...)
xe->sriov.vf.migration.enabled = false;
}
-static void migration_worker_func(struct work_struct *w);
-
static void vf_migration_init_early(struct xe_device *xe)
{
/*
@@ -184,8 +173,6 @@ static void vf_migration_init_early(struct xe_device *xe)
guc_version.major, guc_version.minor);
}
- INIT_WORK(&xe->sriov.vf.migration.worker, migration_worker_func);
-
xe->sriov.vf.migration.enabled = true;
xe_sriov_dbg(xe, "migration support enabled\n");
}
@@ -199,233 +186,6 @@ void xe_sriov_vf_init_early(struct xe_device *xe)
vf_migration_init_early(xe);
}
-/**
- * vf_post_migration_shutdown - Stop the driver activities after VF migration.
- * @xe: the &xe_device struct instance
- *
- * After this VM is migrated and assigned to a new VF, it is running on a new
- * hardware, and therefore many hardware-dependent states and related structures
- * require fixups. Without fixups, the hardware cannot do any work, and therefore
- * all GPU pipelines are stalled.
- * Stop some of kernel activities to make the fixup process faster.
- */
-static void vf_post_migration_shutdown(struct xe_device *xe)
-{
- struct xe_gt *gt;
- unsigned int id;
- int ret = 0;
-
- for_each_gt(gt, xe, id) {
- xe_guc_submit_pause(&gt->uc.guc);
- ret |= xe_guc_submit_reset_block(&gt->uc.guc);
- }
-
- if (ret)
- drm_info(&xe->drm, "migration recovery encountered ongoing reset\n");
-}
-
-/**
- * vf_post_migration_kickstart - Re-start the driver activities under new hardware.
- * @xe: the &xe_device struct instance
- *
- * After we have finished with all post-migration fixups, restart the driver
- * activities to continue feeding the GPU with workloads.
- */
-static void vf_post_migration_kickstart(struct xe_device *xe)
-{
- struct xe_gt *gt;
- unsigned int id;
-
- /*
- * Make sure interrupts on the new HW are properly set. The GuC IRQ
- * must be working at this point, since the recovery did started,
- * but the rest was not enabled using the procedure from spec.
- */
- xe_irq_resume(xe);
-
- for_each_gt(gt, xe, id) {
- xe_guc_submit_reset_unblock(&gt->uc.guc);
- xe_guc_submit_unpause(&gt->uc.guc);
- }
-}
-
-static bool gt_vf_post_migration_needed(struct xe_gt *gt)
-{
- return test_bit(gt->info.id, &gt_to_xe(gt)->sriov.vf.migration.gt_flags);
-}
-
-/*
- * Notify GuCs marked in flags about resource fixups apply finished.
- * @xe: the &xe_device struct instance
- * @gt_flags: flags marking to which GTs the notification shall be sent
- */
-static int vf_post_migration_notify_resfix_done(struct xe_device *xe, unsigned long gt_flags)
-{
- struct xe_gt *gt;
- unsigned int id;
- int err = 0;
-
- for_each_gt(gt, xe, id) {
- if (!test_bit(id, &gt_flags))
- continue;
- /* skip asking GuC for RESFIX exit if new recovery request arrived */
- if (gt_vf_post_migration_needed(gt))
- continue;
- err = xe_gt_sriov_vf_notify_resfix_done(gt);
- if (err)
- break;
- clear_bit(id, >_flags);
- }
-
- if (gt_flags && !err)
- drm_dbg(&xe->drm, "another recovery imminent, skipped some notifications\n");
- return err;
-}
-
-static int vf_get_next_migrated_gt_id(struct xe_device *xe)
-{
- struct xe_gt *gt;
- unsigned int id;
-
- for_each_gt(gt, xe, id) {
- if (test_and_clear_bit(id, &xe->sriov.vf.migration.gt_flags))
- return id;
- }
- return -1;
-}
-
-static size_t post_migration_scratch_size(struct xe_device *xe)
-{
- return max(xe_lrc_reg_size(xe), LRC_WA_BB_SIZE);
-}
-
-/**
- * Perform post-migration fixups on a single GT.
- *
- * After migration, GuC needs to be re-queried for VF configuration to check
- * if it matches previous provisioning. Most of VF provisioning shall be the
- * same, except GGTT range, since GGTT is not virtualized per-VF. If GGTT
- * range has changed, we have to perform fixups - shift all GGTT references
- * used anywhere within the driver. After the fixups in this function succeed,
- * it is allowed to ask the GuC bound to this GT to continue normal operation.
- *
- * Returns: 0 if the operation completed successfully, or a negative error
- * code otherwise.
- */
-static int gt_vf_post_migration_fixups(struct xe_gt *gt)
-{
- s64 shift;
- void *buf;
- int err;
-
- buf = kmalloc(post_migration_scratch_size(gt_to_xe(gt)), GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- err = xe_gt_sriov_vf_query_config(gt);
- if (err)
- goto out;
-
- shift = xe_gt_sriov_vf_ggtt_shift(gt);
- if (shift) {
- xe_tile_sriov_vf_fixup_ggtt_nodes(gt_to_tile(gt), shift);
- xe_gt_sriov_vf_default_lrcs_hwsp_rebase(gt);
- err = xe_guc_contexts_hwsp_rebase(&gt->uc.guc, buf);
- if (err)
- goto out;
- }
-
-out:
- kfree(buf);
- return err;
-}
-
-static void vf_post_migration_recovery(struct xe_device *xe)
-{
- unsigned long fixed_gts = 0;
- int id, err;
-
- drm_dbg(&xe->drm, "migration recovery in progress\n");
- xe_pm_runtime_get(xe);
- vf_post_migration_shutdown(xe);
-
- if (!xe_sriov_vf_migration_supported(xe)) {
- xe_sriov_err(xe, "migration is not supported\n");
- err = -ENOTRECOVERABLE;
- goto fail;
- }
-
- while (id = vf_get_next_migrated_gt_id(xe), id >= 0) {
- struct xe_gt *gt = xe_device_get_gt(xe, id);
-
- err = gt_vf_post_migration_fixups(gt);
- if (err)
- goto fail;
-
- set_bit(id, &fixed_gts);
- }
-
- vf_post_migration_kickstart(xe);
- err = vf_post_migration_notify_resfix_done(xe, fixed_gts);
- if (err)
- goto fail;
-
- xe_pm_runtime_put(xe);
- drm_notice(&xe->drm, "migration recovery ended\n");
- return;
-fail:
- xe_pm_runtime_put(xe);
- drm_err(&xe->drm, "migration recovery failed (%pe)\n", ERR_PTR(err));
- xe_device_declare_wedged(xe);
-}
-
-static void migration_worker_func(struct work_struct *w)
-{
- struct xe_device *xe = container_of(w, struct xe_device,
- sriov.vf.migration.worker);
-
- vf_post_migration_recovery(xe);
-}
-
-/*
- * Check if post-restore recovery is coming on any of GTs.
- * @xe: the &xe_device struct instance
- *
- * Return: True if migration recovery worker will soon be running. Any worker currently
- * executing does not affect the result.
- */
-static bool vf_ready_to_recovery_on_any_gts(struct xe_device *xe)
-{
- struct xe_gt *gt;
- unsigned int id;
-
- for_each_gt(gt, xe, id) {
- if (test_bit(id, &xe->sriov.vf.migration.gt_flags))
- return true;
- }
- return false;
-}
-
-/**
- * xe_sriov_vf_start_migration_recovery - Start VF migration recovery.
- * @xe: the &xe_device to start recovery on
- *
- * This function shall be called only by VF.
- */
-void xe_sriov_vf_start_migration_recovery(struct xe_device *xe)
-{
- bool started;
-
- xe_assert(xe, IS_SRIOV_VF(xe));
-
- if (!vf_ready_to_recovery_on_any_gts(xe))
- return;
-
- started = queue_work(xe->sriov.wq, &xe->sriov.vf.migration.worker);
- drm_info(&xe->drm, "VF migration recovery %s\n", started ?
- "scheduled" : "already in progress");
-}
-
/**
* xe_sriov_vf_init_late() - SR-IOV VF late initialization functions.
* @xe: the &xe_device to initialize
diff --git a/drivers/gpu/drm/xe/xe_sriov_vf.h b/drivers/gpu/drm/xe/xe_sriov_vf.h
index 9e752105ec2a..4df95266b261 100644
--- a/drivers/gpu/drm/xe/xe_sriov_vf.h
+++ b/drivers/gpu/drm/xe/xe_sriov_vf.h
@@ -13,7 +13,6 @@ struct xe_device;
void xe_sriov_vf_init_early(struct xe_device *xe);
int xe_sriov_vf_init_late(struct xe_device *xe);
-void xe_sriov_vf_start_migration_recovery(struct xe_device *xe);
bool xe_sriov_vf_migration_supported(struct xe_device *xe);
void xe_sriov_vf_debugfs_register(struct xe_device *xe, struct dentry *root);
diff --git a/drivers/gpu/drm/xe/xe_sriov_vf_types.h b/drivers/gpu/drm/xe/xe_sriov_vf_types.h
index 426cc5841958..6a0fd0f5463e 100644
--- a/drivers/gpu/drm/xe/xe_sriov_vf_types.h
+++ b/drivers/gpu/drm/xe/xe_sriov_vf_types.h
@@ -33,10 +33,6 @@ struct xe_device_vf {
/** @migration: VF Migration state data */
struct {
- /** @migration.worker: VF migration recovery worker */
- struct work_struct worker;
- /** @migration.gt_flags: Per-GT request flags for VF migration recovery */
- unsigned long gt_flags;
/**
* @migration.enabled: flag indicating if migration support
* was enabled or not due to missing prerequisites
--
2.34.1
^ permalink raw reply related [flat|nested] 42+ messages in thread* [PATCH v9 10/34] drm/xe/vf: Abort H2G sends during VF post-migration recovery
2025-10-08 18:04 [PATCH v9 00/34] VF migration redesign Matthew Brost
` (8 preceding siblings ...)
2025-10-08 18:04 ` [PATCH v9 09/34] drm/xe/vf: Make VF recovery run on per-GT worker Matthew Brost
@ 2025-10-08 18:04 ` Matthew Brost
2025-10-08 18:04 ` [PATCH v9 11/34] drm/xe/vf: Remove memory allocations from VF post migration recovery Matthew Brost
` (27 subsequent siblings)
37 siblings, 0 replies; 42+ messages in thread
From: Matthew Brost @ 2025-10-08 18:04 UTC (permalink / raw)
To: intel-xe
While VF post-migration recovery is in progress, abort H2G sends with
-ECANCEL. These messages are treated as lost, and TLB invalidation
errors are suppressed. During this phase, the H2G channel is down, and
VF recovery requires the CT lock to proceed.
v3:
- Use xe_gt_recovery_inprogress (Michal)
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Michal Wajdeczko <michal.wajdeczko@intel.com>
---
drivers/gpu/drm/xe/xe_guc_ct.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_guc_ct.c b/drivers/gpu/drm/xe/xe_guc_ct.c
index 47079ab9922c..9f0090ae64a6 100644
--- a/drivers/gpu/drm/xe/xe_guc_ct.c
+++ b/drivers/gpu/drm/xe/xe_guc_ct.c
@@ -851,7 +851,7 @@ static int __guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action,
u32 len, u32 g2h_len, u32 num_g2h,
struct g2h_fence *g2h_fence)
{
- struct xe_gt *gt __maybe_unused = ct_to_gt(ct);
+ struct xe_gt *gt = ct_to_gt(ct);
u16 seqno;
int ret;
@@ -872,7 +872,7 @@ static int __guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action,
goto out;
}
- if (ct->state == XE_GUC_CT_STATE_STOPPED) {
+ if (ct->state == XE_GUC_CT_STATE_STOPPED || xe_gt_recovery_pending(gt)) {
ret = -ECANCELED;
goto out;
}
--
2.34.1
^ permalink raw reply related [flat|nested] 42+ messages in thread* [PATCH v9 11/34] drm/xe/vf: Remove memory allocations from VF post migration recovery
2025-10-08 18:04 [PATCH v9 00/34] VF migration redesign Matthew Brost
` (9 preceding siblings ...)
2025-10-08 18:04 ` [PATCH v9 10/34] drm/xe/vf: Abort H2G sends during VF post-migration recovery Matthew Brost
@ 2025-10-08 18:04 ` Matthew Brost
2025-10-08 18:04 ` [PATCH v9 12/34] drm/xe: Move GGTT lock init to alloc Matthew Brost
` (26 subsequent siblings)
37 siblings, 0 replies; 42+ messages in thread
From: Matthew Brost @ 2025-10-08 18:04 UTC (permalink / raw)
To: intel-xe
VF post migration recovery is the path of dma-fence signaling / reclaim,
avoid memory allocations in this path.
v3:
- s/lrc_wa_bb/scratch (Tomasz)
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Tomasz Lis <tomasz.lis@intel.com>
---
drivers/gpu/drm/xe/xe_gt_sriov_vf.c | 23 +++++++++++++----------
drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h | 2 ++
2 files changed, 15 insertions(+), 10 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
index 1a2cf58f5dfd..31a80d77da36 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
@@ -1160,17 +1160,13 @@ static size_t post_migration_scratch_size(struct xe_device *xe)
static int vf_post_migration_fixups(struct xe_gt *gt)
{
+ void *buf = gt->sriov.vf.migration.scratch;
s64 shift;
- void *buf;
int err;
- buf = kmalloc(post_migration_scratch_size(gt_to_xe(gt)), GFP_ATOMIC);
- if (!buf)
- return -ENOMEM;
-
err = xe_gt_sriov_vf_query_config(gt);
if (err)
- goto out;
+ return err;
shift = xe_gt_sriov_vf_ggtt_shift(gt);
if (shift) {
@@ -1178,12 +1174,10 @@ static int vf_post_migration_fixups(struct xe_gt *gt)
xe_gt_sriov_vf_default_lrcs_hwsp_rebase(gt);
+ err = xe_guc_contexts_hwsp_rebase(&gt->uc.guc, buf);
if (err)
- goto out;
+ return err;
}
-out:
- kfree(buf);
- return err;
+ return 0;
}
static void vf_post_migration_kickstart(struct xe_gt *gt)
@@ -1268,9 +1262,18 @@ static void migration_worker_func(struct work_struct *w)
*/
int xe_gt_sriov_vf_init_early(struct xe_gt *gt)
{
+ void *buf;
+
if (!xe_sriov_vf_migration_supported(gt_to_xe(gt)))
return 0;
+ buf = drmm_kmalloc(&gt_to_xe(gt)->drm,
+ post_migration_scratch_size(gt_to_xe(gt)),
+ GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ gt->sriov.vf.migration.scratch = buf;
spin_lock_init(&gt->sriov.vf.migration.lock);
INIT_WORK(&gt->sriov.vf.migration.worker, migration_worker_func);
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h b/drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h
index b2c8e8c89c30..e753646debc4 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h
@@ -55,6 +55,8 @@ struct xe_gt_sriov_vf_migration {
struct work_struct worker;
/** @lock: Protects recovery_queued */
spinlock_t lock;
+ /** @scratch: Scratch memory for VF recovery */
+ void *scratch;
/** @recovery_queued: VF post migration recovery in queued */
bool recovery_queued;
/** @recovery_inprogress: VF post migration recovery in progress */
--
2.34.1
^ permalink raw reply related [flat|nested] 42+ messages in thread* [PATCH v9 12/34] drm/xe: Move GGTT lock init to alloc
2025-10-08 18:04 [PATCH v9 00/34] VF migration redesign Matthew Brost
` (10 preceding siblings ...)
2025-10-08 18:04 ` [PATCH v9 11/34] drm/xe/vf: Remove memory allocations from VF post migration recovery Matthew Brost
@ 2025-10-08 18:04 ` Matthew Brost
2025-10-08 18:04 ` [PATCH v9 13/34] drm/xe/vf: Move LMEM config to tile layer Matthew Brost
` (25 subsequent siblings)
37 siblings, 0 replies; 42+ messages in thread
From: Matthew Brost @ 2025-10-08 18:04 UTC (permalink / raw)
To: intel-xe
The GGTT lock is needed very early during GT initialization for a VF;
move the GGTT lock initialization to the allocation phase.
v8:
- Rework function structure (Michal)
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Michal Wajdeczko <michal.wajdeczko@intel.com>
---
drivers/gpu/drm/xe/xe_ggtt.c | 39 +++++++++++++++++++++---------------
1 file changed, 23 insertions(+), 16 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_ggtt.c b/drivers/gpu/drm/xe/xe_ggtt.c
index 7fdd0a97a628..aca7ae5489b9 100644
--- a/drivers/gpu/drm/xe/xe_ggtt.c
+++ b/drivers/gpu/drm/xe/xe_ggtt.c
@@ -159,6 +159,16 @@ static void xe_ggtt_clear(struct xe_ggtt *ggtt, u64 start, u64 size)
}
}
+static void primelockdep(struct xe_ggtt *ggtt)
+{
+ if (!IS_ENABLED(CONFIG_LOCKDEP))
+ return;
+
+ fs_reclaim_acquire(GFP_KERNEL);
+ might_lock(&ggtt->lock);
+ fs_reclaim_release(GFP_KERNEL);
+}
+
/**
* xe_ggtt_alloc - Allocate a GGTT for a given &xe_tile
* @tile: &xe_tile
@@ -169,9 +179,19 @@ static void xe_ggtt_clear(struct xe_ggtt *ggtt, u64 start, u64 size)
*/
struct xe_ggtt *xe_ggtt_alloc(struct xe_tile *tile)
{
- struct xe_ggtt *ggtt = drmm_kzalloc(&tile_to_xe(tile)->drm, sizeof(*ggtt), GFP_KERNEL);
- if (ggtt)
- ggtt->tile = tile;
+ struct xe_device *xe = tile_to_xe(tile);
+ struct xe_ggtt *ggtt;
+
+ ggtt = drmm_kzalloc(&xe->drm, sizeof(*ggtt), GFP_KERNEL);
+ if (!ggtt)
+ return NULL;
+
+ if (drmm_mutex_init(&xe->drm, &ggtt->lock))
+ return NULL;
+
+ primelockdep(ggtt);
+ ggtt->tile = tile;
+
return ggtt;
}
@@ -180,7 +200,6 @@ static void ggtt_fini_early(struct drm_device *drm, void *arg)
struct xe_ggtt *ggtt = arg;
destroy_workqueue(ggtt->wq);
- mutex_destroy(&ggtt->lock);
drm_mm_takedown(&ggtt->mm);
}
@@ -198,16 +217,6 @@ void xe_ggtt_might_lock(struct xe_ggtt *ggtt)
}
#endif
-static void primelockdep(struct xe_ggtt *ggtt)
-{
- if (!IS_ENABLED(CONFIG_LOCKDEP))
- return;
-
- fs_reclaim_acquire(GFP_KERNEL);
- might_lock(&ggtt->lock);
- fs_reclaim_release(GFP_KERNEL);
-}
-
static const struct xe_ggtt_pt_ops xelp_pt_ops = {
.pte_encode_flags = xelp_ggtt_pte_flags,
.ggtt_set_pte = xe_ggtt_set_pte,
@@ -227,8 +236,6 @@ static void __xe_ggtt_init_early(struct xe_ggtt *ggtt, u32 reserved)
{
drm_mm_init(&ggtt->mm, reserved,
ggtt->size - reserved);
- mutex_init(&ggtt->lock);
- primelockdep(ggtt);
}
int xe_ggtt_init_kunit(struct xe_ggtt *ggtt, u32 reserved, u32 size)
--
2.34.1
^ permalink raw reply related [flat|nested] 42+ messages in thread* [PATCH v9 13/34] drm/xe/vf: Move LMEM config to tile layer
2025-10-08 18:04 [PATCH v9 00/34] VF migration redesign Matthew Brost
` (11 preceding siblings ...)
2025-10-08 18:04 ` [PATCH v9 12/34] drm/xe: Move GGTT lock init to alloc Matthew Brost
@ 2025-10-08 18:04 ` Matthew Brost
2025-10-08 20:36 ` Michal Wajdeczko
2025-10-08 18:04 ` [PATCH v9 14/34] drm/xe/vf: Close multi-GT GGTT shift race Matthew Brost
` (24 subsequent siblings)
37 siblings, 1 reply; 42+ messages in thread
From: Matthew Brost @ 2025-10-08 18:04 UTC (permalink / raw)
To: intel-xe
The LMEM VF provision is tile-layer-specific information. Move the LMEM
configuration to the tile layer accordingly.
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
---
drivers/gpu/drm/xe/xe_device_types.h | 3 ++
drivers/gpu/drm/xe/xe_gt_sriov_vf.c | 36 +++++++--------------
drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h | 2 --
drivers/gpu/drm/xe/xe_tile_sriov_vf.c | 33 +++++++++++++++++++
drivers/gpu/drm/xe/xe_tile_sriov_vf.h | 2 ++
drivers/gpu/drm/xe/xe_tile_sriov_vf_types.h | 19 +++++++++++
drivers/gpu/drm/xe/xe_vram.c | 6 ++--
7 files changed, 71 insertions(+), 30 deletions(-)
create mode 100644 drivers/gpu/drm/xe/xe_tile_sriov_vf_types.h
diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h
index 1d2718b70a5c..c66523bf4bf0 100644
--- a/drivers/gpu/drm/xe/xe_device_types.h
+++ b/drivers/gpu/drm/xe/xe_device_types.h
@@ -27,6 +27,7 @@
#include "xe_sriov_vf_ccs_types.h"
#include "xe_step_types.h"
#include "xe_survivability_mode_types.h"
+#include "xe_tile_sriov_vf_types.h"
#include "xe_validation.h"
#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
@@ -193,6 +194,8 @@ struct xe_tile {
struct {
/** @sriov.vf.ggtt_balloon: GGTT regions excluded from use. */
struct xe_ggtt_node *ggtt_balloon[2];
+ /** @sriov.vf.self_config: VF configuration data */
+ struct xe_tile_sriov_vf_selfconfig self_config;
} vf;
} sriov;
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
index 31a80d77da36..6f3d9bc5ed22 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
@@ -471,10 +471,10 @@ static int vf_get_ggtt_info(struct xe_gt *gt)
static int vf_get_lmem_info(struct xe_gt *gt)
{
- struct xe_gt_sriov_vf_selfconfig *config = &gt->sriov.vf.self_config;
+ struct xe_tile *tile = gt_to_tile(gt);
struct xe_guc *guc = &gt->uc.guc;
char size_str[10];
- u64 size;
+ u64 size, lmem_size;
int err;
xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
@@ -483,18 +483,19 @@ static int vf_get_lmem_info(struct xe_gt *gt)
if (unlikely(err))
return err;
- if (config->lmem_size && config->lmem_size != size) {
+ lmem_size = xe_tile_sriov_vf_lmem(tile);
+ if (lmem_size && lmem_size != size) {
xe_gt_sriov_err(gt, "Unexpected LMEM reassignment: %lluM != %lluM\n",
- size / SZ_1M, config->lmem_size / SZ_1M);
+ size / SZ_1M, lmem_size / SZ_1M);
return -EREMCHG;
}
string_get_size(size, 1, STRING_UNITS_2, size_str, sizeof(size_str));
xe_gt_sriov_dbg_verbose(gt, "LMEM %lluM %s\n", size / SZ_1M, size_str);
- config->lmem_size = size;
+ xe_tile_sriov_vf_lmem_store(tile, size);
- return config->lmem_size ? 0 : -ENODATA;
+ return size ? 0 : -ENODATA;
}
static int vf_get_submission_cfg(struct xe_gt *gt)
@@ -591,23 +592,6 @@ u16 xe_gt_sriov_vf_guc_ids(struct xe_gt *gt)
return gt->sriov.vf.self_config.num_ctxs;
}
-/**
- * xe_gt_sriov_vf_lmem - VF LMEM configuration.
- * @gt: the &xe_gt
- *
- * This function is for VF use only.
- *
- * Return: size of the LMEM assigned to VF.
- */
-u64 xe_gt_sriov_vf_lmem(struct xe_gt *gt)
-{
- xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
- xe_gt_assert(gt, gt->sriov.vf.guc_version.major);
- xe_gt_assert(gt, gt->sriov.vf.self_config.lmem_size);
-
- return gt->sriov.vf.self_config.lmem_size;
-}
-
/**
* xe_gt_sriov_vf_ggtt - VF GGTT configuration.
* @gt: the &xe_gt
@@ -1059,6 +1043,7 @@ void xe_gt_sriov_vf_print_config(struct xe_gt *gt, struct drm_printer *p)
{
struct xe_gt_sriov_vf_selfconfig *config = &gt->sriov.vf.self_config;
struct xe_device *xe = gt_to_xe(gt);
+ u64 lmem_size;
char buf[10];
xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
@@ -1072,9 +1057,10 @@ void xe_gt_sriov_vf_print_config(struct xe_gt *gt, struct drm_printer *p)
drm_printf(p, "GGTT shift on last restore:\t%lld\n", config->ggtt_shift);
+ lmem_size = xe_tile_sriov_vf_lmem(gt_to_tile(gt));
if (IS_DGFX(xe) && xe_gt_is_main_type(gt)) {
- string_get_size(config->lmem_size, 1, STRING_UNITS_2, buf, sizeof(buf));
- drm_printf(p, "LMEM size:\t%llu (%s)\n", config->lmem_size, buf);
+ string_get_size(lmem_size, 1, STRING_UNITS_2, buf, sizeof(buf));
+ drm_printf(p, "LMEM size:\t%llu (%s)\n", lmem_size, buf);
}
drm_printf(p, "GuC contexts:\t%u\n", config->num_ctxs);
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h b/drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h
index e753646debc4..aff76051c9bb 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h
@@ -20,8 +20,6 @@ struct xe_gt_sriov_vf_selfconfig {
u64 ggtt_size;
/** @ggtt_shift: difference in ggtt_base on last migration */
s64 ggtt_shift;
- /** @lmem_size: assigned size of the LMEM. */
- u64 lmem_size;
/** @num_ctxs: assigned number of GuC submission context IDs. */
u16 num_ctxs;
/** @num_dbs: assigned number of GuC doorbells IDs. */
diff --git a/drivers/gpu/drm/xe/xe_tile_sriov_vf.c b/drivers/gpu/drm/xe/xe_tile_sriov_vf.c
index f221dbed16f0..02430a53da9f 100644
--- a/drivers/gpu/drm/xe/xe_tile_sriov_vf.c
+++ b/drivers/gpu/drm/xe/xe_tile_sriov_vf.c
@@ -252,3 +252,36 @@ void xe_tile_sriov_vf_fixup_ggtt_nodes(struct xe_tile *tile, s64 shift)
mutex_unlock(&ggtt->lock);
}
+
+/**
+ * xe_tile_sriov_vf_lmem - VF LMEM configuration.
+ * @tile: the &xe_tile
+ *
+ * This function is for VF use only.
+ *
+ * Return: size of the LMEM assigned to VF.
+ */
+u64 xe_tile_sriov_vf_lmem(struct xe_tile *tile)
+{
+ struct xe_tile_sriov_vf_selfconfig *config = &tile->sriov.vf.self_config;
+
+ xe_tile_assert(tile, IS_SRIOV_VF(tile_to_xe(tile)));
+
+ return config->lmem_size;
+}
+
+/**
+ * xe_tile_sriov_vf_lmem_store - Store VF LMEM configuration
+ * @tile: the &xe_tile
+ * @lmem_size: VF LMEM size to store
+ *
+ * This function is for VF use only.
+ */
+void xe_tile_sriov_vf_lmem_store(struct xe_tile *tile, u64 lmem_size)
+{
+ struct xe_tile_sriov_vf_selfconfig *config = &tile->sriov.vf.self_config;
+
+ xe_tile_assert(tile, IS_SRIOV_VF(tile_to_xe(tile)));
+
+ config->lmem_size = lmem_size;
+}
diff --git a/drivers/gpu/drm/xe/xe_tile_sriov_vf.h b/drivers/gpu/drm/xe/xe_tile_sriov_vf.h
index 93eb043171e8..86d750a57530 100644
--- a/drivers/gpu/drm/xe/xe_tile_sriov_vf.h
+++ b/drivers/gpu/drm/xe/xe_tile_sriov_vf.h
@@ -14,5 +14,7 @@ int xe_tile_sriov_vf_prepare_ggtt(struct xe_tile *tile);
int xe_tile_sriov_vf_balloon_ggtt_locked(struct xe_tile *tile);
void xe_tile_sriov_vf_deballoon_ggtt_locked(struct xe_tile *tile);
void xe_tile_sriov_vf_fixup_ggtt_nodes(struct xe_tile *tile, s64 shift);
+u64 xe_tile_sriov_vf_lmem(struct xe_tile *tile);
+void xe_tile_sriov_vf_lmem_store(struct xe_tile *tile, u64 lmem_size);
#endif
diff --git a/drivers/gpu/drm/xe/xe_tile_sriov_vf_types.h b/drivers/gpu/drm/xe/xe_tile_sriov_vf_types.h
new file mode 100644
index 000000000000..f49afa8338f1
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_tile_sriov_vf_types.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _XE_TILE_SRIOV_VF_TYPES_H_
+#define _XE_TILE_SRIOV_VF_TYPES_H_
+
+#include <linux/mutex.h>
+
+/**
+ * struct xe_tile_sriov_vf_selfconfig - VF configuration data.
+ */
+struct xe_tile_sriov_vf_selfconfig {
+ /** @lmem_size: assigned size of the LMEM. */
+ u64 lmem_size;
+};
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_vram.c b/drivers/gpu/drm/xe/xe_vram.c
index 7adfccf68e4c..70bcbb188867 100644
--- a/drivers/gpu/drm/xe/xe_vram.c
+++ b/drivers/gpu/drm/xe/xe_vram.c
@@ -17,10 +17,10 @@
#include "xe_device.h"
#include "xe_force_wake.h"
#include "xe_gt_mcr.h"
-#include "xe_gt_sriov_vf.h"
#include "xe_mmio.h"
#include "xe_module.h"
#include "xe_sriov.h"
+#include "xe_tile_sriov_vf.h"
#include "xe_ttm_vram_mgr.h"
#include "xe_vram.h"
#include "xe_vram_types.h"
@@ -238,9 +238,9 @@ static int tile_vram_size(struct xe_tile *tile, u64 *vram_size,
offset = 0;
for_each_tile(t, xe, id)
for_each_if(t->id < tile->id)
- offset += xe_gt_sriov_vf_lmem(t->primary_gt);
+ offset += xe_tile_sriov_vf_lmem(t);
- *tile_size = xe_gt_sriov_vf_lmem(gt);
+ *tile_size = xe_tile_sriov_vf_lmem(tile);
*vram_size = *tile_size;
*tile_offset = offset;
--
2.34.1
^ permalink raw reply related [flat|nested] 42+ messages in thread* Re: [PATCH v9 13/34] drm/xe/vf: Move LMEM config to tile layer
2025-10-08 18:04 ` [PATCH v9 13/34] drm/xe/vf: Move LMEM config to tile layer Matthew Brost
@ 2025-10-08 20:36 ` Michal Wajdeczko
0 siblings, 0 replies; 42+ messages in thread
From: Michal Wajdeczko @ 2025-10-08 20:36 UTC (permalink / raw)
To: Matthew Brost, intel-xe
On 10/8/2025 8:04 PM, Matthew Brost wrote:
> The LMEM VF provision is tile-layer-specific information. Move the LMEM
> configuration to the tile layer accordingly.
>
> Signed-off-by: Matthew Brost <matthew.brost@intel.com>
> ---
> drivers/gpu/drm/xe/xe_device_types.h | 3 ++
> drivers/gpu/drm/xe/xe_gt_sriov_vf.c | 36 +++++++--------------
> drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h | 2 --
> drivers/gpu/drm/xe/xe_tile_sriov_vf.c | 33 +++++++++++++++++++
> drivers/gpu/drm/xe/xe_tile_sriov_vf.h | 2 ++
> drivers/gpu/drm/xe/xe_tile_sriov_vf_types.h | 19 +++++++++++
> drivers/gpu/drm/xe/xe_vram.c | 6 ++--
> 7 files changed, 71 insertions(+), 30 deletions(-)
> create mode 100644 drivers/gpu/drm/xe/xe_tile_sriov_vf_types.h
>
> diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h
> index 1d2718b70a5c..c66523bf4bf0 100644
> --- a/drivers/gpu/drm/xe/xe_device_types.h
> +++ b/drivers/gpu/drm/xe/xe_device_types.h
> @@ -27,6 +27,7 @@
> #include "xe_sriov_vf_ccs_types.h"
> #include "xe_step_types.h"
> #include "xe_survivability_mode_types.h"
> +#include "xe_tile_sriov_vf_types.h"
> #include "xe_validation.h"
>
> #if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
> @@ -193,6 +194,8 @@ struct xe_tile {
> struct {
> /** @sriov.vf.ggtt_balloon: GGTT regions excluded from use. */
> struct xe_ggtt_node *ggtt_balloon[2];
> + /** @sriov.vf.self_config: VF configuration data */
> + struct xe_tile_sriov_vf_selfconfig self_config;
> } vf;
> } sriov;
>
> diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
> index 31a80d77da36..6f3d9bc5ed22 100644
> --- a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
> +++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
> @@ -471,10 +471,10 @@ static int vf_get_ggtt_info(struct xe_gt *gt)
>
> static int vf_get_lmem_info(struct xe_gt *gt)
> {
> - struct xe_gt_sriov_vf_selfconfig *config = &gt->sriov.vf.self_config;
> + struct xe_tile *tile = gt_to_tile(gt);
> struct xe_guc *guc = &gt->uc.guc;
> char size_str[10];
> - u64 size;
> + u64 size, lmem_size;
> int err;
>
> xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
> @@ -483,18 +483,19 @@ static int vf_get_lmem_info(struct xe_gt *gt)
> if (unlikely(err))
> return err;
>
> - if (config->lmem_size && config->lmem_size != size) {
> + lmem_size = xe_tile_sriov_vf_lmem(tile);
> + if (lmem_size && lmem_size != size) {
> xe_gt_sriov_err(gt, "Unexpected LMEM reassignment: %lluM != %lluM\n",
> - size / SZ_1M, config->lmem_size / SZ_1M);
> + size / SZ_1M, lmem_size / SZ_1M);
> return -EREMCHG;
> }
>
> string_get_size(size, 1, STRING_UNITS_2, size_str, sizeof(size_str));
> xe_gt_sriov_dbg_verbose(gt, "LMEM %lluM %s\n", size / SZ_1M, size_str);
>
> - config->lmem_size = size;
> + xe_tile_sriov_vf_lmem_store(tile, size);
>
> - return config->lmem_size ? 0 : -ENODATA;
> + return size ? 0 : -ENODATA;
> }
>
> static int vf_get_submission_cfg(struct xe_gt *gt)
> @@ -591,23 +592,6 @@ u16 xe_gt_sriov_vf_guc_ids(struct xe_gt *gt)
> return gt->sriov.vf.self_config.num_ctxs;
> }
>
> -/**
> - * xe_gt_sriov_vf_lmem - VF LMEM configuration.
> - * @gt: the &xe_gt
> - *
> - * This function is for VF use only.
> - *
> - * Return: size of the LMEM assigned to VF.
> - */
> -u64 xe_gt_sriov_vf_lmem(struct xe_gt *gt)
> -{
> - xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
> - xe_gt_assert(gt, gt->sriov.vf.guc_version.major);
> - xe_gt_assert(gt, gt->sriov.vf.self_config.lmem_size);
> -
> - return gt->sriov.vf.self_config.lmem_size;
> -}
> -
> /**
> * xe_gt_sriov_vf_ggtt - VF GGTT configuration.
> * @gt: the &xe_gt
> @@ -1059,6 +1043,7 @@ void xe_gt_sriov_vf_print_config(struct xe_gt *gt, struct drm_printer *p)
> {
> struct xe_gt_sriov_vf_selfconfig *config = &gt->sriov.vf.self_config;
> struct xe_device *xe = gt_to_xe(gt);
> + u64 lmem_size;
> char buf[10];
>
> xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
> @@ -1072,9 +1057,10 @@ void xe_gt_sriov_vf_print_config(struct xe_gt *gt, struct drm_printer *p)
>
> drm_printf(p, "GGTT shift on last restore:\t%lld\n", config->ggtt_shift);
>
> + lmem_size = xe_tile_sriov_vf_lmem(gt_to_tile(gt));
that should be under below "if"
> if (IS_DGFX(xe) && xe_gt_is_main_type(gt)) {
> - string_get_size(config->lmem_size, 1, STRING_UNITS_2, buf, sizeof(buf));
> - drm_printf(p, "LMEM size:\t%llu (%s)\n", config->lmem_size, buf);
> + string_get_size(lmem_size, 1, STRING_UNITS_2, buf, sizeof(buf));
> + drm_printf(p, "LMEM size:\t%llu (%s)\n", lmem_size, buf);
> }
>
> drm_printf(p, "GuC contexts:\t%u\n", config->num_ctxs);
> diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h b/drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h
> index e753646debc4..aff76051c9bb 100644
> --- a/drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h
> +++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h
> @@ -20,8 +20,6 @@ struct xe_gt_sriov_vf_selfconfig {
> u64 ggtt_size;
> /** @ggtt_shift: difference in ggtt_base on last migration */
> s64 ggtt_shift;
> - /** @lmem_size: assigned size of the LMEM. */
> - u64 lmem_size;
> /** @num_ctxs: assigned number of GuC submission context IDs. */
> u16 num_ctxs;
> /** @num_dbs: assigned number of GuC doorbells IDs. */
> diff --git a/drivers/gpu/drm/xe/xe_tile_sriov_vf.c b/drivers/gpu/drm/xe/xe_tile_sriov_vf.c
> index f221dbed16f0..02430a53da9f 100644
> --- a/drivers/gpu/drm/xe/xe_tile_sriov_vf.c
> +++ b/drivers/gpu/drm/xe/xe_tile_sriov_vf.c
> @@ -252,3 +252,36 @@ void xe_tile_sriov_vf_fixup_ggtt_nodes(struct xe_tile *tile, s64 shift)
>
> mutex_unlock(&ggtt->lock);
> }
> +
> +/**
> + * xe_tile_sriov_vf_lmem - VF LMEM configuration.
> + * @tile: the &xe_tile
> + *
> + * This function is for VF use only.
> + *
> + * Return: size of the LMEM assigned to VF.
> + */
> +u64 xe_tile_sriov_vf_lmem(struct xe_tile *tile)
> +{
> + struct xe_tile_sriov_vf_selfconfig *config = &tile->sriov.vf.self_config;
> +
> + xe_tile_assert(tile, IS_SRIOV_VF(tile_to_xe(tile)));
> +
> + return config->lmem_size;
> +}
> +
> +/**
> + * xe_tile_sriov_vf_lmem_store - Store VF LMEM configuration
> + * @tile: the &xe_tile
> + * @lmem_size: VF LMEM size to store
> + *
> + * This function is for VF use only.
> + */
> +void xe_tile_sriov_vf_lmem_store(struct xe_tile *tile, u64 lmem_size)
> +{
> + struct xe_tile_sriov_vf_selfconfig *config = &tile->sriov.vf.self_config;
> +
> + xe_tile_assert(tile, IS_SRIOV_VF(tile_to_xe(tile)));
> +
> + config->lmem_size = lmem_size;
> +}
> diff --git a/drivers/gpu/drm/xe/xe_tile_sriov_vf.h b/drivers/gpu/drm/xe/xe_tile_sriov_vf.h
> index 93eb043171e8..86d750a57530 100644
> --- a/drivers/gpu/drm/xe/xe_tile_sriov_vf.h
> +++ b/drivers/gpu/drm/xe/xe_tile_sriov_vf.h
> @@ -14,5 +14,7 @@ int xe_tile_sriov_vf_prepare_ggtt(struct xe_tile *tile);
> int xe_tile_sriov_vf_balloon_ggtt_locked(struct xe_tile *tile);
> void xe_tile_sriov_vf_deballoon_ggtt_locked(struct xe_tile *tile);
> void xe_tile_sriov_vf_fixup_ggtt_nodes(struct xe_tile *tile, s64 shift);
> +u64 xe_tile_sriov_vf_lmem(struct xe_tile *tile);
> +void xe_tile_sriov_vf_lmem_store(struct xe_tile *tile, u64 lmem_size);
>
> #endif
> diff --git a/drivers/gpu/drm/xe/xe_tile_sriov_vf_types.h b/drivers/gpu/drm/xe/xe_tile_sriov_vf_types.h
> new file mode 100644
> index 000000000000..f49afa8338f1
> --- /dev/null
> +++ b/drivers/gpu/drm/xe/xe_tile_sriov_vf_types.h
> @@ -0,0 +1,19 @@
> +/* SPDX-License-Identifier: MIT */
> +/*
> + * Copyright © 2025 Intel Corporation
> + */
> +
> +#ifndef _XE_TILE_SRIOV_VF_TYPES_H_
> +#define _XE_TILE_SRIOV_VF_TYPES_H_
> +
> +#include <linux/mutex.h>
that's not needed
it should be <linux/types.h> instead
> +
> +/**
> + * struct xe_tile_sriov_vf_selfconfig - VF configuration data.
> + */
> +struct xe_tile_sriov_vf_selfconfig {
> + /** @lmem_size: assigned size of the LMEM. */
> + u64 lmem_size;
> +};
> +
> +#endif
> diff --git a/drivers/gpu/drm/xe/xe_vram.c b/drivers/gpu/drm/xe/xe_vram.c
> index 7adfccf68e4c..70bcbb188867 100644
> --- a/drivers/gpu/drm/xe/xe_vram.c
> +++ b/drivers/gpu/drm/xe/xe_vram.c
> @@ -17,10 +17,10 @@
> #include "xe_device.h"
> #include "xe_force_wake.h"
> #include "xe_gt_mcr.h"
> -#include "xe_gt_sriov_vf.h"
> #include "xe_mmio.h"
> #include "xe_module.h"
> #include "xe_sriov.h"
> +#include "xe_tile_sriov_vf.h"
> #include "xe_ttm_vram_mgr.h"
> #include "xe_vram.h"
> #include "xe_vram_types.h"
> @@ -238,9 +238,9 @@ static int tile_vram_size(struct xe_tile *tile, u64 *vram_size,
> offset = 0;
> for_each_tile(t, xe, id)
> for_each_if(t->id < tile->id)
> - offset += xe_gt_sriov_vf_lmem(t->primary_gt);
> + offset += xe_tile_sriov_vf_lmem(t);
>
> - *tile_size = xe_gt_sriov_vf_lmem(gt);
> + *tile_size = xe_tile_sriov_vf_lmem(tile);
> *vram_size = *tile_size;
> *tile_offset = offset;
>
two small nits, but LGTM
Reviewed-by: Michal Wajdeczko <michal.wajdeczko@intel.com>
^ permalink raw reply [flat|nested] 42+ messages in thread
* [PATCH v9 14/34] drm/xe/vf: Close multi-GT GGTT shift race
2025-10-08 18:04 [PATCH v9 00/34] VF migration redesign Matthew Brost
` (12 preceding siblings ...)
2025-10-08 18:04 ` [PATCH v9 13/34] drm/xe/vf: Move LMEM config to tile layer Matthew Brost
@ 2025-10-08 18:04 ` Matthew Brost
2025-10-08 21:11 ` Michal Wajdeczko
2025-10-08 18:04 ` [PATCH v9 15/34] drm/xe/vf: Teardown VF post migration worker on driver unload Matthew Brost
` (23 subsequent siblings)
37 siblings, 1 reply; 42+ messages in thread
From: Matthew Brost @ 2025-10-08 18:04 UTC (permalink / raw)
To: intel-xe
As multi-GT VF post-migration recovery can run in parallel on different
workqueues, but both GTs point to the same GGTT, only one GT needs to
shift the GGTT. However, both GTs need to know when this step has
completed. To coordinate this, perform the GGTT shift under the GGTT
lock. With shift being done under the lock, storing the shift value
becomes unnecessary.
In addition to above, move the GGTT VF config from the GT to the tile.
v3:
- Update commit message (Tomasz)
v4:
- Move GGTT values to tile state (Michal)
- Use GGTT lock (Michal)
v5:
- Only take GGTT lock during recovery (CI)
- Drop goto in vf_get_submission_cfg (Michal)
- Add kernel doc around recovery in xe_gt_sriov_vf_query_config (Michal)
v7:
- Drop recovery variable (Michal)
- Use _locked naming (Michal)
- Use guard (Michal)
v9:
- Break LMEM changes into different patch (Michal)
- Fix layering (Michal)
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
---
drivers/gpu/drm/xe/xe_gt_sriov_vf.c | 112 ++++++--------------
drivers/gpu/drm/xe/xe_gt_sriov_vf.h | 3 -
drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h | 6 --
drivers/gpu/drm/xe/xe_tile_sriov_vf.c | 81 ++++++++++++--
drivers/gpu/drm/xe/xe_tile_sriov_vf.h | 7 +-
drivers/gpu/drm/xe/xe_tile_sriov_vf_types.h | 4 +
6 files changed, 116 insertions(+), 97 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
index 6f3d9bc5ed22..aeadeb29d5ea 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
@@ -438,13 +438,17 @@ u32 xe_gt_sriov_vf_gmdid(struct xe_gt *gt)
static int vf_get_ggtt_info(struct xe_gt *gt)
{
- struct xe_gt_sriov_vf_selfconfig *config = &gt->sriov.vf.self_config;
+ struct xe_tile *tile = gt_to_tile(gt);
+ struct xe_ggtt *ggtt = tile->mem.ggtt;
struct xe_guc *guc = &gt->uc.guc;
- u64 start, size;
+ u64 start, size, ggtt_size;
+ s64 shift;
int err;
xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
+ guard(mutex)(&ggtt->lock);
+
err = guc_action_query_single_klv64(guc, GUC_KLV_VF_CFG_GGTT_START_KEY, &start);
if (unlikely(err))
return err;
@@ -453,20 +457,28 @@ static int vf_get_ggtt_info(struct xe_gt *gt)
if (unlikely(err))
return err;
- if (config->ggtt_size && config->ggtt_size != size) {
+ ggtt_size = xe_tile_sriov_vf_ggtt(tile);
+ if (ggtt_size && ggtt_size != size) {
xe_gt_sriov_err(gt, "Unexpected GGTT reassignment: %lluK != %lluK\n",
- size / SZ_1K, config->ggtt_size / SZ_1K);
+ size / SZ_1K, ggtt_size / SZ_1K);
return -EREMCHG;
}
xe_gt_sriov_dbg_verbose(gt, "GGTT %#llx-%#llx = %lluK\n",
start, start + size - 1, size / SZ_1K);
- config->ggtt_shift = start - (s64)config->ggtt_base;
- config->ggtt_base = start;
- config->ggtt_size = size;
+ shift = start - (s64)xe_tile_sriov_vf_ggtt_base(tile);
+ xe_tile_sriov_vf_ggtt_base_store(tile, start);
+ xe_tile_sriov_vf_ggtt_store(tile, size);
- return config->ggtt_size ? 0 : -ENODATA;
+ err = size ? 0 : -ENODATA;
+ if (!err && shift && shift != start) {
+ xe_gt_sriov_info(gt, "Shifting GGTT base by %lld to 0x%016llx\n",
+ shift, start);
+ xe_tile_sriov_vf_fixup_ggtt_nodes_locked(gt_to_tile(gt), shift);
+ }
+
+ return err;
}
static int vf_get_lmem_info(struct xe_gt *gt)
@@ -546,7 +558,9 @@ static void vf_cache_gmdid(struct xe_gt *gt)
* xe_gt_sriov_vf_query_config - Query SR-IOV config data over MMIO.
* @gt: the &xe_gt
*
- * This function is for VF use only.
+ * This function is for VF use only. This function may shift the GGTT and is
+ * performed under GGTT lock, making this step visible to all GTs that share a
+ * GGTT.
*
* Return: 0 on success or a negative error code on failure.
*/
@@ -592,58 +606,6 @@ u16 xe_gt_sriov_vf_guc_ids(struct xe_gt *gt)
return gt->sriov.vf.self_config.num_ctxs;
}
-/**
- * xe_gt_sriov_vf_ggtt - VF GGTT configuration.
- * @gt: the &xe_gt
- *
- * This function is for VF use only.
- *
- * Return: size of the GGTT assigned to VF.
- */
-u64 xe_gt_sriov_vf_ggtt(struct xe_gt *gt)
-{
- xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
- xe_gt_assert(gt, gt->sriov.vf.guc_version.major);
- xe_gt_assert(gt, gt->sriov.vf.self_config.ggtt_size);
-
- return gt->sriov.vf.self_config.ggtt_size;
-}
-
-/**
- * xe_gt_sriov_vf_ggtt_base - VF GGTT base offset.
- * @gt: the &xe_gt
- *
- * This function is for VF use only.
- *
- * Return: base offset of the GGTT assigned to VF.
- */
-u64 xe_gt_sriov_vf_ggtt_base(struct xe_gt *gt)
-{
- xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
- xe_gt_assert(gt, gt->sriov.vf.guc_version.major);
- xe_gt_assert(gt, gt->sriov.vf.self_config.ggtt_size);
-
- return gt->sriov.vf.self_config.ggtt_base;
-}
-
-/**
- * xe_gt_sriov_vf_ggtt_shift - Return shift in GGTT range due to VF migration
- * @gt: the &xe_gt struct instance
- *
- * This function is for VF use only.
- *
- * Return: The shift value; could be negative
- */
-s64 xe_gt_sriov_vf_ggtt_shift(struct xe_gt *gt)
-{
- struct xe_gt_sriov_vf_selfconfig *config = &gt->sriov.vf.self_config;
-
- xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
- xe_gt_assert(gt, xe_gt_is_main_type(gt));
-
- return config->ggtt_shift;
-}
-
static int relay_action_handshake(struct xe_gt *gt, u32 *major, u32 *minor)
{
u32 request[VF2PF_HANDSHAKE_REQUEST_MSG_LEN] = {
@@ -1048,14 +1010,15 @@ void xe_gt_sriov_vf_print_config(struct xe_gt *gt, struct drm_printer *p)
xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
- drm_printf(p, "GGTT range:\t%#llx-%#llx\n",
- config->ggtt_base,
- config->ggtt_base + config->ggtt_size - 1);
-
- string_get_size(config->ggtt_size, 1, STRING_UNITS_2, buf, sizeof(buf));
- drm_printf(p, "GGTT size:\t%llu (%s)\n", config->ggtt_size, buf);
+ if (xe_gt_is_main_type(gt)) {
+ u64 ggtt_size = xe_tile_sriov_vf_ggtt(gt_to_tile(gt));
+ u64 ggtt_base = xe_tile_sriov_vf_ggtt_base(gt_to_tile(gt));
- drm_printf(p, "GGTT shift on last restore:\t%lld\n", config->ggtt_shift);
+ drm_printf(p, "GGTT range:\t%#llx-%#llx\n",
+ ggtt_base, ggtt_base + ggtt_size - 1);
+ string_get_size(ggtt_size, 1, STRING_UNITS_2, buf, sizeof(buf));
+ drm_printf(p, "GGTT size:\t%llu (%s)\n", ggtt_size, buf);
+ }
lmem_size = xe_tile_sriov_vf_lmem(gt_to_tile(gt));
if (IS_DGFX(xe) && xe_gt_is_main_type(gt)) {
@@ -1147,21 +1110,16 @@ static size_t post_migration_scratch_size(struct xe_device *xe)
static int vf_post_migration_fixups(struct xe_gt *gt)
{
void *buf = gt->sriov.vf.migration.scratch;
- s64 shift;
int err;
err = xe_gt_sriov_vf_query_config(gt);
if (err)
return err;
- shift = xe_gt_sriov_vf_ggtt_shift(gt);
- if (shift) {
- xe_tile_sriov_vf_fixup_ggtt_nodes(gt_to_tile(gt), shift);
- xe_gt_sriov_vf_default_lrcs_hwsp_rebase(gt);
- err = xe_guc_contexts_hwsp_rebase(&gt->uc.guc, buf);
- if (err)
- return err;
- }
+ xe_gt_sriov_vf_default_lrcs_hwsp_rebase(gt);
+ err = xe_guc_contexts_hwsp_rebase(&gt->uc.guc, buf);
+ if (err)
+ return err;
return 0;
}
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf.h b/drivers/gpu/drm/xe/xe_gt_sriov_vf.h
index 0adebf8aa419..2eb793a2d8ba 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_vf.h
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf.h
@@ -29,9 +29,6 @@ bool xe_gt_sriov_vf_recovery_pending(struct xe_gt *gt);
u32 xe_gt_sriov_vf_gmdid(struct xe_gt *gt);
u16 xe_gt_sriov_vf_guc_ids(struct xe_gt *gt);
u64 xe_gt_sriov_vf_lmem(struct xe_gt *gt);
-u64 xe_gt_sriov_vf_ggtt(struct xe_gt *gt);
-u64 xe_gt_sriov_vf_ggtt_base(struct xe_gt *gt);
-s64 xe_gt_sriov_vf_ggtt_shift(struct xe_gt *gt);
u32 xe_gt_sriov_vf_read32(struct xe_gt *gt, struct xe_reg reg);
void xe_gt_sriov_vf_write32(struct xe_gt *gt, struct xe_reg reg, u32 val);
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h b/drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h
index aff76051c9bb..0d9e217989af 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h
@@ -14,12 +14,6 @@
* struct xe_gt_sriov_vf_selfconfig - VF configuration data.
*/
struct xe_gt_sriov_vf_selfconfig {
- /** @ggtt_base: assigned base offset of the GGTT region. */
- u64 ggtt_base;
- /** @ggtt_size: assigned size of the GGTT region. */
- u64 ggtt_size;
- /** @ggtt_shift: difference in ggtt_base on last migration */
- s64 ggtt_shift;
/** @num_ctxs: assigned number of GuC submission context IDs. */
u16 num_ctxs;
/** @num_dbs: assigned number of GuC doorbells IDs. */
diff --git a/drivers/gpu/drm/xe/xe_tile_sriov_vf.c b/drivers/gpu/drm/xe/xe_tile_sriov_vf.c
index 02430a53da9f..043d93844a32 100644
--- a/drivers/gpu/drm/xe/xe_tile_sriov_vf.c
+++ b/drivers/gpu/drm/xe/xe_tile_sriov_vf.c
@@ -9,7 +9,6 @@
#include "xe_assert.h"
#include "xe_ggtt.h"
-#include "xe_gt_sriov_vf.h"
#include "xe_sriov.h"
#include "xe_sriov_printk.h"
#include "xe_tile_sriov_vf.h"
@@ -40,10 +39,10 @@ static int vf_init_ggtt_balloons(struct xe_tile *tile)
*
* Return: 0 on success or a negative error code on failure.
*/
-int xe_tile_sriov_vf_balloon_ggtt_locked(struct xe_tile *tile)
+static int xe_tile_sriov_vf_balloon_ggtt_locked(struct xe_tile *tile)
{
- u64 ggtt_base = xe_gt_sriov_vf_ggtt_base(tile->primary_gt);
- u64 ggtt_size = xe_gt_sriov_vf_ggtt(tile->primary_gt);
+ u64 ggtt_base = tile->sriov.vf.self_config.ggtt_base;
+ u64 ggtt_size = tile->sriov.vf.self_config.ggtt_size;
struct xe_device *xe = tile_to_xe(tile);
u64 wopcm = xe_wopcm_size(xe);
u64 start, end;
@@ -232,7 +231,7 @@ int xe_tile_sriov_vf_prepare_ggtt(struct xe_tile *tile)
*/
/**
- * xe_tile_sriov_vf_fixup_ggtt_nodes - Shift GGTT allocations to match assigned range.
+ * xe_tile_sriov_vf_fixup_ggtt_nodes_locked - Shift GGTT allocations to match assigned range.
* @tile: the &xe_tile struct instance
* @shift: the shift value
*
@@ -240,17 +239,15 @@ int xe_tile_sriov_vf_prepare_ggtt(struct xe_tile *tile)
* within the global space. This range might have changed during migration,
* which requires all memory addresses pointing to GGTT to be shifted.
*/
-void xe_tile_sriov_vf_fixup_ggtt_nodes(struct xe_tile *tile, s64 shift)
+void xe_tile_sriov_vf_fixup_ggtt_nodes_locked(struct xe_tile *tile, s64 shift)
{
struct xe_ggtt *ggtt = tile->mem.ggtt;
- mutex_lock(&ggtt->lock);
+ lockdep_assert_held(&ggtt->lock);
xe_tile_sriov_vf_deballoon_ggtt_locked(tile);
xe_ggtt_shift_nodes_locked(ggtt, shift);
xe_tile_sriov_vf_balloon_ggtt_locked(tile);
-
- mutex_unlock(&ggtt->lock);
}
/**
@@ -285,3 +282,69 @@ void xe_tile_sriov_vf_lmem_store(struct xe_tile *tile, u64 lmem_size)
config->lmem_size = lmem_size;
}
+
+/**
+ * xe_tile_sriov_vf_ggtt - VF GGTT configuration.
+ * @tile: the &xe_tile
+ *
+ * This function is for VF use only.
+ *
+ * Return: size of the GGTT assigned to VF.
+ */
+u64 xe_tile_sriov_vf_ggtt(struct xe_tile *tile)
+{
+ struct xe_tile_sriov_vf_selfconfig *config = &tile->sriov.vf.self_config;
+
+ xe_tile_assert(tile, IS_SRIOV_VF(tile_to_xe(tile)));
+
+ return config->ggtt_size;
+}
+
+/**
+ * xe_tile_sriov_vf_ggtt_store - Store VF GGTT configuration
+ * @tile: the &xe_tile
+ * @ggtt_size: VF GGTT size to store
+ *
+ * This function is for VF use only.
+ */
+void xe_tile_sriov_vf_ggtt_store(struct xe_tile *tile, u64 ggtt_size)
+{
+ struct xe_tile_sriov_vf_selfconfig *config = &tile->sriov.vf.self_config;
+
+ xe_tile_assert(tile, IS_SRIOV_VF(tile_to_xe(tile)));
+
+ config->ggtt_size = ggtt_size;
+}
+
+/**
+ * xe_tile_sriov_vf_ggtt_base - VF GGTT base configuration.
+ * @tile: the &xe_tile
+ *
+ * This function is for VF use only.
+ *
+ * Return: base of the GGTT assigned to VF.
+ */
+u64 xe_tile_sriov_vf_ggtt_base(struct xe_tile *tile)
+{
+ struct xe_tile_sriov_vf_selfconfig *config = &tile->sriov.vf.self_config;
+
+ xe_tile_assert(tile, IS_SRIOV_VF(tile_to_xe(tile)));
+
+ return config->ggtt_base;
+}
+
+/**
+ * xe_tile_sriov_vf_ggtt_base_store - Store VF GGTT base configuration
+ * @tile: the &xe_tile
+ * @ggtt_base: VF GGTT base to store
+ *
+ * This function is for VF use only.
+ */
+void xe_tile_sriov_vf_ggtt_base_store(struct xe_tile *tile, u64 ggtt_base)
+{
+ struct xe_tile_sriov_vf_selfconfig *config = &tile->sriov.vf.self_config;
+
+ xe_tile_assert(tile, IS_SRIOV_VF(tile_to_xe(tile)));
+
+ config->ggtt_base = ggtt_base;
+}
diff --git a/drivers/gpu/drm/xe/xe_tile_sriov_vf.h b/drivers/gpu/drm/xe/xe_tile_sriov_vf.h
index 86d750a57530..749f41504883 100644
--- a/drivers/gpu/drm/xe/xe_tile_sriov_vf.h
+++ b/drivers/gpu/drm/xe/xe_tile_sriov_vf.h
@@ -11,9 +11,12 @@
struct xe_tile;
int xe_tile_sriov_vf_prepare_ggtt(struct xe_tile *tile);
-int xe_tile_sriov_vf_balloon_ggtt_locked(struct xe_tile *tile);
void xe_tile_sriov_vf_deballoon_ggtt_locked(struct xe_tile *tile);
-void xe_tile_sriov_vf_fixup_ggtt_nodes(struct xe_tile *tile, s64 shift);
+void xe_tile_sriov_vf_fixup_ggtt_nodes_locked(struct xe_tile *tile, s64 shift);
+u64 xe_tile_sriov_vf_ggtt(struct xe_tile *tile);
+void xe_tile_sriov_vf_ggtt_store(struct xe_tile *tile, u64 ggtt_size);
+u64 xe_tile_sriov_vf_ggtt_base(struct xe_tile *tile);
+void xe_tile_sriov_vf_ggtt_base_store(struct xe_tile *tile, u64 ggtt_base);
u64 xe_tile_sriov_vf_lmem(struct xe_tile *tile);
void xe_tile_sriov_vf_lmem_store(struct xe_tile *tile, u64 lmem_size);
diff --git a/drivers/gpu/drm/xe/xe_tile_sriov_vf_types.h b/drivers/gpu/drm/xe/xe_tile_sriov_vf_types.h
index f49afa8338f1..140717f81d8f 100644
--- a/drivers/gpu/drm/xe/xe_tile_sriov_vf_types.h
+++ b/drivers/gpu/drm/xe/xe_tile_sriov_vf_types.h
@@ -12,6 +12,10 @@
* struct xe_tile_sriov_vf_selfconfig - VF configuration data.
*/
struct xe_tile_sriov_vf_selfconfig {
+ /** @ggtt_base: assigned base offset of the GGTT region. */
+ u64 ggtt_base;
+ /** @ggtt_size: assigned size of the GGTT region. */
+ u64 ggtt_size;
/** @lmem_size: assigned size of the LMEM. */
u64 lmem_size;
};
--
2.34.1
^ permalink raw reply related [flat|nested] 42+ messages in thread* Re: [PATCH v9 14/34] drm/xe/vf: Close multi-GT GGTT shift race
2025-10-08 18:04 ` [PATCH v9 14/34] drm/xe/vf: Close multi-GT GGTT shift race Matthew Brost
@ 2025-10-08 21:11 ` Michal Wajdeczko
0 siblings, 0 replies; 42+ messages in thread
From: Michal Wajdeczko @ 2025-10-08 21:11 UTC (permalink / raw)
To: Matthew Brost, intel-xe
On 10/8/2025 8:04 PM, Matthew Brost wrote:
> As multi-GT VF post-migration recovery can run in parallel on different
> workqueues, but both GTs point to the same GGTT, only one GT needs to
> shift the GGTT. However, both GTs need to know when this step has
> completed. To coordinate this, perform the GGTT shift under the GGTT
> lock. With shift being done under the lock, storing the shift value
> becomes unnecessary.
>
> In addition to above, move the GGTT VF config from the GT to the tile.
>
> v3:
> - Update commit message (Tomasz)
> v4:
> - Move GGTT values to tile state (Michal)
> - Use GGTT lock (Michal)
> v5:
> - Only take GGTT lock during recovery (CI)
> - Drop goto in vf_get_submission_cfg (Michal)
> - Add kernel doc around recovery in xe_gt_sriov_vf_query_config (Michal)
> v7:
> - Drop recovery variable (Michal)
> - Use _locked naming (Michal)
> - Use guard (Michal)
> v9:
> - Break LMEM changes into different patch (Michal)
> - Fix layering (Michal)
>
> Signed-off-by: Matthew Brost <matthew.brost@intel.com>
> ---
> drivers/gpu/drm/xe/xe_gt_sriov_vf.c | 112 ++++++--------------
> drivers/gpu/drm/xe/xe_gt_sriov_vf.h | 3 -
> drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h | 6 --
> drivers/gpu/drm/xe/xe_tile_sriov_vf.c | 81 ++++++++++++--
> drivers/gpu/drm/xe/xe_tile_sriov_vf.h | 7 +-
> drivers/gpu/drm/xe/xe_tile_sriov_vf_types.h | 4 +
> 6 files changed, 116 insertions(+), 97 deletions(-)
>
> diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
> index 6f3d9bc5ed22..aeadeb29d5ea 100644
> --- a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
> +++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
> @@ -438,13 +438,17 @@ u32 xe_gt_sriov_vf_gmdid(struct xe_gt *gt)
>
> static int vf_get_ggtt_info(struct xe_gt *gt)
> {
> - struct xe_gt_sriov_vf_selfconfig *config = &gt->sriov.vf.self_config;
> + struct xe_tile *tile = gt_to_tile(gt);
> + struct xe_ggtt *ggtt = tile->mem.ggtt;
> struct xe_guc *guc = &gt->uc.guc;
> - u64 start, size;
> + u64 start, size, ggtt_size;
> + s64 shift;
> int err;
>
> xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
>
> + guard(mutex)(&ggtt->lock);
> +
> err = guc_action_query_single_klv64(guc, GUC_KLV_VF_CFG_GGTT_START_KEY, &start);
> if (unlikely(err))
> return err;
> @@ -453,20 +457,28 @@ static int vf_get_ggtt_info(struct xe_gt *gt)
> if (unlikely(err))
> return err;
>
> - if (config->ggtt_size && config->ggtt_size != size) {
> + ggtt_size = xe_tile_sriov_vf_ggtt(tile);
> + if (ggtt_size && ggtt_size != size) {
> xe_gt_sriov_err(gt, "Unexpected GGTT reassignment: %lluK != %lluK\n",
> - size / SZ_1K, config->ggtt_size / SZ_1K);
> + size / SZ_1K, ggtt_size / SZ_1K);
> return -EREMCHG;
> }
>
> xe_gt_sriov_dbg_verbose(gt, "GGTT %#llx-%#llx = %lluK\n",
> start, start + size - 1, size / SZ_1K);
>
> - config->ggtt_shift = start - (s64)config->ggtt_base;
> - config->ggtt_base = start;
> - config->ggtt_size = size;
> + shift = start - (s64)xe_tile_sriov_vf_ggtt_base(tile);
> + xe_tile_sriov_vf_ggtt_base_store(tile, start);
> + xe_tile_sriov_vf_ggtt_store(tile, size);
since we always store both base&size maybe we can have single function:
xe_tile_sriov_vf_ggtt_store(tile, base, size);
>
> - return config->ggtt_size ? 0 : -ENODATA;
> + err = size ? 0 : -ENODATA;
> + if (!err && shift && shift != start) {
hmm, this !err is weird, maybe we should just have:
if (!size)
return -ENODATA;
if (shift && shift != start) {
// fixup
}
return 0;
> + xe_gt_sriov_info(gt, "Shifting GGTT base by %lld to 0x%016llx\n",
> + shift, start);
> + xe_tile_sriov_vf_fixup_ggtt_nodes_locked(gt_to_tile(gt), shift);
hmm, so now we have 3x calls to the tile-layer
xe_tile_sriov_vf_ggtt_base_store
xe_tile_sriov_vf_ggtt_store
xe_tile_sriov_vf_fixup_ggtt_nodes_locked
maybe we should move more logic/logs there?
(but can be also done as follow up, as part of not-querying ggtt from media-gt,
or just checking consistency there, without triggering any bad fixups again)
> + }
> +
> + return err;
> }
>
> static int vf_get_lmem_info(struct xe_gt *gt)
> @@ -546,7 +558,9 @@ static void vf_cache_gmdid(struct xe_gt *gt)
> * xe_gt_sriov_vf_query_config - Query SR-IOV config data over MMIO.
> * @gt: the &xe_gt
> *
> - * This function is for VF use only.
> + * This function is for VF use only. This function may shift the GGTT and is
> + * performed under GGTT lock, making this step visible to all GTs that share a
> + * GGTT.
> *
> * Return: 0 on success or a negative error code on failure.
> */
> @@ -592,58 +606,6 @@ u16 xe_gt_sriov_vf_guc_ids(struct xe_gt *gt)
> return gt->sriov.vf.self_config.num_ctxs;
> }
>
> -/**
> - * xe_gt_sriov_vf_ggtt - VF GGTT configuration.
> - * @gt: the &xe_gt
> - *
> - * This function is for VF use only.
> - *
> - * Return: size of the GGTT assigned to VF.
> - */
> -u64 xe_gt_sriov_vf_ggtt(struct xe_gt *gt)
> -{
> - xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
> - xe_gt_assert(gt, gt->sriov.vf.guc_version.major);
> - xe_gt_assert(gt, gt->sriov.vf.self_config.ggtt_size);
> -
> - return gt->sriov.vf.self_config.ggtt_size;
> -}
> -
> -/**
> - * xe_gt_sriov_vf_ggtt_base - VF GGTT base offset.
> - * @gt: the &xe_gt
> - *
> - * This function is for VF use only.
> - *
> - * Return: base offset of the GGTT assigned to VF.
> - */
> -u64 xe_gt_sriov_vf_ggtt_base(struct xe_gt *gt)
> -{
> - xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
> - xe_gt_assert(gt, gt->sriov.vf.guc_version.major);
> - xe_gt_assert(gt, gt->sriov.vf.self_config.ggtt_size);
> -
> - return gt->sriov.vf.self_config.ggtt_base;
> -}
> -
> -/**
> - * xe_gt_sriov_vf_ggtt_shift - Return shift in GGTT range due to VF migration
> - * @gt: the &xe_gt struct instance
> - *
> - * This function is for VF use only.
> - *
> - * Return: The shift value; could be negative
> - */
> -s64 xe_gt_sriov_vf_ggtt_shift(struct xe_gt *gt)
> -{
> - struct xe_gt_sriov_vf_selfconfig *config = &gt->sriov.vf.self_config;
> -
> - xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
> - xe_gt_assert(gt, xe_gt_is_main_type(gt));
> -
> - return config->ggtt_shift;
> -}
> -
> static int relay_action_handshake(struct xe_gt *gt, u32 *major, u32 *minor)
> {
> u32 request[VF2PF_HANDSHAKE_REQUEST_MSG_LEN] = {
> @@ -1048,14 +1010,15 @@ void xe_gt_sriov_vf_print_config(struct xe_gt *gt, struct drm_printer *p)
>
> xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
>
> - drm_printf(p, "GGTT range:\t%#llx-%#llx\n",
> - config->ggtt_base,
> - config->ggtt_base + config->ggtt_size - 1);
> -
> - string_get_size(config->ggtt_size, 1, STRING_UNITS_2, buf, sizeof(buf));
> - drm_printf(p, "GGTT size:\t%llu (%s)\n", config->ggtt_size, buf);
> + if (xe_gt_is_main_type(gt)) {
> + u64 ggtt_size = xe_tile_sriov_vf_ggtt(gt_to_tile(gt));
> + u64 ggtt_base = xe_tile_sriov_vf_ggtt_base(gt_to_tile(gt));
>
> - drm_printf(p, "GGTT shift on last restore:\t%lld\n", config->ggtt_shift);
> + drm_printf(p, "GGTT range:\t%#llx-%#llx\n",
> + ggtt_base, ggtt_base + ggtt_size - 1);
> + string_get_size(ggtt_size, 1, STRING_UNITS_2, buf, sizeof(buf));
> + drm_printf(p, "GGTT size:\t%llu (%s)\n", ggtt_size, buf);
> + }
>
> lmem_size = xe_tile_sriov_vf_lmem(gt_to_tile(gt));
> if (IS_DGFX(xe) && xe_gt_is_main_type(gt)) {
nit: this could be inner previous "if (primary_gt)" as
if (IS_DGFX(xe))
> @@ -1147,21 +1110,16 @@ static size_t post_migration_scratch_size(struct xe_device *xe)
> static int vf_post_migration_fixups(struct xe_gt *gt)
> {
> void *buf = gt->sriov.vf.migration.scratch;
> - s64 shift;
> int err;
>
maybe we should add small note here that below query_config() will actually do the ggtt nodes fixup that remaining code must take into account?
> err = xe_gt_sriov_vf_query_config(gt);
> if (err)
> return err;
>
> - shift = xe_gt_sriov_vf_ggtt_shift(gt);
> - if (shift) {
> - xe_tile_sriov_vf_fixup_ggtt_nodes(gt_to_tile(gt), shift);
> - xe_gt_sriov_vf_default_lrcs_hwsp_rebase(gt);
> - err = xe_guc_contexts_hwsp_rebase(&gt->uc.guc, buf);
> - if (err)
> - return err;
> - }
> + xe_gt_sriov_vf_default_lrcs_hwsp_rebase(gt);
> + err = xe_guc_contexts_hwsp_rebase(&gt->uc.guc, buf);
> + if (err)
> + return err;
>
> return 0;
> }
> diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf.h b/drivers/gpu/drm/xe/xe_gt_sriov_vf.h
> index 0adebf8aa419..2eb793a2d8ba 100644
> --- a/drivers/gpu/drm/xe/xe_gt_sriov_vf.h
> +++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf.h
> @@ -29,9 +29,6 @@ bool xe_gt_sriov_vf_recovery_pending(struct xe_gt *gt);
> u32 xe_gt_sriov_vf_gmdid(struct xe_gt *gt);
> u16 xe_gt_sriov_vf_guc_ids(struct xe_gt *gt);
> u64 xe_gt_sriov_vf_lmem(struct xe_gt *gt);
> -u64 xe_gt_sriov_vf_ggtt(struct xe_gt *gt);
> -u64 xe_gt_sriov_vf_ggtt_base(struct xe_gt *gt);
> -s64 xe_gt_sriov_vf_ggtt_shift(struct xe_gt *gt);
>
> u32 xe_gt_sriov_vf_read32(struct xe_gt *gt, struct xe_reg reg);
> void xe_gt_sriov_vf_write32(struct xe_gt *gt, struct xe_reg reg, u32 val);
> diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h b/drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h
> index aff76051c9bb..0d9e217989af 100644
> --- a/drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h
> +++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h
> @@ -14,12 +14,6 @@
> * struct xe_gt_sriov_vf_selfconfig - VF configuration data.
> */
> struct xe_gt_sriov_vf_selfconfig {
> - /** @ggtt_base: assigned base offset of the GGTT region. */
> - u64 ggtt_base;
> - /** @ggtt_size: assigned size of the GGTT region. */
> - u64 ggtt_size;
> - /** @ggtt_shift: difference in ggtt_base on last migration */
> - s64 ggtt_shift;
> /** @num_ctxs: assigned number of GuC submission context IDs. */
> u16 num_ctxs;
> /** @num_dbs: assigned number of GuC doorbells IDs. */
> diff --git a/drivers/gpu/drm/xe/xe_tile_sriov_vf.c b/drivers/gpu/drm/xe/xe_tile_sriov_vf.c
> index 02430a53da9f..043d93844a32 100644
> --- a/drivers/gpu/drm/xe/xe_tile_sriov_vf.c
> +++ b/drivers/gpu/drm/xe/xe_tile_sriov_vf.c
> @@ -9,7 +9,6 @@
>
> #include "xe_assert.h"
> #include "xe_ggtt.h"
> -#include "xe_gt_sriov_vf.h"
> #include "xe_sriov.h"
> #include "xe_sriov_printk.h"
> #include "xe_tile_sriov_vf.h"
> @@ -40,10 +39,10 @@ static int vf_init_ggtt_balloons(struct xe_tile *tile)
> *
> * Return: 0 on success or a negative error code on failure.
> */
> -int xe_tile_sriov_vf_balloon_ggtt_locked(struct xe_tile *tile)
> +static int xe_tile_sriov_vf_balloon_ggtt_locked(struct xe_tile *tile)
> {
> - u64 ggtt_base = xe_gt_sriov_vf_ggtt_base(tile->primary_gt);
> - u64 ggtt_size = xe_gt_sriov_vf_ggtt(tile->primary_gt);
> + u64 ggtt_base = tile->sriov.vf.self_config.ggtt_base;
> + u64 ggtt_size = tile->sriov.vf.self_config.ggtt_size;
> struct xe_device *xe = tile_to_xe(tile);
> u64 wopcm = xe_wopcm_size(xe);
> u64 start, end;
> @@ -232,7 +231,7 @@ int xe_tile_sriov_vf_prepare_ggtt(struct xe_tile *tile)
> */
>
> /**
> - * xe_tile_sriov_vf_fixup_ggtt_nodes - Shift GGTT allocations to match assigned range.
> + * xe_tile_sriov_vf_fixup_ggtt_nodes_locked - Shift GGTT allocations to match assigned range.
> * @tile: the &xe_tile struct instance
> * @shift: the shift value
> *
> @@ -240,17 +239,15 @@ int xe_tile_sriov_vf_prepare_ggtt(struct xe_tile *tile)
> * within the global space. This range might have changed during migration,
> * which requires all memory addresses pointing to GGTT to be shifted.
> */
> -void xe_tile_sriov_vf_fixup_ggtt_nodes(struct xe_tile *tile, s64 shift)
> +void xe_tile_sriov_vf_fixup_ggtt_nodes_locked(struct xe_tile *tile, s64 shift)
> {
> struct xe_ggtt *ggtt = tile->mem.ggtt;
>
> - mutex_lock(&ggtt->lock);
> + lockdep_assert_held(&ggtt->lock);
>
> xe_tile_sriov_vf_deballoon_ggtt_locked(tile);
> xe_ggtt_shift_nodes_locked(ggtt, shift);
> xe_tile_sriov_vf_balloon_ggtt_locked(tile);
> -
> - mutex_unlock(&ggtt->lock);
> }
>
> /**
> @@ -285,3 +282,69 @@ void xe_tile_sriov_vf_lmem_store(struct xe_tile *tile, u64 lmem_size)
>
> config->lmem_size = lmem_size;
> }
> +
> +/**
> + * xe_tile_sriov_vf_ggtt - VF GGTT configuration.
> + * @tile: the &xe_tile
> + *
> + * This function is for VF use only.
> + *
> + * Return: size of the GGTT assigned to VF.
> + */
> +u64 xe_tile_sriov_vf_ggtt(struct xe_tile *tile)
> +{
> + struct xe_tile_sriov_vf_selfconfig *config = &tile->sriov.vf.self_config;
> +
> + xe_tile_assert(tile, IS_SRIOV_VF(tile_to_xe(tile)));
> +
> + return config->ggtt_size;
> +}
> +
> +/**
> + * xe_tile_sriov_vf_ggtt_store - Store VF GGTT configuration
> + * @tile: the &xe_tile
> + * @ggtt_size: VF GGTT size to store
> + *
> + * This function is for VF use only.
> + */
> +void xe_tile_sriov_vf_ggtt_store(struct xe_tile *tile, u64 ggtt_size)
> +{
> + struct xe_tile_sriov_vf_selfconfig *config = &tile->sriov.vf.self_config;
> +
> + xe_tile_assert(tile, IS_SRIOV_VF(tile_to_xe(tile)));
> +
> + config->ggtt_size = ggtt_size;
> +}
> +
> +/**
> + * xe_tile_sriov_vf_ggtt_base - VF GGTT base configuration.
> + * @tile: the &xe_tile
> + *
> + * This function is for VF use only.
> + *
> + * Return: base of the GGTT assigned to VF.
> + */
> +u64 xe_tile_sriov_vf_ggtt_base(struct xe_tile *tile)
> +{
> + struct xe_tile_sriov_vf_selfconfig *config = &tile->sriov.vf.self_config;
> +
> + xe_tile_assert(tile, IS_SRIOV_VF(tile_to_xe(tile)));
> +
> + return config->ggtt_base;
> +}
> +
> +/**
> + * xe_tile_sriov_vf_ggtt_base_store - Store VF GGTT base configuration
> + * @tile: the &xe_tile
> + * @ggtt_size: VF GGTT base to store
ggtt_base ?
> + *
> + * This function is for VF use only.
> + */
> +void xe_tile_sriov_vf_ggtt_base_store(struct xe_tile *tile, u64 ggtt_base)
> +{
> + struct xe_tile_sriov_vf_selfconfig *config = &tile->sriov.vf.self_config;
> +
> + xe_tile_assert(tile, IS_SRIOV_VF(tile_to_xe(tile)));
> +
> + config->ggtt_base = ggtt_base;
> +}
> diff --git a/drivers/gpu/drm/xe/xe_tile_sriov_vf.h b/drivers/gpu/drm/xe/xe_tile_sriov_vf.h
> index 86d750a57530..749f41504883 100644
> --- a/drivers/gpu/drm/xe/xe_tile_sriov_vf.h
> +++ b/drivers/gpu/drm/xe/xe_tile_sriov_vf.h
> @@ -11,9 +11,12 @@
> struct xe_tile;
>
> int xe_tile_sriov_vf_prepare_ggtt(struct xe_tile *tile);
> -int xe_tile_sriov_vf_balloon_ggtt_locked(struct xe_tile *tile);
> void xe_tile_sriov_vf_deballoon_ggtt_locked(struct xe_tile *tile);
> -void xe_tile_sriov_vf_fixup_ggtt_nodes(struct xe_tile *tile, s64 shift);
> +void xe_tile_sriov_vf_fixup_ggtt_nodes_locked(struct xe_tile *tile, s64 shift);
> +u64 xe_tile_sriov_vf_ggtt(struct xe_tile *tile);
> +void xe_tile_sriov_vf_ggtt_store(struct xe_tile *tile, u64 ggtt_size);
> +u64 xe_tile_sriov_vf_ggtt_base(struct xe_tile *tile);
> +void xe_tile_sriov_vf_ggtt_base_store(struct xe_tile *tile, u64 ggtt_size);
> u64 xe_tile_sriov_vf_lmem(struct xe_tile *tile);
> void xe_tile_sriov_vf_lmem_store(struct xe_tile *tile, u64 lmem_size);
>
> diff --git a/drivers/gpu/drm/xe/xe_tile_sriov_vf_types.h b/drivers/gpu/drm/xe/xe_tile_sriov_vf_types.h
> index f49afa8338f1..140717f81d8f 100644
> --- a/drivers/gpu/drm/xe/xe_tile_sriov_vf_types.h
> +++ b/drivers/gpu/drm/xe/xe_tile_sriov_vf_types.h
> @@ -12,6 +12,10 @@
> * struct xe_tile_sriov_vf_selfconfig - VF configuration data.
> */
> struct xe_tile_sriov_vf_selfconfig {
> + /** @ggtt_base: assigned base offset of the GGTT region. */
> + u64 ggtt_base;
> + /** @ggtt_size: assigned size of the GGTT region. */
> + u64 ggtt_size;
> /** @lmem_size: assigned size of the LMEM. */
> u64 lmem_size;
> };
I still think that pure GGTT selfconfig move from gt to tile could done without remaining changes,
but I will not block it as now it's better than before (only GGTT changes in one patch),
so with most nits hopefully fixed,
Reviewed-by: Michal Wajdeczko <michal.wajdeczko@intel.com>
^ permalink raw reply [flat|nested] 42+ messages in thread
* [PATCH v9 15/34] drm/xe/vf: Teardown VF post migration worker on driver unload
2025-10-08 18:04 [PATCH v9 00/34] VF migration redesign Matthew Brost
` (13 preceding siblings ...)
2025-10-08 18:04 ` [PATCH v9 14/34] drm/xe/vf: Close multi-GT GGTT shift race Matthew Brost
@ 2025-10-08 18:04 ` Matthew Brost
2025-10-08 18:04 ` [PATCH v9 16/34] drm/xe/vf: Don't allow GT reset to be queued during VF post migration recovery Matthew Brost
` (22 subsequent siblings)
37 siblings, 0 replies; 42+ messages in thread
From: Matthew Brost @ 2025-10-08 18:04 UTC (permalink / raw)
To: intel-xe
Be cautious and ensure the VF post-migration worker is not running
during driver unload.
v3:
- More teardown later in driver init, use devm (Tomasz)
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Tomasz Lis <tomasz.lis@intel.com>
---
drivers/gpu/drm/xe/xe_gt.c | 6 ++++
drivers/gpu/drm/xe/xe_gt_sriov_vf.c | 34 ++++++++++++++++++++++-
drivers/gpu/drm/xe/xe_gt_sriov_vf.h | 1 +
drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h | 4 ++-
4 files changed, 43 insertions(+), 2 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c
index 2bdc1cdf24a5..d713b649447b 100644
--- a/drivers/gpu/drm/xe/xe_gt.c
+++ b/drivers/gpu/drm/xe/xe_gt.c
@@ -651,6 +651,12 @@ int xe_gt_init(struct xe_gt *gt)
if (err)
return err;
+ if (IS_SRIOV_VF(gt_to_xe(gt))) {
+ err = xe_gt_sriov_vf_init(gt);
+ if (err)
+ return err;
+ }
+
return 0;
}
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
index aeadeb29d5ea..d1236599da92 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
@@ -723,7 +723,8 @@ static void vf_start_migration_recovery(struct xe_gt *gt)
spin_lock(&gt->sriov.vf.migration.lock);
- if (!gt->sriov.vf.migration.recovery_queued) {
+ if (!gt->sriov.vf.migration.recovery_queued ||
+ !gt->sriov.vf.migration.recovery_teardown) {
gt->sriov.vf.migration.recovery_queued = true;
WRITE_ONCE(gt->sriov.vf.migration.recovery_inprogress, true);
@@ -1198,6 +1199,17 @@ static void migration_worker_func(struct work_struct *w)
vf_post_migration_recovery(gt);
}
+static void vf_migration_fini(void *arg)
+{
+ struct xe_gt *gt = arg;
+
+ spin_lock_irq(&gt->sriov.vf.migration.lock);
+ gt->sriov.vf.migration.recovery_teardown = true;
+ spin_unlock_irq(&gt->sriov.vf.migration.lock);
+
+ cancel_work_sync(&gt->sriov.vf.migration.worker);
+}
+
/**
* xe_gt_sriov_vf_init_early() - GT VF init early
* @gt: the &xe_gt
@@ -1224,6 +1236,26 @@ int xe_gt_sriov_vf_init_early(struct xe_gt *gt)
return 0;
}
+/**
+ * xe_gt_sriov_vf_init() - GT VF init
+ * @gt: the &xe_gt
+ *
+ * Return 0 on success, errno on failure
+ */
+int xe_gt_sriov_vf_init(struct xe_gt *gt)
+{
+ if (!xe_sriov_vf_migration_supported(gt_to_xe(gt)))
+ return 0;
+
+ /*
+ * We want to tear down the VF post-migration early during driver
+ * unload; therefore, we add this finalization action later during
+ * driver load.
+ */
+ return devm_add_action_or_reset(gt_to_xe(gt)->drm.dev,
+ vf_migration_fini, gt);
+}
+
/**
* xe_gt_sriov_vf_recovery_pending() - VF post migration recovery pending
* @gt: the &xe_gt
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf.h b/drivers/gpu/drm/xe/xe_gt_sriov_vf.h
index 2eb793a2d8ba..1d2eaa52f804 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_vf.h
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf.h
@@ -24,6 +24,7 @@ int xe_gt_sriov_vf_query_runtime(struct xe_gt *gt);
void xe_gt_sriov_vf_migrated_event_handler(struct xe_gt *gt);
int xe_gt_sriov_vf_init_early(struct xe_gt *gt);
+int xe_gt_sriov_vf_init(struct xe_gt *gt);
bool xe_gt_sriov_vf_recovery_pending(struct xe_gt *gt);
u32 xe_gt_sriov_vf_gmdid(struct xe_gt *gt);
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h b/drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h
index 0d9e217989af..bdd9b968f204 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h
@@ -45,10 +45,12 @@ struct xe_gt_sriov_vf_runtime {
struct xe_gt_sriov_vf_migration {
/** @migration: VF migration recovery worker */
struct work_struct worker;
- /** @lock: Protects recovery_queued */
+ /** @lock: Protects recovery_queued, teardown */
spinlock_t lock;
/** @scratch: Scratch memory for VF recovery */
void *scratch;
+ /** @recovery_teardown: VF post migration recovery is being torn down */
+ bool recovery_teardown;
/** @recovery_queued: VF post migration recovery is queued */
bool recovery_queued;
/** @recovery_inprogress: VF post migration recovery in progress */
--
2.34.1
^ permalink raw reply related [flat|nested] 42+ messages in thread* [PATCH v9 16/34] drm/xe/vf: Don't allow GT reset to be queued during VF post migration recovery
2025-10-08 18:04 [PATCH v9 00/34] VF migration redesign Matthew Brost
` (14 preceding siblings ...)
2025-10-08 18:04 ` [PATCH v9 15/34] drm/xe/vf: Teardown VF post migration worker on driver unload Matthew Brost
@ 2025-10-08 18:04 ` Matthew Brost
2025-10-08 18:04 ` [PATCH v9 17/34] drm/xe/vf: Wakeup in GuC backend on " Matthew Brost
` (21 subsequent siblings)
37 siblings, 0 replies; 42+ messages in thread
From: Matthew Brost @ 2025-10-08 18:04 UTC (permalink / raw)
To: intel-xe
With well-behaved software, a GT reset should never occur, nor should it
happen during VF post-migration recovery. If it does, trigger a warning
but suppress the GT reset, as VF post-migration recovery is expected to
bring the VF back to a working state.
v3:
- Better commit message (Tomasz)
v5:
- Use xe_gt_WARN_ON (Michal)
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Tomasz Lis <tomasz.lis@intel.com>
---
drivers/gpu/drm/xe/xe_gt.c | 9 -------
drivers/gpu/drm/xe/xe_gt_sriov_vf.c | 7 -----
drivers/gpu/drm/xe/xe_guc_submit.c | 42 ++++-------------------------
drivers/gpu/drm/xe/xe_guc_submit.h | 3 ---
4 files changed, 5 insertions(+), 56 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c
index d713b649447b..6951fedd4350 100644
--- a/drivers/gpu/drm/xe/xe_gt.c
+++ b/drivers/gpu/drm/xe/xe_gt.c
@@ -803,11 +803,6 @@ static int do_gt_restart(struct xe_gt *gt)
return 0;
}
-static int gt_wait_reset_unblock(struct xe_gt *gt)
-{
- return xe_guc_wait_reset_unblock(&gt->uc.guc);
-}
-
static int gt_reset(struct xe_gt *gt)
{
unsigned int fw_ref;
@@ -822,10 +817,6 @@ static int gt_reset(struct xe_gt *gt)
xe_gt_info(gt, "reset started\n");
- err = gt_wait_reset_unblock(gt);
- if (!err)
- xe_gt_warn(gt, "reset block failed to get lifted");
-
xe_pm_runtime_get(gt_to_xe(gt));
if (xe_fault_inject_gt_reset()) {
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
index d1236599da92..4d2fd81236a8 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
@@ -1090,17 +1090,11 @@ void xe_gt_sriov_vf_print_version(struct xe_gt *gt, struct drm_printer *p)
static void vf_post_migration_shutdown(struct xe_gt *gt)
{
- int ret = 0;
-
spin_lock_irq(&gt->sriov.vf.migration.lock);
gt->sriov.vf.migration.recovery_queued = false;
spin_unlock_irq(&gt->sriov.vf.migration.lock);
xe_guc_submit_pause(&gt->uc.guc);
- ret |= xe_guc_submit_reset_block(&gt->uc.guc);
-
- if (ret)
- xe_gt_sriov_info(gt, "migration recovery encountered ongoing reset\n");
}
static size_t post_migration_scratch_size(struct xe_device *xe)
@@ -1134,7 +1128,6 @@ static void vf_post_migration_kickstart(struct xe_gt *gt)
*/
xe_irq_resume(gt_to_xe(gt));
- xe_guc_submit_reset_unblock(&gt->uc.guc);
xe_guc_submit_unpause(&gt->uc.guc);
}
diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
index d123bdb63369..59371b7cc8a4 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit.c
+++ b/drivers/gpu/drm/xe/xe_guc_submit.c
@@ -27,6 +27,7 @@
#include "xe_gt.h"
#include "xe_gt_clock.h"
#include "xe_gt_printk.h"
+#include "xe_gt_sriov_vf.h"
#include "xe_guc.h"
#include "xe_guc_capture.h"
#include "xe_guc_ct.h"
@@ -1900,47 +1901,14 @@ static void guc_exec_queue_stop(struct xe_guc *guc, struct xe_exec_queue *q)
}
}
-/**
- * xe_guc_submit_reset_block - Disallow reset calls on given GuC.
- * @guc: the &xe_guc struct instance
- */
-int xe_guc_submit_reset_block(struct xe_guc *guc)
-{
- return atomic_fetch_or(1, &guc->submission_state.reset_blocked);
-}
-
-/**
- * xe_guc_submit_reset_unblock - Allow back reset calls on given GuC.
- * @guc: the &xe_guc struct instance
- */
-void xe_guc_submit_reset_unblock(struct xe_guc *guc)
-{
- atomic_set_release(&guc->submission_state.reset_blocked, 0);
- wake_up_all(&guc->ct.wq);
-}
-
-static int guc_submit_reset_is_blocked(struct xe_guc *guc)
-{
- return atomic_read_acquire(&guc->submission_state.reset_blocked);
-}
-
-/* Maximum time of blocking reset */
-#define RESET_BLOCK_PERIOD_MAX (HZ * 5)
-
-/**
- * xe_guc_wait_reset_unblock - Wait until reset blocking flag is lifted, or timeout.
- * @guc: the &xe_guc struct instance
- */
-int xe_guc_wait_reset_unblock(struct xe_guc *guc)
-{
- return wait_event_timeout(guc->ct.wq,
- !guc_submit_reset_is_blocked(guc), RESET_BLOCK_PERIOD_MAX);
-}
-
int xe_guc_submit_reset_prepare(struct xe_guc *guc)
{
int ret;
+ if (xe_gt_WARN_ON(guc_to_gt(guc),
+ xe_gt_sriov_vf_recovery_pending(guc_to_gt(guc))))
+ return 0;
+
if (!guc->submission_state.initialized)
return 0;
diff --git a/drivers/gpu/drm/xe/xe_guc_submit.h b/drivers/gpu/drm/xe/xe_guc_submit.h
index 5b4a0a6fd818..f535fe3895e5 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit.h
+++ b/drivers/gpu/drm/xe/xe_guc_submit.h
@@ -22,9 +22,6 @@ void xe_guc_submit_stop(struct xe_guc *guc);
int xe_guc_submit_start(struct xe_guc *guc);
void xe_guc_submit_pause(struct xe_guc *guc);
void xe_guc_submit_unpause(struct xe_guc *guc);
-int xe_guc_submit_reset_block(struct xe_guc *guc);
-void xe_guc_submit_reset_unblock(struct xe_guc *guc);
-int xe_guc_wait_reset_unblock(struct xe_guc *guc);
void xe_guc_submit_wedge(struct xe_guc *guc);
int xe_guc_read_stopped(struct xe_guc *guc);
--
2.34.1
^ permalink raw reply related [flat|nested] 42+ messages in thread* [PATCH v9 17/34] drm/xe/vf: Wakeup in GuC backend on VF post migration recovery
2025-10-08 18:04 [PATCH v9 00/34] VF migration redesign Matthew Brost
` (15 preceding siblings ...)
2025-10-08 18:04 ` [PATCH v9 16/34] drm/xe/vf: Don't allow GT reset to be queued during VF post migration recovery Matthew Brost
@ 2025-10-08 18:04 ` Matthew Brost
2025-10-08 18:04 ` [PATCH v9 18/34] drm/xe/vf: Avoid indefinite blocking in preempt rebind worker for VFs supporting migration Matthew Brost
` (20 subsequent siblings)
37 siblings, 0 replies; 42+ messages in thread
From: Matthew Brost @ 2025-10-08 18:04 UTC (permalink / raw)
To: intel-xe
If VF post-migration recovery is in progress, the recovery flow will
rebuild all GuC submission state. In this case, exit all waiters to
ensure that submission queue scheduling can also be paused. Avoid taking
any adverse actions after aborting the wait.
As part of waking up the GuC backend, suspend_wait can now return
-EAGAIN indicating the waiter should be retried. If the caller is
running on work item, that work item need to be requeued to avoid a
deadlock for the work item blocking the VF migration recovery work item.
v3:
- Don't block in preempt fence work queue as this can interfere with VF
post-migration work queue scheduling leading to deadlock (Testing)
- Use xe_gt_recovery_inprogress (Michal)
v5:
- Use static function for vf_recovery (Michal)
- Add helper to wake CT waiters (Michal)
- Move some code to following patch (Michal)
- Adjust commit message to explain suspend_wait returning -EAGAIN (Michal)
- Add kernel doc to suspend_wait around returning -EAGAIN
v7:
- Add comment on why a shared wait queue is need on VFs (Michal)
- Guard against suspend_wait signaling early on resfix done (Tomasz)
v8:
- Fix kernel doc (CI)
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Michal Wajdeczko <michal.wajdeczko@intel.com>
---
drivers/gpu/drm/xe/xe_exec_queue_types.h | 3 +
drivers/gpu/drm/xe/xe_gt_sriov_vf.c | 4 +
drivers/gpu/drm/xe/xe_guc_ct.h | 9 +++
drivers/gpu/drm/xe/xe_guc_submit.c | 93 ++++++++++++++++++------
drivers/gpu/drm/xe/xe_preempt_fence.c | 11 +++
5 files changed, 99 insertions(+), 21 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_exec_queue_types.h b/drivers/gpu/drm/xe/xe_exec_queue_types.h
index 27b76cf9da89..282505fa1377 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue_types.h
+++ b/drivers/gpu/drm/xe/xe_exec_queue_types.h
@@ -207,6 +207,9 @@ struct xe_exec_queue_ops {
* call after suspend. In dma-fencing path thus must return within a
* reasonable amount of time. -ETIME return shall indicate an error
* waiting for suspend resulting in associated VM getting killed.
+ * -EAGAIN return indicates the wait should be tried again, if the wait
+ * is within a work item, the work item should be requeued as deadlock
+ * avoidance mechanism.
*/
int (*suspend_wait)(struct xe_exec_queue *q);
/**
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
index 4d2fd81236a8..d86a9406a246 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
@@ -23,6 +23,7 @@
#include "xe_gt_sriov_vf.h"
#include "xe_gt_sriov_vf_types.h"
#include "xe_guc.h"
+#include "xe_guc_ct.h"
#include "xe_guc_hxg_helpers.h"
#include "xe_guc_relay.h"
#include "xe_guc_submit.h"
@@ -727,6 +728,9 @@ static void vf_start_migration_recovery(struct xe_gt *gt)
!gt->sriov.vf.migration.recovery_teardown) {
gt->sriov.vf.migration.recovery_queued = true;
WRITE_ONCE(gt->sriov.vf.migration.recovery_inprogress, true);
+ smp_wmb(); /* Ensure above write visible before wake */
+
+ xe_guc_ct_wake_waiters(&gt->uc.guc.ct);
started = queue_work(gt->ordered_wq, &gt->sriov.vf.migration.worker);
xe_gt_sriov_info(gt, "VF migration recovery %s\n", started ?
diff --git a/drivers/gpu/drm/xe/xe_guc_ct.h b/drivers/gpu/drm/xe/xe_guc_ct.h
index d6c81325a76c..ae49364f6f28 100644
--- a/drivers/gpu/drm/xe/xe_guc_ct.h
+++ b/drivers/gpu/drm/xe/xe_guc_ct.h
@@ -72,4 +72,13 @@ xe_guc_ct_send_block_no_fail(struct xe_guc_ct *ct, const u32 *action, u32 len)
long xe_guc_ct_queue_proc_time_jiffies(struct xe_guc_ct *ct);
+/**
+ * xe_guc_ct_wake_waiters() - GuC CT wake up waiters
+ * @ct: GuC CT object
+ */
+static inline void xe_guc_ct_wake_waiters(struct xe_guc_ct *ct)
+{
+ wake_up_all(&ct->wq);
+}
+
#endif
diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
index 59371b7cc8a4..7f0ea35f4f0a 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit.c
+++ b/drivers/gpu/drm/xe/xe_guc_submit.c
@@ -27,7 +27,6 @@
#include "xe_gt.h"
#include "xe_gt_clock.h"
#include "xe_gt_printk.h"
-#include "xe_gt_sriov_vf.h"
#include "xe_guc.h"
#include "xe_guc_capture.h"
#include "xe_guc_ct.h"
@@ -702,6 +701,11 @@ static u32 wq_space_until_wrap(struct xe_exec_queue *q)
return (WQ_SIZE - q->guc->wqi_tail);
}
+static bool vf_recovery(struct xe_guc *guc)
+{
+ return xe_gt_recovery_pending(guc_to_gt(guc));
+}
+
static int wq_wait_for_space(struct xe_exec_queue *q, u32 wqi_size)
{
struct xe_guc *guc = exec_queue_to_guc(q);
@@ -711,7 +715,7 @@ static int wq_wait_for_space(struct xe_exec_queue *q, u32 wqi_size)
#define AVAILABLE_SPACE \
CIRC_SPACE(q->guc->wqi_tail, q->guc->wqi_head, WQ_SIZE)
- if (wqi_size > AVAILABLE_SPACE) {
+ if (wqi_size > AVAILABLE_SPACE && !vf_recovery(guc)) {
try_again:
q->guc->wqi_head = parallel_read(xe, map, wq_desc.head);
if (wqi_size > AVAILABLE_SPACE) {
@@ -910,9 +914,10 @@ static void disable_scheduling_deregister(struct xe_guc *guc,
ret = wait_event_timeout(guc->ct.wq,
(!exec_queue_pending_enable(q) &&
!exec_queue_pending_disable(q)) ||
- xe_guc_read_stopped(guc),
+ xe_guc_read_stopped(guc) ||
+ vf_recovery(guc),
HZ * 5);
- if (!ret) {
+ if (!ret && !vf_recovery(guc)) {
struct xe_gpu_scheduler *sched = &q->guc->sched;
xe_gt_warn(q->gt, "Pending enable/disable failed to respond\n");
@@ -1015,6 +1020,10 @@ static void xe_guc_exec_queue_lr_cleanup(struct work_struct *w)
bool wedged = false;
xe_gt_assert(guc_to_gt(guc), xe_exec_queue_is_lr(q));
+
+ if (vf_recovery(guc))
+ return;
+
trace_xe_exec_queue_lr_cleanup(q);
if (!exec_queue_killed(q))
@@ -1047,7 +1056,11 @@ static void xe_guc_exec_queue_lr_cleanup(struct work_struct *w)
*/
ret = wait_event_timeout(guc->ct.wq,
!exec_queue_pending_disable(q) ||
- xe_guc_read_stopped(guc), HZ * 5);
+ xe_guc_read_stopped(guc) ||
+ vf_recovery(guc), HZ * 5);
+ if (vf_recovery(guc))
+ return;
+
if (!ret) {
xe_gt_warn(q->gt, "Schedule disable failed to respond, guc_id=%d\n",
q->guc->id);
@@ -1137,8 +1150,9 @@ static void enable_scheduling(struct xe_exec_queue *q)
ret = wait_event_timeout(guc->ct.wq,
!exec_queue_pending_enable(q) ||
- xe_guc_read_stopped(guc), HZ * 5);
- if (!ret || xe_guc_read_stopped(guc)) {
+ xe_guc_read_stopped(guc) ||
+ vf_recovery(guc), HZ * 5);
+ if ((!ret && !vf_recovery(guc)) || xe_guc_read_stopped(guc)) {
xe_gt_warn(guc_to_gt(guc), "Schedule enable failed to respond");
set_exec_queue_banned(q);
xe_gt_reset_async(q->gt);
@@ -1209,7 +1223,8 @@ guc_exec_queue_timedout_job(struct drm_sched_job *drm_job)
* list so job can be freed and kick scheduler ensuring free job is not
* lost.
*/
- if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &job->fence->flags))
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &job->fence->flags) ||
+ vf_recovery(guc))
return DRM_GPU_SCHED_STAT_NO_HANG;
/* Kill the run_job entry point */
@@ -1261,7 +1276,10 @@ guc_exec_queue_timedout_job(struct drm_sched_job *drm_job)
ret = wait_event_timeout(guc->ct.wq,
(!exec_queue_pending_enable(q) &&
!exec_queue_pending_disable(q)) ||
- xe_guc_read_stopped(guc), HZ * 5);
+ xe_guc_read_stopped(guc) ||
+ vf_recovery(guc), HZ * 5);
+ if (vf_recovery(guc))
+ goto handle_vf_resume;
if (!ret || xe_guc_read_stopped(guc))
goto trigger_reset;
@@ -1286,7 +1304,10 @@ guc_exec_queue_timedout_job(struct drm_sched_job *drm_job)
smp_rmb();
ret = wait_event_timeout(guc->ct.wq,
!exec_queue_pending_disable(q) ||
- xe_guc_read_stopped(guc), HZ * 5);
+ xe_guc_read_stopped(guc) ||
+ vf_recovery(guc), HZ * 5);
+ if (vf_recovery(guc))
+ goto handle_vf_resume;
if (!ret || xe_guc_read_stopped(guc)) {
trigger_reset:
if (!ret)
@@ -1391,6 +1412,7 @@ guc_exec_queue_timedout_job(struct drm_sched_job *drm_job)
* some thought, do this in a follow up.
*/
xe_sched_submission_start(sched);
+handle_vf_resume:
return DRM_GPU_SCHED_STAT_NO_HANG;
}
@@ -1487,11 +1509,24 @@ static void __guc_exec_queue_process_msg_set_sched_props(struct xe_sched_msg *ms
static void __suspend_fence_signal(struct xe_exec_queue *q)
{
+ struct xe_guc *guc = exec_queue_to_guc(q);
+ struct xe_device *xe = guc_to_xe(guc);
+
if (!q->guc->suspend_pending)
return;
WRITE_ONCE(q->guc->suspend_pending, false);
- wake_up(&q->guc->suspend_wait);
+
+ /*
+ * We use a GuC shared wait queue for VFs because the VF resfix start
+ * interrupt must be able to wake all instances of suspend_wait. This
+ * prevents the VF migration worker from being starved during
+ * scheduling.
+ */
+ if (IS_SRIOV_VF(xe))
+ wake_up_all(&guc->ct.wq);
+ else
+ wake_up(&q->guc->suspend_wait);
}
static void suspend_fence_signal(struct xe_exec_queue *q)
@@ -1512,8 +1547,9 @@ static void __guc_exec_queue_process_msg_suspend(struct xe_sched_msg *msg)
if (guc_exec_queue_allowed_to_change_state(q) && !exec_queue_suspended(q) &&
exec_queue_enabled(q)) {
- wait_event(guc->ct.wq, (q->guc->resume_time != RESUME_PENDING ||
- xe_guc_read_stopped(guc)) && !exec_queue_pending_disable(q));
+ wait_event(guc->ct.wq, vf_recovery(guc) ||
+ ((q->guc->resume_time != RESUME_PENDING ||
+ xe_guc_read_stopped(guc)) && !exec_queue_pending_disable(q)));
if (!xe_guc_read_stopped(guc)) {
s64 since_resume_ms =
@@ -1640,7 +1676,7 @@ static int guc_exec_queue_init(struct xe_exec_queue *q)
q->entity = &ge->entity;
- if (xe_guc_read_stopped(guc))
+ if (xe_guc_read_stopped(guc) || vf_recovery(guc))
xe_sched_stop(sched);
mutex_unlock(&guc->submission_state.lock);
@@ -1786,6 +1822,7 @@ static int guc_exec_queue_suspend(struct xe_exec_queue *q)
static int guc_exec_queue_suspend_wait(struct xe_exec_queue *q)
{
struct xe_guc *guc = exec_queue_to_guc(q);
+ struct xe_device *xe = guc_to_xe(guc);
int ret;
/*
@@ -1793,11 +1830,21 @@ static int guc_exec_queue_suspend_wait(struct xe_exec_queue *q)
* suspend_pending upon kill but to be paranoid but races in which
* suspend_pending is set after kill also check kill here.
*/
- ret = wait_event_interruptible_timeout(q->guc->suspend_wait,
- !READ_ONCE(q->guc->suspend_pending) ||
- exec_queue_killed(q) ||
- xe_guc_read_stopped(guc),
- HZ * 5);
+#define WAIT_COND \
+ (!READ_ONCE(q->guc->suspend_pending) || exec_queue_killed(q) || \
+ xe_guc_read_stopped(guc))
+
+retry:
+ if (IS_SRIOV_VF(xe))
+ ret = wait_event_interruptible_timeout(guc->ct.wq, WAIT_COND ||
+ vf_recovery(guc),
+ HZ * 5);
+ else
+ ret = wait_event_interruptible_timeout(q->guc->suspend_wait,
+ WAIT_COND, HZ * 5);
+
+ if (vf_recovery(guc) && !xe_device_wedged((guc_to_xe(guc))))
+ return -EAGAIN;
if (!ret) {
xe_gt_warn(guc_to_gt(guc),
@@ -1805,8 +1852,13 @@ static int guc_exec_queue_suspend_wait(struct xe_exec_queue *q)
q->guc->id);
/* XXX: Trigger GT reset? */
return -ETIME;
+ } else if (IS_SRIOV_VF(xe) && !WAIT_COND) {
+ /* Corner case on RESFIX DONE where vf_recovery() changes */
+ goto retry;
}
+#undef WAIT_COND
+
return ret < 0 ? ret : 0;
}
@@ -1905,8 +1957,7 @@ int xe_guc_submit_reset_prepare(struct xe_guc *guc)
{
int ret;
- if (xe_gt_WARN_ON(guc_to_gt(guc),
- xe_gt_sriov_vf_recovery_pending(guc_to_gt(guc))))
+ if (xe_gt_WARN_ON(guc_to_gt(guc), vf_recovery(guc)))
return 0;
if (!guc->submission_state.initialized)
diff --git a/drivers/gpu/drm/xe/xe_preempt_fence.c b/drivers/gpu/drm/xe/xe_preempt_fence.c
index 83fbeea5aa20..7f587ca3947d 100644
--- a/drivers/gpu/drm/xe/xe_preempt_fence.c
+++ b/drivers/gpu/drm/xe/xe_preempt_fence.c
@@ -8,6 +8,8 @@
#include <linux/slab.h>
#include "xe_exec_queue.h"
+#include "xe_gt_printk.h"
+#include "xe_guc_exec_queue_types.h"
#include "xe_vm.h"
static void preempt_fence_work_func(struct work_struct *w)
@@ -22,6 +24,15 @@ static void preempt_fence_work_func(struct work_struct *w)
} else if (!q->ops->reset_status(q)) {
int err = q->ops->suspend_wait(q);
+ if (err == -EAGAIN) {
+ xe_gt_dbg(q->gt, "PREEMPT FENCE RETRY guc_id=%d",
+ q->guc->id);
+ queue_work(q->vm->xe->preempt_fence_wq,
+ &pfence->preempt_work);
+ dma_fence_end_signalling(cookie);
+ return;
+ }
+
if (err)
dma_fence_set_error(&pfence->base, err);
} else {
--
2.34.1
^ permalink raw reply related [flat|nested] 42+ messages in thread* [PATCH v9 18/34] drm/xe/vf: Avoid indefinite blocking in preempt rebind worker for VFs supporting migration
2025-10-08 18:04 [PATCH v9 00/34] VF migration redesign Matthew Brost
` (16 preceding siblings ...)
2025-10-08 18:04 ` [PATCH v9 17/34] drm/xe/vf: Wakeup in GuC backend on " Matthew Brost
@ 2025-10-08 18:04 ` Matthew Brost
2025-10-08 18:04 ` [PATCH v9 19/34] drm/xe/vf: Use GUC_HXG_TYPE_EVENT for GuC context register Matthew Brost
` (19 subsequent siblings)
37 siblings, 0 replies; 42+ messages in thread
From: Matthew Brost @ 2025-10-08 18:04 UTC (permalink / raw)
To: intel-xe
Blocking in work queues on a hardware action that may never occur —
especially when it depends on a software fixup also scheduled on a
work queue — is a recipe for deadlock. This situation arises with
the preempt rebind worker and VF post-migration recovery. To prevent
potential deadlocks, avoid indefinite blocking in the preempt rebind
worker for VFs that support migration.
v4:
- Use dma_fence_wait_timeout (CI)
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Tomasz Lis <tomasz.lis@intel.com>
---
drivers/gpu/drm/xe/xe_vm.c | 26 +++++++++++++++++++++++++-
1 file changed, 25 insertions(+), 1 deletion(-)
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 4e914928e0a9..faca626702b8 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -35,6 +35,7 @@
#include "xe_pt.h"
#include "xe_pxp.h"
#include "xe_res_cursor.h"
+#include "xe_sriov_vf.h"
#include "xe_svm.h"
#include "xe_sync.h"
#include "xe_tile.h"
@@ -111,12 +112,22 @@ static int alloc_preempt_fences(struct xe_vm *vm, struct list_head *list,
static int wait_for_existing_preempt_fences(struct xe_vm *vm)
{
struct xe_exec_queue *q;
+ bool vf_migration = IS_SRIOV_VF(vm->xe) &&
+ xe_sriov_vf_migration_supported(vm->xe);
+ signed long wait_time = vf_migration ? HZ / 5 : MAX_SCHEDULE_TIMEOUT;
xe_vm_assert_held(vm);
list_for_each_entry(q, &vm->preempt.exec_queues, lr.link) {
if (q->lr.pfence) {
- long timeout = dma_fence_wait(q->lr.pfence, false);
+ long timeout;
+
+ timeout = dma_fence_wait_timeout(q->lr.pfence, false,
+ wait_time);
+ if (!timeout) {
+ xe_assert(vm->xe, vf_migration);
+ return -EAGAIN;
+ }
/* Only -ETIME on fence indicates VM needs to be killed */
if (timeout < 0 || q->lr.pfence->error == -ETIME)
@@ -541,6 +552,19 @@ static void preempt_rebind_work_func(struct work_struct *w)
out_unlock_outer:
if (err == -EAGAIN) {
trace_xe_vm_rebind_worker_retry(vm);
+
+ /*
+ * We can't block in workers on a VF which supports migration
+ * given this can block the VF post-migration workers from
+ * getting scheduled.
+ */
+ if (IS_SRIOV_VF(vm->xe) &&
+ xe_sriov_vf_migration_supported(vm->xe)) {
+ up_write(&vm->lock);
+ xe_vm_queue_rebind_worker(vm);
+ return;
+ }
+
goto retry;
}
--
2.34.1
^ permalink raw reply related [flat|nested] 42+ messages in thread* [PATCH v9 19/34] drm/xe/vf: Use GUC_HXG_TYPE_EVENT for GuC context register
2025-10-08 18:04 [PATCH v9 00/34] VF migration redesign Matthew Brost
` (17 preceding siblings ...)
2025-10-08 18:04 ` [PATCH v9 18/34] drm/xe/vf: Avoid indefinite blocking in preempt rebind worker for VFs supporting migration Matthew Brost
@ 2025-10-08 18:04 ` Matthew Brost
2025-10-08 18:04 ` [PATCH v9 20/34] drm/xe/vf: Flush and stop CTs in VF post migration recovery Matthew Brost
` (18 subsequent siblings)
37 siblings, 0 replies; 42+ messages in thread
From: Matthew Brost @ 2025-10-08 18:04 UTC (permalink / raw)
To: intel-xe
The only case where the GuC submission backend cannot reason 100%
correctly is when a GuC context is registered during VF post-migration
recovery. In this scenario, it's possible that the GuC context register
H2G is processed, but the immediately following schedule-enable H2G gets
lost. The schedule-enable G2H "done" response is how the GuC state machine
determines whether context registration has completed.
A double register is harmless when using `GUC_HXG_TYPE_EVENT`, as GuC
simply drops the duplicate H2G. To keep things simple, use
`GUC_HXG_TYPE_EVENT` for all context registrations on VFs.
v5:
- Check for xe_sriov_vf_migration_supported (Tomasz)
v7:
- Add comment about subsequent protocol failures (Tomasz)
- Modify commit message (Michal)
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Michal Wajdeczko <michal.wajdeczko@intel.com>
Reviewed-by: Tomasz Lis <tomasz.lis@intel.com>
---
drivers/gpu/drm/xe/xe_guc_ct.c | 35 ++++++++++++++++++++++++++--------
1 file changed, 27 insertions(+), 8 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_guc_ct.c b/drivers/gpu/drm/xe/xe_guc_ct.c
index 9f0090ae64a6..f63ce0cec357 100644
--- a/drivers/gpu/drm/xe/xe_guc_ct.c
+++ b/drivers/gpu/drm/xe/xe_guc_ct.c
@@ -32,6 +32,7 @@
#include "xe_guc_tlb_inval.h"
#include "xe_map.h"
#include "xe_pm.h"
+#include "xe_sriov_vf.h"
#include "xe_trace_guc.h"
static void receive_g2h(struct xe_guc_ct *ct);
@@ -736,6 +737,28 @@ static u16 next_ct_seqno(struct xe_guc_ct *ct, bool is_g2h_fence)
return seqno;
}
+#define MAKE_ACTION(type, __action) \
+({ \
+ FIELD_PREP(GUC_HXG_MSG_0_TYPE, type) | \
+ FIELD_PREP(GUC_HXG_EVENT_MSG_0_ACTION | \
+ GUC_HXG_EVENT_MSG_0_DATA0, __action); \
+})
+
+static bool vf_action_can_safely_fail(struct xe_device *xe, u32 action)
+{
+ /*
+ * When resuming a VF, we can't reliably track whether context
+ * registration has completed in the GuC state machine. It is harmless
+ * to resend the request, as it will fail silently if GUC_HXG_TYPE_EVENT
+ * is used. Additionally, if there is an H2G protocol issue on a VF,
+ * subsequent H2G messages sent as GUC_HXG_TYPE_FAST_REQUEST will likely
+ * fail.
+ */
+ return IS_SRIOV_VF(xe) && xe_sriov_vf_migration_supported(xe) &&
+ (action == XE_GUC_ACTION_REGISTER_CONTEXT_MULTI_LRC ||
+ action == XE_GUC_ACTION_REGISTER_CONTEXT);
+}
+
#define H2G_CT_HEADERS (GUC_CTB_HDR_LEN + 1) /* one DW CTB header and one DW HxG header */
static int h2g_write(struct xe_guc_ct *ct, const u32 *action, u32 len,
@@ -807,18 +830,14 @@ static int h2g_write(struct xe_guc_ct *ct, const u32 *action, u32 len,
FIELD_PREP(GUC_CTB_MSG_0_NUM_DWORDS, len) |
FIELD_PREP(GUC_CTB_MSG_0_FENCE, ct_fence_value);
if (want_response) {
- cmd[1] =
- FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
- FIELD_PREP(GUC_HXG_EVENT_MSG_0_ACTION |
- GUC_HXG_EVENT_MSG_0_DATA0, action[0]);
+ cmd[1] = MAKE_ACTION(GUC_HXG_TYPE_REQUEST, action[0]);
+ } else if (vf_action_can_safely_fail(xe, action[0])) {
+ cmd[1] = MAKE_ACTION(GUC_HXG_TYPE_EVENT, action[0]);
} else {
fast_req_track(ct, ct_fence_value,
FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, action[0]));
- cmd[1] =
- FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_FAST_REQUEST) |
- FIELD_PREP(GUC_HXG_EVENT_MSG_0_ACTION |
- GUC_HXG_EVENT_MSG_0_DATA0, action[0]);
+ cmd[1] = MAKE_ACTION(GUC_HXG_TYPE_FAST_REQUEST, action[0]);
}
/* H2G header in cmd[1] replaces action[0] so: */
--
2.34.1
^ permalink raw reply related [flat|nested] 42+ messages in thread* [PATCH v9 20/34] drm/xe/vf: Flush and stop CTs in VF post migration recovery
2025-10-08 18:04 [PATCH v9 00/34] VF migration redesign Matthew Brost
` (18 preceding siblings ...)
2025-10-08 18:04 ` [PATCH v9 19/34] drm/xe/vf: Use GUC_HXG_TYPE_EVENT for GuC context register Matthew Brost
@ 2025-10-08 18:04 ` Matthew Brost
2025-10-08 18:04 ` [PATCH v9 21/34] drm/xe/vf: Reset TLB invalidations during " Matthew Brost
` (17 subsequent siblings)
37 siblings, 0 replies; 42+ messages in thread
From: Matthew Brost @ 2025-10-08 18:04 UTC (permalink / raw)
To: intel-xe
Flushing CTs (i.e., progressing all pending G2H messages) gives VF
post-migration recovery an accurate view of which H2G messages the GuC
has processed, enabling the GuC submission state machine to correctly
rebuild all state.
Also, stop all CT traffic, as the CT is not live during VF
post-migration recovery.
v3:
- xe_guc_ct_flush_and_stop rename (Michal)
- Drop extra GuC CT WQ wake up (Michal)
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Michal Wajdeczko <michal.wajdeczko@intel.com>
---
drivers/gpu/drm/xe/xe_gt_sriov_vf.c | 1 +
drivers/gpu/drm/xe/xe_guc_ct.c | 10 ++++++++++
drivers/gpu/drm/xe/xe_guc_ct.h | 1 +
3 files changed, 12 insertions(+)
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
index d86a9406a246..3d072cb3c544 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
@@ -1098,6 +1098,7 @@ static void vf_post_migration_shutdown(struct xe_gt *gt)
gt->sriov.vf.migration.recovery_queued = false;
spin_unlock_irq(>->sriov.vf.migration.lock);
+ xe_guc_ct_flush_and_stop(>->uc.guc.ct);
xe_guc_submit_pause(>->uc.guc);
}
diff --git a/drivers/gpu/drm/xe/xe_guc_ct.c b/drivers/gpu/drm/xe/xe_guc_ct.c
index f63ce0cec357..fd2385635962 100644
--- a/drivers/gpu/drm/xe/xe_guc_ct.c
+++ b/drivers/gpu/drm/xe/xe_guc_ct.c
@@ -574,6 +574,16 @@ void xe_guc_ct_disable(struct xe_guc_ct *ct)
stop_g2h_handler(ct);
}
+/**
+ * xe_guc_ct_flush_and_stop - Flush and stop all processing of G2H / H2G
+ * @ct: the &xe_guc_ct
+ */
+void xe_guc_ct_flush_and_stop(struct xe_guc_ct *ct)
+{
+ receive_g2h(ct);
+ xe_guc_ct_stop(ct);
+}
+
/**
* xe_guc_ct_stop - Set GuC to stopped state
* @ct: the &xe_guc_ct
diff --git a/drivers/gpu/drm/xe/xe_guc_ct.h b/drivers/gpu/drm/xe/xe_guc_ct.h
index ae49364f6f28..f8370fa4727f 100644
--- a/drivers/gpu/drm/xe/xe_guc_ct.h
+++ b/drivers/gpu/drm/xe/xe_guc_ct.h
@@ -17,6 +17,7 @@ int xe_guc_ct_init_post_hwconfig(struct xe_guc_ct *ct);
int xe_guc_ct_enable(struct xe_guc_ct *ct);
void xe_guc_ct_disable(struct xe_guc_ct *ct);
void xe_guc_ct_stop(struct xe_guc_ct *ct);
+void xe_guc_ct_flush_and_stop(struct xe_guc_ct *ct);
void xe_guc_ct_fast_path(struct xe_guc_ct *ct);
struct xe_guc_ct_snapshot *xe_guc_ct_snapshot_capture(struct xe_guc_ct *ct);
--
2.34.1
^ permalink raw reply related [flat|nested] 42+ messages in thread* [PATCH v9 21/34] drm/xe/vf: Reset TLB invalidations during VF post migration recovery
2025-10-08 18:04 [PATCH v9 00/34] VF migration redesign Matthew Brost
` (19 preceding siblings ...)
2025-10-08 18:04 ` [PATCH v9 20/34] drm/xe/vf: Flush and stop CTs in VF post migration recovery Matthew Brost
@ 2025-10-08 18:04 ` Matthew Brost
2025-10-08 18:04 ` [PATCH v9 22/34] drm/xe/vf: Kickstart after resfix in " Matthew Brost
` (16 subsequent siblings)
37 siblings, 0 replies; 42+ messages in thread
From: Matthew Brost @ 2025-10-08 18:04 UTC (permalink / raw)
To: intel-xe
TLB invalidation requests can be lost during VF post-migration
recovery. Since the VF has migrated, these invalidations are no longer
needed.
Reset the TLB invalidation frontend, which will signal all pending
fences.
v3:
- Move TLB invalidation reset after pausing submission (Tomasz)
- Adjust commit message (Michal)
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Tomasz Lis <tomasz.lis@intel.com>
---
drivers/gpu/drm/xe/xe_gt_sriov_vf.c | 2 ++
1 file changed, 2 insertions(+)
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
index 3d072cb3c544..5680b1201eb8 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
@@ -35,6 +35,7 @@
#include "xe_sriov.h"
#include "xe_sriov_vf.h"
#include "xe_tile_sriov_vf.h"
+#include "xe_tlb_inval.h"
#include "xe_uc_fw.h"
#include "xe_wopcm.h"
@@ -1100,6 +1101,7 @@ static void vf_post_migration_shutdown(struct xe_gt *gt)
xe_guc_ct_flush_and_stop(>->uc.guc.ct);
xe_guc_submit_pause(>->uc.guc);
+ xe_tlb_inval_reset(>->tlb_inval);
}
static size_t post_migration_scratch_size(struct xe_device *xe)
--
2.34.1
^ permalink raw reply related [flat|nested] 42+ messages in thread* [PATCH v9 22/34] drm/xe/vf: Kickstart after resfix in VF post migration recovery
2025-10-08 18:04 [PATCH v9 00/34] VF migration redesign Matthew Brost
` (20 preceding siblings ...)
2025-10-08 18:04 ` [PATCH v9 21/34] drm/xe/vf: Reset TLB invalidations during " Matthew Brost
@ 2025-10-08 18:04 ` Matthew Brost
2025-10-08 18:04 ` [PATCH v9 23/34] drm/xe: Add CTB_H2G_BUFFER_OFFSET define Matthew Brost
` (15 subsequent siblings)
37 siblings, 0 replies; 42+ messages in thread
From: Matthew Brost @ 2025-10-08 18:04 UTC (permalink / raw)
To: intel-xe
GuC needs to be live for the GuC submission state machine to resubmit
anything lost during VF post-migration recovery. Therefore, move the
kickstart step after `resfix` to ensure proper resubmission.
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Tomasz Lis <tomasz.lis@intel.com>
---
drivers/gpu/drm/xe/xe_gt_sriov_vf.c | 17 +++++++++--------
1 file changed, 9 insertions(+), 8 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
index 5680b1201eb8..438ca899e18a 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
@@ -1128,13 +1128,6 @@ static int vf_post_migration_fixups(struct xe_gt *gt)
static void vf_post_migration_kickstart(struct xe_gt *gt)
{
- /*
- * Make sure interrupts on the new HW are properly set. The GuC IRQ
- * must be working at this point, since the recovery did started,
- * but the rest was not enabled using the procedure from spec.
- */
- xe_irq_resume(gt_to_xe(gt));
-
xe_guc_submit_unpause(>->uc.guc);
}
@@ -1154,6 +1147,13 @@ static int vf_post_migration_notify_resfix_done(struct xe_gt *gt)
if (skip_resfix)
return -EAGAIN;
+ /*
+ * Make sure interrupts on the new HW are properly set. The GuC IRQ
+ * must be working at this point, since the recovery has started,
+ * but the rest was not enabled using the procedure from the spec.
+ */
+ xe_irq_resume(gt_to_xe(gt));
+
return vf_notify_resfix_done(gt);
}
@@ -1177,11 +1177,12 @@ static void vf_post_migration_recovery(struct xe_gt *gt)
if (err)
goto fail;
- vf_post_migration_kickstart(gt);
err = vf_post_migration_notify_resfix_done(gt);
if (err && err != -EAGAIN)
goto fail;
+ vf_post_migration_kickstart(gt);
+
xe_pm_runtime_put(xe);
xe_gt_sriov_notice(gt, "migration recovery ended\n");
return;
--
2.34.1
^ permalink raw reply related [flat|nested] 42+ messages in thread* [PATCH v9 23/34] drm/xe: Add CTB_H2G_BUFFER_OFFSET define
2025-10-08 18:04 [PATCH v9 00/34] VF migration redesign Matthew Brost
` (21 preceding siblings ...)
2025-10-08 18:04 ` [PATCH v9 22/34] drm/xe/vf: Kickstart after resfix in " Matthew Brost
@ 2025-10-08 18:04 ` Matthew Brost
2025-10-08 18:04 ` [PATCH v9 24/34] drm/xe/vf: Start CTs before resfix VF post migration recovery Matthew Brost
` (14 subsequent siblings)
37 siblings, 0 replies; 42+ messages in thread
From: Matthew Brost @ 2025-10-08 18:04 UTC (permalink / raw)
To: intel-xe
Rather than open coding the H2G buffer offset as 'CTB_DESC_SIZE * 2' add
CTB_H2G_BUFFER_OFFSET define.
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Michal Wajdeczko <michal.wajdeczko@intel.com>
---
drivers/gpu/drm/xe/xe_guc_ct.c | 11 ++++++-----
1 file changed, 6 insertions(+), 5 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_guc_ct.c b/drivers/gpu/drm/xe/xe_guc_ct.c
index fd2385635962..503cf5cb5d33 100644
--- a/drivers/gpu/drm/xe/xe_guc_ct.c
+++ b/drivers/gpu/drm/xe/xe_guc_ct.c
@@ -167,6 +167,7 @@ ct_to_xe(struct xe_guc_ct *ct)
*/
#define CTB_DESC_SIZE ALIGN(sizeof(struct guc_ct_buffer_desc), SZ_2K)
+#define CTB_H2G_BUFFER_OFFSET (CTB_DESC_SIZE * 2)
#define CTB_H2G_BUFFER_SIZE (SZ_4K)
#define CTB_G2H_BUFFER_SIZE (SZ_128K)
#define G2H_ROOM_BUFFER_SIZE (CTB_G2H_BUFFER_SIZE / 2)
@@ -190,7 +191,7 @@ long xe_guc_ct_queue_proc_time_jiffies(struct xe_guc_ct *ct)
static size_t guc_ct_size(void)
{
- return 2 * CTB_DESC_SIZE + CTB_H2G_BUFFER_SIZE +
+ return CTB_H2G_BUFFER_OFFSET + CTB_H2G_BUFFER_SIZE +
CTB_G2H_BUFFER_SIZE;
}
@@ -331,7 +332,7 @@ static void guc_ct_ctb_h2g_init(struct xe_device *xe, struct guc_ctb *h2g,
h2g->desc = *map;
xe_map_memset(xe, &h2g->desc, 0, 0, sizeof(struct guc_ct_buffer_desc));
- h2g->cmds = IOSYS_MAP_INIT_OFFSET(map, CTB_DESC_SIZE * 2);
+ h2g->cmds = IOSYS_MAP_INIT_OFFSET(map, CTB_H2G_BUFFER_OFFSET);
}
static void guc_ct_ctb_g2h_init(struct xe_device *xe, struct guc_ctb *g2h,
@@ -349,7 +350,7 @@ static void guc_ct_ctb_g2h_init(struct xe_device *xe, struct guc_ctb *g2h,
g2h->desc = IOSYS_MAP_INIT_OFFSET(map, CTB_DESC_SIZE);
xe_map_memset(xe, &g2h->desc, 0, 0, sizeof(struct guc_ct_buffer_desc));
- g2h->cmds = IOSYS_MAP_INIT_OFFSET(map, CTB_DESC_SIZE * 2 +
+ g2h->cmds = IOSYS_MAP_INIT_OFFSET(map, CTB_H2G_BUFFER_OFFSET +
CTB_H2G_BUFFER_SIZE);
}
@@ -360,7 +361,7 @@ static int guc_ct_ctb_h2g_register(struct xe_guc_ct *ct)
int err;
desc_addr = xe_bo_ggtt_addr(ct->bo);
- ctb_addr = xe_bo_ggtt_addr(ct->bo) + CTB_DESC_SIZE * 2;
+ ctb_addr = xe_bo_ggtt_addr(ct->bo) + CTB_H2G_BUFFER_OFFSET;
size = ct->ctbs.h2g.info.size * sizeof(u32);
err = xe_guc_self_cfg64(guc,
@@ -387,7 +388,7 @@ static int guc_ct_ctb_g2h_register(struct xe_guc_ct *ct)
int err;
desc_addr = xe_bo_ggtt_addr(ct->bo) + CTB_DESC_SIZE;
- ctb_addr = xe_bo_ggtt_addr(ct->bo) + CTB_DESC_SIZE * 2 +
+ ctb_addr = xe_bo_ggtt_addr(ct->bo) + CTB_H2G_BUFFER_OFFSET +
CTB_H2G_BUFFER_SIZE;
size = ct->ctbs.g2h.info.size * sizeof(u32);
--
2.34.1
^ permalink raw reply related [flat|nested] 42+ messages in thread* [PATCH v9 24/34] drm/xe/vf: Start CTs before resfix VF post migration recovery
2025-10-08 18:04 [PATCH v9 00/34] VF migration redesign Matthew Brost
` (22 preceding siblings ...)
2025-10-08 18:04 ` [PATCH v9 23/34] drm/xe: Add CTB_H2G_BUFFER_OFFSET define Matthew Brost
@ 2025-10-08 18:04 ` Matthew Brost
2025-10-08 18:04 ` [PATCH v9 25/34] drm/xe/vf: Abort VF post migration recovery on failure Matthew Brost
` (13 subsequent siblings)
37 siblings, 0 replies; 42+ messages in thread
From: Matthew Brost @ 2025-10-08 18:04 UTC (permalink / raw)
To: intel-xe
Before RESFIX_DONE, all H2G messages stuck in the CT queue need to be
squashed, as they may contain actions with invalid GGTT references or
actions that are unnecessary after the HW change.
Starting the CTs clears all H2Gs in the queue. Any lost H2Gs are
resubmitted by the GuC submission state machine.
v3:
- Don't mess with head / tail values (Michal)
v4:
- Don't mess with broke (Michal)
- Add CTB_H2G_BUFFER_OFFSET (Michal)
v5:
- Adjust commit message (Tomasz)
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Michal Wajdeczko <michal.wajdeczko@intel.com>
---
drivers/gpu/drm/xe/xe_gt_sriov_vf.c | 7 ++++
drivers/gpu/drm/xe/xe_guc_ct.c | 60 ++++++++++++++++++++++-------
drivers/gpu/drm/xe/xe_guc_ct.h | 1 +
3 files changed, 55 insertions(+), 13 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
index 438ca899e18a..68906ffe931f 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
@@ -1126,6 +1126,11 @@ static int vf_post_migration_fixups(struct xe_gt *gt)
return 0;
}
+static void vf_post_migration_rearm(struct xe_gt *gt)
+{
+ xe_guc_ct_restart(>->uc.guc.ct);
+}
+
static void vf_post_migration_kickstart(struct xe_gt *gt)
{
xe_guc_submit_unpause(>->uc.guc);
@@ -1177,6 +1182,8 @@ static void vf_post_migration_recovery(struct xe_gt *gt)
if (err)
goto fail;
+ vf_post_migration_rearm(gt);
+
err = vf_post_migration_notify_resfix_done(gt);
if (err && err != -EAGAIN)
goto fail;
diff --git a/drivers/gpu/drm/xe/xe_guc_ct.c b/drivers/gpu/drm/xe/xe_guc_ct.c
index 503cf5cb5d33..3472e4ea2609 100644
--- a/drivers/gpu/drm/xe/xe_guc_ct.c
+++ b/drivers/gpu/drm/xe/xe_guc_ct.c
@@ -502,7 +502,7 @@ static void ct_exit_safe_mode(struct xe_guc_ct *ct)
xe_gt_dbg(ct_to_gt(ct), "GuC CT safe-mode disabled\n");
}
-int xe_guc_ct_enable(struct xe_guc_ct *ct)
+static int __xe_guc_ct_start(struct xe_guc_ct *ct, bool needs_register)
{
struct xe_device *xe = ct_to_xe(ct);
struct xe_gt *gt = ct_to_gt(ct);
@@ -510,21 +510,29 @@ int xe_guc_ct_enable(struct xe_guc_ct *ct)
xe_gt_assert(gt, !xe_guc_ct_enabled(ct));
- xe_map_memset(xe, &ct->bo->vmap, 0, 0, xe_bo_size(ct->bo));
- guc_ct_ctb_h2g_init(xe, &ct->ctbs.h2g, &ct->bo->vmap);
- guc_ct_ctb_g2h_init(xe, &ct->ctbs.g2h, &ct->bo->vmap);
+ if (needs_register) {
+ xe_map_memset(xe, &ct->bo->vmap, 0, 0, xe_bo_size(ct->bo));
+ guc_ct_ctb_h2g_init(xe, &ct->ctbs.h2g, &ct->bo->vmap);
+ guc_ct_ctb_g2h_init(xe, &ct->ctbs.g2h, &ct->bo->vmap);
- err = guc_ct_ctb_h2g_register(ct);
- if (err)
- goto err_out;
+ err = guc_ct_ctb_h2g_register(ct);
+ if (err)
+ goto err_out;
- err = guc_ct_ctb_g2h_register(ct);
- if (err)
- goto err_out;
+ err = guc_ct_ctb_g2h_register(ct);
+ if (err)
+ goto err_out;
- err = guc_ct_control_toggle(ct, true);
- if (err)
- goto err_out;
+ err = guc_ct_control_toggle(ct, true);
+ if (err)
+ goto err_out;
+ } else {
+ ct->ctbs.h2g.info.broken = false;
+ ct->ctbs.g2h.info.broken = false;
+ /* Skip everything in H2G buffer */
+ xe_map_memset(xe, &ct->bo->vmap, CTB_H2G_BUFFER_OFFSET, 0,
+ CTB_H2G_BUFFER_SIZE);
+ }
guc_ct_change_state(ct, XE_GUC_CT_STATE_ENABLED);
@@ -556,6 +564,32 @@ int xe_guc_ct_enable(struct xe_guc_ct *ct)
return err;
}
+/**
+ * xe_guc_ct_restart() - Restart GuC CT
+ * @ct: the &xe_guc_ct
+ *
+ * Restart GuC CT to an empty state without issuing a CT register MMIO command.
+ *
+ * Return: 0 on success, or a negative errno on failure.
+ */
+int xe_guc_ct_restart(struct xe_guc_ct *ct)
+{
+ return __xe_guc_ct_start(ct, false);
+}
+
+/**
+ * xe_guc_ct_enable() - Enable GuC CT
+ * @ct: the &xe_guc_ct
+ *
+ * Enable GuC CT to an empty state and issue a CT register MMIO command.
+ *
+ * Return: 0 on success, or a negative errno on failure.
+ */
+int xe_guc_ct_enable(struct xe_guc_ct *ct)
+{
+ return __xe_guc_ct_start(ct, true);
+}
+
static void stop_g2h_handler(struct xe_guc_ct *ct)
{
cancel_work_sync(&ct->g2h_worker);
diff --git a/drivers/gpu/drm/xe/xe_guc_ct.h b/drivers/gpu/drm/xe/xe_guc_ct.h
index f8370fa4727f..ca1ce2b3c354 100644
--- a/drivers/gpu/drm/xe/xe_guc_ct.h
+++ b/drivers/gpu/drm/xe/xe_guc_ct.h
@@ -15,6 +15,7 @@ int xe_guc_ct_init_noalloc(struct xe_guc_ct *ct);
int xe_guc_ct_init(struct xe_guc_ct *ct);
int xe_guc_ct_init_post_hwconfig(struct xe_guc_ct *ct);
int xe_guc_ct_enable(struct xe_guc_ct *ct);
+int xe_guc_ct_restart(struct xe_guc_ct *ct);
void xe_guc_ct_disable(struct xe_guc_ct *ct);
void xe_guc_ct_stop(struct xe_guc_ct *ct);
void xe_guc_ct_flush_and_stop(struct xe_guc_ct *ct);
--
2.34.1
^ permalink raw reply related [flat|nested] 42+ messages in thread* [PATCH v9 25/34] drm/xe/vf: Abort VF post migration recovery on failure
2025-10-08 18:04 [PATCH v9 00/34] VF migration redesign Matthew Brost
` (23 preceding siblings ...)
2025-10-08 18:04 ` [PATCH v9 24/34] drm/xe/vf: Start CTs before resfix VF post migration recovery Matthew Brost
@ 2025-10-08 18:04 ` Matthew Brost
2025-10-08 19:49 ` Niranjana Vishwanathapura
2025-10-08 18:04 ` [PATCH v9 26/34] drm/xe/vf: Replay GuC submission state on pause / unpause Matthew Brost
` (12 subsequent siblings)
37 siblings, 1 reply; 42+ messages in thread
From: Matthew Brost @ 2025-10-08 18:04 UTC (permalink / raw)
To: intel-xe
If VF post-migration recovery fails, the device is wedged. However,
submission queues still need to be enabled for proper cleanup. In such
cases, call into the GuC submission backend to restart all queues that
were previously paused.
v3:
- s/Avort/Abort (Tomasz)
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Tomasz Lis <tomasz.lis@intel.com>
---
drivers/gpu/drm/xe/xe_gt_sriov_vf.c | 10 ++++++++++
drivers/gpu/drm/xe/xe_guc_submit.c | 20 ++++++++++++++++++++
drivers/gpu/drm/xe/xe_guc_submit.h | 1 +
3 files changed, 31 insertions(+)
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
index 68906ffe931f..fe66c2236ef4 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
@@ -1136,6 +1136,15 @@ static void vf_post_migration_kickstart(struct xe_gt *gt)
xe_guc_submit_unpause(>->uc.guc);
}
+static void vf_post_migration_abort(struct xe_gt *gt)
+{
+ spin_lock_irq(>->sriov.vf.migration.lock);
+ WRITE_ONCE(gt->sriov.vf.migration.recovery_inprogress, false);
+ spin_unlock_irq(>->sriov.vf.migration.lock);
+
+ xe_guc_submit_pause_abort(>->uc.guc);
+}
+
static int vf_post_migration_notify_resfix_done(struct xe_gt *gt)
{
bool skip_resfix = false;
@@ -1194,6 +1203,7 @@ static void vf_post_migration_recovery(struct xe_gt *gt)
xe_gt_sriov_notice(gt, "migration recovery ended\n");
return;
fail:
+ vf_post_migration_abort(gt);
xe_pm_runtime_put(xe);
xe_gt_sriov_err(gt, "migration recovery failed (%pe)\n", ERR_PTR(err));
xe_device_declare_wedged(xe);
diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
index 7f0ea35f4f0a..4a45c4934dce 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit.c
+++ b/drivers/gpu/drm/xe/xe_guc_submit.c
@@ -2098,6 +2098,26 @@ void xe_guc_submit_unpause(struct xe_guc *guc)
wake_up_all(&guc->ct.wq);
}
+/**
+ * xe_guc_submit_pause_abort - Abort all paused submission tasks on given GuC.
+ * @guc: the &xe_guc struct instance whose scheduler is to be aborted
+ */
+void xe_guc_submit_pause_abort(struct xe_guc *guc)
+{
+ struct xe_exec_queue *q;
+ unsigned long index;
+
+ mutex_lock(&guc->submission_state.lock);
+ xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) {
+ struct xe_gpu_scheduler *sched = &q->guc->sched;
+
+ xe_sched_submission_start(sched);
+ if (exec_queue_killed_or_banned_or_wedged(q))
+ xe_guc_exec_queue_trigger_cleanup(q);
+ }
+ mutex_unlock(&guc->submission_state.lock);
+}
+
static struct xe_exec_queue *
g2h_exec_queue_lookup(struct xe_guc *guc, u32 guc_id)
{
diff --git a/drivers/gpu/drm/xe/xe_guc_submit.h b/drivers/gpu/drm/xe/xe_guc_submit.h
index f535fe3895e5..fe82c317048e 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit.h
+++ b/drivers/gpu/drm/xe/xe_guc_submit.h
@@ -22,6 +22,7 @@ void xe_guc_submit_stop(struct xe_guc *guc);
int xe_guc_submit_start(struct xe_guc *guc);
void xe_guc_submit_pause(struct xe_guc *guc);
void xe_guc_submit_unpause(struct xe_guc *guc);
+void xe_guc_submit_pause_abort(struct xe_guc *guc);
void xe_guc_submit_wedge(struct xe_guc *guc);
int xe_guc_read_stopped(struct xe_guc *guc);
--
2.34.1
^ permalink raw reply related [flat|nested] 42+ messages in thread* Re: [PATCH v9 25/34] drm/xe/vf: Abort VF post migration recovery on failure
2025-10-08 18:04 ` [PATCH v9 25/34] drm/xe/vf: Abort VF post migration recovery on failure Matthew Brost
@ 2025-10-08 19:49 ` Niranjana Vishwanathapura
0 siblings, 0 replies; 42+ messages in thread
From: Niranjana Vishwanathapura @ 2025-10-08 19:49 UTC (permalink / raw)
To: Matthew Brost; +Cc: intel-xe
On Wed, Oct 08, 2025 at 11:04:51AM -0700, Matthew Brost wrote:
>If VF post-migration recovery fails, the device is wedged. However,
>submission queues still need to be enabled for proper cleanup. In such
>cases, call into the GuC submission backend to restart all queues that
>were previously paused.
>
>v3:
> - s/Avort/Abort (Tomasz)
>
>Signed-off-by: Matthew Brost <matthew.brost@intel.com>
>Reviewed-by: Tomasz Lis <tomasz.lis@intel.com>
>---
> drivers/gpu/drm/xe/xe_gt_sriov_vf.c | 10 ++++++++++
> drivers/gpu/drm/xe/xe_guc_submit.c | 20 ++++++++++++++++++++
> drivers/gpu/drm/xe/xe_guc_submit.h | 1 +
> 3 files changed, 31 insertions(+)
>
>diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
>index 68906ffe931f..fe66c2236ef4 100644
>--- a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
>+++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
>@@ -1136,6 +1136,15 @@ static void vf_post_migration_kickstart(struct xe_gt *gt)
> xe_guc_submit_unpause(&gt->uc.guc);
> }
>
>+static void vf_post_migration_abort(struct xe_gt *gt)
>+{
>+ spin_lock_irq(&gt->sriov.vf.migration.lock);
>+ WRITE_ONCE(gt->sriov.vf.migration.recovery_inprogress, false);
>+ spin_unlock_irq(&gt->sriov.vf.migration.lock);
>+
>+ xe_guc_submit_pause_abort(&gt->uc.guc);
>+}
>+
> static int vf_post_migration_notify_resfix_done(struct xe_gt *gt)
> {
> bool skip_resfix = false;
>@@ -1194,6 +1203,7 @@ static void vf_post_migration_recovery(struct xe_gt *gt)
> xe_gt_sriov_notice(gt, "migration recovery ended\n");
> return;
> fail:
>+ vf_post_migration_abort(gt);
> xe_pm_runtime_put(xe);
> xe_gt_sriov_err(gt, "migration recovery failed (%pe)\n", ERR_PTR(err));
> xe_device_declare_wedged(xe);
>diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
>index 7f0ea35f4f0a..4a45c4934dce 100644
>--- a/drivers/gpu/drm/xe/xe_guc_submit.c
>+++ b/drivers/gpu/drm/xe/xe_guc_submit.c
>@@ -2098,6 +2098,26 @@ void xe_guc_submit_unpause(struct xe_guc *guc)
> wake_up_all(&guc->ct.wq);
> }
>
>+/**
>+ * xe_guc_submit_abort - Abort all paused submission task on given GuC.
should be xe_guc_submit_pause_abort.
Niranjana
>+ * @guc: the &xe_guc struct instance whose scheduler is to be aborted
>+ */
>+void xe_guc_submit_pause_abort(struct xe_guc *guc)
>+{
>+ struct xe_exec_queue *q;
>+ unsigned long index;
>+
>+ mutex_lock(&guc->submission_state.lock);
>+ xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) {
>+ struct xe_gpu_scheduler *sched = &q->guc->sched;
>+
>+ xe_sched_submission_start(sched);
>+ if (exec_queue_killed_or_banned_or_wedged(q))
>+ xe_guc_exec_queue_trigger_cleanup(q);
>+ }
>+ mutex_unlock(&guc->submission_state.lock);
>+}
>+
> static struct xe_exec_queue *
> g2h_exec_queue_lookup(struct xe_guc *guc, u32 guc_id)
> {
>diff --git a/drivers/gpu/drm/xe/xe_guc_submit.h b/drivers/gpu/drm/xe/xe_guc_submit.h
>index f535fe3895e5..fe82c317048e 100644
>--- a/drivers/gpu/drm/xe/xe_guc_submit.h
>+++ b/drivers/gpu/drm/xe/xe_guc_submit.h
>@@ -22,6 +22,7 @@ void xe_guc_submit_stop(struct xe_guc *guc);
> int xe_guc_submit_start(struct xe_guc *guc);
> void xe_guc_submit_pause(struct xe_guc *guc);
> void xe_guc_submit_unpause(struct xe_guc *guc);
>+void xe_guc_submit_pause_abort(struct xe_guc *guc);
> void xe_guc_submit_wedge(struct xe_guc *guc);
>
> int xe_guc_read_stopped(struct xe_guc *guc);
>--
>2.34.1
>
^ permalink raw reply [flat|nested] 42+ messages in thread
* [PATCH v9 26/34] drm/xe/vf: Replay GuC submission state on pause / unpause
2025-10-08 18:04 [PATCH v9 00/34] VF migration redesign Matthew Brost
` (24 preceding siblings ...)
2025-10-08 18:04 ` [PATCH v9 25/34] drm/xe/vf: Abort VF post migration recovery on failure Matthew Brost
@ 2025-10-08 18:04 ` Matthew Brost
2025-10-08 18:04 ` [PATCH v9 27/34] drm/xe: Move queue init before LRC creation Matthew Brost
` (11 subsequent siblings)
37 siblings, 0 replies; 42+ messages in thread
From: Matthew Brost @ 2025-10-08 18:04 UTC (permalink / raw)
To: intel-xe
Fixup GuC submission pause / unpause functions to properly replay any
possible state lost during VF post migration recovery.
v3:
- Add helpers for revert / replay (Tomasz)
- Add comment around WQ NOPs (Tomasz)
v7:
- Only fixup / replay parallel queues once (Testing)
- Skip unpause step on queues created after resfix done (Testing)
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Tomasz Lis <tomasz.lis@intel.com>
---
drivers/gpu/drm/xe/xe_gpu_scheduler.c | 27 +-
drivers/gpu/drm/xe/xe_gpu_scheduler.h | 8 +-
drivers/gpu/drm/xe/xe_gt_sriov_vf.c | 1 +
drivers/gpu/drm/xe/xe_guc_exec_queue_types.h | 15 +
drivers/gpu/drm/xe/xe_guc_submit.c | 271 +++++++++++++++++--
drivers/gpu/drm/xe/xe_guc_submit.h | 1 +
drivers/gpu/drm/xe/xe_sched_job_types.h | 4 +
7 files changed, 295 insertions(+), 32 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_gpu_scheduler.c b/drivers/gpu/drm/xe/xe_gpu_scheduler.c
index 455ccaf17314..f91e06d03511 100644
--- a/drivers/gpu/drm/xe/xe_gpu_scheduler.c
+++ b/drivers/gpu/drm/xe/xe_gpu_scheduler.c
@@ -101,19 +101,6 @@ void xe_sched_submission_stop(struct xe_gpu_scheduler *sched)
cancel_work_sync(&sched->work_process_msg);
}
-/**
- * xe_sched_submission_stop_async - Stop further runs of submission tasks on a scheduler.
- * @sched: the &xe_gpu_scheduler struct instance
- *
- * This call disables further runs of scheduling work queue. It does not wait
- * for any in-progress runs to finish, only makes sure no further runs happen
- * afterwards.
- */
-void xe_sched_submission_stop_async(struct xe_gpu_scheduler *sched)
-{
- drm_sched_wqueue_stop(&sched->base);
-}
-
void xe_sched_submission_resume_tdr(struct xe_gpu_scheduler *sched)
{
drm_sched_resume_timeout(&sched->base, sched->base.timeout);
@@ -135,3 +122,17 @@ void xe_sched_add_msg_locked(struct xe_gpu_scheduler *sched,
list_add_tail(&msg->link, &sched->msgs);
xe_sched_process_msg_queue(sched);
}
+
+/**
+ * xe_sched_add_msg_head() - Xe GPU scheduler add message to head of list
+ * @sched: Xe GPU scheduler
+ * @msg: Message to add
+ */
+void xe_sched_add_msg_head(struct xe_gpu_scheduler *sched,
+ struct xe_sched_msg *msg)
+{
+ lockdep_assert_held(&sched->base.job_list_lock);
+
+ list_add(&msg->link, &sched->msgs);
+ xe_sched_process_msg_queue(sched);
+}
diff --git a/drivers/gpu/drm/xe/xe_gpu_scheduler.h b/drivers/gpu/drm/xe/xe_gpu_scheduler.h
index 3a9ff78d9346..9955397aaaa9 100644
--- a/drivers/gpu/drm/xe/xe_gpu_scheduler.h
+++ b/drivers/gpu/drm/xe/xe_gpu_scheduler.h
@@ -7,7 +7,7 @@
#define _XE_GPU_SCHEDULER_H_
#include "xe_gpu_scheduler_types.h"
-#include "xe_sched_job_types.h"
+#include "xe_sched_job.h"
int xe_sched_init(struct xe_gpu_scheduler *sched,
const struct drm_sched_backend_ops *ops,
@@ -21,7 +21,6 @@ void xe_sched_fini(struct xe_gpu_scheduler *sched);
void xe_sched_submission_start(struct xe_gpu_scheduler *sched);
void xe_sched_submission_stop(struct xe_gpu_scheduler *sched);
-void xe_sched_submission_stop_async(struct xe_gpu_scheduler *sched);
void xe_sched_submission_resume_tdr(struct xe_gpu_scheduler *sched);
@@ -29,6 +28,8 @@ void xe_sched_add_msg(struct xe_gpu_scheduler *sched,
struct xe_sched_msg *msg);
void xe_sched_add_msg_locked(struct xe_gpu_scheduler *sched,
struct xe_sched_msg *msg);
+void xe_sched_add_msg_head(struct xe_gpu_scheduler *sched,
+ struct xe_sched_msg *msg);
static inline void xe_sched_msg_lock(struct xe_gpu_scheduler *sched)
{
@@ -58,7 +59,8 @@ static inline void xe_sched_resubmit_jobs(struct xe_gpu_scheduler *sched)
struct drm_sched_fence *s_fence = s_job->s_fence;
struct dma_fence *hw_fence = s_fence->parent;
- if (hw_fence && !dma_fence_is_signaled(hw_fence))
+ if (to_xe_sched_job(s_job)->skip_emit ||
+ (hw_fence && !dma_fence_is_signaled(hw_fence)))
sched->base.ops->run_job(s_job);
}
}
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
index fe66c2236ef4..44bda1d4cb38 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
@@ -1129,6 +1129,7 @@ static int vf_post_migration_fixups(struct xe_gt *gt)
static void vf_post_migration_rearm(struct xe_gt *gt)
{
xe_guc_ct_restart(&gt->uc.guc.ct);
+ xe_guc_submit_unpause_prepare(&gt->uc.guc);
}
static void vf_post_migration_kickstart(struct xe_gt *gt)
diff --git a/drivers/gpu/drm/xe/xe_guc_exec_queue_types.h b/drivers/gpu/drm/xe/xe_guc_exec_queue_types.h
index c30c0e3ccbbb..a3b034e4b205 100644
--- a/drivers/gpu/drm/xe/xe_guc_exec_queue_types.h
+++ b/drivers/gpu/drm/xe/xe_guc_exec_queue_types.h
@@ -51,6 +51,21 @@ struct xe_guc_exec_queue {
wait_queue_head_t suspend_wait;
/** @suspend_pending: a suspend of the exec_queue is pending */
bool suspend_pending;
+ /**
+ * @needs_cleanup: Needs a cleanup message during VF post migration
+ * recovery.
+ */
+ bool needs_cleanup;
+ /**
+ * @needs_suspend: Needs a suspend message during VF post migration
+ * recovery.
+ */
+ bool needs_suspend;
+ /**
+ * @needs_resume: Needs a resume message during VF post migration
+ * recovery.
+ */
+ bool needs_resume;
};
#endif
diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
index 4a45c4934dce..d07ff014492e 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit.c
+++ b/drivers/gpu/drm/xe/xe_guc_submit.c
@@ -142,6 +142,11 @@ static void set_exec_queue_destroyed(struct xe_exec_queue *q)
atomic_or(EXEC_QUEUE_STATE_DESTROYED, &q->guc->state);
}
+static void clear_exec_queue_destroyed(struct xe_exec_queue *q)
+{
+ atomic_and(~EXEC_QUEUE_STATE_DESTROYED, &q->guc->state);
+}
+
static bool exec_queue_banned(struct xe_exec_queue *q)
{
return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_BANNED;
@@ -222,7 +227,12 @@ static void set_exec_queue_extra_ref(struct xe_exec_queue *q)
atomic_or(EXEC_QUEUE_STATE_EXTRA_REF, &q->guc->state);
}
-static bool __maybe_unused exec_queue_pending_resume(struct xe_exec_queue *q)
+static void clear_exec_queue_extra_ref(struct xe_exec_queue *q)
+{
+ atomic_and(~EXEC_QUEUE_STATE_EXTRA_REF, &q->guc->state);
+}
+
+static bool exec_queue_pending_resume(struct xe_exec_queue *q)
{
return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_PENDING_RESUME;
}
@@ -237,7 +247,7 @@ static void clear_exec_queue_pending_resume(struct xe_exec_queue *q)
atomic_and(~EXEC_QUEUE_STATE_PENDING_RESUME, &q->guc->state);
}
-static bool __maybe_unused exec_queue_pending_tdr_exit(struct xe_exec_queue *q)
+static bool exec_queue_pending_tdr_exit(struct xe_exec_queue *q)
{
return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_PENDING_TDR_EXIT;
}
@@ -799,7 +809,7 @@ static void wq_item_append(struct xe_exec_queue *q)
}
#define RESUME_PENDING ~0x0ull
-static void submit_exec_queue(struct xe_exec_queue *q)
+static void submit_exec_queue(struct xe_exec_queue *q, struct xe_sched_job *job)
{
struct xe_guc *guc = exec_queue_to_guc(q);
struct xe_lrc *lrc = q->lrc[0];
@@ -811,10 +821,13 @@ static void submit_exec_queue(struct xe_exec_queue *q)
xe_gt_assert(guc_to_gt(guc), exec_queue_registered(q));
- if (xe_exec_queue_is_parallel(q))
- wq_item_append(q);
- else
- xe_lrc_set_ring_tail(lrc, lrc->ring.tail);
+ if (!job->skip_emit || job->last_replay) {
+ if (xe_exec_queue_is_parallel(q))
+ wq_item_append(q);
+ else
+ xe_lrc_set_ring_tail(lrc, lrc->ring.tail);
+ job->last_replay = false;
+ }
if (exec_queue_suspended(q) && !xe_exec_queue_is_parallel(q))
return;
@@ -867,8 +880,10 @@ guc_exec_queue_run_job(struct drm_sched_job *drm_job)
if (!killed_or_banned_or_wedged && !xe_sched_job_is_error(job)) {
if (!exec_queue_registered(q))
register_exec_queue(q, GUC_CONTEXT_NORMAL);
- q->ring_ops->emit_job(job);
- submit_exec_queue(q);
+ if (!job->skip_emit)
+ q->ring_ops->emit_job(job);
+ submit_exec_queue(q, job);
+ job->skip_emit = false;
}
/*
@@ -1592,6 +1607,7 @@ static void __guc_exec_queue_process_msg_resume(struct xe_sched_msg *msg)
#define RESUME 4
#define OPCODE_MASK 0xf
#define MSG_LOCKED BIT(8)
+#define MSG_HEAD BIT(9)
static void guc_exec_queue_process_msg(struct xe_sched_msg *msg)
{
@@ -1716,12 +1732,24 @@ static void guc_exec_queue_add_msg(struct xe_exec_queue *q, struct xe_sched_msg
msg->private_data = q;
trace_xe_sched_msg_add(msg);
- if (opcode & MSG_LOCKED)
+ if (opcode & MSG_HEAD)
+ xe_sched_add_msg_head(&q->guc->sched, msg);
+ else if (opcode & MSG_LOCKED)
xe_sched_add_msg_locked(&q->guc->sched, msg);
else
xe_sched_add_msg(&q->guc->sched, msg);
}
+static void guc_exec_queue_try_add_msg_head(struct xe_exec_queue *q,
+ struct xe_sched_msg *msg,
+ u32 opcode)
+{
+ if (!list_empty(&msg->link))
+ return;
+
+ guc_exec_queue_add_msg(q, msg, opcode | MSG_LOCKED | MSG_HEAD);
+}
+
static bool guc_exec_queue_try_add_msg(struct xe_exec_queue *q,
struct xe_sched_msg *msg,
u32 opcode)
@@ -2009,6 +2037,105 @@ void xe_guc_submit_stop(struct xe_guc *guc)
}
+static void guc_exec_queue_revert_pending_state_change(struct xe_exec_queue *q)
+{
+ bool pending_enable, pending_disable, pending_resume;
+
+ pending_enable = exec_queue_pending_enable(q);
+ pending_resume = exec_queue_pending_resume(q);
+
+ if (pending_enable && pending_resume)
+ q->guc->needs_resume = true;
+
+ if (pending_enable && !pending_resume &&
+ !exec_queue_pending_tdr_exit(q)) {
+ clear_exec_queue_registered(q);
+ if (xe_exec_queue_is_lr(q))
+ xe_exec_queue_put(q);
+ }
+
+ if (pending_enable) {
+ clear_exec_queue_enabled(q);
+ clear_exec_queue_pending_resume(q);
+ clear_exec_queue_pending_tdr_exit(q);
+ clear_exec_queue_pending_enable(q);
+ }
+
+ if (exec_queue_destroyed(q) && exec_queue_registered(q)) {
+ clear_exec_queue_destroyed(q);
+ if (exec_queue_extra_ref(q))
+ xe_exec_queue_put(q);
+ else
+ q->guc->needs_cleanup = true;
+ clear_exec_queue_extra_ref(q);
+ }
+
+ pending_disable = exec_queue_pending_disable(q);
+
+ if (pending_disable && exec_queue_suspended(q)) {
+ clear_exec_queue_suspended(q);
+ q->guc->needs_suspend = true;
+ }
+
+ if (pending_disable) {
+ if (!pending_enable)
+ set_exec_queue_enabled(q);
+ clear_exec_queue_pending_disable(q);
+ clear_exec_queue_check_timeout(q);
+ }
+
+ q->guc->resume_time = 0;
+}
+
+/*
+ * This function is quite complex but is the only real way to ensure no state
+ * is lost during VF resume flows. The function scans the queue state, makes
+ * adjustments as needed, and queues jobs / messages which are replayed upon
+ * unpause.
+ */
+static void guc_exec_queue_pause(struct xe_guc *guc, struct xe_exec_queue *q)
+{
+ struct xe_gpu_scheduler *sched = &q->guc->sched;
+ struct xe_sched_job *job;
+ int i;
+
+ lockdep_assert_held(&guc->submission_state.lock);
+
+ /* Stop scheduling + flush any DRM scheduler operations */
+ xe_sched_submission_stop(sched);
+ if (xe_exec_queue_is_lr(q))
+ cancel_work_sync(&q->guc->lr_tdr);
+ else
+ cancel_delayed_work_sync(&sched->base.work_tdr);
+
+ guc_exec_queue_revert_pending_state_change(q);
+
+ if (xe_exec_queue_is_parallel(q)) {
+ struct xe_device *xe = guc_to_xe(guc);
+ struct iosys_map map = xe_lrc_parallel_map(q->lrc[0]);
+
+ /*
+ * NOP existing WQ commands that may contain stale GGTT
+ * addresses. These will be replayed upon unpause. The hardware
+ * seems to get confused if the WQ head/tail pointers are
+ * adjusted.
+ */
+ for (i = 0; i < WQ_SIZE / sizeof(u32); ++i)
+ parallel_write(xe, map, wq[i],
+ FIELD_PREP(WQ_TYPE_MASK, WQ_TYPE_NOOP) |
+ FIELD_PREP(WQ_LEN_MASK, 0));
+ }
+
+ job = xe_sched_first_pending_job(sched);
+ if (job) {
+ /*
+ * Adjust software tail so jobs submitted overwrite previous
+ * position in ring buffer with new GGTT addresses.
+ */
+ for (i = 0; i < q->width; ++i)
+ q->lrc[i]->ring.tail = job->ptrs[i].head;
+ }
+}
+
/**
* xe_guc_submit_pause - Stop further runs of submission tasks on given GuC.
* @guc: the &xe_guc struct instance whose scheduler is to be disabled
@@ -2018,8 +2145,17 @@ void xe_guc_submit_pause(struct xe_guc *guc)
struct xe_exec_queue *q;
unsigned long index;
- xa_for_each(&guc->submission_state.exec_queue_lookup, index, q)
- xe_sched_submission_stop_async(&q->guc->sched);
+ xe_gt_assert(guc_to_gt(guc), vf_recovery(guc));
+
+ mutex_lock(&guc->submission_state.lock);
+ xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) {
+ /* Prevent redundant attempts to stop parallel queues */
+ if (q->guc->id != index)
+ continue;
+
+ guc_exec_queue_pause(guc, q);
+ }
+ mutex_unlock(&guc->submission_state.lock);
}
static void guc_exec_queue_start(struct xe_exec_queue *q)
@@ -2076,11 +2212,97 @@ int xe_guc_submit_start(struct xe_guc *guc)
return 0;
}
-static void guc_exec_queue_unpause(struct xe_exec_queue *q)
+static void guc_exec_queue_unpause_prepare(struct xe_guc *guc,
+ struct xe_exec_queue *q)
+{
+ struct xe_gpu_scheduler *sched = &q->guc->sched;
+ struct drm_sched_job *s_job;
+ struct xe_sched_job *job = NULL;
+
+ list_for_each_entry(s_job, &sched->base.pending_list, list) {
+ job = to_xe_sched_job(s_job);
+
+ q->ring_ops->emit_job(job);
+ job->skip_emit = true;
+ }
+
+ if (job)
+ job->last_replay = true;
+}
+
+/**
+ * xe_guc_submit_unpause_prepare - Prepare unpause submission tasks on given GuC.
+ * @guc: the &xe_guc struct instance whose scheduler is to be prepared for unpause
+ */
+void xe_guc_submit_unpause_prepare(struct xe_guc *guc)
+{
+ struct xe_exec_queue *q;
+ unsigned long index;
+
+ xe_gt_assert(guc_to_gt(guc), vf_recovery(guc));
+
+ mutex_lock(&guc->submission_state.lock);
+ xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) {
+ /* Prevent redundant attempts to stop parallel queues */
+ if (q->guc->id != index)
+ continue;
+
+ guc_exec_queue_unpause_prepare(guc, q);
+ }
+ mutex_unlock(&guc->submission_state.lock);
+}
+
+static void guc_exec_queue_replay_pending_state_change(struct xe_exec_queue *q)
+{
+ struct xe_gpu_scheduler *sched = &q->guc->sched;
+ struct xe_sched_msg *msg;
+
+ if (q->guc->needs_cleanup) {
+ msg = q->guc->static_msgs + STATIC_MSG_CLEANUP;
+
+ guc_exec_queue_add_msg(q, msg, CLEANUP);
+ q->guc->needs_cleanup = false;
+ }
+
+ if (q->guc->needs_suspend) {
+ msg = q->guc->static_msgs + STATIC_MSG_SUSPEND;
+
+ xe_sched_msg_lock(sched);
+ guc_exec_queue_try_add_msg_head(q, msg, SUSPEND);
+ xe_sched_msg_unlock(sched);
+
+ q->guc->needs_suspend = false;
+ }
+
+ /*
+ * The resume must be in the message queue before the suspend as it is
+ * not possible for a resume to be issued if a suspend pending is, but
+ * the inverse is possible.
+ */
+ if (q->guc->needs_resume) {
+ msg = q->guc->static_msgs + STATIC_MSG_RESUME;
+
+ xe_sched_msg_lock(sched);
+ guc_exec_queue_try_add_msg_head(q, msg, RESUME);
+ xe_sched_msg_unlock(sched);
+
+ q->guc->needs_resume = false;
+ }
+}
+
+static void guc_exec_queue_unpause(struct xe_guc *guc, struct xe_exec_queue *q)
{
struct xe_gpu_scheduler *sched = &q->guc->sched;
+ bool needs_tdr = exec_queue_killed_or_banned_or_wedged(q);
+
+ lockdep_assert_held(&guc->submission_state.lock);
+ xe_sched_resubmit_jobs(sched);
+ guc_exec_queue_replay_pending_state_change(q);
xe_sched_submission_start(sched);
+ if (needs_tdr)
+ xe_guc_exec_queue_trigger_cleanup(q);
+ xe_sched_submission_resume_tdr(sched);
}
/**
@@ -2092,10 +2314,19 @@ void xe_guc_submit_unpause(struct xe_guc *guc)
struct xe_exec_queue *q;
unsigned long index;
- xa_for_each(&guc->submission_state.exec_queue_lookup, index, q)
- guc_exec_queue_unpause(q);
+ mutex_lock(&guc->submission_state.lock);
+ xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) {
+ /*
+ * Prevent redundant attempts to stop parallel queues, or queues
+ * created after resfix done.
+ */
+ if (q->guc->id != index ||
+ !READ_ONCE(q->guc->sched.base.pause_submit))
+ continue;
- wake_up_all(&guc->ct.wq);
+ guc_exec_queue_unpause(guc, q);
+ }
+ mutex_unlock(&guc->submission_state.lock);
}
/**
@@ -2111,6 +2342,10 @@ void xe_guc_submit_pause_abort(struct xe_guc *guc)
xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) {
struct xe_gpu_scheduler *sched = &q->guc->sched;
+ /* Prevent redundant attempts to stop parallel queues */
+ if (q->guc->id != index)
+ continue;
+
xe_sched_submission_start(sched);
if (exec_queue_killed_or_banned_or_wedged(q))
xe_guc_exec_queue_trigger_cleanup(q);
@@ -2696,6 +2931,10 @@ int xe_guc_contexts_hwsp_rebase(struct xe_guc *guc, void *scratch)
mutex_lock(&guc->submission_state.lock);
xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) {
+ /* Prevent redundant attempts to stop parallel queues */
+ if (q->guc->id != index)
+ continue;
+
err = xe_exec_queue_contexts_hwsp_rebase(q, scratch);
if (err)
break;
diff --git a/drivers/gpu/drm/xe/xe_guc_submit.h b/drivers/gpu/drm/xe/xe_guc_submit.h
index fe82c317048e..b49a2748ec46 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit.h
+++ b/drivers/gpu/drm/xe/xe_guc_submit.h
@@ -22,6 +22,7 @@ void xe_guc_submit_stop(struct xe_guc *guc);
int xe_guc_submit_start(struct xe_guc *guc);
void xe_guc_submit_pause(struct xe_guc *guc);
void xe_guc_submit_unpause(struct xe_guc *guc);
+void xe_guc_submit_unpause_prepare(struct xe_guc *guc);
void xe_guc_submit_pause_abort(struct xe_guc *guc);
void xe_guc_submit_wedge(struct xe_guc *guc);
diff --git a/drivers/gpu/drm/xe/xe_sched_job_types.h b/drivers/gpu/drm/xe/xe_sched_job_types.h
index 7ce58765a34a..13e7a12b03ad 100644
--- a/drivers/gpu/drm/xe/xe_sched_job_types.h
+++ b/drivers/gpu/drm/xe/xe_sched_job_types.h
@@ -63,6 +63,10 @@ struct xe_sched_job {
bool ring_ops_flush_tlb;
/** @ggtt: mapped in ggtt. */
bool ggtt;
+ /** @skip_emit: skip emitting the job */
+ bool skip_emit;
+ /** @last_replay: last job being replayed */
+ bool last_replay;
/** @ptrs: per instance pointers. */
struct xe_job_ptrs ptrs[];
};
--
2.34.1
^ permalink raw reply related [flat|nested] 42+ messages in thread* [PATCH v9 27/34] drm/xe: Move queue init before LRC creation
2025-10-08 18:04 [PATCH v9 00/34] VF migration redesign Matthew Brost
` (25 preceding siblings ...)
2025-10-08 18:04 ` [PATCH v9 26/34] drm/xe/vf: Replay GuC submission state on pause / unpause Matthew Brost
@ 2025-10-08 18:04 ` Matthew Brost
2025-10-08 18:04 ` [PATCH v9 28/34] drm/xe/vf: Add debug prints for GuC replaying state during VF recovery Matthew Brost
` (10 subsequent siblings)
37 siblings, 0 replies; 42+ messages in thread
From: Matthew Brost @ 2025-10-08 18:04 UTC (permalink / raw)
To: intel-xe
A queue must be in the submission backend's tracking state before the
LRC is created to avoid a race condition where the LRC's GGTT addresses
are not properly fixed up during VF post-migration recovery.
Move the queue initialization—which adds the queue to the submission
backend's tracking state—before LRC creation.
Also wait on pending GGTT fixups before allocating LRCs to avoid racing
with fixups.
v2:
- Wait on VF GGTT fixes before creating LRC (testing)
v5:
- Adjust comment in code (Tomasz)
- Reduce race window
v7:
- Only wakeup waiters in recovery path (CI)
- Wakeup waiters on abort
- Use GT warn on (Michal)
- Fix kernel doc for LRC ring size function (Tomasz)
v8:
- Guard against migration not supported or no memirq (CI)
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Tomasz Lis <tomasz.lis@intel.com>
---
drivers/gpu/drm/xe/xe_exec_queue.c | 45 ++++++++++++++++++-----
drivers/gpu/drm/xe/xe_execlist.c | 2 +-
drivers/gpu/drm/xe/xe_gt_sriov_vf.c | 45 ++++++++++++++++++++++-
drivers/gpu/drm/xe/xe_gt_sriov_vf.h | 2 +
drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h | 5 +++
drivers/gpu/drm/xe/xe_guc_submit.c | 2 +-
drivers/gpu/drm/xe/xe_lrc.h | 10 +++++
7 files changed, 98 insertions(+), 13 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_exec_queue.c b/drivers/gpu/drm/xe/xe_exec_queue.c
index 7621089a47fe..90cbc95f8e2e 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue.c
+++ b/drivers/gpu/drm/xe/xe_exec_queue.c
@@ -15,6 +15,7 @@
#include "xe_dep_scheduler.h"
#include "xe_device.h"
#include "xe_gt.h"
+#include "xe_gt_sriov_vf.h"
#include "xe_hw_engine_class_sysfs.h"
#include "xe_hw_engine_group.h"
#include "xe_hw_fence.h"
@@ -205,17 +206,34 @@ static int __xe_exec_queue_init(struct xe_exec_queue *q, u32 exec_queue_flags)
if (!(exec_queue_flags & EXEC_QUEUE_FLAG_KERNEL))
flags |= XE_LRC_CREATE_USER_CTX;
+ err = q->ops->init(q);
+ if (err)
+ return err;
+
+ /*
+ * This must occur after q->ops->init to avoid race conditions during VF
+ * post-migration recovery, as the fixups for the LRC GGTT addresses
+ * depend on the queue being present in the backend tracking structure.
+ *
+ * In addition to above, we must wait on inflight GGTT changes to avoid
+ * writing out stale values here. Such wait provides a solid solution
+ * (without a race) only if the function can detect migration instantly
+ * from the moment vCPU resumes execution.
+ */
for (i = 0; i < q->width; ++i) {
- q->lrc[i] = xe_lrc_create(q->hwe, q->vm, SZ_16K, q->msix_vec, flags);
- if (IS_ERR(q->lrc[i])) {
- err = PTR_ERR(q->lrc[i]);
+ struct xe_lrc *lrc;
+
+ xe_gt_sriov_vf_wait_valid_ggtt(q->gt);
+ lrc = xe_lrc_create(q->hwe, q->vm, xe_lrc_ring_size(),
+ q->msix_vec, flags);
+ if (IS_ERR(lrc)) {
+ err = PTR_ERR(lrc);
goto err_lrc;
}
- }
- err = q->ops->init(q);
- if (err)
- goto err_lrc;
+ /* Pairs with READ_ONCE to xe_exec_queue_contexts_hwsp_rebase */
+ WRITE_ONCE(q->lrc[i], lrc);
+ }
return 0;
@@ -1121,9 +1139,16 @@ int xe_exec_queue_contexts_hwsp_rebase(struct xe_exec_queue *q, void *scratch)
int err = 0;
for (i = 0; i < q->width; ++i) {
- xe_lrc_update_memirq_regs_with_address(q->lrc[i], q->hwe, scratch);
- xe_lrc_update_hwctx_regs_with_address(q->lrc[i]);
- err = xe_lrc_setup_wa_bb_with_scratch(q->lrc[i], q->hwe, scratch);
+ struct xe_lrc *lrc;
+
+ /* Pairs with WRITE_ONCE in __xe_exec_queue_init */
+ lrc = READ_ONCE(q->lrc[i]);
+ if (!lrc)
+ continue;
+
+ xe_lrc_update_memirq_regs_with_address(lrc, q->hwe, scratch);
+ xe_lrc_update_hwctx_regs_with_address(lrc);
+ err = xe_lrc_setup_wa_bb_with_scratch(lrc, q->hwe, scratch);
if (err)
break;
}
diff --git a/drivers/gpu/drm/xe/xe_execlist.c b/drivers/gpu/drm/xe/xe_execlist.c
index f83d421ac9d3..769d05517f93 100644
--- a/drivers/gpu/drm/xe/xe_execlist.c
+++ b/drivers/gpu/drm/xe/xe_execlist.c
@@ -339,7 +339,7 @@ static int execlist_exec_queue_init(struct xe_exec_queue *q)
const struct drm_sched_init_args args = {
.ops = &drm_sched_ops,
.num_rqs = 1,
- .credit_limit = q->lrc[0]->ring.size / MAX_JOB_SIZE_BYTES,
+ .credit_limit = xe_lrc_ring_size() / MAX_JOB_SIZE_BYTES,
.hang_limit = XE_SCHED_HANG_LIMIT,
.timeout = XE_SCHED_JOB_TIMEOUT,
.name = q->hwe->name,
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
index 44bda1d4cb38..b9988e227a25 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
@@ -480,6 +480,12 @@ static int vf_get_ggtt_info(struct xe_gt *gt)
xe_tile_sriov_vf_fixup_ggtt_nodes_locked(gt_to_tile(gt), shift);
}
+ if (xe_sriov_vf_migration_supported(gt_to_xe(gt))) {
+ WRITE_ONCE(gt->sriov.vf.migration.ggtt_need_fixes, false);
+ smp_wmb(); /* Ensure above write visible before wake */
+ wake_up_all(&gt->sriov.vf.migration.wq);
+ }
+
return err;
}
@@ -729,7 +735,8 @@ static void vf_start_migration_recovery(struct xe_gt *gt)
!gt->sriov.vf.migration.recovery_teardown) {
gt->sriov.vf.migration.recovery_queued = true;
WRITE_ONCE(gt->sriov.vf.migration.recovery_inprogress, true);
- smp_wmb(); /* Ensure above write visable before wake */
+ WRITE_ONCE(gt->sriov.vf.migration.ggtt_need_fixes, true);
+ smp_wmb(); /* Ensure above writes visible before wake */
xe_guc_ct_wake_waiters(&gt->uc.guc.ct);
@@ -1141,8 +1148,11 @@ static void vf_post_migration_abort(struct xe_gt *gt)
{
spin_lock_irq(&gt->sriov.vf.migration.lock);
WRITE_ONCE(gt->sriov.vf.migration.recovery_inprogress, false);
+ WRITE_ONCE(gt->sriov.vf.migration.ggtt_need_fixes, false);
spin_unlock_irq(&gt->sriov.vf.migration.lock);
+ wake_up_all(&gt->sriov.vf.migration.wq);
+
xe_guc_submit_pause_abort(&gt->uc.guc);
}
@@ -1251,6 +1261,7 @@ int xe_gt_sriov_vf_init_early(struct xe_gt *gt)
gt->sriov.vf.migration.scratch = buf;
spin_lock_init(&gt->sriov.vf.migration.lock);
INIT_WORK(&gt->sriov.vf.migration.worker, migration_worker_func);
+ init_waitqueue_head(&gt->sriov.vf.migration.wq);
return 0;
}
@@ -1300,3 +1311,35 @@ bool xe_gt_sriov_vf_recovery_pending(struct xe_gt *gt)
return READ_ONCE(gt->sriov.vf.migration.recovery_inprogress);
}
+
+static bool vf_valid_ggtt(struct xe_gt *gt)
+{
+ struct xe_memirq *memirq = &gt_to_tile(gt)->memirq;
+ bool irq_pending = xe_device_uses_memirq(gt_to_xe(gt)) &&
+ xe_memirq_guc_sw_int_0_irq_pending(memirq, &gt->uc.guc);
+
+ xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
+
+ if (irq_pending || READ_ONCE(gt->sriov.vf.migration.ggtt_need_fixes))
+ return false;
+
+ return true;
+}
+
+/**
+ * xe_gt_sriov_vf_wait_valid_ggtt() - VF wait for valid GGTT addresses
+ * @gt: the &xe_gt
+ */
+void xe_gt_sriov_vf_wait_valid_ggtt(struct xe_gt *gt)
+{
+ int ret;
+
+ if (!IS_SRIOV_VF(gt_to_xe(gt)) ||
+ !xe_sriov_vf_migration_supported(gt_to_xe(gt)))
+ return;
+
+ ret = wait_event_interruptible_timeout(gt->sriov.vf.migration.wq,
+ vf_valid_ggtt(gt),
+ HZ * 5);
+ xe_gt_WARN_ON(gt, !ret);
+}
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf.h b/drivers/gpu/drm/xe/xe_gt_sriov_vf.h
index 1d2eaa52f804..af40276790fa 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_vf.h
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf.h
@@ -38,4 +38,6 @@ void xe_gt_sriov_vf_print_config(struct xe_gt *gt, struct drm_printer *p);
void xe_gt_sriov_vf_print_runtime(struct xe_gt *gt, struct drm_printer *p);
void xe_gt_sriov_vf_print_version(struct xe_gt *gt, struct drm_printer *p);
+void xe_gt_sriov_vf_wait_valid_ggtt(struct xe_gt *gt);
+
#endif
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h b/drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h
index bdd9b968f204..420b0e6089de 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h
@@ -7,6 +7,7 @@
#define _XE_GT_SRIOV_VF_TYPES_H_
#include <linux/types.h>
+#include <linux/wait.h>
#include <linux/workqueue.h>
#include "xe_uc_fw_types.h"
@@ -47,6 +48,8 @@ struct xe_gt_sriov_vf_migration {
struct work_struct worker;
/** @lock: Protects recovery_queued, teardown */
spinlock_t lock;
+ /** @wq: wait queue for migration fixes */
+ wait_queue_head_t wq;
/** @scratch: Scratch memory for VF recovery */
void *scratch;
/** @recovery_teardown: VF post migration recovery is being torn down */
@@ -55,6 +58,8 @@ struct xe_gt_sriov_vf_migration {
bool recovery_queued;
/** @recovery_inprogress: VF post migration recovery in progress */
bool recovery_inprogress;
+ /** @ggtt_need_fixes: VF GGTT needs fixes */
+ bool ggtt_need_fixes;
};
/**
diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
index d07ff014492e..be7aa1e89d13 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit.c
+++ b/drivers/gpu/drm/xe/xe_guc_submit.c
@@ -1670,7 +1670,7 @@ static int guc_exec_queue_init(struct xe_exec_queue *q)
timeout = (q->vm && xe_vm_in_lr_mode(q->vm)) ? MAX_SCHEDULE_TIMEOUT :
msecs_to_jiffies(q->sched_props.job_timeout_ms);
err = xe_sched_init(&ge->sched, &drm_sched_ops, &xe_sched_ops,
- NULL, q->lrc[0]->ring.size / MAX_JOB_SIZE_BYTES, 64,
+ NULL, xe_lrc_ring_size() / MAX_JOB_SIZE_BYTES, 64,
timeout, guc_to_gt(guc)->ordered_wq, NULL,
q->name, gt_to_xe(q->gt)->drm.dev);
if (err)
diff --git a/drivers/gpu/drm/xe/xe_lrc.h b/drivers/gpu/drm/xe/xe_lrc.h
index 21a3daab0154..2fb628da5c43 100644
--- a/drivers/gpu/drm/xe/xe_lrc.h
+++ b/drivers/gpu/drm/xe/xe_lrc.h
@@ -76,6 +76,16 @@ static inline void xe_lrc_put(struct xe_lrc *lrc)
kref_put(&lrc->refcount, xe_lrc_destroy);
}
+/**
+ * xe_lrc_ring_size() - Xe LRC ring size
+ *
+ * Return: Size of LRC ring buffer
+ */
+static inline size_t xe_lrc_ring_size(void)
+{
+ return SZ_16K;
+}
+
size_t xe_gt_lrc_size(struct xe_gt *gt, enum xe_engine_class class);
u32 xe_lrc_pphwsp_offset(struct xe_lrc *lrc);
u32 xe_lrc_regs_offset(struct xe_lrc *lrc);
--
2.34.1
^ permalink raw reply related [flat|nested] 42+ messages in thread* [PATCH v9 28/34] drm/xe/vf: Add debug prints for GuC replaying state during VF recovery
2025-10-08 18:04 [PATCH v9 00/34] VF migration redesign Matthew Brost
` (26 preceding siblings ...)
2025-10-08 18:04 ` [PATCH v9 27/34] drm/xe: Move queue init before LRC creation Matthew Brost
@ 2025-10-08 18:04 ` Matthew Brost
2025-10-08 18:04 ` [PATCH v9 29/34] drm/xe/vf: Workaround for race condition in GuC firmware during VF pause Matthew Brost
` (9 subsequent siblings)
37 siblings, 0 replies; 42+ messages in thread
From: Matthew Brost @ 2025-10-08 18:04 UTC (permalink / raw)
To: intel-xe
Helpful to manually verify the GuC state machine can correctly replay
the state during a VF post-migration recovery. All replay paths have
been manually verified as triggered and working during testing.
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Tomasz Lis <tomasz.lis@intel.com>
---
drivers/gpu/drm/xe/xe_guc_submit.c | 23 ++++++++++++++++++++---
1 file changed, 20 insertions(+), 3 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
index be7aa1e89d13..1321064d4f70 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit.c
+++ b/drivers/gpu/drm/xe/xe_guc_submit.c
@@ -2037,21 +2037,27 @@ void xe_guc_submit_stop(struct xe_guc *guc)
}
-static void guc_exec_queue_revert_pending_state_change(struct xe_exec_queue *q)
+static void guc_exec_queue_revert_pending_state_change(struct xe_guc *guc,
+ struct xe_exec_queue *q)
{
bool pending_enable, pending_disable, pending_resume;
pending_enable = exec_queue_pending_enable(q);
pending_resume = exec_queue_pending_resume(q);
- if (pending_enable && pending_resume)
+ if (pending_enable && pending_resume) {
q->guc->needs_resume = true;
+ xe_gt_dbg(guc_to_gt(guc), "Replay RESUME - guc_id=%d",
+ q->guc->id);
+ }
if (pending_enable && !pending_resume &&
!exec_queue_pending_tdr_exit(q)) {
clear_exec_queue_registered(q);
if (xe_exec_queue_is_lr(q))
xe_exec_queue_put(q);
+ xe_gt_dbg(guc_to_gt(guc), "Replay REGISTER - guc_id=%d",
+ q->guc->id);
}
if (pending_enable) {
@@ -2059,6 +2065,8 @@ static void guc_exec_queue_revert_pending_state_change(struct xe_exec_queue *q)
clear_exec_queue_pending_resume(q);
clear_exec_queue_pending_tdr_exit(q);
clear_exec_queue_pending_enable(q);
+ xe_gt_dbg(guc_to_gt(guc), "Replay ENABLE - guc_id=%d",
+ q->guc->id);
}
if (exec_queue_destroyed(q) && exec_queue_registered(q)) {
@@ -2068,6 +2076,8 @@ static void guc_exec_queue_revert_pending_state_change(struct xe_exec_queue *q)
else
q->guc->needs_cleanup = true;
clear_exec_queue_extra_ref(q);
+ xe_gt_dbg(guc_to_gt(guc), "Replay CLEANUP - guc_id=%d",
+ q->guc->id);
}
pending_disable = exec_queue_pending_disable(q);
@@ -2075,6 +2085,8 @@ static void guc_exec_queue_revert_pending_state_change(struct xe_exec_queue *q)
if (pending_disable && exec_queue_suspended(q)) {
clear_exec_queue_suspended(q);
q->guc->needs_suspend = true;
+ xe_gt_dbg(guc_to_gt(guc), "Replay SUSPEND - guc_id=%d",
+ q->guc->id);
}
if (pending_disable) {
@@ -2082,6 +2094,8 @@ static void guc_exec_queue_revert_pending_state_change(struct xe_exec_queue *q)
set_exec_queue_enabled(q);
clear_exec_queue_pending_disable(q);
clear_exec_queue_check_timeout(q);
+ xe_gt_dbg(guc_to_gt(guc), "Replay DISABLE - guc_id=%d",
+ q->guc->id);
}
q->guc->resume_time = 0;
@@ -2107,7 +2121,7 @@ static void guc_exec_queue_pause(struct xe_guc *guc, struct xe_exec_queue *q)
else
cancel_delayed_work_sync(&sched->base.work_tdr);
- guc_exec_queue_revert_pending_state_change(q);
+ guc_exec_queue_revert_pending_state_change(guc, q);
if (xe_exec_queue_is_parallel(q)) {
struct xe_device *xe = guc_to_xe(guc);
@@ -2222,6 +2236,9 @@ static void guc_exec_queue_unpause_prepare(struct xe_guc *guc,
list_for_each_entry(s_job, &sched->base.pending_list, list) {
job = to_xe_sched_job(s_job);
+ xe_gt_dbg(guc_to_gt(guc), "Replay JOB - guc_id=%d, seqno=%d",
+ q->guc->id, xe_sched_job_seqno(job));
+
q->ring_ops->emit_job(job);
job->skip_emit = true;
}
--
2.34.1
^ permalink raw reply related [flat|nested] 42+ messages in thread* [PATCH v9 29/34] drm/xe/vf: Workaround for race condition in GuC firmware during VF pause
2025-10-08 18:04 [PATCH v9 00/34] VF migration redesign Matthew Brost
` (27 preceding siblings ...)
2025-10-08 18:04 ` [PATCH v9 28/34] drm/xe/vf: Add debug prints for GuC replaying state during VF recovery Matthew Brost
@ 2025-10-08 18:04 ` Matthew Brost
2025-10-08 18:04 ` [PATCH v9 30/34] drm/xe: Use PPGTT addresses for TLB invalidation to avoid GGTT fixups Matthew Brost
` (8 subsequent siblings)
37 siblings, 0 replies; 42+ messages in thread
From: Matthew Brost @ 2025-10-08 18:04 UTC (permalink / raw)
To: intel-xe
A race condition exists where a paused VF's H2G request can be processed
and subsequently rejected. This rejection results in a FAST_REQ failure
being delivered to the KMD, which then terminates the CT via a dead
worker and triggers a GT reset—an undesirable outcome.
This workaround mitigates the issue by checking if a VF post-migration
recovery is in progress and aborting these adverse actions accordingly.
The GuC firmware will address this bug in an upcoming release. Once that
version is available and VF migration depends on it, this workaround can
be safely removed.
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Tomasz Lis <tomasz.lis@intel.com>
---
drivers/gpu/drm/xe/xe_guc_ct.c | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/drivers/gpu/drm/xe/xe_guc_ct.c b/drivers/gpu/drm/xe/xe_guc_ct.c
index 3472e4ea2609..3ae1e8db143a 100644
--- a/drivers/gpu/drm/xe/xe_guc_ct.c
+++ b/drivers/gpu/drm/xe/xe_guc_ct.c
@@ -1398,6 +1398,10 @@ static int parse_g2h_response(struct xe_guc_ct *ct, u32 *msg, u32 len)
fast_req_report(ct, fence);
+ /* FIXME: W/A race in the GuC, will get in firmware soon */
+ if (xe_gt_recovery_pending(gt))
+ return 0;
+
CT_DEAD(ct, NULL, PARSE_G2H_RESPONSE);
return -EPROTO;
--
2.34.1
^ permalink raw reply related [flat|nested] 42+ messages in thread* [PATCH v9 30/34] drm/xe: Use PPGTT addresses for TLB invalidation to avoid GGTT fixups
2025-10-08 18:04 [PATCH v9 00/34] VF migration redesign Matthew Brost
` (28 preceding siblings ...)
2025-10-08 18:04 ` [PATCH v9 29/34] drm/xe/vf: Workaround for race condition in GuC firmware during VF pause Matthew Brost
@ 2025-10-08 18:04 ` Matthew Brost
2025-10-08 18:04 ` [PATCH v9 31/34] drm/xe/vf: Use primary GT ordered work queue on media GT on PTL VF Matthew Brost
` (7 subsequent siblings)
37 siblings, 0 replies; 42+ messages in thread
From: Matthew Brost @ 2025-10-08 18:04 UTC (permalink / raw)
To: intel-xe
From: Satyanarayana K V P <satyanarayana.k.v.p@intel.com>
The migrate VM builds the CCS metadata save/restore batch buffer (BB) in
advance and retains it so the GuC can submit it directly when saving a
VM’s state.
When a VM migrates between VFs, the GGTT base can change. Any GGTT-based
addresses embedded in the BB would then have to be parsed and patched.
Use PPGTT addresses in the BB (including for TLB invalidation) so the BB
remains GGTT-agnostic and requires no address fixups during migration.
Signed-off-by: Satyanarayana K V P <satyanarayana.k.v.p@intel.com>
Cc: Michal Wajdeczko <michal.wajdeczko@intel.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
---
drivers/gpu/drm/xe/xe_migrate.c | 28 ++++++++++++++++++++--------
1 file changed, 20 insertions(+), 8 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c
index 1d667fa36cf3..ad03afb5145f 100644
--- a/drivers/gpu/drm/xe/xe_migrate.c
+++ b/drivers/gpu/drm/xe/xe_migrate.c
@@ -980,15 +980,27 @@ struct xe_lrc *xe_migrate_lrc(struct xe_migrate *migrate)
return migrate->q->lrc[0];
}
-static int emit_flush_invalidate(struct xe_exec_queue *q, u32 *dw, int i,
- u32 flags)
+static u64 migrate_vm_ppgtt_addr_tlb_inval(void)
{
- struct xe_lrc *lrc = xe_exec_queue_lrc(q);
+ /*
+ * The migrate VM is self-referential so it can modify its own PTEs (see
+ * pte_update_size() or emit_pte() functions). We reserve NUM_KERNEL_PDE
+ * entries for kernel operations (copies, clears, CCS migrate), and
+ * suballocate the rest to user operations (binds/unbinds). With
+ * NUM_KERNEL_PDE = 15, NUM_KERNEL_PDE - 1 is already used for PTE updates,
+ * so assign NUM_KERNEL_PDE - 2 for TLB invalidation.
+ */
+ return (NUM_KERNEL_PDE - 2) * XE_PAGE_SIZE;
+}
+
+static int emit_flush_invalidate(u32 *dw, int i, u32 flags)
+{
+ u64 addr = migrate_vm_ppgtt_addr_tlb_inval();
+
dw[i++] = MI_FLUSH_DW | MI_INVALIDATE_TLB | MI_FLUSH_DW_OP_STOREDW |
MI_FLUSH_IMM_DW | flags;
- dw[i++] = lower_32_bits(xe_lrc_start_seqno_ggtt_addr(lrc)) |
- MI_FLUSH_DW_USE_GTT;
- dw[i++] = upper_32_bits(xe_lrc_start_seqno_ggtt_addr(lrc));
+ dw[i++] = lower_32_bits(addr);
+ dw[i++] = upper_32_bits(addr);
dw[i++] = MI_NOOP;
dw[i++] = MI_NOOP;
@@ -1101,11 +1113,11 @@ int xe_migrate_ccs_rw_copy(struct xe_tile *tile, struct xe_exec_queue *q,
emit_pte(m, bb, ccs_pt, false, false, &ccs_it, ccs_size, src);
- bb->len = emit_flush_invalidate(q, bb->cs, bb->len, flush_flags);
+ bb->len = emit_flush_invalidate(bb->cs, bb->len, flush_flags);
flush_flags = xe_migrate_ccs_copy(m, bb, src_L0_ofs, src_is_pltt,
src_L0_ofs, dst_is_pltt,
src_L0, ccs_ofs, true);
- bb->len = emit_flush_invalidate(q, bb->cs, bb->len, flush_flags);
+ bb->len = emit_flush_invalidate(bb->cs, bb->len, flush_flags);
size -= src_L0;
}
--
2.34.1
^ permalink raw reply related [flat|nested] 42+ messages in thread* [PATCH v9 31/34] drm/xe/vf: Use primary GT ordered work queue on media GT on PTL VF
2025-10-08 18:04 [PATCH v9 00/34] VF migration redesign Matthew Brost
` (29 preceding siblings ...)
2025-10-08 18:04 ` [PATCH v9 30/34] drm/xe: Use PPGTT addresses for TLB invalidation to avoid GGTT fixups Matthew Brost
@ 2025-10-08 18:04 ` Matthew Brost
2025-10-08 18:04 ` [PATCH v9 32/34] drm/xe/vf: Ensure media GT VF recovery runs after primary GT on PTL Matthew Brost
` (6 subsequent siblings)
37 siblings, 0 replies; 42+ messages in thread
From: Matthew Brost @ 2025-10-08 18:04 UTC (permalink / raw)
To: intel-xe
VF CCS restore is a primary GT operation on which the media GT depends.
Therefore, it doesn't make much sense to run these operations in
parallel. To address this, point the media GT's ordered work queue to
the primary GT's ordered work queue on platforms that require CCS
restore as part of VF post-migration recovery (PTL VFs).
v7:
- Remove bool from xe_gt_alloc (Lucas)
v9:
- Fix typo (Lucas)
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Lucas De Marchi <lucas.demarchi@intel.com>
---
drivers/gpu/drm/xe/xe_device_types.h | 2 ++
drivers/gpu/drm/xe/xe_gt.c | 18 ++++++++++++++----
drivers/gpu/drm/xe/xe_pci.c | 2 ++
drivers/gpu/drm/xe/xe_pci_types.h | 1 +
4 files changed, 19 insertions(+), 4 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h
index c66523bf4bf0..02c04ad7296e 100644
--- a/drivers/gpu/drm/xe/xe_device_types.h
+++ b/drivers/gpu/drm/xe/xe_device_types.h
@@ -334,6 +334,8 @@ struct xe_device {
u8 skip_mtcfg:1;
/** @info.skip_pcode: skip access to PCODE uC */
u8 skip_pcode:1;
+ /** @info.needs_shared_vf_gt_wq: needs shared GT WQ on VF */
+ u8 needs_shared_vf_gt_wq:1;
} info;
/** @wa_active: keep track of active workarounds */
diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c
index 6951fedd4350..d8e94fb8b9bd 100644
--- a/drivers/gpu/drm/xe/xe_gt.c
+++ b/drivers/gpu/drm/xe/xe_gt.c
@@ -67,7 +67,11 @@
struct xe_gt *xe_gt_alloc(struct xe_tile *tile)
{
- struct drm_device *drm = &tile_to_xe(tile)->drm;
+ struct xe_device *xe = tile_to_xe(tile);
+ struct drm_device *drm = &xe->drm;
+ bool shared_wq = xe->info.needs_shared_vf_gt_wq && tile->primary_gt &&
+ IS_SRIOV_VF(xe);
+ struct workqueue_struct *ordered_wq;
struct xe_gt *gt;
gt = drmm_kzalloc(drm, sizeof(*gt), GFP_KERNEL);
@@ -75,9 +79,15 @@ struct xe_gt *xe_gt_alloc(struct xe_tile *tile)
return ERR_PTR(-ENOMEM);
gt->tile = tile;
- gt->ordered_wq = drmm_alloc_ordered_workqueue(drm, "gt-ordered-wq", WQ_MEM_RECLAIM);
- if (IS_ERR(gt->ordered_wq))
- return ERR_CAST(gt->ordered_wq);
+ if (shared_wq && tile->primary_gt->ordered_wq)
+ ordered_wq = tile->primary_gt->ordered_wq;
+ else
+ ordered_wq = drmm_alloc_ordered_workqueue(drm, "gt-ordered-wq",
+ WQ_MEM_RECLAIM);
+ if (IS_ERR(ordered_wq))
+ return ERR_CAST(ordered_wq);
+
+ gt->ordered_wq = ordered_wq;
return gt;
}
diff --git a/drivers/gpu/drm/xe/xe_pci.c b/drivers/gpu/drm/xe/xe_pci.c
index 3f42b91efa28..f8243622429d 100644
--- a/drivers/gpu/drm/xe/xe_pci.c
+++ b/drivers/gpu/drm/xe/xe_pci.c
@@ -347,6 +347,7 @@ static const struct xe_device_desc ptl_desc = {
.has_sriov = true,
.max_gt_per_tile = 2,
.needs_scratch = true,
+ .needs_shared_vf_gt_wq = true,
};
#undef PLATFORM
@@ -598,6 +599,7 @@ static int xe_info_init_early(struct xe_device *xe,
xe->info.skip_mtcfg = desc->skip_mtcfg;
xe->info.skip_pcode = desc->skip_pcode;
xe->info.needs_scratch = desc->needs_scratch;
+ xe->info.needs_shared_vf_gt_wq = desc->needs_shared_vf_gt_wq;
xe->info.probe_display = IS_ENABLED(CONFIG_DRM_XE_DISPLAY) &&
xe_modparam.probe_display &&
diff --git a/drivers/gpu/drm/xe/xe_pci_types.h b/drivers/gpu/drm/xe/xe_pci_types.h
index 9b9766a3baa3..b11bf6abda5b 100644
--- a/drivers/gpu/drm/xe/xe_pci_types.h
+++ b/drivers/gpu/drm/xe/xe_pci_types.h
@@ -48,6 +48,7 @@ struct xe_device_desc {
u8 skip_guc_pc:1;
u8 skip_mtcfg:1;
u8 skip_pcode:1;
+ u8 needs_shared_vf_gt_wq:1;
};
struct xe_graphics_desc {
--
2.34.1
^ permalink raw reply related [flat|nested] 42+ messages in thread* [PATCH v9 32/34] drm/xe/vf: Ensure media GT VF recovery runs after primary GT on PTL
2025-10-08 18:04 [PATCH v9 00/34] VF migration redesign Matthew Brost
` (30 preceding siblings ...)
2025-10-08 18:04 ` [PATCH v9 31/34] drm/xe/vf: Use primary GT ordered work queue on media GT on PTL VF Matthew Brost
@ 2025-10-08 18:04 ` Matthew Brost
2025-10-08 18:04 ` [PATCH v9 33/34] drm/xe/vf: Rebase CCS save/restore BB GGTT addresses Matthew Brost
` (5 subsequent siblings)
37 siblings, 0 replies; 42+ messages in thread
From: Matthew Brost @ 2025-10-08 18:04 UTC (permalink / raw)
To: intel-xe
It is possible that the media GT's VF post-migration recovery work item
gets scheduled before the primary GT's work item. Since the media GT
depends on the primary GT's work item to complete CCS restore, detect
the case where the media GT's work item is scheduled first and re-queue
it for a later time.
v5:
- Adjust debug message (Tomasz)
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Tomasz Lis <tomasz.lis@intel.com>
---
drivers/gpu/drm/xe/xe_gt_sriov_vf.c | 29 +++++++++++++++++++++++++++--
1 file changed, 27 insertions(+), 2 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
index b9988e227a25..47a87a7126cc 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
@@ -1100,8 +1100,22 @@ void xe_gt_sriov_vf_print_version(struct xe_gt *gt, struct drm_printer *p)
pf_version->major, pf_version->minor);
}
-static void vf_post_migration_shutdown(struct xe_gt *gt)
+static bool vf_post_migration_shutdown(struct xe_gt *gt)
{
+ struct xe_device *xe = gt_to_xe(gt);
+
+ /*
+ * On platforms where CCS must be restored by the primary GT, the media
+ * GT's VF post-migration recovery must run afterward. Detect this case
+ * and re-queue the media GT's restore work item if necessary.
+ */
+ if (xe->info.needs_shared_vf_gt_wq && xe_gt_is_media_type(gt)) {
+ struct xe_gt *primary_gt = gt_to_tile(gt)->primary_gt;
+
+ if (xe_gt_sriov_vf_recovery_pending(primary_gt))
+ return true;
+ }
+
spin_lock_irq(&gt->sriov.vf.migration.lock);
gt->sriov.vf.migration.recovery_queued = false;
spin_unlock_irq(&gt->sriov.vf.migration.lock);
@@ -1109,6 +1123,8 @@ static void vf_post_migration_shutdown(struct xe_gt *gt)
xe_guc_ct_flush_and_stop(&gt->uc.guc.ct);
xe_guc_submit_pause(&gt->uc.guc);
xe_tlb_inval_reset(&gt->tlb_inval);
+
+ return false;
}
static size_t post_migration_scratch_size(struct xe_device *xe)
@@ -1186,11 +1202,14 @@ static void vf_post_migration_recovery(struct xe_gt *gt)
{
struct xe_device *xe = gt_to_xe(gt);
int err;
+ bool retry;
xe_gt_sriov_dbg(gt, "migration recovery in progress\n");
xe_pm_runtime_get(xe);
- vf_post_migration_shutdown(gt);
+ retry = vf_post_migration_shutdown(gt);
+ if (retry)
+ goto queue;
if (!xe_sriov_vf_migration_supported(xe)) {
xe_gt_sriov_err(gt, "migration is not supported\n");
@@ -1218,6 +1237,12 @@ static void vf_post_migration_recovery(struct xe_gt *gt)
xe_pm_runtime_put(xe);
xe_gt_sriov_err(gt, "migration recovery failed (%pe)\n", ERR_PTR(err));
xe_device_declare_wedged(xe);
+ return;
+
+queue:
+ xe_gt_sriov_info(gt, "Re-queuing migration recovery\n");
+ queue_work(gt->ordered_wq, &gt->sriov.vf.migration.worker);
+ xe_pm_runtime_put(xe);
}
static void migration_worker_func(struct work_struct *w)
--
2.34.1
^ permalink raw reply related [flat|nested] 42+ messages in thread* [PATCH v9 33/34] drm/xe/vf: Rebase CCS save/restore BB GGTT addresses
2025-10-08 18:04 [PATCH v9 00/34] VF migration redesign Matthew Brost
` (31 preceding siblings ...)
2025-10-08 18:04 ` [PATCH v9 32/34] drm/xe/vf: Ensure media GT VF recovery runs after primary GT on PTL Matthew Brost
@ 2025-10-08 18:04 ` Matthew Brost
2025-10-08 18:05 ` [PATCH v9 34/34] drm/xe/guc: Increase wait timeout to 2sec after BUSY reply from GuC Matthew Brost
` (4 subsequent siblings)
37 siblings, 0 replies; 42+ messages in thread
From: Matthew Brost @ 2025-10-08 18:04 UTC (permalink / raw)
To: intel-xe
Rebase the CCS save/restore BB's GGTT addresses during VF post-migration
recovery by setting the software ring tail to zero, the LRC ring head to
zero, and rewriting the jump-to-BB instructions.
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Satyanarayana K V P <satyanarayana.k.v.p@intel.com>
---
drivers/gpu/drm/xe/xe_gt_sriov_vf.c | 4 ++++
drivers/gpu/drm/xe/xe_sriov_vf_ccs.c | 28 ++++++++++++++++++++++++++++
drivers/gpu/drm/xe/xe_sriov_vf_ccs.h | 1 +
3 files changed, 33 insertions(+)
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
index 47a87a7126cc..3ca9b3e84c1b 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
@@ -34,6 +34,7 @@
#include "xe_pm.h"
#include "xe_sriov.h"
#include "xe_sriov_vf.h"
+#include "xe_sriov_vf_ccs.h"
#include "xe_tile_sriov_vf.h"
#include "xe_tlb_inval.h"
#include "xe_uc_fw.h"
@@ -1141,6 +1142,9 @@ static int vf_post_migration_fixups(struct xe_gt *gt)
if (err)
return err;
+ if (xe_gt_is_main_type(gt))
+ xe_sriov_vf_ccs_rebase(gt_to_xe(gt));
+
xe_gt_sriov_vf_default_lrcs_hwsp_rebase(gt);
err = xe_guc_contexts_hwsp_rebase(&gt->uc.guc, buf);
if (err)
diff --git a/drivers/gpu/drm/xe/xe_sriov_vf_ccs.c b/drivers/gpu/drm/xe/xe_sriov_vf_ccs.c
index 8dec616c37c9..790249801364 100644
--- a/drivers/gpu/drm/xe/xe_sriov_vf_ccs.c
+++ b/drivers/gpu/drm/xe/xe_sriov_vf_ccs.c
@@ -175,6 +175,15 @@ static void ccs_rw_update_ring(struct xe_sriov_vf_ccs_ctx *ctx)
struct xe_lrc *lrc = xe_exec_queue_lrc(ctx->mig_q);
u32 dw[10], i = 0;
+ /*
+ * XXX: Save/restore fixes — for some reason, the GuC only accepts the
+ * save/restore context if the LRC head pointer is zero. This is evident
+ * from repeated VF migrations failing when the LRC head pointer is
+ * non-zero.
+ */
+ lrc->ring.tail = 0;
+ xe_lrc_set_ring_head(lrc, 0);
+
dw[i++] = MI_ARB_ON_OFF | MI_ARB_ENABLE;
dw[i++] = MI_BATCH_BUFFER_START | XE_INSTR_NUM_DW(3);
dw[i++] = lower_32_bits(addr);
@@ -186,6 +195,25 @@ static void ccs_rw_update_ring(struct xe_sriov_vf_ccs_ctx *ctx)
xe_lrc_set_ring_tail(lrc, lrc->ring.tail);
}
+/**
+ * xe_sriov_vf_ccs_rebase - Rebase GGTT addresses for CCS save / restore
+ * @xe: the &xe_device.
+ */
+void xe_sriov_vf_ccs_rebase(struct xe_device *xe)
+{
+ enum xe_sriov_vf_ccs_rw_ctxs ctx_id;
+
+ if (!IS_VF_CCS_READY(xe))
+ return;
+
+ for_each_ccs_rw_ctx(ctx_id) {
+ struct xe_sriov_vf_ccs_ctx *ctx =
+ &xe->sriov.vf.ccs.contexts[ctx_id];
+
+ ccs_rw_update_ring(ctx);
+ }
+}
+
static int register_save_restore_context(struct xe_sriov_vf_ccs_ctx *ctx)
{
int ctx_type;
diff --git a/drivers/gpu/drm/xe/xe_sriov_vf_ccs.h b/drivers/gpu/drm/xe/xe_sriov_vf_ccs.h
index 0745c0ff0228..f8ca6efce9ee 100644
--- a/drivers/gpu/drm/xe/xe_sriov_vf_ccs.h
+++ b/drivers/gpu/drm/xe/xe_sriov_vf_ccs.h
@@ -18,6 +18,7 @@ int xe_sriov_vf_ccs_init(struct xe_device *xe);
int xe_sriov_vf_ccs_attach_bo(struct xe_bo *bo);
int xe_sriov_vf_ccs_detach_bo(struct xe_bo *bo);
int xe_sriov_vf_ccs_register_context(struct xe_device *xe);
+void xe_sriov_vf_ccs_rebase(struct xe_device *xe);
void xe_sriov_vf_ccs_print(struct xe_device *xe, struct drm_printer *p);
static inline bool xe_sriov_vf_ccs_ready(struct xe_device *xe)
--
2.34.1
^ permalink raw reply related [flat|nested] 42+ messages in thread* [PATCH v9 34/34] drm/xe/guc: Increase wait timeout to 2sec after BUSY reply from GuC
2025-10-08 18:04 [PATCH v9 00/34] VF migration redesign Matthew Brost
` (32 preceding siblings ...)
2025-10-08 18:04 ` [PATCH v9 33/34] drm/xe/vf: Rebase CCS save/restore BB GGTT addresses Matthew Brost
@ 2025-10-08 18:05 ` Matthew Brost
2025-10-08 18:28 ` ✗ CI.checkpatch: warning for VF migration redesign (rev9) Patchwork
` (3 subsequent siblings)
37 siblings, 0 replies; 42+ messages in thread
From: Matthew Brost @ 2025-10-08 18:05 UTC (permalink / raw)
To: intel-xe
From: Satyanarayana K V P <satyanarayana.k.v.p@intel.com>
Some VF2GUC actions may take longer to process. Increase the default
timeout after receiving a BUSY indication to 2 seconds to cover all
worst-case scenarios.
Signed-off-by: Satyanarayana K V P <satyanarayana.k.v.p@intel.com>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Cc: Michal Wajdeczko <michal.wajdeczko@intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
---
drivers/gpu/drm/xe/xe_guc.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/gpu/drm/xe/xe_guc.c b/drivers/gpu/drm/xe/xe_guc.c
index d5adbbb013ec..d94490979adc 100644
--- a/drivers/gpu/drm/xe/xe_guc.c
+++ b/drivers/gpu/drm/xe/xe_guc.c
@@ -1439,7 +1439,7 @@ int xe_guc_mmio_send_recv(struct xe_guc *guc, const u32 *request,
BUILD_BUG_ON((GUC_HXG_TYPE_RESPONSE_SUCCESS ^ GUC_HXG_TYPE_RESPONSE_FAILURE) != 1);
ret = xe_mmio_wait32(mmio, reply_reg, resp_mask, resp_mask,
- 1000000, &header, false);
+ 2000000, &header, false);
if (unlikely(FIELD_GET(GUC_HXG_MSG_0_ORIGIN, header) !=
GUC_HXG_ORIGIN_GUC))
--
2.34.1
^ permalink raw reply related [flat|nested] 42+ messages in thread* ✗ CI.checkpatch: warning for VF migration redesign (rev9)
2025-10-08 18:04 [PATCH v9 00/34] VF migration redesign Matthew Brost
` (33 preceding siblings ...)
2025-10-08 18:05 ` [PATCH v9 34/34] drm/xe/guc: Increase wait timeout to 2sec after BUSY reply from GuC Matthew Brost
@ 2025-10-08 18:28 ` Patchwork
2025-10-08 18:29 ` ✓ CI.KUnit: success " Patchwork
` (2 subsequent siblings)
37 siblings, 0 replies; 42+ messages in thread
From: Patchwork @ 2025-10-08 18:28 UTC (permalink / raw)
To: Matthew Brost; +Cc: intel-xe
== Series Details ==
Series: VF migration redesign (rev9)
URL : https://patchwork.freedesktop.org/series/154627/
State : warning
== Summary ==
+ KERNEL=/kernel
+ git clone https://gitlab.freedesktop.org/drm/maintainer-tools mt
Cloning into 'mt'...
warning: redirecting to https://gitlab.freedesktop.org/drm/maintainer-tools.git/
+ git -C mt rev-list -n1 origin/master
fbd08a78c3a3bb17964db2a326514c69c1dca660
+ cd /kernel
+ git config --global --add safe.directory /kernel
+ git log -n1
commit 87aafbe28147830f0fe5f746b672f57bc898786d
Author: Satyanarayana K V P <satyanarayana.k.v.p@intel.com>
Date: Wed Oct 8 11:05:00 2025 -0700
drm/xe/guc: Increase wait timeout to 2sec after BUSY reply from GuC
Some VF2GUC actions may take longer to process. Increase default timeout
after received BUSY indication to 2sec to cover all worst case scenarios.
Signed-off-by: Satyanarayana K V P <satyanarayana.k.v.p@intel.com>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Cc: Michal Wajdeczko <michal.wajdeczko@intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
+ /mt/dim checkpatch f929eafc95342ea5377f79705864d50dd325c79f drm-intel
8bb97cfe4b10 drm/xe: Add NULL checks to scratch LRC allocation
961c93afa57a drm/xe: Save off position in ring in which a job was programmed
03f5b84bbe9c drm/xe/guc: Track pending-enable source in submission state
871a281c83c9 drm/xe: Track LR jobs in DRM scheduler pending list
d73ddcea71c1 drm/xe: Return first unsignaled job first pending job helper
1559fe5cb2b5 drm/xe: Don't change LRC ring head on job resubmission
4c6615dd2762 drm/xe: Make LRC W/A scratch buffer usage consistent
16ac06f4c408 drm/xe/vf: Add xe_gt_recovery_pending helper
b5b141b347bb drm/xe/vf: Make VF recovery run on per-GT worker
55bece6b5886 drm/xe/vf: Abort H2G sends during VF post-migration recovery
9ab787a499a8 drm/xe/vf: Remove memory allocations from VF post migration recovery
96b18f3efb67 drm/xe: Move GGTT lock init to alloc
f99d5e676f8a drm/xe/vf: Move LMEM config to tile layer
-:185: WARNING:FILE_PATH_CHANGES: added, moved or deleted file(s), does MAINTAINERS need updating?
#185:
new file mode 100644
total: 0 errors, 1 warnings, 0 checks, 184 lines checked
9ccf9cd1940c drm/xe/vf: Close multi-GT GGTT shift race
759957840acb drm/xe/vf: Teardown VF post migration worker on driver unload
047a6fdd87e4 drm/xe/vf: Don't allow GT reset to be queued during VF post migration recovery
10dfc7949321 drm/xe/vf: Wakeup in GuC backend on VF post migration recovery
22930c664798 drm/xe/vf: Avoid indefinite blocking in preempt rebind worker for VFs supporting migration
256cbbb86198 drm/xe/vf: Use GUC_HXG_TYPE_EVENT for GuC context register
1a00a209d0f5 drm/xe/vf: Flush and stop CTs in VF post migration recovery
1c7b99214ff2 drm/xe/vf: Reset TLB invalidations during VF post migration recovery
5d50f23cc4d2 drm/xe/vf: Kickstart after resfix in VF post migration recovery
b99b40fb28b9 drm/xe: Add CTB_H2G_BUFFER_OFFSET define
5d51ea995af7 drm/xe/vf: Start CTs before resfix VF post migration recovery
c8455ae6f1a0 drm/xe/vf: Abort VF post migration recovery on failure
4d1d20403374 drm/xe/vf: Replay GuC submission state on pause / unpause
c0ee3ef9300a drm/xe: Move queue init before LRC creation
2f5b89531e00 drm/xe/vf: Add debug prints for GuC replaying state during VF recovery
702177d207ec drm/xe/vf: Workaround for race condition in GuC firmware during VF pause
e0aa7170aedc drm/xe: Use PPGTT addresses for TLB invalidation to avoid GGTT fixups
db2d7a8c7c7c drm/xe/vf: Use primary GT ordered work queue on media GT on PTL VF
cb87c61545f2 drm/xe/vf: Ensure media GT VF recovery runs after primary GT on PTL
85eed6d14539 drm/xe/vf: Rebase CCS save/restore BB GGTT addresses
87aafbe28147 drm/xe/guc: Increase wait timeout to 2sec after BUSY reply from GuC
^ permalink raw reply [flat|nested] 42+ messages in thread* ✓ CI.KUnit: success for VF migration redesign (rev9)
2025-10-08 18:04 [PATCH v9 00/34] VF migration redesign Matthew Brost
` (34 preceding siblings ...)
2025-10-08 18:28 ` ✗ CI.checkpatch: warning for VF migration redesign (rev9) Patchwork
@ 2025-10-08 18:29 ` Patchwork
2025-10-08 19:04 ` ✓ Xe.CI.BAT: " Patchwork
2025-10-08 21:40 ` ✗ Xe.CI.Full: failure " Patchwork
37 siblings, 0 replies; 42+ messages in thread
From: Patchwork @ 2025-10-08 18:29 UTC (permalink / raw)
To: Matthew Brost; +Cc: intel-xe
== Series Details ==
Series: VF migration redesign (rev9)
URL : https://patchwork.freedesktop.org/series/154627/
State : success
== Summary ==
+ trap cleanup EXIT
+ /kernel/tools/testing/kunit/kunit.py run --kunitconfig /kernel/drivers/gpu/drm/xe/.kunitconfig
[18:28:17] Configuring KUnit Kernel ...
Generating .config ...
Populating config with:
$ make ARCH=um O=.kunit olddefconfig
[18:28:21] Building KUnit Kernel ...
Populating config with:
$ make ARCH=um O=.kunit olddefconfig
Building with:
$ make all compile_commands.json scripts_gdb ARCH=um O=.kunit --jobs=48
[18:28:50] Starting KUnit Kernel (1/1)...
[18:28:50] ============================================================
Running tests with:
$ .kunit/linux kunit.enable=1 mem=1G console=tty kunit_shutdown=halt
[18:28:51] ================== guc_buf (11 subtests) ===================
[18:28:51] [PASSED] test_smallest
[18:28:51] [PASSED] test_largest
[18:28:51] [PASSED] test_granular
[18:28:51] [PASSED] test_unique
[18:28:51] [PASSED] test_overlap
[18:28:51] [PASSED] test_reusable
[18:28:51] [PASSED] test_too_big
[18:28:51] [PASSED] test_flush
[18:28:51] [PASSED] test_lookup
[18:28:51] [PASSED] test_data
[18:28:51] [PASSED] test_class
[18:28:51] ===================== [PASSED] guc_buf =====================
[18:28:51] =================== guc_dbm (7 subtests) ===================
[18:28:51] [PASSED] test_empty
[18:28:51] [PASSED] test_default
[18:28:51] ======================== test_size ========================
[18:28:51] [PASSED] 4
[18:28:51] [PASSED] 8
[18:28:51] [PASSED] 32
[18:28:51] [PASSED] 256
[18:28:51] ==================== [PASSED] test_size ====================
[18:28:51] ======================= test_reuse ========================
[18:28:51] [PASSED] 4
[18:28:51] [PASSED] 8
[18:28:51] [PASSED] 32
[18:28:51] [PASSED] 256
[18:28:51] =================== [PASSED] test_reuse ====================
[18:28:51] =================== test_range_overlap ====================
[18:28:51] [PASSED] 4
[18:28:51] [PASSED] 8
[18:28:51] [PASSED] 32
[18:28:51] [PASSED] 256
[18:28:51] =============== [PASSED] test_range_overlap ================
[18:28:51] =================== test_range_compact ====================
[18:28:51] [PASSED] 4
[18:28:51] [PASSED] 8
[18:28:51] [PASSED] 32
[18:28:51] [PASSED] 256
[18:28:51] =============== [PASSED] test_range_compact ================
[18:28:51] ==================== test_range_spare =====================
[18:28:51] [PASSED] 4
[18:28:51] [PASSED] 8
[18:28:51] [PASSED] 32
[18:28:51] [PASSED] 256
[18:28:51] ================ [PASSED] test_range_spare =================
[18:28:51] ===================== [PASSED] guc_dbm =====================
[18:28:51] =================== guc_idm (6 subtests) ===================
[18:28:51] [PASSED] bad_init
[18:28:51] [PASSED] no_init
[18:28:51] [PASSED] init_fini
[18:28:51] [PASSED] check_used
[18:28:51] [PASSED] check_quota
[18:28:51] [PASSED] check_all
[18:28:51] ===================== [PASSED] guc_idm =====================
[18:28:51] ================== no_relay (3 subtests) ===================
[18:28:51] [PASSED] xe_drops_guc2pf_if_not_ready
[18:28:51] [PASSED] xe_drops_guc2vf_if_not_ready
[18:28:51] [PASSED] xe_rejects_send_if_not_ready
[18:28:51] ==================== [PASSED] no_relay =====================
[18:28:51] ================== pf_relay (14 subtests) ==================
[18:28:51] [PASSED] pf_rejects_guc2pf_too_short
[18:28:51] [PASSED] pf_rejects_guc2pf_too_long
[18:28:51] [PASSED] pf_rejects_guc2pf_no_payload
[18:28:51] [PASSED] pf_fails_no_payload
[18:28:51] [PASSED] pf_fails_bad_origin
[18:28:51] [PASSED] pf_fails_bad_type
[18:28:51] [PASSED] pf_txn_reports_error
[18:28:51] [PASSED] pf_txn_sends_pf2guc
[18:28:51] [PASSED] pf_sends_pf2guc
[18:28:51] [SKIPPED] pf_loopback_nop
[18:28:51] [SKIPPED] pf_loopback_echo
[18:28:51] [SKIPPED] pf_loopback_fail
[18:28:51] [SKIPPED] pf_loopback_busy
[18:28:51] [SKIPPED] pf_loopback_retry
[18:28:51] ==================== [PASSED] pf_relay =====================
[18:28:51] ================== vf_relay (3 subtests) ===================
[18:28:51] [PASSED] vf_rejects_guc2vf_too_short
[18:28:51] [PASSED] vf_rejects_guc2vf_too_long
[18:28:51] [PASSED] vf_rejects_guc2vf_no_payload
[18:28:51] ==================== [PASSED] vf_relay =====================
[18:28:51] ===================== lmtt (1 subtest) =====================
[18:28:51] ======================== test_ops =========================
[18:28:51] [PASSED] 2-level
[18:28:51] [PASSED] multi-level
[18:28:51] ==================== [PASSED] test_ops =====================
[18:28:51] ====================== [PASSED] lmtt =======================
[18:28:51] ================= pf_service (11 subtests) =================
[18:28:51] [PASSED] pf_negotiate_any
[18:28:51] [PASSED] pf_negotiate_base_match
[18:28:51] [PASSED] pf_negotiate_base_newer
[18:28:51] [PASSED] pf_negotiate_base_next
[18:28:51] [SKIPPED] pf_negotiate_base_older
[18:28:51] [PASSED] pf_negotiate_base_prev
[18:28:51] [PASSED] pf_negotiate_latest_match
[18:28:51] [PASSED] pf_negotiate_latest_newer
[18:28:51] [PASSED] pf_negotiate_latest_next
[18:28:51] [SKIPPED] pf_negotiate_latest_older
[18:28:51] [SKIPPED] pf_negotiate_latest_prev
[18:28:51] =================== [PASSED] pf_service ====================
[18:28:51] ================= xe_guc_g2g (2 subtests) ==================
[18:28:51] ============== xe_live_guc_g2g_kunit_default ==============
[18:28:51] ========= [SKIPPED] xe_live_guc_g2g_kunit_default ==========
[18:28:51] ============== xe_live_guc_g2g_kunit_allmem ===============
[18:28:51] ========== [SKIPPED] xe_live_guc_g2g_kunit_allmem ==========
[18:28:51] =================== [SKIPPED] xe_guc_g2g ===================
[18:28:51] =================== xe_mocs (2 subtests) ===================
[18:28:51] ================ xe_live_mocs_kernel_kunit ================
[18:28:51] =========== [SKIPPED] xe_live_mocs_kernel_kunit ============
[18:28:51] ================ xe_live_mocs_reset_kunit =================
[18:28:51] ============ [SKIPPED] xe_live_mocs_reset_kunit ============
[18:28:51] ==================== [SKIPPED] xe_mocs =====================
[18:28:51] ================= xe_migrate (2 subtests) ==================
[18:28:51] ================= xe_migrate_sanity_kunit =================
[18:28:51] ============ [SKIPPED] xe_migrate_sanity_kunit =============
[18:28:51] ================== xe_validate_ccs_kunit ==================
[18:28:51] ============= [SKIPPED] xe_validate_ccs_kunit ==============
[18:28:51] =================== [SKIPPED] xe_migrate ===================
[18:28:51] ================== xe_dma_buf (1 subtest) ==================
[18:28:51] ==================== xe_dma_buf_kunit =====================
[18:28:51] ================ [SKIPPED] xe_dma_buf_kunit ================
[18:28:51] =================== [SKIPPED] xe_dma_buf ===================
[18:28:51] ================= xe_bo_shrink (1 subtest) =================
[18:28:51] =================== xe_bo_shrink_kunit ====================
[18:28:51] =============== [SKIPPED] xe_bo_shrink_kunit ===============
[18:28:51] ================== [SKIPPED] xe_bo_shrink ==================
[18:28:51] ==================== xe_bo (2 subtests) ====================
[18:28:51] ================== xe_ccs_migrate_kunit ===================
[18:28:51] ============== [SKIPPED] xe_ccs_migrate_kunit ==============
[18:28:51] ==================== xe_bo_evict_kunit ====================
[18:28:51] =============== [SKIPPED] xe_bo_evict_kunit ================
[18:28:51] ===================== [SKIPPED] xe_bo ======================
[18:28:51] ==================== args (11 subtests) ====================
[18:28:51] [PASSED] count_args_test
[18:28:51] [PASSED] call_args_example
[18:28:51] [PASSED] call_args_test
[18:28:51] [PASSED] drop_first_arg_example
[18:28:51] [PASSED] drop_first_arg_test
[18:28:51] [PASSED] first_arg_example
[18:28:51] [PASSED] first_arg_test
[18:28:51] [PASSED] last_arg_example
[18:28:51] [PASSED] last_arg_test
[18:28:51] [PASSED] pick_arg_example
[18:28:51] [PASSED] sep_comma_example
[18:28:51] ====================== [PASSED] args =======================
[18:28:51] =================== xe_pci (3 subtests) ====================
[18:28:51] ==================== check_graphics_ip ====================
[18:28:51] [PASSED] 12.00 Xe_LP
[18:28:51] [PASSED] 12.10 Xe_LP+
[18:28:51] [PASSED] 12.55 Xe_HPG
[18:28:51] [PASSED] 12.60 Xe_HPC
[18:28:51] [PASSED] 12.70 Xe_LPG
[18:28:51] [PASSED] 12.71 Xe_LPG
[18:28:51] [PASSED] 12.74 Xe_LPG+
[18:28:51] [PASSED] 20.01 Xe2_HPG
[18:28:51] [PASSED] 20.02 Xe2_HPG
[18:28:51] [PASSED] 20.04 Xe2_LPG
[18:28:51] [PASSED] 30.00 Xe3_LPG
[18:28:51] [PASSED] 30.01 Xe3_LPG
[18:28:51] [PASSED] 30.03 Xe3_LPG
[18:28:51] ================ [PASSED] check_graphics_ip ================
[18:28:51] ===================== check_media_ip ======================
[18:28:51] [PASSED] 12.00 Xe_M
[18:28:51] [PASSED] 12.55 Xe_HPM
[18:28:51] [PASSED] 13.00 Xe_LPM+
[18:28:51] [PASSED] 13.01 Xe2_HPM
[18:28:51] [PASSED] 20.00 Xe2_LPM
[18:28:51] [PASSED] 30.00 Xe3_LPM
[18:28:51] [PASSED] 30.02 Xe3_LPM
[18:28:51] ================= [PASSED] check_media_ip ==================
[18:28:51] ================= check_platform_gt_count =================
[18:28:51] [PASSED] 0x9A60 (TIGERLAKE)
[18:28:51] [PASSED] 0x9A68 (TIGERLAKE)
[18:28:51] [PASSED] 0x9A70 (TIGERLAKE)
[18:28:51] [PASSED] 0x9A40 (TIGERLAKE)
[18:28:51] [PASSED] 0x9A49 (TIGERLAKE)
[18:28:51] [PASSED] 0x9A59 (TIGERLAKE)
[18:28:51] [PASSED] 0x9A78 (TIGERLAKE)
[18:28:51] [PASSED] 0x9AC0 (TIGERLAKE)
[18:28:51] [PASSED] 0x9AC9 (TIGERLAKE)
[18:28:51] [PASSED] 0x9AD9 (TIGERLAKE)
[18:28:51] [PASSED] 0x9AF8 (TIGERLAKE)
[18:28:51] [PASSED] 0x4C80 (ROCKETLAKE)
[18:28:51] [PASSED] 0x4C8A (ROCKETLAKE)
[18:28:51] [PASSED] 0x4C8B (ROCKETLAKE)
[18:28:51] [PASSED] 0x4C8C (ROCKETLAKE)
[18:28:51] [PASSED] 0x4C90 (ROCKETLAKE)
[18:28:51] [PASSED] 0x4C9A (ROCKETLAKE)
[18:28:51] [PASSED] 0x4680 (ALDERLAKE_S)
[18:28:51] [PASSED] 0x4682 (ALDERLAKE_S)
[18:28:51] [PASSED] 0x4688 (ALDERLAKE_S)
[18:28:51] [PASSED] 0x468A (ALDERLAKE_S)
[18:28:51] [PASSED] 0x468B (ALDERLAKE_S)
[18:28:51] [PASSED] 0x4690 (ALDERLAKE_S)
[18:28:51] [PASSED] 0x4692 (ALDERLAKE_S)
[18:28:51] [PASSED] 0x4693 (ALDERLAKE_S)
[18:28:51] [PASSED] 0x46A0 (ALDERLAKE_P)
[18:28:51] [PASSED] 0x46A1 (ALDERLAKE_P)
[18:28:51] [PASSED] 0x46A2 (ALDERLAKE_P)
[18:28:51] [PASSED] 0x46A3 (ALDERLAKE_P)
[18:28:51] [PASSED] 0x46A6 (ALDERLAKE_P)
[18:28:51] [PASSED] 0x46A8 (ALDERLAKE_P)
[18:28:51] [PASSED] 0x46AA (ALDERLAKE_P)
[18:28:51] [PASSED] 0x462A (ALDERLAKE_P)
[18:28:51] [PASSED] 0x4626 (ALDERLAKE_P)
[18:28:51] [PASSED] 0x4628 (ALDERLAKE_P)
[18:28:51] [PASSED] 0x46B0 (ALDERLAKE_P)
[18:28:51] [PASSED] 0x46B1 (ALDERLAKE_P)
[18:28:51] [PASSED] 0x46B2 (ALDERLAKE_P)
[18:28:51] [PASSED] 0x46B3 (ALDERLAKE_P)
[18:28:51] [PASSED] 0x46C0 (ALDERLAKE_P)
[18:28:51] [PASSED] 0x46C1 (ALDERLAKE_P)
[18:28:51] [PASSED] 0x46C2 (ALDERLAKE_P)
[18:28:51] [PASSED] 0x46C3 (ALDERLAKE_P)
[18:28:51] [PASSED] 0x46D0 (ALDERLAKE_N)
[18:28:51] [PASSED] 0x46D1 (ALDERLAKE_N)
[18:28:51] [PASSED] 0x46D2 (ALDERLAKE_N)
[18:28:51] [PASSED] 0x46D3 (ALDERLAKE_N)
[18:28:51] [PASSED] 0x46D4 (ALDERLAKE_N)
[18:28:51] [PASSED] 0xA721 (ALDERLAKE_P)
[18:28:51] [PASSED] 0xA7A1 (ALDERLAKE_P)
[18:28:51] [PASSED] 0xA7A9 (ALDERLAKE_P)
[18:28:51] [PASSED] 0xA7AC (ALDERLAKE_P)
[18:28:51] [PASSED] 0xA7AD (ALDERLAKE_P)
[18:28:51] [PASSED] 0xA720 (ALDERLAKE_P)
[18:28:51] [PASSED] 0xA7A0 (ALDERLAKE_P)
[18:28:51] [PASSED] 0xA7A8 (ALDERLAKE_P)
[18:28:51] [PASSED] 0xA7AA (ALDERLAKE_P)
[18:28:51] [PASSED] 0xA7AB (ALDERLAKE_P)
[18:28:51] [PASSED] 0xA780 (ALDERLAKE_S)
[18:28:51] [PASSED] 0xA781 (ALDERLAKE_S)
[18:28:51] [PASSED] 0xA782 (ALDERLAKE_S)
[18:28:51] [PASSED] 0xA783 (ALDERLAKE_S)
[18:28:51] [PASSED] 0xA788 (ALDERLAKE_S)
[18:28:51] [PASSED] 0xA789 (ALDERLAKE_S)
[18:28:51] [PASSED] 0xA78A (ALDERLAKE_S)
[18:28:51] [PASSED] 0xA78B (ALDERLAKE_S)
[18:28:51] [PASSED] 0x4905 (DG1)
[18:28:51] [PASSED] 0x4906 (DG1)
[18:28:51] [PASSED] 0x4907 (DG1)
[18:28:51] [PASSED] 0x4908 (DG1)
[18:28:51] [PASSED] 0x4909 (DG1)
[18:28:51] [PASSED] 0x56C0 (DG2)
[18:28:51] [PASSED] 0x56C2 (DG2)
[18:28:51] [PASSED] 0x56C1 (DG2)
[18:28:51] [PASSED] 0x7D51 (METEORLAKE)
[18:28:51] [PASSED] 0x7DD1 (METEORLAKE)
[18:28:51] [PASSED] 0x7D41 (METEORLAKE)
[18:28:51] [PASSED] 0x7D67 (METEORLAKE)
[18:28:51] [PASSED] 0xB640 (METEORLAKE)
[18:28:51] [PASSED] 0x56A0 (DG2)
[18:28:51] [PASSED] 0x56A1 (DG2)
[18:28:51] [PASSED] 0x56A2 (DG2)
[18:28:51] [PASSED] 0x56BE (DG2)
[18:28:51] [PASSED] 0x56BF (DG2)
[18:28:51] [PASSED] 0x5690 (DG2)
[18:28:51] [PASSED] 0x5691 (DG2)
[18:28:51] [PASSED] 0x5692 (DG2)
[18:28:51] [PASSED] 0x56A5 (DG2)
[18:28:51] [PASSED] 0x56A6 (DG2)
[18:28:51] [PASSED] 0x56B0 (DG2)
[18:28:51] [PASSED] 0x56B1 (DG2)
[18:28:51] [PASSED] 0x56BA (DG2)
[18:28:51] [PASSED] 0x56BB (DG2)
[18:28:51] [PASSED] 0x56BC (DG2)
[18:28:51] [PASSED] 0x56BD (DG2)
[18:28:51] [PASSED] 0x5693 (DG2)
[18:28:51] [PASSED] 0x5694 (DG2)
[18:28:51] [PASSED] 0x5695 (DG2)
[18:28:51] [PASSED] 0x56A3 (DG2)
[18:28:51] [PASSED] 0x56A4 (DG2)
[18:28:51] [PASSED] 0x56B2 (DG2)
[18:28:51] [PASSED] 0x56B3 (DG2)
[18:28:51] [PASSED] 0x5696 (DG2)
[18:28:51] [PASSED] 0x5697 (DG2)
[18:28:51] [PASSED] 0xB69 (PVC)
[18:28:51] [PASSED] 0xB6E (PVC)
[18:28:51] [PASSED] 0xBD4 (PVC)
[18:28:51] [PASSED] 0xBD5 (PVC)
[18:28:51] [PASSED] 0xBD6 (PVC)
[18:28:51] [PASSED] 0xBD7 (PVC)
[18:28:51] [PASSED] 0xBD8 (PVC)
[18:28:51] [PASSED] 0xBD9 (PVC)
[18:28:51] [PASSED] 0xBDA (PVC)
[18:28:51] [PASSED] 0xBDB (PVC)
[18:28:51] [PASSED] 0xBE0 (PVC)
[18:28:51] [PASSED] 0xBE1 (PVC)
[18:28:51] [PASSED] 0xBE5 (PVC)
[18:28:51] [PASSED] 0x7D40 (METEORLAKE)
[18:28:51] [PASSED] 0x7D45 (METEORLAKE)
[18:28:51] [PASSED] 0x7D55 (METEORLAKE)
[18:28:51] [PASSED] 0x7D60 (METEORLAKE)
[18:28:51] [PASSED] 0x7DD5 (METEORLAKE)
[18:28:51] [PASSED] 0x6420 (LUNARLAKE)
[18:28:51] [PASSED] 0x64A0 (LUNARLAKE)
[18:28:51] [PASSED] 0x64B0 (LUNARLAKE)
[18:28:51] [PASSED] 0xE202 (BATTLEMAGE)
[18:28:51] [PASSED] 0xE209 (BATTLEMAGE)
[18:28:51] [PASSED] 0xE20B (BATTLEMAGE)
[18:28:51] [PASSED] 0xE20C (BATTLEMAGE)
[18:28:51] [PASSED] 0xE20D (BATTLEMAGE)
[18:28:51] [PASSED] 0xE210 (BATTLEMAGE)
[18:28:51] [PASSED] 0xE211 (BATTLEMAGE)
[18:28:51] [PASSED] 0xE212 (BATTLEMAGE)
[18:28:51] [PASSED] 0xE216 (BATTLEMAGE)
[18:28:51] [PASSED] 0xE220 (BATTLEMAGE)
[18:28:51] [PASSED] 0xE221 (BATTLEMAGE)
[18:28:51] [PASSED] 0xE222 (BATTLEMAGE)
[18:28:51] [PASSED] 0xE223 (BATTLEMAGE)
[18:28:51] [PASSED] 0xB080 (PANTHERLAKE)
[18:28:51] [PASSED] 0xB081 (PANTHERLAKE)
[18:28:51] [PASSED] 0xB082 (PANTHERLAKE)
[18:28:51] [PASSED] 0xB083 (PANTHERLAKE)
[18:28:51] [PASSED] 0xB084 (PANTHERLAKE)
[18:28:51] [PASSED] 0xB085 (PANTHERLAKE)
[18:28:51] [PASSED] 0xB086 (PANTHERLAKE)
[18:28:51] [PASSED] 0xB087 (PANTHERLAKE)
[18:28:51] [PASSED] 0xB08F (PANTHERLAKE)
[18:28:51] [PASSED] 0xB090 (PANTHERLAKE)
[18:28:51] [PASSED] 0xB0A0 (PANTHERLAKE)
[18:28:51] [PASSED] 0xB0B0 (PANTHERLAKE)
[18:28:51] [PASSED] 0xFD80 (PANTHERLAKE)
[18:28:51] [PASSED] 0xFD81 (PANTHERLAKE)
[18:28:51] ============= [PASSED] check_platform_gt_count =============
[18:28:51] ===================== [PASSED] xe_pci ======================
[18:28:51] =================== xe_rtp (2 subtests) ====================
[18:28:51] =============== xe_rtp_process_to_sr_tests ================
[18:28:51] [PASSED] coalesce-same-reg
[18:28:51] [PASSED] no-match-no-add
[18:28:51] [PASSED] match-or
[18:28:51] [PASSED] match-or-xfail
[18:28:51] [PASSED] no-match-no-add-multiple-rules
[18:28:51] [PASSED] two-regs-two-entries
[18:28:51] [PASSED] clr-one-set-other
[18:28:51] [PASSED] set-field
[18:28:51] [PASSED] conflict-duplicate
[18:28:51] [PASSED] conflict-not-disjoint
[18:28:51] [PASSED] conflict-reg-type
[18:28:51] =========== [PASSED] xe_rtp_process_to_sr_tests ============
[18:28:51] ================== xe_rtp_process_tests ===================
[18:28:51] [PASSED] active1
[18:28:51] [PASSED] active2
[18:28:51] [PASSED] active-inactive
[18:28:51] [PASSED] inactive-active
[18:28:51] [PASSED] inactive-1st_or_active-inactive
[18:28:51] [PASSED] inactive-2nd_or_active-inactive
[18:28:51] [PASSED] inactive-last_or_active-inactive
[18:28:51] [PASSED] inactive-no_or_active-inactive
[18:28:51] ============== [PASSED] xe_rtp_process_tests ===============
[18:28:51] ===================== [PASSED] xe_rtp ======================
[18:28:51] ==================== xe_wa (1 subtest) =====================
[18:28:51] ======================== xe_wa_gt =========================
[18:28:51] [PASSED] TIGERLAKE B0
[18:28:51] [PASSED] DG1 A0
[18:28:51] [PASSED] DG1 B0
[18:28:51] [PASSED] ALDERLAKE_S A0
[18:28:51] [PASSED] ALDERLAKE_S B0
stty: 'standard input': Inappropriate ioctl for device
[18:28:51] [PASSED] ALDERLAKE_S C0
[18:28:51] [PASSED] ALDERLAKE_S D0
[18:28:51] [PASSED] ALDERLAKE_P A0
[18:28:51] [PASSED] ALDERLAKE_P B0
[18:28:51] [PASSED] ALDERLAKE_P C0
[18:28:51] [PASSED] ALDERLAKE_S RPLS D0
[18:28:51] [PASSED] ALDERLAKE_P RPLU E0
[18:28:51] [PASSED] DG2 G10 C0
[18:28:51] [PASSED] DG2 G11 B1
[18:28:51] [PASSED] DG2 G12 A1
[18:28:51] [PASSED] METEORLAKE 12.70(Xe_LPG) A0 13.00(Xe_LPM+) A0
[18:28:51] [PASSED] METEORLAKE 12.71(Xe_LPG) A0 13.00(Xe_LPM+) A0
[18:28:51] [PASSED] METEORLAKE 12.74(Xe_LPG+) A0 13.00(Xe_LPM+) A0
[18:28:51] [PASSED] LUNARLAKE 20.04(Xe2_LPG) A0 20.00(Xe2_LPM) A0
[18:28:51] [PASSED] LUNARLAKE 20.04(Xe2_LPG) B0 20.00(Xe2_LPM) A0
[18:28:51] [PASSED] BATTLEMAGE 20.01(Xe2_HPG) A0 13.01(Xe2_HPM) A1
[18:28:51] [PASSED] PANTHERLAKE 30.00(Xe3_LPG) A0 30.00(Xe3_LPM) A0
[18:28:51] ==================== [PASSED] xe_wa_gt =====================
[18:28:51] ====================== [PASSED] xe_wa ======================
[18:28:51] ============================================================
[18:28:51] Testing complete. Ran 306 tests: passed: 288, skipped: 18
[18:28:51] Elapsed time: 33.756s total, 4.282s configuring, 29.109s building, 0.320s running
+ /kernel/tools/testing/kunit/kunit.py run --kunitconfig /kernel/drivers/gpu/drm/tests/.kunitconfig
[18:28:51] Configuring KUnit Kernel ...
Regenerating .config ...
Populating config with:
$ make ARCH=um O=.kunit olddefconfig
[18:28:52] Building KUnit Kernel ...
Populating config with:
$ make ARCH=um O=.kunit olddefconfig
Building with:
$ make all compile_commands.json scripts_gdb ARCH=um O=.kunit --jobs=48
[18:29:16] Starting KUnit Kernel (1/1)...
[18:29:16] ============================================================
Running tests with:
$ .kunit/linux kunit.enable=1 mem=1G console=tty kunit_shutdown=halt
[18:29:16] ============ drm_test_pick_cmdline (2 subtests) ============
[18:29:16] [PASSED] drm_test_pick_cmdline_res_1920_1080_60
[18:29:16] =============== drm_test_pick_cmdline_named ===============
[18:29:16] [PASSED] NTSC
[18:29:16] [PASSED] NTSC-J
[18:29:16] [PASSED] PAL
[18:29:16] [PASSED] PAL-M
[18:29:16] =========== [PASSED] drm_test_pick_cmdline_named ===========
[18:29:16] ============== [PASSED] drm_test_pick_cmdline ==============
[18:29:16] == drm_test_atomic_get_connector_for_encoder (1 subtest) ===
[18:29:16] [PASSED] drm_test_drm_atomic_get_connector_for_encoder
[18:29:16] ==== [PASSED] drm_test_atomic_get_connector_for_encoder ====
[18:29:16] =========== drm_validate_clone_mode (2 subtests) ===========
[18:29:16] ============== drm_test_check_in_clone_mode ===============
[18:29:16] [PASSED] in_clone_mode
[18:29:16] [PASSED] not_in_clone_mode
[18:29:16] ========== [PASSED] drm_test_check_in_clone_mode ===========
[18:29:16] =============== drm_test_check_valid_clones ===============
[18:29:16] [PASSED] not_in_clone_mode
[18:29:16] [PASSED] valid_clone
[18:29:16] [PASSED] invalid_clone
[18:29:16] =========== [PASSED] drm_test_check_valid_clones ===========
[18:29:16] ============= [PASSED] drm_validate_clone_mode =============
[18:29:16] ============= drm_validate_modeset (1 subtest) =============
[18:29:16] [PASSED] drm_test_check_connector_changed_modeset
[18:29:16] ============== [PASSED] drm_validate_modeset ===============
[18:29:16] ====== drm_test_bridge_get_current_state (2 subtests) ======
[18:29:16] [PASSED] drm_test_drm_bridge_get_current_state_atomic
[18:29:16] [PASSED] drm_test_drm_bridge_get_current_state_legacy
[18:29:16] ======== [PASSED] drm_test_bridge_get_current_state ========
[18:29:16] ====== drm_test_bridge_helper_reset_crtc (3 subtests) ======
[18:29:16] [PASSED] drm_test_drm_bridge_helper_reset_crtc_atomic
[18:29:16] [PASSED] drm_test_drm_bridge_helper_reset_crtc_atomic_disabled
[18:29:16] [PASSED] drm_test_drm_bridge_helper_reset_crtc_legacy
[18:29:16] ======== [PASSED] drm_test_bridge_helper_reset_crtc ========
[18:29:16] ============== drm_bridge_alloc (2 subtests) ===============
[18:29:16] [PASSED] drm_test_drm_bridge_alloc_basic
[18:29:16] [PASSED] drm_test_drm_bridge_alloc_get_put
[18:29:16] ================ [PASSED] drm_bridge_alloc =================
[18:29:16] ================== drm_buddy (8 subtests) ==================
[18:29:16] [PASSED] drm_test_buddy_alloc_limit
[18:29:16] [PASSED] drm_test_buddy_alloc_optimistic
[18:29:16] [PASSED] drm_test_buddy_alloc_pessimistic
[18:29:16] [PASSED] drm_test_buddy_alloc_pathological
[18:29:16] [PASSED] drm_test_buddy_alloc_contiguous
[18:29:16] [PASSED] drm_test_buddy_alloc_clear
[18:29:16] [PASSED] drm_test_buddy_alloc_range_bias
[18:29:16] [PASSED] drm_test_buddy_fragmentation_performance
[18:29:16] ==================== [PASSED] drm_buddy ====================
[18:29:16] ============= drm_cmdline_parser (40 subtests) =============
[18:29:16] [PASSED] drm_test_cmdline_force_d_only
[18:29:16] [PASSED] drm_test_cmdline_force_D_only_dvi
[18:29:16] [PASSED] drm_test_cmdline_force_D_only_hdmi
[18:29:16] [PASSED] drm_test_cmdline_force_D_only_not_digital
[18:29:16] [PASSED] drm_test_cmdline_force_e_only
[18:29:16] [PASSED] drm_test_cmdline_res
[18:29:16] [PASSED] drm_test_cmdline_res_vesa
[18:29:16] [PASSED] drm_test_cmdline_res_vesa_rblank
[18:29:16] [PASSED] drm_test_cmdline_res_rblank
[18:29:16] [PASSED] drm_test_cmdline_res_bpp
[18:29:16] [PASSED] drm_test_cmdline_res_refresh
[18:29:16] [PASSED] drm_test_cmdline_res_bpp_refresh
[18:29:16] [PASSED] drm_test_cmdline_res_bpp_refresh_interlaced
[18:29:16] [PASSED] drm_test_cmdline_res_bpp_refresh_margins
[18:29:16] [PASSED] drm_test_cmdline_res_bpp_refresh_force_off
[18:29:16] [PASSED] drm_test_cmdline_res_bpp_refresh_force_on
[18:29:16] [PASSED] drm_test_cmdline_res_bpp_refresh_force_on_analog
[18:29:16] [PASSED] drm_test_cmdline_res_bpp_refresh_force_on_digital
[18:29:16] [PASSED] drm_test_cmdline_res_bpp_refresh_interlaced_margins_force_on
[18:29:16] [PASSED] drm_test_cmdline_res_margins_force_on
[18:29:16] [PASSED] drm_test_cmdline_res_vesa_margins
[18:29:16] [PASSED] drm_test_cmdline_name
[18:29:16] [PASSED] drm_test_cmdline_name_bpp
[18:29:16] [PASSED] drm_test_cmdline_name_option
[18:29:16] [PASSED] drm_test_cmdline_name_bpp_option
[18:29:16] [PASSED] drm_test_cmdline_rotate_0
[18:29:16] [PASSED] drm_test_cmdline_rotate_90
[18:29:16] [PASSED] drm_test_cmdline_rotate_180
[18:29:16] [PASSED] drm_test_cmdline_rotate_270
[18:29:16] [PASSED] drm_test_cmdline_hmirror
[18:29:16] [PASSED] drm_test_cmdline_vmirror
[18:29:16] [PASSED] drm_test_cmdline_margin_options
[18:29:16] [PASSED] drm_test_cmdline_multiple_options
[18:29:16] [PASSED] drm_test_cmdline_bpp_extra_and_option
[18:29:16] [PASSED] drm_test_cmdline_extra_and_option
[18:29:16] [PASSED] drm_test_cmdline_freestanding_options
[18:29:16] [PASSED] drm_test_cmdline_freestanding_force_e_and_options
[18:29:16] [PASSED] drm_test_cmdline_panel_orientation
[18:29:16] ================ drm_test_cmdline_invalid =================
[18:29:16] [PASSED] margin_only
[18:29:16] [PASSED] interlace_only
[18:29:16] [PASSED] res_missing_x
[18:29:16] [PASSED] res_missing_y
[18:29:16] [PASSED] res_bad_y
[18:29:16] [PASSED] res_missing_y_bpp
[18:29:16] [PASSED] res_bad_bpp
[18:29:16] [PASSED] res_bad_refresh
[18:29:16] [PASSED] res_bpp_refresh_force_on_off
[18:29:16] [PASSED] res_invalid_mode
[18:29:16] [PASSED] res_bpp_wrong_place_mode
[18:29:16] [PASSED] name_bpp_refresh
[18:29:16] [PASSED] name_refresh
[18:29:16] [PASSED] name_refresh_wrong_mode
[18:29:16] [PASSED] name_refresh_invalid_mode
[18:29:16] [PASSED] rotate_multiple
[18:29:16] [PASSED] rotate_invalid_val
[18:29:16] [PASSED] rotate_truncated
[18:29:16] [PASSED] invalid_option
[18:29:16] [PASSED] invalid_tv_option
[18:29:16] [PASSED] truncated_tv_option
[18:29:16] ============ [PASSED] drm_test_cmdline_invalid =============
[18:29:16] =============== drm_test_cmdline_tv_options ===============
[18:29:16] [PASSED] NTSC
[18:29:16] [PASSED] NTSC_443
[18:29:16] [PASSED] NTSC_J
[18:29:16] [PASSED] PAL
[18:29:16] [PASSED] PAL_M
[18:29:16] [PASSED] PAL_N
[18:29:16] [PASSED] SECAM
[18:29:16] [PASSED] MONO_525
[18:29:16] [PASSED] MONO_625
[18:29:16] =========== [PASSED] drm_test_cmdline_tv_options ===========
[18:29:16] =============== [PASSED] drm_cmdline_parser ================
[18:29:16] ========== drmm_connector_hdmi_init (20 subtests) ==========
[18:29:16] [PASSED] drm_test_connector_hdmi_init_valid
[18:29:16] [PASSED] drm_test_connector_hdmi_init_bpc_8
[18:29:16] [PASSED] drm_test_connector_hdmi_init_bpc_10
[18:29:16] [PASSED] drm_test_connector_hdmi_init_bpc_12
[18:29:16] [PASSED] drm_test_connector_hdmi_init_bpc_invalid
[18:29:16] [PASSED] drm_test_connector_hdmi_init_bpc_null
[18:29:16] [PASSED] drm_test_connector_hdmi_init_formats_empty
[18:29:16] [PASSED] drm_test_connector_hdmi_init_formats_no_rgb
[18:29:16] === drm_test_connector_hdmi_init_formats_yuv420_allowed ===
[18:29:16] [PASSED] supported_formats=0x9 yuv420_allowed=1
[18:29:16] [PASSED] supported_formats=0x9 yuv420_allowed=0
[18:29:16] [PASSED] supported_formats=0x3 yuv420_allowed=1
[18:29:16] [PASSED] supported_formats=0x3 yuv420_allowed=0
[18:29:16] === [PASSED] drm_test_connector_hdmi_init_formats_yuv420_allowed ===
[18:29:16] [PASSED] drm_test_connector_hdmi_init_null_ddc
[18:29:16] [PASSED] drm_test_connector_hdmi_init_null_product
[18:29:16] [PASSED] drm_test_connector_hdmi_init_null_vendor
[18:29:16] [PASSED] drm_test_connector_hdmi_init_product_length_exact
[18:29:16] [PASSED] drm_test_connector_hdmi_init_product_length_too_long
[18:29:16] [PASSED] drm_test_connector_hdmi_init_product_valid
[18:29:16] [PASSED] drm_test_connector_hdmi_init_vendor_length_exact
[18:29:16] [PASSED] drm_test_connector_hdmi_init_vendor_length_too_long
[18:29:16] [PASSED] drm_test_connector_hdmi_init_vendor_valid
[18:29:16] ========= drm_test_connector_hdmi_init_type_valid =========
[18:29:16] [PASSED] HDMI-A
[18:29:16] [PASSED] HDMI-B
[18:29:16] ===== [PASSED] drm_test_connector_hdmi_init_type_valid =====
[18:29:16] ======== drm_test_connector_hdmi_init_type_invalid ========
[18:29:16] [PASSED] Unknown
[18:29:16] [PASSED] VGA
[18:29:16] [PASSED] DVI-I
[18:29:16] [PASSED] DVI-D
[18:29:16] [PASSED] DVI-A
[18:29:16] [PASSED] Composite
[18:29:16] [PASSED] SVIDEO
[18:29:16] [PASSED] LVDS
[18:29:16] [PASSED] Component
[18:29:16] [PASSED] DIN
[18:29:16] [PASSED] DP
[18:29:16] [PASSED] TV
[18:29:16] [PASSED] eDP
[18:29:16] [PASSED] Virtual
[18:29:16] [PASSED] DSI
[18:29:16] [PASSED] DPI
[18:29:16] [PASSED] Writeback
[18:29:16] [PASSED] SPI
[18:29:16] [PASSED] USB
[18:29:16] ==== [PASSED] drm_test_connector_hdmi_init_type_invalid ====
[18:29:16] ============ [PASSED] drmm_connector_hdmi_init =============
[18:29:16] ============= drmm_connector_init (3 subtests) =============
[18:29:16] [PASSED] drm_test_drmm_connector_init
[18:29:16] [PASSED] drm_test_drmm_connector_init_null_ddc
[18:29:16] ========= drm_test_drmm_connector_init_type_valid =========
[18:29:16] [PASSED] Unknown
[18:29:16] [PASSED] VGA
[18:29:16] [PASSED] DVI-I
[18:29:16] [PASSED] DVI-D
[18:29:16] [PASSED] DVI-A
[18:29:16] [PASSED] Composite
[18:29:16] [PASSED] SVIDEO
[18:29:16] [PASSED] LVDS
[18:29:16] [PASSED] Component
[18:29:16] [PASSED] DIN
[18:29:16] [PASSED] DP
[18:29:16] [PASSED] HDMI-A
[18:29:16] [PASSED] HDMI-B
[18:29:16] [PASSED] TV
[18:29:16] [PASSED] eDP
[18:29:16] [PASSED] Virtual
[18:29:16] [PASSED] DSI
[18:29:16] [PASSED] DPI
[18:29:16] [PASSED] Writeback
[18:29:16] [PASSED] SPI
[18:29:16] [PASSED] USB
[18:29:16] ===== [PASSED] drm_test_drmm_connector_init_type_valid =====
[18:29:16] =============== [PASSED] drmm_connector_init ===============
[18:29:16] ========= drm_connector_dynamic_init (6 subtests) ==========
[18:29:16] [PASSED] drm_test_drm_connector_dynamic_init
[18:29:16] [PASSED] drm_test_drm_connector_dynamic_init_null_ddc
[18:29:16] [PASSED] drm_test_drm_connector_dynamic_init_not_added
[18:29:16] [PASSED] drm_test_drm_connector_dynamic_init_properties
[18:29:16] ===== drm_test_drm_connector_dynamic_init_type_valid ======
[18:29:16] [PASSED] Unknown
[18:29:16] [PASSED] VGA
[18:29:16] [PASSED] DVI-I
[18:29:16] [PASSED] DVI-D
[18:29:16] [PASSED] DVI-A
[18:29:16] [PASSED] Composite
[18:29:16] [PASSED] SVIDEO
[18:29:16] [PASSED] LVDS
[18:29:16] [PASSED] Component
[18:29:16] [PASSED] DIN
[18:29:16] [PASSED] DP
[18:29:16] [PASSED] HDMI-A
[18:29:16] [PASSED] HDMI-B
[18:29:16] [PASSED] TV
[18:29:16] [PASSED] eDP
[18:29:16] [PASSED] Virtual
[18:29:16] [PASSED] DSI
[18:29:16] [PASSED] DPI
[18:29:16] [PASSED] Writeback
[18:29:16] [PASSED] SPI
[18:29:16] [PASSED] USB
[18:29:16] = [PASSED] drm_test_drm_connector_dynamic_init_type_valid ==
[18:29:16] ======== drm_test_drm_connector_dynamic_init_name =========
[18:29:16] [PASSED] Unknown
[18:29:16] [PASSED] VGA
[18:29:16] [PASSED] DVI-I
[18:29:16] [PASSED] DVI-D
[18:29:16] [PASSED] DVI-A
[18:29:16] [PASSED] Composite
[18:29:16] [PASSED] SVIDEO
[18:29:16] [PASSED] LVDS
[18:29:16] [PASSED] Component
[18:29:16] [PASSED] DIN
[18:29:16] [PASSED] DP
[18:29:16] [PASSED] HDMI-A
[18:29:16] [PASSED] HDMI-B
[18:29:16] [PASSED] TV
[18:29:16] [PASSED] eDP
[18:29:16] [PASSED] Virtual
[18:29:16] [PASSED] DSI
[18:29:16] [PASSED] DPI
[18:29:16] [PASSED] Writeback
[18:29:16] [PASSED] SPI
[18:29:16] [PASSED] USB
[18:29:16] ==== [PASSED] drm_test_drm_connector_dynamic_init_name =====
[18:29:16] =========== [PASSED] drm_connector_dynamic_init ============
[18:29:16] ==== drm_connector_dynamic_register_early (4 subtests) =====
[18:29:16] [PASSED] drm_test_drm_connector_dynamic_register_early_on_list
[18:29:16] [PASSED] drm_test_drm_connector_dynamic_register_early_defer
[18:29:16] [PASSED] drm_test_drm_connector_dynamic_register_early_no_init
[18:29:16] [PASSED] drm_test_drm_connector_dynamic_register_early_no_mode_object
[18:29:16] ====== [PASSED] drm_connector_dynamic_register_early =======
[18:29:16] ======= drm_connector_dynamic_register (7 subtests) ========
[18:29:16] [PASSED] drm_test_drm_connector_dynamic_register_on_list
[18:29:16] [PASSED] drm_test_drm_connector_dynamic_register_no_defer
[18:29:16] [PASSED] drm_test_drm_connector_dynamic_register_no_init
[18:29:16] [PASSED] drm_test_drm_connector_dynamic_register_mode_object
[18:29:16] [PASSED] drm_test_drm_connector_dynamic_register_sysfs
[18:29:16] [PASSED] drm_test_drm_connector_dynamic_register_sysfs_name
[18:29:16] [PASSED] drm_test_drm_connector_dynamic_register_debugfs
[18:29:16] ========= [PASSED] drm_connector_dynamic_register ==========
[18:29:16] = drm_connector_attach_broadcast_rgb_property (2 subtests) =
[18:29:16] [PASSED] drm_test_drm_connector_attach_broadcast_rgb_property
[18:29:16] [PASSED] drm_test_drm_connector_attach_broadcast_rgb_property_hdmi_connector
[18:29:16] === [PASSED] drm_connector_attach_broadcast_rgb_property ===
[18:29:16] ========== drm_get_tv_mode_from_name (2 subtests) ==========
[18:29:16] ========== drm_test_get_tv_mode_from_name_valid ===========
[18:29:16] [PASSED] NTSC
[18:29:16] [PASSED] NTSC-443
[18:29:16] [PASSED] NTSC-J
[18:29:16] [PASSED] PAL
[18:29:16] [PASSED] PAL-M
[18:29:16] [PASSED] PAL-N
[18:29:16] [PASSED] SECAM
[18:29:16] [PASSED] Mono
[18:29:16] ====== [PASSED] drm_test_get_tv_mode_from_name_valid =======
[18:29:16] [PASSED] drm_test_get_tv_mode_from_name_truncated
[18:29:16] ============ [PASSED] drm_get_tv_mode_from_name ============
[18:29:16] = drm_test_connector_hdmi_compute_mode_clock (12 subtests) =
[18:29:16] [PASSED] drm_test_drm_hdmi_compute_mode_clock_rgb
[18:29:16] [PASSED] drm_test_drm_hdmi_compute_mode_clock_rgb_10bpc
[18:29:16] [PASSED] drm_test_drm_hdmi_compute_mode_clock_rgb_10bpc_vic_1
[18:29:16] [PASSED] drm_test_drm_hdmi_compute_mode_clock_rgb_12bpc
[18:29:16] [PASSED] drm_test_drm_hdmi_compute_mode_clock_rgb_12bpc_vic_1
[18:29:16] [PASSED] drm_test_drm_hdmi_compute_mode_clock_rgb_double
[18:29:16] = drm_test_connector_hdmi_compute_mode_clock_yuv420_valid =
[18:29:16] [PASSED] VIC 96
[18:29:16] [PASSED] VIC 97
[18:29:16] [PASSED] VIC 101
[18:29:16] [PASSED] VIC 102
[18:29:16] [PASSED] VIC 106
[18:29:16] [PASSED] VIC 107
[18:29:16] === [PASSED] drm_test_connector_hdmi_compute_mode_clock_yuv420_valid ===
[18:29:16] [PASSED] drm_test_connector_hdmi_compute_mode_clock_yuv420_10_bpc
[18:29:16] [PASSED] drm_test_connector_hdmi_compute_mode_clock_yuv420_12_bpc
[18:29:16] [PASSED] drm_test_connector_hdmi_compute_mode_clock_yuv422_8_bpc
[18:29:16] [PASSED] drm_test_connector_hdmi_compute_mode_clock_yuv422_10_bpc
[18:29:16] [PASSED] drm_test_connector_hdmi_compute_mode_clock_yuv422_12_bpc
[18:29:16] === [PASSED] drm_test_connector_hdmi_compute_mode_clock ====
[18:29:16] == drm_hdmi_connector_get_broadcast_rgb_name (2 subtests) ==
[18:29:16] === drm_test_drm_hdmi_connector_get_broadcast_rgb_name ====
[18:29:16] [PASSED] Automatic
[18:29:16] [PASSED] Full
[18:29:16] [PASSED] Limited 16:235
[18:29:16] === [PASSED] drm_test_drm_hdmi_connector_get_broadcast_rgb_name ===
[18:29:16] [PASSED] drm_test_drm_hdmi_connector_get_broadcast_rgb_name_invalid
[18:29:16] ==== [PASSED] drm_hdmi_connector_get_broadcast_rgb_name ====
[18:29:16] == drm_hdmi_connector_get_output_format_name (2 subtests) ==
[18:29:16] === drm_test_drm_hdmi_connector_get_output_format_name ====
[18:29:16] [PASSED] RGB
[18:29:16] [PASSED] YUV 4:2:0
[18:29:16] [PASSED] YUV 4:2:2
[18:29:16] [PASSED] YUV 4:4:4
[18:29:16] === [PASSED] drm_test_drm_hdmi_connector_get_output_format_name ===
[18:29:16] [PASSED] drm_test_drm_hdmi_connector_get_output_format_name_invalid
[18:29:16] ==== [PASSED] drm_hdmi_connector_get_output_format_name ====
[18:29:16] ============= drm_damage_helper (21 subtests) ==============
[18:29:16] [PASSED] drm_test_damage_iter_no_damage
[18:29:16] [PASSED] drm_test_damage_iter_no_damage_fractional_src
[18:29:16] [PASSED] drm_test_damage_iter_no_damage_src_moved
[18:29:16] [PASSED] drm_test_damage_iter_no_damage_fractional_src_moved
[18:29:16] [PASSED] drm_test_damage_iter_no_damage_not_visible
[18:29:16] [PASSED] drm_test_damage_iter_no_damage_no_crtc
[18:29:16] [PASSED] drm_test_damage_iter_no_damage_no_fb
[18:29:16] [PASSED] drm_test_damage_iter_simple_damage
[18:29:16] [PASSED] drm_test_damage_iter_single_damage
[18:29:16] [PASSED] drm_test_damage_iter_single_damage_intersect_src
[18:29:16] [PASSED] drm_test_damage_iter_single_damage_outside_src
[18:29:16] [PASSED] drm_test_damage_iter_single_damage_fractional_src
[18:29:16] [PASSED] drm_test_damage_iter_single_damage_intersect_fractional_src
[18:29:16] [PASSED] drm_test_damage_iter_single_damage_outside_fractional_src
[18:29:16] [PASSED] drm_test_damage_iter_single_damage_src_moved
[18:29:16] [PASSED] drm_test_damage_iter_single_damage_fractional_src_moved
[18:29:16] [PASSED] drm_test_damage_iter_damage
[18:29:16] [PASSED] drm_test_damage_iter_damage_one_intersect
[18:29:16] [PASSED] drm_test_damage_iter_damage_one_outside
[18:29:16] [PASSED] drm_test_damage_iter_damage_src_moved
[18:29:16] [PASSED] drm_test_damage_iter_damage_not_visible
[18:29:16] ================ [PASSED] drm_damage_helper ================
[18:29:16] ============== drm_dp_mst_helper (3 subtests) ==============
[18:29:16] ============== drm_test_dp_mst_calc_pbn_mode ==============
[18:29:16] [PASSED] Clock 154000 BPP 30 DSC disabled
[18:29:16] [PASSED] Clock 234000 BPP 30 DSC disabled
[18:29:16] [PASSED] Clock 297000 BPP 24 DSC disabled
[18:29:16] [PASSED] Clock 332880 BPP 24 DSC enabled
[18:29:16] [PASSED] Clock 324540 BPP 24 DSC enabled
[18:29:16] ========== [PASSED] drm_test_dp_mst_calc_pbn_mode ==========
[18:29:16] ============== drm_test_dp_mst_calc_pbn_div ===============
[18:29:16] [PASSED] Link rate 2000000 lane count 4
[18:29:16] [PASSED] Link rate 2000000 lane count 2
[18:29:16] [PASSED] Link rate 2000000 lane count 1
[18:29:16] [PASSED] Link rate 1350000 lane count 4
[18:29:16] [PASSED] Link rate 1350000 lane count 2
[18:29:16] [PASSED] Link rate 1350000 lane count 1
[18:29:16] [PASSED] Link rate 1000000 lane count 4
[18:29:16] [PASSED] Link rate 1000000 lane count 2
[18:29:16] [PASSED] Link rate 1000000 lane count 1
[18:29:16] [PASSED] Link rate 810000 lane count 4
[18:29:16] [PASSED] Link rate 810000 lane count 2
[18:29:16] [PASSED] Link rate 810000 lane count 1
[18:29:16] [PASSED] Link rate 540000 lane count 4
[18:29:16] [PASSED] Link rate 540000 lane count 2
[18:29:16] [PASSED] Link rate 540000 lane count 1
[18:29:16] [PASSED] Link rate 270000 lane count 4
[18:29:16] [PASSED] Link rate 270000 lane count 2
[18:29:16] [PASSED] Link rate 270000 lane count 1
[18:29:16] [PASSED] Link rate 162000 lane count 4
[18:29:16] [PASSED] Link rate 162000 lane count 2
[18:29:16] [PASSED] Link rate 162000 lane count 1
[18:29:16] ========== [PASSED] drm_test_dp_mst_calc_pbn_div ===========
[18:29:16] ========= drm_test_dp_mst_sideband_msg_req_decode =========
[18:29:16] [PASSED] DP_ENUM_PATH_RESOURCES with port number
[18:29:16] [PASSED] DP_POWER_UP_PHY with port number
[18:29:16] [PASSED] DP_POWER_DOWN_PHY with port number
[18:29:16] [PASSED] DP_ALLOCATE_PAYLOAD with SDP stream sinks
[18:29:16] [PASSED] DP_ALLOCATE_PAYLOAD with port number
[18:29:16] [PASSED] DP_ALLOCATE_PAYLOAD with VCPI
[18:29:16] [PASSED] DP_ALLOCATE_PAYLOAD with PBN
[18:29:16] [PASSED] DP_QUERY_PAYLOAD with port number
[18:29:16] [PASSED] DP_QUERY_PAYLOAD with VCPI
[18:29:16] [PASSED] DP_REMOTE_DPCD_READ with port number
[18:29:16] [PASSED] DP_REMOTE_DPCD_READ with DPCD address
[18:29:16] [PASSED] DP_REMOTE_DPCD_READ with max number of bytes
[18:29:16] [PASSED] DP_REMOTE_DPCD_WRITE with port number
[18:29:16] [PASSED] DP_REMOTE_DPCD_WRITE with DPCD address
[18:29:16] [PASSED] DP_REMOTE_DPCD_WRITE with data array
[18:29:16] [PASSED] DP_REMOTE_I2C_READ with port number
[18:29:16] [PASSED] DP_REMOTE_I2C_READ with I2C device ID
[18:29:16] [PASSED] DP_REMOTE_I2C_READ with transactions array
[18:29:16] [PASSED] DP_REMOTE_I2C_WRITE with port number
[18:29:16] [PASSED] DP_REMOTE_I2C_WRITE with I2C device ID
[18:29:16] [PASSED] DP_REMOTE_I2C_WRITE with data array
[18:29:16] [PASSED] DP_QUERY_STREAM_ENC_STATUS with stream ID
[18:29:16] [PASSED] DP_QUERY_STREAM_ENC_STATUS with client ID
[18:29:16] [PASSED] DP_QUERY_STREAM_ENC_STATUS with stream event
[18:29:16] [PASSED] DP_QUERY_STREAM_ENC_STATUS with valid stream event
[18:29:16] [PASSED] DP_QUERY_STREAM_ENC_STATUS with stream behavior
[18:29:16] [PASSED] DP_QUERY_STREAM_ENC_STATUS with a valid stream behavior
[18:29:16] ===== [PASSED] drm_test_dp_mst_sideband_msg_req_decode =====
[18:29:16] ================ [PASSED] drm_dp_mst_helper ================
[18:29:16] ================== drm_exec (7 subtests) ===================
[18:29:16] [PASSED] sanitycheck
[18:29:16] [PASSED] test_lock
[18:29:16] [PASSED] test_lock_unlock
[18:29:16] [PASSED] test_duplicates
[18:29:16] [PASSED] test_prepare
[18:29:16] [PASSED] test_prepare_array
[18:29:16] [PASSED] test_multiple_loops
[18:29:16] ==================== [PASSED] drm_exec =====================
[18:29:16] =========== drm_format_helper_test (17 subtests) ===========
[18:29:16] ============== drm_test_fb_xrgb8888_to_gray8 ==============
[18:29:16] [PASSED] single_pixel_source_buffer
[18:29:16] [PASSED] single_pixel_clip_rectangle
[18:29:16] [PASSED] well_known_colors
[18:29:16] [PASSED] destination_pitch
[18:29:16] ========== [PASSED] drm_test_fb_xrgb8888_to_gray8 ==========
[18:29:16] ============= drm_test_fb_xrgb8888_to_rgb332 ==============
[18:29:16] [PASSED] single_pixel_source_buffer
[18:29:16] [PASSED] single_pixel_clip_rectangle
[18:29:16] [PASSED] well_known_colors
[18:29:16] [PASSED] destination_pitch
[18:29:16] ========= [PASSED] drm_test_fb_xrgb8888_to_rgb332 ==========
[18:29:16] ============= drm_test_fb_xrgb8888_to_rgb565 ==============
[18:29:16] [PASSED] single_pixel_source_buffer
[18:29:16] [PASSED] single_pixel_clip_rectangle
[18:29:16] [PASSED] well_known_colors
[18:29:16] [PASSED] destination_pitch
[18:29:16] ========= [PASSED] drm_test_fb_xrgb8888_to_rgb565 ==========
[18:29:16] ============ drm_test_fb_xrgb8888_to_xrgb1555 =============
[18:29:16] [PASSED] single_pixel_source_buffer
[18:29:16] [PASSED] single_pixel_clip_rectangle
[18:29:16] [PASSED] well_known_colors
[18:29:16] [PASSED] destination_pitch
[18:29:16] ======== [PASSED] drm_test_fb_xrgb8888_to_xrgb1555 =========
[18:29:16] ============ drm_test_fb_xrgb8888_to_argb1555 =============
[18:29:16] [PASSED] single_pixel_source_buffer
[18:29:16] [PASSED] single_pixel_clip_rectangle
[18:29:16] [PASSED] well_known_colors
[18:29:16] [PASSED] destination_pitch
[18:29:16] ======== [PASSED] drm_test_fb_xrgb8888_to_argb1555 =========
[18:29:16] ============ drm_test_fb_xrgb8888_to_rgba5551 =============
[18:29:16] [PASSED] single_pixel_source_buffer
[18:29:16] [PASSED] single_pixel_clip_rectangle
[18:29:16] [PASSED] well_known_colors
[18:29:16] [PASSED] destination_pitch
[18:29:16] ======== [PASSED] drm_test_fb_xrgb8888_to_rgba5551 =========
[18:29:16] ============= drm_test_fb_xrgb8888_to_rgb888 ==============
[18:29:16] [PASSED] single_pixel_source_buffer
[18:29:16] [PASSED] single_pixel_clip_rectangle
[18:29:16] [PASSED] well_known_colors
[18:29:16] [PASSED] destination_pitch
[18:29:16] ========= [PASSED] drm_test_fb_xrgb8888_to_rgb888 ==========
[18:29:16] ============= drm_test_fb_xrgb8888_to_bgr888 ==============
[18:29:16] [PASSED] single_pixel_source_buffer
[18:29:16] [PASSED] single_pixel_clip_rectangle
[18:29:16] [PASSED] well_known_colors
[18:29:16] [PASSED] destination_pitch
[18:29:16] ========= [PASSED] drm_test_fb_xrgb8888_to_bgr888 ==========
[18:29:16] ============ drm_test_fb_xrgb8888_to_argb8888 =============
[18:29:16] [PASSED] single_pixel_source_buffer
[18:29:16] [PASSED] single_pixel_clip_rectangle
[18:29:16] [PASSED] well_known_colors
[18:29:16] [PASSED] destination_pitch
[18:29:16] ======== [PASSED] drm_test_fb_xrgb8888_to_argb8888 =========
[18:29:16] =========== drm_test_fb_xrgb8888_to_xrgb2101010 ===========
[18:29:16] [PASSED] single_pixel_source_buffer
[18:29:16] [PASSED] single_pixel_clip_rectangle
[18:29:16] [PASSED] well_known_colors
[18:29:16] [PASSED] destination_pitch
[18:29:16] ======= [PASSED] drm_test_fb_xrgb8888_to_xrgb2101010 =======
[18:29:16] =========== drm_test_fb_xrgb8888_to_argb2101010 ===========
[18:29:16] [PASSED] single_pixel_source_buffer
[18:29:16] [PASSED] single_pixel_clip_rectangle
[18:29:16] [PASSED] well_known_colors
[18:29:16] [PASSED] destination_pitch
[18:29:16] ======= [PASSED] drm_test_fb_xrgb8888_to_argb2101010 =======
[18:29:16] ============== drm_test_fb_xrgb8888_to_mono ===============
[18:29:16] [PASSED] single_pixel_source_buffer
[18:29:16] [PASSED] single_pixel_clip_rectangle
[18:29:16] [PASSED] well_known_colors
[18:29:16] [PASSED] destination_pitch
[18:29:16] ========== [PASSED] drm_test_fb_xrgb8888_to_mono ===========
[18:29:16] ==================== drm_test_fb_swab =====================
[18:29:16] [PASSED] single_pixel_source_buffer
[18:29:16] [PASSED] single_pixel_clip_rectangle
[18:29:16] [PASSED] well_known_colors
[18:29:16] [PASSED] destination_pitch
[18:29:16] ================ [PASSED] drm_test_fb_swab =================
[18:29:16] ============ drm_test_fb_xrgb8888_to_xbgr8888 =============
[18:29:16] [PASSED] single_pixel_source_buffer
[18:29:16] [PASSED] single_pixel_clip_rectangle
[18:29:16] [PASSED] well_known_colors
[18:29:16] [PASSED] destination_pitch
[18:29:16] ======== [PASSED] drm_test_fb_xrgb8888_to_xbgr8888 =========
[18:29:16] ============ drm_test_fb_xrgb8888_to_abgr8888 =============
[18:29:16] [PASSED] single_pixel_source_buffer
[18:29:16] [PASSED] single_pixel_clip_rectangle
[18:29:16] [PASSED] well_known_colors
[18:29:16] [PASSED] destination_pitch
[18:29:16] ======== [PASSED] drm_test_fb_xrgb8888_to_abgr8888 =========
[18:29:16] ================= drm_test_fb_clip_offset =================
[18:29:16] [PASSED] pass through
[18:29:16] [PASSED] horizontal offset
[18:29:16] [PASSED] vertical offset
[18:29:16] [PASSED] horizontal and vertical offset
[18:29:16] [PASSED] horizontal offset (custom pitch)
[18:29:16] [PASSED] vertical offset (custom pitch)
[18:29:16] [PASSED] horizontal and vertical offset (custom pitch)
[18:29:16] ============= [PASSED] drm_test_fb_clip_offset =============
[18:29:16] =================== drm_test_fb_memcpy ====================
[18:29:16] [PASSED] single_pixel_source_buffer: XR24 little-endian (0x34325258)
[18:29:16] [PASSED] single_pixel_source_buffer: XRA8 little-endian (0x38415258)
[18:29:16] [PASSED] single_pixel_source_buffer: YU24 little-endian (0x34325559)
[18:29:16] [PASSED] single_pixel_clip_rectangle: XB24 little-endian (0x34324258)
[18:29:16] [PASSED] single_pixel_clip_rectangle: XRA8 little-endian (0x38415258)
[18:29:16] [PASSED] single_pixel_clip_rectangle: YU24 little-endian (0x34325559)
[18:29:16] [PASSED] well_known_colors: XB24 little-endian (0x34324258)
[18:29:16] [PASSED] well_known_colors: XRA8 little-endian (0x38415258)
[18:29:16] [PASSED] well_known_colors: YU24 little-endian (0x34325559)
[18:29:16] [PASSED] destination_pitch: XB24 little-endian (0x34324258)
[18:29:16] [PASSED] destination_pitch: XRA8 little-endian (0x38415258)
[18:29:16] [PASSED] destination_pitch: YU24 little-endian (0x34325559)
[18:29:16] =============== [PASSED] drm_test_fb_memcpy ================
[18:29:16] ============= [PASSED] drm_format_helper_test ==============
[18:29:16] ================= drm_format (18 subtests) =================
[18:29:16] [PASSED] drm_test_format_block_width_invalid
[18:29:16] [PASSED] drm_test_format_block_width_one_plane
[18:29:16] [PASSED] drm_test_format_block_width_two_plane
[18:29:16] [PASSED] drm_test_format_block_width_three_plane
[18:29:16] [PASSED] drm_test_format_block_width_tiled
[18:29:16] [PASSED] drm_test_format_block_height_invalid
[18:29:16] [PASSED] drm_test_format_block_height_one_plane
[18:29:16] [PASSED] drm_test_format_block_height_two_plane
[18:29:16] [PASSED] drm_test_format_block_height_three_plane
[18:29:16] [PASSED] drm_test_format_block_height_tiled
[18:29:16] [PASSED] drm_test_format_min_pitch_invalid
[18:29:16] [PASSED] drm_test_format_min_pitch_one_plane_8bpp
[18:29:16] [PASSED] drm_test_format_min_pitch_one_plane_16bpp
[18:29:16] [PASSED] drm_test_format_min_pitch_one_plane_24bpp
[18:29:16] [PASSED] drm_test_format_min_pitch_one_plane_32bpp
[18:29:16] [PASSED] drm_test_format_min_pitch_two_plane
[18:29:16] [PASSED] drm_test_format_min_pitch_three_plane_8bpp
[18:29:16] [PASSED] drm_test_format_min_pitch_tiled
[18:29:16] =================== [PASSED] drm_format ====================
[18:29:16] ============== drm_framebuffer (10 subtests) ===============
[18:29:16] ========== drm_test_framebuffer_check_src_coords ==========
[18:29:16] [PASSED] Success: source fits into fb
[18:29:16] [PASSED] Fail: overflowing fb with x-axis coordinate
[18:29:16] [PASSED] Fail: overflowing fb with y-axis coordinate
[18:29:16] [PASSED] Fail: overflowing fb with source width
[18:29:16] [PASSED] Fail: overflowing fb with source height
[18:29:16] ====== [PASSED] drm_test_framebuffer_check_src_coords ======
[18:29:16] [PASSED] drm_test_framebuffer_cleanup
[18:29:16] =============== drm_test_framebuffer_create ===============
[18:29:16] [PASSED] ABGR8888 normal sizes
[18:29:16] [PASSED] ABGR8888 max sizes
[18:29:16] [PASSED] ABGR8888 pitch greater than min required
[18:29:16] [PASSED] ABGR8888 pitch less than min required
[18:29:16] [PASSED] ABGR8888 Invalid width
[18:29:16] [PASSED] ABGR8888 Invalid buffer handle
[18:29:16] [PASSED] No pixel format
[18:29:16] [PASSED] ABGR8888 Width 0
[18:29:16] [PASSED] ABGR8888 Height 0
[18:29:16] [PASSED] ABGR8888 Out of bound height * pitch combination
[18:29:16] [PASSED] ABGR8888 Large buffer offset
[18:29:16] [PASSED] ABGR8888 Buffer offset for inexistent plane
[18:29:16] [PASSED] ABGR8888 Invalid flag
[18:29:16] [PASSED] ABGR8888 Set DRM_MODE_FB_MODIFIERS without modifiers
[18:29:16] [PASSED] ABGR8888 Valid buffer modifier
[18:29:16] [PASSED] ABGR8888 Invalid buffer modifier(DRM_FORMAT_MOD_SAMSUNG_64_32_TILE)
[18:29:16] [PASSED] ABGR8888 Extra pitches without DRM_MODE_FB_MODIFIERS
[18:29:16] [PASSED] ABGR8888 Extra pitches with DRM_MODE_FB_MODIFIERS
[18:29:16] [PASSED] NV12 Normal sizes
[18:29:16] [PASSED] NV12 Max sizes
[18:29:16] [PASSED] NV12 Invalid pitch
[18:29:16] [PASSED] NV12 Invalid modifier/missing DRM_MODE_FB_MODIFIERS flag
[18:29:16] [PASSED] NV12 different modifier per-plane
[18:29:16] [PASSED] NV12 with DRM_FORMAT_MOD_SAMSUNG_64_32_TILE
[18:29:16] [PASSED] NV12 Valid modifiers without DRM_MODE_FB_MODIFIERS
[18:29:16] [PASSED] NV12 Modifier for inexistent plane
[18:29:16] [PASSED] NV12 Handle for inexistent plane
[18:29:16] [PASSED] NV12 Handle for inexistent plane without DRM_MODE_FB_MODIFIERS
[18:29:16] [PASSED] YVU420 DRM_MODE_FB_MODIFIERS set without modifier
[18:29:16] [PASSED] YVU420 Normal sizes
[18:29:16] [PASSED] YVU420 Max sizes
[18:29:16] [PASSED] YVU420 Invalid pitch
[18:29:16] [PASSED] YVU420 Different pitches
[18:29:16] [PASSED] YVU420 Different buffer offsets/pitches
[18:29:16] [PASSED] YVU420 Modifier set just for plane 0, without DRM_MODE_FB_MODIFIERS
[18:29:16] [PASSED] YVU420 Modifier set just for planes 0, 1, without DRM_MODE_FB_MODIFIERS
[18:29:16] [PASSED] YVU420 Modifier set just for plane 0, 1, with DRM_MODE_FB_MODIFIERS
[18:29:16] [PASSED] YVU420 Valid modifier
[18:29:16] [PASSED] YVU420 Different modifiers per plane
[18:29:16] [PASSED] YVU420 Modifier for inexistent plane
[18:29:16] [PASSED] YUV420_10BIT Invalid modifier(DRM_FORMAT_MOD_LINEAR)
[18:29:16] [PASSED] X0L2 Normal sizes
[18:29:16] [PASSED] X0L2 Max sizes
[18:29:16] [PASSED] X0L2 Invalid pitch
[18:29:16] [PASSED] X0L2 Pitch greater than minimum required
[18:29:16] [PASSED] X0L2 Handle for inexistent plane
[18:29:16] [PASSED] X0L2 Offset for inexistent plane, without DRM_MODE_FB_MODIFIERS set
[18:29:16] [PASSED] X0L2 Modifier without DRM_MODE_FB_MODIFIERS set
[18:29:16] [PASSED] X0L2 Valid modifier
[18:29:16] [PASSED] X0L2 Modifier for inexistent plane
[18:29:16] =========== [PASSED] drm_test_framebuffer_create ===========
[18:29:16] [PASSED] drm_test_framebuffer_free
[18:29:16] [PASSED] drm_test_framebuffer_init
[18:29:16] [PASSED] drm_test_framebuffer_init_bad_format
[18:29:16] [PASSED] drm_test_framebuffer_init_dev_mismatch
[18:29:16] [PASSED] drm_test_framebuffer_lookup
[18:29:16] [PASSED] drm_test_framebuffer_lookup_inexistent
[18:29:16] [PASSED] drm_test_framebuffer_modifiers_not_supported
[18:29:16] ================= [PASSED] drm_framebuffer =================
[18:29:16] ================ drm_gem_shmem (8 subtests) ================
[18:29:16] [PASSED] drm_gem_shmem_test_obj_create
[18:29:16] [PASSED] drm_gem_shmem_test_obj_create_private
[18:29:16] [PASSED] drm_gem_shmem_test_pin_pages
[18:29:16] [PASSED] drm_gem_shmem_test_vmap
[18:29:16] [PASSED] drm_gem_shmem_test_get_pages_sgt
[18:29:16] [PASSED] drm_gem_shmem_test_get_sg_table
[18:29:16] [PASSED] drm_gem_shmem_test_madvise
[18:29:16] [PASSED] drm_gem_shmem_test_purge
[18:29:16] ================== [PASSED] drm_gem_shmem ==================
[18:29:16] === drm_atomic_helper_connector_hdmi_check (27 subtests) ===
[18:29:16] [PASSED] drm_test_check_broadcast_rgb_auto_cea_mode
[18:29:16] [PASSED] drm_test_check_broadcast_rgb_auto_cea_mode_vic_1
[18:29:16] [PASSED] drm_test_check_broadcast_rgb_full_cea_mode
[18:29:16] [PASSED] drm_test_check_broadcast_rgb_full_cea_mode_vic_1
[18:29:16] [PASSED] drm_test_check_broadcast_rgb_limited_cea_mode
[18:29:16] [PASSED] drm_test_check_broadcast_rgb_limited_cea_mode_vic_1
[18:29:16] ====== drm_test_check_broadcast_rgb_cea_mode_yuv420 =======
[18:29:16] [PASSED] Automatic
[18:29:16] [PASSED] Full
[18:29:16] [PASSED] Limited 16:235
[18:29:16] == [PASSED] drm_test_check_broadcast_rgb_cea_mode_yuv420 ===
[18:29:16] [PASSED] drm_test_check_broadcast_rgb_crtc_mode_changed
[18:29:16] [PASSED] drm_test_check_broadcast_rgb_crtc_mode_not_changed
[18:29:16] [PASSED] drm_test_check_disable_connector
[18:29:16] [PASSED] drm_test_check_hdmi_funcs_reject_rate
[18:29:16] [PASSED] drm_test_check_max_tmds_rate_bpc_fallback_rgb
[18:29:16] [PASSED] drm_test_check_max_tmds_rate_bpc_fallback_yuv420
[18:29:16] [PASSED] drm_test_check_max_tmds_rate_bpc_fallback_ignore_yuv422
[18:29:16] [PASSED] drm_test_check_max_tmds_rate_bpc_fallback_ignore_yuv420
[18:29:16] [PASSED] drm_test_check_driver_unsupported_fallback_yuv420
[18:29:16] [PASSED] drm_test_check_output_bpc_crtc_mode_changed
[18:29:16] [PASSED] drm_test_check_output_bpc_crtc_mode_not_changed
[18:29:16] [PASSED] drm_test_check_output_bpc_dvi
[18:29:16] [PASSED] drm_test_check_output_bpc_format_vic_1
[18:29:16] [PASSED] drm_test_check_output_bpc_format_display_8bpc_only
[18:29:16] [PASSED] drm_test_check_output_bpc_format_display_rgb_only
[18:29:16] [PASSED] drm_test_check_output_bpc_format_driver_8bpc_only
[18:29:16] [PASSED] drm_test_check_output_bpc_format_driver_rgb_only
[18:29:16] [PASSED] drm_test_check_tmds_char_rate_rgb_8bpc
[18:29:16] [PASSED] drm_test_check_tmds_char_rate_rgb_10bpc
[18:29:16] [PASSED] drm_test_check_tmds_char_rate_rgb_12bpc
[18:29:16] ===== [PASSED] drm_atomic_helper_connector_hdmi_check ======
[18:29:16] === drm_atomic_helper_connector_hdmi_reset (6 subtests) ====
[18:29:16] [PASSED] drm_test_check_broadcast_rgb_value
[18:29:16] [PASSED] drm_test_check_bpc_8_value
[18:29:16] [PASSED] drm_test_check_bpc_10_value
[18:29:16] [PASSED] drm_test_check_bpc_12_value
[18:29:16] [PASSED] drm_test_check_format_value
[18:29:16] [PASSED] drm_test_check_tmds_char_value
[18:29:16] ===== [PASSED] drm_atomic_helper_connector_hdmi_reset ======
[18:29:16] = drm_atomic_helper_connector_hdmi_mode_valid (4 subtests) =
[18:29:16] [PASSED] drm_test_check_mode_valid
[18:29:16] [PASSED] drm_test_check_mode_valid_reject
[18:29:16] [PASSED] drm_test_check_mode_valid_reject_rate
[18:29:16] [PASSED] drm_test_check_mode_valid_reject_max_clock
[18:29:16] === [PASSED] drm_atomic_helper_connector_hdmi_mode_valid ===
[18:29:16] ================= drm_managed (2 subtests) =================
[18:29:16] [PASSED] drm_test_managed_release_action
[18:29:16] [PASSED] drm_test_managed_run_action
[18:29:16] =================== [PASSED] drm_managed ===================
[18:29:16] =================== drm_mm (6 subtests) ====================
[18:29:16] [PASSED] drm_test_mm_init
[18:29:16] [PASSED] drm_test_mm_debug
[18:29:16] [PASSED] drm_test_mm_align32
[18:29:16] [PASSED] drm_test_mm_align64
[18:29:16] [PASSED] drm_test_mm_lowest
[18:29:16] [PASSED] drm_test_mm_highest
[18:29:16] ===================== [PASSED] drm_mm ======================
[18:29:16] ============= drm_modes_analog_tv (5 subtests) =============
[18:29:16] [PASSED] drm_test_modes_analog_tv_mono_576i
[18:29:16] [PASSED] drm_test_modes_analog_tv_ntsc_480i
[18:29:16] [PASSED] drm_test_modes_analog_tv_ntsc_480i_inlined
[18:29:16] [PASSED] drm_test_modes_analog_tv_pal_576i
[18:29:16] [PASSED] drm_test_modes_analog_tv_pal_576i_inlined
[18:29:16] =============== [PASSED] drm_modes_analog_tv ===============
[18:29:16] ============== drm_plane_helper (2 subtests) ===============
[18:29:16] =============== drm_test_check_plane_state ================
[18:29:16] [PASSED] clipping_simple
[18:29:16] [PASSED] clipping_rotate_reflect
[18:29:16] [PASSED] positioning_simple
[18:29:16] [PASSED] upscaling
[18:29:16] [PASSED] downscaling
[18:29:16] [PASSED] rounding1
[18:29:16] [PASSED] rounding2
[18:29:16] [PASSED] rounding3
[18:29:16] [PASSED] rounding4
[18:29:16] =========== [PASSED] drm_test_check_plane_state ============
[18:29:16] =========== drm_test_check_invalid_plane_state ============
[18:29:16] [PASSED] positioning_invalid
[18:29:16] [PASSED] upscaling_invalid
[18:29:16] [PASSED] downscaling_invalid
[18:29:16] ======= [PASSED] drm_test_check_invalid_plane_state ========
[18:29:16] ================ [PASSED] drm_plane_helper =================
[18:29:16] ====== drm_connector_helper_tv_get_modes (1 subtest) =======
[18:29:16] ====== drm_test_connector_helper_tv_get_modes_check =======
[18:29:16] [PASSED] None
[18:29:16] [PASSED] PAL
[18:29:16] [PASSED] NTSC
[18:29:16] [PASSED] Both, NTSC Default
[18:29:16] [PASSED] Both, PAL Default
[18:29:16] [PASSED] Both, NTSC Default, with PAL on command-line
[18:29:16] [PASSED] Both, PAL Default, with NTSC on command-line
[18:29:16] == [PASSED] drm_test_connector_helper_tv_get_modes_check ===
[18:29:16] ======== [PASSED] drm_connector_helper_tv_get_modes ========
[18:29:16] ================== drm_rect (9 subtests) ===================
[18:29:16] [PASSED] drm_test_rect_clip_scaled_div_by_zero
[18:29:16] [PASSED] drm_test_rect_clip_scaled_not_clipped
[18:29:16] [PASSED] drm_test_rect_clip_scaled_clipped
[18:29:16] [PASSED] drm_test_rect_clip_scaled_signed_vs_unsigned
[18:29:16] ================= drm_test_rect_intersect =================
[18:29:16] [PASSED] top-left x bottom-right: 2x2+1+1 x 2x2+0+0
[18:29:16] [PASSED] top-right x bottom-left: 2x2+0+0 x 2x2+1-1
[18:29:16] [PASSED] bottom-left x top-right: 2x2+1-1 x 2x2+0+0
[18:29:16] [PASSED] bottom-right x top-left: 2x2+0+0 x 2x2+1+1
[18:29:16] [PASSED] right x left: 2x1+0+0 x 3x1+1+0
[18:29:16] [PASSED] left x right: 3x1+1+0 x 2x1+0+0
[18:29:16] [PASSED] up x bottom: 1x2+0+0 x 1x3+0-1
[18:29:16] [PASSED] bottom x up: 1x3+0-1 x 1x2+0+0
[18:29:16] [PASSED] touching corner: 1x1+0+0 x 2x2+1+1
[18:29:16] [PASSED] touching side: 1x1+0+0 x 1x1+1+0
[18:29:16] [PASSED] equal rects: 2x2+0+0 x 2x2+0+0
[18:29:16] [PASSED] inside another: 2x2+0+0 x 1x1+1+1
[18:29:16] [PASSED] far away: 1x1+0+0 x 1x1+3+6
[18:29:16] [PASSED] points intersecting: 0x0+5+10 x 0x0+5+10
[18:29:16] [PASSED] points not intersecting: 0x0+0+0 x 0x0+5+10
[18:29:16] ============= [PASSED] drm_test_rect_intersect =============
[18:29:16] ================ drm_test_rect_calc_hscale ================
[18:29:16] [PASSED] normal use
[18:29:16] [PASSED] out of max range
[18:29:16] [PASSED] out of min range
[18:29:16] [PASSED] zero dst
[18:29:16] [PASSED] negative src
[18:29:16] [PASSED] negative dst
[18:29:16] ============ [PASSED] drm_test_rect_calc_hscale ============
[18:29:16] ================ drm_test_rect_calc_vscale ================
[18:29:16] [PASSED] normal use
stty: 'standard input': Inappropriate ioctl for device
[18:29:16] [PASSED] out of max range
[18:29:16] [PASSED] out of min range
[18:29:16] [PASSED] zero dst
[18:29:16] [PASSED] negative src
[18:29:16] [PASSED] negative dst
[18:29:16] ============ [PASSED] drm_test_rect_calc_vscale ============
[18:29:16] ================== drm_test_rect_rotate ===================
[18:29:16] [PASSED] reflect-x
[18:29:16] [PASSED] reflect-y
[18:29:16] [PASSED] rotate-0
[18:29:16] [PASSED] rotate-90
[18:29:16] [PASSED] rotate-180
[18:29:16] [PASSED] rotate-270
[18:29:16] ============== [PASSED] drm_test_rect_rotate ===============
[18:29:16] ================ drm_test_rect_rotate_inv =================
[18:29:16] [PASSED] reflect-x
[18:29:16] [PASSED] reflect-y
[18:29:16] [PASSED] rotate-0
[18:29:16] [PASSED] rotate-90
[18:29:16] [PASSED] rotate-180
[18:29:16] [PASSED] rotate-270
[18:29:16] ============ [PASSED] drm_test_rect_rotate_inv =============
[18:29:16] ==================== [PASSED] drm_rect =====================
[18:29:16] ============ drm_sysfb_modeset_test (1 subtest) ============
[18:29:16] ============ drm_test_sysfb_build_fourcc_list =============
[18:29:16] [PASSED] no native formats
[18:29:16] [PASSED] XRGB8888 as native format
[18:29:16] [PASSED] remove duplicates
[18:29:16] [PASSED] convert alpha formats
[18:29:16] [PASSED] random formats
[18:29:16] ======== [PASSED] drm_test_sysfb_build_fourcc_list =========
[18:29:16] ============= [PASSED] drm_sysfb_modeset_test ==============
[18:29:16] ============================================================
[18:29:16] Testing complete. Ran 622 tests: passed: 622
[18:29:17] Elapsed time: 25.717s total, 1.707s configuring, 23.588s building, 0.391s running
+ /kernel/tools/testing/kunit/kunit.py run --kunitconfig /kernel/drivers/gpu/drm/ttm/tests/.kunitconfig
[18:29:17] Configuring KUnit Kernel ...
Regenerating .config ...
Populating config with:
$ make ARCH=um O=.kunit olddefconfig
[18:29:18] Building KUnit Kernel ...
Populating config with:
$ make ARCH=um O=.kunit olddefconfig
Building with:
$ make all compile_commands.json scripts_gdb ARCH=um O=.kunit --jobs=48
[18:29:28] Starting KUnit Kernel (1/1)...
[18:29:28] ============================================================
Running tests with:
$ .kunit/linux kunit.enable=1 mem=1G console=tty kunit_shutdown=halt
[18:29:28] ================= ttm_device (5 subtests) ==================
[18:29:28] [PASSED] ttm_device_init_basic
[18:29:28] [PASSED] ttm_device_init_multiple
[18:29:28] [PASSED] ttm_device_fini_basic
[18:29:28] [PASSED] ttm_device_init_no_vma_man
[18:29:28] ================== ttm_device_init_pools ==================
[18:29:28] [PASSED] No DMA allocations, no DMA32 required
[18:29:28] [PASSED] DMA allocations, DMA32 required
[18:29:28] [PASSED] No DMA allocations, DMA32 required
[18:29:28] [PASSED] DMA allocations, no DMA32 required
[18:29:28] ============== [PASSED] ttm_device_init_pools ==============
[18:29:28] =================== [PASSED] ttm_device ====================
[18:29:28] ================== ttm_pool (8 subtests) ===================
[18:29:28] ================== ttm_pool_alloc_basic ===================
[18:29:28] [PASSED] One page
[18:29:28] [PASSED] More than one page
[18:29:28] [PASSED] Above the allocation limit
[18:29:28] [PASSED] One page, with coherent DMA mappings enabled
[18:29:28] [PASSED] Above the allocation limit, with coherent DMA mappings enabled
[18:29:28] ============== [PASSED] ttm_pool_alloc_basic ===============
[18:29:28] ============== ttm_pool_alloc_basic_dma_addr ==============
[18:29:28] [PASSED] One page
[18:29:28] [PASSED] More than one page
[18:29:28] [PASSED] Above the allocation limit
[18:29:28] [PASSED] One page, with coherent DMA mappings enabled
[18:29:28] [PASSED] Above the allocation limit, with coherent DMA mappings enabled
[18:29:28] ========== [PASSED] ttm_pool_alloc_basic_dma_addr ==========
[18:29:28] [PASSED] ttm_pool_alloc_order_caching_match
[18:29:28] [PASSED] ttm_pool_alloc_caching_mismatch
[18:29:28] [PASSED] ttm_pool_alloc_order_mismatch
[18:29:28] [PASSED] ttm_pool_free_dma_alloc
[18:29:28] [PASSED] ttm_pool_free_no_dma_alloc
[18:29:28] [PASSED] ttm_pool_fini_basic
[18:29:28] ==================== [PASSED] ttm_pool =====================
[18:29:28] ================ ttm_resource (8 subtests) =================
[18:29:28] ================= ttm_resource_init_basic =================
[18:29:28] [PASSED] Init resource in TTM_PL_SYSTEM
[18:29:28] [PASSED] Init resource in TTM_PL_VRAM
[18:29:28] [PASSED] Init resource in a private placement
[18:29:28] [PASSED] Init resource in TTM_PL_SYSTEM, set placement flags
[18:29:28] ============= [PASSED] ttm_resource_init_basic =============
[18:29:28] [PASSED] ttm_resource_init_pinned
[18:29:28] [PASSED] ttm_resource_fini_basic
[18:29:28] [PASSED] ttm_resource_manager_init_basic
[18:29:28] [PASSED] ttm_resource_manager_usage_basic
[18:29:28] [PASSED] ttm_resource_manager_set_used_basic
[18:29:28] [PASSED] ttm_sys_man_alloc_basic
[18:29:28] [PASSED] ttm_sys_man_free_basic
[18:29:28] ================== [PASSED] ttm_resource ===================
[18:29:28] =================== ttm_tt (15 subtests) ===================
[18:29:28] ==================== ttm_tt_init_basic ====================
[18:29:28] [PASSED] Page-aligned size
[18:29:28] [PASSED] Extra pages requested
[18:29:28] ================ [PASSED] ttm_tt_init_basic ================
[18:29:28] [PASSED] ttm_tt_init_misaligned
[18:29:28] [PASSED] ttm_tt_fini_basic
[18:29:28] [PASSED] ttm_tt_fini_sg
[18:29:28] [PASSED] ttm_tt_fini_shmem
[18:29:28] [PASSED] ttm_tt_create_basic
[18:29:28] [PASSED] ttm_tt_create_invalid_bo_type
[18:29:28] [PASSED] ttm_tt_create_ttm_exists
[18:29:28] [PASSED] ttm_tt_create_failed
[18:29:28] [PASSED] ttm_tt_destroy_basic
[18:29:28] [PASSED] ttm_tt_populate_null_ttm
[18:29:28] [PASSED] ttm_tt_populate_populated_ttm
[18:29:28] [PASSED] ttm_tt_unpopulate_basic
[18:29:28] [PASSED] ttm_tt_unpopulate_empty_ttm
[18:29:28] [PASSED] ttm_tt_swapin_basic
[18:29:28] ===================== [PASSED] ttm_tt ======================
[18:29:28] =================== ttm_bo (14 subtests) ===================
[18:29:28] =========== ttm_bo_reserve_optimistic_no_ticket ===========
[18:29:28] [PASSED] Cannot be interrupted and sleeps
[18:29:28] [PASSED] Cannot be interrupted, locks straight away
[18:29:28] [PASSED] Can be interrupted, sleeps
[18:29:28] ======= [PASSED] ttm_bo_reserve_optimistic_no_ticket =======
[18:29:28] [PASSED] ttm_bo_reserve_locked_no_sleep
[18:29:28] [PASSED] ttm_bo_reserve_no_wait_ticket
[18:29:28] [PASSED] ttm_bo_reserve_double_resv
[18:29:28] [PASSED] ttm_bo_reserve_interrupted
[18:29:28] [PASSED] ttm_bo_reserve_deadlock
[18:29:28] [PASSED] ttm_bo_unreserve_basic
[18:29:28] [PASSED] ttm_bo_unreserve_pinned
[18:29:28] [PASSED] ttm_bo_unreserve_bulk
[18:29:28] [PASSED] ttm_bo_fini_basic
[18:29:28] [PASSED] ttm_bo_fini_shared_resv
[18:29:28] [PASSED] ttm_bo_pin_basic
[18:29:28] [PASSED] ttm_bo_pin_unpin_resource
[18:29:28] [PASSED] ttm_bo_multiple_pin_one_unpin
[18:29:28] ===================== [PASSED] ttm_bo ======================
[18:29:28] ============== ttm_bo_validate (21 subtests) ===============
[18:29:28] ============== ttm_bo_init_reserved_sys_man ===============
[18:29:28] [PASSED] Buffer object for userspace
[18:29:28] [PASSED] Kernel buffer object
[18:29:28] [PASSED] Shared buffer object
[18:29:28] ========== [PASSED] ttm_bo_init_reserved_sys_man ===========
[18:29:28] ============== ttm_bo_init_reserved_mock_man ==============
[18:29:28] [PASSED] Buffer object for userspace
[18:29:28] [PASSED] Kernel buffer object
[18:29:28] [PASSED] Shared buffer object
[18:29:28] ========== [PASSED] ttm_bo_init_reserved_mock_man ==========
[18:29:28] [PASSED] ttm_bo_init_reserved_resv
[18:29:28] ================== ttm_bo_validate_basic ==================
[18:29:28] [PASSED] Buffer object for userspace
[18:29:28] [PASSED] Kernel buffer object
[18:29:28] [PASSED] Shared buffer object
[18:29:28] ============== [PASSED] ttm_bo_validate_basic ==============
[18:29:28] [PASSED] ttm_bo_validate_invalid_placement
[18:29:28] ============= ttm_bo_validate_same_placement ==============
[18:29:28] [PASSED] System manager
[18:29:28] [PASSED] VRAM manager
[18:29:28] ========= [PASSED] ttm_bo_validate_same_placement ==========
[18:29:28] [PASSED] ttm_bo_validate_failed_alloc
[18:29:28] [PASSED] ttm_bo_validate_pinned
[18:29:28] [PASSED] ttm_bo_validate_busy_placement
[18:29:28] ================ ttm_bo_validate_multihop =================
[18:29:28] [PASSED] Buffer object for userspace
[18:29:28] [PASSED] Kernel buffer object
[18:29:28] [PASSED] Shared buffer object
[18:29:28] ============ [PASSED] ttm_bo_validate_multihop =============
[18:29:28] ========== ttm_bo_validate_no_placement_signaled ==========
[18:29:28] [PASSED] Buffer object in system domain, no page vector
[18:29:28] [PASSED] Buffer object in system domain with an existing page vector
[18:29:28] ====== [PASSED] ttm_bo_validate_no_placement_signaled ======
[18:29:28] ======== ttm_bo_validate_no_placement_not_signaled ========
[18:29:28] [PASSED] Buffer object for userspace
[18:29:28] [PASSED] Kernel buffer object
[18:29:28] [PASSED] Shared buffer object
[18:29:28] ==== [PASSED] ttm_bo_validate_no_placement_not_signaled ====
[18:29:28] [PASSED] ttm_bo_validate_move_fence_signaled
[18:29:28] ========= ttm_bo_validate_move_fence_not_signaled =========
[18:29:28] [PASSED] Waits for GPU
[18:29:28] [PASSED] Tries to lock straight away
[18:29:28] ===== [PASSED] ttm_bo_validate_move_fence_not_signaled =====
[18:29:28] [PASSED] ttm_bo_validate_happy_evict
[18:29:28] [PASSED] ttm_bo_validate_all_pinned_evict
[18:29:28] [PASSED] ttm_bo_validate_allowed_only_evict
[18:29:28] [PASSED] ttm_bo_validate_deleted_evict
[18:29:28] [PASSED] ttm_bo_validate_busy_domain_evict
[18:29:28] [PASSED] ttm_bo_validate_evict_gutting
[18:29:28] [PASSED] ttm_bo_validate_recrusive_evict
stty: 'standard input': Inappropriate ioctl for device
[18:29:28] ================= [PASSED] ttm_bo_validate =================
[18:29:28] ============================================================
[18:29:28] Testing complete. Ran 101 tests: passed: 101
[18:29:28] Elapsed time: 11.301s total, 1.763s configuring, 9.271s building, 0.230s running
+ cleanup
++ stat -c %u:%g /kernel
+ chown -R 1003:1003 /kernel
^ permalink raw reply	[flat|nested] 42+ messages in thread

* ✓ Xe.CI.BAT: success for VF migration redesign (rev9)
2025-10-08 18:04 [PATCH v9 00/34] VF migration redesign Matthew Brost
` (35 preceding siblings ...)
2025-10-08 18:29 ` ✓ CI.KUnit: success " Patchwork
@ 2025-10-08 19:04 ` Patchwork
2025-10-08 21:40 ` ✗ Xe.CI.Full: failure " Patchwork
37 siblings, 0 replies; 42+ messages in thread
From: Patchwork @ 2025-10-08 19:04 UTC (permalink / raw)
To: Matthew Brost; +Cc: intel-xe
[-- Attachment #1: Type: text/plain, Size: 1307 bytes --]
== Series Details ==
Series: VF migration redesign (rev9)
URL : https://patchwork.freedesktop.org/series/154627/
State : success
== Summary ==
CI Bug Log - changes from xe-3886-f929eafc95342ea5377f79705864d50dd325c79f_BAT -> xe-pw-154627v9_BAT
====================================================
Summary
-------
**SUCCESS**
No regressions found.
Participating hosts (11 -> 11)
------------------------------
No changes in participating hosts
Known issues
------------
Here are the changes found in xe-pw-154627v9_BAT that come from known issues:
### IGT changes ###
{name}: This element is suppressed. This means it is ignored when computing
the status of the difference (SUCCESS, WARNING, or FAILURE).
[Intel XE#6287]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/6287
Build changes
-------------
* IGT: IGT_8578 -> IGT_8579
* Linux: xe-3886-f929eafc95342ea5377f79705864d50dd325c79f -> xe-pw-154627v9
IGT_8578: fd1f7d868fb6df61db6891ce0bbc8fcd26a0e7ba @ https://gitlab.freedesktop.org/drm/igt-gpu-tools.git
IGT_8579: 8579
xe-3886-f929eafc95342ea5377f79705864d50dd325c79f: f929eafc95342ea5377f79705864d50dd325c79f
xe-pw-154627v9: 154627v9
== Logs ==
For more details see: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/index.html
[-- Attachment #2: Type: text/html, Size: 1798 bytes --]
^ permalink raw reply	[flat|nested] 42+ messages in thread

* ✗ Xe.CI.Full: failure for VF migration redesign (rev9)
2025-10-08 18:04 [PATCH v9 00/34] VF migration redesign Matthew Brost
` (36 preceding siblings ...)
2025-10-08 19:04 ` ✓ Xe.CI.BAT: " Patchwork
@ 2025-10-08 21:40 ` Patchwork
37 siblings, 0 replies; 42+ messages in thread
From: Patchwork @ 2025-10-08 21:40 UTC (permalink / raw)
To: Matthew Brost; +Cc: intel-xe
[-- Attachment #1: Type: text/plain, Size: 94714 bytes --]
== Series Details ==
Series: VF migration redesign (rev9)
URL : https://patchwork.freedesktop.org/series/154627/
State : failure
== Summary ==
CI Bug Log - changes from xe-3886-f929eafc95342ea5377f79705864d50dd325c79f_FULL -> xe-pw-154627v9_FULL
====================================================
Summary
-------
**FAILURE**
Serious unknown changes coming with xe-pw-154627v9_FULL absolutely need to be
verified manually.
If you think the reported changes have nothing to do with the changes
introduced in xe-pw-154627v9_FULL, please notify your bug team (I915-ci-infra@lists.freedesktop.org) to allow them
to document this new failure mode, which will reduce false positives in CI.
Participating hosts (4 -> 4)
------------------------------
No changes in participating hosts
Possible new issues
-------------------
Here are the unknown changes that may have been introduced in xe-pw-154627v9_FULL:
### IGT changes ###
#### Possible regressions ####
* igt@xe_evict@evict-mixed-many-threads-small:
- shard-bmg: NOTRUN -> [INCOMPLETE][1]
[1]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-bmg-7/igt@xe_evict@evict-mixed-many-threads-small.html
* igt@xe_live_ktest@xe_dma_buf:
- shard-adlp: [PASS][2] -> [FAIL][3] +1 other test fail
[2]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-adlp-1/igt@xe_live_ktest@xe_dma_buf.html
[3]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-adlp-3/igt@xe_live_ktest@xe_dma_buf.html
#### Suppressed ####
The following results come from untrusted machines, tests, or statuses.
They do not affect the overall result.
* {igt@kms_frontbuffer_tracking@psr-1p-offscreen-pri-indfb-draw-blt}:
- shard-adlp: NOTRUN -> [SKIP][4] +1 other test skip
[4]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-adlp-8/igt@kms_frontbuffer_tracking@psr-1p-offscreen-pri-indfb-draw-blt.html
* {igt@kms_frontbuffer_tracking@psr-1p-offscreen-pri-shrfb-draw-blt}:
- shard-dg2-set2: NOTRUN -> [SKIP][5] +2 other tests skip
[5]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-dg2-436/igt@kms_frontbuffer_tracking@psr-1p-offscreen-pri-shrfb-draw-blt.html
* {igt@xe_pmu@engine-activity-suspend}:
- shard-dg2-set2: [PASS][6] -> [TIMEOUT][7]
[6]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-dg2-436/igt@xe_pmu@engine-activity-suspend.html
[7]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-dg2-466/igt@xe_pmu@engine-activity-suspend.html
Known issues
------------
Here are the changes found in xe-pw-154627v9_FULL that come from known issues:
### IGT changes ###
#### Issues hit ####
* igt@intel_hwmon@hwmon-write:
- shard-bmg: [PASS][8] -> [FAIL][9] ([Intel XE#4665])
[8]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-bmg-5/igt@intel_hwmon@hwmon-write.html
[9]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-bmg-3/igt@intel_hwmon@hwmon-write.html
* igt@kms_async_flips@async-flip-with-page-flip-events-tiled@pipe-b-hdmi-a-1-y:
- shard-adlp: NOTRUN -> [DMESG-WARN][10] ([Intel XE#4543]) +3 other tests dmesg-warn
[10]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-adlp-9/igt@kms_async_flips@async-flip-with-page-flip-events-tiled@pipe-b-hdmi-a-1-y.html
* igt@kms_async_flips@crc-atomic@pipe-d-hdmi-a-1:
- shard-adlp: [PASS][11] -> [FAIL][12] ([Intel XE#3884])
[11]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-adlp-6/igt@kms_async_flips@crc-atomic@pipe-d-hdmi-a-1.html
[12]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-adlp-1/igt@kms_async_flips@crc-atomic@pipe-d-hdmi-a-1.html
* igt@kms_big_fb@4-tiled-32bpp-rotate-90:
- shard-dg2-set2: NOTRUN -> [SKIP][13] ([Intel XE#316])
[13]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-dg2-434/igt@kms_big_fb@4-tiled-32bpp-rotate-90.html
* igt@kms_big_fb@linear-64bpp-rotate-0:
- shard-adlp: [PASS][14] -> [FAIL][15] ([Intel XE#5395])
[14]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-adlp-6/igt@kms_big_fb@linear-64bpp-rotate-0.html
[15]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-adlp-3/igt@kms_big_fb@linear-64bpp-rotate-0.html
* igt@kms_big_fb@x-tiled-32bpp-rotate-90:
- shard-bmg: NOTRUN -> [SKIP][16] ([Intel XE#2327]) +3 other tests skip
[16]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-bmg-3/igt@kms_big_fb@x-tiled-32bpp-rotate-90.html
* igt@kms_big_fb@x-tiled-max-hw-stride-64bpp-rotate-180:
- shard-adlp: NOTRUN -> [DMESG-FAIL][17] ([Intel XE#4543]) +1 other test dmesg-fail
[17]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-adlp-1/igt@kms_big_fb@x-tiled-max-hw-stride-64bpp-rotate-180.html
* igt@kms_big_fb@y-tiled-max-hw-stride-32bpp-rotate-0:
- shard-dg2-set2: NOTRUN -> [SKIP][18] ([Intel XE#1124]) +3 other tests skip
[18]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-dg2-435/igt@kms_big_fb@y-tiled-max-hw-stride-32bpp-rotate-0.html
* igt@kms_big_fb@yf-tiled-max-hw-stride-64bpp-rotate-180-async-flip:
- shard-adlp: NOTRUN -> [SKIP][19] ([Intel XE#1124]) +4 other tests skip
[19]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-adlp-3/igt@kms_big_fb@yf-tiled-max-hw-stride-64bpp-rotate-180-async-flip.html
- shard-bmg: NOTRUN -> [SKIP][20] ([Intel XE#1124]) +5 other tests skip
[20]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-bmg-5/igt@kms_big_fb@yf-tiled-max-hw-stride-64bpp-rotate-180-async-flip.html
- shard-lnl: NOTRUN -> [SKIP][21] ([Intel XE#1124])
[21]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-lnl-7/igt@kms_big_fb@yf-tiled-max-hw-stride-64bpp-rotate-180-async-flip.html
* igt@kms_bw@connected-linear-tiling-2-displays-1920x1080p:
- shard-bmg: [PASS][22] -> [SKIP][23] ([Intel XE#2314] / [Intel XE#2894])
[22]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-bmg-4/igt@kms_bw@connected-linear-tiling-2-displays-1920x1080p.html
[23]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-bmg-6/igt@kms_bw@connected-linear-tiling-2-displays-1920x1080p.html
* igt@kms_bw@connected-linear-tiling-2-displays-2160x1440p:
- shard-lnl: NOTRUN -> [SKIP][24] ([Intel XE#2191])
[24]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-lnl-7/igt@kms_bw@connected-linear-tiling-2-displays-2160x1440p.html
* igt@kms_bw@connected-linear-tiling-4-displays-2560x1440p:
- shard-adlp: NOTRUN -> [SKIP][25] ([Intel XE#2191]) +1 other test skip
[25]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-adlp-8/igt@kms_bw@connected-linear-tiling-4-displays-2560x1440p.html
* igt@kms_bw@linear-tiling-1-displays-2560x1440p:
- shard-bmg: NOTRUN -> [SKIP][26] ([Intel XE#367]) +2 other tests skip
[26]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-bmg-8/igt@kms_bw@linear-tiling-1-displays-2560x1440p.html
* igt@kms_bw@linear-tiling-3-displays-2560x1440p:
- shard-dg2-set2: NOTRUN -> [SKIP][27] ([Intel XE#367]) +1 other test skip
[27]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-dg2-464/igt@kms_bw@linear-tiling-3-displays-2560x1440p.html
* igt@kms_bw@linear-tiling-4-displays-2160x1440p:
- shard-adlp: NOTRUN -> [SKIP][28] ([Intel XE#367])
[28]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-adlp-9/igt@kms_bw@linear-tiling-4-displays-2160x1440p.html
- shard-lnl: NOTRUN -> [SKIP][29] ([Intel XE#1512])
[29]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-lnl-2/igt@kms_bw@linear-tiling-4-displays-2160x1440p.html
* igt@kms_ccs@bad-pixel-format-4-tiled-dg2-mc-ccs:
- shard-bmg: NOTRUN -> [SKIP][30] ([Intel XE#2887]) +9 other tests skip
[30]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-bmg-7/igt@kms_ccs@bad-pixel-format-4-tiled-dg2-mc-ccs.html
* igt@kms_ccs@bad-rotation-90-4-tiled-lnl-ccs:
- shard-adlp: NOTRUN -> [SKIP][31] ([Intel XE#2907])
[31]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-adlp-9/igt@kms_ccs@bad-rotation-90-4-tiled-lnl-ccs.html
- shard-dg2-set2: NOTRUN -> [SKIP][32] ([Intel XE#2907])
[32]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-dg2-434/igt@kms_ccs@bad-rotation-90-4-tiled-lnl-ccs.html
* igt@kms_ccs@crc-primary-basic-yf-tiled-ccs@pipe-d-dp-2:
- shard-dg2-set2: NOTRUN -> [SKIP][33] ([Intel XE#455] / [Intel XE#787]) +25 other tests skip
[33]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-dg2-432/igt@kms_ccs@crc-primary-basic-yf-tiled-ccs@pipe-d-dp-2.html
* igt@kms_ccs@crc-primary-rotation-180-4-tiled-dg2-rc-ccs@pipe-d-hdmi-a-1:
- shard-adlp: NOTRUN -> [SKIP][34] ([Intel XE#455] / [Intel XE#787]) +9 other tests skip
[34]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-adlp-8/igt@kms_ccs@crc-primary-rotation-180-4-tiled-dg2-rc-ccs@pipe-d-hdmi-a-1.html
* igt@kms_ccs@crc-primary-rotation-180-4-tiled-mtl-rc-ccs:
- shard-lnl: NOTRUN -> [SKIP][35] ([Intel XE#2887]) +3 other tests skip
[35]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-lnl-5/igt@kms_ccs@crc-primary-rotation-180-4-tiled-mtl-rc-ccs.html
* igt@kms_ccs@crc-primary-rotation-180-4-tiled-mtl-rc-ccs@pipe-b-hdmi-a-1:
- shard-adlp: NOTRUN -> [SKIP][36] ([Intel XE#787]) +14 other tests skip
[36]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-adlp-8/igt@kms_ccs@crc-primary-rotation-180-4-tiled-mtl-rc-ccs@pipe-b-hdmi-a-1.html
* igt@kms_ccs@crc-sprite-planes-basic-4-tiled-lnl-ccs@pipe-b-dp-2:
- shard-bmg: NOTRUN -> [SKIP][37] ([Intel XE#2652] / [Intel XE#787]) +3 other tests skip
[37]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-bmg-7/igt@kms_ccs@crc-sprite-planes-basic-4-tiled-lnl-ccs@pipe-b-dp-2.html
* igt@kms_ccs@crc-sprite-planes-basic-4-tiled-mtl-rc-ccs-cc@pipe-c-hdmi-a-6:
- shard-dg2-set2: NOTRUN -> [SKIP][38] ([Intel XE#787]) +146 other tests skip
[38]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-dg2-434/igt@kms_ccs@crc-sprite-planes-basic-4-tiled-mtl-rc-ccs-cc@pipe-c-hdmi-a-6.html
* igt@kms_ccs@random-ccs-data-4-tiled-dg2-mc-ccs@pipe-b-hdmi-a-6:
- shard-dg2-set2: [PASS][39] -> [INCOMPLETE][40] ([Intel XE#1727] / [Intel XE#3113] / [Intel XE#4345] / [Intel XE#6168])
[39]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-dg2-464/igt@kms_ccs@random-ccs-data-4-tiled-dg2-mc-ccs@pipe-b-hdmi-a-6.html
[40]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-dg2-434/igt@kms_ccs@random-ccs-data-4-tiled-dg2-mc-ccs@pipe-b-hdmi-a-6.html
* igt@kms_chamelium_color@ctm-0-25:
- shard-dg2-set2: NOTRUN -> [SKIP][41] ([Intel XE#306])
[41]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-dg2-435/igt@kms_chamelium_color@ctm-0-25.html
* igt@kms_chamelium_edid@dp-edid-change-during-suspend:
- shard-dg2-set2: NOTRUN -> [SKIP][42] ([Intel XE#373]) +3 other tests skip
[42]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-dg2-466/igt@kms_chamelium_edid@dp-edid-change-during-suspend.html
- shard-lnl: NOTRUN -> [SKIP][43] ([Intel XE#373]) +3 other tests skip
[43]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-lnl-8/igt@kms_chamelium_edid@dp-edid-change-during-suspend.html
* igt@kms_chamelium_edid@dp-edid-stress-resolution-4k:
- shard-adlp: NOTRUN -> [SKIP][44] ([Intel XE#373]) +3 other tests skip
[44]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-adlp-8/igt@kms_chamelium_edid@dp-edid-stress-resolution-4k.html
* igt@kms_chamelium_hpd@common-hpd-after-suspend:
- shard-bmg: NOTRUN -> [SKIP][45] ([Intel XE#2252]) +3 other tests skip
[45]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-bmg-2/igt@kms_chamelium_hpd@common-hpd-after-suspend.html
* igt@kms_content_protection@atomic-dpms:
- shard-dg2-set2: NOTRUN -> [FAIL][46] ([Intel XE#1178]) +1 other test fail
[46]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-dg2-434/igt@kms_content_protection@atomic-dpms.html
* igt@kms_content_protection@content-type-change:
- shard-bmg: NOTRUN -> [SKIP][47] ([Intel XE#2341])
[47]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-bmg-8/igt@kms_content_protection@content-type-change.html
* igt@kms_content_protection@srm@pipe-a-dp-2:
- shard-bmg: NOTRUN -> [FAIL][48] ([Intel XE#1178])
[48]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-bmg-4/igt@kms_content_protection@srm@pipe-a-dp-2.html
* igt@kms_cursor_crc@cursor-offscreen-256x85:
- shard-bmg: NOTRUN -> [SKIP][49] ([Intel XE#2320]) +1 other test skip
[49]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-bmg-1/igt@kms_cursor_crc@cursor-offscreen-256x85.html
* igt@kms_cursor_crc@cursor-offscreen-512x512:
- shard-adlp: NOTRUN -> [SKIP][50] ([Intel XE#308])
[50]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-adlp-2/igt@kms_cursor_crc@cursor-offscreen-512x512.html
* igt@kms_cursor_legacy@2x-long-nonblocking-modeset-vs-cursor-atomic:
- shard-lnl: NOTRUN -> [SKIP][51] ([Intel XE#309]) +1 other test skip
[51]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-lnl-7/igt@kms_cursor_legacy@2x-long-nonblocking-modeset-vs-cursor-atomic.html
* igt@kms_cursor_legacy@cursora-vs-flipb-atomic-transitions-varying-size:
- shard-adlp: NOTRUN -> [SKIP][52] ([Intel XE#309]) +2 other tests skip
[52]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-adlp-1/igt@kms_cursor_legacy@cursora-vs-flipb-atomic-transitions-varying-size.html
* igt@kms_cursor_legacy@cursora-vs-flipb-varying-size:
- shard-bmg: [PASS][53] -> [SKIP][54] ([Intel XE#2291]) +1 other test skip
[53]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-bmg-1/igt@kms_cursor_legacy@cursora-vs-flipb-varying-size.html
[54]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-bmg-6/igt@kms_cursor_legacy@cursora-vs-flipb-varying-size.html
* igt@kms_cursor_legacy@short-busy-flip-before-cursor-atomic-transitions:
- shard-dg2-set2: NOTRUN -> [SKIP][55] ([Intel XE#323])
[55]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-dg2-434/igt@kms_cursor_legacy@short-busy-flip-before-cursor-atomic-transitions.html
- shard-lnl: NOTRUN -> [SKIP][56] ([Intel XE#323])
[56]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-lnl-7/igt@kms_cursor_legacy@short-busy-flip-before-cursor-atomic-transitions.html
- shard-adlp: NOTRUN -> [SKIP][57] ([Intel XE#323])
[57]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-adlp-8/igt@kms_cursor_legacy@short-busy-flip-before-cursor-atomic-transitions.html
- shard-bmg: NOTRUN -> [SKIP][58] ([Intel XE#2286])
[58]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-bmg-7/igt@kms_cursor_legacy@short-busy-flip-before-cursor-atomic-transitions.html
* igt@kms_dp_link_training@non-uhbr-mst:
- shard-dg2-set2: NOTRUN -> [SKIP][59] ([Intel XE#4354])
[59]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-dg2-463/igt@kms_dp_link_training@non-uhbr-mst.html
- shard-lnl: NOTRUN -> [SKIP][60] ([Intel XE#4354])
[60]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-lnl-1/igt@kms_dp_link_training@non-uhbr-mst.html
- shard-adlp: NOTRUN -> [SKIP][61] ([Intel XE#4354])
[61]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-adlp-4/igt@kms_dp_link_training@non-uhbr-mst.html
- shard-bmg: NOTRUN -> [SKIP][62] ([Intel XE#4354])
[62]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-bmg-3/igt@kms_dp_link_training@non-uhbr-mst.html
* igt@kms_fbc_dirty_rect@fbc-dirty-rectangle-out-visible-area:
- shard-dg2-set2: NOTRUN -> [SKIP][63] ([Intel XE#4422])
[63]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-dg2-463/igt@kms_fbc_dirty_rect@fbc-dirty-rectangle-out-visible-area.html
* igt@kms_feature_discovery@display-4x:
- shard-bmg: NOTRUN -> [SKIP][64] ([Intel XE#1138])
[64]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-bmg-3/igt@kms_feature_discovery@display-4x.html
* igt@kms_feature_discovery@dp-mst:
- shard-bmg: NOTRUN -> [SKIP][65] ([Intel XE#2375])
[65]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-bmg-7/igt@kms_feature_discovery@dp-mst.html
* igt@kms_flip@2x-blocking-absolute-wf_vblank:
- shard-lnl: NOTRUN -> [SKIP][66] ([Intel XE#1421]) +1 other test skip
[66]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-lnl-7/igt@kms_flip@2x-blocking-absolute-wf_vblank.html
- shard-adlp: NOTRUN -> [SKIP][67] ([Intel XE#310])
[67]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-adlp-1/igt@kms_flip@2x-blocking-absolute-wf_vblank.html
* igt@kms_flip@2x-plain-flip-fb-recreate:
- shard-bmg: [PASS][68] -> [SKIP][69] ([Intel XE#2316]) +1 other test skip
[68]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-bmg-3/igt@kms_flip@2x-plain-flip-fb-recreate.html
[69]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-bmg-6/igt@kms_flip@2x-plain-flip-fb-recreate.html
* igt@kms_flip@flip-vs-suspend-interruptible@d-hdmi-a1:
- shard-adlp: [PASS][70] -> [DMESG-WARN][71] ([Intel XE#4543]) +4 other tests dmesg-warn
[70]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-adlp-8/igt@kms_flip@flip-vs-suspend-interruptible@d-hdmi-a1.html
[71]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-adlp-8/igt@kms_flip@flip-vs-suspend-interruptible@d-hdmi-a1.html
* igt@kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-downscaling:
- shard-lnl: NOTRUN -> [SKIP][72] ([Intel XE#1397] / [Intel XE#1745])
[72]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-lnl-2/igt@kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-downscaling.html
* igt@kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-downscaling@pipe-a-default-mode:
- shard-lnl: NOTRUN -> [SKIP][73] ([Intel XE#1397])
[73]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-lnl-2/igt@kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-downscaling@pipe-a-default-mode.html
* igt@kms_flip_scaled_crc@flip-64bpp-4tile-to-32bpp-4tiledg2rcccs-downscaling:
- shard-bmg: NOTRUN -> [SKIP][74] ([Intel XE#2380]) +1 other test skip
[74]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-bmg-6/igt@kms_flip_scaled_crc@flip-64bpp-4tile-to-32bpp-4tiledg2rcccs-downscaling.html
- shard-lnl: NOTRUN -> [SKIP][75] ([Intel XE#1401] / [Intel XE#1745])
[75]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-lnl-5/igt@kms_flip_scaled_crc@flip-64bpp-4tile-to-32bpp-4tiledg2rcccs-downscaling.html
* igt@kms_flip_scaled_crc@flip-64bpp-4tile-to-32bpp-4tiledg2rcccs-downscaling@pipe-a-default-mode:
- shard-lnl: NOTRUN -> [SKIP][76] ([Intel XE#1401])
[76]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-lnl-5/igt@kms_flip_scaled_crc@flip-64bpp-4tile-to-32bpp-4tiledg2rcccs-downscaling@pipe-a-default-mode.html
* igt@kms_flip_scaled_crc@flip-64bpp-yftile-to-16bpp-yftile-upscaling:
- shard-bmg: NOTRUN -> [SKIP][77] ([Intel XE#2293] / [Intel XE#2380])
[77]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-bmg-5/igt@kms_flip_scaled_crc@flip-64bpp-yftile-to-16bpp-yftile-upscaling.html
* igt@kms_flip_scaled_crc@flip-64bpp-yftile-to-16bpp-yftile-upscaling@pipe-a-valid-mode:
- shard-bmg: NOTRUN -> [SKIP][78] ([Intel XE#2293])
[78]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-bmg-5/igt@kms_flip_scaled_crc@flip-64bpp-yftile-to-16bpp-yftile-upscaling@pipe-a-valid-mode.html
* igt@kms_force_connector_basic@force-connector-state:
- shard-lnl: NOTRUN -> [SKIP][79] ([Intel XE#352]) +1 other test skip
[79]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-lnl-2/igt@kms_force_connector_basic@force-connector-state.html
* igt@kms_frontbuffer_tracking@drrs-1p-primscrn-shrfb-pgflip-blt:
- shard-lnl: NOTRUN -> [SKIP][80] ([Intel XE#651]) +4 other tests skip
[80]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-lnl-3/igt@kms_frontbuffer_tracking@drrs-1p-primscrn-shrfb-pgflip-blt.html
* igt@kms_frontbuffer_tracking@drrs-2p-pri-indfb-multidraw:
- shard-bmg: NOTRUN -> [SKIP][81] ([Intel XE#2311]) +20 other tests skip
[81]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-bmg-1/igt@kms_frontbuffer_tracking@drrs-2p-pri-indfb-multidraw.html
* igt@kms_frontbuffer_tracking@drrs-2p-primscrn-cur-indfb-draw-render:
- shard-adlp: NOTRUN -> [SKIP][82] ([Intel XE#656]) +12 other tests skip
[82]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-adlp-4/igt@kms_frontbuffer_tracking@drrs-2p-primscrn-cur-indfb-draw-render.html
* igt@kms_frontbuffer_tracking@drrs-2p-primscrn-indfb-plflip-blt:
- shard-dg2-set2: NOTRUN -> [SKIP][83] ([Intel XE#651]) +13 other tests skip
[83]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-dg2-466/igt@kms_frontbuffer_tracking@drrs-2p-primscrn-indfb-plflip-blt.html
* igt@kms_frontbuffer_tracking@fbc-2p-primscrn-spr-indfb-fullscreen:
- shard-bmg: NOTRUN -> [SKIP][84] ([Intel XE#5390]) +6 other tests skip
[84]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-bmg-4/igt@kms_frontbuffer_tracking@fbc-2p-primscrn-spr-indfb-fullscreen.html
* igt@kms_frontbuffer_tracking@fbcdrrs-2p-primscrn-spr-indfb-draw-mmap-wc:
- shard-bmg: NOTRUN -> [SKIP][85] ([Intel XE#2312])
[85]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-bmg-6/igt@kms_frontbuffer_tracking@fbcdrrs-2p-primscrn-spr-indfb-draw-mmap-wc.html
* igt@kms_frontbuffer_tracking@fbcdrrs-shrfb-scaledprimary:
- shard-adlp: NOTRUN -> [SKIP][86] ([Intel XE#651]) +3 other tests skip
[86]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-adlp-3/igt@kms_frontbuffer_tracking@fbcdrrs-shrfb-scaledprimary.html
* igt@kms_frontbuffer_tracking@fbcpsr-1p-primscrn-cur-indfb-move:
- shard-bmg: NOTRUN -> [SKIP][87] ([Intel XE#2313]) +13 other tests skip
[87]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-bmg-3/igt@kms_frontbuffer_tracking@fbcpsr-1p-primscrn-cur-indfb-move.html
* igt@kms_frontbuffer_tracking@fbcpsr-tiling-y:
- shard-bmg: NOTRUN -> [SKIP][88] ([Intel XE#2352]) +1 other test skip
[88]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-bmg-5/igt@kms_frontbuffer_tracking@fbcpsr-tiling-y.html
- shard-dg2-set2: NOTRUN -> [SKIP][89] ([Intel XE#658])
[89]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-dg2-432/igt@kms_frontbuffer_tracking@fbcpsr-tiling-y.html
* igt@kms_frontbuffer_tracking@psr-2p-primscrn-pri-indfb-draw-blt:
- shard-lnl: NOTRUN -> [SKIP][90] ([Intel XE#656]) +8 other tests skip
[90]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-lnl-3/igt@kms_frontbuffer_tracking@psr-2p-primscrn-pri-indfb-draw-blt.html
* igt@kms_frontbuffer_tracking@psr-2p-primscrn-shrfb-msflip-blt:
- shard-dg2-set2: NOTRUN -> [SKIP][91] ([Intel XE#653]) +8 other tests skip
[91]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-dg2-464/igt@kms_frontbuffer_tracking@psr-2p-primscrn-shrfb-msflip-blt.html
* igt@kms_frontbuffer_tracking@psr-rgb565-draw-mmap-wc:
- shard-adlp: NOTRUN -> [SKIP][92] ([Intel XE#653]) +2 other tests skip
[92]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-adlp-1/igt@kms_frontbuffer_tracking@psr-rgb565-draw-mmap-wc.html
* igt@kms_hdr@brightness-with-hdr:
- shard-dg2-set2: NOTRUN -> [SKIP][93] ([Intel XE#455]) +7 other tests skip
[93]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-dg2-433/igt@kms_hdr@brightness-with-hdr.html
- shard-bmg: NOTRUN -> [SKIP][94] ([Intel XE#3374] / [Intel XE#3544])
[94]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-bmg-4/igt@kms_hdr@brightness-with-hdr.html
* igt@kms_joiner@basic-big-joiner:
- shard-bmg: NOTRUN -> [SKIP][95] ([Intel XE#346])
[95]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-bmg-7/igt@kms_joiner@basic-big-joiner.html
* igt@kms_joiner@basic-force-ultra-joiner:
- shard-bmg: NOTRUN -> [SKIP][96] ([Intel XE#2934])
[96]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-bmg-6/igt@kms_joiner@basic-force-ultra-joiner.html
- shard-dg2-set2: NOTRUN -> [SKIP][97] ([Intel XE#2925])
[97]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-dg2-434/igt@kms_joiner@basic-force-ultra-joiner.html
* igt@kms_multipipe_modeset@basic-max-pipe-crc-check:
- shard-adlp: NOTRUN -> [SKIP][98] ([Intel XE#356])
[98]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-adlp-1/igt@kms_multipipe_modeset@basic-max-pipe-crc-check.html
* igt@kms_panel_fitting@legacy:
- shard-bmg: NOTRUN -> [SKIP][99] ([Intel XE#2486])
[99]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-bmg-8/igt@kms_panel_fitting@legacy.html
* igt@kms_plane_scaling@intel-max-src-size:
- shard-bmg: [PASS][100] -> [SKIP][101] ([Intel XE#2685] / [Intel XE#3307])
[100]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-bmg-2/igt@kms_plane_scaling@intel-max-src-size.html
[101]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-bmg-8/igt@kms_plane_scaling@intel-max-src-size.html
- shard-dg2-set2: [PASS][102] -> [SKIP][103] ([Intel XE#455])
[102]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-dg2-463/igt@kms_plane_scaling@intel-max-src-size.html
[103]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-dg2-433/igt@kms_plane_scaling@intel-max-src-size.html
* igt@kms_pm_dc@dc6-psr:
- shard-lnl: [PASS][104] -> [FAIL][105] ([Intel XE#718]) +1 other test fail
[104]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-lnl-4/igt@kms_pm_dc@dc6-psr.html
[105]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-lnl-5/igt@kms_pm_dc@dc6-psr.html
- shard-dg2-set2: NOTRUN -> [SKIP][106] ([Intel XE#1129])
[106]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-dg2-434/igt@kms_pm_dc@dc6-psr.html
* igt@kms_pm_dc@dc9-dpms:
- shard-adlp: NOTRUN -> [SKIP][107] ([Intel XE#734])
[107]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-adlp-2/igt@kms_pm_dc@dc9-dpms.html
* igt@kms_pm_rpm@dpms-mode-unset-lpsp:
- shard-adlp: [PASS][108] -> [SKIP][109] ([Intel XE#6070])
[108]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-adlp-1/igt@kms_pm_rpm@dpms-mode-unset-lpsp.html
[109]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-adlp-3/igt@kms_pm_rpm@dpms-mode-unset-lpsp.html
* igt@kms_psr2_sf@fbc-pr-overlay-plane-move-continuous-exceed-fully-sf:
- shard-dg2-set2: NOTRUN -> [SKIP][110] ([Intel XE#1406] / [Intel XE#1489]) +4 other tests skip
[110]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-dg2-432/igt@kms_psr2_sf@fbc-pr-overlay-plane-move-continuous-exceed-fully-sf.html
* igt@kms_psr2_sf@fbc-psr2-cursor-plane-move-continuous-sf:
- shard-adlp: NOTRUN -> [SKIP][111] ([Intel XE#1406] / [Intel XE#1489]) +3 other tests skip
[111]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-adlp-3/igt@kms_psr2_sf@fbc-psr2-cursor-plane-move-continuous-sf.html
- shard-bmg: NOTRUN -> [SKIP][112] ([Intel XE#1406] / [Intel XE#1489]) +4 other tests skip
[112]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-bmg-3/igt@kms_psr2_sf@fbc-psr2-cursor-plane-move-continuous-sf.html
- shard-lnl: NOTRUN -> [SKIP][113] ([Intel XE#1406] / [Intel XE#2893] / [Intel XE#4608])
[113]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-lnl-1/igt@kms_psr2_sf@fbc-psr2-cursor-plane-move-continuous-sf.html
* igt@kms_psr2_sf@fbc-psr2-cursor-plane-move-continuous-sf@pipe-a-edp-1:
- shard-lnl: NOTRUN -> [SKIP][114] ([Intel XE#1406] / [Intel XE#4608]) +1 other test skip
[114]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-lnl-1/igt@kms_psr2_sf@fbc-psr2-cursor-plane-move-continuous-sf@pipe-a-edp-1.html
* igt@kms_psr2_sf@pr-overlay-plane-update-continuous-sf:
- shard-lnl: NOTRUN -> [SKIP][115] ([Intel XE#1406] / [Intel XE#2893])
[115]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-lnl-5/igt@kms_psr2_sf@pr-overlay-plane-update-continuous-sf.html
* igt@kms_psr2_su@frontbuffer-xrgb8888:
- shard-adlp: NOTRUN -> [SKIP][116] ([Intel XE#1122] / [Intel XE#1406] / [Intel XE#5580])
[116]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-adlp-4/igt@kms_psr2_su@frontbuffer-xrgb8888.html
* igt@kms_psr2_su@page_flip-xrgb8888:
- shard-dg2-set2: NOTRUN -> [SKIP][117] ([Intel XE#1122] / [Intel XE#1406])
[117]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-dg2-435/igt@kms_psr2_su@page_flip-xrgb8888.html
* igt@kms_psr@fbc-psr2-cursor-plane-move:
- shard-bmg: NOTRUN -> [SKIP][118] ([Intel XE#1406] / [Intel XE#2234] / [Intel XE#2850]) +9 other tests skip
[118]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-bmg-4/igt@kms_psr@fbc-psr2-cursor-plane-move.html
* igt@kms_psr@pr-primary-page-flip:
- shard-adlp: NOTRUN -> [SKIP][119] ([Intel XE#1406] / [Intel XE#2850] / [Intel XE#929]) +4 other tests skip
[119]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-adlp-8/igt@kms_psr@pr-primary-page-flip.html
* igt@kms_psr@pr-sprite-plane-onoff:
- shard-dg2-set2: NOTRUN -> [SKIP][120] ([Intel XE#1406] / [Intel XE#2850] / [Intel XE#929]) +1 other test skip
[120]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-dg2-435/igt@kms_psr@pr-sprite-plane-onoff.html
- shard-lnl: NOTRUN -> [SKIP][121] ([Intel XE#1406]) +2 other tests skip
[121]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-lnl-8/igt@kms_psr@pr-sprite-plane-onoff.html
* igt@kms_psr_stress_test@flip-primary-invalidate-overlay:
- shard-bmg: NOTRUN -> [SKIP][122] ([Intel XE#1406] / [Intel XE#2414])
[122]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-bmg-8/igt@kms_psr_stress_test@flip-primary-invalidate-overlay.html
* igt@kms_psr_stress_test@invalidate-primary-flip-overlay:
- shard-dg2-set2: NOTRUN -> [SKIP][123] ([Intel XE#1406] / [Intel XE#2939])
[123]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-dg2-466/igt@kms_psr_stress_test@invalidate-primary-flip-overlay.html
* igt@kms_rotation_crc@primary-yf-tiled-reflect-x-180:
- shard-bmg: NOTRUN -> [SKIP][124] ([Intel XE#2330]) +1 other test skip
[124]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-bmg-2/igt@kms_rotation_crc@primary-yf-tiled-reflect-x-180.html
* igt@kms_scaling_modes@scaling-mode-full:
- shard-bmg: NOTRUN -> [SKIP][125] ([Intel XE#2413])
[125]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-bmg-1/igt@kms_scaling_modes@scaling-mode-full.html
* igt@kms_setmode@clone-exclusive-crtc:
- shard-bmg: NOTRUN -> [SKIP][126] ([Intel XE#1435])
[126]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-bmg-6/igt@kms_setmode@clone-exclusive-crtc.html
* igt@kms_universal_plane@disable-primary-vs-flip:
- shard-adlp: [PASS][127] -> [DMESG-WARN][128] ([Intel XE#2953] / [Intel XE#4173]) +4 other tests dmesg-warn
[127]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-adlp-3/igt@kms_universal_plane@disable-primary-vs-flip.html
[128]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-adlp-8/igt@kms_universal_plane@disable-primary-vs-flip.html
* igt@kms_vrr@flip-dpms:
- shard-bmg: NOTRUN -> [SKIP][129] ([Intel XE#1499])
[129]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-bmg-3/igt@kms_vrr@flip-dpms.html
* igt@kms_vrr@max-min:
- shard-lnl: [PASS][130] -> [FAIL][131] ([Intel XE#4227]) +1 other test fail
[130]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-lnl-1/igt@kms_vrr@max-min.html
[131]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-lnl-5/igt@kms_vrr@max-min.html
* igt@kms_vrr@seamless-rr-switch-vrr:
- shard-adlp: NOTRUN -> [SKIP][132] ([Intel XE#455]) +3 other tests skip
[132]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-adlp-6/igt@kms_vrr@seamless-rr-switch-vrr.html
* igt@sriov_basic@enable-vfs-bind-unbind-each-numvfs-all:
- shard-dg2-set2: NOTRUN -> [SKIP][133] ([Intel XE#1091] / [Intel XE#2849])
[133]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-dg2-435/igt@sriov_basic@enable-vfs-bind-unbind-each-numvfs-all.html
* igt@xe_compute_preempt@compute-preempt-many:
- shard-adlp: NOTRUN -> [SKIP][134] ([Intel XE#455] / [Intel XE#5632]) +1 other test skip
[134]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-adlp-2/igt@xe_compute_preempt@compute-preempt-many.html
* igt@xe_compute_preempt@compute-preempt-many-all-ram@engine-drm_xe_engine_class_compute:
- shard-dg2-set2: NOTRUN -> [FAIL][135] ([Intel XE#5890]) +1 other test fail
[135]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-dg2-433/igt@xe_compute_preempt@compute-preempt-many-all-ram@engine-drm_xe_engine_class_compute.html
* igt@xe_copy_basic@mem-set-linear-0x3fff:
- shard-dg2-set2: NOTRUN -> [SKIP][136] ([Intel XE#1126])
[136]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-dg2-432/igt@xe_copy_basic@mem-set-linear-0x3fff.html
* igt@xe_copy_basic@mem-set-linear-0xfffe:
- shard-adlp: NOTRUN -> [SKIP][137] ([Intel XE#1126])
[137]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-adlp-1/igt@xe_copy_basic@mem-set-linear-0xfffe.html
* igt@xe_eu_stall@non-blocking-re-enable:
- shard-adlp: NOTRUN -> [SKIP][138] ([Intel XE#5626]) +1 other test skip
[138]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-adlp-3/igt@xe_eu_stall@non-blocking-re-enable.html
- shard-dg2-set2: NOTRUN -> [SKIP][139] ([Intel XE#5626])
[139]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-dg2-436/igt@xe_eu_stall@non-blocking-re-enable.html
* igt@xe_eudebug@discovery-race-vmbind:
- shard-adlp: NOTRUN -> [SKIP][140] ([Intel XE#4837] / [Intel XE#5565]) +4 other tests skip
[140]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-adlp-2/igt@xe_eudebug@discovery-race-vmbind.html
* igt@xe_eudebug@multigpu-basic-client-many:
- shard-lnl: NOTRUN -> [SKIP][141] ([Intel XE#4837]) +2 other tests skip
[141]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-lnl-3/igt@xe_eudebug@multigpu-basic-client-many.html
* igt@xe_eudebug_online@single-step-one:
- shard-bmg: NOTRUN -> [SKIP][142] ([Intel XE#4837]) +7 other tests skip
[142]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-bmg-2/igt@xe_eudebug_online@single-step-one.html
* igt@xe_eudebug_online@writes-caching-sram-bb-sram-target-sram:
- shard-dg2-set2: NOTRUN -> [SKIP][143] ([Intel XE#4837]) +4 other tests skip
[143]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-dg2-433/igt@xe_eudebug_online@writes-caching-sram-bb-sram-target-sram.html
* igt@xe_evict@evict-beng-small-external-cm:
- shard-adlp: NOTRUN -> [SKIP][144] ([Intel XE#261] / [Intel XE#5564] / [Intel XE#688]) +1 other test skip
[144]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-adlp-2/igt@xe_evict@evict-beng-small-external-cm.html
* igt@xe_evict@evict-beng-threads-large-multi-vm:
- shard-lnl: NOTRUN -> [SKIP][145] ([Intel XE#688]) +33 other tests skip
[145]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-lnl-7/igt@xe_evict@evict-beng-threads-large-multi-vm.html
* igt@xe_evict@evict-mixed-many-threads-small:
- shard-adlp: NOTRUN -> [SKIP][146] ([Intel XE#261]) +25 other tests skip
[146]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-adlp-3/igt@xe_evict@evict-mixed-many-threads-small.html
* igt@xe_evict@evict-threads-small:
- shard-adlp: NOTRUN -> [SKIP][147] ([Intel XE#261] / [Intel XE#688]) +5 other tests skip
[147]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-adlp-1/igt@xe_evict@evict-threads-small.html
* igt@xe_exec_basic@multigpu-many-execqueues-many-vm-bindexecqueue-rebind:
- shard-dg2-set2: [PASS][148] -> [SKIP][149] ([Intel XE#1392]) +3 other tests skip
[148]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-dg2-435/igt@xe_exec_basic@multigpu-many-execqueues-many-vm-bindexecqueue-rebind.html
[149]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-dg2-432/igt@xe_exec_basic@multigpu-many-execqueues-many-vm-bindexecqueue-rebind.html
* igt@xe_exec_basic@multigpu-many-execqueues-many-vm-bindexecqueue-userptr-rebind:
- shard-lnl: NOTRUN -> [SKIP][150] ([Intel XE#1392]) +2 other tests skip
[150]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-lnl-1/igt@xe_exec_basic@multigpu-many-execqueues-many-vm-bindexecqueue-userptr-rebind.html
* igt@xe_exec_basic@multigpu-no-exec-basic-defer-bind:
- shard-bmg: NOTRUN -> [SKIP][151] ([Intel XE#2322]) +6 other tests skip
[151]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-bmg-5/igt@xe_exec_basic@multigpu-no-exec-basic-defer-bind.html
* igt@xe_exec_basic@multigpu-no-exec-null-rebind:
- shard-adlp: NOTRUN -> [SKIP][152] ([Intel XE#1392] / [Intel XE#5575]) +2 other tests skip
[152]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-adlp-8/igt@xe_exec_basic@multigpu-no-exec-null-rebind.html
* igt@xe_exec_fault_mode@many-bindexecqueue-userptr-rebind:
- shard-adlp: NOTRUN -> [SKIP][153] ([Intel XE#288] / [Intel XE#5561]) +8 other tests skip
[153]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-adlp-9/igt@xe_exec_fault_mode@many-bindexecqueue-userptr-rebind.html
* igt@xe_exec_fault_mode@many-execqueues-bindexecqueue-prefetch:
- shard-dg2-set2: NOTRUN -> [SKIP][154] ([Intel XE#288]) +12 other tests skip
[154]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-dg2-434/igt@xe_exec_fault_mode@many-execqueues-bindexecqueue-prefetch.html
* igt@xe_exec_mix_modes@exec-spinner-interrupted-lr:
- shard-adlp: NOTRUN -> [SKIP][155] ([Intel XE#2360])
[155]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-adlp-8/igt@xe_exec_mix_modes@exec-spinner-interrupted-lr.html
* igt@xe_exec_system_allocator@process-many-large-execqueues-mmap-nomemset:
- shard-dg2-set2: NOTRUN -> [SKIP][156] ([Intel XE#4915]) +123 other tests skip
[156]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-dg2-432/igt@xe_exec_system_allocator@process-many-large-execqueues-mmap-nomemset.html
* igt@xe_exec_system_allocator@threads-many-mmap-new-huge-nomemset:
- shard-bmg: NOTRUN -> [SKIP][157] ([Intel XE#4943]) +5 other tests skip
[157]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-bmg-7/igt@xe_exec_system_allocator@threads-many-mmap-new-huge-nomemset.html
* igt@xe_exec_system_allocator@threads-shared-vm-many-large-mmap-new-huge-nomemset:
- shard-lnl: NOTRUN -> [SKIP][158] ([Intel XE#4943]) +3 other tests skip
[158]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-lnl-3/igt@xe_exec_system_allocator@threads-shared-vm-many-large-mmap-new-huge-nomemset.html
* igt@xe_exec_system_allocator@twice-free:
- shard-adlp: NOTRUN -> [SKIP][159] ([Intel XE#4915]) +89 other tests skip
[159]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-adlp-9/igt@xe_exec_system_allocator@twice-free.html
* igt@xe_fault_injection@probe-fail-guc-xe_guc_mmio_send_recv:
- shard-dg2-set2: [PASS][160] -> [DMESG-WARN][161] ([Intel XE#5893])
[160]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-dg2-435/igt@xe_fault_injection@probe-fail-guc-xe_guc_mmio_send_recv.html
[161]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-dg2-466/igt@xe_fault_injection@probe-fail-guc-xe_guc_mmio_send_recv.html
* igt@xe_media_fill@media-fill:
- shard-dg2-set2: NOTRUN -> [SKIP][162] ([Intel XE#560])
[162]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-dg2-435/igt@xe_media_fill@media-fill.html
* igt@xe_module_load@force-load:
- shard-dg2-set2: NOTRUN -> [SKIP][163] ([Intel XE#378])
[163]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-dg2-463/igt@xe_module_load@force-load.html
* igt@xe_oa@oa-regs-whitelisted:
- shard-dg2-set2: NOTRUN -> [SKIP][164] ([Intel XE#3573]) +2 other tests skip
[164]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-dg2-436/igt@xe_oa@oa-regs-whitelisted.html
* igt@xe_oa@syncs-ufence-wait:
- shard-adlp: NOTRUN -> [SKIP][165] ([Intel XE#3573]) +2 other tests skip
[165]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-adlp-4/igt@xe_oa@syncs-ufence-wait.html
* igt@xe_peer2peer@read@read-gpua-vram01-gpub-system-p2p:
- shard-dg2-set2: NOTRUN -> [FAIL][166] ([Intel XE#1173]) +1 other test fail
[166]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-dg2-435/igt@xe_peer2peer@read@read-gpua-vram01-gpub-system-p2p.html
* igt@xe_pm@d3hot-i2c:
- shard-dg2-set2: NOTRUN -> [SKIP][167] ([Intel XE#5742])
[167]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-dg2-436/igt@xe_pm@d3hot-i2c.html
- shard-lnl: NOTRUN -> [SKIP][168] ([Intel XE#5742])
[168]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-lnl-3/igt@xe_pm@d3hot-i2c.html
- shard-adlp: NOTRUN -> [SKIP][169] ([Intel XE#5742])
[169]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-adlp-2/igt@xe_pm@d3hot-i2c.html
- shard-bmg: NOTRUN -> [SKIP][170] ([Intel XE#5742])
[170]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-bmg-5/igt@xe_pm@d3hot-i2c.html
* igt@xe_pm@s3-d3cold-basic-exec:
- shard-bmg: NOTRUN -> [SKIP][171] ([Intel XE#2284]) +3 other tests skip
[171]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-bmg-4/igt@xe_pm@s3-d3cold-basic-exec.html
* igt@xe_pm@s3-multiple-execs:
- shard-lnl: NOTRUN -> [SKIP][172] ([Intel XE#584]) +1 other test skip
[172]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-lnl-2/igt@xe_pm@s3-multiple-execs.html
* igt@xe_pm@s4-d3cold-basic-exec:
- shard-lnl: NOTRUN -> [SKIP][173] ([Intel XE#2284] / [Intel XE#366])
[173]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-lnl-2/igt@xe_pm@s4-d3cold-basic-exec.html
- shard-adlp: NOTRUN -> [SKIP][174] ([Intel XE#2284] / [Intel XE#366])
[174]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-adlp-4/igt@xe_pm@s4-d3cold-basic-exec.html
- shard-dg2-set2: NOTRUN -> [SKIP][175] ([Intel XE#2284] / [Intel XE#366])
[175]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-dg2-435/igt@xe_pm@s4-d3cold-basic-exec.html
* igt@xe_pxp@display-pxp-fb:
- shard-adlp: NOTRUN -> [SKIP][176] ([Intel XE#4733])
[176]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-adlp-8/igt@xe_pxp@display-pxp-fb.html
* igt@xe_pxp@pxp-stale-bo-bind-post-rpm:
- shard-adlp: NOTRUN -> [SKIP][177] ([Intel XE#4733] / [Intel XE#5594])
[177]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-adlp-8/igt@xe_pxp@pxp-stale-bo-bind-post-rpm.html
* igt@xe_pxp@pxp-stale-bo-bind-post-suspend:
- shard-dg2-set2: NOTRUN -> [SKIP][178] ([Intel XE#4733])
[178]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-dg2-435/igt@xe_pxp@pxp-stale-bo-bind-post-suspend.html
* igt@xe_pxp@pxp-stale-queue-post-termination-irq:
- shard-bmg: NOTRUN -> [SKIP][179] ([Intel XE#4733])
[179]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-bmg-1/igt@xe_pxp@pxp-stale-queue-post-termination-irq.html
* igt@xe_query@multigpu-query-invalid-extension:
- shard-bmg: NOTRUN -> [SKIP][180] ([Intel XE#944]) +1 other test skip
[180]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-bmg-3/igt@xe_query@multigpu-query-invalid-extension.html
- shard-adlp: NOTRUN -> [FAIL][181] ([Intel XE#6249])
[181]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-adlp-3/igt@xe_query@multigpu-query-invalid-extension.html
- shard-dg2-set2: NOTRUN -> [SKIP][182] ([Intel XE#944]) +1 other test skip
[182]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-dg2-436/igt@xe_query@multigpu-query-invalid-extension.html
- shard-lnl: NOTRUN -> [SKIP][183] ([Intel XE#944])
[183]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-lnl-1/igt@xe_query@multigpu-query-invalid-extension.html
* igt@xe_render_copy@render-stress-2-copies:
- shard-adlp: NOTRUN -> [SKIP][184] ([Intel XE#4814] / [Intel XE#5614])
[184]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-adlp-1/igt@xe_render_copy@render-stress-2-copies.html
* igt@xe_sriov_auto_provisioning@selfconfig-reprovision-increase-numvfs:
- shard-dg2-set2: NOTRUN -> [SKIP][185] ([Intel XE#4130])
[185]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-dg2-463/igt@xe_sriov_auto_provisioning@selfconfig-reprovision-increase-numvfs.html
- shard-lnl: NOTRUN -> [SKIP][186] ([Intel XE#4130])
[186]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-lnl-1/igt@xe_sriov_auto_provisioning@selfconfig-reprovision-increase-numvfs.html
* igt@xe_sriov_scheduling@equal-throughput:
- shard-adlp: NOTRUN -> [DMESG-FAIL][187] ([Intel XE#3868] / [Intel XE#5213]) +1 other test dmesg-fail
[187]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-adlp-2/igt@xe_sriov_scheduling@equal-throughput.html
- shard-dg2-set2: NOTRUN -> [SKIP][188] ([Intel XE#4351])
[188]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-dg2-436/igt@xe_sriov_scheduling@equal-throughput.html
- shard-lnl: NOTRUN -> [SKIP][189] ([Intel XE#4351])
[189]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-lnl-3/igt@xe_sriov_scheduling@equal-throughput.html
* igt@xe_sriov_scheduling@nonpreempt-engine-resets@numvfs-random:
- shard-adlp: [PASS][190] -> [ABORT][191] ([Intel XE#4917]) +1 other test abort
[190]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-adlp-2/igt@xe_sriov_scheduling@nonpreempt-engine-resets@numvfs-random.html
[191]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-adlp-1/igt@xe_sriov_scheduling@nonpreempt-engine-resets@numvfs-random.html
#### Possible fixes ####
* igt@kms_async_flips@async-flip-with-page-flip-events-linear@pipe-c-edp-1:
- shard-lnl: [FAIL][192] ([Intel XE#5993]) -> [PASS][193] +3 other tests pass
[192]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-lnl-5/igt@kms_async_flips@async-flip-with-page-flip-events-linear@pipe-c-edp-1.html
[193]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-lnl-3/igt@kms_async_flips@async-flip-with-page-flip-events-linear@pipe-c-edp-1.html
* igt@kms_ccs@random-ccs-data-4-tiled-dg2-rc-ccs-cc:
- shard-dg2-set2: [INCOMPLETE][194] ([Intel XE#1727] / [Intel XE#3113] / [Intel XE#4212] / [Intel XE#4345] / [Intel XE#4522]) -> [PASS][195]
[194]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-dg2-434/igt@kms_ccs@random-ccs-data-4-tiled-dg2-rc-ccs-cc.html
[195]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-dg2-436/igt@kms_ccs@random-ccs-data-4-tiled-dg2-rc-ccs-cc.html
* igt@kms_ccs@random-ccs-data-4-tiled-dg2-rc-ccs-cc@pipe-b-dp-4:
- shard-dg2-set2: [INCOMPLETE][196] ([Intel XE#1727] / [Intel XE#3113] / [Intel XE#4212] / [Intel XE#4522]) -> [PASS][197]
[196]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-dg2-434/igt@kms_ccs@random-ccs-data-4-tiled-dg2-rc-ccs-cc@pipe-b-dp-4.html
[197]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-dg2-436/igt@kms_ccs@random-ccs-data-4-tiled-dg2-rc-ccs-cc@pipe-b-dp-4.html
* igt@kms_cursor_legacy@2x-long-cursor-vs-flip-legacy:
- shard-bmg: [SKIP][198] ([Intel XE#2291]) -> [PASS][199] +2 other tests pass
[198]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-bmg-6/igt@kms_cursor_legacy@2x-long-cursor-vs-flip-legacy.html
[199]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-bmg-3/igt@kms_cursor_legacy@2x-long-cursor-vs-flip-legacy.html
* igt@kms_cursor_legacy@flip-vs-cursor-atomic:
- shard-bmg: [FAIL][200] ([Intel XE#4633]) -> [PASS][201]
[200]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-bmg-5/igt@kms_cursor_legacy@flip-vs-cursor-atomic.html
[201]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-bmg-4/igt@kms_cursor_legacy@flip-vs-cursor-atomic.html
* igt@kms_cursor_legacy@flip-vs-cursor-varying-size:
- shard-bmg: [DMESG-WARN][202] ([Intel XE#5354]) -> [PASS][203]
[202]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-bmg-2/igt@kms_cursor_legacy@flip-vs-cursor-varying-size.html
[203]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-bmg-2/igt@kms_cursor_legacy@flip-vs-cursor-varying-size.html
* igt@kms_dither@fb-8bpc-vs-panel-6bpc:
- shard-bmg: [SKIP][204] ([Intel XE#1340]) -> [PASS][205]
[204]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-bmg-6/igt@kms_dither@fb-8bpc-vs-panel-6bpc.html
[205]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-bmg-8/igt@kms_dither@fb-8bpc-vs-panel-6bpc.html
* igt@kms_dp_linktrain_fallback@dp-fallback:
- shard-bmg: [SKIP][206] ([Intel XE#4294]) -> [PASS][207]
[206]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-bmg-6/igt@kms_dp_linktrain_fallback@dp-fallback.html
[207]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-bmg-3/igt@kms_dp_linktrain_fallback@dp-fallback.html
* igt@kms_flip@2x-nonexisting-fb:
- shard-bmg: [SKIP][208] ([Intel XE#2316]) -> [PASS][209] +7 other tests pass
[208]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-bmg-6/igt@kms_flip@2x-nonexisting-fb.html
[209]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-bmg-2/igt@kms_flip@2x-nonexisting-fb.html
* igt@kms_flip@flip-vs-expired-vblank-interruptible:
- shard-lnl: [FAIL][210] ([Intel XE#301]) -> [PASS][211] +1 other test pass
[210]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-lnl-1/igt@kms_flip@flip-vs-expired-vblank-interruptible.html
[211]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-lnl-4/igt@kms_flip@flip-vs-expired-vblank-interruptible.html
* igt@kms_flip@flip-vs-rmfb:
- shard-adlp: [DMESG-WARN][212] ([Intel XE#4543] / [Intel XE#5208]) -> [PASS][213]
[212]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-adlp-1/igt@kms_flip@flip-vs-rmfb.html
[213]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-adlp-3/igt@kms_flip@flip-vs-rmfb.html
* igt@kms_flip@flip-vs-rmfb-interruptible:
- shard-adlp: [DMESG-WARN][214] ([Intel XE#5208]) -> [PASS][215]
[214]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-adlp-3/igt@kms_flip@flip-vs-rmfb-interruptible.html
[215]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-adlp-4/igt@kms_flip@flip-vs-rmfb-interruptible.html
* igt@kms_flip@flip-vs-rmfb@b-hdmi-a1:
- shard-adlp: [DMESG-WARN][216] ([Intel XE#4543]) -> [PASS][217] +10 other tests pass
[216]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-adlp-1/igt@kms_flip@flip-vs-rmfb@b-hdmi-a1.html
[217]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-adlp-3/igt@kms_flip@flip-vs-rmfb@b-hdmi-a1.html
* igt@kms_flip_tiling@flip-change-tiling:
- shard-adlp: [DMESG-FAIL][218] ([Intel XE#4543]) -> [PASS][219] +1 other test pass
[218]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-adlp-6/igt@kms_flip_tiling@flip-change-tiling.html
[219]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-adlp-3/igt@kms_flip_tiling@flip-change-tiling.html
* igt@kms_hdr@invalid-hdr:
- shard-dg2-set2: [SKIP][220] ([Intel XE#455]) -> [PASS][221]
[220]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-dg2-434/igt@kms_hdr@invalid-hdr.html
[221]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-dg2-463/igt@kms_hdr@invalid-hdr.html
* igt@kms_hdr@invalid-metadata-sizes:
- shard-bmg: [SKIP][222] ([Intel XE#1503]) -> [PASS][223]
[222]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-bmg-6/igt@kms_hdr@invalid-metadata-sizes.html
[223]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-bmg-7/igt@kms_hdr@invalid-metadata-sizes.html
* igt@kms_plane_multiple@2x-tiling-4:
- shard-bmg: [SKIP][224] ([Intel XE#4596]) -> [PASS][225]
[224]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-bmg-6/igt@kms_plane_multiple@2x-tiling-4.html
[225]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-bmg-3/igt@kms_plane_multiple@2x-tiling-4.html
* igt@kms_plane_scaling@2x-scaler-multi-pipe:
- shard-bmg: [SKIP][226] ([Intel XE#2571]) -> [PASS][227]
[226]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-bmg-6/igt@kms_plane_scaling@2x-scaler-multi-pipe.html
[227]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-bmg-4/igt@kms_plane_scaling@2x-scaler-multi-pipe.html
* igt@kms_plane_scaling@planes-upscale-20x20:
- shard-adlp: [DMESG-WARN][228] ([Intel XE#2953] / [Intel XE#4173]) -> [PASS][229] +6 other tests pass
[228]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-adlp-8/igt@kms_plane_scaling@planes-upscale-20x20.html
[229]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-adlp-9/igt@kms_plane_scaling@planes-upscale-20x20.html
* igt@kms_pm_dc@dc5-dpms:
- shard-lnl: [FAIL][230] ([Intel XE#718]) -> [PASS][231]
[230]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-lnl-8/igt@kms_pm_dc@dc5-dpms.html
[231]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-lnl-7/igt@kms_pm_dc@dc5-dpms.html
* igt@xe_exec_basic@multigpu-no-exec-null-defer-mmap:
- shard-adlp: [SKIP][232] ([Intel XE#1392] / [Intel XE#5575]) -> [PASS][233]
[232]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-adlp-4/igt@xe_exec_basic@multigpu-no-exec-null-defer-mmap.html
[233]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-adlp-3/igt@xe_exec_basic@multigpu-no-exec-null-defer-mmap.html
* igt@xe_exec_basic@multigpu-no-exec-userptr-invalidate-race:
- shard-dg2-set2: [SKIP][234] ([Intel XE#1392]) -> [PASS][235] +3 other tests pass
[234]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-dg2-432/igt@xe_exec_basic@multigpu-no-exec-userptr-invalidate-race.html
[235]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-dg2-463/igt@xe_exec_basic@multigpu-no-exec-userptr-invalidate-race.html
* igt@xe_exec_reset@parallel-gt-reset:
- shard-adlp: [DMESG-WARN][236] ([Intel XE#3876]) -> [PASS][237]
[236]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-adlp-6/igt@xe_exec_reset@parallel-gt-reset.html
[237]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-adlp-6/igt@xe_exec_reset@parallel-gt-reset.html
- shard-bmg: [DMESG-WARN][238] ([Intel XE#3876]) -> [PASS][239]
[238]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-bmg-8/igt@xe_exec_reset@parallel-gt-reset.html
[239]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-bmg-2/igt@xe_exec_reset@parallel-gt-reset.html
* igt@xe_exec_threads@threads-mixed-shared-vm-rebind:
- shard-adlp: [DMESG-FAIL][240] ([Intel XE#3876]) -> [PASS][241]
[240]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-adlp-6/igt@xe_exec_threads@threads-mixed-shared-vm-rebind.html
[241]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-adlp-8/igt@xe_exec_threads@threads-mixed-shared-vm-rebind.html
- shard-bmg: [DMESG-FAIL][242] ([Intel XE#3876]) -> [PASS][243]
[242]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-bmg-8/igt@xe_exec_threads@threads-mixed-shared-vm-rebind.html
[243]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-bmg-2/igt@xe_exec_threads@threads-mixed-shared-vm-rebind.html
* igt@xe_module_load@load:
- shard-bmg: ([SKIP][244], [PASS][245], [PASS][246], [PASS][247], [PASS][248], [PASS][249], [PASS][250], [PASS][251], [PASS][252], [PASS][253], [PASS][254], [PASS][255], [PASS][256], [PASS][257], [PASS][258], [PASS][259], [PASS][260], [PASS][261], [PASS][262], [PASS][263], [PASS][264], [PASS][265], [PASS][266], [PASS][267], [PASS][268], [PASS][269]) ([Intel XE#2457]) -> ([PASS][270], [PASS][271], [PASS][272], [PASS][273], [PASS][274], [PASS][275], [PASS][276], [PASS][277], [PASS][278], [PASS][279], [PASS][280], [PASS][281], [PASS][282], [PASS][283], [PASS][284], [PASS][285], [PASS][286], [PASS][287], [PASS][288], [PASS][289], [PASS][290], [PASS][291], [PASS][292], [PASS][293], [PASS][294])
[244]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-bmg-3/igt@xe_module_load@load.html
[245]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-bmg-8/igt@xe_module_load@load.html
[246]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-bmg-7/igt@xe_module_load@load.html
[247]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-bmg-7/igt@xe_module_load@load.html
[248]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-bmg-5/igt@xe_module_load@load.html
[249]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-bmg-5/igt@xe_module_load@load.html
[250]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-bmg-8/igt@xe_module_load@load.html
[251]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-bmg-8/igt@xe_module_load@load.html
[252]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-bmg-8/igt@xe_module_load@load.html
[253]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-bmg-1/igt@xe_module_load@load.html
[254]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-bmg-1/igt@xe_module_load@load.html
[255]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-bmg-3/igt@xe_module_load@load.html
[256]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-bmg-3/igt@xe_module_load@load.html
[257]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-bmg-2/igt@xe_module_load@load.html
[258]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-bmg-3/igt@xe_module_load@load.html
[259]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-bmg-6/igt@xe_module_load@load.html
[260]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-bmg-5/igt@xe_module_load@load.html
[261]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-bmg-4/igt@xe_module_load@load.html
[262]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-bmg-4/igt@xe_module_load@load.html
[263]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-bmg-4/igt@xe_module_load@load.html
[264]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-bmg-6/igt@xe_module_load@load.html
[265]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-bmg-6/igt@xe_module_load@load.html
[266]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-bmg-7/igt@xe_module_load@load.html
[267]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-bmg-1/igt@xe_module_load@load.html
[268]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-bmg-2/igt@xe_module_load@load.html
[269]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-bmg-2/igt@xe_module_load@load.html
[270]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-bmg-1/igt@xe_module_load@load.html
[271]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-bmg-1/igt@xe_module_load@load.html
[272]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-bmg-4/igt@xe_module_load@load.html
[273]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-bmg-5/igt@xe_module_load@load.html
[274]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-bmg-5/igt@xe_module_load@load.html
[275]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-bmg-5/igt@xe_module_load@load.html
[276]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-bmg-2/igt@xe_module_load@load.html
[277]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-bmg-7/igt@xe_module_load@load.html
[278]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-bmg-3/igt@xe_module_load@load.html
[279]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-bmg-2/igt@xe_module_load@load.html
[280]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-bmg-8/igt@xe_module_load@load.html
[281]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-bmg-8/igt@xe_module_load@load.html
[282]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-bmg-7/igt@xe_module_load@load.html
[283]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-bmg-7/igt@xe_module_load@load.html
[284]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-bmg-7/igt@xe_module_load@load.html
[285]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-bmg-6/igt@xe_module_load@load.html
[286]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-bmg-6/igt@xe_module_load@load.html
[287]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-bmg-6/igt@xe_module_load@load.html
[288]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-bmg-3/igt@xe_module_load@load.html
[289]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-bmg-3/igt@xe_module_load@load.html
[290]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-bmg-8/igt@xe_module_load@load.html
[291]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-bmg-1/igt@xe_module_load@load.html
[292]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-bmg-4/igt@xe_module_load@load.html
[293]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-bmg-4/igt@xe_module_load@load.html
[294]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-bmg-2/igt@xe_module_load@load.html
- shard-dg2-set2: ([PASS][295], [PASS][296], [PASS][297], [PASS][298], [PASS][299], [PASS][300], [PASS][301], [PASS][302], [PASS][303], [PASS][304], [PASS][305], [PASS][306], [PASS][307], [PASS][308], [PASS][309], [SKIP][310], [PASS][311], [PASS][312], [PASS][313], [PASS][314], [PASS][315], [PASS][316], [PASS][317], [PASS][318], [PASS][319], [PASS][320]) ([Intel XE#378]) -> ([PASS][321], [PASS][322], [PASS][323], [PASS][324], [PASS][325], [PASS][326], [PASS][327], [PASS][328], [PASS][329], [PASS][330], [PASS][331], [PASS][332], [PASS][333], [PASS][334], [PASS][335], [PASS][336], [PASS][337], [PASS][338], [PASS][339], [PASS][340], [PASS][341], [PASS][342], [PASS][343], [PASS][344], [PASS][345])
[295]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-dg2-433/igt@xe_module_load@load.html
[296]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-dg2-433/igt@xe_module_load@load.html
[297]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-dg2-466/igt@xe_module_load@load.html
[298]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-dg2-433/igt@xe_module_load@load.html
[299]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-dg2-436/igt@xe_module_load@load.html
[300]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-dg2-466/igt@xe_module_load@load.html
[301]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-dg2-435/igt@xe_module_load@load.html
[302]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-dg2-435/igt@xe_module_load@load.html
[303]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-dg2-435/igt@xe_module_load@load.html
[304]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-dg2-464/igt@xe_module_load@load.html
[305]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-dg2-432/igt@xe_module_load@load.html
[306]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-dg2-432/igt@xe_module_load@load.html
[307]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-dg2-464/igt@xe_module_load@load.html
[308]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-dg2-464/igt@xe_module_load@load.html
[309]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-dg2-432/igt@xe_module_load@load.html
[310]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-dg2-463/igt@xe_module_load@load.html
[311]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-dg2-436/igt@xe_module_load@load.html
[312]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-dg2-463/igt@xe_module_load@load.html
[313]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-dg2-466/igt@xe_module_load@load.html
[314]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-dg2-463/igt@xe_module_load@load.html
[315]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-dg2-463/igt@xe_module_load@load.html
[316]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-dg2-463/igt@xe_module_load@load.html
[317]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-dg2-436/igt@xe_module_load@load.html
[318]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-dg2-436/igt@xe_module_load@load.html
[319]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-dg2-435/igt@xe_module_load@load.html
[320]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-dg2-434/igt@xe_module_load@load.html
[321]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-dg2-463/igt@xe_module_load@load.html
[322]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-dg2-463/igt@xe_module_load@load.html
[323]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-dg2-464/igt@xe_module_load@load.html
[324]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-dg2-464/igt@xe_module_load@load.html
[325]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-dg2-433/igt@xe_module_load@load.html
[326]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-dg2-432/igt@xe_module_load@load.html
[327]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-dg2-432/igt@xe_module_load@load.html
[328]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-dg2-434/igt@xe_module_load@load.html
[329]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-dg2-436/igt@xe_module_load@load.html
[330]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-dg2-435/igt@xe_module_load@load.html
[331]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-dg2-436/igt@xe_module_load@load.html
[332]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-dg2-433/igt@xe_module_load@load.html
[333]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-dg2-463/igt@xe_module_load@load.html
[334]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-dg2-464/igt@xe_module_load@load.html
[335]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-dg2-466/igt@xe_module_load@load.html
[336]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-dg2-432/igt@xe_module_load@load.html
[337]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-dg2-434/igt@xe_module_load@load.html
[338]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-dg2-435/igt@xe_module_load@load.html
[339]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-dg2-466/igt@xe_module_load@load.html
[340]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-dg2-466/igt@xe_module_load@load.html
[341]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-dg2-435/igt@xe_module_load@load.html
[342]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-dg2-435/igt@xe_module_load@load.html
[343]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-dg2-436/igt@xe_module_load@load.html
[344]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-dg2-432/igt@xe_module_load@load.html
[345]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-dg2-433/igt@xe_module_load@load.html
* igt@xe_pm@s2idle-vm-bind-userptr:
- shard-adlp: [INCOMPLETE][346] ([Intel XE#4504]) -> [PASS][347]
[346]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-adlp-6/igt@xe_pm@s2idle-vm-bind-userptr.html
[347]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-adlp-9/igt@xe_pm@s2idle-vm-bind-userptr.html
#### Warnings ####
* igt@kms_async_flips@async-flip-suspend-resume:
- shard-adlp: [DMESG-WARN][348] ([Intel XE#4543]) -> [DMESG-FAIL][349] ([Intel XE#4543]) +1 other test dmesg-fail
[348]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-adlp-4/igt@kms_async_flips@async-flip-suspend-resume.html
[349]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-adlp-8/igt@kms_async_flips@async-flip-suspend-resume.html
* igt@kms_content_protection@srm:
- shard-bmg: [SKIP][350] ([Intel XE#2341]) -> [FAIL][351] ([Intel XE#1178])
[350]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-bmg-6/igt@kms_content_protection@srm.html
[351]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-bmg-4/igt@kms_content_protection@srm.html
* igt@kms_flip@flip-vs-suspend-interruptible:
- shard-adlp: [DMESG-WARN][352] ([Intel XE#2953] / [Intel XE#4173] / [Intel XE#4543]) -> [DMESG-WARN][353] ([Intel XE#4543])
[352]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-adlp-8/igt@kms_flip@flip-vs-suspend-interruptible.html
[353]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-adlp-8/igt@kms_flip@flip-vs-suspend-interruptible.html
* igt@kms_flip_scaled_crc@flip-64bpp-xtile-to-16bpp-xtile-downscaling:
- shard-adlp: [DMESG-FAIL][354] ([Intel XE#4543] / [Intel XE#4921]) -> [DMESG-FAIL][355] ([Intel XE#4921]) +1 other test dmesg-fail
[354]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-adlp-8/igt@kms_flip_scaled_crc@flip-64bpp-xtile-to-16bpp-xtile-downscaling.html
[355]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-adlp-1/igt@kms_flip_scaled_crc@flip-64bpp-xtile-to-16bpp-xtile-downscaling.html
* igt@kms_frontbuffer_tracking@drrs-2p-primscrn-spr-indfb-draw-blt:
- shard-bmg: [SKIP][356] ([Intel XE#2311]) -> [SKIP][357] ([Intel XE#2312]) +2 other tests skip
[356]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-bmg-3/igt@kms_frontbuffer_tracking@drrs-2p-primscrn-spr-indfb-draw-blt.html
[357]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-bmg-6/igt@kms_frontbuffer_tracking@drrs-2p-primscrn-spr-indfb-draw-blt.html
* igt@kms_frontbuffer_tracking@fbc-2p-scndscrn-pri-indfb-draw-render:
- shard-bmg: [SKIP][358] ([Intel XE#2312]) -> [SKIP][359] ([Intel XE#5390])
[358]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-bmg-6/igt@kms_frontbuffer_tracking@fbc-2p-scndscrn-pri-indfb-draw-render.html
[359]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-bmg-4/igt@kms_frontbuffer_tracking@fbc-2p-scndscrn-pri-indfb-draw-render.html
* igt@kms_frontbuffer_tracking@fbc-2p-scndscrn-shrfb-msflip-blt:
- shard-bmg: [SKIP][360] ([Intel XE#5390]) -> [SKIP][361] ([Intel XE#2312]) +2 other tests skip
[360]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-bmg-5/igt@kms_frontbuffer_tracking@fbc-2p-scndscrn-shrfb-msflip-blt.html
[361]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-bmg-6/igt@kms_frontbuffer_tracking@fbc-2p-scndscrn-shrfb-msflip-blt.html
* igt@kms_frontbuffer_tracking@fbcdrrs-2p-scndscrn-cur-indfb-draw-mmap-wc:
- shard-bmg: [SKIP][362] ([Intel XE#2312]) -> [SKIP][363] ([Intel XE#2311]) +5 other tests skip
[362]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-bmg-6/igt@kms_frontbuffer_tracking@fbcdrrs-2p-scndscrn-cur-indfb-draw-mmap-wc.html
[363]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-bmg-7/igt@kms_frontbuffer_tracking@fbcdrrs-2p-scndscrn-cur-indfb-draw-mmap-wc.html
* igt@kms_frontbuffer_tracking@fbcpsr-2p-pri-indfb-multidraw:
- shard-bmg: [SKIP][364] ([Intel XE#2312]) -> [SKIP][365] ([Intel XE#2313]) +4 other tests skip
[364]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-bmg-6/igt@kms_frontbuffer_tracking@fbcpsr-2p-pri-indfb-multidraw.html
[365]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-bmg-1/igt@kms_frontbuffer_tracking@fbcpsr-2p-pri-indfb-multidraw.html
* igt@kms_frontbuffer_tracking@psr-2p-scndscrn-pri-indfb-draw-mmap-wc:
- shard-bmg: [SKIP][366] ([Intel XE#2313]) -> [SKIP][367] ([Intel XE#2312]) +3 other tests skip
[366]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-bmg-8/igt@kms_frontbuffer_tracking@psr-2p-scndscrn-pri-indfb-draw-mmap-wc.html
[367]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-bmg-6/igt@kms_frontbuffer_tracking@psr-2p-scndscrn-pri-indfb-draw-mmap-wc.html
* igt@kms_tiled_display@basic-test-pattern-with-chamelium:
- shard-dg2-set2: [SKIP][368] ([Intel XE#362]) -> [SKIP][369] ([Intel XE#1500])
[368]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-dg2-463/igt@kms_tiled_display@basic-test-pattern-with-chamelium.html
[369]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-dg2-466/igt@kms_tiled_display@basic-test-pattern-with-chamelium.html
* igt@xe_exec_basic@multigpu-many-execqueues-many-vm-bindexecqueue-userptr-invalidate:
- shard-adlp: [DMESG-FAIL][370] ([Intel XE#5213]) -> [SKIP][371] ([Intel XE#1392] / [Intel XE#5575])
[370]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-adlp-6/igt@xe_exec_basic@multigpu-many-execqueues-many-vm-bindexecqueue-userptr-invalidate.html
[371]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-adlp-1/igt@xe_exec_basic@multigpu-many-execqueues-many-vm-bindexecqueue-userptr-invalidate.html
* igt@xe_exec_basic@multigpu-once-bindexecqueue-rebind:
- shard-adlp: [SKIP][372] ([Intel XE#1392] / [Intel XE#5575]) -> [DMESG-FAIL][373] ([Intel XE#5213]) +1 other test dmesg-fail
[372]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-adlp-3/igt@xe_exec_basic@multigpu-once-bindexecqueue-rebind.html
[373]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-adlp-3/igt@xe_exec_basic@multigpu-once-bindexecqueue-rebind.html
* igt@xe_exec_basic@multigpu-once-null-defer-mmap:
- shard-adlp: [DMESG-WARN][374] ([Intel XE#4917] / [Intel XE#5213]) -> [SKIP][375] ([Intel XE#1392] / [Intel XE#5575])
[374]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-adlp-6/igt@xe_exec_basic@multigpu-once-null-defer-mmap.html
[375]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-adlp-3/igt@xe_exec_basic@multigpu-once-null-defer-mmap.html
* igt@xe_exec_reset@cm-cat-error:
- shard-adlp: [DMESG-FAIL][376] ([Intel XE#3868]) -> [DMESG-WARN][377] ([Intel XE#3868])
[376]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-adlp-1/igt@xe_exec_reset@cm-cat-error.html
[377]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-adlp-3/igt@xe_exec_reset@cm-cat-error.html
* igt@xe_fault_injection@probe-fail-guc-xe_guc_ct_send_recv:
- shard-adlp: [ABORT][378] ([Intel XE#5530]) -> [ABORT][379] ([Intel XE#4917] / [Intel XE#5530])
[378]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-adlp-2/igt@xe_fault_injection@probe-fail-guc-xe_guc_ct_send_recv.html
[379]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-adlp-3/igt@xe_fault_injection@probe-fail-guc-xe_guc_ct_send_recv.html
- shard-bmg: [ABORT][380] ([Intel XE#4917] / [Intel XE#5466] / [Intel XE#5530]) -> [ABORT][381] ([Intel XE#5466] / [Intel XE#5530])
[380]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-3886-f929eafc95342ea5377f79705864d50dd325c79f/shard-bmg-7/igt@xe_fault_injection@probe-fail-guc-xe_guc_ct_send_recv.html
[381]: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/shard-bmg-5/igt@xe_fault_injection@probe-fail-guc-xe_guc_ct_send_recv.html
{name}: an element marked this way is suppressed, meaning it is ignored when
computing the overall status of the difference (SUCCESS, WARNING, or FAILURE).
[Intel XE#1091]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/1091
[Intel XE#1122]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/1122
[Intel XE#1124]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/1124
[Intel XE#1126]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/1126
[Intel XE#1129]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/1129
[Intel XE#1138]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/1138
[Intel XE#1173]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/1173
[Intel XE#1178]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/1178
[Intel XE#1340]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/1340
[Intel XE#1392]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/1392
[Intel XE#1397]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/1397
[Intel XE#1401]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/1401
[Intel XE#1406]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/1406
[Intel XE#1421]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/1421
[Intel XE#1435]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/1435
[Intel XE#1489]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/1489
[Intel XE#1499]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/1499
[Intel XE#1500]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/1500
[Intel XE#1503]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/1503
[Intel XE#1512]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/1512
[Intel XE#1727]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/1727
[Intel XE#1745]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/1745
[Intel XE#2191]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/2191
[Intel XE#2234]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/2234
[Intel XE#2252]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/2252
[Intel XE#2284]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/2284
[Intel XE#2286]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/2286
[Intel XE#2291]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/2291
[Intel XE#2293]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/2293
[Intel XE#2311]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/2311
[Intel XE#2312]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/2312
[Intel XE#2313]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/2313
[Intel XE#2314]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/2314
[Intel XE#2316]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/2316
[Intel XE#2320]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/2320
[Intel XE#2322]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/2322
[Intel XE#2327]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/2327
[Intel XE#2330]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/2330
[Intel XE#2341]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/2341
[Intel XE#2352]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/2352
[Intel XE#2360]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/2360
[Intel XE#2375]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/2375
[Intel XE#2380]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/2380
[Intel XE#2413]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/2413
[Intel XE#2414]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/2414
[Intel XE#2457]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/2457
[Intel XE#2486]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/2486
[Intel XE#2571]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/2571
[Intel XE#261]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/261
[Intel XE#2652]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/2652
[Intel XE#2685]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/2685
[Intel XE#2849]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/2849
[Intel XE#2850]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/2850
[Intel XE#288]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/288
[Intel XE#2887]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/2887
[Intel XE#2893]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/2893
[Intel XE#2894]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/2894
[Intel XE#2907]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/2907
[Intel XE#2925]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/2925
[Intel XE#2934]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/2934
[Intel XE#2939]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/2939
[Intel XE#2953]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/2953
[Intel XE#301]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/301
[Intel XE#306]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/306
[Intel XE#308]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/308
[Intel XE#309]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/309
[Intel XE#310]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/310
[Intel XE#3113]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/3113
[Intel XE#316]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/316
[Intel XE#323]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/323
[Intel XE#3307]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/3307
[Intel XE#3374]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/3374
[Intel XE#346]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/346
[Intel XE#352]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/352
[Intel XE#3544]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/3544
[Intel XE#356]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/356
[Intel XE#3573]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/3573
[Intel XE#362]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/362
[Intel XE#366]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/366
[Intel XE#367]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/367
[Intel XE#373]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/373
[Intel XE#378]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/378
[Intel XE#3868]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/3868
[Intel XE#3876]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/3876
[Intel XE#3884]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/3884
[Intel XE#4130]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/4130
[Intel XE#4173]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/4173
[Intel XE#4212]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/4212
[Intel XE#4227]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/4227
[Intel XE#4294]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/4294
[Intel XE#4345]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/4345
[Intel XE#4351]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/4351
[Intel XE#4354]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/4354
[Intel XE#4422]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/4422
[Intel XE#4504]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/4504
[Intel XE#4522]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/4522
[Intel XE#4543]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/4543
[Intel XE#455]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/455
[Intel XE#4596]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/4596
[Intel XE#4608]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/4608
[Intel XE#4633]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/4633
[Intel XE#4665]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/4665
[Intel XE#4733]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/4733
[Intel XE#4814]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/4814
[Intel XE#4837]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/4837
[Intel XE#4915]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/4915
[Intel XE#4917]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/4917
[Intel XE#4921]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/4921
[Intel XE#4943]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/4943
[Intel XE#5007]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/5007
[Intel XE#5208]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/5208
[Intel XE#5213]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/5213
[Intel XE#5354]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/5354
[Intel XE#5390]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/5390
[Intel XE#5395]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/5395
[Intel XE#5466]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/5466
[Intel XE#5530]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/5530
[Intel XE#5561]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/5561
[Intel XE#5564]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/5564
[Intel XE#5565]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/5565
[Intel XE#5575]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/5575
[Intel XE#5580]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/5580
[Intel XE#5594]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/5594
[Intel XE#560]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/560
[Intel XE#5614]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/5614
[Intel XE#5626]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/5626
[Intel XE#5632]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/5632
[Intel XE#5742]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/5742
[Intel XE#584]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/584
[Intel XE#5890]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/5890
[Intel XE#5893]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/5893
[Intel XE#5993]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/5993
[Intel XE#6070]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/6070
[Intel XE#6168]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/6168
[Intel XE#6249]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/6249
[Intel XE#6267]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/6267
[Intel XE#651]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/651
[Intel XE#653]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/653
[Intel XE#656]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/656
[Intel XE#658]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/658
[Intel XE#688]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/688
[Intel XE#718]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/718
[Intel XE#734]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/734
[Intel XE#787]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/787
[Intel XE#929]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/929
[Intel XE#944]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/944
Build changes
-------------
* IGT: IGT_8578 -> IGT_8579
* Linux: xe-3886-f929eafc95342ea5377f79705864d50dd325c79f -> xe-pw-154627v9
IGT_8578: fd1f7d868fb6df61db6891ce0bbc8fcd26a0e7ba @ https://gitlab.freedesktop.org/drm/igt-gpu-tools.git
IGT_8579: 8579
xe-3886-f929eafc95342ea5377f79705864d50dd325c79f: f929eafc95342ea5377f79705864d50dd325c79f
xe-pw-154627v9: 154627v9
== Logs ==
For more details see: https://intel-gfx-ci.01.org/tree/intel-xe/xe-pw-154627v9/index.html
[-- Attachment #2: Type: text/html, Size: 110577 bytes --]
^ permalink raw reply [flat|nested] 42+ messages in thread