Intel-XE Archive on lore.kernel.org
From: "Lis, Tomasz" <tomasz.lis@intel.com>
To: Matthew Brost <matthew.brost@intel.com>,
	<intel-xe@lists.freedesktop.org>
Subject: Re: [PATCH v8 26/33] drm/xe: Move queue init before LRC creation
Date: Tue, 7 Oct 2025 16:36:01 +0200	[thread overview]
Message-ID: <6d581695-b640-4308-86e6-a77322d7cab9@intel.com> (raw)
In-Reply-To: <20251007130505.2694829-27-matthew.brost@intel.com>


On 10/7/2025 3:04 PM, Matthew Brost wrote:
> A queue must be in the submission backend's tracking state before the
> LRC is created to avoid a race condition where the LRC's GGTT addresses
> are not properly fixed up during VF post-migration recovery.
>
> Move the queue initialization—which adds the queue to the submission
> backend's tracking state—before LRC creation.
>
> Also wait for pending GGTT fixups to complete before allocating LRCs, so
> LRC creation cannot race with them.
>
> v2:
>   - Wait on VF GGTT fixes before creating LRC (testing)
> v5:
>   - Adjust comment in code (Tomasz)
>   - Reduce race window
> v7:
>   - Only wake up waiters in recovery path (CI)
>   - Wake up waiters on abort
>   - Use GT warn on (Michal)
>   - Fix kernel doc for LRC ring size function (Tomasz)
> v8:
>   - Guard against migration not supported or no memirq (CI)
Reviewed-by: Tomasz Lis <tomasz.lis@intel.com>
> Signed-off-by: Matthew Brost <matthew.brost@intel.com>
> ---
>   drivers/gpu/drm/xe/xe_exec_queue.c        | 45 ++++++++++++++++++-----
>   drivers/gpu/drm/xe/xe_execlist.c          |  2 +-
>   drivers/gpu/drm/xe/xe_gt_sriov_vf.c       | 45 ++++++++++++++++++++++-
>   drivers/gpu/drm/xe/xe_gt_sriov_vf.h       |  2 +
>   drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h |  5 +++
>   drivers/gpu/drm/xe/xe_guc_submit.c        |  2 +-
>   drivers/gpu/drm/xe/xe_lrc.h               | 10 +++++
>   7 files changed, 98 insertions(+), 13 deletions(-)
>
> diff --git a/drivers/gpu/drm/xe/xe_exec_queue.c b/drivers/gpu/drm/xe/xe_exec_queue.c
> index 7621089a47fe..90cbc95f8e2e 100644
> --- a/drivers/gpu/drm/xe/xe_exec_queue.c
> +++ b/drivers/gpu/drm/xe/xe_exec_queue.c
> @@ -15,6 +15,7 @@
>   #include "xe_dep_scheduler.h"
>   #include "xe_device.h"
>   #include "xe_gt.h"
> +#include "xe_gt_sriov_vf.h"
>   #include "xe_hw_engine_class_sysfs.h"
>   #include "xe_hw_engine_group.h"
>   #include "xe_hw_fence.h"
> @@ -205,17 +206,34 @@ static int __xe_exec_queue_init(struct xe_exec_queue *q, u32 exec_queue_flags)
>   	if (!(exec_queue_flags & EXEC_QUEUE_FLAG_KERNEL))
>   		flags |= XE_LRC_CREATE_USER_CTX;
>   
> +	err = q->ops->init(q);
> +	if (err)
> +		return err;
> +
> +	/*
> +	 * This must occur after q->ops->init to avoid race conditions during VF
> +	 * post-migration recovery, as the fixups for the LRC GGTT addresses
> +	 * depend on the queue being present in the backend tracking structure.
> +	 *
> +	 * In addition to the above, we must wait on in-flight GGTT changes to
> +	 * avoid writing out stale values here. This wait is race-free only if
> +	 * the function can detect migration immediately from the moment the
> +	 * vCPU resumes execution.
> +	 */
>   	for (i = 0; i < q->width; ++i) {
> -		q->lrc[i] = xe_lrc_create(q->hwe, q->vm, SZ_16K, q->msix_vec, flags);
> -		if (IS_ERR(q->lrc[i])) {
> -			err = PTR_ERR(q->lrc[i]);
> +		struct xe_lrc *lrc;
> +
> +		xe_gt_sriov_vf_wait_valid_ggtt(q->gt);
> +		lrc = xe_lrc_create(q->hwe, q->vm, xe_lrc_ring_size(),
> +				    q->msix_vec, flags);
> +		if (IS_ERR(lrc)) {
> +			err = PTR_ERR(lrc);
>   			goto err_lrc;
>   		}
> -	}
>   
> -	err = q->ops->init(q);
> -	if (err)
> -		goto err_lrc;
> +		/* Pairs with READ_ONCE in xe_exec_queue_contexts_hwsp_rebase */
> +		WRITE_ONCE(q->lrc[i], lrc);
> +	}
>   
>   	return 0;
>   
> @@ -1121,9 +1139,16 @@ int xe_exec_queue_contexts_hwsp_rebase(struct xe_exec_queue *q, void *scratch)
>   	int err = 0;
>   
>   	for (i = 0; i < q->width; ++i) {
> -		xe_lrc_update_memirq_regs_with_address(q->lrc[i], q->hwe, scratch);
> -		xe_lrc_update_hwctx_regs_with_address(q->lrc[i]);
> -		err = xe_lrc_setup_wa_bb_with_scratch(q->lrc[i], q->hwe, scratch);
> +		struct xe_lrc *lrc;
> +
> +		/* Pairs with WRITE_ONCE in __xe_exec_queue_init */
> +		lrc = READ_ONCE(q->lrc[i]);
> +		if (!lrc)
> +			continue;
> +
> +		xe_lrc_update_memirq_regs_with_address(lrc, q->hwe, scratch);
> +		xe_lrc_update_hwctx_regs_with_address(lrc);
> +		err = xe_lrc_setup_wa_bb_with_scratch(lrc, q->hwe, scratch);
>   		if (err)
>   			break;
>   	}
> diff --git a/drivers/gpu/drm/xe/xe_execlist.c b/drivers/gpu/drm/xe/xe_execlist.c
> index f83d421ac9d3..769d05517f93 100644
> --- a/drivers/gpu/drm/xe/xe_execlist.c
> +++ b/drivers/gpu/drm/xe/xe_execlist.c
> @@ -339,7 +339,7 @@ static int execlist_exec_queue_init(struct xe_exec_queue *q)
>   	const struct drm_sched_init_args args = {
>   		.ops = &drm_sched_ops,
>   		.num_rqs = 1,
> -		.credit_limit = q->lrc[0]->ring.size / MAX_JOB_SIZE_BYTES,
> +		.credit_limit = xe_lrc_ring_size() / MAX_JOB_SIZE_BYTES,
>   		.hang_limit = XE_SCHED_HANG_LIMIT,
>   		.timeout = XE_SCHED_JOB_TIMEOUT,
>   		.name = q->hwe->name,
> diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
> index 4a3394566c24..dc589cf6ec98 100644
> --- a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
> +++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
> @@ -480,6 +480,12 @@ static int vf_get_ggtt_info(struct xe_gt *gt)
>   		xe_tile_sriov_vf_fixup_ggtt_nodes_locked(gt_to_tile(gt), shift);
>   	}
>   
> +	if (xe_sriov_vf_migration_supported(gt_to_xe(gt))) {
> +		WRITE_ONCE(gt->sriov.vf.migration.ggtt_need_fixes, false);
> +		smp_wmb();	/* Ensure above write visible before wake */
> +		wake_up_all(&gt->sriov.vf.migration.wq);
> +	}
> +
>   	return err;
>   }
>   
> @@ -734,7 +740,8 @@ static void vf_start_migration_recovery(struct xe_gt *gt)
>   	    !gt->sriov.vf.migration.recovery_teardown) {
>   		gt->sriov.vf.migration.recovery_queued = true;
>   		WRITE_ONCE(gt->sriov.vf.migration.recovery_inprogress, true);
> -		smp_wmb();	/* Ensure above write visable before wake */
> +		WRITE_ONCE(gt->sriov.vf.migration.ggtt_need_fixes, true);
> +		smp_wmb();	/* Ensure above writes visible before wake */
>   
>   		xe_guc_ct_wake_waiters(&gt->uc.guc.ct);
>   
> @@ -1143,8 +1150,11 @@ static void vf_post_migration_abort(struct xe_gt *gt)
>   {
>   	spin_lock_irq(&gt->sriov.vf.migration.lock);
>   	WRITE_ONCE(gt->sriov.vf.migration.recovery_inprogress, false);
> +	WRITE_ONCE(gt->sriov.vf.migration.ggtt_need_fixes, false);
>   	spin_unlock_irq(&gt->sriov.vf.migration.lock);
>   
> +	wake_up_all(&gt->sriov.vf.migration.wq);
> +
>   	xe_guc_submit_pause_abort(&gt->uc.guc);
>   }
>   
> @@ -1253,6 +1263,7 @@ int xe_gt_sriov_vf_init_early(struct xe_gt *gt)
>   	gt->sriov.vf.migration.scratch = buf;
>   	spin_lock_init(&gt->sriov.vf.migration.lock);
>   	INIT_WORK(&gt->sriov.vf.migration.worker, migration_worker_func);
> +	init_waitqueue_head(&gt->sriov.vf.migration.wq);
>   
>   	return 0;
>   }
> @@ -1302,3 +1313,35 @@ bool xe_gt_sriov_vf_recovery_pending(struct xe_gt *gt)
>   
>   	return READ_ONCE(gt->sriov.vf.migration.recovery_inprogress);
>   }
> +
> +static bool vf_valid_ggtt(struct xe_gt *gt)
> +{
> +	struct xe_memirq *memirq = &gt_to_tile(gt)->memirq;
> +	bool irq_pending = xe_device_uses_memirq(gt_to_xe(gt)) &&
> +		xe_memirq_guc_sw_int_0_irq_pending(memirq, &gt->uc.guc);
> +
> +	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
> +
> +	if (irq_pending || READ_ONCE(gt->sriov.vf.migration.ggtt_need_fixes))
> +		return false;
> +
> +	return true;
> +}
> +
> +/**
> + * xe_gt_sriov_vf_wait_valid_ggtt() - VF wait for valid GGTT addresses
> + * @gt: the &xe_gt
> + */
> +void xe_gt_sriov_vf_wait_valid_ggtt(struct xe_gt *gt)
> +{
> +	int ret;
> +
> +	if (!IS_SRIOV_VF(gt_to_xe(gt)) ||
> +	    !xe_sriov_vf_migration_supported(gt_to_xe(gt)))
> +		return;
> +
> +	ret = wait_event_interruptible_timeout(gt->sriov.vf.migration.wq,
> +					       vf_valid_ggtt(gt),
> +					       HZ * 5);
> +	xe_gt_WARN_ON(gt, !ret);
> +}
> diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf.h b/drivers/gpu/drm/xe/xe_gt_sriov_vf.h
> index 1d2eaa52f804..af40276790fa 100644
> --- a/drivers/gpu/drm/xe/xe_gt_sriov_vf.h
> +++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf.h
> @@ -38,4 +38,6 @@ void xe_gt_sriov_vf_print_config(struct xe_gt *gt, struct drm_printer *p);
>   void xe_gt_sriov_vf_print_runtime(struct xe_gt *gt, struct drm_printer *p);
>   void xe_gt_sriov_vf_print_version(struct xe_gt *gt, struct drm_printer *p);
>   
> +void xe_gt_sriov_vf_wait_valid_ggtt(struct xe_gt *gt);
> +
>   #endif
> diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h b/drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h
> index c1bd6fdd9ab1..f0bc45a782a4 100644
> --- a/drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h
> +++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h
> @@ -8,6 +8,7 @@
>   
>   #include <linux/rwsem.h>
>   #include <linux/types.h>
> +#include <linux/wait.h>
>   #include <linux/workqueue.h>
>   #include "xe_uc_fw_types.h"
>   
> @@ -50,6 +51,8 @@ struct xe_gt_sriov_vf_migration {
>   	struct work_struct worker;
>   	/** @lock: Protects recovery_queued, teardown */
>   	spinlock_t lock;
> +	/** @wq: wait queue for migration fixes */
> +	wait_queue_head_t wq;
>   	/** @scratch: Scratch memory for VF recovery */
>   	void *scratch;
>   	/** @recovery_teardown: VF post migration recovery is being torn down */
> @@ -58,6 +61,8 @@ struct xe_gt_sriov_vf_migration {
>   	bool recovery_queued;
>   	/** @recovery_inprogress: VF post migration recovery in progress */
>   	bool recovery_inprogress;
> +	/** @ggtt_need_fixes: VF GGTT needs fixes */
> +	bool ggtt_need_fixes;
>   };
>   
>   /**
> diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
> index d07ff014492e..be7aa1e89d13 100644
> --- a/drivers/gpu/drm/xe/xe_guc_submit.c
> +++ b/drivers/gpu/drm/xe/xe_guc_submit.c
> @@ -1670,7 +1670,7 @@ static int guc_exec_queue_init(struct xe_exec_queue *q)
>   	timeout = (q->vm && xe_vm_in_lr_mode(q->vm)) ? MAX_SCHEDULE_TIMEOUT :
>   		  msecs_to_jiffies(q->sched_props.job_timeout_ms);
>   	err = xe_sched_init(&ge->sched, &drm_sched_ops, &xe_sched_ops,
> -			    NULL, q->lrc[0]->ring.size / MAX_JOB_SIZE_BYTES, 64,
> +			    NULL, xe_lrc_ring_size() / MAX_JOB_SIZE_BYTES, 64,
>   			    timeout, guc_to_gt(guc)->ordered_wq, NULL,
>   			    q->name, gt_to_xe(q->gt)->drm.dev);
>   	if (err)
> diff --git a/drivers/gpu/drm/xe/xe_lrc.h b/drivers/gpu/drm/xe/xe_lrc.h
> index 21a3daab0154..2fb628da5c43 100644
> --- a/drivers/gpu/drm/xe/xe_lrc.h
> +++ b/drivers/gpu/drm/xe/xe_lrc.h
> @@ -76,6 +76,16 @@ static inline void xe_lrc_put(struct xe_lrc *lrc)
>   	kref_put(&lrc->refcount, xe_lrc_destroy);
>   }
>   
> +/**
> + * xe_lrc_ring_size() - Xe LRC ring size
> + *
> + * Return: Size of LRC ring buffer
> + */
> +static inline size_t xe_lrc_ring_size(void)
> +{
> +	return SZ_16K;
> +}
> +
>   size_t xe_gt_lrc_size(struct xe_gt *gt, enum xe_engine_class class);
>   u32 xe_lrc_pphwsp_offset(struct xe_lrc *lrc);
>   u32 xe_lrc_regs_offset(struct xe_lrc *lrc);
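
For readers less familiar with the two synchronization idioms the patch relies
on: the GGTT wait is a flag-plus-waitqueue handshake (ggtt_need_fixes paired
with the new wq), and the q->lrc[i] handling is lock-free pointer publication
(WRITE_ONCE paired with READ_ONCE so recovery skips slots not yet populated).
Below is a minimal userspace sketch of both idioms, with pthread primitives
and C11 atomics standing in for the kernel's waitqueue, smp_wmb() and the
ONCE macros; all names here are hypothetical, not the driver's API:

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* --- Flag + waitqueue handshake (analogue of ggtt_need_fixes + wq) --- */

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool need_fixes;		/* stands in for ggtt_need_fixes */

static void recovery_begin(void)	/* recovery start: mark GGTT stale */
{
	pthread_mutex_lock(&lock);
	need_fixes = true;
	pthread_mutex_unlock(&lock);
}

static void recovery_done(void)		/* fixups complete: wake all waiters */
{
	pthread_mutex_lock(&lock);
	need_fixes = false;
	pthread_cond_broadcast(&cond);	/* wake_up_all() analogue */
	pthread_mutex_unlock(&lock);
}

static void wait_valid_ggtt(void)	/* block until addresses are valid */
{
	pthread_mutex_lock(&lock);
	while (need_fixes)
		pthread_cond_wait(&cond, &lock);
	pthread_mutex_unlock(&lock);
}

/* --- Pointer publication (analogue of WRITE_ONCE/READ_ONCE on q->lrc[i]) --- */

struct lrc { int id; };
static _Atomic(struct lrc *) slot;	/* stands in for one q->lrc[i] slot */

static void publish_lrc(struct lrc *l)
{
	/*
	 * Publish only a fully constructed object. Release/acquire here is
	 * slightly stronger than the kernel's WRITE_ONCE/READ_ONCE, which
	 * only guarantee tear-free single-copy access.
	 */
	atomic_store_explicit(&slot, l, memory_order_release);
}

static void fixup_lrc(void)
{
	/* Recovery skips slots not yet published. */
	struct lrc *l = atomic_load_explicit(&slot, memory_order_acquire);

	if (!l)
		return;		/* not created yet: nothing to fix up */
	printf("fixing up lrc %d\n", l->id);
}

int main(void)
{
	struct lrc l = { .id = 0 };

	recovery_begin();
	recovery_done();
	wait_valid_ggtt();	/* returns immediately: flag already clear */
	publish_lrc(&l);
	fixup_lrc();
	return 0;
}

Note that in the actual patch the wake side additionally orders the flag
write with smp_wmb() before wake_up_all(), and the wait side bounds the
sleep with wait_event_interruptible_timeout() plus a GT warn on timeout,
rather than blocking indefinitely as this sketch does.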

Thread overview: 45+ messages
2025-10-07 13:04 [PATCH v8 00/33] VF migration redesign Matthew Brost
2025-10-07 13:04 ` [PATCH v8 01/33] drm/xe: Add NULL checks to scratch LRC allocation Matthew Brost
2025-10-07 13:04 ` [PATCH v8 02/33] drm/xe: Save off position in ring in which a job was programmed Matthew Brost
2025-10-07 13:04 ` [PATCH v8 03/33] drm/xe/guc: Track pending-enable source in submission state Matthew Brost
2025-10-07 13:04 ` [PATCH v8 04/33] drm/xe: Track LR jobs in DRM scheduler pending list Matthew Brost
2025-10-07 13:04 ` [PATCH v8 05/33] drm/xe: Return first unsignaled job first pending job helper Matthew Brost
2025-10-08  8:27   ` Matthew Auld
2025-10-07 13:04 ` [PATCH v8 06/33] drm/xe: Don't change LRC ring head on job resubmission Matthew Brost
2025-10-07 13:04 ` [PATCH v8 07/33] drm/xe: Make LRC W/A scratch buffer usage consistent Matthew Brost
2025-10-07 13:04 ` [PATCH v8 08/33] drm/xe/vf: Add xe_gt_recovery_pending helper Matthew Brost
2025-10-07 13:04 ` [PATCH v8 09/33] drm/xe/vf: Make VF recovery run on per-GT worker Matthew Brost
2025-10-07 13:04 ` [PATCH v8 10/33] drm/xe/vf: Abort H2G sends during VF post-migration recovery Matthew Brost
2025-10-07 13:04 ` [PATCH v8 11/33] drm/xe/vf: Remove memory allocations from VF post migration recovery Matthew Brost
2025-10-07 13:04 ` [PATCH v8 12/33] drm/xe: Move GGTT lock init to alloc Matthew Brost
2025-10-07 13:37   ` Michal Wajdeczko
2025-10-07 13:04 ` [PATCH v8 13/33] drm/xe/vf: Close multi-GT GGTT shift race Matthew Brost
2025-10-07 13:04 ` [PATCH v8 14/33] drm/xe/vf: Teardown VF post migration worker on driver unload Matthew Brost
2025-10-07 13:04 ` [PATCH v8 15/33] drm/xe/vf: Don't allow GT reset to be queued during VF post migration recovery Matthew Brost
2025-10-07 13:04 ` [PATCH v8 16/33] drm/xe/vf: Wakeup in GuC backend on " Matthew Brost
2025-10-07 13:04 ` [PATCH v8 17/33] drm/xe/vf: Avoid indefinite blocking in preempt rebind worker for VFs supporting migration Matthew Brost
2025-10-07 13:04 ` [PATCH v8 18/33] drm/xe/vf: Use GUC_HXG_TYPE_EVENT for GuC context register Matthew Brost
2025-10-07 13:04 ` [PATCH v8 19/33] drm/xe/vf: Flush and stop CTs in VF post migration recovery Matthew Brost
2025-10-07 13:04 ` [PATCH v8 20/33] drm/xe/vf: Reset TLB invalidations during " Matthew Brost
2025-10-07 13:04 ` [PATCH v8 21/33] drm/xe/vf: Kickstart after resfix in " Matthew Brost
2025-10-07 13:04 ` [PATCH v8 22/33] drm/xe: Add CTB_H2G_BUFFER_OFFSET define Matthew Brost
2025-10-07 13:34   ` Michal Wajdeczko
2025-10-07 13:04 ` [PATCH v8 23/33] drm/xe/vf: Start CTs before resfix VF post migration recovery Matthew Brost
2025-10-07 14:24   ` Michal Wajdeczko
2025-10-07 13:04 ` [PATCH v8 24/33] drm/xe/vf: Abort VF post migration recovery on failure Matthew Brost
2025-10-07 13:04 ` [PATCH v8 25/33] drm/xe/vf: Replay GuC submission state on pause / unpause Matthew Brost
2025-10-07 13:04 ` [PATCH v8 26/33] drm/xe: Move queue init before LRC creation Matthew Brost
2025-10-07 14:36   ` Lis, Tomasz [this message]
2025-10-07 13:04 ` [PATCH v8 27/33] drm/xe/vf: Add debug prints for GuC replaying state during VF recovery Matthew Brost
2025-10-07 13:05 ` [PATCH v8 28/33] drm/xe/vf: Workaround for race condition in GuC firmware during VF pause Matthew Brost
2025-10-07 13:05 ` [PATCH v8 29/33] drm/xe: Use PPGTT addresses for TLB invalidation to avoid GGTT fixups Matthew Brost
2025-10-07 13:05 ` [PATCH v8 30/33] drm/xe/vf: Use primary GT ordered work queue on media GT on PTL VF Matthew Brost
2025-10-08 17:34   ` Lucas De Marchi
2025-10-07 13:05 ` [PATCH v8 31/33] drm/xe/vf: Ensure media GT VF recovery runs after primary GT on PTL Matthew Brost
2025-10-07 13:05 ` [PATCH v8 32/33] drm/xe/vf: Rebase CCS save/restore BB GGTT addresses Matthew Brost
2025-10-07 13:05 ` [PATCH v8 33/33] drm/xe/guc: Increase wait timeout to 2sec after BUSY reply from GuC Matthew Brost
2025-10-07 13:17 ` ✗ CI.checkpatch: warning for VF migration redesign (rev8) Patchwork
2025-10-07 13:18 ` ✓ CI.KUnit: success " Patchwork
2025-10-07 13:57 ` ✓ Xe.CI.BAT: " Patchwork
2025-10-07 17:02 ` ✗ Xe.CI.Full: failure " Patchwork
2025-10-07 20:49   ` Matthew Brost
