From: Michal Wajdeczko <michal.wajdeczko@intel.com>
To: Matthew Brost <matthew.brost@intel.com>,
<intel-xe@lists.freedesktop.org>
Subject: Re: [PATCH v3 12/36] drm/xe/vf: Add xe_gt_recovery_inprogress helper
Date: Mon, 29 Sep 2025 10:04:51 +0200 [thread overview]
Message-ID: <2ccc4c58-5ca0-48ef-be4a-902073f7b8df@intel.com> (raw)
In-Reply-To: <20250929025542.1486303-13-matthew.brost@intel.com>
On 9/29/2025 4:55 AM, Matthew Brost wrote:
> Add xe_gt_recovery_inprogress helper.
>
> This helper serves as the singular point to determine whether a GT
> recovery is currently in progress. Expected callers include the GuC CT
> layer and the GuC submission layer. Atomically visible as soon as vCPUs
> are unhalted until VF recovery completes.
>
> v3:
> - Add GT layer xe_gt_recovery_inprogress (Michal)
> - Don't blow up if memirq not enabled (CI)
> - Add __memirq_received with clear argument (Michal)
> - xe_memirq_sw_int_0_irq_pending rename (Michal)
> - Use offset in xe_memirq_sw_int_0_irq_pending (Michal)
>
> Signed-off-by: Matthew Brost <matthew.brost@intel.com>
> ---
> drivers/gpu/drm/xe/xe_gt.h | 13 ++++++
> drivers/gpu/drm/xe/xe_gt_sriov_vf.c | 25 ++++++++++++
> drivers/gpu/drm/xe/xe_gt_sriov_vf.h | 2 +
> drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h | 10 +++++
> drivers/gpu/drm/xe/xe_memirq.c | 48 +++++++++++++++++++++--
> drivers/gpu/drm/xe/xe_memirq.h | 2 +
> 6 files changed, 96 insertions(+), 4 deletions(-)
>
> diff --git a/drivers/gpu/drm/xe/xe_gt.h b/drivers/gpu/drm/xe/xe_gt.h
> index 41880979f4de..ee0239b2f48c 100644
> --- a/drivers/gpu/drm/xe/xe_gt.h
> +++ b/drivers/gpu/drm/xe/xe_gt.h
> @@ -12,6 +12,7 @@
>
> #include "xe_device.h"
> #include "xe_device_types.h"
> +#include "xe_gt_sriov_vf.h"
> #include "xe_hw_engine.h"
>
> #define for_each_hw_engine(hwe__, gt__, id__) \
> @@ -124,4 +125,16 @@ static inline bool xe_gt_is_usm_hwe(struct xe_gt *gt, struct xe_hw_engine *hwe)
> hwe->instance == gt->usm.reserved_bcs_instance;
> }
>
> +/**
> + * xe_gt_recovery_inprogress() - GT recovery in progress
> + * @gt: the &xe_gt
> + *
> + * Return: True if GT recovery in progress, False otherwise
> + */
> +static inline bool xe_gt_recovery_inprogress(struct xe_gt *gt)
> +{
> + return IS_SRIOV_VF(gt_to_xe(gt)) &&
> + xe_gt_sriov_vf_recovery_inprogress(gt);
> +}
> +
> #endif
> diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
> index 016c867e5e2b..71309219a4b7 100644
> --- a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
> +++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
> @@ -26,6 +26,7 @@
> #include "xe_guc_hxg_helpers.h"
> #include "xe_guc_relay.h"
> #include "xe_lrc.h"
> +#include "xe_memirq.h"
> #include "xe_mmio.h"
> #include "xe_sriov.h"
> #include "xe_sriov_vf.h"
> @@ -828,6 +829,7 @@ void xe_gt_sriov_vf_migrated_event_handler(struct xe_gt *gt)
> struct xe_device *xe = gt_to_xe(gt);
>
> xe_gt_assert(gt, IS_SRIOV_VF(xe));
> + xe_gt_assert(gt, xe_gt_sriov_vf_recovery_inprogress(gt));
do we really need this?
with the current code this function will be limited to memirq platforms only
>
> set_bit(gt->info.id, &xe->sriov.vf.migration.gt_flags);
> /*
> @@ -1172,3 +1174,26 @@ void xe_gt_sriov_vf_print_version(struct xe_gt *gt, struct drm_printer *p)
> drm_printf(p, "\thandshake:\t%u.%u\n",
> pf_version->major, pf_version->minor);
> }
> +
> +/**
> + * xe_gt_sriov_vf_recovery_inprogress() - VF post migration recovery in progress
> + * @gt: the &xe_gt
> + *
> + * Return: True if VF post migration recovery in progress, False otherwise
> + */
> +bool xe_gt_sriov_vf_recovery_inprogress(struct xe_gt *gt)
> +{
> + struct xe_memirq *memirq = >_to_tile(gt)->memirq;
> +
> + xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
> +
> + /*
> + * In practice, VF migration will never be supported on platforms
> + * without memirq, avoid CI blowing up on older VF platforms.
> + */
maybe instead of closing that door simply code this as:
/* early detection until recovery starts */
if (xe_device_uses_memirq(gt_to_xe(gt)) &&
xe_memirq_sw_int_0_irq_pending(memirq, >->uc.guc))
return true;
return READ_ONCE(gt->sriov.vf.migration.recovery_inprogress);
> + if (!xe_device_uses_memirq(gt_to_xe(gt)))
> + return false;
> +
> + return (xe_memirq_sw_int_0_irq_pending(memirq, >->uc.guc) ||
> + READ_ONCE(gt->sriov.vf.migration.recovery_inprogress));
> +}
> diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf.h b/drivers/gpu/drm/xe/xe_gt_sriov_vf.h
> index 0af1dc769fe0..bb5f8eace19b 100644
> --- a/drivers/gpu/drm/xe/xe_gt_sriov_vf.h
> +++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf.h
> @@ -25,6 +25,8 @@ void xe_gt_sriov_vf_default_lrcs_hwsp_rebase(struct xe_gt *gt);
> int xe_gt_sriov_vf_notify_resfix_done(struct xe_gt *gt);
> void xe_gt_sriov_vf_migrated_event_handler(struct xe_gt *gt);
>
> +bool xe_gt_sriov_vf_recovery_inprogress(struct xe_gt *gt);
> +
> u32 xe_gt_sriov_vf_gmdid(struct xe_gt *gt);
> u16 xe_gt_sriov_vf_guc_ids(struct xe_gt *gt);
> u64 xe_gt_sriov_vf_lmem(struct xe_gt *gt);
> diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h b/drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h
> index d95857bd789b..7b10b8e1e10e 100644
> --- a/drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h
> +++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h
> @@ -49,6 +49,14 @@ struct xe_gt_sriov_vf_runtime {
> } *regs;
> };
>
> +/**
> + * xe_gt_sriov_vf_migration - VF migration data.
> + */
> +struct xe_gt_sriov_vf_migration {
> + /** @recovery_inprogress: VF post migration recovery in progress */
> + bool recovery_inprogress;
> +};
> +
> /**
> * struct xe_gt_sriov_vf - GT level VF virtualization data.
> */
> @@ -61,6 +69,8 @@ struct xe_gt_sriov_vf {
> struct xe_gt_sriov_vf_selfconfig self_config;
> /** @runtime: runtime data retrieved from the PF. */
> struct xe_gt_sriov_vf_runtime runtime;
> + /** @migration: migration data for the VF. */
> + struct xe_gt_sriov_vf_migration migration;
> };
>
> #endif
> diff --git a/drivers/gpu/drm/xe/xe_memirq.c b/drivers/gpu/drm/xe/xe_memirq.c
> index 49c45ec3e83c..b681c67dcace 100644
> --- a/drivers/gpu/drm/xe/xe_memirq.c
> +++ b/drivers/gpu/drm/xe/xe_memirq.c
> @@ -398,8 +398,9 @@ void xe_memirq_postinstall(struct xe_memirq *memirq)
> memirq_set_enable(memirq, true);
> }
>
> -static bool memirq_received(struct xe_memirq *memirq, struct iosys_map *vector,
> - u16 offset, const char *name)
> +static bool __memirq_received(struct xe_memirq *memirq,
> + struct iosys_map *vector, u16 offset,
> + const char *name, bool clear)
> {
> u8 value;
>
> @@ -409,12 +410,26 @@ static bool memirq_received(struct xe_memirq *memirq, struct iosys_map *vector,
> memirq_err_ratelimited(memirq,
> "Unexpected memirq value %#x from %s at %u\n",
> value, name, offset);
> - iosys_map_wr(vector, offset, u8, 0x00);
> + if (clear)
> + iosys_map_wr(vector, offset, u8, 0x00);
> }
>
> return value;
> }
>
> +static bool memirq_received_noclear(struct xe_memirq *memirq,
> + struct iosys_map *vector,
> + u16 offset, const char *name)
> +{
> + return __memirq_received(memirq, vector, offset, name, false);
> +}
> +
> +static bool memirq_received(struct xe_memirq *memirq, struct iosys_map *vector,
> + u16 offset, const char *name)
> +{
> + return __memirq_received(memirq, vector, offset, name, true);
> +}
> +
> static void memirq_dispatch_engine(struct xe_memirq *memirq, struct iosys_map *status,
> struct xe_hw_engine *hwe)
> {
> @@ -434,8 +449,16 @@ static void memirq_dispatch_guc(struct xe_memirq *memirq, struct iosys_map *stat
> if (memirq_received(memirq, status, ilog2(GUC_INTR_GUC2HOST), name))
> xe_guc_irq_handler(guc, GUC_INTR_GUC2HOST);
>
> - if (memirq_received(memirq, status, ilog2(GUC_INTR_SW_INT_0), name))
> + /*
> + * We must wait to perform the clear operation until after
> + * xe_gt_sriov_vf_start_migration_recovery() runs, to avoid race
> + * conditions where xe_gt_sriov_vf_recovery_inprogress() returns false.
> + */
> + if (memirq_received_noclear(memirq, status, ilog2(GUC_INTR_SW_INT_0),
> + name)) {
> xe_guc_irq_handler(guc, GUC_INTR_SW_INT_0);
> + iosys_map_wr(status, ilog2(GUC_INTR_SW_INT_0), u8, 0x00);
> + }
> }
>
> /**
> @@ -460,6 +483,23 @@ void xe_memirq_hwe_handler(struct xe_memirq *memirq, struct xe_hw_engine *hwe)
> }
> }
>
> +/**
> + * xe_memirq_sw_int_0_irq_pending() - SW_INT_0 IRQ is pending
> + * @memirq: the &xe_memirq
> + * @guc: the &xe_guc to check for IRQ
> + *
> + * Return: True if SW_INT_0 IRQ is pending on @guc, False otherwise
> + */
> +bool xe_memirq_sw_int_0_irq_pending(struct xe_memirq *memirq,struct xe_guc *guc)
> +{
> + struct xe_gt *gt = guc_to_gt(guc);
> + u32 offset = xe_gt_is_media_type(gt) ? ilog2(INTR_MGUC) : ilog2(INTR_GUC);
> + struct iosys_map map = IOSYS_MAP_INIT_OFFSET(&memirq->status, offset * SZ_16);
> +
> + return memirq_received_noclear(memirq, &map, ilog2(GUC_INTR_SW_INT_0),
> + guc_name(guc));
> +}
> +
> /**
> * xe_memirq_handler - The `Memory Based Interrupts`_ Handler.
> * @memirq: the &xe_memirq
> diff --git a/drivers/gpu/drm/xe/xe_memirq.h b/drivers/gpu/drm/xe/xe_memirq.h
> index 06130650e9d6..f87e1274b730 100644
> --- a/drivers/gpu/drm/xe/xe_memirq.h
> +++ b/drivers/gpu/drm/xe/xe_memirq.h
> @@ -25,4 +25,6 @@ void xe_memirq_handler(struct xe_memirq *memirq);
>
> int xe_memirq_init_guc(struct xe_memirq *memirq, struct xe_guc *guc);
>
> +bool xe_memirq_sw_int_0_irq_pending(struct xe_memirq *memirq,struct xe_guc *guc);
> +
> #endif
next prev parent reply other threads:[~2025-09-29 8:05 UTC|newest]
Thread overview: 83+ messages / expand[flat|nested] mbox.gz Atom feed top
2025-09-29 2:55 [PATCH v3 00/36] VF migration redesign Matthew Brost
2025-09-29 2:55 ` [PATCH v3 01/36] drm/xe: Add NULL checks to scratch LRC allocation Matthew Brost
2025-09-30 2:06 ` Lis, Tomasz
2025-09-30 22:53 ` Matthew Brost
2025-09-29 2:55 ` [PATCH v3 02/36] drm/xe/vf: Lock querying GGTT config during driver init Matthew Brost
2025-09-29 7:42 ` Michal Wajdeczko
2025-09-29 12:15 ` Matthew Brost
2025-09-30 0:42 ` Lis, Tomasz
2025-09-30 10:25 ` Michal Wajdeczko
2025-09-29 8:13 ` Ville Syrjälä
2025-09-30 13:22 ` Lis, Tomasz
2025-09-29 2:55 ` [PATCH v3 03/36] Revert "drm/xe/vf: Rebase exec queue parallel commands during migration recovery" Matthew Brost
2025-09-30 15:22 ` Michal Wajdeczko
2025-09-29 2:55 ` [PATCH v3 04/36] Revert "drm/xe/vf: Post migration, repopulate ring area for pending request" Matthew Brost
2025-09-30 15:24 ` Michal Wajdeczko
2025-09-29 2:55 ` [PATCH v3 05/36] Revert "drm/xe/vf: Fixup CTB send buffer messages after migration" Matthew Brost
2025-09-30 15:27 ` Michal Wajdeczko
2025-09-29 2:55 ` [PATCH v3 06/36] drm/xe: Save off position in ring in which a job was programmed Matthew Brost
2025-09-29 2:55 ` [PATCH v3 07/36] drm/xe/guc: Track pending-enable source in submission state Matthew Brost
2025-09-29 2:55 ` [PATCH v3 08/36] drm/xe: Track LR jobs in DRM scheduler pending list Matthew Brost
2025-09-29 2:55 ` [PATCH v3 09/36] drm/xe: Don't change LRC ring head on job resubmission Matthew Brost
2025-09-30 2:38 ` Lis, Tomasz
2025-09-29 2:55 ` [PATCH v3 10/36] drm/xe: Make LRC W/A scratch buffer usage consistent Matthew Brost
2025-09-29 2:55 ` [PATCH v3 11/36] drm/xe/guc: Document GuC submission backend Matthew Brost
2025-09-30 3:28 ` Lis, Tomasz
2025-09-30 6:30 ` Matthew Brost
2025-09-29 2:55 ` [PATCH v3 12/36] drm/xe/vf: Add xe_gt_recovery_inprogress helper Matthew Brost
2025-09-29 8:04 ` Michal Wajdeczko [this message]
2025-09-29 8:52 ` Matthew Brost
2025-09-29 2:55 ` [PATCH v3 13/36] drm/xe/vf: Make VF recovery run on per-GT worker Matthew Brost
2025-09-30 14:47 ` Lis, Tomasz
2025-09-29 2:55 ` [PATCH v3 14/36] drm/xe/vf: Abort H2G sends during VF post-migration recovery Matthew Brost
2025-09-29 8:17 ` Michal Wajdeczko
2025-09-29 2:55 ` [PATCH v3 15/36] drm/xe/vf: Remove memory allocations from VF post migration recovery Matthew Brost
2025-09-30 15:00 ` Lis, Tomasz
2025-09-29 2:55 ` [PATCH v3 16/36] drm/xe/vf: Close multi-GT GGTT shift race Matthew Brost
2025-09-29 8:44 ` Michal Wajdeczko
2025-09-29 12:31 ` Matthew Brost
2025-09-29 2:55 ` [PATCH v3 17/36] drm/xe/vf: Teardown VF post migration worker on driver unload Matthew Brost
2025-09-30 16:24 ` Lis, Tomasz
2025-09-29 2:55 ` [PATCH v3 18/36] drm/xe/vf: Don't allow GT reset to be queued during VF post migration recovery Matthew Brost
2025-09-29 9:17 ` Michal Wajdeczko
2025-09-29 12:50 ` Matthew Brost
2025-09-29 2:55 ` [PATCH v3 19/36] drm/xe/vf: Wakeup in GuC backend on " Matthew Brost
2025-09-29 2:55 ` [PATCH v3 20/36] drm/xe/vf: Avoid indefinite blocking in preempt rebind worker for VFs supporting migration Matthew Brost
2025-10-01 13:45 ` Lis, Tomasz
2025-10-01 13:56 ` Lis, Tomasz
2025-09-29 2:55 ` [PATCH v3 21/36] drm/xe/vf: Extra debug on GGTT shift Matthew Brost
2025-09-29 2:55 ` [PATCH v3 22/36] drm/xe/vf: Use GUC_HXG_TYPE_EVENT for GuC context register Matthew Brost
2025-09-29 2:55 ` [PATCH v3 23/36] drm/xe/vf: Flush and stop CTs in VF post migration recovery Matthew Brost
2025-09-29 21:31 ` Michal Wajdeczko
2025-09-29 2:55 ` [PATCH v3 24/36] drm/xe/vf: Reset TLB invalidations during " Matthew Brost
2025-10-01 13:53 ` Lis, Tomasz
2025-09-29 2:55 ` [PATCH v3 25/36] drm/xe/vf: Kickstart after resfix in " Matthew Brost
2025-09-29 2:55 ` [PATCH v3 26/36] drm/xe/vf: Start CTs before resfix " Matthew Brost
2025-09-29 21:49 ` Michal Wajdeczko
2025-09-30 6:26 ` Matthew Brost
2025-09-29 2:55 ` [PATCH v3 27/36] drm/xe/vf: Abort VF post migration recovery on failure Matthew Brost
2025-10-01 14:06 ` Lis, Tomasz
2025-09-29 2:55 ` [PATCH v3 28/36] drm/xe/vf: Replay GuC submission state on pause / unpause Matthew Brost
2025-10-01 14:37 ` Lis, Tomasz
2025-09-29 2:55 ` [PATCH v3 29/36] drm/xe: Move queue init before LRC creation Matthew Brost
2025-10-02 0:44 ` Lis, Tomasz
2025-10-02 7:36 ` Matthew Brost
2025-10-02 14:54 ` Lis, Tomasz
2025-09-29 2:55 ` [PATCH v3 30/36] drm/xe/vf: Add debug prints for GuC replaying state during VF recovery Matthew Brost
2025-10-02 1:02 ` Lis, Tomasz
2025-09-29 2:55 ` [PATCH v3 31/36] drm/xe/vf: Workaround for race condition in GuC firmware during VF pause Matthew Brost
2025-10-02 1:09 ` Lis, Tomasz
2025-10-02 6:12 ` Matthew Brost
2025-09-29 2:55 ` [PATCH v3 32/36] drm/xe: Use PPGTT addresses for TLB invalidation to avoid GGTT fixups Matthew Brost
2025-10-02 1:25 ` Lis, Tomasz
2025-09-29 2:55 ` [PATCH v3 33/36] drm/xe/vf: Use primary GT ordered work queue on media GT on PTL VF Matthew Brost
2025-09-29 2:55 ` [PATCH v3 34/36] drm/xe/vf: Ensure media GT VF recovery runs after primary GT on PTL Matthew Brost
2025-09-29 2:55 ` [PATCH v3 35/36] drm/xe/vf: Rebase CCS save/restore BB GGTT addresses Matthew Brost
2025-09-29 2:55 ` [PATCH v3 36/36] drm/xe/guc: Increase wait timeout to 2sec after BUSY reply from GuC Matthew Brost
2025-09-29 15:17 ` K V P, Satyanarayana
2025-09-30 12:39 ` Matthew Brost
2025-09-30 13:38 ` Michal Wajdeczko
2025-09-30 14:39 ` Matthew Brost
2025-09-29 3:06 ` ✗ CI.checkpatch: warning for VF migration redesign (rev3) Patchwork
2025-09-29 3:08 ` ✓ CI.KUnit: success " Patchwork
2025-09-29 6:28 ` ✗ Xe.CI.Full: failure " Patchwork
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=2ccc4c58-5ca0-48ef-be4a-902073f7b8df@intel.com \
--to=michal.wajdeczko@intel.com \
--cc=intel-xe@lists.freedesktop.org \
--cc=matthew.brost@intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox