From: Matthew Brost <matthew.brost@intel.com>
To: Michal Wajdeczko <michal.wajdeczko@intel.com>
Cc: <intel-xe@lists.freedesktop.org>
Subject: Re: [PATCH v4 24/34] drm/xe/vf: Start CTs before resfix VF post migration recovery
Date: Sat, 4 Oct 2025 23:49:52 -0700 [thread overview]
Message-ID: <aOIVEF6cPfw+BH56@lstrano-desk.jf.intel.com> (raw)
In-Reply-To: <0cfeca19-dd2c-4cc4-8725-c7526fe0611f@intel.com>
On Fri, Oct 03, 2025 at 05:10:12PM +0200, Michal Wajdeczko wrote:
>
>
> On 10/2/2025 7:53 AM, Matthew Brost wrote:
> > Before `resfix`, all CTs stuck in the H2G queue need to be squashed, as
> > they may contain stale or invalid data.
> >
> > Starting the CTs clears all H2Gs in the queue. Any lost H2Gs are
> > resubmitted by the GuC submission state machine.
> >
> > v3:
> > - Don't mess with head / tail values (Michal)
> > v4:
> > - Don't mess with broke (Michal)
> > - Add CTB_H2G_BUFFER_OFFSET (Michal)
>
> I guess those small fixes shall be done separately
>
Are you suggesting I break this into a different patch? Seems overkill and
not particularly how I want to spend my time. This was basically an
unrelated nit of a suggestion to add CTB_H2G_BUFFER_OFFSET which I
absorbed, and now a further nit to break it into a different patch. This is
a great way to get me to just abandon this series.
> >
> > Signed-off-by: Matthew Brost <matthew.brost@intel.com>
> > ---
> > drivers/gpu/drm/xe/xe_gt_sriov_vf.c | 7 +++
> > drivers/gpu/drm/xe/xe_guc_ct.c | 70 +++++++++++++++++++++--------
> > drivers/gpu/drm/xe/xe_guc_ct.h | 1 +
> > 3 files changed, 60 insertions(+), 18 deletions(-)
> >
> > diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
> > index c7bd1f6e9dca..55662b9a4f5b 100644
> > --- a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
> > +++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
> > @@ -1137,6 +1137,11 @@ static int vf_post_migration_fixups(struct xe_gt *gt)
> > return 0;
> > }
> >
> > +static void vf_post_migration_rearm(struct xe_gt *gt)
> > +{
> > + xe_guc_ct_restart(>->uc.guc.ct);
> > +}
> > +
> > static void vf_post_migration_kickstart(struct xe_gt *gt)
> > {
> > xe_guc_submit_unpause(>->uc.guc);
> > @@ -1188,6 +1193,8 @@ static void vf_post_migration_recovery(struct xe_gt *gt)
> > if (err)
> > goto fail;
> >
> > + vf_post_migration_rearm(gt);
> > +
> > err = vf_post_migration_notify_resfix_done(gt);
> > if (err && err != -EAGAIN)
> > goto fail;
> > diff --git a/drivers/gpu/drm/xe/xe_guc_ct.c b/drivers/gpu/drm/xe/xe_guc_ct.c
> > index fd6e731c0395..92822d131612 100644
> > --- a/drivers/gpu/drm/xe/xe_guc_ct.c
> > +++ b/drivers/gpu/drm/xe/xe_guc_ct.c
> > @@ -166,6 +166,7 @@ ct_to_xe(struct xe_guc_ct *ct)
> > */
> >
> > #define CTB_DESC_SIZE ALIGN(sizeof(struct guc_ct_buffer_desc), SZ_2K)
> > +#define CTB_H2G_BUFFER_OFFSET (CTB_DESC_SIZE * 2)
> > #define CTB_H2G_BUFFER_SIZE (SZ_4K)
> > #define CTB_G2H_BUFFER_SIZE (SZ_128K)
> > #define G2H_ROOM_BUFFER_SIZE (CTB_G2H_BUFFER_SIZE / 2)
> > @@ -189,7 +190,7 @@ long xe_guc_ct_queue_proc_time_jiffies(struct xe_guc_ct *ct)
> >
> > static size_t guc_ct_size(void)
> > {
> > - return 2 * CTB_DESC_SIZE + CTB_H2G_BUFFER_SIZE +
> > + return CTB_H2G_BUFFER_OFFSET + CTB_H2G_BUFFER_SIZE +
> > CTB_G2H_BUFFER_SIZE;
> > }
> >
> > @@ -330,7 +331,7 @@ static void guc_ct_ctb_h2g_init(struct xe_device *xe, struct guc_ctb *h2g,
> > h2g->desc = *map;
> > xe_map_memset(xe, &h2g->desc, 0, 0, sizeof(struct guc_ct_buffer_desc));
> >
> > - h2g->cmds = IOSYS_MAP_INIT_OFFSET(map, CTB_DESC_SIZE * 2);
> > + h2g->cmds = IOSYS_MAP_INIT_OFFSET(map, CTB_H2G_BUFFER_OFFSET);
> > }
> >
> > static void guc_ct_ctb_g2h_init(struct xe_device *xe, struct guc_ctb *g2h,
> > @@ -348,7 +349,7 @@ static void guc_ct_ctb_g2h_init(struct xe_device *xe, struct guc_ctb *g2h,
> > g2h->desc = IOSYS_MAP_INIT_OFFSET(map, CTB_DESC_SIZE);
> > xe_map_memset(xe, &g2h->desc, 0, 0, sizeof(struct guc_ct_buffer_desc));
> >
> > - g2h->cmds = IOSYS_MAP_INIT_OFFSET(map, CTB_DESC_SIZE * 2 +
> > + g2h->cmds = IOSYS_MAP_INIT_OFFSET(map, CTB_H2G_BUFFER_OFFSET +
> > CTB_H2G_BUFFER_SIZE);
> > }
> >
> > @@ -359,7 +360,7 @@ static int guc_ct_ctb_h2g_register(struct xe_guc_ct *ct)
> > int err;
> >
> > desc_addr = xe_bo_ggtt_addr(ct->bo);
> > - ctb_addr = xe_bo_ggtt_addr(ct->bo) + CTB_DESC_SIZE * 2;
> > + ctb_addr = xe_bo_ggtt_addr(ct->bo) + CTB_H2G_BUFFER_OFFSET;
> > size = ct->ctbs.h2g.info.size * sizeof(u32);
> >
> > err = xe_guc_self_cfg64(guc,
> > @@ -386,7 +387,7 @@ static int guc_ct_ctb_g2h_register(struct xe_guc_ct *ct)
> > int err;
> >
> > desc_addr = xe_bo_ggtt_addr(ct->bo) + CTB_DESC_SIZE;
> > - ctb_addr = xe_bo_ggtt_addr(ct->bo) + CTB_DESC_SIZE * 2 +
> > + ctb_addr = xe_bo_ggtt_addr(ct->bo) + CTB_H2G_BUFFER_OFFSET +
> > CTB_H2G_BUFFER_SIZE;
> > size = ct->ctbs.g2h.info.size * sizeof(u32);
> >
> > @@ -500,7 +501,7 @@ static void ct_exit_safe_mode(struct xe_guc_ct *ct)
> > xe_gt_dbg(ct_to_gt(ct), "GuC CT safe-mode disabled\n");
> > }
> >
> > -int xe_guc_ct_enable(struct xe_guc_ct *ct)
> > +static int __xe_guc_ct_start(struct xe_guc_ct *ct, bool needs_register)
> > {
> > struct xe_device *xe = ct_to_xe(ct);
> > struct xe_gt *gt = ct_to_gt(ct);
> > @@ -508,21 +509,28 @@ int xe_guc_ct_enable(struct xe_guc_ct *ct)
> >
> > xe_gt_assert(gt, !xe_guc_ct_enabled(ct));
> >
> > - xe_map_memset(xe, &ct->bo->vmap, 0, 0, xe_bo_size(ct->bo));
> > - guc_ct_ctb_h2g_init(xe, &ct->ctbs.h2g, &ct->bo->vmap);
> > - guc_ct_ctb_g2h_init(xe, &ct->ctbs.g2h, &ct->bo->vmap);
> > + if (needs_register) {
> > + xe_map_memset(xe, &ct->bo->vmap, 0, 0, xe_bo_size(ct->bo));
> > + guc_ct_ctb_h2g_init(xe, &ct->ctbs.h2g, &ct->bo->vmap);
> > + guc_ct_ctb_g2h_init(xe, &ct->ctbs.g2h, &ct->bo->vmap);
> >
> > - err = guc_ct_ctb_h2g_register(ct);
> > - if (err)
> > - goto err_out;
> > + err = guc_ct_ctb_h2g_register(ct);
> > + if (err)
> > + goto err_out;
> >
> > - err = guc_ct_ctb_g2h_register(ct);
> > - if (err)
> > - goto err_out;
> > + err = guc_ct_ctb_g2h_register(ct);
> > + if (err)
> > + goto err_out;
> >
> > - err = guc_ct_control_toggle(ct, true);
> > - if (err)
> > - goto err_out;
> > + err = guc_ct_control_toggle(ct, true);
> > + if (err)
> > + goto err_out;
> > + } else {
> > + ct->ctbs.h2g.info.broken = false;
> > + ct->ctbs.g2h.info.broken = false;
> > + xe_map_memset(xe, &ct->bo->vmap, CTB_H2G_BUFFER_OFFSET, 0,
> > + CTB_H2G_BUFFER_SIZE);
>
> nit: we may want to add some debug dump to see what H2G actually are about to be lost by this memset
>
> this would also allow us to verify test scenarios which may assume something was not processed by the source GuC before VF pause
>
The debug messages in [1] provide all the information needed to reason
about which code paths are being tested on VF recovery.
Matt
[1] https://patchwork.freedesktop.org/patch/677965/?series=154627&rev=4
> but we can do that as follow up
>
> > + }
> >
> > guc_ct_change_state(ct, XE_GUC_CT_STATE_ENABLED);
> >
> > @@ -554,6 +562,32 @@ int xe_guc_ct_enable(struct xe_guc_ct *ct)
> > return err;
> > }
> >
> > +/**
> > + * xe_guc_ct_restart() - Restart GuC CT
> > + * @ct: the &xe_guc_ct
> > + *
> > + * Restart GuC CT to an empty state without issuing a CT register MMIO command.
> > + *
> > + * Return: 0 on success, or a negative errno on failure.
> > + */
> > +int xe_guc_ct_restart(struct xe_guc_ct *ct)
> > +{
> > + return __xe_guc_ct_start(ct, false);
> > +}
> > +
> > +/**
> > + * xe_guc_ct_enable() - Enable GuC CT
> > + * @ct: the &xe_guc_ct
> > + *
> > + * Enable GuC CT to an empty state and issue a CT register MMIO command.
> > + *
> > + * Return: 0 on success, or a negative errno on failure.
> > + */
> > +int xe_guc_ct_enable(struct xe_guc_ct *ct)
> > +{
> > + return __xe_guc_ct_start(ct, true);
> > +}
> > +
> > static void stop_g2h_handler(struct xe_guc_ct *ct)
> > {
> > cancel_work_sync(&ct->g2h_worker);
> > diff --git a/drivers/gpu/drm/xe/xe_guc_ct.h b/drivers/gpu/drm/xe/xe_guc_ct.h
> > index 0a88f4e447fa..b1cba250c51c 100644
> > --- a/drivers/gpu/drm/xe/xe_guc_ct.h
> > +++ b/drivers/gpu/drm/xe/xe_guc_ct.h
> > @@ -15,6 +15,7 @@ int xe_guc_ct_init_noalloc(struct xe_guc_ct *ct);
> > int xe_guc_ct_init(struct xe_guc_ct *ct);
> > int xe_guc_ct_init_post_hwconfig(struct xe_guc_ct *ct);
> > int xe_guc_ct_enable(struct xe_guc_ct *ct);
> > +int xe_guc_ct_restart(struct xe_guc_ct *ct);
> > void xe_guc_ct_disable(struct xe_guc_ct *ct);
> > void xe_guc_ct_stop(struct xe_guc_ct *ct);
> > void xe_guc_ct_flush_and_stop(struct xe_guc_ct *ct);
>
> otherwise, lgtm
>
next prev parent reply other threads:[~2025-10-05 6:50 UTC|newest]
Thread overview: 71+ messages / expand[flat|nested] mbox.gz Atom feed top
2025-10-02 5:53 [PATCH v4 00/34] VF migration redesign Matthew Brost
2025-10-02 5:53 ` [PATCH v4 01/34] drm/xe: Add NULL checks to scratch LRC allocation Matthew Brost
2025-10-02 22:02 ` Lis, Tomasz
2025-10-02 5:53 ` [PATCH v4 02/34] Revert "drm/xe/vf: Rebase exec queue parallel commands during migration recovery" Matthew Brost
2025-10-02 5:53 ` [PATCH v4 03/34] Revert "drm/xe/vf: Post migration, repopulate ring area for pending request" Matthew Brost
2025-10-02 5:53 ` [PATCH v4 04/34] Revert "drm/xe/vf: Fixup CTB send buffer messages after migration" Matthew Brost
2025-10-02 5:53 ` [PATCH v4 05/34] drm/xe: Save off position in ring in which a job was programmed Matthew Brost
2025-10-02 5:53 ` [PATCH v4 06/34] drm/xe/guc: Track pending-enable source in submission state Matthew Brost
2025-10-02 5:53 ` [PATCH v4 07/34] drm/xe: Track LR jobs in DRM scheduler pending list Matthew Brost
2025-10-02 16:14 ` Matthew Auld
2025-10-05 5:21 ` Matthew Brost
2025-10-02 5:53 ` [PATCH v4 08/34] drm/xe: Don't change LRC ring head on job resubmission Matthew Brost
2025-10-02 14:15 ` Matthew Auld
2025-10-05 5:25 ` Matthew Brost
2025-10-05 6:53 ` Matthew Brost
2025-10-06 8:59 ` Matthew Auld
2025-10-02 5:53 ` [PATCH v4 09/34] drm/xe: Make LRC W/A scratch buffer usage consistent Matthew Brost
2025-10-02 5:53 ` [PATCH v4 10/34] drm/xe/guc: Document GuC submission backend Matthew Brost
2025-10-03 14:30 ` Lis, Tomasz
2025-10-02 5:53 ` [PATCH v4 11/34] drm/xe/vf: Add xe_gt_recovery_inprogress helper Matthew Brost
2025-10-03 1:39 ` Lis, Tomasz
2025-10-04 4:32 ` Matthew Brost
2025-10-03 8:40 ` Michal Wajdeczko
2025-10-04 4:32 ` Matthew Brost
2025-10-02 5:53 ` [PATCH v4 12/34] drm/xe/vf: Make VF recovery run on per-GT worker Matthew Brost
2025-10-02 5:53 ` [PATCH v4 13/34] drm/xe/vf: Abort H2G sends during VF post-migration recovery Matthew Brost
2025-10-02 5:53 ` [PATCH v4 14/34] drm/xe/vf: Remove memory allocations from VF post migration recovery Matthew Brost
2025-10-02 5:53 ` [PATCH v4 15/34] drm/xe/vf: Close multi-GT GGTT shift race Matthew Brost
2025-10-03 14:24 ` Michal Wajdeczko
2025-10-04 4:36 ` Matthew Brost
2025-10-02 5:53 ` [PATCH v4 16/34] drm/xe/vf: Teardown VF post migration worker on driver unload Matthew Brost
2025-10-02 5:53 ` [PATCH v4 17/34] drm/xe/vf: Don't allow GT reset to be queued during VF post migration recovery Matthew Brost
2025-10-03 16:09 ` Lis, Tomasz
2025-10-02 5:53 ` [PATCH v4 18/34] drm/xe/vf: Wakeup in GuC backend on " Matthew Brost
2025-10-03 14:38 ` Michal Wajdeczko
2025-10-05 6:22 ` Matthew Brost
2025-10-05 6:35 ` Matthew Brost
2025-10-02 5:53 ` [PATCH v4 19/34] drm/xe/vf: Avoid indefinite blocking in preempt rebind worker for VFs supporting migration Matthew Brost
2025-10-02 5:53 ` [PATCH v4 20/34] drm/xe/vf: Use GUC_HXG_TYPE_EVENT for GuC context register Matthew Brost
2025-10-03 14:26 ` Lis, Tomasz
2025-10-05 5:43 ` Matthew Brost
2025-10-03 14:57 ` Michal Wajdeczko
2025-10-02 5:53 ` [PATCH v4 21/34] drm/xe/vf: Flush and stop CTs in VF post migration recovery Matthew Brost
2025-10-02 5:53 ` [PATCH v4 22/34] drm/xe/vf: Reset TLB invalidations during " Matthew Brost
2025-10-02 5:53 ` [PATCH v4 23/34] drm/xe/vf: Kickstart after resfix in " Matthew Brost
2025-10-02 5:53 ` [PATCH v4 24/34] drm/xe/vf: Start CTs before resfix " Matthew Brost
2025-10-02 21:50 ` Lis, Tomasz
2025-10-03 15:10 ` Michal Wajdeczko
2025-10-05 6:49 ` Matthew Brost [this message]
2025-10-05 12:28 ` Michal Wajdeczko
2025-10-02 5:53 ` [PATCH v4 25/34] drm/xe/vf: Abort VF post migration recovery on failure Matthew Brost
2025-10-02 5:53 ` [PATCH v4 26/34] drm/xe/vf: Replay GuC submission state on pause / unpause Matthew Brost
2025-10-02 5:53 ` [PATCH v4 27/34] drm/xe: Move queue init before LRC creation Matthew Brost
2025-10-03 13:25 ` Lis, Tomasz
2025-10-05 8:03 ` Matthew Brost
2025-10-02 5:53 ` [PATCH v4 28/34] drm/xe/vf: Add debug prints for GuC replaying state during VF recovery Matthew Brost
2025-10-03 13:08 ` Lis, Tomasz
2025-10-02 5:53 ` [PATCH v4 29/34] drm/xe/vf: Workaround for race condition in GuC firmware during VF pause Matthew Brost
2025-10-03 13:06 ` Lis, Tomasz
2025-10-02 5:53 ` [PATCH v4 30/34] drm/xe: Use PPGTT addresses for TLB invalidation to avoid GGTT fixups Matthew Brost
2025-10-02 5:53 ` [PATCH v4 31/34] drm/xe/vf: Use primary GT ordered work queue on media GT on PTL VF Matthew Brost
2025-10-02 21:00 ` Lis, Tomasz
2025-10-05 7:03 ` Matthew Brost
2025-10-02 5:54 ` [PATCH v4 32/34] drm/xe/vf: Ensure media GT VF recovery runs after primary GT on PTL Matthew Brost
2025-10-02 20:19 ` Lis, Tomasz
2025-10-02 5:54 ` [PATCH v4 33/34] drm/xe/vf: Rebase CCS save/restore BB GGTT addresses Matthew Brost
2025-10-02 5:54 ` [PATCH v4 34/34] drm/xe/guc: Increase wait timeout to 2sec after BUSY reply from GuC Matthew Brost
2025-10-02 6:45 ` ✗ CI.checkpatch: warning for VF migration redesign (rev4) Patchwork
2025-10-02 6:47 ` ✓ CI.KUnit: success " Patchwork
2025-10-02 7:33 ` ✗ Xe.CI.BAT: failure " Patchwork
2025-10-02 9:19 ` ✗ Xe.CI.Full: " Patchwork
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=aOIVEF6cPfw+BH56@lstrano-desk.jf.intel.com \
--to=matthew.brost@intel.com \
--cc=intel-xe@lists.freedesktop.org \
--cc=michal.wajdeczko@intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox