From: "Lis, Tomasz" <tomasz.lis@intel.com>
To: Matthew Brost <matthew.brost@intel.com>,
<intel-xe@lists.freedesktop.org>
Subject: Re: [PATCH v2 15/34] drm/xe/vf: Close multi-GT GGTT shift race
Date: Fri, 26 Sep 2025 04:33:36 +0200 [thread overview]
Message-ID: <35a54c4b-b824-41c4-b9e2-b57a6aa1280d@intel.com> (raw)
In-Reply-To: <20250924011601.888293-16-matthew.brost@intel.com>
On 9/24/2025 3:15 AM, Matthew Brost wrote:
> As multi-GT VF post-migration recovery can run in parallel on different
> workqueues, but both GTs point to the same GGTT, only one GT needs to
> shift the GGTT. However, both GTs need to know when this step has
> completed. To coordinate this, share the VF config lock among all GTs
> that share a GGTT, and perform the GGTT shift under this lock.
The description does not mention the removal of the ggtt_shift variable; this
removal is not related to the locking change, so it should be mentioned
separately.
> Signed-off-by: Matthew Brost <matthew.brost@intel.com>
> ---
> drivers/gpu/drm/xe/xe_gt_sriov_vf.c | 95 +++++++++--------------
> drivers/gpu/drm/xe/xe_gt_sriov_vf.h | 3 +-
> drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h | 11 ++-
> drivers/gpu/drm/xe/xe_guc.c | 2 +-
> drivers/gpu/drm/xe/xe_tile_sriov_vf.c | 6 +-
> drivers/gpu/drm/xe/xe_tile_sriov_vf.h | 1 -
> 6 files changed, 51 insertions(+), 67 deletions(-)
>
> diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
> index 8304c26c076e..807fdced0228 100644
> --- a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
> +++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
> @@ -436,16 +436,19 @@ u32 xe_gt_sriov_vf_gmdid(struct xe_gt *gt)
> return value;
> }
>
> -static int vf_get_ggtt_info(struct xe_gt *gt)
> +static int vf_get_ggtt_info(struct xe_gt *gt, bool recovery)
> {
> struct xe_gt_sriov_vf_selfconfig *config = >->sriov.vf.self_config;
> + struct xe_gt_sriov_vf_selfconfig *primary_config =
> + >_to_tile(gt)->primary_gt->sriov.vf.self_config;
> struct xe_guc *guc = >->uc.guc;
> u64 start, size;
> + s64 shift;
> int err;
>
> xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
>
> - down_write(&config->lock);
> + down_write(config->lock);
>
> err = guc_action_query_single_klv64(guc, GUC_KLV_VF_CFG_GGTT_START_KEY, &start);
> if (unlikely(err))
> @@ -465,13 +468,17 @@ static int vf_get_ggtt_info(struct xe_gt *gt)
> xe_gt_sriov_dbg_verbose(gt, "GGTT %#llx-%#llx = %lluK\n",
> start, start + size - 1, size / SZ_1K);
>
> - config->ggtt_shift = start - (s64)config->ggtt_base;
> + shift = start - (s64)primary_config->ggtt_base;
> config->ggtt_base = start;
> config->ggtt_size = size;
> + if (recovery)
> + primary_config->ggtt_base = start;
> err = config->ggtt_size ? 0 : -ENODATA;
>
> + if (!err && shift && recovery)
> + xe_tile_sriov_vf_fixup_ggtt_nodes(gt_to_tile(gt), shift);
> out:
> - up_write(&config->lock);
> + up_write(config->lock);
> return err;
> }
>
> @@ -485,7 +492,7 @@ static int vf_get_lmem_info(struct xe_gt *gt)
>
> xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
>
> - down_write(&config->lock);
> + down_write(config->lock);
>
> err = guc_action_query_single_klv64(guc, GUC_KLV_VF_CFG_LMEM_SIZE_KEY, &size);
> if (unlikely(err))
> @@ -505,7 +512,7 @@ static int vf_get_lmem_info(struct xe_gt *gt)
> err = config->lmem_size ? 0 : -ENODATA;
>
> out:
> - up_write(&config->lock);
> + up_write(config->lock);
> return err;
> }
>
> @@ -518,7 +525,7 @@ static int vf_get_submission_cfg(struct xe_gt *gt)
>
> xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
>
> - down_write(&config->lock);
> + down_write(config->lock);
>
> err = guc_action_query_single_klv32(guc, GUC_KLV_VF_CFG_NUM_CONTEXTS_KEY, &num_ctxs);
> if (unlikely(err))
> @@ -549,7 +556,7 @@ static int vf_get_submission_cfg(struct xe_gt *gt)
> err = config->num_ctxs ? 0 : -ENODATA;
>
> out:
> - up_write(&config->lock);
> + up_write(config->lock);
> return err;
> }
>
> @@ -564,17 +571,18 @@ static void vf_cache_gmdid(struct xe_gt *gt)
> /**
> * xe_gt_sriov_vf_query_config - Query SR-IOV config data over MMIO.
> * @gt: the &xe_gt
> + * @recovery: VF post migration recovery path
> *
> * This function is for VF use only.
> *
> * Return: 0 on success or a negative error code on failure.
> */
> -int xe_gt_sriov_vf_query_config(struct xe_gt *gt)
> +int xe_gt_sriov_vf_query_config(struct xe_gt *gt, bool recovery)
> {
> struct xe_device *xe = gt_to_xe(gt);
> int err;
>
> - err = vf_get_ggtt_info(gt);
> + err = vf_get_ggtt_info(gt, recovery);
> if (unlikely(err))
> return err;
>
> @@ -610,10 +618,10 @@ u16 xe_gt_sriov_vf_guc_ids(struct xe_gt *gt)
> xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
> xe_gt_assert(gt, gt->sriov.vf.guc_version.major);
>
> - down_read(&config->lock);
> + down_read(config->lock);
> xe_gt_assert(gt, config->num_ctxs);
> val = config->num_ctxs;
> - up_read(&config->lock);
> + up_read(config->lock);
>
> return val;
> }
> @@ -634,10 +642,10 @@ u64 xe_gt_sriov_vf_lmem(struct xe_gt *gt)
> xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
> xe_gt_assert(gt, gt->sriov.vf.guc_version.major);
>
> - down_read(&config->lock);
> + down_read(config->lock);
> xe_gt_assert(gt, config->lmem_size);
> val = config->lmem_size;
> - up_read(&config->lock);
> + up_read(config->lock);
>
> return val;
> }
> @@ -656,11 +664,9 @@ u64 xe_gt_sriov_vf_ggtt(struct xe_gt *gt)
> u64 val;
>
> xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
> - xe_gt_assert(gt, gt->sriov.vf.guc_version.major);
> + lockdep_assert_held(config->lock);
>
> - down_read(&config->lock);
> val = config->ggtt_size;
> - up_read(&config->lock);
>
> return val;
> }
> @@ -680,34 +686,10 @@ u64 xe_gt_sriov_vf_ggtt_base(struct xe_gt *gt)
>
> xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
> xe_gt_assert(gt, gt->sriov.vf.guc_version.major);
> -
> - down_read(&config->lock);
> xe_gt_assert(gt, config->ggtt_size);
> - val = config->ggtt_base;
> - up_read(&config->lock);
> -
> - return val;
> -}
> + lockdep_assert_held(config->lock);
>
> -/**
> - * xe_gt_sriov_vf_ggtt_shift - Return shift in GGTT range due to VF migration
> - * @gt: the &xe_gt struct instance
> - *
> - * This function is for VF use only.
> - *
> - * Return: The shift value; could be negative
> - */
> -s64 xe_gt_sriov_vf_ggtt_shift(struct xe_gt *gt)
> -{
> - struct xe_gt_sriov_vf_selfconfig *config = >->sriov.vf.self_config;
> - s64 val;
> -
> - xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
> - xe_gt_assert(gt, xe_gt_is_main_type(gt));
> -
> - down_read(&config->lock);
> - val = config->ggtt_shift;
> - up_read(&config->lock);
> + val = config->ggtt_base;
>
> return val;
> }
> @@ -1115,7 +1097,7 @@ void xe_gt_sriov_vf_print_config(struct xe_gt *gt, struct drm_printer *p)
>
> xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
>
> - down_read(&config->lock);
> + down_read(config->lock);
> drm_printf(p, "GGTT range:\t%#llx-%#llx\n",
> config->ggtt_base,
> config->ggtt_base + config->ggtt_size - 1);
> @@ -1123,8 +1105,6 @@ void xe_gt_sriov_vf_print_config(struct xe_gt *gt, struct drm_printer *p)
> string_get_size(config->ggtt_size, 1, STRING_UNITS_2, buf, sizeof(buf));
> drm_printf(p, "GGTT size:\t%llu (%s)\n", config->ggtt_size, buf);
>
> - drm_printf(p, "GGTT shift on last restore:\t%lld\n", config->ggtt_shift);
> -
Right... so with this change we're losing a useful debug value.
I'm not very attached to it, as the idea to have it there did not
originate with me.
IMO the GGTT config is printed to dmesg and that's enough; the shift can be
computed from that.
For the main change - the lock and the multi-GT support - looks good, no
issues.
-Tomasz
> if (IS_DGFX(xe) && xe_gt_is_main_type(gt)) {
> string_get_size(config->lmem_size, 1, STRING_UNITS_2, buf, sizeof(buf));
> drm_printf(p, "LMEM size:\t%llu (%s)\n", config->lmem_size, buf);
> @@ -1132,7 +1112,7 @@ void xe_gt_sriov_vf_print_config(struct xe_gt *gt, struct drm_printer *p)
>
> drm_printf(p, "GuC contexts:\t%u\n", config->num_ctxs);
> drm_printf(p, "GuC doorbells:\t%u\n", config->num_dbs);
> - up_read(&config->lock);
> + up_read(config->lock);
> }
>
> /**
> @@ -1215,21 +1195,16 @@ static size_t post_migration_scratch_size(struct xe_device *xe)
> static int vf_post_migration_fixups(struct xe_gt *gt)
> {
> void *buf = gt->sriov.vf.migration.lrc_wa_bb;
> - s64 shift;
> int err;
>
> - err = xe_gt_sriov_vf_query_config(gt);
> + err = xe_gt_sriov_vf_query_config(gt, true);
> if (err)
> return err;
>
> - shift = xe_gt_sriov_vf_ggtt_shift(gt);
> - if (shift) {
> - xe_tile_sriov_vf_fixup_ggtt_nodes(gt_to_tile(gt), shift);
> - xe_gt_sriov_vf_default_lrcs_hwsp_rebase(gt);
> - err = xe_guc_contexts_hwsp_rebase(>->uc.guc, buf);
> - if (err)
> - return err;
> - }
> + xe_gt_sriov_vf_default_lrcs_hwsp_rebase(gt);
> + err = xe_guc_contexts_hwsp_rebase(>->uc.guc, buf);
> + if (err)
> + return err;
>
> return 0;
> }
> @@ -1313,6 +1288,7 @@ static void migration_worker_func(struct work_struct *w)
> */
> int xe_gt_sriov_vf_migration_init_early(struct xe_gt *gt)
> {
> + struct xe_tile *tile = gt_to_tile(gt);
> void *buf;
>
> buf = drmm_kmalloc(>_to_xe(gt)->drm,
> @@ -1322,7 +1298,10 @@ int xe_gt_sriov_vf_migration_init_early(struct xe_gt *gt)
> return -ENOMEM;
>
> gt->sriov.vf.migration.lrc_wa_bb = buf;
> - init_rwsem(>->sriov.vf.self_config.lock);
> + if (xe_gt_is_main_type(gt))
> + init_rwsem(>->sriov.vf.self_config.__lock);
> + gt->sriov.vf.self_config.lock =
> + &tile->primary_gt->sriov.vf.self_config.__lock;
> spin_lock_init(>->sriov.vf.migration.lock);
> INIT_WORK(>->sriov.vf.migration.worker, migration_worker_func);
>
> diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf.h b/drivers/gpu/drm/xe/xe_gt_sriov_vf.h
> index 195dbebe941e..535237003915 100644
> --- a/drivers/gpu/drm/xe/xe_gt_sriov_vf.h
> +++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf.h
> @@ -18,7 +18,7 @@ int xe_gt_sriov_vf_bootstrap(struct xe_gt *gt);
> void xe_gt_sriov_vf_guc_versions(struct xe_gt *gt,
> struct xe_uc_fw_version *wanted,
> struct xe_uc_fw_version *found);
> -int xe_gt_sriov_vf_query_config(struct xe_gt *gt);
> +int xe_gt_sriov_vf_query_config(struct xe_gt *gt, bool recovery);
> int xe_gt_sriov_vf_connect(struct xe_gt *gt);
> int xe_gt_sriov_vf_query_runtime(struct xe_gt *gt);
> void xe_gt_sriov_vf_migrated_event_handler(struct xe_gt *gt);
> @@ -31,7 +31,6 @@ u16 xe_gt_sriov_vf_guc_ids(struct xe_gt *gt);
> u64 xe_gt_sriov_vf_lmem(struct xe_gt *gt);
> u64 xe_gt_sriov_vf_ggtt(struct xe_gt *gt);
> u64 xe_gt_sriov_vf_ggtt_base(struct xe_gt *gt);
> -s64 xe_gt_sriov_vf_ggtt_shift(struct xe_gt *gt);
>
> u32 xe_gt_sriov_vf_read32(struct xe_gt *gt, struct xe_reg reg);
> void xe_gt_sriov_vf_write32(struct xe_gt *gt, struct xe_reg reg, u32 val);
> diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h b/drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h
> index 496b657119de..61484c7c9a36 100644
> --- a/drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h
> +++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h
> @@ -19,16 +19,19 @@ struct xe_gt_sriov_vf_selfconfig {
> u64 ggtt_base;
> /** @ggtt_size: assigned size of the GGTT region. */
> u64 ggtt_size;
> - /** @ggtt_shift: difference in ggtt_base on last migration */
> - s64 ggtt_shift;
> /** @lmem_size: assigned size of the LMEM. */
> u64 lmem_size;
> /** @num_ctxs: assigned number of GuC submission context IDs. */
> u16 num_ctxs;
> /** @num_dbs: assigned number of GuC doorbells IDs. */
> u16 num_dbs;
> - /** @lock: lock for protecting access to all selfconfig fields. */
> - struct rw_semaphore lock;
> + /** @__lock: lock for protecting access to all selfconfig fields. */
> + struct rw_semaphore __lock;
> + /**
> + * @lock: pointer to lock for protecting access to all selfconfig
> + * fields, all GTs point to primary GT.
> + */
> + struct rw_semaphore *lock;
> };
>
> /**
> diff --git a/drivers/gpu/drm/xe/xe_guc.c b/drivers/gpu/drm/xe/xe_guc.c
> index 00789844ea4d..ac60da51da2c 100644
> --- a/drivers/gpu/drm/xe/xe_guc.c
> +++ b/drivers/gpu/drm/xe/xe_guc.c
> @@ -712,7 +712,7 @@ static int vf_guc_init_noalloc(struct xe_guc *guc)
> if (err)
> return err;
>
> - err = xe_gt_sriov_vf_query_config(gt);
> + err = xe_gt_sriov_vf_query_config(gt, false);
> if (err)
> return err;
>
> diff --git a/drivers/gpu/drm/xe/xe_tile_sriov_vf.c b/drivers/gpu/drm/xe/xe_tile_sriov_vf.c
> index f221dbed16f0..dc6221fc0520 100644
> --- a/drivers/gpu/drm/xe/xe_tile_sriov_vf.c
> +++ b/drivers/gpu/drm/xe/xe_tile_sriov_vf.c
> @@ -40,7 +40,7 @@ static int vf_init_ggtt_balloons(struct xe_tile *tile)
> *
> * Return: 0 on success or a negative error code on failure.
> */
> -int xe_tile_sriov_vf_balloon_ggtt_locked(struct xe_tile *tile)
> +static int xe_tile_sriov_vf_balloon_ggtt_locked(struct xe_tile *tile)
> {
> u64 ggtt_base = xe_gt_sriov_vf_ggtt_base(tile->primary_gt);
> u64 ggtt_size = xe_gt_sriov_vf_ggtt(tile->primary_gt);
> @@ -100,12 +100,16 @@ int xe_tile_sriov_vf_balloon_ggtt_locked(struct xe_tile *tile)
>
> static int vf_balloon_ggtt(struct xe_tile *tile)
> {
> + struct xe_gt_sriov_vf_selfconfig *config =
> + &tile->primary_gt->sriov.vf.self_config;
> struct xe_ggtt *ggtt = tile->mem.ggtt;
> int err;
>
> + down_read(config->lock);
> mutex_lock(&ggtt->lock);
> err = xe_tile_sriov_vf_balloon_ggtt_locked(tile);
> mutex_unlock(&ggtt->lock);
> + up_read(config->lock);
>
> return err;
> }
> diff --git a/drivers/gpu/drm/xe/xe_tile_sriov_vf.h b/drivers/gpu/drm/xe/xe_tile_sriov_vf.h
> index 93eb043171e8..4ee68d1fb28e 100644
> --- a/drivers/gpu/drm/xe/xe_tile_sriov_vf.h
> +++ b/drivers/gpu/drm/xe/xe_tile_sriov_vf.h
> @@ -11,7 +11,6 @@
> struct xe_tile;
>
> int xe_tile_sriov_vf_prepare_ggtt(struct xe_tile *tile);
> -int xe_tile_sriov_vf_balloon_ggtt_locked(struct xe_tile *tile);
> void xe_tile_sriov_vf_deballoon_ggtt_locked(struct xe_tile *tile);
> void xe_tile_sriov_vf_fixup_ggtt_nodes(struct xe_tile *tile, s64 shift);
>
next prev parent reply other threads:[~2025-09-26 2:33 UTC|newest]
Thread overview: 90+ messages / expand[flat|nested] mbox.gz Atom feed top
2025-09-24 1:15 [PATCH v2 00/34] VF migration redesign Matthew Brost
2025-09-24 1:15 ` [PATCH v2 01/34] drm/xe/vf: Lock querying GGTT config during driver init Matthew Brost
2025-09-24 9:29 ` Michal Wajdeczko
2025-09-24 20:23 ` Matthew Brost
2025-09-30 0:42 ` Lis, Tomasz
2025-09-24 1:15 ` [PATCH v2 02/34] Revert "drm/xe/vf: Rebase exec queue parallel commands during migration recovery" Matthew Brost
2025-09-24 9:32 ` Michal Wajdeczko
2025-09-24 20:17 ` Matthew Brost
2025-09-24 1:15 ` [PATCH v2 03/34] Revert "drm/xe/vf: Post migration, repopulate ring area for pending request" Matthew Brost
2025-09-24 1:15 ` [PATCH v2 04/34] Revert "drm/xe/vf: Fixup CTB send buffer messages after migration" Matthew Brost
2025-09-24 1:15 ` [PATCH v2 05/34] drm/xe: Save off position in ring in which a job was programmed Matthew Brost
2025-09-24 1:15 ` [PATCH v2 06/34] drm/xe/guc: Track pending-enable source in submission state Matthew Brost
2025-09-24 1:15 ` [PATCH v2 07/34] drm/xe: Track LR jobs in DRM scheduler pending list Matthew Brost
2025-09-24 1:15 ` [PATCH v2 08/34] drm/xe: Don't change LRC ring head on job resubmission Matthew Brost
2025-09-24 15:14 ` Lis, Tomasz
2025-09-25 16:12 ` Matthew Brost
2025-09-24 1:15 ` [PATCH v2 09/34] drm/xe: Make LRC W/A scratch buffer usage consistent Matthew Brost
2025-09-24 14:23 ` Lis, Tomasz
2025-09-24 18:01 ` Lucas De Marchi
2025-09-25 20:25 ` Matthew Brost
2025-09-24 1:15 ` [PATCH v2 10/34] drm/xe/guc: Document GuC submission backend Matthew Brost
2025-09-24 9:35 ` Michal Wajdeczko
2025-09-24 20:20 ` Matthew Brost
2025-09-24 1:15 ` [PATCH v2 11/34] drm/xe/vf: Add xe_gt_sriov_vf_recovery_inprogress helper Matthew Brost
2025-09-24 10:14 ` Michal Wajdeczko
2025-09-24 19:39 ` Matthew Brost
2025-09-24 20:12 ` Michal Wajdeczko
2025-09-24 20:30 ` Matthew Brost
2025-09-24 1:15 ` [PATCH v2 12/34] drm/xe/vf: Make VF recovery run on per-GT worker Matthew Brost
2025-09-24 10:49 ` Michal Wajdeczko
2025-09-24 19:50 ` Matthew Brost
2025-09-24 20:21 ` Michal Wajdeczko
2025-09-24 20:35 ` Matthew Brost
2025-09-25 16:27 ` Lis, Tomasz
2025-09-25 16:56 ` Matthew Brost
2025-09-24 1:15 ` [PATCH v2 13/34] drm/xe/vf: Abort H2G sends during VF post-migration recovery Matthew Brost
2025-09-24 11:00 ` Michal Wajdeczko
2025-09-24 20:01 ` Matthew Brost
2025-09-24 1:15 ` [PATCH v2 14/34] drm/xe/vf: Remove memory allocations from VF post migration recovery Matthew Brost
2025-09-26 1:35 ` Lis, Tomasz
2025-09-26 1:43 ` Matthew Brost
2025-09-24 1:15 ` [PATCH v2 15/34] drm/xe/vf: Close multi-GT GGTT shift race Matthew Brost
2025-09-26 2:33 ` Lis, Tomasz [this message]
2025-09-26 19:09 ` Matthew Brost
2025-09-24 1:15 ` [PATCH v2 16/34] drm/xe/vf: Teardown VF post migration worker on driver unload Matthew Brost
2025-09-26 15:40 ` Lis, Tomasz
2025-09-26 19:13 ` Matthew Brost
2025-09-24 1:15 ` [PATCH v2 17/34] drm/xe/vf: Don't allow GT reset to be queued during VF post migration recovery Matthew Brost
2025-09-27 2:59 ` Lis, Tomasz
2025-09-27 22:33 ` Matthew Brost
2025-09-24 1:15 ` [PATCH v2 18/34] drm/xe/vf: Wakeup in GuC backend on " Matthew Brost
2025-09-25 19:06 ` Matthew Brost
2025-09-24 1:15 ` [PATCH v2 19/34] drm/xe/vf: Extra debug on GGTT shift Matthew Brost
2025-09-27 3:16 ` Lis, Tomasz
2025-09-27 11:06 ` Michal Wajdeczko
2025-09-27 22:56 ` Matthew Brost
2025-09-24 1:15 ` [PATCH v2 20/34] drm/xe/vf: Use GUC_HXG_TYPE_EVENT for GuC context register Matthew Brost
2025-09-24 11:15 ` Michal Wajdeczko
2025-09-24 20:16 ` Matthew Brost
2025-09-24 1:15 ` [PATCH v2 21/34] drm/xe/vf: Stop and flush CTs in VF post migration recovery Matthew Brost
2025-09-24 11:21 ` Michal Wajdeczko
2025-09-24 20:12 ` Matthew Brost
2025-09-24 1:15 ` [PATCH v2 22/34] drm/xe/vf: Reset TLB invalidations during " Matthew Brost
2025-09-27 3:43 ` Lis, Tomasz
2025-09-27 22:29 ` Matthew Brost
2025-09-24 1:15 ` [PATCH v2 23/34] drm/xe/vf: Kickstart after resfix in " Matthew Brost
2025-09-27 11:21 ` Lis, Tomasz
2025-09-24 1:15 ` [PATCH v2 24/34] drm/xe/vf: Start CTs before resfix " Matthew Brost
2025-09-24 11:50 ` Michal Wajdeczko
2025-09-24 20:10 ` Matthew Brost
2025-09-24 1:15 ` [PATCH v2 25/34] drm/xe/vf: Abort VF post migration recovery on failure Matthew Brost
2025-09-27 11:54 ` Lis, Tomasz
2025-09-27 22:38 ` Matthew Brost
2025-09-24 1:15 ` [PATCH v2 26/34] drm/xe/vf: Replay GuC submission state on pause / unpause Matthew Brost
2025-09-27 13:33 ` Lis, Tomasz
2025-09-27 23:11 ` Matthew Brost
2025-09-24 1:15 ` [PATCH v2 27/34] drm/xe: Move queue init before LRC creation Matthew Brost
2025-09-24 1:15 ` [PATCH v2 28/34] drm/xe/vf: Add debug prints for GuC replaying state during VF recovery Matthew Brost
2025-09-24 1:15 ` [PATCH v2 29/34] drm/xe/vf: Workaround for race condition in GuC firmware during VF pause Matthew Brost
2025-09-24 1:15 ` [PATCH v2 30/34] drm/xe: Use PPGTT addresses for TLB invalidation to avoid GGTT fixups Matthew Brost
2025-09-24 1:15 ` [PATCH v2 31/34] drm/xe/vf: Use primary GT ordered work queue on media GT on PTL VF Matthew Brost
2025-09-24 1:15 ` [PATCH v2 32/34] drm/xe/vf: Ensure media GT VF recovery runs after primary GT on PTL Matthew Brost
2025-09-24 1:16 ` [PATCH v2 33/34] drm/xe/vf: Rebase CCS save/restore BB GGTT addresses Matthew Brost
2025-09-24 4:04 ` K V P, Satyanarayana
2025-09-24 6:32 ` Matthew Brost
2025-09-24 6:36 ` K V P, Satyanarayana
2025-09-24 1:16 ` [PATCH v2 34/34] drm/xe/guc: Increase wait timeout to 2sec after BUSY reply from GuC Matthew Brost
2025-09-24 1:29 ` ✓ CI.KUnit: success for VF migration redesign (rev2) Patchwork
2025-09-24 2:14 ` ✗ Xe.CI.BAT: failure " Patchwork
2025-09-24 7:37 ` ✗ Xe.CI.Full: " Patchwork
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=35a54c4b-b824-41c4-b9e2-b57a6aa1280d@intel.com \
--to=tomasz.lis@intel.com \
--cc=intel-xe@lists.freedesktop.org \
--cc=matthew.brost@intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox