From: Michal Wajdeczko <michal.wajdeczko@intel.com>
To: "Michał Winiarski" <michal.winiarski@intel.com>,
"Alex Williamson" <alex@shazbot.org>,
"Lucas De Marchi" <lucas.demarchi@intel.com>,
"Thomas Hellström" <thomas.hellstrom@linux.intel.com>,
"Rodrigo Vivi" <rodrigo.vivi@intel.com>,
"Jason Gunthorpe" <jgg@ziepe.ca>,
"Yishai Hadas" <yishaih@nvidia.com>,
"Kevin Tian" <kevin.tian@intel.com>,
"Shameer Kolothum" <skolothumtho@nvidia.com>,
intel-xe@lists.freedesktop.org, linux-kernel@vger.kernel.org,
kvm@vger.kernel.org, "Matthew Brost" <matthew.brost@intel.com>
Cc: <dri-devel@lists.freedesktop.org>,
Jani Nikula <jani.nikula@linux.intel.com>,
Joonas Lahtinen <joonas.lahtinen@linux.intel.com>,
Tvrtko Ursulin <tursulin@ursulin.net>,
David Airlie <airlied@gmail.com>, Simona Vetter <simona@ffwll.ch>,
"Lukasz Laguna" <lukasz.laguna@intel.com>,
Christoph Hellwig <hch@infradead.org>
Subject: Re: [PATCH v3 15/28] drm/xe/pf: Switch VF migration GuC save/restore to struct migration data
Date: Mon, 3 Nov 2025 19:30:36 +0100 [thread overview]
Message-ID: <efde06ef-1ce4-4bc7-a0ca-01a27b697ae0@intel.com> (raw)
In-Reply-To: <20251030203135.337696-16-michal.winiarski@intel.com>
On 10/30/2025 9:31 PM, Michał Winiarski wrote:
> In upcoming changes, the GuC VF migration data will be handled as part
> of separate SAVE/RESTORE states in VF control state machine.
> Now that the data is decoupled from both guc_state debugfs and PAUSE
> state, we can safely remove the struct xe_gt_sriov_state_snapshot and
> modify the GuC save/restore functions to operate on struct
> xe_sriov_migration_data.
hmm, that reminded me that maybe, instead of
xe_sriov_migration_data,
a better name for this "data" struct could be
xe_sriov_migration_packet
to make it more distinguishable from
xe_gt_sriov_migration_data,
which has a completely different usage
>
> Signed-off-by: Michał Winiarski <michal.winiarski@intel.com>
> ---
> drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c | 265 +++++-------------
> drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.h | 13 +-
> .../drm/xe/xe_gt_sriov_pf_migration_types.h | 27 --
> drivers/gpu/drm/xe/xe_gt_sriov_pf_types.h | 4 -
> 4 files changed, 79 insertions(+), 230 deletions(-)
>
> diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c
> index a2db127982638..4a716e0a29fe4 100644
> --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c
> +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c
> @@ -28,6 +28,17 @@ static struct xe_gt_sriov_migration_data *pf_pick_gt_migration(struct xe_gt *gt,
> return >->sriov.pf.vfs[vfid].migration;
> }
>
> +static void pf_dump_mig_data(struct xe_gt *gt, unsigned int vfid,
> + struct xe_sriov_migration_data *data)
> +{
> + if (IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV)) {
> + print_hex_dump_bytes("mig_hdr: ", DUMP_PREFIX_OFFSET,
> + &data->hdr, sizeof(data->hdr));
> + print_hex_dump_bytes("mig_data: ", DUMP_PREFIX_OFFSET,
> + data->vaddr, min(SZ_64, data->size));
> + }
nit: maybe this function should be based on drm_printer/drm_print_hex_dump;
then we will get the proper GTn: prefix
> +}
> +
> /* Return: number of dwords saved/restored/required or a negative error code on failure */
> static int guc_action_vf_save_restore(struct xe_guc *guc, u32 vfid, u32 opcode,
> u64 addr, u32 ndwords)
> @@ -47,7 +58,7 @@ static int guc_action_vf_save_restore(struct xe_guc *guc, u32 vfid, u32 opcode,
> }
>
> /* Return: size of the state in dwords or a negative error code on failure */
> -static int pf_send_guc_query_vf_state_size(struct xe_gt *gt, unsigned int vfid)
> +static int pf_send_guc_query_vf_mig_data_size(struct xe_gt *gt, unsigned int vfid)
> {
> int ret;
>
> @@ -56,8 +67,8 @@ static int pf_send_guc_query_vf_state_size(struct xe_gt *gt, unsigned int vfid)
> }
>
> /* Return: number of state dwords saved or a negative error code on failure */
> -static int pf_send_guc_save_vf_state(struct xe_gt *gt, unsigned int vfid,
> - void *dst, size_t size)
> +static int pf_send_guc_save_vf_mig_data(struct xe_gt *gt, unsigned int vfid,
> + void *dst, size_t size)
> {
> const int ndwords = size / sizeof(u32);
> struct xe_guc *guc = >->uc.guc;
> @@ -85,8 +96,8 @@ static int pf_send_guc_save_vf_state(struct xe_gt *gt, unsigned int vfid,
> }
>
> /* Return: number of state dwords restored or a negative error code on failure */
> -static int pf_send_guc_restore_vf_state(struct xe_gt *gt, unsigned int vfid,
> - const void *src, size_t size)
> +static int pf_send_guc_restore_vf_mig_data(struct xe_gt *gt, unsigned int vfid,
> + const void *src, size_t size)
> {
> const int ndwords = size / sizeof(u32);
> struct xe_guc *guc = >->uc.guc;
> @@ -114,120 +125,67 @@ static bool pf_migration_supported(struct xe_gt *gt)
> return xe_sriov_pf_migration_supported(gt_to_xe(gt));
> }
>
> -static struct mutex *pf_migration_mutex(struct xe_gt *gt)
> +static int pf_save_vf_guc_mig_data(struct xe_gt *gt, unsigned int vfid)
> {
> - xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
> - return >->sriov.pf.migration.snapshot_lock;
> -}
> -
> -static struct xe_gt_sriov_state_snapshot *pf_pick_vf_snapshot(struct xe_gt *gt,
> - unsigned int vfid)
> -{
> - xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
> - xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));
> - lockdep_assert_held(pf_migration_mutex(gt));
> -
> - return >->sriov.pf.vfs[vfid].snapshot;
> -}
> -
> -static unsigned int pf_snapshot_index(struct xe_gt *gt, struct xe_gt_sriov_state_snapshot *snapshot)
> -{
> - return container_of(snapshot, struct xe_gt_sriov_metadata, snapshot) - gt->sriov.pf.vfs;
> -}
> -
> -static void pf_free_guc_state(struct xe_gt *gt, struct xe_gt_sriov_state_snapshot *snapshot)
> -{
> - struct xe_device *xe = gt_to_xe(gt);
> -
> - drmm_kfree(&xe->drm, snapshot->guc.buff);
> - snapshot->guc.buff = NULL;
> - snapshot->guc.size = 0;
> -}
> -
> -static int pf_alloc_guc_state(struct xe_gt *gt,
> - struct xe_gt_sriov_state_snapshot *snapshot,
> - size_t size)
> -{
> - struct xe_device *xe = gt_to_xe(gt);
> - void *p;
> -
> - pf_free_guc_state(gt, snapshot);
> -
> - if (!size)
> - return -ENODATA;
> -
> - if (size % sizeof(u32))
> - return -EINVAL;
> -
> - if (size > SZ_2M)
> - return -EFBIG;
> -
> - p = drmm_kzalloc(&xe->drm, size, GFP_KERNEL);
> - if (!p)
> - return -ENOMEM;
> -
> - snapshot->guc.buff = p;
> - snapshot->guc.size = size;
> - return 0;
> -}
> -
> -static void pf_dump_guc_state(struct xe_gt *gt, struct xe_gt_sriov_state_snapshot *snapshot)
> -{
> - if (IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV)) {
> - unsigned int vfid __maybe_unused = pf_snapshot_index(gt, snapshot);
> -
> - xe_gt_sriov_dbg_verbose(gt, "VF%u GuC state is %zu dwords:\n",
> - vfid, snapshot->guc.size / sizeof(u32));
> - print_hex_dump_bytes("state: ", DUMP_PREFIX_OFFSET,
> - snapshot->guc.buff, min(SZ_64, snapshot->guc.size));
> - }
> -}
> -
> -static int pf_save_vf_guc_state(struct xe_gt *gt, unsigned int vfid)
> -{
> - struct xe_gt_sriov_state_snapshot *snapshot = pf_pick_vf_snapshot(gt, vfid);
> + struct xe_sriov_migration_data *data;
> size_t size;
> int ret;
>
> - ret = pf_send_guc_query_vf_state_size(gt, vfid);
> + ret = pf_send_guc_query_vf_mig_data_size(gt, vfid);
> if (ret < 0)
> goto fail;
> +
> size = ret * sizeof(u32);
> - xe_gt_sriov_dbg_verbose(gt, "VF%u state size is %d dwords (%zu bytes)\n", vfid, ret, size);
>
> - ret = pf_alloc_guc_state(gt, snapshot, size);
> - if (ret < 0)
> + data = xe_sriov_migration_data_alloc(gt_to_xe(gt));
> + if (!data) {
> + ret = -ENOMEM;
> goto fail;
> + }
> +
> + ret = xe_sriov_migration_data_init(data, gt->tile->id, gt->info.id,
> + XE_SRIOV_MIGRATION_DATA_TYPE_GUC, 0, size);
> + if (ret)
> + goto fail_free;
>
> - ret = pf_send_guc_save_vf_state(gt, vfid, snapshot->guc.buff, size);
> + ret = pf_send_guc_save_vf_mig_data(gt, vfid, data->vaddr, size);
> if (ret < 0)
> - goto fail;
> + goto fail_free;
> size = ret * sizeof(u32);
> xe_gt_assert(gt, size);
> - xe_gt_assert(gt, size <= snapshot->guc.size);
> - snapshot->guc.size = size;
> + xe_gt_assert(gt, size <= data->size);
> + data->size = size;
> + data->remaining = size;
> +
> + xe_gt_sriov_dbg_verbose(gt, "VF%u GuC data save (%zu bytes)\n", vfid, size);
> + pf_dump_mig_data(gt, vfid, data);
as already commented elsewhere, these two lines always appear together;
we can combine them into an improved pf_dump_mig_data(gt, vfid, data, what)
> +
> + ret = xe_gt_sriov_pf_migration_save_produce(gt, vfid, data);
> + if (ret)
> + goto fail_free;
>
> - pf_dump_guc_state(gt, snapshot);
> return 0;
>
> +fail_free:
> + xe_sriov_migration_data_free(data);
> fail:
> - xe_gt_sriov_dbg(gt, "Unable to save VF%u state (%pe)\n", vfid, ERR_PTR(ret));
> - pf_free_guc_state(gt, snapshot);
> + xe_gt_sriov_err(gt, "Failed to save VF%u GuC data (%pe)\n",
> + vfid, ERR_PTR(ret));
> return ret;
> }
>
> /**
> - * xe_gt_sriov_pf_migration_save_guc_state() - Take a GuC VF state snapshot.
> + * xe_gt_sriov_pf_migration_guc_size() - Get the size of VF GuC migration data.
> * @gt: the &xe_gt
> * @vfid: the VF identifier
> *
> * This function is for PF only.
> *
> - * Return: 0 on success or a negative error code on failure.
> + * Return: size in bytes or a negative error code on failure.
> */
> -int xe_gt_sriov_pf_migration_save_guc_state(struct xe_gt *gt, unsigned int vfid)
> +ssize_t xe_gt_sriov_pf_migration_guc_size(struct xe_gt *gt, unsigned int vfid)
> {
> - int err;
> + ssize_t size;
>
> xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
> xe_gt_assert(gt, vfid != PFID);
> @@ -236,37 +194,15 @@ int xe_gt_sriov_pf_migration_save_guc_state(struct xe_gt *gt, unsigned int vfid)
> if (!pf_migration_supported(gt))
> return -ENOPKG;
>
> - mutex_lock(pf_migration_mutex(gt));
> - err = pf_save_vf_guc_state(gt, vfid);
> - mutex_unlock(pf_migration_mutex(gt));
> -
> - return err;
> -}
> -
> -static int pf_restore_vf_guc_state(struct xe_gt *gt, unsigned int vfid)
> -{
> - struct xe_gt_sriov_state_snapshot *snapshot = pf_pick_vf_snapshot(gt, vfid);
> - int ret;
> -
> - if (!snapshot->guc.size)
> - return -ENODATA;
> -
> - xe_gt_sriov_dbg_verbose(gt, "restoring %zu dwords of VF%u GuC state\n",
> - snapshot->guc.size / sizeof(u32), vfid);
> - ret = pf_send_guc_restore_vf_state(gt, vfid, snapshot->guc.buff, snapshot->guc.size);
> - if (ret < 0)
> - goto fail;
> -
> - xe_gt_sriov_dbg_verbose(gt, "restored %d dwords of VF%u GuC state\n", ret, vfid);
> - return 0;
> + size = pf_send_guc_query_vf_mig_data_size(gt, vfid);
> + if (size >= 0)
> + size *= sizeof(u32);
>
> -fail:
> - xe_gt_sriov_dbg(gt, "Failed to restore VF%u GuC state (%pe)\n", vfid, ERR_PTR(ret));
> - return ret;
> + return size;
> }
>
> /**
> - * xe_gt_sriov_pf_migration_restore_guc_state() - Restore a GuC VF state.
> + * xe_gt_sriov_pf_migration_guc_save() - Save VF GuC migration data.
> * @gt: the &xe_gt
> * @vfid: the VF identifier
> *
> @@ -274,10 +210,8 @@ static int pf_restore_vf_guc_state(struct xe_gt *gt, unsigned int vfid)
> *
> * Return: 0 on success or a negative error code on failure.
> */
> -int xe_gt_sriov_pf_migration_restore_guc_state(struct xe_gt *gt, unsigned int vfid)
> +int xe_gt_sriov_pf_migration_guc_save(struct xe_gt *gt, unsigned int vfid)
> {
> - int ret;
> -
> xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
> xe_gt_assert(gt, vfid != PFID);
> xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));
> @@ -285,75 +219,43 @@ int xe_gt_sriov_pf_migration_restore_guc_state(struct xe_gt *gt, unsigned int vf
> if (!pf_migration_supported(gt))
> return -ENOPKG;
>
> - mutex_lock(pf_migration_mutex(gt));
> - ret = pf_restore_vf_guc_state(gt, vfid);
> - mutex_unlock(pf_migration_mutex(gt));
> -
> - return ret;
> + return pf_save_vf_guc_mig_data(gt, vfid);
> }
>
> -#ifdef CONFIG_DEBUG_FS
> -/**
> - * xe_gt_sriov_pf_migration_read_guc_state() - Read a GuC VF state.
> - * @gt: the &xe_gt
> - * @vfid: the VF identifier
> - * @buf: the user space buffer to read to
> - * @count: the maximum number of bytes to read
> - * @pos: the current position in the buffer
> - *
> - * This function is for PF only.
> - *
> - * This function reads up to @count bytes from the saved VF GuC state buffer
> - * at offset @pos into the user space address starting at @buf.
> - *
> - * Return: the number of bytes read or a negative error code on failure.
> - */
> -ssize_t xe_gt_sriov_pf_migration_read_guc_state(struct xe_gt *gt, unsigned int vfid,
> - char __user *buf, size_t count, loff_t *pos)
> +static int pf_restore_vf_guc_state(struct xe_gt *gt, unsigned int vfid,
> + struct xe_sriov_migration_data *data)
> {
> - struct xe_gt_sriov_state_snapshot *snapshot;
> - ssize_t ret;
> + int ret;
>
> - xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
> - xe_gt_assert(gt, vfid != PFID);
> - xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));
> + xe_gt_assert(gt, data->size);
>
> - if (!pf_migration_supported(gt))
> - return -ENOPKG;
> + xe_gt_sriov_dbg_verbose(gt, "VF%u GuC data restore (%llu bytes)\n", vfid, data->size);
> + pf_dump_mig_data(gt, vfid, data);
>
> - mutex_lock(pf_migration_mutex(gt));
> - snapshot = pf_pick_vf_snapshot(gt, vfid);
> - if (snapshot->guc.size)
> - ret = simple_read_from_buffer(buf, count, pos, snapshot->guc.buff,
> - snapshot->guc.size);
> - else
> - ret = -ENODATA;
> - mutex_unlock(pf_migration_mutex(gt));
> + ret = pf_send_guc_restore_vf_mig_data(gt, vfid, data->vaddr, data->size);
> + if (ret < 0)
> + goto fail;
> +
> + return 0;
>
> +fail:
> + xe_gt_sriov_err(gt, "Failed to restore VF%u GuC data (%pe)\n",
> + vfid, ERR_PTR(ret));
> return ret;
> }
>
> /**
> - * xe_gt_sriov_pf_migration_write_guc_state() - Write a GuC VF state.
> + * xe_gt_sriov_pf_migration_guc_restore() - Restore VF GuC migration data.
> * @gt: the &xe_gt
> * @vfid: the VF identifier
> - * @buf: the user space buffer with GuC VF state
> - * @size: the size of GuC VF state (in bytes)
> *
> * This function is for PF only.
> *
> - * This function reads @size bytes of the VF GuC state stored at user space
> - * address @buf and writes it into a internal VF state buffer.
> - *
> - * Return: the number of bytes used or a negative error code on failure.
> + * Return: 0 on success or a negative error code on failure.
> */
> -ssize_t xe_gt_sriov_pf_migration_write_guc_state(struct xe_gt *gt, unsigned int vfid,
> - const char __user *buf, size_t size)
> +int xe_gt_sriov_pf_migration_guc_restore(struct xe_gt *gt, unsigned int vfid,
> + struct xe_sriov_migration_data *data)
> {
> - struct xe_gt_sriov_state_snapshot *snapshot;
> - loff_t pos = 0;
> - ssize_t ret;
> -
> xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
> xe_gt_assert(gt, vfid != PFID);
> xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));
> @@ -361,21 +263,8 @@ ssize_t xe_gt_sriov_pf_migration_write_guc_state(struct xe_gt *gt, unsigned int
> if (!pf_migration_supported(gt))
> return -ENOPKG;
>
> - mutex_lock(pf_migration_mutex(gt));
> - snapshot = pf_pick_vf_snapshot(gt, vfid);
> - ret = pf_alloc_guc_state(gt, snapshot, size);
> - if (!ret) {
> - ret = simple_write_to_buffer(snapshot->guc.buff, size, &pos, buf, size);
> - if (ret < 0)
> - pf_free_guc_state(gt, snapshot);
> - else
> - pf_dump_guc_state(gt, snapshot);
> - }
> - mutex_unlock(pf_migration_mutex(gt));
> -
> - return ret;
> + return pf_restore_vf_guc_state(gt, vfid, data);
> }
> -#endif /* CONFIG_DEBUG_FS */
>
> /**
> * xe_gt_sriov_pf_migration_size() - Total size of migration data from all components within a GT.
> @@ -599,10 +488,6 @@ int xe_gt_sriov_pf_migration_init(struct xe_gt *gt)
> if (!pf_migration_supported(gt))
> return 0;
>
> - err = drmm_mutex_init(&xe->drm, >->sriov.pf.migration.snapshot_lock);
> - if (err)
> - return err;
> -
> totalvfs = xe_sriov_pf_get_totalvfs(xe);
> for (n = 1; n <= totalvfs; n++) {
> struct xe_gt_sriov_migration_data *migration = pf_pick_gt_migration(gt, n);
> diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.h
> index 4f2f2783339c3..b3c18e369df79 100644
> --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.h
> +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.h
> @@ -15,8 +15,10 @@ struct xe_sriov_migration_data;
> #define XE_GT_SRIOV_PF_MIGRATION_GUC_DATA_MAX_SIZE SZ_8M
>
> int xe_gt_sriov_pf_migration_init(struct xe_gt *gt);
> -int xe_gt_sriov_pf_migration_save_guc_state(struct xe_gt *gt, unsigned int vfid);
> -int xe_gt_sriov_pf_migration_restore_guc_state(struct xe_gt *gt, unsigned int vfid);
> +ssize_t xe_gt_sriov_pf_migration_guc_size(struct xe_gt *gt, unsigned int vfid);
> +int xe_gt_sriov_pf_migration_guc_save(struct xe_gt *gt, unsigned int vfid);
> +int xe_gt_sriov_pf_migration_guc_restore(struct xe_gt *gt, unsigned int vfid,
> + struct xe_sriov_migration_data *data);
>
> ssize_t xe_gt_sriov_pf_migration_size(struct xe_gt *gt, unsigned int vfid);
>
> @@ -34,11 +36,4 @@ int xe_gt_sriov_pf_migration_restore_produce(struct xe_gt *gt, unsigned int vfid
> struct xe_sriov_migration_data *
> xe_gt_sriov_pf_migration_save_consume(struct xe_gt *gt, unsigned int vfid);
>
> -#ifdef CONFIG_DEBUG_FS
> -ssize_t xe_gt_sriov_pf_migration_read_guc_state(struct xe_gt *gt, unsigned int vfid,
> - char __user *buf, size_t count, loff_t *pos);
> -ssize_t xe_gt_sriov_pf_migration_write_guc_state(struct xe_gt *gt, unsigned int vfid,
> - const char __user *buf, size_t count);
> -#endif
> -
> #endif
> diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration_types.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration_types.h
> index 84be6fac16c8b..75d8b94cbbefb 100644
> --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration_types.h
> +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration_types.h
> @@ -6,24 +6,7 @@
> #ifndef _XE_GT_SRIOV_PF_MIGRATION_TYPES_H_
> #define _XE_GT_SRIOV_PF_MIGRATION_TYPES_H_
>
> -#include <linux/mutex.h>
> #include <linux/ptr_ring.h>
> -#include <linux/types.h>
> -
> -/**
> - * struct xe_gt_sriov_state_snapshot - GT-level per-VF state snapshot data.
> - *
> - * Used by the PF driver to maintain per-VF migration data.
> - */
> -struct xe_gt_sriov_state_snapshot {
> - /** @guc: GuC VF state snapshot */
> - struct {
> - /** @guc.buff: buffer with the VF state */
> - u32 *buff;
> - /** @guc.size: size of the buffer (must be dwords aligned) */
> - u32 size;
> - } guc;
> -};
>
> /**
> * struct xe_gt_sriov_migration_data - GT-level per-VF migration data.
> @@ -35,14 +18,4 @@ struct xe_gt_sriov_migration_data {
> struct ptr_ring ring;
> };
>
> -/**
> - * struct xe_gt_sriov_pf_migration - GT-level data.
> - *
> - * Used by the PF driver to maintain non-VF specific per-GT data.
> - */
> -struct xe_gt_sriov_pf_migration {
> - /** @snapshot_lock: protects all VFs snapshots */
> - struct mutex snapshot_lock;
> -};
> -
> #endif
> diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_types.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_types.h
> index 812e74d3f8f80..667b8310478d4 100644
> --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_types.h
> +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_types.h
> @@ -31,9 +31,6 @@ struct xe_gt_sriov_metadata {
> /** @version: negotiated VF/PF ABI version */
> struct xe_gt_sriov_pf_service_version version;
>
> - /** @snapshot: snapshot of the VF state data */
> - struct xe_gt_sriov_state_snapshot snapshot;
> -
> /** @migration: per-VF migration data. */
> struct xe_gt_sriov_migration_data migration;
> };
> @@ -61,7 +58,6 @@ struct xe_gt_sriov_pf {
> struct xe_gt_sriov_pf_service service;
> struct xe_gt_sriov_pf_control control;
> struct xe_gt_sriov_pf_policy policy;
> - struct xe_gt_sriov_pf_migration migration;
> struct xe_gt_sriov_spare_config spare;
> struct xe_gt_sriov_metadata *vfs;
> };
otherwise LGTM, so with the dump helper improved
Reviewed-by: Michal Wajdeczko <michal.wajdeczko@intel.com>
next prev parent reply other threads:[~2025-11-03 18:30 UTC|newest]
Thread overview: 60+ messages / expand[flat|nested] mbox.gz Atom feed top
2025-10-30 20:31 [PATCH v3 00/28] vfio/xe: Add driver variant for Xe VF migration Michał Winiarski
2025-10-30 20:31 ` [PATCH v3 01/28] drm/xe/pf: Remove GuC version check for migration support Michał Winiarski
2025-10-30 20:31 ` [PATCH v3 02/28] drm/xe: Move migration support to device-level struct Michał Winiarski
2025-11-03 18:55 ` Michal Wajdeczko
2025-10-30 20:31 ` [PATCH v3 03/28] drm/xe/pf: Convert control state to bitmap Michał Winiarski
2025-10-30 22:57 ` Michal Wajdeczko
2025-10-31 7:50 ` Michał Winiarski
2025-10-30 20:31 ` [PATCH v3 04/28] drm/xe/pf: Add save/restore control state stubs and connect to debugfs Michał Winiarski
2025-10-30 20:31 ` [PATCH v3 05/28] drm/xe/pf: Add data structures and handlers for migration rings Michał Winiarski
2025-10-31 16:17 ` Michal Wajdeczko
2025-11-04 10:25 ` Michał Winiarski
2025-10-30 20:31 ` [PATCH v3 06/28] drm/xe/pf: Add helpers for migration data allocation / free Michał Winiarski
2025-10-30 20:31 ` [PATCH v3 07/28] drm/xe/pf: Add support for encap/decap of bitstream to/from packet Michał Winiarski
2025-10-31 16:31 ` Michal Wajdeczko
2025-11-04 11:16 ` Michał Winiarski
2025-10-30 20:31 ` [PATCH v3 08/28] drm/xe/pf: Add minimalistic migration descriptor Michał Winiarski
2025-10-31 16:41 ` Michal Wajdeczko
2025-10-30 20:31 ` [PATCH v3 09/28] drm/xe/pf: Expose VF migration data size over debugfs Michał Winiarski
2025-10-30 20:31 ` [PATCH v3 10/28] drm/xe: Add sa/guc_buf_cache sync interface Michał Winiarski
2025-10-30 20:31 ` [PATCH v3 11/28] drm/xe: Allow the caller to pass guc_buf_cache size Michał Winiarski
2025-10-30 20:31 ` [PATCH v3 12/28] drm/xe/pf: Increase PF GuC Buffer Cache size and use it for VF migration Michał Winiarski
2025-10-31 16:48 ` Michal Wajdeczko
2025-10-30 20:31 ` [PATCH v3 13/28] drm/xe/pf: Remove GuC migration data save/restore from GT debugfs Michał Winiarski
2025-10-30 20:31 ` [PATCH v3 14/28] drm/xe/pf: Don't save GuC VF migration data on pause Michał Winiarski
2025-10-30 20:31 ` [PATCH v3 15/28] drm/xe/pf: Switch VF migration GuC save/restore to struct migration data Michał Winiarski
2025-11-03 18:30 ` Michal Wajdeczko [this message]
2025-10-30 20:31 ` [PATCH v3 16/28] drm/xe/pf: Handle GuC migration data as part of PF control Michał Winiarski
2025-10-31 18:15 ` Michal Wajdeczko
2025-11-04 11:55 ` Michał Winiarski
2025-10-30 20:31 ` [PATCH v3 17/28] drm/xe/pf: Add helpers for VF GGTT migration data handling Michał Winiarski
2025-10-31 16:59 ` Michal Wajdeczko
2025-10-30 20:31 ` [PATCH v3 18/28] drm/xe/pf: Handle GGTT migration data as part of PF control Michał Winiarski
2025-10-31 18:26 ` Michal Wajdeczko
2025-11-04 12:12 ` Michał Winiarski
2025-10-30 20:31 ` [PATCH v3 19/28] drm/xe/pf: Handle MMIO " Michał Winiarski
2025-10-31 18:39 ` Michal Wajdeczko
2025-11-04 12:29 ` Michał Winiarski
2025-10-30 20:31 ` [PATCH v3 20/28] drm/xe/pf: Add helper to retrieve VF's LMEM object Michał Winiarski
2025-10-30 20:31 ` [PATCH v3 21/28] drm/xe/migrate: Add function to copy of VRAM data in chunks Michał Winiarski
2025-11-03 22:29 ` Matthew Brost
2025-10-30 20:31 ` [PATCH v3 22/28] drm/xe/pf: Handle VRAM migration data as part of PF control Michał Winiarski
2025-11-03 22:37 ` Matthew Brost
2025-11-04 12:39 ` Michał Winiarski
2025-10-30 20:31 ` [PATCH v3 23/28] drm/xe/pf: Add wait helper for VF FLR Michał Winiarski
2025-10-30 20:31 ` [PATCH v3 24/28] drm/xe/pf: Enable SR-IOV VF migration Michał Winiarski
2025-10-31 17:06 ` Michal Wajdeczko
2025-10-30 20:31 ` [PATCH v3 25/28] drm/xe/pci: Introduce a helper to allow VF access to PF xe_device Michał Winiarski
2025-10-31 17:39 ` Michal Wajdeczko
2025-10-30 20:31 ` [PATCH v3 26/28] drm/xe/pf: Export helpers for VFIO Michał Winiarski
2025-10-30 20:31 ` [PATCH v3 27/28] drm/intel/pciids: Add match with VFIO override Michał Winiarski
2025-11-03 21:30 ` Lucas De Marchi
2025-11-04 12:59 ` Michał Winiarski
2025-11-04 17:41 ` Lucas De Marchi
2025-11-04 19:27 ` Jason Gunthorpe
2025-11-05 15:20 ` Michał Winiarski
2025-11-05 17:15 ` Jason Gunthorpe
2025-11-05 17:42 ` Lucas De Marchi
2025-10-30 20:31 ` [PATCH v3 28/28] vfio/xe: Add device specific vfio_pci driver variant for Intel graphics Michał Winiarski
2025-11-07 9:38 ` Muqthyar Ahmed, Syed Abdul
2025-11-07 9:54 ` Winiarski, Michal
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=efde06ef-1ce4-4bc7-a0ca-01a27b697ae0@intel.com \
--to=michal.wajdeczko@intel.com \
--cc=airlied@gmail.com \
--cc=alex@shazbot.org \
--cc=dri-devel@lists.freedesktop.org \
--cc=hch@infradead.org \
--cc=intel-xe@lists.freedesktop.org \
--cc=jani.nikula@linux.intel.com \
--cc=jgg@ziepe.ca \
--cc=joonas.lahtinen@linux.intel.com \
--cc=kevin.tian@intel.com \
--cc=kvm@vger.kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=lucas.demarchi@intel.com \
--cc=lukasz.laguna@intel.com \
--cc=matthew.brost@intel.com \
--cc=michal.winiarski@intel.com \
--cc=rodrigo.vivi@intel.com \
--cc=simona@ffwll.ch \
--cc=skolothumtho@nvidia.com \
--cc=thomas.hellstrom@linux.intel.com \
--cc=tursulin@ursulin.net \
--cc=yishaih@nvidia.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox