From: Michal Wajdeczko <michal.wajdeczko@intel.com>
To: "Michał Winiarski" <michal.winiarski@intel.com>,
"Alex Williamson" <alex@shazbot.org>,
"Lucas De Marchi" <lucas.demarchi@intel.com>,
"Thomas Hellström" <thomas.hellstrom@linux.intel.com>,
"Rodrigo Vivi" <rodrigo.vivi@intel.com>,
"Jason Gunthorpe" <jgg@ziepe.ca>,
"Yishai Hadas" <yishaih@nvidia.com>,
"Kevin Tian" <kevin.tian@intel.com>,
"Shameer Kolothum" <skolothumtho@nvidia.com>,
intel-xe@lists.freedesktop.org, linux-kernel@vger.kernel.org,
kvm@vger.kernel.org, "Matthew Brost" <matthew.brost@intel.com>
Cc: <dri-devel@lists.freedesktop.org>,
Jani Nikula <jani.nikula@linux.intel.com>,
Joonas Lahtinen <joonas.lahtinen@linux.intel.com>,
Tvrtko Ursulin <tursulin@ursulin.net>,
David Airlie <airlied@gmail.com>, Simona Vetter <simona@ffwll.ch>,
"Lukasz Laguna" <lukasz.laguna@intel.com>,
Christoph Hellwig <hch@infradead.org>
Subject: Re: [PATCH v3 19/28] drm/xe/pf: Handle MMIO migration data as part of PF control
Date: Fri, 31 Oct 2025 19:39:58 +0100 [thread overview]
Message-ID: <63446059-7af2-45b8-906b-4ba0688ed0d4@intel.com> (raw)
In-Reply-To: <20251030203135.337696-20-michal.winiarski@intel.com>
On 10/30/2025 9:31 PM, Michał Winiarski wrote:
> Implement the helpers and use them for save and restore of MMIO
> migration data in stop_copy / resume device state.
>
> Signed-off-by: Michał Winiarski <michal.winiarski@intel.com>
> ---
>  drivers/gpu/drm/xe/xe_gt_sriov_pf.h           |   2 +
>  drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c   |  13 ++
>  drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c | 158 ++++++++++++++++++
>  drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.h |   3 +
>  4 files changed, 176 insertions(+)
>
> diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf.h
> index e7fde3f9937af..c0dcbb1054e4d 100644
> --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf.h
> +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf.h
> @@ -6,6 +6,8 @@
> #ifndef _XE_GT_SRIOV_PF_H_
> #define _XE_GT_SRIOV_PF_H_
>
> +#include <linux/types.h>
? nothing that this patch adds to this header seems to need this include
> +
> struct xe_gt;
>
> #ifdef CONFIG_PCI_IOV
> diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c
> index e7ea9b88fd246..7cd7cae950bc7 100644
> --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c
> +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c
> @@ -870,6 +870,16 @@ static int pf_handle_vf_save_data(struct xe_gt *gt, unsigned int vfid)
>  		return -EAGAIN;
>  	}
> 
> +	if (xe_gt_sriov_pf_migration_save_test(gt, vfid, XE_SRIOV_MIGRATION_DATA_TYPE_MMIO)) {
> +		ret = xe_gt_sriov_pf_migration_mmio_save(gt, vfid);
> +		if (ret)
> +			return ret;
> +
> +		xe_gt_sriov_pf_migration_save_clear(gt, vfid, XE_SRIOV_MIGRATION_DATA_TYPE_MMIO);
> +
> +		return -EAGAIN;
> +	}
> +
>  	return 0;
>  }
>
> @@ -1079,6 +1089,9 @@ static int pf_handle_vf_restore_data(struct xe_gt *gt, unsigned int vfid)
>  	case XE_SRIOV_MIGRATION_DATA_TYPE_GGTT:
>  		ret = xe_gt_sriov_pf_migration_ggtt_restore(gt, vfid, data);
>  		break;
> +	case XE_SRIOV_MIGRATION_DATA_TYPE_MMIO:
> +		ret = xe_gt_sriov_pf_migration_mmio_restore(gt, vfid, data);
> +		break;
>  	case XE_SRIOV_MIGRATION_DATA_TYPE_GUC:
>  		ret = xe_gt_sriov_pf_migration_guc_restore(gt, vfid, data);
>  		break;
> diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c
> index 6f2ee5820bdd4..5e90aeafeeb41 100644
> --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c
> +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c
> @@ -5,10 +5,13 @@
>
> #include <drm/drm_managed.h>
>
> +#include "regs/xe_guc_regs.h"
> +
> #include "abi/guc_actions_sriov_abi.h"
> #include "xe_bo.h"
> #include "xe_ggtt.h"
> #include "xe_gt.h"
> +#include "xe_gt_sriov_pf.h"
> #include "xe_gt_sriov_pf_config.h"
> #include "xe_gt_sriov_pf_control.h"
> #include "xe_gt_sriov_pf_helpers.h"
> @@ -16,6 +19,7 @@
> #include "xe_gt_sriov_printk.h"
> #include "xe_guc_buf.h"
> #include "xe_guc_ct.h"
> +#include "xe_mmio.h"
> #include "xe_sriov.h"
> #include "xe_sriov_migration_data.h"
> #include "xe_sriov_pf_migration.h"
> @@ -357,6 +361,150 @@ int xe_gt_sriov_pf_migration_guc_restore(struct xe_gt *gt, unsigned int vfid,
>  	return pf_restore_vf_guc_state(gt, vfid, data);
>  }
> 
> +static ssize_t pf_migration_mmio_size(struct xe_gt *gt, unsigned int vfid)
> +{
> +	if (xe_gt_is_media_type(gt))
> +		return MED_VF_SW_FLAG_COUNT * sizeof(u32);
> +	else
> +		return VF_SW_FLAG_COUNT * sizeof(u32);
> +}
> +
> +static int pf_migration_mmio_save(struct xe_gt *gt, unsigned int vfid, void *buf, size_t size)
> +{
> +	struct xe_mmio mmio;
> +	u32 *regs = buf;
> +	int n;
> +
> +	xe_mmio_init_vf_view(&mmio, &gt->mmio, vfid);
> +
> +	if (size != pf_migration_mmio_size(gt, vfid))
> +		return -EINVAL;
you may want to check that first (before init vf view)
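i.e. just the same statements in the other order, something like:

	if (size != pf_migration_mmio_size(gt, vfid))
		return -EINVAL;

	xe_mmio_init_vf_view(&mmio, &gt->mmio, vfid);

no point in preparing the VF view just to bail out on the size check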
> +
> +	if (xe_gt_is_media_type(gt))
> +		for (n = 0; n < MED_VF_SW_FLAG_COUNT; n++)
> +			regs[n] = xe_mmio_read32(&gt->mmio, MED_VF_SW_FLAG(n));
> +	else
> +		for (n = 0; n < VF_SW_FLAG_COUNT; n++)
> +			regs[n] = xe_mmio_read32(&gt->mmio, VF_SW_FLAG(n));
> +
> +	return 0;
> +}
> +
> +static int pf_migration_mmio_restore(struct xe_gt *gt, unsigned int vfid,
> +				     const void *buf, size_t size)
> +{
> +	const u32 *regs = buf;
> +	struct xe_mmio mmio;
> +	int n;
> +
> +	xe_mmio_init_vf_view(&mmio, &gt->mmio, vfid);
> +
> +	if (size != pf_migration_mmio_size(gt, vfid))
> +		return -EINVAL;
ditto
> +
> +	if (xe_gt_is_media_type(gt))
> +		for (n = 0; n < MED_VF_SW_FLAG_COUNT; n++)
> +			xe_mmio_write32(&gt->mmio, MED_VF_SW_FLAG(n), regs[n]);
> +	else
> +		for (n = 0; n < VF_SW_FLAG_COUNT; n++)
> +			xe_mmio_write32(&gt->mmio, VF_SW_FLAG(n), regs[n]);
> +
> +	return 0;
> +}
> +
> +static int pf_save_vf_mmio_mig_data(struct xe_gt *gt, unsigned int vfid)
> +{
> +	struct xe_sriov_migration_data *data;
> +	size_t size;
> +	int ret;
> +
> +	size = pf_migration_mmio_size(gt, vfid);
> +	xe_gt_assert(gt, size);
> +
> +	data = xe_sriov_migration_data_alloc(gt_to_xe(gt));
> +	if (!data)
> +		return -ENOMEM;
> +
> +	ret = xe_sriov_migration_data_init(data, gt->tile->id, gt->info.id,
> +					   XE_SRIOV_MIGRATION_DATA_TYPE_MMIO, 0, size);
> +	if (ret)
> +		goto fail;
> +
> +	ret = pf_migration_mmio_save(gt, vfid, data->vaddr, size);
> +	if (ret)
> +		goto fail;
> +
> +	xe_gt_sriov_dbg_verbose(gt, "VF%u MMIO data save (%zu bytes)\n", vfid, size);
maybe make this part of pf_dump_mig_data() instead, e.g.:
pf_dump_mig_data(gt, vfid, data, "MMIO saved");
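with pf_dump_mig_data() itself taking the extra string, roughly (just a sketch,
the existing dump of the data contents would stay as is):

static void pf_dump_mig_data(struct xe_gt *gt, unsigned int vfid,
			     struct xe_sriov_migration_data *data,
			     const char *what)
{
	xe_gt_sriov_dbg_verbose(gt, "VF%u %s (%llu bytes)\n", vfid, what, data->size);

	/* ... existing dump of the migration data stays unchanged ... */
}

then the separate xe_gt_sriov_dbg_verbose() calls in this patch could go away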
> +	pf_dump_mig_data(gt, vfid, data);
> +
> +	ret = xe_gt_sriov_pf_migration_save_produce(gt, vfid, data);
> +	if (ret)
> +		goto fail;
> +
> +	return 0;
> +
> +fail:
> +	xe_sriov_migration_data_free(data);
> +	xe_gt_sriov_err(gt, "Failed to save VF%u MMIO data (%pe)\n", vfid, ERR_PTR(ret));
> +	return ret;
> +}
> +
> +static int pf_restore_vf_mmio_mig_data(struct xe_gt *gt, unsigned int vfid,
> +				       struct xe_sriov_migration_data *data)
> +{
> +	int ret;
> +
> +	xe_gt_sriov_dbg_verbose(gt, "VF%u MMIO data restore (%llu bytes)\n", vfid, data->size);
and here:
pf_dump_mig_data(gt, vfid, data, "restoring MMIO");
> +	pf_dump_mig_data(gt, vfid, data);
> +
> +	ret = pf_migration_mmio_restore(gt, vfid, data->vaddr, data->size);
> +	if (ret) {
> +		xe_gt_sriov_err(gt, "Failed to restore VF%u MMIO data (%pe)\n",
> +				vfid, ERR_PTR(ret));
> +
> +		return ret;
> +	}
> +
> +	return 0;
> +}
> +
> +/**
> + * xe_gt_sriov_pf_migration_mmio_save() - Save VF MMIO migration data.
> + * @gt: the &xe_gt
> + * @vfid: the VF identifier (can't be 0)
> + *
> + * This function is for PF only.
> + *
> + * Return: 0 on success or a negative error code on failure.
> + */
> +int xe_gt_sriov_pf_migration_mmio_save(struct xe_gt *gt, unsigned int vfid)
> +{
> +	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
> +	xe_gt_assert(gt, vfid != PFID);
> +	xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));
> +
> +	return pf_save_vf_mmio_mig_data(gt, vfid);
> +}
> +
> +/**
> + * xe_gt_sriov_pf_migration_mmio_restore() - Restore VF MMIO migration data.
> + * @gt: the &xe_gt
> + * @vfid: the VF identifier (can't be 0)
> + *
> + * This function is for PF only.
> + *
> + * Return: 0 on success or a negative error code on failure.
> + */
> +int xe_gt_sriov_pf_migration_mmio_restore(struct xe_gt *gt, unsigned int vfid,
> +					  struct xe_sriov_migration_data *data)
> +{
> +	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
> +	xe_gt_assert(gt, vfid != PFID);
> +	xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));
> +
> +	return pf_restore_vf_mmio_mig_data(gt, vfid, data);
> +}
> +
> /**
> * xe_gt_sriov_pf_migration_size() - Total size of migration data from all components within a GT.
> * @gt: the &xe_gt
> @@ -389,6 +537,13 @@ ssize_t xe_gt_sriov_pf_migration_size(struct xe_gt *gt, unsigned int vfid)
> size += sizeof(struct xe_sriov_pf_migration_hdr);
> total += size;
>
> +	size = pf_migration_mmio_size(gt, vfid);
> +	if (size < 0)
> +		return size;
> +	if (size > 0)
> +		size += sizeof(struct xe_sriov_pf_migration_hdr);
> +	total += size;
> +
> return total;
> }
>
> @@ -453,6 +608,9 @@ void xe_gt_sriov_pf_migration_save_init(struct xe_gt *gt, unsigned int vfid)
>
>  	if (pf_migration_ggtt_size(gt, vfid) > 0)
>  		set_bit(XE_SRIOV_MIGRATION_DATA_TYPE_GGTT, &migration->save.data_remaining);
> +
> +	xe_gt_assert(gt, pf_migration_mmio_size(gt, vfid) > 0);
> +	set_bit(XE_SRIOV_MIGRATION_DATA_TYPE_MMIO, &migration->save.data_remaining);
> }
>
> /**
> diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.h
> index bc201d8f3147a..b0eec94fea3a6 100644
> --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.h
> +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.h
> @@ -22,6 +22,9 @@ int xe_gt_sriov_pf_migration_guc_restore(struct xe_gt *gt, unsigned int vfid,
> int xe_gt_sriov_pf_migration_ggtt_save(struct xe_gt *gt, unsigned int vfid);
> int xe_gt_sriov_pf_migration_ggtt_restore(struct xe_gt *gt, unsigned int vfid,
>  					   struct xe_sriov_migration_data *data);
> +int xe_gt_sriov_pf_migration_mmio_save(struct xe_gt *gt, unsigned int vfid);
> +int xe_gt_sriov_pf_migration_mmio_restore(struct xe_gt *gt, unsigned int vfid,
> +					  struct xe_sriov_migration_data *data);
>
> ssize_t xe_gt_sriov_pf_migration_size(struct xe_gt *gt, unsigned int vfid);
>
but the patch looks OK, so with the #include fixed (and maybe with a better dump helper),
Reviewed-by: Michal Wajdeczko <michal.wajdeczko@intel.com>