From: Michal Wajdeczko <michal.wajdeczko@intel.com>
To: "Michał Winiarski" <michal.winiarski@intel.com>,
"Alex Williamson" <alex.williamson@redhat.com>,
"Lucas De Marchi" <lucas.demarchi@intel.com>,
"Thomas Hellström" <thomas.hellstrom@linux.intel.com>,
"Rodrigo Vivi" <rodrigo.vivi@intel.com>,
"Jason Gunthorpe" <jgg@ziepe.ca>,
"Yishai Hadas" <yishaih@nvidia.com>,
"Kevin Tian" <kevin.tian@intel.com>,
"Shameer Kolothum" <shameerali.kolothum.thodi@huawei.com>,
intel-xe@lists.freedesktop.org, linux-kernel@vger.kernel.org,
kvm@vger.kernel.org
Cc: <dri-devel@lists.freedesktop.org>,
Matthew Brost <matthew.brost@intel.com>,
Jani Nikula <jani.nikula@linux.intel.com>,
"Joonas Lahtinen" <joonas.lahtinen@linux.intel.com>,
Tvrtko Ursulin <tursulin@ursulin.net>,
David Airlie <airlied@gmail.com>, Simona Vetter <simona@ffwll.ch>,
Lukasz Laguna <lukasz.laguna@intel.com>
Subject: Re: [PATCH 18/26] drm/xe/pf: Handle GGTT migration data as part of PF control
Date: Mon, 13 Oct 2025 14:36:56 +0200 [thread overview]
Message-ID: <07d33e0b-0078-4075-bc70-e09a8ec17a97@intel.com> (raw)
In-Reply-To: <20251011193847.1836454-19-michal.winiarski@intel.com>
On 10/11/2025 9:38 PM, Michał Winiarski wrote:
> Connect the helpers to allow save and restore of GGTT migration data in
> stop_copy / resume device state.
>
> Signed-off-by: Michał Winiarski <michal.winiarski@intel.com>
> ---
> drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c | 13 ++
> .../gpu/drm/xe/xe_gt_sriov_pf_control_types.h | 1 +
> drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c | 119 ++++++++++++++++++
> drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.h | 4 +
> 4 files changed, 137 insertions(+)
>
> diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c
> index f73a3bf40037c..a74f6feca4830 100644
> --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c
> +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c
> @@ -188,6 +188,7 @@ static const char *control_bit_to_string(enum xe_gt_sriov_control_bits bit)
> CASE2STR(MIGRATION_DATA_WIP);
> CASE2STR(SAVE_WIP);
> CASE2STR(SAVE_DATA_GUC);
> + CASE2STR(SAVE_DATA_GGTT);
> CASE2STR(SAVE_FAILED);
> CASE2STR(SAVED);
> CASE2STR(RESTORE_WIP);
> @@ -803,6 +804,7 @@ void xe_gt_sriov_pf_control_vf_data_eof(struct xe_gt *gt, unsigned int vfid)
>
> static void pf_exit_vf_save_wip(struct xe_gt *gt, unsigned int vfid)
> {
> + pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_DATA_GGTT);
> pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_DATA_GUC);
> pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_WIP);
> }
> @@ -843,6 +845,13 @@ static bool pf_handle_vf_save_wip(struct xe_gt *gt, unsigned int vfid)
> return true;
> }
>
> + if (pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_DATA_GGTT)) {
> + ret = xe_gt_sriov_pf_migration_ggtt_save(gt, vfid);
> + if (ret)
> + goto err;
> + return true;
> + }
> +
> xe_gt_sriov_pf_control_vf_data_eof(gt, vfid);
> pf_exit_vf_save_wip(gt, vfid);
> pf_enter_vf_saved(gt, vfid);
> @@ -862,6 +871,8 @@ static bool pf_enter_vf_save_wip(struct xe_gt *gt, unsigned int vfid)
> pf_enter_vf_wip(gt, vfid);
> if (xe_gt_sriov_pf_migration_guc_size(gt, vfid) > 0)
> pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_DATA_GUC);
> + if (xe_gt_sriov_pf_migration_ggtt_size(gt, vfid) > 0)
> + pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_DATA_GGTT);
> pf_queue_vf(gt, vfid);
> return true;
> }
> @@ -970,6 +981,8 @@ static int pf_handle_vf_restore_data(struct xe_gt *gt, unsigned int vfid,
> struct xe_sriov_pf_migration_data *data)
> {
> switch (data->type) {
> + case XE_SRIOV_MIG_DATA_GGTT:
> + return xe_gt_sriov_pf_migration_ggtt_restore(gt, vfid, data);
> case XE_SRIOV_MIG_DATA_GUC:
> return xe_gt_sriov_pf_migration_guc_restore(gt, vfid, data);
> default:
> diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control_types.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control_types.h
> index b9787c425d9f6..c94ff0258306a 100644
> --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control_types.h
> +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control_types.h
> @@ -72,6 +72,7 @@ enum xe_gt_sriov_control_bits {
>
> XE_GT_SRIOV_STATE_SAVE_WIP,
> XE_GT_SRIOV_STATE_SAVE_DATA_GUC,
> + XE_GT_SRIOV_STATE_SAVE_DATA_GGTT,
> XE_GT_SRIOV_STATE_SAVE_FAILED,
> XE_GT_SRIOV_STATE_SAVED,
>
> diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c
> index 0c10284f0b09a..92ecf47e71bc7 100644
> --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c
> +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c
> @@ -7,6 +7,7 @@
>
> #include "abi/guc_actions_sriov_abi.h"
> #include "xe_bo.h"
> +#include "xe_gt_sriov_pf_config.h"
> #include "xe_gt_sriov_pf_control.h"
> #include "xe_gt_sriov_pf_helpers.h"
> #include "xe_gt_sriov_pf_migration.h"
> @@ -37,6 +38,117 @@ static void pf_dump_mig_data(struct xe_gt *gt, unsigned int vfid,
> }
> }
>
> +static int pf_save_vf_ggtt_mig_data(struct xe_gt *gt, unsigned int vfid)
> +{
> + struct xe_sriov_pf_migration_data *data;
> + size_t size;
> + int ret;
> +
> + size = xe_gt_sriov_pf_config_get_ggtt(gt, vfid);
> + if (size == 0)
> + return 0;
> + size = size / XE_PAGE_SIZE * sizeof(u64);
maybe it would be better to avoid reusing the variable and have two separate ones:
u64 alloc_size = xe_gt_sriov_pf_config_get_ggtt(...);
u64 pte_size = xe_ggtt_pte_size(alloc_size);
> +
> + data = xe_sriov_pf_migration_data_alloc(gt_to_xe(gt));
> + if (!data)
> + return -ENOMEM;
> +
> + ret = xe_sriov_pf_migration_data_init(data, gt->tile->id, gt->info.id,
> + XE_SRIOV_MIG_DATA_GGTT, 0, size);
> + if (ret)
> + goto fail;
> +
> + ret = xe_gt_sriov_pf_config_ggtt_save(gt, vfid, data->vaddr, size);
> + if (ret)
> + goto fail;
> +
> + pf_dump_mig_data(gt, vfid, data);
> +
> + ret = xe_gt_sriov_pf_migration_ring_produce(gt, vfid, data);
> + if (ret)
> + goto fail;
> +
> + return 0;
> +
> +fail:
> + xe_sriov_pf_migration_data_free(data);
> + xe_gt_sriov_err(gt, "Unable to save VF%u GGTT data (%d)\n", vfid, ret);
use %pe (with ERR_PTR(ret)) for printing errors
> + return ret;
> +}
> +
> +static int pf_restore_vf_ggtt_mig_data(struct xe_gt *gt, unsigned int vfid,
> + struct xe_sriov_pf_migration_data *data)
> +{
> + size_t size;
> + int ret;
> +
> + size = xe_gt_sriov_pf_config_get_ggtt(gt, vfid) / XE_PAGE_SIZE * sizeof(u64);
> + if (size != data->hdr.size)
> + return -EINVAL;
do we need this?
there seems to be a similar check in xe_ggtt_node_load(), called by restore() below
> +
> + pf_dump_mig_data(gt, vfid, data);
> +
> + ret = xe_gt_sriov_pf_config_ggtt_restore(gt, vfid, data->vaddr, size);
> + if (ret)
> + return ret;
> +
> + return 0;
> +}
> +
> +/**
> + * xe_gt_sriov_pf_migration_ggtt_size() - Get the size of VF GGTT migration data.
> + * @gt: the &struct xe_gt
> + * @vfid: the VF identifier
> + *
> + * This function is for PF only.
> + *
> + * Return: size in bytes or a negative error code on failure.
> + */
> +ssize_t xe_gt_sriov_pf_migration_ggtt_size(struct xe_gt *gt, unsigned int vfid)
> +{
> + if (gt != xe_root_mmio_gt(gt_to_xe(gt)))
> + return 0;
> +
> + return xe_gt_sriov_pf_config_get_ggtt(gt, vfid) / XE_PAGE_SIZE * sizeof(u64);
this conversion logic should be done by an xe_ggtt layer helper
> +}
> +
> +/**
> + * xe_gt_sriov_pf_migration_ggtt_save() - Save VF GGTT migration data.
> + * @gt: the &struct xe_gt
> + * @vfid: the VF identifier
since there is an assert, probably you should also say: "(can't be 0)"
> + *
> + * This function is for PF only.
> + *
> + * Return: 0 on success or a negative error code on failure.
> + */
> +int xe_gt_sriov_pf_migration_ggtt_save(struct xe_gt *gt, unsigned int vfid)
> +{
> + xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
> + xe_gt_assert(gt, vfid != PFID);
> + xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));
> +
> + return pf_save_vf_ggtt_mig_data(gt, vfid);
> +}
> +
> +/**
> + * xe_gt_sriov_pf_migration_ggtt_restore() - Restore VF GGTT migration data.
> + * @gt: the &struct xe_gt
> + * @vfid: the VF identifier
> + *
> + * This function is for PF only.
> + *
> + * Return: 0 on success or a negative error code on failure.
> + */
> +int xe_gt_sriov_pf_migration_ggtt_restore(struct xe_gt *gt, unsigned int vfid,
> + struct xe_sriov_pf_migration_data *data)
> +{
> + xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
> + xe_gt_assert(gt, vfid != PFID);
> + xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));
> +
> + return pf_restore_vf_ggtt_mig_data(gt, vfid, data);
> +}
> +
> /* Return: number of dwords saved/restored/required or a negative error code on failure */
> static int guc_action_vf_save_restore(struct xe_guc *guc, u32 vfid, u32 opcode,
> u64 addr, u32 ndwords)
> @@ -290,6 +402,13 @@ ssize_t xe_gt_sriov_pf_migration_size(struct xe_gt *gt, unsigned int vfid)
> size += sizeof(struct xe_sriov_pf_migration_hdr);
> total += size;
>
> + size = xe_gt_sriov_pf_migration_ggtt_size(gt, vfid);
> + if (size < 0)
> + return size;
> + else if (size > 0)
> + size += sizeof(struct xe_sriov_pf_migration_hdr);
> + total += size;
> +
> return total;
> }
>
> diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.h
> index 5df64449232bc..5bb8cba2ea0cb 100644
> --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.h
> +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.h
> @@ -16,6 +16,10 @@ ssize_t xe_gt_sriov_pf_migration_guc_size(struct xe_gt *gt, unsigned int vfid);
> int xe_gt_sriov_pf_migration_guc_save(struct xe_gt *gt, unsigned int vfid);
> int xe_gt_sriov_pf_migration_guc_restore(struct xe_gt *gt, unsigned int vfid,
> struct xe_sriov_pf_migration_data *data);
> +ssize_t xe_gt_sriov_pf_migration_ggtt_size(struct xe_gt *gt, unsigned int vfid);
> +int xe_gt_sriov_pf_migration_ggtt_save(struct xe_gt *gt, unsigned int vfid);
> +int xe_gt_sriov_pf_migration_ggtt_restore(struct xe_gt *gt, unsigned int vfid,
> + struct xe_sriov_pf_migration_data *data);
>
> ssize_t xe_gt_sriov_pf_migration_size(struct xe_gt *gt, unsigned int vfid);
>
next prev parent reply other threads:[~2025-10-13 12:37 UTC|newest]
Thread overview: 82+ messages / expand[flat|nested] mbox.gz Atom feed top
2025-10-11 19:38 [PATCH 00/26] vfio/xe: Add driver variant for Xe VF migration Michał Winiarski
2025-10-11 19:38 ` [PATCH 01/26] drm/xe/pf: Remove GuC version check for migration support Michał Winiarski
2025-10-12 18:31 ` Michal Wajdeczko
2025-10-20 14:46 ` Michał Winiarski
2025-10-11 19:38 ` [PATCH 02/26] drm/xe: Move migration support to device-level struct Michał Winiarski
2025-10-12 18:58 ` Michal Wajdeczko
2025-10-20 14:48 ` Michał Winiarski
2025-10-11 19:38 ` [PATCH 03/26] drm/xe/pf: Add save/restore control state stubs and connect to debugfs Michał Winiarski
2025-10-12 20:09 ` Michal Wajdeczko
2025-10-11 19:38 ` [PATCH 04/26] drm/xe/pf: Extract migration mutex out of its struct Michał Winiarski
2025-10-12 19:08 ` Matthew Brost
2025-10-20 14:50 ` Michał Winiarski
2025-10-11 19:38 ` [PATCH 05/26] drm/xe/pf: Add data structures and handlers for migration rings Michał Winiarski
2025-10-12 21:06 ` Michal Wajdeczko
2025-10-20 14:56 ` Michał Winiarski
2025-10-11 19:38 ` [PATCH 06/26] drm/xe/pf: Add helpers for migration data allocation / free Michał Winiarski
2025-10-12 19:12 ` Matthew Brost
2025-10-21 0:26 ` Michał Winiarski
2025-10-13 10:15 ` Michal Wajdeczko
2025-10-21 0:01 ` Michał Winiarski
2025-10-11 19:38 ` [PATCH 07/26] drm/xe/pf: Add support for encap/decap of bitstream to/from packet Michał Winiarski
2025-10-11 22:28 ` kernel test robot
2025-10-13 10:46 ` Michal Wajdeczko
2025-10-21 0:25 ` Michał Winiarski
2025-10-11 19:38 ` [PATCH 08/26] drm/xe/pf: Add minimalistic migration descriptor Michał Winiarski
2025-10-11 22:52 ` kernel test robot
2025-10-13 10:56 ` Michal Wajdeczko
2025-10-21 0:31 ` Michał Winiarski
2025-10-11 19:38 ` [PATCH 09/26] drm/xe/pf: Expose VF migration data size over debugfs Michał Winiarski
2025-10-12 19:15 ` Matthew Brost
2025-10-21 0:37 ` Michał Winiarski
2025-10-13 11:04 ` Michal Wajdeczko
2025-10-21 0:42 ` Michał Winiarski
2025-10-11 19:38 ` [PATCH 10/26] drm/xe: Add sa/guc_buf_cache sync interface Michał Winiarski
2025-10-12 18:06 ` Matthew Brost
2025-10-21 0:45 ` Michał Winiarski
2025-10-13 11:20 ` Michal Wajdeczko
2025-10-21 0:44 ` Michał Winiarski
2025-10-11 19:38 ` [PATCH 11/26] drm/xe: Allow the caller to pass guc_buf_cache size Michał Winiarski
2025-10-11 23:35 ` kernel test robot
2025-10-13 11:08 ` Michal Wajdeczko
2025-10-21 0:47 ` Michał Winiarski
2025-10-11 19:38 ` [PATCH 12/26] drm/xe/pf: Increase PF GuC Buffer Cache size and use it for VF migration Michał Winiarski
2025-10-13 11:27 ` Michal Wajdeczko
2025-10-21 0:50 ` Michał Winiarski
2025-10-11 19:38 ` [PATCH 13/26] drm/xe/pf: Remove GuC migration data save/restore from GT debugfs Michał Winiarski
2025-10-13 11:36 ` Michal Wajdeczko
2025-10-11 19:38 ` [PATCH 14/26] drm/xe/pf: Don't save GuC VF migration data on pause Michał Winiarski
2025-10-13 11:42 ` Michal Wajdeczko
2025-10-11 19:38 ` [PATCH 15/26] drm/xe/pf: Switch VF migration GuC save/restore to struct migration data Michał Winiarski
2025-10-11 19:38 ` [PATCH 16/26] drm/xe/pf: Handle GuC migration data as part of PF control Michał Winiarski
2025-10-13 11:56 ` Michal Wajdeczko
2025-10-21 0:52 ` Michał Winiarski
2025-10-11 19:38 ` [PATCH 17/26] drm/xe/pf: Add helpers for VF GGTT migration data handling Michał Winiarski
2025-10-13 12:17 ` Michal Wajdeczko
2025-10-21 1:00 ` Michał Winiarski
2025-10-11 19:38 ` [PATCH 18/26] drm/xe/pf: Handle GGTT migration data as part of PF control Michał Winiarski
2025-10-13 12:36 ` Michal Wajdeczko [this message]
2025-10-21 1:16 ` Michał Winiarski
2025-10-11 19:38 ` [PATCH 19/26] drm/xe/pf: Add helpers for VF MMIO migration data handling Michał Winiarski
2025-10-13 13:28 ` Michal Wajdeczko
2025-10-11 19:38 ` [PATCH 20/26] drm/xe/pf: Handle MMIO migration data as part of PF control Michał Winiarski
2025-10-11 19:38 ` [PATCH 21/26] drm/xe/pf: Add helper to retrieve VF's LMEM object Michał Winiarski
2025-10-11 19:38 ` [PATCH 22/26] drm/xe/migrate: Add function for raw copy of VRAM and CCS Michał Winiarski
2025-10-12 18:54 ` Matthew Brost
2025-10-11 19:38 ` [PATCH 23/26] drm/xe/pf: Handle VRAM migration data as part of PF control Michał Winiarski
2025-10-11 19:38 ` [PATCH 24/26] drm/xe/pf: Add wait helper for VF FLR Michał Winiarski
2025-10-13 13:49 ` Michal Wajdeczko
2025-10-11 19:38 ` [PATCH 25/26] drm/xe/pf: Export helpers for VFIO Michał Winiarski
2025-10-12 18:32 ` Matthew Brost
2025-10-21 1:38 ` Michał Winiarski
2025-10-13 14:02 ` Michal Wajdeczko
2025-10-21 1:49 ` Michał Winiarski
2025-10-11 19:38 ` [PATCH 26/26] vfio/xe: Add vendor-specific vfio_pci driver for Intel graphics Michał Winiarski
2025-10-13 19:00 ` Rodrigo Vivi
2025-10-21 23:03 ` Jason Gunthorpe
2025-10-21 23:14 ` Matthew Brost
2025-10-21 23:38 ` Jason Gunthorpe
2025-10-22 1:15 ` Matthew Brost
2025-10-22 13:02 ` Jason Gunthorpe
2025-10-22 9:05 ` Michał Winiarski
2025-10-27 7:02 ` Tian, Kevin
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=07d33e0b-0078-4075-bc70-e09a8ec17a97@intel.com \
--to=michal.wajdeczko@intel.com \
--cc=airlied@gmail.com \
--cc=alex.williamson@redhat.com \
--cc=dri-devel@lists.freedesktop.org \
--cc=intel-xe@lists.freedesktop.org \
--cc=jani.nikula@linux.intel.com \
--cc=jgg@ziepe.ca \
--cc=joonas.lahtinen@linux.intel.com \
--cc=kevin.tian@intel.com \
--cc=kvm@vger.kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=lucas.demarchi@intel.com \
--cc=lukasz.laguna@intel.com \
--cc=matthew.brost@intel.com \
--cc=michal.winiarski@intel.com \
--cc=rodrigo.vivi@intel.com \
--cc=shameerali.kolothum.thodi@huawei.com \
--cc=simona@ffwll.ch \
--cc=thomas.hellstrom@linux.intel.com \
--cc=tursulin@ursulin.net \
--cc=yishaih@nvidia.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox