From: "Michał Winiarski" <michal.winiarski@intel.com>
To: "Alex Williamson" <alex@shazbot.org>,
"Lucas De Marchi" <lucas.demarchi@intel.com>,
"Thomas Hellström" <thomas.hellstrom@linux.intel.com>,
"Rodrigo Vivi" <rodrigo.vivi@intel.com>,
"Jason Gunthorpe" <jgg@ziepe.ca>,
"Yishai Hadas" <yishaih@nvidia.com>,
"Kevin Tian" <kevin.tian@intel.com>,
"Shameer Kolothum" <skolothumtho@nvidia.com>,
intel-xe@lists.freedesktop.org, linux-kernel@vger.kernel.org,
kvm@vger.kernel.org, "Matthew Brost" <matthew.brost@intel.com>,
"Michal Wajdeczko" <michal.wajdeczko@intel.com>
Cc: dri-devel@lists.freedesktop.org,
"Jani Nikula" <jani.nikula@linux.intel.com>,
"Joonas Lahtinen" <joonas.lahtinen@linux.intel.com>,
"Tvrtko Ursulin" <tursulin@ursulin.net>,
"David Airlie" <airlied@gmail.com>,
"Simona Vetter" <simona@ffwll.ch>,
"Lukasz Laguna" <lukasz.laguna@intel.com>,
"Christoph Hellwig" <hch@infradead.org>,
"Michał Winiarski" <michal.winiarski@intel.com>
Subject: [PATCH v4 15/28] drm/xe/pf: Switch VF migration GuC save/restore to struct migration data
Date: Wed, 5 Nov 2025 16:10:13 +0100
Message-ID: <20251105151027.540712-16-michal.winiarski@intel.com>
In-Reply-To: <20251105151027.540712-1-michal.winiarski@intel.com>

In upcoming changes, the GuC VF migration data will be handled as part
of separate SAVE/RESTORE states in the VF control state machine.

Now that the data is decoupled from both the guc_state debugfs and the
PAUSE state, we can safely remove struct xe_gt_sriov_state_snapshot and
modify the GuC save/restore functions to operate on struct
xe_sriov_packet.

Signed-off-by: Michał Winiarski <michal.winiarski@intel.com>
Reviewed-by: Michal Wajdeczko <michal.wajdeczko@intel.com>
---
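For context, a rough sketch of how these entry points fit together once
the control state machine starts driving them (illustrative pseudo-flow
only, error handling elided; stream_out() is a hypothetical stand-in for
whatever hands the data to userspace, and save_consume() is assumed to
return NULL on an empty ring, per the usual ptr_ring semantics from the
ring handlers added earlier in the series):

	struct xe_sriov_packet *data;
	int err;

	/* source side: produce GuC data into the per-VF ring, then
	 * drain it packet by packet into the migration stream */
	err = xe_gt_sriov_pf_migration_guc_save(gt, vfid);
	while ((data = xe_gt_sriov_pf_migration_save_consume(gt, vfid))) {
		stream_out(&data->hdr, data->vaddr, data->size);
		xe_sriov_packet_free(data);
	}

	/* target side: replay each GuC packet decoded from the
	 * incoming stream */
	err = xe_gt_sriov_pf_migration_guc_restore(gt, vfid, data);
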
drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c | 265 +++++-------------
drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.h | 13 +-
.../drm/xe/xe_gt_sriov_pf_migration_types.h | 27 --
drivers/gpu/drm/xe/xe_gt_sriov_pf_types.h | 4 -
4 files changed, 79 insertions(+), 230 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c
index 88371c205add7..6e2b076065f3b 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c
@@ -28,6 +28,19 @@ static struct xe_gt_sriov_migration_data *pf_pick_gt_migration(struct xe_gt *gt,
return &gt->sriov.pf.vfs[vfid].migration;
}
+static void pf_dump_mig_data(struct xe_gt *gt, unsigned int vfid,
+ struct xe_sriov_packet *data,
+ const char *what)
+{
+ if (IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV)) {
+ struct drm_printer p = xe_gt_dbg_printer(gt);
+
+ drm_printf(&p, "VF%u %s (%llu bytes)\n", vfid, what, data->size);
+ drm_print_hex_dump(&p, "mig_hdr: ", (void *)&data->hdr, sizeof(data->hdr));
+ drm_print_hex_dump(&p, "mig_data: ", data->vaddr, min(SZ_64, data->size));
+ }
+}
+
/* Return: number of dwords saved/restored/required or a negative error code on failure */
static int guc_action_vf_save_restore(struct xe_guc *guc, u32 vfid, u32 opcode,
u64 addr, u32 ndwords)
@@ -47,7 +60,7 @@ static int guc_action_vf_save_restore(struct xe_guc *guc, u32 vfid, u32 opcode,
}
/* Return: size of the state in dwords or a negative error code on failure */
-static int pf_send_guc_query_vf_state_size(struct xe_gt *gt, unsigned int vfid)
+static int pf_send_guc_query_vf_mig_data_size(struct xe_gt *gt, unsigned int vfid)
{
int ret;
@@ -56,8 +69,8 @@ static int pf_send_guc_query_vf_state_size(struct xe_gt *gt, unsigned int vfid)
}
/* Return: number of state dwords saved or a negative error code on failure */
-static int pf_send_guc_save_vf_state(struct xe_gt *gt, unsigned int vfid,
- void *dst, size_t size)
+static int pf_send_guc_save_vf_mig_data(struct xe_gt *gt, unsigned int vfid,
+ void *dst, size_t size)
{
const int ndwords = size / sizeof(u32);
struct xe_guc *guc = &gt->uc.guc;
@@ -86,8 +99,8 @@ static int pf_send_guc_save_vf_state(struct xe_gt *gt, unsigned int vfid,
}
/* Return: number of state dwords restored or a negative error code on failure */
-static int pf_send_guc_restore_vf_state(struct xe_gt *gt, unsigned int vfid,
- const void *src, size_t size)
+static int pf_send_guc_restore_vf_mig_data(struct xe_gt *gt, unsigned int vfid,
+ const void *src, size_t size)
{
const int ndwords = size / sizeof(u32);
struct xe_guc *guc = &gt->uc.guc;
@@ -115,120 +128,66 @@ static bool pf_migration_supported(struct xe_gt *gt)
return xe_sriov_pf_migration_supported(gt_to_xe(gt));
}
-static struct mutex *pf_migration_mutex(struct xe_gt *gt)
+static int pf_save_vf_guc_mig_data(struct xe_gt *gt, unsigned int vfid)
{
- xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
- return &gt->sriov.pf.migration.snapshot_lock;
-}
-
-static struct xe_gt_sriov_state_snapshot *pf_pick_vf_snapshot(struct xe_gt *gt,
- unsigned int vfid)
-{
- xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
- xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));
- lockdep_assert_held(pf_migration_mutex(gt));
-
- return &gt->sriov.pf.vfs[vfid].snapshot;
-}
-
-static unsigned int pf_snapshot_index(struct xe_gt *gt, struct xe_gt_sriov_state_snapshot *snapshot)
-{
- return container_of(snapshot, struct xe_gt_sriov_metadata, snapshot) - gt->sriov.pf.vfs;
-}
-
-static void pf_free_guc_state(struct xe_gt *gt, struct xe_gt_sriov_state_snapshot *snapshot)
-{
- struct xe_device *xe = gt_to_xe(gt);
-
- drmm_kfree(&xe->drm, snapshot->guc.buff);
- snapshot->guc.buff = NULL;
- snapshot->guc.size = 0;
-}
-
-static int pf_alloc_guc_state(struct xe_gt *gt,
- struct xe_gt_sriov_state_snapshot *snapshot,
- size_t size)
-{
- struct xe_device *xe = gt_to_xe(gt);
- void *p;
-
- pf_free_guc_state(gt, snapshot);
-
- if (!size)
- return -ENODATA;
-
- if (size % sizeof(u32))
- return -EINVAL;
-
- if (size > SZ_2M)
- return -EFBIG;
-
- p = drmm_kzalloc(&xe->drm, size, GFP_KERNEL);
- if (!p)
- return -ENOMEM;
-
- snapshot->guc.buff = p;
- snapshot->guc.size = size;
- return 0;
-}
-
-static void pf_dump_guc_state(struct xe_gt *gt, struct xe_gt_sriov_state_snapshot *snapshot)
-{
- if (IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV)) {
- unsigned int vfid __maybe_unused = pf_snapshot_index(gt, snapshot);
-
- xe_gt_sriov_dbg_verbose(gt, "VF%u GuC state is %zu dwords:\n",
- vfid, snapshot->guc.size / sizeof(u32));
- print_hex_dump_bytes("state: ", DUMP_PREFIX_OFFSET,
- snapshot->guc.buff, min(SZ_64, snapshot->guc.size));
- }
-}
-
-static int pf_save_vf_guc_state(struct xe_gt *gt, unsigned int vfid)
-{
- struct xe_gt_sriov_state_snapshot *snapshot = pf_pick_vf_snapshot(gt, vfid);
+ struct xe_sriov_packet *data;
size_t size;
int ret;
- ret = pf_send_guc_query_vf_state_size(gt, vfid);
+ ret = pf_send_guc_query_vf_mig_data_size(gt, vfid);
if (ret < 0)
goto fail;
+
size = ret * sizeof(u32);
- xe_gt_sriov_dbg_verbose(gt, "VF%u state size is %d dwords (%zu bytes)\n", vfid, ret, size);
- ret = pf_alloc_guc_state(gt, snapshot, size);
- if (ret < 0)
+ data = xe_sriov_packet_alloc(gt_to_xe(gt));
+ if (!data) {
+ ret = -ENOMEM;
goto fail;
+ }
+
+ ret = xe_sriov_packet_init(data, gt->tile->id, gt->info.id,
+ XE_SRIOV_PACKET_TYPE_GUC, 0, size);
+ if (ret)
+ goto fail_free;
- ret = pf_send_guc_save_vf_state(gt, vfid, snapshot->guc.buff, size);
+ ret = pf_send_guc_save_vf_mig_data(gt, vfid, data->vaddr, size);
if (ret < 0)
- goto fail;
+ goto fail_free;
size = ret * sizeof(u32);
xe_gt_assert(gt, size);
- xe_gt_assert(gt, size <= snapshot->guc.size);
- snapshot->guc.size = size;
+ xe_gt_assert(gt, size <= data->size);
+ data->size = size;
+ data->remaining = size;
+
+ pf_dump_mig_data(gt, vfid, data, "GuC data save");
+
+ ret = xe_gt_sriov_pf_migration_save_produce(gt, vfid, data);
+ if (ret)
+ goto fail_free;
- pf_dump_guc_state(gt, snapshot);
return 0;
+fail_free:
+ xe_sriov_packet_free(data);
fail:
- xe_gt_sriov_dbg(gt, "Unable to save VF%u state (%pe)\n", vfid, ERR_PTR(ret));
- pf_free_guc_state(gt, snapshot);
+ xe_gt_sriov_err(gt, "Failed to save VF%u GuC data (%pe)\n",
+ vfid, ERR_PTR(ret));
return ret;
}
/**
- * xe_gt_sriov_pf_migration_save_guc_state() - Take a GuC VF state snapshot.
+ * xe_gt_sriov_pf_migration_guc_size() - Get the size of VF GuC migration data.
* @gt: the &xe_gt
* @vfid: the VF identifier
*
* This function is for PF only.
*
- * Return: 0 on success or a negative error code on failure.
+ * Return: size in bytes or a negative error code on failure.
*/
-int xe_gt_sriov_pf_migration_save_guc_state(struct xe_gt *gt, unsigned int vfid)
+ssize_t xe_gt_sriov_pf_migration_guc_size(struct xe_gt *gt, unsigned int vfid)
{
- int err;
+ ssize_t size;
xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
xe_gt_assert(gt, vfid != PFID);
@@ -237,37 +196,15 @@ int xe_gt_sriov_pf_migration_save_guc_state(struct xe_gt *gt, unsigned int vfid)
if (!pf_migration_supported(gt))
return -ENOPKG;
- mutex_lock(pf_migration_mutex(gt));
- err = pf_save_vf_guc_state(gt, vfid);
- mutex_unlock(pf_migration_mutex(gt));
+ size = pf_send_guc_query_vf_mig_data_size(gt, vfid);
+ if (size >= 0)
+ size *= sizeof(u32);
- return err;
-}
-
-static int pf_restore_vf_guc_state(struct xe_gt *gt, unsigned int vfid)
-{
- struct xe_gt_sriov_state_snapshot *snapshot = pf_pick_vf_snapshot(gt, vfid);
- int ret;
-
- if (!snapshot->guc.size)
- return -ENODATA;
-
- xe_gt_sriov_dbg_verbose(gt, "restoring %zu dwords of VF%u GuC state\n",
- snapshot->guc.size / sizeof(u32), vfid);
- ret = pf_send_guc_restore_vf_state(gt, vfid, snapshot->guc.buff, snapshot->guc.size);
- if (ret < 0)
- goto fail;
-
- xe_gt_sriov_dbg_verbose(gt, "restored %d dwords of VF%u GuC state\n", ret, vfid);
- return 0;
-
-fail:
- xe_gt_sriov_dbg(gt, "Failed to restore VF%u GuC state (%pe)\n", vfid, ERR_PTR(ret));
- return ret;
+ return size;
}
/**
- * xe_gt_sriov_pf_migration_restore_guc_state() - Restore a GuC VF state.
+ * xe_gt_sriov_pf_migration_guc_save() - Save VF GuC migration data.
* @gt: the &xe_gt
* @vfid: the VF identifier
*
@@ -275,10 +212,8 @@ static int pf_restore_vf_guc_state(struct xe_gt *gt, unsigned int vfid)
*
* Return: 0 on success or a negative error code on failure.
*/
-int xe_gt_sriov_pf_migration_restore_guc_state(struct xe_gt *gt, unsigned int vfid)
+int xe_gt_sriov_pf_migration_guc_save(struct xe_gt *gt, unsigned int vfid)
{
- int ret;
-
xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
xe_gt_assert(gt, vfid != PFID);
xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));
@@ -286,75 +221,42 @@ int xe_gt_sriov_pf_migration_restore_guc_state(struct xe_gt *gt, unsigned int vf
if (!pf_migration_supported(gt))
return -ENOPKG;
- mutex_lock(pf_migration_mutex(gt));
- ret = pf_restore_vf_guc_state(gt, vfid);
- mutex_unlock(pf_migration_mutex(gt));
-
- return ret;
+ return pf_save_vf_guc_mig_data(gt, vfid);
}
-#ifdef CONFIG_DEBUG_FS
-/**
- * xe_gt_sriov_pf_migration_read_guc_state() - Read a GuC VF state.
- * @gt: the &xe_gt
- * @vfid: the VF identifier
- * @buf: the user space buffer to read to
- * @count: the maximum number of bytes to read
- * @pos: the current position in the buffer
- *
- * This function is for PF only.
- *
- * This function reads up to @count bytes from the saved VF GuC state buffer
- * at offset @pos into the user space address starting at @buf.
- *
- * Return: the number of bytes read or a negative error code on failure.
- */
-ssize_t xe_gt_sriov_pf_migration_read_guc_state(struct xe_gt *gt, unsigned int vfid,
- char __user *buf, size_t count, loff_t *pos)
+static int pf_restore_vf_guc_state(struct xe_gt *gt, unsigned int vfid,
+ struct xe_sriov_packet *data)
{
- struct xe_gt_sriov_state_snapshot *snapshot;
- ssize_t ret;
+ int ret;
- xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
- xe_gt_assert(gt, vfid != PFID);
- xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));
+ xe_gt_assert(gt, data->size);
- if (!pf_migration_supported(gt))
- return -ENOPKG;
+ pf_dump_mig_data(gt, vfid, data, "GuC data restore");
- mutex_lock(pf_migration_mutex(gt));
- snapshot = pf_pick_vf_snapshot(gt, vfid);
- if (snapshot->guc.size)
- ret = simple_read_from_buffer(buf, count, pos, snapshot->guc.buff,
- snapshot->guc.size);
- else
- ret = -ENODATA;
- mutex_unlock(pf_migration_mutex(gt));
+ ret = pf_send_guc_restore_vf_mig_data(gt, vfid, data->vaddr, data->size);
+ if (ret < 0)
+ goto fail;
+
+ return 0;
+fail:
+ xe_gt_sriov_err(gt, "Failed to restore VF%u GuC data (%pe)\n",
+ vfid, ERR_PTR(ret));
return ret;
}
/**
- * xe_gt_sriov_pf_migration_write_guc_state() - Write a GuC VF state.
+ * xe_gt_sriov_pf_migration_guc_restore() - Restore VF GuC migration data.
* @gt: the &xe_gt
* @vfid: the VF identifier
- * @buf: the user space buffer with GuC VF state
- * @size: the size of GuC VF state (in bytes)
*
* This function is for PF only.
*
- * This function reads @size bytes of the VF GuC state stored at user space
- * address @buf and writes it into a internal VF state buffer.
- *
- * Return: the number of bytes used or a negative error code on failure.
+ * Return: 0 on success or a negative error code on failure.
*/
-ssize_t xe_gt_sriov_pf_migration_write_guc_state(struct xe_gt *gt, unsigned int vfid,
- const char __user *buf, size_t size)
+int xe_gt_sriov_pf_migration_guc_restore(struct xe_gt *gt, unsigned int vfid,
+ struct xe_sriov_packet *data)
{
- struct xe_gt_sriov_state_snapshot *snapshot;
- loff_t pos = 0;
- ssize_t ret;
-
xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
xe_gt_assert(gt, vfid != PFID);
xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));
@@ -362,21 +264,8 @@ ssize_t xe_gt_sriov_pf_migration_write_guc_state(struct xe_gt *gt, unsigned int
if (!pf_migration_supported(gt))
return -ENOPKG;
- mutex_lock(pf_migration_mutex(gt));
- snapshot = pf_pick_vf_snapshot(gt, vfid);
- ret = pf_alloc_guc_state(gt, snapshot, size);
- if (!ret) {
- ret = simple_write_to_buffer(snapshot->guc.buff, size, &pos, buf, size);
- if (ret < 0)
- pf_free_guc_state(gt, snapshot);
- else
- pf_dump_guc_state(gt, snapshot);
- }
- mutex_unlock(pf_migration_mutex(gt));
-
- return ret;
+ return pf_restore_vf_guc_state(gt, vfid, data);
}
-#endif /* CONFIG_DEBUG_FS */
/**
* xe_gt_sriov_pf_migration_size() - Total size of migration data from all components within a GT.
@@ -618,10 +507,6 @@ int xe_gt_sriov_pf_migration_init(struct xe_gt *gt)
if (!pf_migration_supported(gt))
return 0;
- err = drmm_mutex_init(&xe->drm, &gt->sriov.pf.migration.snapshot_lock);
- if (err)
- return err;
-
totalvfs = xe_sriov_pf_get_totalvfs(xe);
for (n = 1; n <= totalvfs; n++) {
struct xe_gt_sriov_migration_data *migration = pf_pick_gt_migration(gt, n);
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.h
index 148ee07dd23a9..fd81942bfd7a2 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.h
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.h
@@ -15,8 +15,10 @@ struct xe_sriov_packet;
#define XE_GT_SRIOV_PF_MIGRATION_GUC_DATA_MAX_SIZE SZ_8M
int xe_gt_sriov_pf_migration_init(struct xe_gt *gt);
-int xe_gt_sriov_pf_migration_save_guc_state(struct xe_gt *gt, unsigned int vfid);
-int xe_gt_sriov_pf_migration_restore_guc_state(struct xe_gt *gt, unsigned int vfid);
+ssize_t xe_gt_sriov_pf_migration_guc_size(struct xe_gt *gt, unsigned int vfid);
+int xe_gt_sriov_pf_migration_guc_save(struct xe_gt *gt, unsigned int vfid);
+int xe_gt_sriov_pf_migration_guc_restore(struct xe_gt *gt, unsigned int vfid,
+ struct xe_sriov_packet *data);
ssize_t xe_gt_sriov_pf_migration_size(struct xe_gt *gt, unsigned int vfid);
@@ -34,11 +36,4 @@ int xe_gt_sriov_pf_migration_restore_produce(struct xe_gt *gt, unsigned int vfid
struct xe_sriov_packet *
xe_gt_sriov_pf_migration_save_consume(struct xe_gt *gt, unsigned int vfid);
-#ifdef CONFIG_DEBUG_FS
-ssize_t xe_gt_sriov_pf_migration_read_guc_state(struct xe_gt *gt, unsigned int vfid,
- char __user *buf, size_t count, loff_t *pos);
-ssize_t xe_gt_sriov_pf_migration_write_guc_state(struct xe_gt *gt, unsigned int vfid,
- const char __user *buf, size_t count);
-#endif
-
#endif
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration_types.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration_types.h
index 84be6fac16c8b..75d8b94cbbefb 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration_types.h
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration_types.h
@@ -6,24 +6,7 @@
#ifndef _XE_GT_SRIOV_PF_MIGRATION_TYPES_H_
#define _XE_GT_SRIOV_PF_MIGRATION_TYPES_H_
-#include <linux/mutex.h>
#include <linux/ptr_ring.h>
-#include <linux/types.h>
-
-/**
- * struct xe_gt_sriov_state_snapshot - GT-level per-VF state snapshot data.
- *
- * Used by the PF driver to maintain per-VF migration data.
- */
-struct xe_gt_sriov_state_snapshot {
- /** @guc: GuC VF state snapshot */
- struct {
- /** @guc.buff: buffer with the VF state */
- u32 *buff;
- /** @guc.size: size of the buffer (must be dwords aligned) */
- u32 size;
- } guc;
-};
/**
* struct xe_gt_sriov_migration_data - GT-level per-VF migration data.
@@ -35,14 +18,4 @@ struct xe_gt_sriov_migration_data {
struct ptr_ring ring;
};
-/**
- * struct xe_gt_sriov_pf_migration - GT-level data.
- *
- * Used by the PF driver to maintain non-VF specific per-GT data.
- */
-struct xe_gt_sriov_pf_migration {
- /** @snapshot_lock: protects all VFs snapshots */
- struct mutex snapshot_lock;
-};
-
#endif
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_types.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_types.h
index 812e74d3f8f80..667b8310478d4 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_types.h
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_types.h
@@ -31,9 +31,6 @@ struct xe_gt_sriov_metadata {
/** @version: negotiated VF/PF ABI version */
struct xe_gt_sriov_pf_service_version version;
- /** @snapshot: snapshot of the VF state data */
- struct xe_gt_sriov_state_snapshot snapshot;
-
/** @migration: per-VF migration data. */
struct xe_gt_sriov_migration_data migration;
};
@@ -61,7 +58,6 @@ struct xe_gt_sriov_pf {
struct xe_gt_sriov_pf_service service;
struct xe_gt_sriov_pf_control control;
struct xe_gt_sriov_pf_policy policy;
- struct xe_gt_sriov_pf_migration migration;
struct xe_gt_sriov_spare_config spare;
struct xe_gt_sriov_metadata *vfs;
};
--
2.51.2