From: Michal Wajdeczko <michal.wajdeczko@intel.com>
To: "Michał Winiarski" <michal.winiarski@intel.com>,
"Alex Williamson" <alex.williamson@redhat.com>,
"Lucas De Marchi" <lucas.demarchi@intel.com>,
"Thomas Hellström" <thomas.hellstrom@linux.intel.com>,
"Rodrigo Vivi" <rodrigo.vivi@intel.com>,
"Jason Gunthorpe" <jgg@ziepe.ca>,
"Yishai Hadas" <yishaih@nvidia.com>,
"Kevin Tian" <kevin.tian@intel.com>,
intel-xe@lists.freedesktop.org, linux-kernel@vger.kernel.org,
kvm@vger.kernel.org, "Matthew Brost" <matthew.brost@intel.com>
Cc: <dri-devel@lists.freedesktop.org>,
Jani Nikula <jani.nikula@linux.intel.com>,
Joonas Lahtinen <joonas.lahtinen@linux.intel.com>,
Tvrtko Ursulin <tursulin@ursulin.net>,
David Airlie <airlied@gmail.com>, Simona Vetter <simona@ffwll.ch>,
"Lukasz Laguna" <lukasz.laguna@intel.com>
Subject: Re: [PATCH v2 11/26] drm/xe/pf: Increase PF GuC Buffer Cache size and use it for VF migration
Date: Thu, 23 Oct 2025 19:37:48 +0200 [thread overview]
Message-ID: <687d4d54-09d6-4e30-921b-66c0e9bd0d51@intel.com> (raw)
In-Reply-To: <20251021224133.577765-12-michal.winiarski@intel.com>
On 10/22/2025 12:41 AM, Michał Winiarski wrote:
> Contiguous PF GGTT VMAs can be scarce after creating VFs.
> Increase the GuC buffer cache size to 4M for PF so that we can fit GuC
> migration data (which currently maxes out at just under 4M) and use the
but the code below still uses 8M
> cache instead of allocating fresh BOs.
>
> Signed-off-by: Michał Winiarski <michal.winiarski@intel.com>
> ---
> drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c | 46 ++++++-------------
> drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.h | 3 ++
> drivers/gpu/drm/xe/xe_guc.c | 12 ++++-
> 3 files changed, 28 insertions(+), 33 deletions(-)
>
> diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c
> index 4e26feb9c267f..04fad3126865c 100644
> --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c
> +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c
> @@ -11,7 +11,7 @@
> #include "xe_gt_sriov_pf_helpers.h"
> #include "xe_gt_sriov_pf_migration.h"
> #include "xe_gt_sriov_printk.h"
> -#include "xe_guc.h"
> +#include "xe_guc_buf.h"
> #include "xe_guc_ct.h"
> #include "xe_sriov.h"
> #include "xe_sriov_migration_data.h"
> @@ -57,73 +57,55 @@ static int pf_send_guc_query_vf_state_size(struct xe_gt *gt, unsigned int vfid)
>
> /* Return: number of state dwords saved or a negative error code on failure */
> static int pf_send_guc_save_vf_state(struct xe_gt *gt, unsigned int vfid,
> - void *buff, size_t size)
> + void *dst, size_t size)
> {
> const int ndwords = size / sizeof(u32);
> - struct xe_tile *tile = gt_to_tile(gt);
> - struct xe_device *xe = tile_to_xe(tile);
> struct xe_guc *guc = >->uc.guc;
> - struct xe_bo *bo;
> + CLASS(xe_guc_buf, buf)(&guc->buf, ndwords);
> int ret;
>
> xe_gt_assert(gt, size % sizeof(u32) == 0);
> xe_gt_assert(gt, size == ndwords * sizeof(u32));
>
> - bo = xe_bo_create_pin_map_novm(xe, tile,
> - ALIGN(size, PAGE_SIZE),
> - ttm_bo_type_kernel,
> - XE_BO_FLAG_SYSTEM |
> - XE_BO_FLAG_GGTT |
> - XE_BO_FLAG_GGTT_INVALIDATE, false);
> - if (IS_ERR(bo))
> - return PTR_ERR(bo);
> + if (!xe_guc_buf_is_valid(buf))
> + return -ENOBUFS;
> +
> + memset(xe_guc_buf_cpu_ptr(buf), 0, size);
hmm, I didn't find in the GuC spec that this buffer must be zeroed, so why bother?
>
> ret = guc_action_vf_save_restore(guc, vfid, GUC_PF_OPCODE_VF_SAVE,
> - xe_bo_ggtt_addr(bo), ndwords);
> + xe_guc_buf_flush(buf), ndwords);
> if (!ret)
> ret = -ENODATA;
> else if (ret > ndwords)
> ret = -EPROTO;
> else if (ret > 0)
> - xe_map_memcpy_from(xe, buff, &bo->vmap, 0, ret * sizeof(u32));
> + memcpy(dst, xe_guc_buf_sync_read(buf), ret * sizeof(u32));
nit: given this usage, maybe one day we should add an optimized variant that copies directly to dst?
xe_guc_buf_sync_into(buf, dst, size);
>
> - xe_bo_unpin_map_no_vm(bo);
> return ret;
> }
>
> /* Return: number of state dwords restored or a negative error code on failure */
> static int pf_send_guc_restore_vf_state(struct xe_gt *gt, unsigned int vfid,
> - const void *buff, size_t size)
> + const void *src, size_t size)
> {
> const int ndwords = size / sizeof(u32);
> - struct xe_tile *tile = gt_to_tile(gt);
> - struct xe_device *xe = tile_to_xe(tile);
> struct xe_guc *guc = >->uc.guc;
> - struct xe_bo *bo;
> + CLASS(xe_guc_buf_from_data, buf)(&guc->buf, src, size);
> int ret;
>
> xe_gt_assert(gt, size % sizeof(u32) == 0);
> xe_gt_assert(gt, size == ndwords * sizeof(u32));
>
> - bo = xe_bo_create_pin_map_novm(xe, tile,
> - ALIGN(size, PAGE_SIZE),
> - ttm_bo_type_kernel,
> - XE_BO_FLAG_SYSTEM |
> - XE_BO_FLAG_GGTT |
> - XE_BO_FLAG_GGTT_INVALIDATE, false);
> - if (IS_ERR(bo))
> - return PTR_ERR(bo);
> -
> - xe_map_memcpy_to(xe, &bo->vmap, 0, buff, size);
> + if (!xe_guc_buf_is_valid(buf))
> + return -ENOBUFS;
>
> ret = guc_action_vf_save_restore(guc, vfid, GUC_PF_OPCODE_VF_RESTORE,
> - xe_bo_ggtt_addr(bo), ndwords);
> + xe_guc_buf_flush(buf), ndwords);
> if (!ret)
> ret = -ENODATA;
> else if (ret > ndwords)
> ret = -EPROTO;
>
> - xe_bo_unpin_map_no_vm(bo);
> return ret;
> }
>
> diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.h
> index e2d41750f863c..4f2f2783339c3 100644
> --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.h
> +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.h
> @@ -11,6 +11,9 @@
> struct xe_gt;
> struct xe_sriov_migration_data;
>
> +/* TODO: get this information by querying GuC in the future */
> +#define XE_GT_SRIOV_PF_MIGRATION_GUC_DATA_MAX_SIZE SZ_8M
so is it 8M or 4M?
maybe wrap that into a function now
u32 xe_gt_sriov_pf_migration_guc_data_size(struct xe_gt *gt)
{
if (xe_sriov_pf_migration_supported(gt_to_xe(gt)))
return SZ_4M; /* TODO: ... */
return 0;
}
> +
> int xe_gt_sriov_pf_migration_init(struct xe_gt *gt);
> int xe_gt_sriov_pf_migration_save_guc_state(struct xe_gt *gt, unsigned int vfid);
> int xe_gt_sriov_pf_migration_restore_guc_state(struct xe_gt *gt, unsigned int vfid);
> diff --git a/drivers/gpu/drm/xe/xe_guc.c b/drivers/gpu/drm/xe/xe_guc.c
> index 7c65528859ecb..cd6ab277a7876 100644
> --- a/drivers/gpu/drm/xe/xe_guc.c
> +++ b/drivers/gpu/drm/xe/xe_guc.c
> @@ -24,6 +24,7 @@
> #include "xe_gt_printk.h"
> #include "xe_gt_sriov_vf.h"
> #include "xe_gt_throttle.h"
> +#include "xe_gt_sriov_pf_migration.h"
> #include "xe_guc_ads.h"
> #include "xe_guc_buf.h"
> #include "xe_guc_capture.h"
> @@ -40,6 +41,7 @@
> #include "xe_mmio.h"
> #include "xe_platform_types.h"
> #include "xe_sriov.h"
> +#include "xe_sriov_pf_migration.h"
> #include "xe_uc.h"
> #include "xe_uc_fw.h"
> #include "xe_wa.h"
> @@ -821,6 +823,14 @@ static int vf_guc_init_post_hwconfig(struct xe_guc *guc)
> return 0;
> }
>
> +static u32 guc_buf_cache_size(struct xe_guc *guc)
> +{
> + if (IS_SRIOV_PF(guc_to_xe(guc)) && xe_sriov_pf_migration_supported(guc_to_xe(guc)))
> + return XE_GT_SRIOV_PF_MIGRATION_GUC_DATA_MAX_SIZE;
then
u32 size = XE_GUC_BUF_CACHE_DEFAULT_SIZE;
if (IS_SRIOV_PF(guc_to_xe(guc)))
size += xe_gt_sriov_pf_migration_guc_data_size(guc_to_gt(guc));
return size;
> + else
> + return XE_GUC_BUF_CACHE_DEFAULT_SIZE;
> +}
> +
> /**
> * xe_guc_init_post_hwconfig - initialize GuC post hwconfig load
> * @guc: The GuC object
> @@ -860,7 +870,7 @@ int xe_guc_init_post_hwconfig(struct xe_guc *guc)
> if (ret)
> return ret;
>
> - ret = xe_guc_buf_cache_init(&guc->buf, XE_GUC_BUF_CACHE_DEFAULT_SIZE);
> + ret = xe_guc_buf_cache_init(&guc->buf, guc_buf_cache_size(guc));
> if (ret)
> return ret;
>
next prev parent reply other threads:[~2025-10-23 17:38 UTC|newest]
Thread overview: 75+ messages / expand[flat|nested] mbox.gz Atom feed top
2025-10-21 22:41 [PATCH v2 00/26] vfio/xe: Add driver variant for Xe VF migration Michał Winiarski
2025-10-21 22:41 ` [PATCH v2 01/26] drm/xe/pf: Remove GuC version check for migration support Michał Winiarski
2025-10-28 2:33 ` Tian, Kevin
2025-10-28 8:06 ` Winiarski, Michal
2025-10-21 22:41 ` [PATCH v2 02/26] drm/xe: Move migration support to device-level struct Michał Winiarski
2025-10-21 22:41 ` [PATCH v2 03/26] drm/xe/pf: Add save/restore control state stubs and connect to debugfs Michał Winiarski
2025-10-22 22:31 ` Michal Wajdeczko
2025-10-27 12:02 ` Michał Winiarski
2025-10-28 3:06 ` Tian, Kevin
2025-10-28 8:02 ` Michal Wajdeczko
2025-10-21 22:41 ` [PATCH v2 04/26] drm/xe/pf: Add data structures and handlers for migration rings Michał Winiarski
2025-10-22 22:06 ` Michal Wajdeczko
2025-10-27 12:33 ` Michał Winiarski
2025-10-21 22:41 ` [PATCH v2 05/26] drm/xe/pf: Add helpers for migration data allocation / free Michał Winiarski
2025-10-22 22:18 ` Michal Wajdeczko
2025-10-27 12:47 ` Michał Winiarski
2025-10-21 22:41 ` [PATCH v2 06/26] drm/xe/pf: Add support for encap/decap of bitstream to/from packet Michał Winiarski
2025-10-22 22:34 ` Michal Wajdeczko
2025-10-27 13:27 ` Michał Winiarski
2025-10-21 22:41 ` [PATCH v2 07/26] drm/xe/pf: Add minimalistic migration descriptor Michał Winiarski
2025-10-22 22:49 ` Michal Wajdeczko
2025-10-27 14:52 ` Michał Winiarski
2025-10-21 22:41 ` [PATCH v2 08/26] drm/xe/pf: Expose VF migration data size over debugfs Michał Winiarski
2025-10-22 23:02 ` Michal Wajdeczko
2025-10-21 22:41 ` [PATCH v2 09/26] drm/xe: Add sa/guc_buf_cache sync interface Michał Winiarski
2025-10-22 23:05 ` Michal Wajdeczko
2025-10-21 22:41 ` [PATCH v2 10/26] drm/xe: Allow the caller to pass guc_buf_cache size Michał Winiarski
2025-10-22 23:13 ` Michal Wajdeczko
2025-10-21 22:41 ` [PATCH v2 11/26] drm/xe/pf: Increase PF GuC Buffer Cache size and use it for VF migration Michał Winiarski
2025-10-23 17:37 ` Michal Wajdeczko [this message]
2025-10-28 10:46 ` Michał Winiarski
2025-10-21 22:41 ` [PATCH v2 12/26] drm/xe/pf: Remove GuC migration data save/restore from GT debugfs Michał Winiarski
2025-10-21 22:41 ` [PATCH v2 13/26] drm/xe/pf: Don't save GuC VF migration data on pause Michał Winiarski
2025-10-21 22:41 ` [PATCH v2 14/26] drm/xe/pf: Switch VF migration GuC save/restore to struct migration data Michał Winiarski
2025-10-21 22:41 ` [PATCH v2 15/26] drm/xe/pf: Handle GuC migration data as part of PF control Michał Winiarski
2025-10-23 20:39 ` Michal Wajdeczko
2025-10-28 13:04 ` Michał Winiarski
2025-10-21 22:41 ` [PATCH v2 16/26] drm/xe/pf: Add helpers for VF GGTT migration data handling Michał Winiarski
2025-10-23 21:50 ` Michal Wajdeczko
2025-10-28 17:03 ` Michał Winiarski
2025-10-28 3:22 ` Tian, Kevin
2025-10-28 7:38 ` Michal Wajdeczko
2025-10-21 22:41 ` [PATCH v2 17/26] drm/xe/pf: Handle GGTT migration data as part of PF control Michał Winiarski
2025-10-21 22:41 ` [PATCH v2 18/26] drm/xe/pf: Add helpers for VF MMIO migration data handling Michał Winiarski
2025-10-23 22:10 ` Michal Wajdeczko
2025-10-28 23:37 ` Michał Winiarski
2025-10-21 22:41 ` [PATCH v2 19/26] drm/xe/pf: Handle MMIO migration data as part of PF control Michał Winiarski
2025-10-21 22:41 ` [PATCH v2 20/26] drm/xe/pf: Add helper to retrieve VF's LMEM object Michał Winiarski
2025-10-23 20:25 ` Michal Wajdeczko
2025-10-28 23:40 ` Michał Winiarski
2025-10-21 22:41 ` [PATCH v2 21/26] drm/xe/migrate: Add function to copy of VRAM data in chunks Michał Winiarski
2025-10-23 19:29 ` Michal Wajdeczko
2025-10-30 6:07 ` Laguna, Lukasz
2025-10-21 22:41 ` [PATCH v2 22/26] drm/xe/pf: Handle VRAM migration data as part of PF control Michał Winiarski
2025-10-23 11:44 ` kernel test robot
2025-10-23 19:54 ` Michal Wajdeczko
2025-10-29 8:54 ` Michał Winiarski
2025-10-21 22:41 ` [PATCH v2 23/26] drm/xe/pf: Add wait helper for VF FLR Michał Winiarski
2025-10-21 22:41 ` [PATCH v2 24/26] drm/xe/pf: Enable SR-IOV VF migration for PTL and BMG Michał Winiarski
2025-10-23 20:15 ` Michal Wajdeczko
2025-10-21 22:41 ` [PATCH v2 25/26] drm/xe/pf: Export helpers for VFIO Michał Winiarski
2025-10-28 3:28 ` Tian, Kevin
2025-10-21 22:41 ` [PATCH v2 26/26] vfio/xe: Add vendor-specific vfio_pci driver for Intel graphics Michał Winiarski
2025-10-22 7:12 ` Christoph Hellwig
2025-10-22 8:52 ` Michał Winiarski
2025-10-22 8:54 ` Christoph Hellwig
2025-10-22 9:12 ` Michał Winiarski
[not found] ` <20251022113355.GC21554@ziepe.ca>
2025-10-22 13:27 ` Michał Winiarski
2025-10-27 7:24 ` Tian, Kevin
2025-10-29 20:46 ` Winiarski, Michal
2025-10-27 7:26 ` Tian, Kevin
2025-10-21 22:50 ` ✗ CI.checkpatch: warning for vfio/xe: Add driver variant for Xe VF migration (rev2) Patchwork
2025-10-21 22:52 ` ✓ CI.KUnit: success " Patchwork
2025-10-21 23:31 ` ✓ Xe.CI.BAT: " Patchwork
2025-10-22 2:54 ` ✗ Xe.CI.Full: failure " Patchwork
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=687d4d54-09d6-4e30-921b-66c0e9bd0d51@intel.com \
--to=michal.wajdeczko@intel.com \
--cc=airlied@gmail.com \
--cc=alex.williamson@redhat.com \
--cc=dri-devel@lists.freedesktop.org \
--cc=intel-xe@lists.freedesktop.org \
--cc=jani.nikula@linux.intel.com \
--cc=jgg@ziepe.ca \
--cc=joonas.lahtinen@linux.intel.com \
--cc=kevin.tian@intel.com \
--cc=kvm@vger.kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=lucas.demarchi@intel.com \
--cc=lukasz.laguna@intel.com \
--cc=matthew.brost@intel.com \
--cc=michal.winiarski@intel.com \
--cc=rodrigo.vivi@intel.com \
--cc=simona@ffwll.ch \
--cc=thomas.hellstrom@linux.intel.com \
--cc=tursulin@ursulin.net \
--cc=yishaih@nvidia.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox