From: "K V P, Satyanarayana" <satyanarayana.k.v.p@intel.com>
To: Michal Wajdeczko <michal.wajdeczko@intel.com>,
<intel-xe@lists.freedesktop.org>
Subject: Re: [PATCH v2 6/7] drm/xe/vf: Move VF CCS data to xe_device
Date: Tue, 9 Sep 2025 13:49:39 +0530
Message-ID: <d1c96319-5672-4c39-afcc-2e64f7d5325c@intel.com>
In-Reply-To: <20250908123025.747-7-michal.wajdeczko@intel.com>
On 08-09-2025 18:00, Michal Wajdeczko wrote:
> We only need single set of VF CCS contexts, they are not per-tile
> as initial implementation might suggest. Move all VF CCS data from
> xe_tile.sriov.vf to xe_device.sriov.vf. Also rename some structs to
> align with the usage and fix their kernel-doc.
>
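Nice cleanup. For readers following along, the net effect on callers is
just the access path change; roughly (a sketch derived from the hunks
below, not a new API):

	/* before: stored per-tile, although only one set was ever used */
	ctx = &tile->sriov.vf.ccs[ctx_id];

	/* after: a single device-level set */
	ctx = &xe->sriov.vf.ccs.contexts[ctx_id];
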
> Signed-off-by: Michal Wajdeczko <michal.wajdeczko@intel.com>
> Cc: Satyanarayana K V P <satyanarayana.k.v.p@intel.com>
> ---
> drivers/gpu/drm/xe/xe_bb.c | 4 ++--
> drivers/gpu/drm/xe/xe_device_types.h | 3 ---
> drivers/gpu/drm/xe/xe_gt_debugfs.c | 4 ++--
> drivers/gpu/drm/xe/xe_sriov_vf_ccs.c | 21 +++++++++----------
> drivers/gpu/drm/xe/xe_sriov_vf_ccs_types.h | 24 +++++++++++++++++++---
> drivers/gpu/drm/xe/xe_sriov_vf_types.h | 7 +++----
> 6 files changed, 38 insertions(+), 25 deletions(-)
>
> diff --git a/drivers/gpu/drm/xe/xe_bb.c b/drivers/gpu/drm/xe/xe_bb.c
> index feb6e013dc38..6d20229c11de 100644
> --- a/drivers/gpu/drm/xe/xe_bb.c
> +++ b/drivers/gpu/drm/xe/xe_bb.c
> @@ -64,7 +64,7 @@ struct xe_bb *xe_bb_ccs_new(struct xe_gt *gt, u32 dwords,
> enum xe_sriov_vf_ccs_rw_ctxs ctx_id)
> {
> struct xe_bb *bb = kmalloc(sizeof(*bb), GFP_KERNEL);
> - struct xe_tile *tile = gt_to_tile(gt);
> + struct xe_device *xe = gt_to_xe(gt);
> struct xe_sa_manager *bb_pool;
> int err;
>
> @@ -78,7 +78,7 @@ struct xe_bb *xe_bb_ccs_new(struct xe_gt *gt, u32 dwords,
> * So, this extra DW acts as a guard here.
> */
>
> - bb_pool = tile->sriov.vf.ccs[ctx_id].mem.ccs_bb_pool;
> + bb_pool = xe->sriov.vf.ccs.contexts[ctx_id].mem.ccs_bb_pool;
> bb->bo = xe_sa_bo_new(bb_pool, 4 * (dwords + 1));
>
> if (IS_ERR(bb->bo)) {
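Side note for readers, unrelated to this change: the 4 * (dwords + 1)
sizing above is 4 bytes per dword plus the single guard DW described in
the comment, so e.g. for a hypothetical 64-dword batch:

	u32 size = 4 * (64 + 1);	/* 260 bytes: 64 payload DWs + 1 guard DW */
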
> diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h
> index 1e780f8a2a8c..c0a1f71f767d 100644
> --- a/drivers/gpu/drm/xe/xe_device_types.h
> +++ b/drivers/gpu/drm/xe/xe_device_types.h
> @@ -183,9 +183,6 @@ struct xe_tile {
> struct {
> /** @sriov.vf.ggtt_balloon: GGTT regions excluded from use. */
> struct xe_ggtt_node *ggtt_balloon[2];
> -
> - /** @sriov.vf.ccs: CCS read and write contexts for VF. */
> - struct xe_tile_vf_ccs ccs[XE_SRIOV_VF_CCS_CTX_COUNT];
> } vf;
> } sriov;
>
> diff --git a/drivers/gpu/drm/xe/xe_gt_debugfs.c b/drivers/gpu/drm/xe/xe_gt_debugfs.c
> index f6f2c14b642d..a9d960de0e5e 100644
> --- a/drivers/gpu/drm/xe/xe_gt_debugfs.c
> +++ b/drivers/gpu/drm/xe/xe_gt_debugfs.c
> @@ -126,7 +126,7 @@ static int powergate_info(struct xe_gt *gt, struct drm_printer *p)
>
> static int sa_info_vf_ccs(struct xe_gt *gt, struct drm_printer *p)
> {
> - struct xe_tile *tile = gt_to_tile(gt);
> + struct xe_device *xe = gt_to_xe(gt);
> struct xe_sa_manager *bb_pool;
> enum xe_sriov_vf_ccs_rw_ctxs ctx_id;
>
> @@ -136,7 +136,7 @@ static int sa_info_vf_ccs(struct xe_gt *gt, struct drm_printer *p)
> xe_pm_runtime_get(gt_to_xe(gt));
>
> for_each_ccs_rw_ctx(ctx_id) {
> - bb_pool = tile->sriov.vf.ccs[ctx_id].mem.ccs_bb_pool;
> + bb_pool = xe->sriov.vf.ccs.contexts[ctx_id].mem.ccs_bb_pool;
> if (!bb_pool)
> break;
>
> diff --git a/drivers/gpu/drm/xe/xe_sriov_vf_ccs.c b/drivers/gpu/drm/xe/xe_sriov_vf_ccs.c
> index eb8436e44ca4..c5c60f05073d 100644
> --- a/drivers/gpu/drm/xe/xe_sriov_vf_ccs.c
> +++ b/drivers/gpu/drm/xe/xe_sriov_vf_ccs.c
> @@ -136,7 +136,7 @@ static u64 get_ccs_bb_pool_size(struct xe_device *xe)
> return round_up(bb_pool_size * 2, SZ_1M);
> }
>
> -static int alloc_bb_pool(struct xe_tile *tile, struct xe_tile_vf_ccs *ctx)
> +static int alloc_bb_pool(struct xe_tile *tile, struct xe_sriov_vf_ccs_ctx *ctx)
> {
> struct xe_device *xe = tile_to_xe(tile);
> struct xe_sa_manager *sa_manager;
> @@ -168,7 +168,7 @@ static int alloc_bb_pool(struct xe_tile *tile, struct xe_tile_vf_ccs *ctx)
> return 0;
> }
>
> -static void ccs_rw_update_ring(struct xe_tile_vf_ccs *ctx)
> +static void ccs_rw_update_ring(struct xe_sriov_vf_ccs_ctx *ctx)
> {
> u64 addr = xe_sa_manager_gpu_addr(ctx->mem.ccs_bb_pool);
> struct xe_lrc *lrc = xe_exec_queue_lrc(ctx->mig_q);
> @@ -185,7 +185,7 @@ static void ccs_rw_update_ring(struct xe_tile_vf_ccs *ctx)
> xe_lrc_set_ring_tail(lrc, lrc->ring.tail);
> }
>
> -static int register_save_restore_context(struct xe_tile_vf_ccs *ctx)
> +static int register_save_restore_context(struct xe_sriov_vf_ccs_ctx *ctx)
> {
> int ctx_type;
>
> @@ -215,15 +215,14 @@ static int register_save_restore_context(struct xe_tile_vf_ccs *ctx)
> */
> int xe_sriov_vf_ccs_register_context(struct xe_device *xe)
> {
> - struct xe_tile *tile = xe_device_get_root_tile(xe);
> enum xe_sriov_vf_ccs_rw_ctxs ctx_id;
> - struct xe_tile_vf_ccs *ctx;
> + struct xe_sriov_vf_ccs_ctx *ctx;
> int err;
>
> xe_assert(xe, IS_VF_CCS_READY(xe));
>
> for_each_ccs_rw_ctx(ctx_id) {
> - ctx = &tile->sriov.vf.ccs[ctx_id];
> + ctx = &xe->sriov.vf.ccs.contexts[ctx_id];
> err = register_save_restore_context(ctx);
> if (err)
> return err;
> @@ -234,7 +233,7 @@ int xe_sriov_vf_ccs_register_context(struct xe_device *xe)
>
> static void xe_sriov_vf_ccs_fini(void *arg)
> {
> - struct xe_tile_vf_ccs *ctx = arg;
> + struct xe_sriov_vf_ccs_ctx *ctx = arg;
> struct xe_lrc *lrc = xe_exec_queue_lrc(ctx->mig_q);
>
> /*
> @@ -258,7 +257,7 @@ int xe_sriov_vf_ccs_init(struct xe_device *xe)
> {
> struct xe_tile *tile = xe_device_get_root_tile(xe);
> enum xe_sriov_vf_ccs_rw_ctxs ctx_id;
> - struct xe_tile_vf_ccs *ctx;
> + struct xe_sriov_vf_ccs_ctx *ctx;
> struct xe_exec_queue *q;
> u32 flags;
> int err;
> @@ -270,7 +269,7 @@ int xe_sriov_vf_ccs_init(struct xe_device *xe)
> return 0;
>
> for_each_ccs_rw_ctx(ctx_id) {
> - ctx = &tile->sriov.vf.ccs[ctx_id];
> + ctx = &xe->sriov.vf.ccs.contexts[ctx_id];
> ctx->ctx_id = ctx_id;
>
> flags = EXEC_QUEUE_FLAG_KERNEL |
> @@ -325,7 +324,7 @@ int xe_sriov_vf_ccs_attach_bo(struct xe_bo *bo)
> {
> struct xe_device *xe = xe_bo_device(bo);
> enum xe_sriov_vf_ccs_rw_ctxs ctx_id;
> - struct xe_tile_vf_ccs *ctx;
> + struct xe_sriov_vf_ccs_ctx *ctx;
> struct xe_tile *tile;
> struct xe_bb *bb;
> int err = 0;
> @@ -339,7 +338,7 @@ int xe_sriov_vf_ccs_attach_bo(struct xe_bo *bo)
> /* bb should be NULL here. Assert if not NULL */
> xe_assert(xe, !bb);
>
> - ctx = &tile->sriov.vf.ccs[ctx_id];
> + ctx = &xe->sriov.vf.ccs.contexts[ctx_id];
> err = xe_migrate_ccs_rw_copy(tile, ctx->mig_q, bo, ctx_id);
> }
> return err;
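All the iteration sites now follow the same pattern. Since
for_each_ccs_rw_ctx() is a plain counting loop (see the types header
below), the common shape is, as a sketch:

	enum xe_sriov_vf_ccs_rw_ctxs ctx_id;
	struct xe_sriov_vf_ccs_ctx *ctx;

	for_each_ccs_rw_ctx(ctx_id) {	/* 0 .. XE_SRIOV_VF_CCS_CTX_COUNT - 1 */
		ctx = &xe->sriov.vf.ccs.contexts[ctx_id];
		/* per-context work, e.g. register_save_restore_context(ctx) */
	}
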
> diff --git a/drivers/gpu/drm/xe/xe_sriov_vf_ccs_types.h b/drivers/gpu/drm/xe/xe_sriov_vf_ccs_types.h
> index 4d3c10907135..22c499943d2a 100644
> --- a/drivers/gpu/drm/xe/xe_sriov_vf_ccs_types.h
> +++ b/drivers/gpu/drm/xe/xe_sriov_vf_ccs_types.h
> @@ -6,6 +6,8 @@
> #ifndef _XE_SRIOV_VF_CCS_TYPES_H_
> #define _XE_SRIOV_VF_CCS_TYPES_H_
>
> +#include <linux/types.h>
> +
> #define for_each_ccs_rw_ctx(id__) \
> for ((id__) = 0; (id__) < XE_SRIOV_VF_CCS_CTX_COUNT; (id__)++)
>
> @@ -18,16 +20,32 @@ enum xe_sriov_vf_ccs_rw_ctxs {
> struct xe_migrate;
> struct xe_sa_manager;
>
> -struct xe_tile_vf_ccs {
> - /** @id: Id to which context it belongs to */
> +/**
> + * struct xe_sriov_vf_ccs_ctx - VF CCS migration context data.
> + */
> +struct xe_sriov_vf_ccs_ctx {
> + /** @ctx_id: Id to which context it belongs to */
> enum xe_sriov_vf_ccs_rw_ctxs ctx_id;
> +
> /** @mig_q: exec queues used for migration */
> struct xe_exec_queue *mig_q;
>
> + /** @mem: memory data */
> struct {
> - /** @ccs_bb_pool: Pool from which batch buffers are allocated. */
> + /** @mem.ccs_bb_pool: Pool from which batch buffers are allocated. */
> struct xe_sa_manager *ccs_bb_pool;
> } mem;
> };
>
> +/**
> + * struct xe_sriov_vf_ccs - The VF CCS migration support data.
> + */
> +struct xe_sriov_vf_ccs {
> + /** @contexts: CCS read and write contexts for VF. */
> + struct xe_sriov_vf_ccs_ctx contexts[XE_SRIOV_VF_CCS_CTX_COUNT];
> +
> + /** @initialized: Initialization of VF CCS is completed or not. */
> + bool initialized;
> +};
> +
> #endif
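The renamed structs read much better, and the kernel-doc now matches the
member paths. The resulting nesting, as I read it:

	struct xe_sriov_vf_ccs				/* device-level, xe->sriov.vf.ccs */
		.contexts[XE_SRIOV_VF_CCS_CTX_COUNT]	/* struct xe_sriov_vf_ccs_ctx */
			.ctx_id
			.mig_q
			.mem.ccs_bb_pool
		.initialized
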
> diff --git a/drivers/gpu/drm/xe/xe_sriov_vf_types.h b/drivers/gpu/drm/xe/xe_sriov_vf_types.h
> index 2c94d1f92187..426cc5841958 100644
> --- a/drivers/gpu/drm/xe/xe_sriov_vf_types.h
> +++ b/drivers/gpu/drm/xe/xe_sriov_vf_types.h
> @@ -9,6 +9,8 @@
> #include <linux/types.h>
> #include <linux/workqueue_types.h>
>
> +#include "xe_sriov_vf_ccs_types.h"
> +
> /**
> * struct xe_sriov_vf_relay_version - PF ABI version details.
> */
> @@ -43,10 +45,7 @@ struct xe_device_vf {
> } migration;
>
> /** @ccs: VF CCS state data */
> - struct {
> - /** @ccs.initialized: Initilalization of VF CCS is completed or not */
> - bool initialized;
> - } ccs;
> + struct xe_sriov_vf_ccs ccs;
> };
>
> #endif
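One nice property of replacing the old anonymous struct with struct
xe_sriov_vf_ccs here is that the flag keeps the exact same access path,
so any existing user of it (IS_VF_CCS_READY(), assuming it tests this
flag) needs no changes:

	xe->sriov.vf.ccs.initialized	/* same path before and after */
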
LGTM.
Reviewed-by: Satyanarayana K V P <satyanarayana.k.v.p@intel.com>
Thread overview: 13+ messages
2025-09-08 12:30 [PATCH v2 0/7] Small improvements around VF CCS Michal Wajdeczko
2025-09-08 12:30 ` [PATCH v2 1/7] drm/xe/guc: Rename xe_guc_register_exec_queue Michal Wajdeczko
2025-09-08 12:30 ` [PATCH v2 2/7] drm/xe/guc: Use proper flag definitions when registering context Michal Wajdeczko
2025-09-08 12:30 ` [PATCH v2 3/7] drm/xe/vf: Drop IS_VF_CCS_INIT_NEEDED macro Michal Wajdeczko
2025-09-08 12:30 ` [PATCH v2 4/7] drm/xe/vf: Use single check when calling VF CCS functions Michal Wajdeczko
2025-09-08 12:30 ` [PATCH v2 5/7] drm/xe/bo: Add xe_bo_has_valid_ccs_bb helper Michal Wajdeczko
2025-09-08 12:30 ` [PATCH v2 6/7] drm/xe/vf: Move VF CCS data to xe_device Michal Wajdeczko
2025-09-09 8:19 ` K V P, Satyanarayana [this message]
2025-09-08 12:30 ` [PATCH v2 7/7] drm/xe/vf: Move VF CCS debugfs attribute Michal Wajdeczko
2025-09-09 8:23 ` K V P, Satyanarayana
2025-09-08 22:53 ` ✓ CI.KUnit: success for Small improvements around VF CCS (rev2) Patchwork
2025-09-09 4:21 ` ✗ Xe.CI.Full: failure " Patchwork
2025-09-09 9:21 ` K V P, Satyanarayana