Intel-XE Archive on lore.kernel.org
From: Matthew Brost <matthew.brost@intel.com>
To: <intel-xe@lists.freedesktop.org>
Cc: <francois.dugast@intel.com>, <daniele.ceraolospurio@intel.com>,
	<michal.wajdeczko@intel.com>
Subject: Re: [PATCH 1/2] drm/xe: Split H2G and G2H into separate buffer objects
Date: Fri, 13 Feb 2026 13:17:19 -0800
Message-ID: <aY+U3x8a9wV+Ajw5@lstrano-desk.jf.intel.com>
In-Reply-To: <20260213205043.3111176-2-matthew.brost@intel.com>

On Fri, Feb 13, 2026 at 12:50:42PM -0800, Matthew Brost wrote:
> H2G and G2H buffers have different access patterns (H2G is CPU-write,
> GuC-read, while G2H is GuC-write, CPU-read). On dGPU, these patterns
> benefit from different memory placements: H2G in VRAM and G2H in system
> memory. Split the CT buffer into two separate buffers—one for H2G and
> one for G2H—and select the optimal placement for each.
> 
> This provides a significant performance improvement on the G2H read
> path, reducing a single read from ~20 µs to under 1 µs on BMG.
> 
> Fixes: dd08ebf6c352 ("drm/xe: Introduce a new DRM driver for Intel GPUs")
> Signed-off-by: Matthew Brost <matthew.brost@intel.com>
> ---
>  drivers/gpu/drm/xe/xe_guc_ct.c       | 66 ++++++++++++++++++----------
>  drivers/gpu/drm/xe/xe_guc_ct_types.h |  6 ++-
>  2 files changed, 48 insertions(+), 24 deletions(-)
> 
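To restate the intended flow (condensed from the hunks below; not the
literal driver code):

	/* xe_guc_ct_init(): both CTBs are initially allocated in system memory */
	ct->bo_h2g = xe_managed_bo_create_pin_map(xe, tile, guc_h2g_size(),
						  XE_BO_FLAG_SYSTEM | ...);
	ct->bo_g2h = xe_managed_bo_create_pin_map(xe, tile, guc_g2h_size(),
						  XE_BO_FLAG_SYSTEM | ...);

	/* xe_guc_ct_init_post_hwconfig(): on dGPU only the CPU-write /
	 * GuC-read H2G buffer moves to VRAM; the GuC-write / CPU-read G2H
	 * buffer stays in system memory so the CPU read path stays fast.
	 */
	if (IS_DGFX(xe))
		ret = xe_managed_bo_reinit_in_vram(xe, tile, &ct->bo_h2g);
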
> diff --git a/drivers/gpu/drm/xe/xe_guc_ct.c b/drivers/gpu/drm/xe/xe_guc_ct.c
> index 8a45573f8812..5d8d90a4f879 100644
> --- a/drivers/gpu/drm/xe/xe_guc_ct.c
> +++ b/drivers/gpu/drm/xe/xe_guc_ct.c
> @@ -255,6 +255,7 @@ static bool g2h_fence_needs_alloc(struct g2h_fence *g2h_fence)
>  
>  #define CTB_DESC_SIZE		ALIGN(sizeof(struct guc_ct_buffer_desc), SZ_2K)
>  #define CTB_H2G_BUFFER_OFFSET	(CTB_DESC_SIZE * 2)
> +#define CTB_G2H_BUFFER_OFFSET	(CTB_DESC_SIZE * 2)
>  #define CTB_H2G_BUFFER_SIZE	(SZ_4K)
>  #define CTB_H2G_BUFFER_DWORDS	(CTB_H2G_BUFFER_SIZE / sizeof(u32))
>  #define CTB_G2H_BUFFER_SIZE	(SZ_128K)
> @@ -279,10 +280,14 @@ long xe_guc_ct_queue_proc_time_jiffies(struct xe_guc_ct *ct)
>  	return (CTB_H2G_BUFFER_SIZE / SZ_4K) * HZ;
>  }
>  
> -static size_t guc_ct_size(void)
> +static size_t guc_h2g_size(void)
>  {
> -	return CTB_H2G_BUFFER_OFFSET + CTB_H2G_BUFFER_SIZE +
> -		CTB_G2H_BUFFER_SIZE;
> +	return CTB_H2G_BUFFER_OFFSET + CTB_H2G_BUFFER_SIZE;
> +}
> +
> +static size_t guc_g2h_size(void)
> +{
> +	return CTB_G2H_BUFFER_OFFSET + CTB_G2H_BUFFER_SIZE;
>  }
>  
>  static void guc_ct_fini(struct drm_device *drm, void *arg)
> @@ -311,7 +316,8 @@ int xe_guc_ct_init_noalloc(struct xe_guc_ct *ct)
>  	struct xe_gt *gt = ct_to_gt(ct);
>  	int err;
>  
> -	xe_gt_assert(gt, !(guc_ct_size() % PAGE_SIZE));
> +	xe_gt_assert(gt, !(guc_h2g_size() % PAGE_SIZE));
> +	xe_gt_assert(gt, !(guc_g2h_size() % PAGE_SIZE));
>  
>  	err = drmm_mutex_init(&xe->drm, &ct->lock);
>  	if (err)
> @@ -356,7 +362,17 @@ int xe_guc_ct_init(struct xe_guc_ct *ct)
>  	struct xe_tile *tile = gt_to_tile(gt);
>  	struct xe_bo *bo;
>  
> -	bo = xe_managed_bo_create_pin_map(xe, tile, guc_ct_size(),
> +	bo = xe_managed_bo_create_pin_map(xe, tile, guc_h2g_size(),
> +					  XE_BO_FLAG_SYSTEM |
> +					  XE_BO_FLAG_GGTT |
> +					  XE_BO_FLAG_GGTT_INVALIDATE |
> +					  XE_BO_FLAG_PINNED_NORESTORE);
> +	if (IS_ERR(bo))
> +		return PTR_ERR(bo);
> +
> +	ct->bo_h2g = bo;
> +
> +	bo = xe_managed_bo_create_pin_map(xe, tile, guc_g2h_size(),
>  					  XE_BO_FLAG_SYSTEM |
>  					  XE_BO_FLAG_GGTT |
>  					  XE_BO_FLAG_GGTT_INVALIDATE |
> @@ -364,7 +380,7 @@ int xe_guc_ct_init(struct xe_guc_ct *ct)
>  	if (IS_ERR(bo))
>  		return PTR_ERR(bo);
>  
> -	ct->bo = bo;
> +	ct->bo_g2h = bo;
>  
>  	return devm_add_action_or_reset(xe->drm.dev, guc_action_disable_ct, ct);
>  }
> @@ -389,7 +405,7 @@ int xe_guc_ct_init_post_hwconfig(struct xe_guc_ct *ct)
>  	xe_assert(xe, !xe_guc_ct_enabled(ct));
>  
>  	if (IS_DGFX(xe)) {
> -		ret = xe_managed_bo_reinit_in_vram(xe, tile, &ct->bo);
> +		ret = xe_managed_bo_reinit_in_vram(xe, tile, &ct->bo_h2g);
>  		if (ret)
>  			return ret;
>  	}
> @@ -439,8 +455,7 @@ static void guc_ct_ctb_g2h_init(struct xe_device *xe, struct guc_ctb *g2h,
>  	g2h->desc = IOSYS_MAP_INIT_OFFSET(map, CTB_DESC_SIZE);
>  	xe_map_memset(xe, &g2h->desc, 0, 0, sizeof(struct guc_ct_buffer_desc));
>  
> -	g2h->cmds = IOSYS_MAP_INIT_OFFSET(map, CTB_H2G_BUFFER_OFFSET +
> -					    CTB_H2G_BUFFER_SIZE);
> +	g2h->cmds = IOSYS_MAP_INIT_OFFSET(map, CTB_G2H_BUFFER_OFFSET);
>  }
>  
>  static int guc_ct_ctb_h2g_register(struct xe_guc_ct *ct)
> @@ -449,8 +464,8 @@ static int guc_ct_ctb_h2g_register(struct xe_guc_ct *ct)
>  	u32 desc_addr, ctb_addr, size;
>  	int err;
>  
> -	desc_addr = xe_bo_ggtt_addr(ct->bo);
> -	ctb_addr = xe_bo_ggtt_addr(ct->bo) + CTB_H2G_BUFFER_OFFSET;
> +	desc_addr = xe_bo_ggtt_addr(ct->bo_h2g);
> +	ctb_addr = xe_bo_ggtt_addr(ct->bo_h2g) + CTB_H2G_BUFFER_OFFSET;
>  	size = ct->ctbs.h2g.info.size * sizeof(u32);
>  
>  	err = xe_guc_self_cfg64(guc,
> @@ -476,9 +491,8 @@ static int guc_ct_ctb_g2h_register(struct xe_guc_ct *ct)
>  	u32 desc_addr, ctb_addr, size;
>  	int err;
>  
> -	desc_addr = xe_bo_ggtt_addr(ct->bo) + CTB_DESC_SIZE;
> -	ctb_addr = xe_bo_ggtt_addr(ct->bo) + CTB_H2G_BUFFER_OFFSET +
> -		CTB_H2G_BUFFER_SIZE;
> +	desc_addr = xe_bo_ggtt_addr(ct->bo_g2h) + CTB_DESC_SIZE;
> +	ctb_addr = xe_bo_ggtt_addr(ct->bo_g2h) + CTB_G2H_BUFFER_OFFSET;
>  	size = ct->ctbs.g2h.info.size * sizeof(u32);
>  
>  	err = xe_guc_self_cfg64(guc,
> @@ -605,9 +619,12 @@ static int __xe_guc_ct_start(struct xe_guc_ct *ct, bool needs_register)
>  	xe_gt_assert(gt, !xe_guc_ct_enabled(ct));
>  
>  	if (needs_register) {
> -		xe_map_memset(xe, &ct->bo->vmap, 0, 0, xe_bo_size(ct->bo));
> -		guc_ct_ctb_h2g_init(xe, &ct->ctbs.h2g, &ct->bo->vmap);
> -		guc_ct_ctb_g2h_init(xe, &ct->ctbs.g2h, &ct->bo->vmap);
> +		xe_map_memset(xe, &ct->bo_h2g->vmap, 0, 0,
> +			      xe_bo_size(ct->bo_h2g));
> +		xe_map_memset(xe, &ct->bo_g2h->vmap, 0, 0,
> +			      xe_bo_size(ct->bo_g2h));
> +		guc_ct_ctb_h2g_init(xe, &ct->ctbs.h2g, &ct->bo_h2g->vmap);
> +		guc_ct_ctb_g2h_init(xe, &ct->ctbs.g2h, &ct->bo_g2h->vmap);
>  
>  		err = guc_ct_ctb_h2g_register(ct);
>  		if (err)
> @@ -624,7 +641,7 @@ static int __xe_guc_ct_start(struct xe_guc_ct *ct, bool needs_register)
>  		ct->ctbs.h2g.info.broken = false;
>  		ct->ctbs.g2h.info.broken = false;
>  		/* Skip everything in H2G buffer */
> -		xe_map_memset(xe, &ct->bo->vmap, CTB_H2G_BUFFER_OFFSET, 0,
> +		xe_map_memset(xe, &ct->bo_h2g->vmap, CTB_H2G_BUFFER_OFFSET, 0,
>  			      CTB_H2G_BUFFER_SIZE);
>  	}
>  
> @@ -1963,8 +1980,9 @@ static struct xe_guc_ct_snapshot *guc_ct_snapshot_alloc(struct xe_guc_ct *ct, bo
>  	if (!snapshot)
>  		return NULL;
>  
> -	if (ct->bo && want_ctb) {
> -		snapshot->ctb_size = xe_bo_size(ct->bo);
> +	if (ct->bo_h2g && ct->bo_g2h && want_ctb) {
> +		snapshot->ctb_size = xe_bo_size(ct->bo_h2g) +
> +			xe_bo_size(ct->bo_g2h);
>  		snapshot->ctb = kmalloc(snapshot->ctb_size, atomic ? GFP_ATOMIC : GFP_KERNEL);
>  	}
>  
> @@ -2012,8 +2030,12 @@ static struct xe_guc_ct_snapshot *guc_ct_snapshot_capture(struct xe_guc_ct *ct,
>  		guc_ctb_snapshot_capture(xe, &ct->ctbs.g2h, &snapshot->g2h);
>  	}
>  
> -	if (ct->bo && snapshot->ctb)
> -		xe_map_memcpy_from(xe, snapshot->ctb, &ct->bo->vmap, 0, snapshot->ctb_size);
> +	if (ct->bo_h2g && ct->bo_g2h && snapshot->ctb) {
> +		xe_map_memcpy_from(xe, snapshot->ctb, &ct->bo_h2g->vmap, 0,
> +				   xe_bo_size(ct->bo_h2g));
> +		xe_map_memcpy_from(xe, snapshot->ctb, &ct->bo_g2h->vmap,
> +				   xe_bo_size(ct->bo_h2g), xe_bo_size(ct->bo_g2h));

Logic bug here... this second copy writes the G2H contents to offset 0 of
snapshot->ctb, clobbering the H2G data copied just above, and it passes
xe_bo_size(ct->bo_h2g) as an offset into the G2H BO's vmap rather than into
the destination buffer. It should be:

xe_map_memcpy_from(xe, snapshot->ctb + xe_bo_size(ct->bo_h2g), &ct->bo_g2h->vmap, 0, xe_bo_size(ct->bo_g2h));
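
For clarity, an untested sketch of the whole corrected block, with the
offset applied to the destination buffer rather than to the source map:

	if (ct->bo_h2g && ct->bo_g2h && snapshot->ctb) {
		/* H2G contents land at the start of the snapshot buffer */
		xe_map_memcpy_from(xe, snapshot->ctb, &ct->bo_h2g->vmap, 0,
				   xe_bo_size(ct->bo_h2g));
		/* G2H contents follow immediately after the H2G copy */
		xe_map_memcpy_from(xe, snapshot->ctb + xe_bo_size(ct->bo_h2g),
				   &ct->bo_g2h->vmap, 0,
				   xe_bo_size(ct->bo_g2h));
	}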

Please ignore this revision and the skipped CI.

Matt

> +	}
>  
>  	return snapshot;
>  }
> diff --git a/drivers/gpu/drm/xe/xe_guc_ct_types.h b/drivers/gpu/drm/xe/xe_guc_ct_types.h
> index 09d7ff1ef42a..385a607e4777 100644
> --- a/drivers/gpu/drm/xe/xe_guc_ct_types.h
> +++ b/drivers/gpu/drm/xe/xe_guc_ct_types.h
> @@ -126,8 +126,10 @@ struct xe_fast_req_fence {
>   * for the H2G and G2H requests sent and received through the buffers.
>   */
>  struct xe_guc_ct {
> -	/** @bo: Xe BO for CT */
> -	struct xe_bo *bo;
> +	/** @bo_h2g: Xe BO for H2G */
> +	struct xe_bo *bo_h2g;
> +	/** @bo_g2h: Xe BO for G2H */
> +	struct xe_bo *bo_g2h;
>  	/** @lock: protects everything in CT layer */
>  	struct mutex lock;
>  	/** @fast_lock: protects G2H channel and credits */
> -- 
> 2.34.1
> 


Thread overview: 7+ messages
2026-02-13 20:50 [PATCH 0/2] GuC CT memory optimizations Matthew Brost
2026-02-13 20:50 ` [PATCH 1/2] drm/xe: Split H2G and G2H into separate buffer objects Matthew Brost
2026-02-13 21:17   ` Matthew Brost [this message]
2026-02-13 20:50 ` [PATCH 2/2] drm/xe: Remove H2G reads in CT send path in non-debug builds Matthew Brost
2026-02-13 20:57 ` ✓ CI.KUnit: success for GuC CT memory optimizations Patchwork
2026-02-13 21:53 ` ✓ Xe.CI.BAT: " Patchwork
2026-02-14 20:14 ` ✗ Xe.CI.FULL: failure " Patchwork
