Intel-XE Archive on lore.kernel.org
 help / color / mirror / Atom feed
From: Matthew Brost <matthew.brost@intel.com>
To: Stuart Summers <stuart.summers@intel.com>
Cc: <John.C.Harrison@intel.com>, <brian.welty@intel.com>,
	<rodrigo.vivi@intel.com>, <intel-xe@lists.freedesktop.org>
Subject: Re: [PATCH 2/3] drm/xe: Use topology to determine page fault queue size
Date: Fri, 19 Jul 2024 18:12:30 +0000	[thread overview]
Message-ID: <ZpqsjqILy5RCL0oQ@DUT025-TGLU.fm.intel.com> (raw)
In-Reply-To: <3c2eef90948cbecb8beae29b21663f3b3cca81fa.1721411802.git.stuart.summers@intel.com>

On Fri, Jul 19, 2024 at 05:58:27PM +0000, Stuart Summers wrote:
> Currently the page fault queue size is hard coded. However
> the hardware supports faulting for each EU and each CS.
> For some applications running on hardware with a large
> number of EUs and CSs, this can result in an overflow of
> the page fault queue.
> 
> Add a small calculation to determine the page fault queue
> size based on the number of EUs and CSs in the platform as
> determined by fuses.
> 
> Signed-off-by: Stuart Summers <stuart.summers@intel.com>

Reviewed-by: Matthew Brost <matthew.brost@intel.com>

> ---
>  drivers/gpu/drm/xe/xe_gt_pagefault.c | 54 +++++++++++++++++++++-------
>  drivers/gpu/drm/xe/xe_gt_types.h     |  9 +++--
>  2 files changed, 49 insertions(+), 14 deletions(-)
> 
> diff --git a/drivers/gpu/drm/xe/xe_gt_pagefault.c b/drivers/gpu/drm/xe/xe_gt_pagefault.c
> index b2a7fa55bd18..6bfc60c0274a 100644
> --- a/drivers/gpu/drm/xe/xe_gt_pagefault.c
> +++ b/drivers/gpu/drm/xe/xe_gt_pagefault.c
> @@ -287,7 +287,7 @@ static bool get_pagefault(struct pf_queue *pf_queue, struct pagefault *pf)
>  			PFD_VIRTUAL_ADDR_LO_SHIFT;
>  
>  		pf_queue->tail = (pf_queue->tail + PF_MSG_LEN_DW) %
> -			PF_QUEUE_NUM_DW;
> +			pf_queue->pf_queue_num_dw;
>  		ret = true;
>  	}
>  	spin_unlock_irq(&pf_queue->lock);
> @@ -299,7 +299,8 @@ static bool pf_queue_full(struct pf_queue *pf_queue)
>  {
>  	lockdep_assert_held(&pf_queue->lock);
>  
> -	return CIRC_SPACE(pf_queue->head, pf_queue->tail, PF_QUEUE_NUM_DW) <=
> +	return CIRC_SPACE(pf_queue->head, pf_queue->tail,
> +			  pf_queue->pf_queue_num_dw) <=
>  		PF_MSG_LEN_DW;
>  }
>  
> @@ -312,22 +313,23 @@ int xe_guc_pagefault_handler(struct xe_guc *guc, u32 *msg, u32 len)
>  	u32 asid;
>  	bool full;
>  
> -	/*
> -	 * The below logic doesn't work unless PF_QUEUE_NUM_DW % PF_MSG_LEN_DW == 0
> -	 */
> -	BUILD_BUG_ON(PF_QUEUE_NUM_DW % PF_MSG_LEN_DW);
> -
>  	if (unlikely(len != PF_MSG_LEN_DW))
>  		return -EPROTO;
>  
>  	asid = FIELD_GET(PFD_ASID, msg[1]);
>  	pf_queue = gt->usm.pf_queue + (asid % NUM_PF_QUEUE);
>  
> +	/*
> +	 * The below logic doesn't work unless PF_QUEUE_NUM_DW % PF_MSG_LEN_DW == 0
> +	 */
> +	xe_gt_assert(gt, !(pf_queue->pf_queue_num_dw % PF_MSG_LEN_DW));
> +
>  	spin_lock_irqsave(&pf_queue->lock, flags);
>  	full = pf_queue_full(pf_queue);
>  	if (!full) {
>  		memcpy(pf_queue->data + pf_queue->head, msg, len * sizeof(u32));
> -		pf_queue->head = (pf_queue->head + len) % PF_QUEUE_NUM_DW;
> +		pf_queue->head = (pf_queue->head + len) %
> +			pf_queue->pf_queue_num_dw;
>  		queue_work(gt->usm.pf_wq, &pf_queue->worker);
>  	} else {
>  		drm_warn(&xe->drm, "PF Queue full, shouldn't be possible");
> @@ -386,26 +388,54 @@ static void pagefault_fini(void *arg)
>  {
>  	struct xe_gt *gt = arg;
>  	struct xe_device *xe = gt_to_xe(gt);
> +	int i;
>  
>  	if (!xe->info.has_usm)
>  		return;
>  
>  	destroy_workqueue(gt->usm.acc_wq);
>  	destroy_workqueue(gt->usm.pf_wq);
> +
> +	for (i = 0; i < NUM_PF_QUEUE; ++i)
> +		kfree(gt->usm.pf_queue[i].data);
> +}
> +
> +static int xe_alloc_pf_queue(struct xe_gt *gt, struct pf_queue *pf_queue)
> +{
> +	xe_dss_mask_t all_dss;
> +	int num_dss, num_eus;
> +
> +	bitmap_or(all_dss, gt->fuse_topo.g_dss_mask, gt->fuse_topo.c_dss_mask,
> +		  XE_MAX_DSS_FUSE_BITS);
> +
> +	num_dss = bitmap_weight(all_dss, XE_MAX_DSS_FUSE_BITS);
> +	num_eus = bitmap_weight(gt->fuse_topo.eu_mask_per_dss,
> +				XE_MAX_EU_FUSE_BITS) * num_dss;
> +
> +	/* user can issue separate page faults per EU and per CS */
> +	pf_queue->pf_queue_num_dw =
> +		(num_eus + XE_NUM_HW_ENGINES) * PF_MSG_LEN_DW;
> +
> +	pf_queue->gt = gt;
> +	pf_queue->data = kzalloc(pf_queue->pf_queue_num_dw, GFP_KERNEL);
> +	spin_lock_init(&pf_queue->lock);
> +	INIT_WORK(&pf_queue->worker, pf_queue_work_func);
> +
> +	return 0;
>  }
>  
>  int xe_gt_pagefault_init(struct xe_gt *gt)
>  {
>  	struct xe_device *xe = gt_to_xe(gt);
> -	int i;
> +	int i, ret = 0;
>  
>  	if (!xe->info.has_usm)
>  		return 0;
>  
>  	for (i = 0; i < NUM_PF_QUEUE; ++i) {
> -		gt->usm.pf_queue[i].gt = gt;
> -		spin_lock_init(&gt->usm.pf_queue[i].lock);
> -		INIT_WORK(&gt->usm.pf_queue[i].worker, pf_queue_work_func);
> +		ret = xe_alloc_pf_queue(gt, &gt->usm.pf_queue[i]);
> +		if (ret)
> +			return ret;
>  	}
>  	for (i = 0; i < NUM_ACC_QUEUE; ++i) {
>  		gt->usm.acc_queue[i].gt = gt;
> diff --git a/drivers/gpu/drm/xe/xe_gt_types.h b/drivers/gpu/drm/xe/xe_gt_types.h
> index ef68c4a92972..f2a0bd19260b 100644
> --- a/drivers/gpu/drm/xe/xe_gt_types.h
> +++ b/drivers/gpu/drm/xe/xe_gt_types.h
> @@ -238,9 +238,14 @@ struct xe_gt {
>  		struct pf_queue {
>  			/** @usm.pf_queue.gt: back pointer to GT */
>  			struct xe_gt *gt;
> -#define PF_QUEUE_NUM_DW	128
>  			/** @usm.pf_queue.data: data in the page fault queue */
> -			u32 data[PF_QUEUE_NUM_DW];
> +			u32 *data;
> +			/**
> +			 * @usm.pf_queue_num_dw: number of DWORDS in the page
> +			 * fault queue. Dynamically calculated based on the number
> +			 * of compute resources available.
> +			 */
> +			u32 pf_queue_num_dw;
>  			/**
>  			 * @usm.pf_queue.tail: tail pointer in DWs for page fault queue,
>  			 * moved by worker which processes faults (consumer).
> -- 
> 2.34.1
> 

  reply	other threads:[~2024-07-19 18:13 UTC|newest]

Thread overview: 11+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2024-07-19 17:58 [PATCH 0/3] Update page fault queue size calculation Stuart Summers
2024-07-19 17:58 ` [PATCH 1/3] drm/xe: Fix missing workqueue destroy in xe_gt_pagefault Stuart Summers
2024-07-19 17:58 ` [PATCH 2/3] drm/xe: Use topology to determine page fault queue size Stuart Summers
2024-07-19 18:12   ` Matthew Brost [this message]
2024-07-19 17:58 ` [PATCH 3/3] drm/xe/guc: Bump the G2H queue size to account for page faults Stuart Summers
2024-07-19 18:10   ` Matthew Brost
2024-07-19 19:00     ` Summers, Stuart
2024-07-19 18:03 ` ✓ CI.Patch_applied: success for Update page fault queue size calculation (rev3) Patchwork
2024-07-19 18:03 ` ✓ CI.checkpatch: " Patchwork
2024-07-19 18:04 ` ✗ CI.KUnit: failure " Patchwork
  -- strict thread matches above, loose matches on Subject: below --
2024-07-19 19:06 [PATCH 0/3] Update page fault queue size calculation Stuart Summers
2024-07-19 19:06 ` [PATCH 2/3] drm/xe: Use topology to determine page fault queue size Stuart Summers

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=ZpqsjqILy5RCL0oQ@DUT025-TGLU.fm.intel.com \
    --to=matthew.brost@intel.com \
    --cc=John.C.Harrison@intel.com \
    --cc=brian.welty@intel.com \
    --cc=intel-xe@lists.freedesktop.org \
    --cc=rodrigo.vivi@intel.com \
    --cc=stuart.summers@intel.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox