From: Matthew Brost <matthew.brost@intel.com>
To: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
Cc: <intel-xe@lists.freedesktop.org>,
	<thomas.hellstrom@linux.intel.com>, <stuart.summers@intel.com>,
	<arvind.yadav@intel.com>, <tejas.upadhyay@intel.com>
Subject: Re: [RFC 08/15] drm/xe: Implement xe_access_counter_queue_work
Date: Wed, 1 Apr 2026 14:10:35 -0700
Message-ID: <ac2Jy0G6+Q9bnMmo@gsse-cloud1.jf.intel.com>
In-Reply-To: <20260318074456.2839499-9-himal.prasad.ghimiray@intel.com>

On Wed, Mar 18, 2026 at 01:14:49PM +0530, Himal Prasad Ghimiray wrote:
> Implement the worker function to dequeue and process access counter
> notifications, migrate BO-backed VMAs to VRAM, and rebind them.
> 
> Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>

Not a complete review - that will take a bit more time - but one quick
comment below.

> ---
>  drivers/gpu/drm/xe/xe_access_counter.c | 139 ++++++++++++++++++++++++-
>  1 file changed, 138 insertions(+), 1 deletion(-)
> 
> diff --git a/drivers/gpu/drm/xe/xe_access_counter.c b/drivers/gpu/drm/xe/xe_access_counter.c
> index a2ce9dc45d05..9eb9917d8da7 100644
> --- a/drivers/gpu/drm/xe/xe_access_counter.c
> +++ b/drivers/gpu/drm/xe/xe_access_counter.c
> @@ -11,7 +11,10 @@
>  #include "xe_access_counter.h"
>  #include "xe_access_counter_types.h"
>  #include "xe_device.h"
> +#include "xe_gt_printk.h"
> +#include "xe_hw_engine.h"
>  #include "xe_usm_queue.h"
> +#include "xe_vm.h"
>  
>  /**
>   * DOC: Xe access counters
> @@ -33,9 +36,143 @@ static int xe_access_counter_entry_size(void)
>  	return roundup_pow_of_two(sizeof(struct xe_access_counter));
>  }
>  
> +static int xe_access_counter_sub_granularity_in_byte(int val)
> +{
> +	return xe_access_counter_granularity_in_byte(val) / 32;
> +}
> +
> +static struct xe_vma *xe_access_counter_get_vma(struct xe_vm *vm,
> +						struct xe_access_counter *ac)
> +{
> +	u64 page_va;
> +
> +	if (ac->consumer.granularity != XE_ACCESS_COUNTER_GRANULARITY_128K) {
> +		page_va = ac->consumer.va_range_base;
> +	} else {
> +		page_va = ac->consumer.va_range_base +
> +			  (ffs(ac->consumer.sub_granularity) - 1) *
> +			  xe_access_counter_sub_granularity_in_byte(ac->consumer.granularity);
> +	}
> +
> +	return xe_vm_find_overlapping_vma(vm, page_va, SZ_4K);
> +}
> +
> +static void xe_access_counter_print(struct xe_access_counter *ac)
> +{
> +	xe_gt_dbg(ac->gt, "\n\tASID: %d\n"
> +		  "\tVA Range Base: 0x%08x%08x\n"
> +		  "\tCounter Type: %d\n"
> +		  "\tGranularity: %d\n"
> +		  "\tSub-Granularity: 0x%08x\n"
> +		  "\tEngineClass: %d %s\n"
> +		  "\tEngineInstance: %d\n",
> +		  ac->consumer.xe3.asid,
> +		  upper_32_bits(ac->consumer.va_range_base),
> +		  lower_32_bits(ac->consumer.va_range_base),
> +		  ac->consumer.counter_type,
> +		  ac->consumer.granularity,
> +		  ac->consumer.sub_granularity,
> +		  ac->consumer.xe3.engine_class,
> +		  xe_hw_engine_class_to_str(ac->consumer.xe3.engine_class),
> +		  ac->consumer.xe3.engine_instance);
> +}
> +
> +static int xe_access_counter_service(struct xe_access_counter *ac)
> +{
> +	struct xe_gt *gt = ac->gt;
> +	struct xe_device *xe = gt_to_xe(gt);
> +	struct xe_tile *tile = gt_to_tile(gt);
> +	struct xe_validation_ctx ctx;
> +	struct drm_exec exec;
> +	struct dma_fence *fence;
> +	struct xe_vm *vm;
> +	struct xe_vma *vma;
> +	int err = 0;
> +
> +	if (ac->consumer.counter_type > XE_ACCESS_COUNTER_TYPE_NOTIFY)
> +		return -EINVAL;
> +
> +	vm = xe_device_asid_to_fault_vm(xe, ac->consumer.xe3.asid);
> +	if (IS_ERR(vm))
> +		return PTR_ERR(vm);
> +
> +	down_write(&vm->lock);
> +
> +	if (xe_vm_is_closed(vm)) {
> +		err = -ENOENT;
> +		goto unlock_vm;
> +	}
> +	/* Lookup VMA */
> +	vma = xe_access_counter_get_vma(vm, ac);
> +	if (!vma) {
> +		err = -EINVAL;
> +		goto unlock_vm;
> +	}
> +
> +	/* TODO: Handle svm vma's  */
> +	if (xe_vma_has_no_bo(vma))
> +		goto unlock_vm;
> +
> +	/* Lock VM and BOs dma-resv */
> +	xe_validation_ctx_init(&ctx, &vm->xe->val, &exec, (struct xe_val_flags) {});
> +	drm_exec_until_all_locked(&exec) {
> +		err = xe_vma_lock_and_validate(&exec, vma, tile->mem.vram, true);
> +		drm_exec_retry_on_contention(&exec);
> +		xe_validation_retry_on_oom(&ctx, &err);
> +		if (err)
> +			break;
> +
> +		xe_vm_set_validation_exec(vm, &exec);
> +		fence = xe_vma_rebind(vm, vma, BIT(tile->id));
> +		xe_vm_set_validation_exec(vm, NULL);
> +		if (IS_ERR(fence))
> +			err = PTR_ERR(fence);
> +	}
> +
> +	if (!err && !IS_ERR(fence)) {
> +		dma_fence_wait(fence, false);
> +		dma_fence_put(fence);
> +	}

You can move the dma_fence_wait()/dma_fence_put() calls outside all of
the locks taken here. xe_pagefault_handle_vma() also unnecessarily
waits under locks, but that will be refactored a bit in [1]. Even
there, I should probably move the dma_fence_wait() completely outside
vm->lock, which is held in read mode after [1].
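
Something like the below (untested, just a sketch of the ordering I
have in mind - initialize fence to NULL so the early error paths stay
safe, then block only once everything is dropped):

	struct dma_fence *fence = NULL;

	...

	xe_validation_ctx_fini(&ctx);

unlock_vm:
	up_write(&vm->lock);

	/* No locks held here, so blocking on the rebind fence is fine */
	if (!err && fence) {
		dma_fence_wait(fence, false);
		dma_fence_put(fence);
	}

	xe_vm_put(vm);

	return err;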

Best to get the semantics of this new layer right upfront.

Matt

[1] https://patchwork.freedesktop.org/patch/707294/?series=162167&rev=4

> +
> +	xe_validation_ctx_fini(&ctx);
> +
> +unlock_vm:
> +	up_write(&vm->lock);
> +	xe_vm_put(vm);
> +
> +	return err;
> +}
> +
>  static void xe_access_counter_queue_work_func(struct work_struct *w)
>  {
> -	/* TODO: Implement */
> +	struct xe_usm_queue *ac_queue =
> +		container_of(w, typeof(*ac_queue), worker);
> +	struct xe_access_counter ac = {};
> +	unsigned long threshold;
> +
> +#define USM_QUEUE_MAX_RUNTIME_MS	20
> +	threshold = jiffies + msecs_to_jiffies(USM_QUEUE_MAX_RUNTIME_MS);
> +
> +	while (xe_usm_queue_pop(ac_queue, &ac, xe_access_counter_entry_size())) {
> +		int err;
> +
> +		if (!ac.gt)	/* Access counter squashed during reset */
> +			continue;
> +
> +		err = xe_access_counter_service(&ac);
> +		if (err) {
> +			xe_access_counter_print(&ac);
> +			xe_gt_dbg(ac.gt, "Access counter handling: Unsuccessful %pe\n",
> +				  ERR_PTR(err));
> +		}
> +
> +		if (time_after(jiffies, threshold) &&
> +		    ac_queue->tail != ac_queue->head) {
> +			queue_work(gt_to_xe(ac.gt)->usm.pf_wq, w);
> +			break;
> +		}
> +	}
> +#undef USM_QUEUE_MAX_RUNTIME_MS
>  }
>  
>  static int xe_access_counter_queue_init(struct xe_device *xe,
> -- 
> 2.34.1
> 

