From: Matthew Brost <matthew.brost@intel.com>
To: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
Cc: <intel-xe@lists.freedesktop.org>,
	<thomas.hellstrom@linux.intel.com>, <stuart.summers@intel.com>,
	<arvind.yadav@intel.com>, <tejas.upadhyay@intel.com>
Subject: Re: [RFC 10/15] drm/xe/svm: Handle svm ranges on access ctr trigger
Date: Thu, 2 Apr 2026 17:25:19 -0700
Message-ID: <ac8I76G9/3qo9+wW@gsse-cloud1.jf.intel.com>
In-Reply-To: <20260318074456.2839499-11-himal.prasad.ghimiray@intel.com>

On Wed, Mar 18, 2026 at 01:14:51PM +0530, Himal Prasad Ghimiray wrote:
> Migrate ranges to local vram and setup pte for gpu access.
> 
> Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
> ---
>  drivers/gpu/drm/xe/xe_access_counter.c | 30 ++++++-------
>  drivers/gpu/drm/xe/xe_svm.c            | 59 +++++++++++++++++++-------
>  drivers/gpu/drm/xe/xe_svm.h            |  4 ++
>  3 files changed, 63 insertions(+), 30 deletions(-)
> 
> diff --git a/drivers/gpu/drm/xe/xe_access_counter.c b/drivers/gpu/drm/xe/xe_access_counter.c
> index f93618faab02..ba15f21c6803 100644
> --- a/drivers/gpu/drm/xe/xe_access_counter.c
> +++ b/drivers/gpu/drm/xe/xe_access_counter.c
> @@ -13,6 +13,7 @@
>  #include "xe_device.h"
>  #include "xe_gt_printk.h"
>  #include "xe_hw_engine.h"
> +#include "xe_svm.h"
>  #include "xe_trace_bo.h"
>  #include "xe_usm_queue.h"
>  #include "xe_vm.h"
> @@ -42,20 +43,14 @@ static int xe_access_counter_sub_granularity_in_byte(int val)
>  	return xe_access_counter_granularity_in_byte(val) / 32;
>  }
>  
> -static struct xe_vma *xe_access_counter_get_vma(struct xe_vm *vm,
> -						struct xe_access_counter *ac)
> +static u64 xe_access_counter_get_va(struct xe_access_counter *ac)
>  {
> -	u64 page_va;
> -
> -	if (ac->consumer.granularity != XE_ACCESS_COUNTER_GRANULARITY_128K) {
> -		page_va = ac->consumer.va_range_base;
> -	} else {
> -		page_va = ac->consumer.va_range_base +
> -			  (ffs(ac->consumer.sub_granularity) - 1) *
> -			  xe_access_counter_sub_granularity_in_byte(ac->consumer.granularity);
> -	}
> +	if (ac->consumer.granularity != XE_ACCESS_COUNTER_GRANULARITY_128K)
> +		return ac->consumer.va_range_base;
>  
> -	return xe_vm_find_overlapping_vma(vm, page_va, SZ_4K);
> +	return ac->consumer.va_range_base +
> +	       (ffs(ac->consumer.sub_granularity) - 1) *
> +	       xe_access_counter_sub_granularity_in_byte(ac->consumer.granularity);
>  }
>  
>  static void xe_access_counter_print(struct xe_access_counter *ac)
> @@ -88,6 +83,7 @@ static int xe_access_counter_service(struct xe_access_counter *ac)
>  	struct dma_fence *fence;
>  	struct xe_vm *vm;
>  	struct xe_vma *vma;
> +	u64 page_va;
>  	int err = 0;
>  
>  	if (ac->consumer.counter_type > XE_ACCESS_COUNTER_TYPE_NOTIFY)
> @@ -104,7 +100,8 @@ static int xe_access_counter_service(struct xe_access_counter *ac)
>  		goto unlock_vm;
>  	}
>  	/* Lookup VMA */
> -	vma = xe_access_counter_get_vma(vm, ac);
> +	page_va  = xe_access_counter_get_va(ac);
> +	vma = xe_vm_find_vma_by_addr(vm, page_va);
>  	if (!vma) {
>  		err = -EINVAL;
>  		goto unlock_vm;
> @@ -112,9 +109,12 @@ static int xe_access_counter_service(struct xe_access_counter *ac)
>  
>  	trace_xe_vma_acc(vma, ac->consumer.counter_type);
>  
> -	/* TODO: Handle svm vma's  */
> -	if (xe_vma_has_no_bo(vma))
> +	if (xe_vma_has_no_bo(vma)) {
> +		if (xe_vma_is_cpu_addr_mirror(vma))
> +			err = xe_svm_range_setup(vm, vma, gt, page_va,
> +						 false, true);

Can we split out the access-counter handling for SVM ranges versus
BO-backed VMAs into separate functions, similar to how page faults are
handled?

For example:

	if (xe_vma_is_cpu_addr_mirror(vma))
		err = xe_svm_handle_pagefault(vm, vma, gt,
					      pf->consumer.page_addr, atomic);
	else
		err = xe_pagefault_handle_vma(gt, vma, atomic);
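
Applied to the access counters, a rough sketch could look like this
(the handler names here are just illustrative, neither exists yet):

	if (xe_vma_is_cpu_addr_mirror(vma))
		err = xe_svm_handle_access_counter(vm, vma, gt, page_va);
	else
		err = xe_access_counter_handle_vma(gt, vma);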


I think this would make the code slightly more maintainable and clearer.

Also, can we add a flags argument to xe_svm_range_setup instead of
passing two booleans?

Arvind just did something similar for vma_lock_and_validate [1], and I
think Xe should move in this direction, avoiding multiple boolean
arguments as in Arvind's changes. With flags it is much clearer at the
call site what is being requested than with bare bools.
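
For instance, a minimal sketch (the flag names are made up):

	#define XE_SVM_RANGE_SETUP_ATOMIC	BIT(0)
	#define XE_SVM_RANGE_SETUP_ACC_CTR	BIT(1)

	int xe_svm_range_setup(struct xe_vm *vm, struct xe_vma *vma,
			       struct xe_gt *gt, u64 addr,
			       unsigned int flags);

A call like xe_svm_range_setup(vm, vma, gt, page_va,
XE_SVM_RANGE_SETUP_ACC_CTR) then tells the reader exactly what is being
requested, unlike a pair of bare true/false arguments.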

Matt

[1] https://patchwork.freedesktop.org/patch/714449/?series=156651&rev=11

>  		goto unlock_vm;
> +	}
>  
>  	/* Lock VM and BOs dma-resv */
>  	xe_validation_ctx_init(&ctx, &vm->xe->val, &exec, (struct xe_val_flags) {});
> diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c
> index a91c84487a67..fe0f86a2d0bf 100644
> --- a/drivers/gpu/drm/xe/xe_svm.c
> +++ b/drivers/gpu/drm/xe/xe_svm.c
> @@ -1186,9 +1186,9 @@ DECL_SVM_RANGE_US_STATS(get_pages, GET_PAGES)
>  DECL_SVM_RANGE_US_STATS(bind, BIND)
>  DECL_SVM_RANGE_US_STATS(fault, PAGEFAULT)
>  
> -static int __xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
> -				     struct xe_gt *gt, u64 fault_addr,
> -				     bool need_vram)
> +static int __xe_svm_range_setup(struct xe_vm *vm, struct xe_vma *vma,
> +				struct xe_gt *gt, u64 fault_addr,
> +				bool need_vram, bool acc_ctr_trigger)
>  {
>  	int devmem_possible = IS_DGFX(vm->xe) &&
>  		IS_ENABLED(CONFIG_DRM_XE_PAGEMAP);
> @@ -1196,7 +1196,7 @@ static int __xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
>  		.read_only = xe_vma_read_only(vma),
>  		.devmem_possible = devmem_possible,
>  		.check_pages_threshold = devmem_possible ? SZ_64K : 0,
> -		.devmem_only = need_vram && devmem_possible,
> +		.devmem_only = (need_vram || acc_ctr_trigger) && devmem_possible,
>  		.timeslice_ms = need_vram && devmem_possible ?
>  			vm->xe->atomic_svm_timeslice_ms : 0,
>  	};
> @@ -1213,7 +1213,8 @@ static int __xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
>  	lockdep_assert_held_write(&vm->lock);
>  	xe_assert(vm->xe, xe_vma_is_cpu_addr_mirror(vma));
>  
> -	xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_PAGEFAULT_COUNT, 1);
> +	if (!acc_ctr_trigger)
> +		xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_PAGEFAULT_COUNT, 1);
>  
>  retry:
>  	/* Always process UNMAPs first so view SVM ranges is current */
> @@ -1229,7 +1230,8 @@ static int __xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
>  	if (IS_ERR(range))
>  		return PTR_ERR(range);
>  
> -	xe_svm_range_fault_count_stats_incr(gt, range);
> +	if (!acc_ctr_trigger)
> +		xe_svm_range_fault_count_stats_incr(gt, range);
>  
>  	if (ctx.devmem_only && !range->base.pages.flags.migrate_devmem) {
>  		err = -EACCES;
> @@ -1244,6 +1246,10 @@ static int __xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
>  
>  	range_debug(range, "PAGE FAULT");
>  
> +	if (acc_ctr_trigger && !range->base.pages.flags.migrate_devmem) {
> +		goto out;
> +	}
> +
>  	if (--migrate_try_count >= 0 &&
>  	    xe_svm_range_needs_migrate_to_vram(range, vma, dpagemap)) {
>  		ktime_t migrate_start = xe_gt_stats_ktime_get();
> @@ -1307,6 +1313,7 @@ static int __xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
>  	}
>  
>  	xe_svm_range_get_pages_us_stats_incr(gt, range, get_pages_start);
> +
>  	range_debug(range, "PAGE FAULT - BIND");
>  
>  	bind_start = xe_gt_stats_ktime_get();
> @@ -1347,21 +1354,22 @@ static int __xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
>  }
>  
>  /**
> - * xe_svm_handle_pagefault() - SVM handle page fault
> + * xe_svm_range_setup - Setup range for GPU access
>   * @vm: The VM.
>   * @vma: The CPU address mirror VMA.
> - * @gt: The gt upon the fault occurred.
> - * @fault_addr: The GPU fault address.
> + * @gt: The gt for which binding.
> + * @addr: Addr for which need to bind svm range.
>   * @atomic: The fault atomic access bit.
> + * @acc_ctr_trigger: If true, always migrate to local device memory.
>   *
>   * Create GPU bindings for a SVM page fault. Optionally migrate to device
>   * memory.
>   *
>   * Return: 0 on success, negative error code on error.
>   */
> -int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
> -			    struct xe_gt *gt, u64 fault_addr,
> -			    bool atomic)
> +int xe_svm_range_setup(struct xe_vm *vm, struct xe_vma *vma,
> +		       struct xe_gt *gt, u64 addr,
> +		       bool atomic, bool acc_ctr_trigger)
>  {
>  	int need_vram, ret;
>  retry:
> @@ -1369,14 +1377,15 @@ int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
>  	if (need_vram < 0)
>  		return need_vram;
>  
> -	ret =  __xe_svm_handle_pagefault(vm, vma, gt, fault_addr,
> -					 need_vram ? true : false);
> +	ret =  __xe_svm_range_setup(vm, vma, gt, addr,
> +				    need_vram ? true : false,
> +				    acc_ctr_trigger);
>  	if (ret == -EAGAIN) {
>  		/*
>  		 * Retry once on -EAGAIN to re-lookup the VMA, as the original VMA
>  		 * may have been split by xe_svm_range_set_default_attr.
>  		 */
> -		vma = xe_vm_find_vma_by_addr(vm, fault_addr);
> +		vma = xe_vm_find_vma_by_addr(vm, addr);
>  		if (!vma)
>  			return -EINVAL;
>  
> @@ -1385,6 +1394,26 @@ int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
>  	return ret;
>  }
>  
> +/**
> + * xe_svm_handle_pagefault() - SVM handle page fault
> + * @vm: The VM.
> + * @vma: The CPU address mirror VMA.
> + * @gt: The gt upon the fault occurred.
> + * @fault_addr: The GPU fault address.
> + * @atomic: The fault atomic access bit.
> + *
> + * Create GPU bindings for a SVM page fault. Optionally migrate to device
> + * memory.
> + *
> + * Return: 0 on success, negative error code on error.
> + */
> +int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
> +			    struct xe_gt *gt, u64 fault_addr,
> +			    bool atomic)
> +{
> +	return xe_svm_range_setup(vm, vma, gt, fault_addr, atomic, false);
> +}
> +
>  /**
>   * xe_svm_has_mapping() - SVM has mappings
>   * @vm: The VM.
> diff --git a/drivers/gpu/drm/xe/xe_svm.h b/drivers/gpu/drm/xe/xe_svm.h
> index b7b8eeacf196..50861d93b12f 100644
> --- a/drivers/gpu/drm/xe/xe_svm.h
> +++ b/drivers/gpu/drm/xe/xe_svm.h
> @@ -85,6 +85,10 @@ void xe_svm_fini(struct xe_vm *vm);
>  
>  void xe_svm_close(struct xe_vm *vm);
>  
> +int xe_svm_range_setup(struct xe_vm *vm, struct xe_vma *vma,
> +		       struct xe_gt *gt, u64 addr,
> +		       bool atomic, bool acc_ctr_trigger);
> +
>  int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
>  			    struct xe_gt *gt, u64 fault_addr,
>  			    bool atomic);
> -- 
> 2.34.1
> 
