Intel-XE Archive on lore.kernel.org
From: Matthew Brost <matthew.brost@intel.com>
To: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
Cc: <intel-xe@lists.freedesktop.org>
Subject: Re: [PATCH v3 16/19] drm/xe/svm: Consult madvise preferred location in prefetch
Date: Thu, 29 May 2025 21:24:53 -0700
Message-ID: <aDkzFXMYot1XDEGc@lstrano-desk.jf.intel.com>
In-Reply-To: <20250527164003.1068118-17-himal.prasad.ghimiray@intel.com>

On Tue, May 27, 2025 at 10:10:00PM +0530, Himal Prasad Ghimiray wrote:
> When the prefetch region is DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC, prefetch SVM
> ranges to the preferred location provided by madvise.
> 
> v2 (Matthew Brost)
> - Fix region, devmem_fd usages
> - Consulting madvise is applicable to other VMAs too.
> 
> Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
> ---
>  drivers/gpu/drm/xe/xe_svm.c | 11 +++++++++++
>  drivers/gpu/drm/xe/xe_svm.h |  7 +++++++
>  drivers/gpu/drm/xe/xe_vm.c  | 30 ++++++++++++++++++++----------
>  3 files changed, 38 insertions(+), 10 deletions(-)
> 
> diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c
> index 8b6546ebac72..0c929eb192e7 100644
> --- a/drivers/gpu/drm/xe/xe_svm.c
> +++ b/drivers/gpu/drm/xe/xe_svm.c
> @@ -822,6 +822,17 @@ struct drm_pagemap *xe_vma_resolve_pagemap(struct xe_vma *vma, struct xe_tile *t
>  	return NULL;
>  }
>  
> +/**
> + * xe_tile_from_dpagemap - Find xe_tile from drm_pagemap
> + * @dpagemap: pointer to struct drm_pagemap
> + *
> + * Return: Pointer to xe_tile
> + */
> +struct xe_tile *xe_tile_from_dpagemap(struct drm_pagemap *dpagemap)
> +{
> +	return container_of(dpagemap, struct xe_tile, mem.vram.dpagemap);
> +}
> +

This looks to be in the wrong file; xe_tile.h, as a static inline, would
be my choice.
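
Untested sketch of what I mean, assuming xe_tile.h can see the struct
definitions it needs (the container_of already requires the pagemap to
be embedded in tile->mem.vram):

static inline struct xe_tile *xe_tile_from_dpagemap(struct drm_pagemap *dpagemap)
{
	/* The tile's VRAM region embeds its drm_pagemap; walk back up. */
	return container_of(dpagemap, struct xe_tile, mem.vram.dpagemap);
}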

>  /**
>   * xe_svm_handle_pagefault() - SVM handle page fault
>   * @vm: The VM.
> diff --git a/drivers/gpu/drm/xe/xe_svm.h b/drivers/gpu/drm/xe/xe_svm.h
> index 344349313001..a8b5bebf7a54 100644
> --- a/drivers/gpu/drm/xe/xe_svm.h
> +++ b/drivers/gpu/drm/xe/xe_svm.h
> @@ -97,6 +97,8 @@ void xe_svm_range_clean_if_addr_within(struct xe_vm *vm, u64 start, u64 end);
>  
>  struct drm_pagemap *xe_vma_resolve_pagemap(struct xe_vma *vma, struct xe_tile *tile);
>  
> +struct xe_tile *xe_tile_from_dpagemap(struct drm_pagemap *dpagemap);
> +
>  /**
>   * xe_svm_range_has_dma_mapping() - SVM range has DMA mapping
>   * @range: SVM range
> @@ -328,6 +330,11 @@ struct drm_pagemap *xe_vma_resolve_pagemap(struct xe_vma *vma, struct xe_tile *t
>  	return NULL;
>  }
>  
> +static inline
> +struct xe_tile *xe_tile_from_dpagemap(struct drm_pagemap *dpagemap)
> +{
> +	return NULL;
> +}
>  #define xe_svm_assert_in_notifier(...) do {} while (0)
>  #define xe_svm_range_has_dma_mapping(...) false
>  
> diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
> index e5fc2c2be8b2..4520e475399e 100644
> --- a/drivers/gpu/drm/xe/xe_vm.c
> +++ b/drivers/gpu/drm/xe/xe_vm.c
> @@ -2917,15 +2917,24 @@ static int prefetch_ranges(struct xe_vm *vm, struct xe_vma_op *op)
>  	int err = 0;
>  
>  	struct xe_svm_range *svm_range;
> +	struct drm_pagemap *dpagemap;
>  	struct drm_gpusvm_ctx ctx = {};
> -	struct xe_tile *tile;
> +	struct xe_tile *tile = NULL;
>  	unsigned long i;
>  	u32 region;
>  
>  	if (!xe_vma_is_cpu_addr_mirror(vma))
>  		return 0;
>  
> -	region = op->prefetch_range.region;
> +	if (op->prefetch_range.region == DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC) {
> +		dpagemap = xe_vma_resolve_pagemap(vma, tile);
> +		if (dpagemap)
> +			tile = xe_tile_from_dpagemap(dpagemap);
> +	} else {
> +		region = op->prefetch_range.region;
> +		if (region)
> +			tile = &vm->xe->tiles[region_to_mem_type[region] - XE_PL_VRAM0];
> +	}
>  
>  	ctx.read_only = xe_vma_read_only(vma);
>  	ctx.devmem_possible = devmem_possible;
> @@ -2935,18 +2944,18 @@ static int prefetch_ranges(struct xe_vm *vm, struct xe_vma_op *op)
>  
>  	/* TODO: Threading the migration */
>  	xa_for_each(&op->prefetch_range.range, i, svm_range) {
> -		bool needs_vram = xe_svm_range_needs_migrate_to_vram(svm_range, vma, region);
> +		bool needs_vram = xe_svm_range_needs_migrate_to_vram(svm_range, vma, !!tile);
>  
>  		if (!needs_vram) {
>  			xe_svm_range_migrate_to_smem(vm, svm_range);
>  		} else if (needs_vram) {

The needs_vram check is redundant here given the if above; a plain else
is enough.
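
i.e., untested, trimmed to just the control flow:

	if (!needs_vram) {
		xe_svm_range_migrate_to_smem(vm, svm_range);
	} else {
		/* VRAM path, otherwise unchanged */
		...
	}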

Matt

> -			/* If  migration is mandated by atomic attributes
> -			 * in vma and prefetch region is smem force prefetch
> +			/* If migration is mandated by atomic attributes
> +			 * in vma, and prefetch region is smem, force prefetch
>  			 * in vram of root tile.
>  			 */
> -			region = region ? region : 1;
> +			if (!tile)
> +				tile = xe_device_get_root_tile(vm->xe);
>  
> -			tile = &vm->xe->tiles[region_to_mem_type[region] - XE_PL_VRAM0];
>  			err = xe_svm_alloc_vram(vm, tile, svm_range, &ctx);
>  			if (err) {
>  				drm_dbg(&vm->xe->drm, "VRAM allocation failed, retry from userspace, asid=%u, gpusvm=%p, errno=%pe\n",
> @@ -3014,7 +3023,8 @@ static int op_lock_and_prep(struct drm_exec *exec, struct xe_vm *vm,
>  		else
>  			region = op->prefetch.region;
>  
> -		xe_assert(vm->xe, region <= ARRAY_SIZE(region_to_mem_type));
> +		xe_assert(vm->xe, region == DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC ||
> +			  region <= ARRAY_SIZE(region_to_mem_type));
>  
>  		err = vma_lock_and_validate(exec,
>  					    gpuva_to_vma(op->base.prefetch.va),
> @@ -3432,8 +3442,8 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe, struct xe_vm *vm,
>  				 op == DRM_XE_VM_BIND_OP_PREFETCH) ||
>  		    XE_IOCTL_DBG(xe, prefetch_region &&
>  				 op != DRM_XE_VM_BIND_OP_PREFETCH) ||
> -		    XE_IOCTL_DBG(xe, !(BIT(prefetch_region) &
> -				       xe->info.mem_region_mask)) ||
> +		    XE_IOCTL_DBG(xe,  (prefetch_region != DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC &&
> +				       !(BIT(prefetch_region) & xe->info.mem_region_mask))) ||
>  		    XE_IOCTL_DBG(xe, obj &&
>  				 op == DRM_XE_VM_BIND_OP_UNMAP)) {
>  			err = -EINVAL;
> -- 
> 2.34.1
> 
