Intel-XE Archive on lore.kernel.org
From: Matthew Brost <matthew.brost@intel.com>
To: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
Cc: <intel-xe@lists.freedesktop.org>, <thomas.hellstrom@linux.intel.com>
Subject: Re: [PATCH v4 16/20] drm/xe/svm: Consult madvise preferred location in prefetch
Date: Mon, 23 Jun 2025 15:07:48 -0700
Message-ID: <aFnQNFM87/jRVqXZ@lstrano-desk.jf.intel.com>
In-Reply-To: <20250613125558.2607665-17-himal.prasad.ghimiray@intel.com>

On Fri, Jun 13, 2025 at 06:25:54PM +0530, Himal Prasad Ghimiray wrote:
> When the prefetch region is DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC, prefetch SVM
> ranges to the preferred location provided by madvise.
> 
> v2 (Matthew Brost)
> - Fix region and devmem_fd usages
> - Consulting madvise is applicable to other VMAs too.
> 
> v3
> - Fix atomic handling
> 
> Cc: Matthew Brost <matthew.brost@intel.com>

Reviewed-by: Matthew Brost <matthew.brost@intel.com>
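
For anyone following along, here is a minimal, hypothetical (untested) userspace
sketch of what the new behaviour buys: a prefetch that defers placement to the
madvise preferred location. It assumes the DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC
define introduced earlier in this series plus the existing drm_xe_vm_bind_op
prefetch fields; the header path and wrapper function are illustrative only:

#include <string.h>
#include <sys/ioctl.h>
#include <drm/xe_drm.h>	/* carries DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC once this series lands */

/*
 * Sketch only: fd is an open xe device node, vm_id/addr/range describe an
 * existing CPU-address-mirror mapping; error handling omitted.
 */
static int prefetch_consult_madvise(int fd, __u32 vm_id, __u64 addr, __u64 range)
{
	struct drm_xe_vm_bind bind;

	memset(&bind, 0, sizeof(bind));
	bind.vm_id = vm_id;
	bind.num_binds = 1;
	bind.bind.op = DRM_XE_VM_BIND_OP_PREFETCH;
	bind.bind.addr = addr;
	bind.bind.range = range;
	/* New in this series: consult the madvise preferred location. */
	bind.bind.prefetch_mem_region_instance = DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC;

	return ioctl(fd, DRM_IOCTL_XE_VM_BIND, &bind);
}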

> Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
> ---
>  drivers/gpu/drm/xe/xe_svm.h  |  1 -
>  drivers/gpu/drm/xe/xe_tile.h | 18 ++++++++++++++++++
>  drivers/gpu/drm/xe/xe_vm.c   | 26 ++++++++++++++++++--------
>  3 files changed, 36 insertions(+), 9 deletions(-)
> 
> diff --git a/drivers/gpu/drm/xe/xe_svm.h b/drivers/gpu/drm/xe/xe_svm.h
> index 900a5883951f..1658e28f3773 100644
> --- a/drivers/gpu/drm/xe/xe_svm.h
> +++ b/drivers/gpu/drm/xe/xe_svm.h
> @@ -327,7 +327,6 @@ struct drm_pagemap *xe_vma_resolve_pagemap(struct xe_vma *vma, struct xe_tile *t
>  {
>  	return NULL;
>  }
> -
>  #define xe_svm_assert_in_notifier(...) do {} while (0)
>  #define xe_svm_range_has_dma_mapping(...) false
>  
> diff --git a/drivers/gpu/drm/xe/xe_tile.h b/drivers/gpu/drm/xe/xe_tile.h
> index eb939316d55b..142ed5cf654b 100644
> --- a/drivers/gpu/drm/xe/xe_tile.h
> +++ b/drivers/gpu/drm/xe/xe_tile.h
> @@ -10,6 +10,24 @@
>  
>  struct xe_tile;
>  
> +#if IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR)
> +/**
> + * xe_tile_from_dpagemap - Find xe_tile from drm_pagemap
> + * @dpagemap: pointer to struct drm_pagemap
> + *
> + * Return: Pointer to xe_tile
> + */
> +static inline struct xe_tile *xe_tile_from_dpagemap(struct drm_pagemap *dpagemap)
> +{
> +	return container_of(dpagemap, struct xe_tile, mem.vram.dpagemap);
> +}
> +
> +#else
> +static inline  struct xe_tile *xe_tile_from_dpagemap(struct drm_pagemap *dpagemap)
> +{
> +	return NULL;
> +}
> +#endif
>  int xe_tile_init_early(struct xe_tile *tile, struct xe_device *xe, u8 id);
>  int xe_tile_init_noalloc(struct xe_tile *tile);
>  int xe_tile_init(struct xe_tile *tile);
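
Side note for readers: xe_tile_from_dpagemap() works because each tile embeds
the drm_pagemap for its VRAM region, so container_of() can walk back from the
pagemap pointer to the owning tile. A stripped-down, generic illustration of
that pattern (plain C, not xe code, shown only to make the pointer math obvious):

#include <stddef.h>

struct inner { int x; };

struct outer {
	int id;
	struct inner member;	/* embedded member, like mem.vram.dpagemap above */
};

/* Same pointer arithmetic the kernel's container_of() performs. */
#define my_container_of(ptr, type, field) \
	((type *)((char *)(ptr) - offsetof(type, field)))

static struct outer *outer_from_inner(struct inner *p)
{
	return my_container_of(p, struct outer, member);
}
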
> diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
> index 6dd1f868942d..0dd9f9e11030 100644
> --- a/drivers/gpu/drm/xe/xe_vm.c
> +++ b/drivers/gpu/drm/xe/xe_vm.c
> @@ -38,6 +38,7 @@
>  #include "xe_res_cursor.h"
>  #include "xe_svm.h"
>  #include "xe_sync.h"
> +#include "xe_tile.h"
>  #include "xe_trace_bo.h"
>  #include "xe_wa.h"
>  #include "xe_hmm.h"
> @@ -2907,15 +2908,24 @@ static int prefetch_ranges(struct xe_vm *vm, struct xe_vma_op *op)
>  	int err = 0;
>  
>  	struct xe_svm_range *svm_range;
> +	struct drm_pagemap *dpagemap;
>  	struct drm_gpusvm_ctx ctx = {};
> -	struct xe_tile *tile;
> +	struct xe_tile *tile = NULL;
>  	unsigned long i;
>  	u32 region;
>  
>  	if (!xe_vma_is_cpu_addr_mirror(vma))
>  		return 0;
>  
> -	region = op->prefetch_range.region;
> +	if (op->prefetch_range.region == DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC) {
> +		dpagemap = xe_vma_resolve_pagemap(vma, xe_device_get_root_tile(vm->xe));
> +		if (dpagemap)
> +			tile = xe_tile_from_dpagemap(dpagemap);
> +	} else {
> +		region = op->prefetch_range.region;
> +		if (region)
> +			tile = &vm->xe->tiles[region_to_mem_type[region] - XE_PL_VRAM0];
> +	}
>  
>  	ctx.read_only = xe_vma_read_only(vma);
>  	ctx.devmem_possible = devmem_possible;
> @@ -2923,11 +2933,10 @@ static int prefetch_ranges(struct xe_vm *vm, struct xe_vma_op *op)
>  
>  	/* TODO: Threading the migration */
>  	xa_for_each(&op->prefetch_range.range, i, svm_range) {
> -		if (!region)
> +		if (!tile)
>  			xe_svm_range_migrate_to_smem(vm, svm_range);
>  
> -		if (xe_svm_range_needs_migrate_to_vram(svm_range, vma, region)) {
> -			tile = &vm->xe->tiles[region_to_mem_type[region] - XE_PL_VRAM0];
> +		if (xe_svm_range_needs_migrate_to_vram(svm_range, vma, !!tile)) {
>  			err = xe_svm_alloc_vram(vm, tile, svm_range, &ctx);
>  			if (err) {
>  				drm_dbg(&vm->xe->drm, "VRAM allocation failed, retry from userspace, asid=%u, gpusvm=%p, errno=%pe\n",
> @@ -2995,7 +3004,8 @@ static int op_lock_and_prep(struct drm_exec *exec, struct xe_vm *vm,
>  		else
>  			region = op->prefetch.region;
>  
> -		xe_assert(vm->xe, region <= ARRAY_SIZE(region_to_mem_type));
> +		xe_assert(vm->xe, region == DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC ||
> +			  region <= ARRAY_SIZE(region_to_mem_type));
>  
>  		err = vma_lock_and_validate(exec,
>  					    gpuva_to_vma(op->base.prefetch.va),
> @@ -3413,8 +3423,8 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe, struct xe_vm *vm,
>  				 op == DRM_XE_VM_BIND_OP_PREFETCH) ||
>  		    XE_IOCTL_DBG(xe, prefetch_region &&
>  				 op != DRM_XE_VM_BIND_OP_PREFETCH) ||
> -		    XE_IOCTL_DBG(xe, !(BIT(prefetch_region) &
> -				       xe->info.mem_region_mask)) ||
> +		    XE_IOCTL_DBG(xe,  (prefetch_region != DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC &&
> +				       !(BIT(prefetch_region) & xe->info.mem_region_mask))) ||
>  		    XE_IOCTL_DBG(xe, obj &&
>  				 op == DRM_XE_VM_BIND_OP_UNMAP)) {
>  			err = -EINVAL;
> -- 
> 2.34.1
> 


Thread overview: 68+ messages
2025-06-13 12:55 [PATCH v4 00/20] MADVISE FOR XE Himal Prasad Ghimiray
2025-06-13 12:43 ` ✗ CI.checkpatch: warning for MADVISE FOR XE (rev2) Patchwork
2025-06-13 12:44 ` ✗ CI.KUnit: failure " Patchwork
2025-06-13 12:55 ` [PATCH v4 01/20] Introduce drm_gpuvm_sm_map_ops_flags enums for sm_map_ops Himal Prasad Ghimiray
2025-06-13 12:55 ` [PATCH v4 02/20] drm/xe/uapi: Add madvise interface Himal Prasad Ghimiray
2025-06-13 14:15   ` Souza, Jose
2025-06-23  4:30   ` Matthew Brost
2025-06-23  6:20     ` Ghimiray, Himal Prasad
2025-06-27 13:47       ` Thomas Hellström
2025-06-27 14:29     ` Thomas Hellström
2025-06-27 18:13       ` Matthew Brost
2025-06-13 12:55 ` [PATCH v4 03/20] drm/xe/vm: Add attributes struct as member of vma Himal Prasad Ghimiray
2025-06-23  4:18   ` Matthew Brost
2025-06-23  6:21     ` Ghimiray, Himal Prasad
2025-06-27 14:32     ` Thomas Hellström
2025-06-13 12:55 ` [PATCH v4 04/20] drm/xe/vma: Move pat_index to vma attributes Himal Prasad Ghimiray
2025-06-13 12:55 ` [PATCH v4 05/20] drm/xe/vma: Modify new_vma to accept struct xe_vma_mem_attr as parameter Himal Prasad Ghimiray
2025-06-23  4:38   ` Matthew Brost
2025-06-23 16:21     ` Matthew Brost
2025-06-13 12:55 ` [PATCH v4 06/20] drm/gpusvm: Make drm_gpusvm_for_each_* macros public Himal Prasad Ghimiray
2025-06-13 12:55 ` [PATCH v4 07/20] drm/xe/svm: Split system allocator vma incase of madvise call Himal Prasad Ghimiray
2025-06-13 12:55 ` [PATCH v4 08/20] drm/xe/svm: Add xe_svm_ranges_zap_ptes_in_range() for PTE zapping Himal Prasad Ghimiray
2025-06-23  4:56   ` Matthew Brost
2025-06-23  6:25     ` Ghimiray, Himal Prasad
2025-06-13 12:55 ` [PATCH v4 09/20] drm/xe: Implement madvise ioctl for xe Himal Prasad Ghimiray
2025-06-23  5:33   ` Matthew Brost
2025-06-26  6:04   ` Lin, Shuicheng
2025-06-26  6:15     ` Matthew Brost
2025-06-26  8:36       ` Ghimiray, Himal Prasad
2025-06-26  8:34     ` Ghimiray, Himal Prasad
2025-06-13 12:55 ` [PATCH v4 10/20] drm/xe/vm: Add an identifier for madvise in xe_vma_ops Himal Prasad Ghimiray
2025-06-23  5:38   ` Matthew Brost
2025-06-23  6:28     ` Ghimiray, Himal Prasad
2025-06-13 12:55 ` [PATCH v4 11/20] drm/xe: Allow CPU address mirror VMA unbind with gpu bindings for madvise Himal Prasad Ghimiray
2025-06-14  4:31   ` kernel test robot
2025-06-23  5:52   ` Matthew Brost
2025-06-23  6:18     ` Ghimiray, Himal Prasad
2025-06-23 11:45       ` Matthew Brost
2025-06-13 12:55 ` [PATCH v4 12/20] drm/xe/svm : Add svm ranges migration policy on atomic access Himal Prasad Ghimiray
2025-06-23 16:32   ` Matthew Brost
2025-06-13 12:55 ` [PATCH v4 13/20] drm/xe/madvise: Update migration policy based on preferred location Himal Prasad Ghimiray
2025-06-13 23:31   ` kernel test robot
2025-06-14  5:33   ` kernel test robot
2025-06-13 12:55 ` [PATCH v4 14/20] drm/xe/svm: Support DRM_XE_SVM_ATTR_PAT memory attribute Himal Prasad Ghimiray
2025-06-23 16:34   ` Matthew Brost
2025-06-13 12:55 ` [PATCH v4 15/20] drm/xe/uapi: Add flag for consulting madvise hints on svm prefetch Himal Prasad Ghimiray
2025-06-23 16:36   ` Matthew Brost
2025-06-13 12:55 ` [PATCH v4 16/20] drm/xe/svm: Consult madvise preferred location in prefetch Himal Prasad Ghimiray
2025-06-23 22:07   ` Matthew Brost [this message]
2025-06-13 12:55 ` [PATCH v4 17/20] drm/xe/bo: Add attributes field to xe_bo Himal Prasad Ghimiray
2025-06-13 12:55 ` [PATCH v4 18/20] drm/xe/bo: Update atomic_access attribute on madvise Himal Prasad Ghimiray
2025-06-23 16:19   ` Matthew Brost
2025-06-13 12:55 ` [PATCH v4 19/20] drm/xe/uapi: Add UAPI for querying VMA count and memory attributes Himal Prasad Ghimiray
2025-06-23 22:43   ` Matthew Brost
2025-06-24  2:18   ` Matthew Brost
2025-06-27 13:20     ` Thomas Hellström
2025-06-27 13:43     ` Thomas Hellström
2025-06-26  3:44   ` Lin, Shuicheng
2025-06-13 12:55 ` [PATCH v4 20/20] drm/xe/madvise: Skip vma invalidation if mem attr are unchanged Himal Prasad Ghimiray
2025-06-23 22:28   ` Matthew Brost
2025-06-26  8:54     ` Ghimiray, Himal Prasad
2025-06-16  4:30 ` ✗ CI.checkpatch: warning for MADVISE FOR XE (rev3) Patchwork
2025-06-16  4:31 ` ✓ CI.KUnit: success " Patchwork
2025-06-16  4:45 ` ✗ CI.checksparse: warning " Patchwork
2025-06-16  5:13 ` ✓ Xe.CI.BAT: success " Patchwork
2025-06-16 15:06 ` ✗ Xe.CI.Full: failure " Patchwork
2025-07-29  4:41 ` [PATCH v4 00/20] MADVISE FOR XE Matthew Brost
2025-07-30 11:16   ` Ghimiray, Himal Prasad
