From: Matthew Brost <matthew.brost@intel.com>
To: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
Cc: <intel-xe@lists.freedesktop.org>, <thomas.hellstrom@linux.intel.com>
Subject: Re: [PATCH v2 10/32] drm/xe/svm: Refactor usage of drm_gpusvm* function in xe_svm
Date: Wed, 16 Apr 2025 19:57:41 -0700
Message-ID: <aABuJfAvTnFHbFGs@lstrano-desk.jf.intel.com>
In-Reply-To: <20250407101719.3350996-11-himal.prasad.ghimiray@intel.com>
On Mon, Apr 07, 2025 at 03:46:57PM +0530, Himal Prasad Ghimiray wrote:
> Define xe_svm_range_find_or_insert function wrapping
> drm_gpusvm_range_find_or_insert for reuse in prefetch.
>
> Define xe_svm_range_get_pages function wrapping
> drm_gpusvm_range_get_pages for reuse in prefetch.
>
> v2: Pass the pagefault-defined drm_gpusvm context as a parameter
> to xe_svm_range_find_or_insert (Matthew Brost)
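
A minimal sketch of the intended reuse, assuming the (hypothetical)
prefetch path walks addresses within a CPU-mirror VMA; illustration
only, not the actual prefetch patch:

	struct xe_svm_range *range;
	int err;

	range = xe_svm_range_find_or_insert(vm, addr, vma, &ctx);
	if (IS_ERR(range))
		return PTR_ERR(range);

	err = xe_svm_range_get_pages(vm, range, &ctx);
	if (err)
		return err;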
>
> Cc: Matthew Brost <matthew.brost@intel.com>
> Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
> ---
> drivers/gpu/drm/xe/xe_svm.c | 67 ++++++++++++++++++++++++++++++-------
> drivers/gpu/drm/xe/xe_svm.h | 20 +++++++++++
> 2 files changed, 75 insertions(+), 12 deletions(-)
>
> diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c
> index 6648b4da0bca..8cd35553a927 100644
> --- a/drivers/gpu/drm/xe/xe_svm.c
> +++ b/drivers/gpu/drm/xe/xe_svm.c
> @@ -735,7 +735,6 @@ int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
> IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR) ? SZ_64K : 0,
> };
> struct xe_svm_range *range;
> - struct drm_gpusvm_range *r;
> struct drm_exec exec;
> struct dma_fence *fence;
> struct xe_tile *tile = gt_to_tile(gt);
> @@ -753,13 +752,11 @@ int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
> if (err)
> return err;
>
> - r = drm_gpusvm_range_find_or_insert(&vm->svm.gpusvm, fault_addr,
> - xe_vma_start(vma), xe_vma_end(vma),
> - &ctx);
> - if (IS_ERR(r))
> - return PTR_ERR(r);
> + range = xe_svm_range_find_or_insert(vm, fault_addr, vma, &ctx);
> +
> + if (IS_ERR(range))
> + return PTR_ERR(range);
>
> - range = to_xe_range(r);
> if (xe_svm_range_is_valid(range, tile))
> return 0;
>
> @@ -781,13 +778,9 @@ int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
> }
>
> range_debug(range, "GET PAGES");
> - err = drm_gpusvm_range_get_pages(&vm->svm.gpusvm, r, &ctx);
> + err = xe_svm_range_get_pages(vm, range, &ctx);
> /* Corner where CPU mappings have changed */
> if (err == -EOPNOTSUPP || err == -EFAULT || err == -EPERM) {
> - if (err == -EOPNOTSUPP) {
> - range_debug(range, "PAGE FAULT - EVICT PAGES");
> - drm_gpusvm_range_evict(&vm->svm.gpusvm, &range->base);
> - }
> drm_dbg(&vm->xe->drm,
> "Get pages failed, falling back to retrying, asid=%u, gpusvm=%p, errno=%pe\n",
> vm->usm.asid, &vm->svm.gpusvm, ERR_PTR(err));
> @@ -866,6 +859,56 @@ int xe_svm_bo_evict(struct xe_bo *bo)
> return drm_gpusvm_evict_to_ram(&bo->devmem_allocation);
> }
>
> +/**
> + * xe_svm_range_find_or_insert() - Find or insert GPU SVM range
> + * @vm: xe_vm pointer
> + * @addr: address for which range needs to be found/inserted
> + * @vma: Pointer to struct xe_vma which mirrors CPU
> + * @ctx: GPU SVM context
> + *
> + * This function finds or inserts a newly allocated SVM range based on the
> + * address.
> + *
> + * Return: Pointer to the SVM range on success, ERR_PTR() on failure.
> + */
> +struct xe_svm_range *xe_svm_range_find_or_insert(struct xe_vm *vm, u64 addr,
> + struct xe_vma *vma, struct drm_gpusvm_ctx *ctx)
> +{
> + struct drm_gpusvm_range *r;
> +
> + r = drm_gpusvm_range_find_or_insert(&vm->svm.gpusvm, max(addr, xe_vma_start(vma)),
> + xe_vma_start(vma), xe_vma_end(vma), ctx);
> + if (IS_ERR(r))
> + return ERR_PTR(PTR_ERR(r));
> +
> + return to_xe_range(r);
> +}
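
One more tiny nit while here: ERR_PTR(PTR_ERR(r)) works, but the more
idiomatic spelling for propagating an error pointer of a different type
is ERR_CAST() from linux/err.h:

	if (IS_ERR(r))
		return ERR_CAST(r);

Not a blocker either way.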
> +
> +/**
> + * xe_svm_range_get_pages() - Get pages for a SVM range
> + * @vm: Pointer to the struct xe_vm
> + * @range: Pointer to the xe SVM range structure
> + * @ctx: GPU SVM context
> + *
> + * This function gets pages for a SVM range and ensures they are mapped for
> + * DMA access. In case of failure with -EOPNOTSUPP, it evicts the range.
> + *
> + * Return: 0 on success, negative error code on failure.
> + */
> +int xe_svm_range_get_pages(struct xe_vm *vm, struct xe_svm_range *range,
> + struct drm_gpusvm_ctx *ctx)
> +{
> + int err = 0;
> +
> + err = drm_gpusvm_range_get_pages(&vm->svm.gpusvm, &range->base, ctx);
> + if (err == -EOPNOTSUPP) {
> + range_debug(range, "PAGE FAULT - EVICT PAGES");
> + drm_gpusvm_range_evict(&vm->svm.gpusvm, &range->base);
> + }
> +
> + return err;
> +}
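
Worth noting for future callers: per the pagefault path above, -EFAULT
and -EPERM (and -EOPNOTSUPP after the eviction here) signal that the
CPU mappings changed underneath us, so a caller would typically retry
rather than fail hard. A hedged sketch, assuming the caller has a
retry label:

	err = xe_svm_range_get_pages(vm, range, &ctx);
	/* Corner case where CPU mappings have changed */
	if (err == -EOPNOTSUPP || err == -EFAULT || err == -EPERM)
		goto retry;	/* hypothetical label */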
> +
> #if IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR)
>
> static struct drm_pagemap_device_addr
> diff --git a/drivers/gpu/drm/xe/xe_svm.h b/drivers/gpu/drm/xe/xe_svm.h
> index 1ec90d9bc749..9c4c3aeacc6c 100644
> --- a/drivers/gpu/drm/xe/xe_svm.h
> +++ b/drivers/gpu/drm/xe/xe_svm.h
> @@ -89,6 +89,12 @@ int xe_svm_alloc_vram(struct xe_vm *vm, struct xe_tile *tile,
> }
> #endif
>
> +struct xe_svm_range *xe_svm_range_find_or_insert(struct xe_vm *vm, u64 addr,
> + struct xe_vma *vma, struct drm_gpusvm_ctx *ctx);
One nit: check the alignment here; checkpatch should complain if it's
off, but it's hard to tell from the patch alone whether this is wrong.
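For reference, a checkpatch-clean continuation would line up under the
first parameter (spacing approximate, assuming tab width 8):

	struct xe_svm_range *xe_svm_range_find_or_insert(struct xe_vm *vm, u64 addr,
							 struct xe_vma *vma,
							 struct drm_gpusvm_ctx *ctx);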
But patch LGTM:
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
> +
> +int xe_svm_range_get_pages(struct xe_vm *vm, struct xe_svm_range *range,
> + struct drm_gpusvm_ctx *ctx);
> +
> /**
> * xe_svm_range_has_dma_mapping() - SVM range has DMA mapping
> * @range: SVM range
> @@ -241,6 +247,20 @@ int xe_svm_alloc_vram(struct xe_vm *vm, struct xe_tile *tile,
> return -EOPNOTSUPP;
> }
>
> +static inline
> +struct xe_svm_range *xe_svm_range_find_or_insert(struct xe_vm *vm, u64 addr,
> + struct xe_vma *vma, struct drm_gpusvm_ctx *ctx)
> +{
> + return ERR_PTR(-EINVAL);
> +}
> +
> +static inline
> +int xe_svm_range_get_pages(struct xe_vm *vm, struct xe_svm_range *range,
> + struct drm_gpusvm_ctx *ctx)
> +{
> + return -EINVAL;
> +}
> +
> static inline struct xe_svm_range *to_xe_range(struct drm_gpusvm_range *r)
> {
> return NULL;
> --
> 2.34.1
>