From: "Ghimiray, Himal Prasad" <himal.prasad.ghimiray@intel.com>
To: Matthew Brost <matthew.brost@intel.com>,
<intel-xe@lists.freedesktop.org>
Subject: Re: [PATCH] drm/xe: Use SVM range helpers in PT layer
Date: Thu, 23 Oct 2025 10:37:34 +0530 [thread overview]
Message-ID: <904e8691-fb2c-4f81-b4f4-1ef24042dfaa@intel.com> (raw)
In-Reply-To: <20251022230122.922382-1-matthew.brost@intel.com>
On 23-10-2025 04:31, Matthew Brost wrote:
> We have helpers for SVM range start, end, and size. Use them in the PT
> layer rather than directly looking at the struct.
>
> Signed-off-by: Matthew Brost <matthew.brost@intel.com>
> ---
> drivers/gpu/drm/xe/xe_pt.c | 28 ++++++++++++++--------------
> 1 file changed, 14 insertions(+), 14 deletions(-)
>
> diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c
> index d22fd1ccc0ba..7c5bca78c8bf 100644
> --- a/drivers/gpu/drm/xe/xe_pt.c
> +++ b/drivers/gpu/drm/xe/xe_pt.c
> @@ -715,7 +715,7 @@ xe_pt_stage_bind(struct xe_tile *tile, struct xe_vma *vma,
> .vm = vm,
> .tile = tile,
> .curs = &curs,
> - .va_curs_start = range ? range->base.itree.start :
> + .va_curs_start = range ? xe_svm_range_start(range) :
> xe_vma_start(vma),
> .vma = vma,
> .wupd.entries = entries,
> @@ -734,7 +734,7 @@ xe_pt_stage_bind(struct xe_tile *tile, struct xe_vma *vma,
> }
> if (xe_svm_range_has_dma_mapping(range)) {
> xe_res_first_dma(range->base.pages.dma_addr, 0,
> - range->base.itree.last + 1 - range->base.itree.start,
> + xe_svm_range_size(range),
> &curs);
> xe_svm_range_debug(range, "BIND PREPARE - MIXED");
> } else {
> @@ -778,8 +778,8 @@ xe_pt_stage_bind(struct xe_tile *tile, struct xe_vma *vma,
>
> walk_pt:
> ret = xe_pt_walk_range(&pt->base, pt->level,
> - range ? range->base.itree.start : xe_vma_start(vma),
> - range ? range->base.itree.last + 1 : xe_vma_end(vma),
> + range ? xe_svm_range_start(range) : xe_vma_start(vma),
> + range ? xe_svm_range_end(range) : xe_vma_end(vma),
> &xe_walk.base);
>
> *num_entries = xe_walk.wupd.num_used_entries;
> @@ -975,8 +975,8 @@ bool xe_pt_zap_ptes_range(struct xe_tile *tile, struct xe_vm *vm,
> if (!(pt_mask & BIT(tile->id)))
> return false;
>
> - (void)xe_pt_walk_shared(&pt->base, pt->level, range->base.itree.start,
> - range->base.itree.last + 1, &xe_walk.base);
> + (void)xe_pt_walk_shared(&pt->base, pt->level, xe_svm_range_start(range),
> + xe_svm_range_end(range), &xe_walk.base);
>
> return xe_walk.needs_invalidate;
> }
> @@ -1661,8 +1661,8 @@ static unsigned int xe_pt_stage_unbind(struct xe_tile *tile,
> struct xe_svm_range *range,
> struct xe_vm_pgtable_update *entries)
> {
> - u64 start = range ? range->base.itree.start : xe_vma_start(vma);
> - u64 end = range ? range->base.itree.last + 1 : xe_vma_end(vma);
> + u64 start = range ? xe_svm_range_start(range) : xe_vma_start(vma);
> + u64 end = range ? xe_svm_range_end(range) : xe_vma_end(vma);
> struct xe_pt_stage_unbind_walk xe_walk = {
> .base = {
> .ops = &xe_pt_stage_unbind_ops,
> @@ -1872,7 +1872,7 @@ static int bind_range_prepare(struct xe_vm *vm, struct xe_tile *tile,
>
> vm_dbg(&xe_vma_vm(vma)->xe->drm,
> "Preparing bind, with range [%lx...%lx)\n",
> - range->base.itree.start, range->base.itree.last);
> + xe_svm_range_start(range), xe_svm_range_end(range) - 1);
>
> pt_op->vma = NULL;
> pt_op->bind = true;
> @@ -1887,8 +1887,8 @@ static int bind_range_prepare(struct xe_vm *vm, struct xe_tile *tile,
> pt_op->num_entries, true);
>
> xe_pt_update_ops_rfence_interval(pt_update_ops,
> - range->base.itree.start,
> - range->base.itree.last + 1);
> + xe_svm_range_start(range),
> + xe_svm_range_end(range));
> ++pt_update_ops->current_op;
> pt_update_ops->needs_svm_lock = true;
>
> @@ -1983,7 +1983,7 @@ static int unbind_range_prepare(struct xe_vm *vm,
>
> vm_dbg(&vm->xe->drm,
> "Preparing unbind, with range [%lx...%lx)\n",
> - range->base.itree.start, range->base.itree.last);
> + xe_svm_range_start(range), xe_svm_range_end(range) - 1);
>
> pt_op->vma = XE_INVALID_VMA;
> pt_op->bind = false;
> @@ -1994,8 +1994,8 @@ static int unbind_range_prepare(struct xe_vm *vm,
>
> xe_vm_dbg_print_entries(tile_to_xe(tile), pt_op->entries,
> pt_op->num_entries, false);
> - xe_pt_update_ops_rfence_interval(pt_update_ops, range->base.itree.start,
> - range->base.itree.last + 1);
> + xe_pt_update_ops_rfence_interval(pt_update_ops, xe_svm_range_start(range),
> + xe_svm_range_end(range));
Reviewed-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
> ++pt_update_ops->current_op;
> pt_update_ops->needs_svm_lock = true;
> pt_update_ops->needs_invalidation |= xe_vm_has_scratch(vm) ||
next prev parent reply other threads:[~2025-10-23 5:07 UTC|newest]
Thread overview: 5+ messages / expand[flat|nested] mbox.gz Atom feed top
2025-10-22 23:01 [PATCH] drm/xe: Use SVM range helpers in PT layer Matthew Brost
2025-10-23 2:35 ` ✓ CI.KUnit: success for " Patchwork
2025-10-23 3:13 ` ✓ Xe.CI.BAT: " Patchwork
2025-10-23 5:07 ` Ghimiray, Himal Prasad [this message]
2025-10-23 9:47 ` ✗ Xe.CI.Full: failure " Patchwork
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=904e8691-fb2c-4f81-b4f4-1ef24042dfaa@intel.com \
--to=himal.prasad.ghimiray@intel.com \
--cc=intel-xe@lists.freedesktop.org \
--cc=matthew.brost@intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox