From: Matthew Brost <matthew.brost@intel.com>
To: intel-xe@lists.freedesktop.org
Subject: [PATCH] drm/xe: Use SVM range helpers in PT layer
Date: Wed, 22 Oct 2025 16:01:22 -0700 [thread overview]
Message-ID: <20251022230122.922382-1-matthew.brost@intel.com> (raw)
We have helpers for the SVM range start, end, and size. Use them in the
PT layer rather than directly looking at the struct.
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
---
drivers/gpu/drm/xe/xe_pt.c | 28 ++++++++++++++--------------
1 file changed, 14 insertions(+), 14 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c
index d22fd1ccc0ba..7c5bca78c8bf 100644
--- a/drivers/gpu/drm/xe/xe_pt.c
+++ b/drivers/gpu/drm/xe/xe_pt.c
@@ -715,7 +715,7 @@ xe_pt_stage_bind(struct xe_tile *tile, struct xe_vma *vma,
.vm = vm,
.tile = tile,
.curs = &curs,
- .va_curs_start = range ? range->base.itree.start :
+ .va_curs_start = range ? xe_svm_range_start(range) :
xe_vma_start(vma),
.vma = vma,
.wupd.entries = entries,
@@ -734,7 +734,7 @@ xe_pt_stage_bind(struct xe_tile *tile, struct xe_vma *vma,
}
if (xe_svm_range_has_dma_mapping(range)) {
xe_res_first_dma(range->base.pages.dma_addr, 0,
- range->base.itree.last + 1 - range->base.itree.start,
+ xe_svm_range_size(range),
&curs);
xe_svm_range_debug(range, "BIND PREPARE - MIXED");
} else {
@@ -778,8 +778,8 @@ xe_pt_stage_bind(struct xe_tile *tile, struct xe_vma *vma,
walk_pt:
ret = xe_pt_walk_range(&pt->base, pt->level,
- range ? range->base.itree.start : xe_vma_start(vma),
- range ? range->base.itree.last + 1 : xe_vma_end(vma),
+ range ? xe_svm_range_start(range) : xe_vma_start(vma),
+ range ? xe_svm_range_end(range) : xe_vma_end(vma),
&xe_walk.base);
*num_entries = xe_walk.wupd.num_used_entries;
@@ -975,8 +975,8 @@ bool xe_pt_zap_ptes_range(struct xe_tile *tile, struct xe_vm *vm,
if (!(pt_mask & BIT(tile->id)))
return false;
- (void)xe_pt_walk_shared(&pt->base, pt->level, range->base.itree.start,
- range->base.itree.last + 1, &xe_walk.base);
+ (void)xe_pt_walk_shared(&pt->base, pt->level, xe_svm_range_start(range),
+ xe_svm_range_end(range), &xe_walk.base);
return xe_walk.needs_invalidate;
}
@@ -1661,8 +1661,8 @@ static unsigned int xe_pt_stage_unbind(struct xe_tile *tile,
struct xe_svm_range *range,
struct xe_vm_pgtable_update *entries)
{
- u64 start = range ? range->base.itree.start : xe_vma_start(vma);
- u64 end = range ? range->base.itree.last + 1 : xe_vma_end(vma);
+ u64 start = range ? xe_svm_range_start(range) : xe_vma_start(vma);
+ u64 end = range ? xe_svm_range_end(range) : xe_vma_end(vma);
struct xe_pt_stage_unbind_walk xe_walk = {
.base = {
.ops = &xe_pt_stage_unbind_ops,
@@ -1872,7 +1872,7 @@ static int bind_range_prepare(struct xe_vm *vm, struct xe_tile *tile,
vm_dbg(&xe_vma_vm(vma)->xe->drm,
"Preparing bind, with range [%lx...%lx)\n",
- range->base.itree.start, range->base.itree.last);
+ xe_svm_range_start(range), xe_svm_range_end(range) - 1);
pt_op->vma = NULL;
pt_op->bind = true;
@@ -1887,8 +1887,8 @@ static int bind_range_prepare(struct xe_vm *vm, struct xe_tile *tile,
pt_op->num_entries, true);
xe_pt_update_ops_rfence_interval(pt_update_ops,
- range->base.itree.start,
- range->base.itree.last + 1);
+ xe_svm_range_start(range),
+ xe_svm_range_end(range));
++pt_update_ops->current_op;
pt_update_ops->needs_svm_lock = true;
@@ -1983,7 +1983,7 @@ static int unbind_range_prepare(struct xe_vm *vm,
vm_dbg(&vm->xe->drm,
"Preparing unbind, with range [%lx...%lx)\n",
- range->base.itree.start, range->base.itree.last);
+ xe_svm_range_start(range), xe_svm_range_end(range) - 1);
pt_op->vma = XE_INVALID_VMA;
pt_op->bind = false;
@@ -1994,8 +1994,8 @@ static int unbind_range_prepare(struct xe_vm *vm,
xe_vm_dbg_print_entries(tile_to_xe(tile), pt_op->entries,
pt_op->num_entries, false);
- xe_pt_update_ops_rfence_interval(pt_update_ops, range->base.itree.start,
- range->base.itree.last + 1);
+ xe_pt_update_ops_rfence_interval(pt_update_ops, xe_svm_range_start(range),
+ xe_svm_range_end(range));
++pt_update_ops->current_op;
pt_update_ops->needs_svm_lock = true;
pt_update_ops->needs_invalidation |= xe_vm_has_scratch(vm) ||
--
2.34.1
next reply other threads:[~2025-10-22 23:01 UTC|newest]
Thread overview: 5+ messages / expand[flat|nested] mbox.gz Atom feed top
2025-10-22 23:01 Matthew Brost [this message]
2025-10-23 2:35 ` ✓ CI.KUnit: success for drm/xe: Use SVM range helpers in PT layer Patchwork
2025-10-23 3:13 ` ✓ Xe.CI.BAT: " Patchwork
2025-10-23 5:07 ` [PATCH] " Ghimiray, Himal Prasad
2025-10-23 9:47 ` ✗ Xe.CI.Full: failure for " Patchwork
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20251022230122.922382-1-matthew.brost@intel.com \
--to=matthew.brost@intel.com \
--cc=intel-xe@lists.freedesktop.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox