From: Matthew Brost <matthew.brost@intel.com>
To: intel-xe@lists.freedesktop.org
Cc: francois.dugast@intel.com, thomas.hellstrom@linux.intel.com,
himal.prasad.ghimiray@intel.com
Subject: [PATCH v2 1/2] drm/xe: Add xe_vm_has_valid_gpu_pages helper
Date: Fri, 13 Jun 2025 14:02:41 -0700 [thread overview]
Message-ID: <20250613210242.718441-2-matthew.brost@intel.com> (raw)
In-Reply-To: <20250613210242.718441-1-matthew.brost@intel.com>
Rather than having multiple READ_ONCE of the tile_* fields and comments
scattered in the code, use a helper with kernel doc to provide a single
access point with clear rules.
Suggested-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
---
drivers/gpu/drm/xe/xe_gt_pagefault.c | 9 ++-------
drivers/gpu/drm/xe/xe_pt.c | 6 +++---
drivers/gpu/drm/xe/xe_svm.c | 16 +++++++---------
drivers/gpu/drm/xe/xe_vm.c | 2 +-
drivers/gpu/drm/xe/xe_vm.h | 19 +++++++++++++++++++
5 files changed, 32 insertions(+), 20 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_gt_pagefault.c b/drivers/gpu/drm/xe/xe_gt_pagefault.c
index e2d975b2fddb..adfd6a26b5d8 100644
--- a/drivers/gpu/drm/xe/xe_gt_pagefault.c
+++ b/drivers/gpu/drm/xe/xe_gt_pagefault.c
@@ -69,15 +69,10 @@ static bool access_is_atomic(enum access_type access_type)
static bool vma_is_valid(struct xe_tile *tile, struct xe_vma *vma)
{
- /*
- * Advisory only check whether the VMA currently has a valid mapping,
- * READ_ONCE pairs with WRITE_ONCE in xe_pt.c
- */
- return BIT(tile->id) & READ_ONCE(vma->tile_present) &&
- !(BIT(tile->id) & READ_ONCE(vma->tile_invalidated));
+ return xe_vm_has_valid_gpu_pages(tile, vma->tile_present,
+ vma->tile_invalidated);
}
-
static int xe_pf_begin(struct drm_exec *exec, struct xe_vma *vma,
bool atomic, unsigned int id)
{
diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c
index f39d5cc9f411..59496c1a1e77 100644
--- a/drivers/gpu/drm/xe/xe_pt.c
+++ b/drivers/gpu/drm/xe/xe_pt.c
@@ -2196,7 +2196,7 @@ static void bind_op_commit(struct xe_vm *vm, struct xe_tile *tile,
DMA_RESV_USAGE_KERNEL :
DMA_RESV_USAGE_BOOKKEEP);
}
- /* All WRITE_ONCE pair with READ_ONCE in xe_gt_pagefault.c */
+ /* All WRITE_ONCE pair with READ_ONCE in xe_vm_has_valid_gpu_pages() */
WRITE_ONCE(vma->tile_present, vma->tile_present | BIT(tile->id));
if (invalidate_on_bind)
WRITE_ONCE(vma->tile_invalidated,
@@ -2255,7 +2255,7 @@ static void range_present_and_invalidated_tile(struct xe_vm *vm,
struct xe_svm_range *range,
u8 tile_id)
{
- /* WRITE_ONCE pairs with READ_ONCE in xe_svm.c */
+ /* All WRITE_ONCE pair with READ_ONCE in xe_vm_has_valid_gpu_pages() */
lockdep_assert_held(&vm->svm.gpusvm.notifier_lock);
@@ -2324,7 +2324,7 @@ static void op_commit(struct xe_vm *vm,
}
case DRM_GPUVA_OP_DRIVER:
{
- /* WRITE_ONCE pairs with READ_ONCE in xe_svm.c */
+ /* WRITE_ONCE pairs with READ_ONCE in xe_vm_has_valid_gpu_pages() */
if (op->subop == XE_VMA_SUBOP_MAP_RANGE)
range_present_and_invalidated_tile(vm, op->map_range.range, tile->id);
else if (op->subop == XE_VMA_SUBOP_UNMAP_RANGE)
diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c
index 2fbbd6a604ea..ce6b9e637b16 100644
--- a/drivers/gpu/drm/xe/xe_svm.c
+++ b/drivers/gpu/drm/xe/xe_svm.c
@@ -141,7 +141,10 @@ xe_svm_range_notifier_event_begin(struct xe_vm *vm, struct drm_gpusvm_range *r,
for_each_tile(tile, xe, id)
if (xe_pt_zap_ptes_range(tile, vm, range)) {
tile_mask |= BIT(id);
- /* Pairs with READ_ONCE in xe_svm_range_is_valid */
+ /*
+ * WRITE_ONCE pairs with READ_ONCE in
+ * xe_vm_has_valid_gpu_pages()
+ */
WRITE_ONCE(range->tile_invalidated,
range->tile_invalidated | BIT(id));
}
@@ -605,14 +608,9 @@ static bool xe_svm_range_is_valid(struct xe_svm_range *range,
struct xe_tile *tile,
bool devmem_only)
{
- /*
- * Advisory only check whether the range currently has a valid mapping,
- * READ_ONCE pairs with WRITE_ONCE in xe_pt.c,
- * xe_svm_range_notifier_event_begin
- */
- return ((READ_ONCE(range->tile_present) &
- ~READ_ONCE(range->tile_invalidated)) & BIT(tile->id)) &&
- (!devmem_only || xe_svm_range_in_vram(range));
+ return (xe_vm_has_valid_gpu_pages(tile, range->tile_present,
+ range->tile_invalidated) &&
+ (!devmem_only || xe_svm_range_in_vram(range)));
}
/** xe_svm_range_migrate_to_smem() - Move range pages from VRAM to SMEM
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 6ef8c4dab647..2bef0537a3c9 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -3961,7 +3961,7 @@ int xe_vm_invalidate_vma(struct xe_vma *vma)
ret = xe_vm_range_tilemask_tlb_invalidation(xe_vma_vm(vma), xe_vma_start(vma),
xe_vma_end(vma), tile_mask);
- /* WRITE_ONCE pair with READ_ONCE in xe_gt_pagefault.c */
+ /* WRITE_ONCE pairs with READ_ONCE in xe_vm_has_valid_gpu_pages() */
WRITE_ONCE(vma->tile_invalidated, vma->tile_mask);
return ret;
diff --git a/drivers/gpu/drm/xe/xe_vm.h b/drivers/gpu/drm/xe/xe_vm.h
index acd3fd6c605b..75eb7a1e2b83 100644
--- a/drivers/gpu/drm/xe/xe_vm.h
+++ b/drivers/gpu/drm/xe/xe_vm.h
@@ -375,6 +375,25 @@ static inline bool xe_vm_is_validating(struct xe_vm *vm)
return false;
}
+/**
+ * xe_vm_has_valid_gpu_pages() - Advisory helper to check if VMA or SVM range has
+ * valid GPU pages
+ * @tile: The tile which the GPU pages belong to
+ * @tile_present: Tile present mask
+ * @tile_invalidated: Tile invalidated mask
+ *
+ * The READ_ONCEs pair with WRITE_ONCEs in either the TLB invalidation paths
+ * (xe_vm.c, xe_svm.c) or the binding paths (xe_pt.c). These are not reliable
+ * without the notifier lock in userptr or SVM cases, and not reliable without
+ * the BO dma-resv lock in the BO case. As such, they should only be used in
+ * opportunistic cases (e.g., skipping a page fault fix or not skipping a TLB
+ * invalidation) where it is harmless.
+ *
+ * Return: True if there are valid GPU pages, False otherwise
+ */
+#define xe_vm_has_valid_gpu_pages(tile, tile_present, tile_invalidated) \
+ ((READ_ONCE(tile_present) & ~READ_ONCE(tile_invalidated)) & BIT(tile->id))
+
#if IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT)
void xe_vma_userptr_force_invalidate(struct xe_userptr_vma *uvma);
#else
--
2.34.1
next prev parent reply other threads:[~2025-06-13 21:01 UTC|newest]
Thread overview: 8+ messages / expand[flat|nested] mbox.gz Atom feed top
2025-06-13 21:02 [PATCH v2 0/2] Opportunistically skip TLB invalidaion on unbind Matthew Brost
2025-06-13 21:02 ` Matthew Brost [this message]
2025-06-13 21:02 ` [PATCH v2 2/2] drm/xe: " Matthew Brost
2025-06-15 3:56 ` Matthew Brost
2025-06-13 23:22 ` ✗ CI.checkpatch: warning for " Patchwork
2025-06-13 23:23 ` ✓ CI.KUnit: success " Patchwork
2025-06-14 0:13 ` ✓ Xe.CI.BAT: " Patchwork
2025-06-16 0:15 ` ✗ Xe.CI.Full: failure " Patchwork
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20250613210242.718441-2-matthew.brost@intel.com \
--to=matthew.brost@intel.com \
--cc=francois.dugast@intel.com \
--cc=himal.prasad.ghimiray@intel.com \
--cc=intel-xe@lists.freedesktop.org \
--cc=thomas.hellstrom@linux.intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox