From: Matthew Brost <matthew.brost@intel.com>
To: intel-xe@lists.freedesktop.org
Cc: stuart.summers@intel.com, arvind.yadav@intel.com,
himal.prasad.ghimiray@intel.com,
thomas.hellstrom@linux.intel.com, francois.dugast@intel.com
Subject: [PATCH v4 02/12] drm/xe: Allow prefetch-only VM bind IOCTLs to use VM read lock
Date: Wed, 25 Feb 2026 20:28:24 -0800 [thread overview]
Message-ID: <20260226042834.2963245-3-matthew.brost@intel.com> (raw)
In-Reply-To: <20260226042834.2963245-1-matthew.brost@intel.com>
Prefetch-only VM bind IOCTLs do not modify VMAs after pinning userptr
pages, so vm->lock can be downgraded to read mode once pinning is
complete. This lays the groundwork for prefetch IOCTLs to use threaded
migration.
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
---
drivers/gpu/drm/xe/xe_vm.c | 36 +++++++++++++++++++++++++++-----
drivers/gpu/drm/xe/xe_vm_types.h | 2 ++
2 files changed, 33 insertions(+), 5 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 3332a86f464f..204a89ca3397 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -2336,10 +2336,12 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_vma_ops *vops,
.map.gem.offset = bo_offset_or_userptr,
};
+ vops->flags |= XE_VMA_OPS_FLAG_MODIFIES_GPUVA;
ops = drm_gpuvm_sm_map_ops_create(&vm->gpuvm, &map_req);
break;
}
case DRM_XE_VM_BIND_OP_UNMAP:
+ vops->flags |= XE_VMA_OPS_FLAG_MODIFIES_GPUVA;
ops = drm_gpuvm_sm_unmap_ops_create(&vm->gpuvm, addr, range);
break;
case DRM_XE_VM_BIND_OP_PREFETCH:
@@ -2348,6 +2350,7 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_vma_ops *vops,
case DRM_XE_VM_BIND_OP_UNMAP_ALL:
xe_assert(vm->xe, bo);
+ vops->flags |= XE_VMA_OPS_FLAG_MODIFIES_GPUVA;
err = xe_bo_lock(bo, true);
if (err)
return ERR_PTR(err);
@@ -2397,6 +2400,9 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_vma_ops *vops,
u8 id, tile_mask = 0;
u32 i;
+ if (xe_vma_is_userptr(vma))
+ vops->flags |= XE_VMA_OPS_FLAG_MODIFIES_GPUVA;
+
if (!xe_vma_is_cpu_addr_mirror(vma)) {
op->prefetch.region = prefetch_region;
break;
@@ -2582,10 +2588,12 @@ static int xe_vma_op_commit(struct xe_vm *vm, struct xe_vma_op *op)
{
int err = 0;
- xe_vm_assert_write_mode_or_garbage_collector(vm);
+ lockdep_assert_held(&vm->lock);
switch (op->base.op) {
case DRM_GPUVA_OP_MAP:
+ xe_vm_assert_write_mode_or_garbage_collector(vm);
+
err |= xe_vm_insert_vma(vm, op->map.vma);
if (!err)
op->flags |= XE_VMA_OP_COMMITTED;
@@ -2595,6 +2603,8 @@ static int xe_vma_op_commit(struct xe_vm *vm, struct xe_vma_op *op)
u8 tile_present =
gpuva_to_vma(op->base.remap.unmap->va)->tile_present;
+ xe_vm_assert_write_mode_or_garbage_collector(vm);
+
prep_vma_destroy(vm, gpuva_to_vma(op->base.remap.unmap->va),
true);
op->flags |= XE_VMA_OP_COMMITTED;
@@ -2628,6 +2638,8 @@ static int xe_vma_op_commit(struct xe_vm *vm, struct xe_vma_op *op)
break;
}
case DRM_GPUVA_OP_UNMAP:
+ xe_vm_assert_write_mode_or_garbage_collector(vm);
+
prep_vma_destroy(vm, gpuva_to_vma(op->base.unmap.va), true);
op->flags |= XE_VMA_OP_COMMITTED;
break;
@@ -2849,10 +2861,12 @@ static void xe_vma_op_unwind(struct xe_vm *vm, struct xe_vma_op *op,
bool post_commit, bool prev_post_commit,
bool next_post_commit)
{
- xe_vm_assert_write_mode_or_garbage_collector(vm);
+ lockdep_assert_held(&vm->lock);
switch (op->base.op) {
case DRM_GPUVA_OP_MAP:
+ xe_vm_assert_write_mode_or_garbage_collector(vm);
+
if (op->map.vma) {
prep_vma_destroy(vm, op->map.vma, post_commit);
xe_vma_destroy_unlocked(op->map.vma);
@@ -2862,6 +2876,8 @@ static void xe_vma_op_unwind(struct xe_vm *vm, struct xe_vma_op *op,
{
struct xe_vma *vma = gpuva_to_vma(op->base.unmap.va);
+ xe_vm_assert_write_mode_or_garbage_collector(vm);
+
if (vma) {
xe_svm_notifier_lock(vm);
vma->gpuva.flags &= ~XE_VMA_DESTROYED;
@@ -2875,6 +2891,8 @@ static void xe_vma_op_unwind(struct xe_vm *vm, struct xe_vma_op *op,
{
struct xe_vma *vma = gpuva_to_vma(op->base.remap.unmap->va);
+ xe_vm_assert_write_mode_or_garbage_collector(vm);
+
if (op->remap.prev) {
prep_vma_destroy(vm, op->remap.prev, prev_post_commit);
xe_vma_destroy_unlocked(op->remap.prev);
@@ -3362,7 +3380,7 @@ static struct dma_fence *vm_bind_ioctl_ops_execute(struct xe_vm *vm,
struct dma_fence *fence;
int err = 0;
- lockdep_assert_held_write(&vm->lock);
+ lockdep_assert_held(&vm->lock);
xe_validation_guard(&ctx, &vm->xe->val, &exec,
((struct xe_val_flags) {
@@ -3664,7 +3682,7 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
u32 num_syncs, num_ufence = 0;
struct xe_sync_entry *syncs = NULL;
struct drm_xe_vm_bind_op *bind_ops = NULL;
- struct xe_vma_ops vops;
+ struct xe_vma_ops vops = { .flags = 0, };
struct dma_fence *fence;
int err;
int i;
@@ -3839,6 +3857,11 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
goto unwind_ops;
}
+ if (!(vops.flags & XE_VMA_OPS_FLAG_MODIFIES_GPUVA)) {
+ vops.flags |= XE_VMA_OPS_FLAG_DOWNGRADE_LOCK;
+ downgrade_write(&vm->lock);
+ }
+
err = xe_vma_ops_alloc(&vops, args->num_binds > 1);
if (err)
goto unwind_ops;
@@ -3875,7 +3898,10 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
free_bos:
kvfree(bos);
release_vm_lock:
- up_write(&vm->lock);
+ if (vops.flags & XE_VMA_OPS_FLAG_DOWNGRADE_LOCK)
+ up_read(&vm->lock);
+ else
+ up_write(&vm->lock);
put_exec_queue:
if (q)
xe_exec_queue_put(q);
diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h
index 9c91934ec47f..db6e8e22a69f 100644
--- a/drivers/gpu/drm/xe/xe_vm_types.h
+++ b/drivers/gpu/drm/xe/xe_vm_types.h
@@ -518,6 +518,8 @@ struct xe_vma_ops {
#define XE_VMA_OPS_ARRAY_OF_BINDS BIT(2)
#define XE_VMA_OPS_FLAG_SKIP_TLB_WAIT BIT(3)
#define XE_VMA_OPS_FLAG_ALLOW_SVM_UNMAP BIT(4)
+#define XE_VMA_OPS_FLAG_MODIFIES_GPUVA BIT(5)
+#define XE_VMA_OPS_FLAG_DOWNGRADE_LOCK BIT(6)
u32 flags;
#ifdef TEST_VM_OPS_ERROR
/** @inject_error: inject error to test error handling */
--
2.34.1
next prev parent reply other threads:[~2026-02-26 4:28 UTC|newest]
Thread overview: 33+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-02-26 4:28 [PATCH v4 00/12] Fine grained fault locking, threaded prefetch, storm cache Matthew Brost
2026-02-26 4:28 ` [PATCH v4 01/12] drm/xe: Fine grained page fault locking Matthew Brost
2026-02-26 4:28 ` Matthew Brost [this message]
2026-02-26 4:28 ` [PATCH v4 03/12] drm/xe: Thread prefetch of SVM ranges Matthew Brost
2026-02-26 4:28 ` [PATCH v4 04/12] drm/xe: Use a single page-fault queue with multiple workers Matthew Brost
2026-05-06 15:46 ` Maciej Patelczyk
2026-05-06 19:42 ` Matthew Brost
2026-05-07 12:41 ` Maciej Patelczyk
2026-02-26 4:28 ` [PATCH v4 05/12] drm/xe: Add num_pf_work modparam Matthew Brost
2026-05-06 15:59 ` Maciej Patelczyk
2026-02-26 4:28 ` [PATCH v4 06/12] drm/xe: Engine class and instance into a u8 Matthew Brost
2026-05-06 16:04 ` Maciej Patelczyk
2026-05-07 16:20 ` Maciej Patelczyk
2026-02-26 4:28 ` [PATCH v4 07/12] drm/xe: Track pagefault worker runtime Matthew Brost
2026-05-07 12:51 ` Maciej Patelczyk
2026-02-26 4:28 ` [PATCH v4 08/12] drm/xe: Chain page faults via queue-resident cache to avoid fault storms Matthew Brost
2026-05-08 12:03 ` Maciej Patelczyk
2026-02-26 4:28 ` [PATCH v4 09/12] drm/xe: Add pagefault chaining stats Matthew Brost
2026-05-07 13:15 ` Maciej Patelczyk
2026-05-07 13:52 ` Francois Dugast
2026-02-26 4:28 ` [PATCH v4 10/12] drm/xe: Add debugfs pagefault_info Matthew Brost
2026-05-07 10:07 ` Maciej Patelczyk
2026-02-26 4:28 ` [PATCH v4 11/12] drm/xe: batch CT pagefault acks with periodic flush Matthew Brost
2026-05-08 9:24 ` Maciej Patelczyk
2026-02-26 4:28 ` [PATCH v4 12/12] drm/xe: Track parallel page fault activity in GT stats Matthew Brost
2026-05-07 13:56 ` Maciej Patelczyk
2026-05-07 14:23 ` Francois Dugast
2026-02-26 4:35 ` ✗ CI.checkpatch: warning for Fine grained fault locking, threaded prefetch, storm cache (rev4) Patchwork
2026-02-26 4:36 ` ✓ CI.KUnit: success " Patchwork
2026-02-26 5:26 ` ✗ Xe.CI.BAT: failure " Patchwork
2026-02-26 8:59 ` ✗ Xe.CI.FULL: " Patchwork
2026-02-26 13:43 ` [PATCH v4 00/12] Fine grained fault locking, threaded prefetch, storm cache Thomas Hellström
2026-02-26 19:36 ` Matthew Brost
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260226042834.2963245-3-matthew.brost@intel.com \
--to=matthew.brost@intel.com \
--cc=arvind.yadav@intel.com \
--cc=francois.dugast@intel.com \
--cc=himal.prasad.ghimiray@intel.com \
--cc=intel-xe@lists.freedesktop.org \
--cc=stuart.summers@intel.com \
--cc=thomas.hellstrom@linux.intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.