From: Matthew Brost <matthew.brost@intel.com>
To: <intel-xe@lists.freedesktop.org>
Cc: Matthew Brost <matthew.brost@intel.com>, Oak Zeng <oak.zeng@intel.com>
Subject: [PATCH 09/13] drm/xe: Add vm_bind_ioctl_ops_fini helper
Date: Tue, 9 Apr 2024 22:40:52 -0700 [thread overview]
Message-ID: <20240410054056.478023-10-matthew.brost@intel.com> (raw)
In-Reply-To: <20240410054056.478023-1-matthew.brost@intel.com>
Simplify VM bind code by signaling out-fences / destroying VMAs in a
single location. Will help with the transition to a single job for many bind ops.
v2:
- s/vm_bind_ioctl_ops_install_fences/vm_bind_ioctl_ops_fini (Oak)
- Set last fence in vm_bind_ioctl_ops_fini (Oak)
Cc: Oak Zeng <oak.zeng@intel.com>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
---
drivers/gpu/drm/xe/xe_vm.c | 62 +++++++++++++++-----------------------
1 file changed, 24 insertions(+), 38 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 09871538484b..97384c77f662 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -1748,7 +1748,7 @@ xe_vm_unbind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
struct dma_fence *fence = NULL;
struct dma_fence **fences = NULL;
struct dma_fence_array *cf = NULL;
- int cur_fence = 0, i;
+ int cur_fence = 0;
int number_tiles = hweight8(vma->tile_present);
int err;
u8 id;
@@ -1806,10 +1806,6 @@ xe_vm_unbind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
fence = cf ? &cf->base : !fence ?
xe_exec_queue_last_fence_get(wait_exec_queue, vm) : fence;
- if (last_op) {
- for (i = 0; i < num_syncs; i++)
- xe_sync_entry_signal(&syncs[i], fence);
- }
return fence;
@@ -1833,7 +1829,7 @@ xe_vm_bind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
struct dma_fence **fences = NULL;
struct dma_fence_array *cf = NULL;
struct xe_vm *vm = xe_vma_vm(vma);
- int cur_fence = 0, i;
+ int cur_fence = 0;
int number_tiles = hweight8(tile_mask);
int err;
u8 id;
@@ -1880,12 +1876,6 @@ xe_vm_bind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
}
}
- if (last_op) {
- for (i = 0; i < num_syncs; i++)
- xe_sync_entry_signal(&syncs[i],
- cf ? &cf->base : fence);
- }
-
return cf ? &cf->base : fence;
err_fences:
@@ -1937,20 +1927,11 @@ xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma, struct xe_exec_queue *q,
if (IS_ERR(fence))
return fence;
} else {
- int i;
-
xe_assert(vm->xe, xe_vm_in_fault_mode(vm));
fence = xe_exec_queue_last_fence_get(wait_exec_queue, vm);
- if (last_op) {
- for (i = 0; i < num_syncs; i++)
- xe_sync_entry_signal(&syncs[i], fence);
- }
}
- if (last_op)
- xe_exec_queue_last_fence_set(wait_exec_queue, vm, fence);
-
return fence;
}
@@ -1960,7 +1941,6 @@ xe_vm_unbind(struct xe_vm *vm, struct xe_vma *vma,
u32 num_syncs, bool first_op, bool last_op)
{
struct dma_fence *fence;
- struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
xe_vm_assert_held(vm);
xe_bo_assert_held(xe_vma_bo(vma));
@@ -1969,10 +1949,6 @@ xe_vm_unbind(struct xe_vm *vm, struct xe_vma *vma,
if (IS_ERR(fence))
return fence;
- xe_vma_destroy(vma, fence);
- if (last_op)
- xe_exec_queue_last_fence_set(wait_exec_queue, vm, fence);
-
return fence;
}
@@ -2127,17 +2103,7 @@ xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma,
return xe_vm_bind(vm, vma, q, xe_vma_bo(vma), syncs, num_syncs,
vma->tile_mask, true, first_op, last_op);
} else {
- struct dma_fence *fence =
- xe_exec_queue_last_fence_get(wait_exec_queue, vm);
- int i;
-
- /* Nothing to do, signal fences now */
- if (last_op) {
- for (i = 0; i < num_syncs; i++)
- xe_sync_entry_signal(&syncs[i], fence);
- }
-
- return fence;
+ return xe_exec_queue_last_fence_get(wait_exec_queue, vm);
}
}
@@ -2939,6 +2905,26 @@ static struct dma_fence *ops_execute(struct xe_vm *vm,
return fence;
}
+static void vm_bind_ioctl_ops_fini(struct xe_vm *vm, struct xe_vma_ops *vops,
+ struct dma_fence *fence)
+{
+ struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, vops->q);
+ struct xe_vma_op *op;
+ int i;
+
+ list_for_each_entry(op, &vops->list, link) {
+ if (op->base.op == DRM_GPUVA_OP_UNMAP)
+ xe_vma_destroy(gpuva_to_vma(op->base.unmap.va), fence);
+ else if (op->base.op == DRM_GPUVA_OP_REMAP)
+ xe_vma_destroy(gpuva_to_vma(op->base.remap.unmap->va),
+ fence);
+ }
+ for (i = 0; i < vops->num_syncs; i++)
+ xe_sync_entry_signal(vops->syncs + i, fence);
+ xe_exec_queue_last_fence_set(wait_exec_queue, vm, fence);
+ dma_fence_put(fence);
+}
+
static int vm_bind_ioctl_ops_execute(struct xe_vm *vm,
struct xe_vma_ops *vops)
{
@@ -2963,7 +2949,7 @@ static int vm_bind_ioctl_ops_execute(struct xe_vm *vm,
xe_vm_kill(vm, false);
goto unlock;
} else {
- dma_fence_put(fence);
+ vm_bind_ioctl_ops_fini(vm, vops, fence);
}
}
--
2.34.1
next prev parent reply other threads:[~2024-04-10 5:40 UTC|newest]
Thread overview: 40+ messages / expand[flat|nested] mbox.gz Atom feed top
2024-04-10 5:40 [PATCH 00/13] Prep patches for 1 job per VM bind IOCTL Matthew Brost
2024-04-10 5:40 ` [PATCH 01/13] drm/xe: Lock all gpuva ops during " Matthew Brost
2024-04-16 15:51 ` Zeng, Oak
2024-04-16 17:02 ` Matthew Brost
2024-04-10 5:40 ` [PATCH 02/13] drm/xe: Add ops_execute function which returns a fence Matthew Brost
2024-04-18 16:16 ` Zeng, Oak
2024-04-18 19:36 ` Matthew Brost
2024-04-23 3:09 ` Zeng, Oak
2024-04-10 5:40 ` [PATCH 03/13] drm/xe: Move migrate to prefetch to op_lock_and_prep function Matthew Brost
2024-04-18 19:27 ` Zeng, Oak
2024-04-19 19:52 ` Matthew Brost
2024-04-23 3:32 ` Zeng, Oak
2024-04-10 5:40 ` [PATCH 04/13] drm/xe: Add struct xe_vma_ops abstraction Matthew Brost
2024-04-10 5:40 ` [PATCH 05/13] drm/xe: Use xe_vma_ops to implement xe_vm_rebind Matthew Brost
2024-04-19 3:43 ` Zeng, Oak
2024-04-19 4:14 ` Matthew Brost
2024-04-23 3:17 ` Zeng, Oak
2024-04-10 5:40 ` [PATCH 06/13] drm/xe: Simplify VM bind IOCTL error handling and cleanup Matthew Brost
2024-04-19 4:19 ` Zeng, Oak
2024-04-19 19:16 ` Matthew Brost
2024-04-23 3:22 ` Zeng, Oak
2024-04-10 5:40 ` [PATCH 07/13] drm/xe: Use xe_vma_ops to implement page fault rebinds Matthew Brost
2024-04-19 14:22 ` Zeng, Oak
2024-04-19 19:33 ` Matthew Brost
2024-04-23 3:27 ` Zeng, Oak
2024-04-10 5:40 ` [PATCH 08/13] drm/xe: Add some members to xe_vma_ops Matthew Brost
2024-04-19 14:24 ` Zeng, Oak
2024-04-10 5:40 ` Matthew Brost [this message]
2024-04-19 14:51 ` [PATCH 09/13] drm/xe: Add vm_bind_ioctl_ops_fini helper Zeng, Oak
2024-04-10 5:40 ` [PATCH 10/13] drm/xe: Move ufence check to op_lock Matthew Brost
2024-04-19 14:56 ` Zeng, Oak
2024-04-19 19:34 ` Matthew Brost
2024-04-10 5:40 ` [PATCH 11/13] drm/xe: Move ufence add to vm_bind_ioctl_ops_fini Matthew Brost
2024-04-19 15:24 ` Zeng, Oak
2024-04-19 19:45 ` Matthew Brost
2024-04-23 3:36 ` Zeng, Oak
2024-04-10 5:40 ` [PATCH 12/13] drm/xe: Add xe_gt_tlb_invalidation_range and convert PT layer to use this Matthew Brost
2024-04-19 16:00 ` Zeng, Oak
2024-04-10 5:40 ` [PATCH 13/13] drm/xe: Delete PT update selftest Matthew Brost
2024-04-10 6:28 ` ✗ CI.Patch_applied: failure for Prep patches for 1 job per VM bind IOCTL Patchwork
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20240410054056.478023-10-matthew.brost@intel.com \
--to=matthew.brost@intel.com \
--cc=intel-xe@lists.freedesktop.org \
--cc=oak.zeng@intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox