From: Matthew Brost <matthew.brost@intel.com>
To: intel-xe@lists.freedesktop.org, dri-devel@lists.freedesktop.org
Cc: simona.vetter@ffwll.ch, thomas.hellstrom@linux.intel.com,
pstanner@redhat.com, boris.brezillon@collabora.com,
airlied@gmail.com, ltuikov89@gmail.com, dakr@kernel.org,
christian.koenig@amd.com, mihail.atanassov@arm.com,
steven.price@arm.com, shashank.sharma@amd.com
Subject: [RFC PATCH 5/6] drm/xe: Use DMA_RESV_USAGE_PREEMPT for preempt fences
Date: Sat, 9 Nov 2024 09:29:41 -0800 [thread overview]
Message-ID: <20241109172942.482630-6-matthew.brost@intel.com> (raw)
In-Reply-To: <20241109172942.482630-1-matthew.brost@intel.com>
Use the new DMA_RESV_USAGE_PREEMPT dma-resv slot in Xe for preempt
fences, and rely on the common dma-resv/scheduler code to enable fence
signaling, rather than open-coding the enabling of signaling before
waiting.
Cc: Dave Airlie <airlied@redhat.com>
Cc: Simona Vetter <simona.vetter@ffwll.ch>
Cc: Christian Koenig <christian.koenig@amd.com>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
---
drivers/gpu/drm/xe/xe_bo.c | 22 +++++-----------------
drivers/gpu/drm/xe/xe_migrate.c | 4 ++--
drivers/gpu/drm/xe/xe_pt.c | 12 ++----------
drivers/gpu/drm/xe/xe_vm.c | 22 ++++------------------
4 files changed, 13 insertions(+), 47 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c
index 549866da5cd1..7ab7d27b66c6 100644
--- a/drivers/gpu/drm/xe/xe_bo.c
+++ b/drivers/gpu/drm/xe/xe_bo.c
@@ -484,23 +484,11 @@ static int xe_ttm_io_mem_reserve(struct ttm_device *bdev,
static int xe_bo_trigger_rebind(struct xe_device *xe, struct xe_bo *bo,
const struct ttm_operation_ctx *ctx)
{
- struct dma_resv_iter cursor;
- struct dma_fence *fence;
struct drm_gem_object *obj = &bo->ttm.base;
struct drm_gpuvm_bo *vm_bo;
bool idle = false;
int ret = 0;
- dma_resv_assert_held(bo->ttm.base.resv);
-
- if (!list_empty(&bo->ttm.base.gpuva.list)) {
- dma_resv_iter_begin(&cursor, bo->ttm.base.resv,
- DMA_RESV_USAGE_BOOKKEEP);
- dma_resv_for_each_fence_unlocked(&cursor, fence)
- dma_fence_enable_sw_signaling(fence);
- dma_resv_iter_end(&cursor);
- }
-
drm_gem_for_each_gpuvm_bo(vm_bo, obj) {
struct xe_vm *vm = gpuvm_to_vm(vm_bo->vm);
struct drm_gpuva *gpuva;
@@ -515,11 +503,11 @@ static int xe_bo_trigger_rebind(struct xe_device *xe, struct xe_bo *bo,
if (ctx->no_wait_gpu &&
!dma_resv_test_signaled(bo->ttm.base.resv,
- DMA_RESV_USAGE_BOOKKEEP))
+ DMA_RESV_USAGE_PREEMPT))
return -EBUSY;
timeout = dma_resv_wait_timeout(bo->ttm.base.resv,
- DMA_RESV_USAGE_BOOKKEEP,
+ DMA_RESV_USAGE_PREEMPT,
ctx->interruptible,
MAX_SCHEDULE_TIMEOUT);
if (!timeout)
@@ -723,7 +711,7 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
if (old_mem_type == XE_PL_TT &&
new_mem->mem_type == XE_PL_SYSTEM) {
long timeout = dma_resv_wait_timeout(ttm_bo->base.resv,
- DMA_RESV_USAGE_BOOKKEEP,
+ DMA_RESV_USAGE_PREEMPT,
true,
MAX_SCHEDULE_TIMEOUT);
if (timeout < 0) {
@@ -1056,7 +1044,7 @@ static void xe_ttm_bo_release_notify(struct ttm_buffer_object *ttm_bo)
* unbind.
*/
dma_resv_for_each_fence(&cursor, ttm_bo->base.resv,
- DMA_RESV_USAGE_BOOKKEEP, fence) {
+ DMA_RESV_USAGE_PREEMPT, fence) {
if (xe_fence_is_xe_preempt(fence) &&
!dma_fence_is_signaled(fence)) {
if (!replacement)
@@ -1065,7 +1053,7 @@ static void xe_ttm_bo_release_notify(struct ttm_buffer_object *ttm_bo)
dma_resv_replace_fences(ttm_bo->base.resv,
fence->context,
replacement,
- DMA_RESV_USAGE_BOOKKEEP);
+ DMA_RESV_USAGE_PREEMPT);
}
}
dma_fence_put(replacement);
diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c
index cfd31ae49cc1..75067c584581 100644
--- a/drivers/gpu/drm/xe/xe_migrate.c
+++ b/drivers/gpu/drm/xe/xe_migrate.c
@@ -895,10 +895,10 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
xe_sched_job_add_migrate_flush(job, flush_flags);
if (!fence) {
err = xe_sched_job_add_deps(job, src_bo->ttm.base.resv,
- DMA_RESV_USAGE_BOOKKEEP);
+ DMA_RESV_USAGE_PREEMPT);
if (!err && src_bo != dst_bo)
err = xe_sched_job_add_deps(job, dst_bo->ttm.base.resv,
- DMA_RESV_USAGE_BOOKKEEP);
+ DMA_RESV_USAGE_PREEMPT);
if (err)
goto err_job;
}
diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c
index f27f579f4d85..00358e748d0d 100644
--- a/drivers/gpu/drm/xe/xe_pt.c
+++ b/drivers/gpu/drm/xe/xe_pt.c
@@ -1115,7 +1115,7 @@ static int xe_pt_vm_dependencies(struct xe_sched_job *job,
if (pt_update_ops->wait_vm_bookkeep || pt_update_ops->wait_vm_kernel) {
err = job_test_add_deps(job, xe_vm_resv(vm),
pt_update_ops->wait_vm_bookkeep ?
- DMA_RESV_USAGE_BOOKKEEP :
+ DMA_RESV_USAGE_PREEMPT :
DMA_RESV_USAGE_KERNEL);
if (err)
return err;
@@ -1231,18 +1231,10 @@ static int vma_check_userptr(struct xe_vm *vm, struct xe_vma *vma,
spin_unlock(&vm->userptr.invalidated_lock);
if (xe_vm_in_preempt_fence_mode(vm)) {
- struct dma_resv_iter cursor;
- struct dma_fence *fence;
long err;
- dma_resv_iter_begin(&cursor, xe_vm_resv(vm),
- DMA_RESV_USAGE_BOOKKEEP);
- dma_resv_for_each_fence_unlocked(&cursor, fence)
- dma_fence_enable_sw_signaling(fence);
- dma_resv_iter_end(&cursor);
-
err = dma_resv_wait_timeout(xe_vm_resv(vm),
- DMA_RESV_USAGE_BOOKKEEP,
+ DMA_RESV_USAGE_PREEMPT,
false, MAX_SCHEDULE_TIMEOUT);
XE_WARN_ON(err <= 0);
}
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 624133fae5f5..568395530c49 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -195,7 +195,7 @@ static int add_preempt_fences(struct xe_vm *vm, struct xe_bo *bo)
if (q->lr.pfence) {
dma_resv_add_fence(bo->ttm.base.resv,
q->lr.pfence,
- DMA_RESV_USAGE_BOOKKEEP);
+ DMA_RESV_USAGE_PREEMPT);
}
return 0;
@@ -213,7 +213,7 @@ static void resume_and_reinstall_preempt_fences(struct xe_vm *vm,
q->ops->resume(q);
drm_gpuvm_resv_add_fence(&vm->gpuvm, exec, q->lr.pfence,
- DMA_RESV_USAGE_BOOKKEEP, DMA_RESV_USAGE_BOOKKEEP);
+ DMA_RESV_USAGE_PREEMPT, DMA_RESV_USAGE_PREEMPT);
}
}
@@ -250,7 +250,7 @@ int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
down_read(&vm->userptr.notifier_lock);
drm_gpuvm_resv_add_fence(&vm->gpuvm, exec, pfence,
- DMA_RESV_USAGE_BOOKKEEP, DMA_RESV_USAGE_BOOKKEEP);
+ DMA_RESV_USAGE_PREEMPT, DMA_RESV_USAGE_PREEMPT);
/*
* Check to see if a preemption on VM is in flight or userptr
@@ -588,8 +588,6 @@ static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni,
struct xe_userptr_vma *uvma = container_of(userptr, typeof(*uvma), userptr);
struct xe_vma *vma = &uvma->vma;
struct xe_vm *vm = xe_vma_vm(vma);
- struct dma_resv_iter cursor;
- struct dma_fence *fence;
long err;
xe_assert(vm->xe, xe_vma_is_userptr(vma));
@@ -625,20 +623,8 @@ static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni,
up_write(&vm->userptr.notifier_lock);
- /*
- * Preempt fences turn into schedule disables, pipeline these.
- * Note that even in fault mode, we need to wait for binds and
- * unbinds to complete, and those are attached as BOOKMARK fences
- * to the vm.
- */
- dma_resv_iter_begin(&cursor, xe_vm_resv(vm),
- DMA_RESV_USAGE_BOOKKEEP);
- dma_resv_for_each_fence_unlocked(&cursor, fence)
- dma_fence_enable_sw_signaling(fence);
- dma_resv_iter_end(&cursor);
-
err = dma_resv_wait_timeout(xe_vm_resv(vm),
- DMA_RESV_USAGE_BOOKKEEP,
+ DMA_RESV_USAGE_PREEMPT,
false, MAX_SCHEDULE_TIMEOUT);
XE_WARN_ON(err <= 0);
--
2.34.1
next prev parent reply other threads:[~2024-11-09 17:29 UTC|newest]
Thread overview: 27+ messages / expand[flat|nested] mbox.gz Atom feed top
2024-11-09 17:29 [RFC PATCH 0/6] Common preempt fences and semantics Matthew Brost
2024-11-09 17:29 ` [RFC PATCH 1/6] dma-resv: Add DMA_RESV_USAGE_PREEMPT Matthew Brost
2024-11-09 17:29 ` [RFC PATCH 2/6] drm/sched: Teach scheduler about DMA_RESV_USAGE_PREEMPT Matthew Brost
2024-11-12 9:06 ` Philipp Stanner
2024-11-12 20:08 ` Matthew Brost
2024-11-13 11:03 ` Philipp Stanner
2024-11-09 17:29 ` [RFC PATCH 3/6] dma-fence: Add dma_fence_preempt base class Matthew Brost
2024-11-09 17:29 ` [RFC PATCH 4/6] drm/sched: Teach scheduler about dma_fence_prempt type Matthew Brost
2024-11-09 17:29 ` Matthew Brost [this message]
2024-11-09 17:29 ` [RFC PATCH 6/6] drm/xe: Use dma_fence_preempt base class Matthew Brost
2024-11-09 17:35 ` ✓ CI.Patch_applied: success for Common preempt fences and semantics Patchwork
2024-11-09 17:35 ` ✗ CI.checkpatch: warning " Patchwork
2024-11-09 17:36 ` ✓ CI.KUnit: success " Patchwork
2024-11-09 17:48 ` ✓ CI.Build: " Patchwork
2024-11-09 17:50 ` ✓ CI.Hooks: " Patchwork
2024-11-09 17:51 ` ✗ CI.checksparse: warning " Patchwork
2024-11-09 18:16 ` ✓ CI.BAT: success " Patchwork
2024-11-10 8:13 ` ✗ CI.FULL: failure " Patchwork
2024-11-11 13:42 ` [RFC PATCH 0/6] " Christian König
2024-11-12 3:29 ` Matthew Brost
2024-11-12 11:09 ` Christian König
2024-11-13 2:27 ` Matthew Brost
2024-11-13 2:30 ` Matthew Brost
2024-11-13 9:02 ` Christian König
2024-11-13 15:34 ` Matthew Brost
2024-11-14 8:38 ` Christian König
2024-11-15 19:38 ` Matthew Brost
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20241109172942.482630-6-matthew.brost@intel.com \
--to=matthew.brost@intel.com \
--cc=airlied@gmail.com \
--cc=boris.brezillon@collabora.com \
--cc=christian.koenig@amd.com \
--cc=dakr@kernel.org \
--cc=dri-devel@lists.freedesktop.org \
--cc=intel-xe@lists.freedesktop.org \
--cc=ltuikov89@gmail.com \
--cc=mihail.atanassov@arm.com \
--cc=pstanner@redhat.com \
--cc=shashank.sharma@amd.com \
--cc=simona.vetter@ffwll.ch \
--cc=steven.price@arm.com \
--cc=thomas.hellstrom@linux.intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox