From: "Thomas Hellström" <thomas.hellstrom@linux.intel.com>
To: intel-xe@lists.freedesktop.org
Subject: [Intel-xe] [PATCH v5 6/6] drm/xe: Convert remaining instances of ttm_eu_reserve_buffers to drm_exec
Date: Wed, 6 Sep 2023 17:39:16 +0200 [thread overview]
Message-ID: <20230906153916.5665-7-thomas.hellstrom@linux.intel.com> (raw)
In-Reply-To: <20230906153916.5665-1-thomas.hellstrom@linux.intel.com>
The VM_BIND functionality and vma destruction were locking
potentially multiple dma_resv objects using the
ttm_eu_reserve_buffers() function. Rework those to use the drm_exec
helper, taking care that any call to xe_bo_validate() ends up
inside an unsealed locking transaction.
v4:
- Remove an unbalanced xe_bo_put() (igt and Matthew Brost)
v5:
- Rebase conflict
Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
---
drivers/gpu/drm/xe/xe_vm.c | 94 +++++++++++++++-----------------------
1 file changed, 36 insertions(+), 58 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 0df59e2926da..8b78cbafec19 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -1119,29 +1119,20 @@ int xe_vm_prepare_vma(struct drm_exec *exec, struct xe_vma *vma,
static void xe_vma_destroy_unlocked(struct xe_vma *vma)
{
- struct ttm_validate_buffer tv[2];
- struct ww_acquire_ctx ww;
- struct xe_bo *bo = xe_vma_bo(vma);
- LIST_HEAD(objs);
- LIST_HEAD(dups);
+ struct drm_exec exec;
int err;
- memset(tv, 0, sizeof(tv));
- tv[0].bo = xe_vm_ttm_bo(xe_vma_vm(vma));
- list_add(&tv[0].head, &objs);
-
- if (bo) {
- tv[1].bo = &xe_bo_get(bo)->ttm;
- list_add(&tv[1].head, &objs);
+ drm_exec_init(&exec, 0);
+ drm_exec_until_all_locked(&exec) {
+ err = xe_vm_prepare_vma(&exec, vma, 0);
+ drm_exec_retry_on_contention(&exec);
+ if (XE_WARN_ON(err))
+ break;
}
- err = ttm_eu_reserve_buffers(&ww, &objs, false, &dups);
- XE_WARN_ON(err);
xe_vma_destroy(vma, NULL);
- ttm_eu_backoff_reservation(&ww, &objs);
- if (bo)
- xe_bo_put(bo);
+ drm_exec_fini(&exec);
}
struct xe_vma *
@@ -2155,12 +2146,6 @@ struct ttm_buffer_object *xe_vm_ttm_bo(struct xe_vm *vm)
return &vm->pt_root[idx]->bo->ttm;
}
-static void xe_vm_tv_populate(struct xe_vm *vm, struct ttm_validate_buffer *tv)
-{
- tv->num_shared = 1;
- tv->bo = xe_vm_ttm_bo(vm);
-}
-
static void vm_set_async_error(struct xe_vm *vm, int err)
{
lockdep_assert_held(&vm->lock);
@@ -2665,42 +2650,16 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
return err;
}
-static int __xe_vma_op_execute(struct xe_vm *vm, struct xe_vma *vma,
- struct xe_vma_op *op)
+static int op_execute(struct drm_exec *exec, struct xe_vm *vm,
+ struct xe_vma *vma, struct xe_vma_op *op)
{
- LIST_HEAD(objs);
- LIST_HEAD(dups);
- struct ttm_validate_buffer tv_bo, tv_vm;
- struct ww_acquire_ctx ww;
- struct xe_bo *vbo;
int err;
lockdep_assert_held_write(&vm->lock);
- xe_vm_tv_populate(vm, &tv_vm);
- list_add_tail(&tv_vm.head, &objs);
- vbo = xe_vma_bo(vma);
- if (vbo) {
- /*
- * An unbind can drop the last reference to the BO and
- * the BO is needed for ttm_eu_backoff_reservation so
- * take a reference here.
- */
- xe_bo_get(vbo);
-
- if (!vbo->vm) {
- tv_bo.bo = &vbo->ttm;
- tv_bo.num_shared = 1;
- list_add(&tv_bo.head, &objs);
- }
- }
-
-again:
- err = ttm_eu_reserve_buffers(&ww, &objs, true, &dups);
- if (err) {
- xe_bo_put(vbo);
+ err = xe_vm_prepare_vma(exec, vma, 1);
+ if (err)
return err;
- }
xe_vm_assert_held(vm);
xe_bo_assert_held(xe_vma_bo(vma));
@@ -2779,17 +2738,36 @@ static int __xe_vma_op_execute(struct xe_vm *vm, struct xe_vma *vma,
XE_WARN_ON("NOT POSSIBLE");
}
- ttm_eu_backoff_reservation(&ww, &objs);
+ if (err)
+ trace_xe_vma_fail(vma);
+
+ return err;
+}
+
+static int __xe_vma_op_execute(struct xe_vm *vm, struct xe_vma *vma,
+ struct xe_vma_op *op)
+{
+ struct drm_exec exec;
+ int err;
+
+retry_userptr:
+ drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
+ drm_exec_until_all_locked(&exec) {
+ err = op_execute(&exec, vm, vma, op);
+ drm_exec_retry_on_contention(&exec);
+ if (err)
+ break;
+ }
+ drm_exec_fini(&exec);
+
if (err == -EAGAIN && xe_vma_is_userptr(vma)) {
lockdep_assert_held_write(&vm->lock);
err = xe_vma_userptr_pin_pages(vma);
if (!err)
- goto again;
- }
- xe_bo_put(vbo);
+ goto retry_userptr;
- if (err)
trace_xe_vma_fail(vma);
+ }
return err;
}
--
2.41.0
next prev parent reply other threads:[~2023-09-06 15:39 UTC|newest]
Thread overview: 27+ messages / expand[flat|nested] mbox.gz Atom feed top
2023-09-06 15:39 [Intel-xe] [PATCH v5 0/6] drm/xe: Convert to drm_exec Thomas Hellström
2023-09-06 15:39 ` [Intel-xe] [PATCH v5 1/6] drm/xe/bo: Simplify xe_bo_lock() Thomas Hellström
2023-09-06 15:39 ` [Intel-xe] [PATCH v5 2/6] drm/xe/vm: Simplify and document xe_vm_lock() Thomas Hellström
2023-09-06 15:39 ` [Intel-xe] [PATCH v5 3/6] drm/xe/bo: Remove the lock_no_vm()/unlock_no_vm() interface Thomas Hellström
2023-09-06 15:39 ` [Intel-xe] [PATCH v5 4/6] drm/xe: Rework xe_exec and the VM rebind worker to use the drm_exec helper Thomas Hellström
2023-09-06 15:39 ` [Intel-xe] [PATCH v5 5/6] drm/xe: Convert pagefaulting code to use drm_exec Thomas Hellström
2023-09-06 15:39 ` Thomas Hellström [this message]
2023-09-06 15:42 ` [Intel-xe] ✓ CI.Patch_applied: success for drm/xe: Convert to drm_exec (rev4) Patchwork
2023-09-06 15:42 ` [Intel-xe] ✓ CI.checkpatch: " Patchwork
2023-09-06 15:43 ` [Intel-xe] ✓ CI.KUnit: " Patchwork
2023-09-06 15:50 ` [Intel-xe] ✓ CI.Build: " Patchwork
2023-09-06 15:51 ` [Intel-xe] ✓ CI.Hooks: " Patchwork
2023-09-06 15:52 ` [Intel-xe] ✗ CI.checksparse: warning " Patchwork
2023-09-07 14:08 ` [Intel-xe] ✓ CI.Patch_applied: success for drm/xe: Convert to drm_exec (rev5) Patchwork
2023-09-07 14:08 ` [Intel-xe] ✓ CI.checkpatch: " Patchwork
2023-09-07 14:09 ` [Intel-xe] ✓ CI.KUnit: " Patchwork
2023-09-07 14:16 ` [Intel-xe] ✓ CI.Build: " Patchwork
2023-09-07 14:17 ` [Intel-xe] ✓ CI.Hooks: " Patchwork
2023-09-07 14:18 ` [Intel-xe] ✗ CI.checksparse: warning " Patchwork
2023-09-07 14:50 ` [Intel-xe] ✗ CI.BAT: failure " Patchwork
2023-09-08 0:32 ` [Intel-xe] ✓ CI.Patch_applied: success for drm/xe: Convert to drm_exec (rev6) Patchwork
2023-09-08 0:32 ` [Intel-xe] ✓ CI.checkpatch: " Patchwork
2023-09-08 0:33 ` [Intel-xe] ✓ CI.KUnit: " Patchwork
2023-09-08 0:40 ` [Intel-xe] ✓ CI.Build: " Patchwork
2023-09-08 0:41 ` [Intel-xe] ✓ CI.Hooks: " Patchwork
2023-09-08 0:42 ` [Intel-xe] ✗ CI.checksparse: warning " Patchwork
2023-09-08 1:13 ` [Intel-xe] ✗ CI.BAT: failure " Patchwork
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20230906153916.5665-7-thomas.hellstrom@linux.intel.com \
--to=thomas.hellstrom@linux.intel.com \
--cc=intel-xe@lists.freedesktop.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox