From: "Thomas Hellström" <thomas.hellstrom@linux.intel.com>
To: intel-xe@lists.freedesktop.org
Subject: [Intel-xe] [PATCH v2 6/6] drm/xe: Convert remaining instances of ttm_eu_reserve_buffers to drm_exec
Date: Thu, 31 Aug 2023 08:46:16 +0200 [thread overview]
Message-ID: <20230831064616.96445-7-thomas.hellstrom@linux.intel.com> (raw)
In-Reply-To: <20230831064616.96445-1-thomas.hellstrom@linux.intel.com>
The VM_BIND functionality and vma destruction were locking
potentially multiple dma_resv objects using the
ttm_eu_reserve_buffers() function. Rework those to use the drm_exec
helper, taking care that any calls to xe_bo_validate() end up
inside an unsealed locking transaction.
Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
---
drivers/gpu/drm/xe/xe_vm.c | 91 +++++++++++++++-----------------------
1 file changed, 36 insertions(+), 55 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index ba3a5b5b7042..d6b8a224bc7b 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -1108,27 +1108,21 @@ int xe_vm_prepare_vma(struct drm_exec *exec, struct xe_vma *vma,
static void xe_vma_destroy_unlocked(struct xe_vma *vma)
{
- struct ttm_validate_buffer tv[2];
- struct ww_acquire_ctx ww;
struct xe_bo *bo = xe_vma_bo(vma);
- LIST_HEAD(objs);
- LIST_HEAD(dups);
+ struct drm_exec exec;
int err;
- memset(tv, 0, sizeof(tv));
- tv[0].bo = xe_vm_ttm_bo(xe_vma_vm(vma));
- list_add(&tv[0].head, &objs);
-
- if (bo) {
- tv[1].bo = &xe_bo_get(bo)->ttm;
- list_add(&tv[1].head, &objs);
+ drm_exec_init(&exec, 0);
+ drm_exec_until_all_locked(&exec) {
+ err = xe_vm_prepare_vma(&exec, vma, 0);
+ drm_exec_retry_on_contention(&exec);
+ if (XE_WARN_ON(err))
+ break;
}
- err = ttm_eu_reserve_buffers(&ww, &objs, false, &dups);
- XE_WARN_ON(err);
xe_vma_destroy(vma, NULL);
- ttm_eu_backoff_reservation(&ww, &objs);
+ drm_exec_fini(&exec);
if (bo)
xe_bo_put(bo);
}
@@ -2144,12 +2138,6 @@ struct ttm_buffer_object *xe_vm_ttm_bo(struct xe_vm *vm)
return &vm->pt_root[idx]->bo->ttm;
}
-static void xe_vm_tv_populate(struct xe_vm *vm, struct ttm_validate_buffer *tv)
-{
- tv->num_shared = 1;
- tv->bo = xe_vm_ttm_bo(vm);
-}
-
static void vm_set_async_error(struct xe_vm *vm, int err)
{
lockdep_assert_held(&vm->lock);
@@ -2650,42 +2638,16 @@ static int xe_vma_op_commit(struct xe_vm *vm, struct xe_vma_op *op)
return err;
}
-static int __xe_vma_op_execute(struct xe_vm *vm, struct xe_vma *vma,
- struct xe_vma_op *op)
+static int op_execute(struct drm_exec *exec, struct xe_vm *vm,
+ struct xe_vma *vma, struct xe_vma_op *op)
{
- LIST_HEAD(objs);
- LIST_HEAD(dups);
- struct ttm_validate_buffer tv_bo, tv_vm;
- struct ww_acquire_ctx ww;
- struct xe_bo *vbo;
int err;
lockdep_assert_held_write(&vm->lock);
- xe_vm_tv_populate(vm, &tv_vm);
- list_add_tail(&tv_vm.head, &objs);
- vbo = xe_vma_bo(vma);
- if (vbo) {
- /*
- * An unbind can drop the last reference to the BO and
- * the BO is needed for ttm_eu_backoff_reservation so
- * take a reference here.
- */
- xe_bo_get(vbo);
-
- if (!vbo->vm) {
- tv_bo.bo = &vbo->ttm;
- tv_bo.num_shared = 1;
- list_add(&tv_bo.head, &objs);
- }
- }
-
-again:
- err = ttm_eu_reserve_buffers(&ww, &objs, true, &dups);
- if (err) {
- xe_bo_put(vbo);
+ err = xe_vm_prepare_vma(exec, vma, 1);
+ if (err)
return err;
- }
xe_vm_assert_held(vm);
xe_bo_assert_held(xe_vma_bo(vma));
@@ -2764,17 +2726,36 @@ static int __xe_vma_op_execute(struct xe_vm *vm, struct xe_vma *vma,
XE_WARN_ON("NOT POSSIBLE");
}
- ttm_eu_backoff_reservation(&ww, &objs);
+ if (err)
+ trace_xe_vma_fail(vma);
+
+ return err;
+}
+
+static int __xe_vma_op_execute(struct xe_vm *vm, struct xe_vma *vma,
+ struct xe_vma_op *op)
+{
+ struct drm_exec exec;
+ int err;
+
+retry_userptr:
+ drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
+ drm_exec_until_all_locked(&exec) {
+ err = op_execute(&exec, vm, vma, op);
+ drm_exec_retry_on_contention(&exec);
+ if (err)
+ break;
+ }
+ drm_exec_fini(&exec);
+
if (err == -EAGAIN && xe_vma_is_userptr(vma)) {
lockdep_assert_held_write(&vm->lock);
err = xe_vma_userptr_pin_pages(vma);
if (!err)
- goto again;
- }
- xe_bo_put(vbo);
+ goto retry_userptr;
- if (err)
trace_xe_vma_fail(vma);
+ }
return err;
}
--
2.41.0
next prev parent reply other threads:[~2023-08-31 6:47 UTC|newest]
Thread overview: 8+ messages / expand[flat|nested] mbox.gz Atom feed top
2023-08-31 6:46 [Intel-xe] [PATCH v2 0/6] drm/xe: Convert to drm_exec Thomas Hellström
2023-08-31 6:46 ` [Intel-xe] [PATCH v2 1/6] drm/xe/bo: Simplify xe_bo_lock() Thomas Hellström
2023-08-31 6:46 ` [Intel-xe] [PATCH v2 2/6] drm/xe/vm: Simplify and document xe_vm_lock() Thomas Hellström
2023-08-31 6:46 ` [Intel-xe] [PATCH v2 3/6] drm/xe/bo: Remove the lock_no_vm()/unlock_no_vm() interface Thomas Hellström
2023-08-31 6:46 ` [Intel-xe] [PATCH v2 4/6] drm/xe: Rework xe_exec and the VM rebind worker to use the drm_exec helper Thomas Hellström
2023-08-31 6:46 ` [Intel-xe] [PATCH v2 5/6] drm/xe: Convert pagefaulting code to use drm_exec Thomas Hellström
2023-08-31 6:46 ` Thomas Hellström [this message]
2023-08-31 7:05 ` [Intel-xe] ✗ CI.Patch_applied: failure for drm/xe: Convert to drm_exec Patchwork
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20230831064616.96445-7-thomas.hellstrom@linux.intel.com \
--to=thomas.hellstrom@linux.intel.com \
--cc=intel-xe@lists.freedesktop.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.