From: "Thomas Hellström" <thomas.hellstrom@linux.intel.com>
To: intel-xe@lists.freedesktop.org
Subject: [Intel-xe] [PATCH v2 3/6] drm/xe/bo: Remove the lock_no_vm()/unlock_no_vm() interface
Date: Thu, 31 Aug 2023 08:46:13 +0200 [thread overview]
Message-ID: <20230831064616.96445-4-thomas.hellstrom@linux.intel.com> (raw)
In-Reply-To: <20230831064616.96445-1-thomas.hellstrom@linux.intel.com>
Apart from asserts, it's essentially the same as
xe_bo_lock()/xe_bo_unlock(), and the usage intentions of this interface
were unclear. Remove it.
v2:
- Update the xe_display subsystem as well.
Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
---
drivers/gpu/drm/i915/display/intel_fb.c | 4 ++--
drivers/gpu/drm/xe/tests/xe_bo.c | 2 +-
drivers/gpu/drm/xe/tests/xe_dma_buf.c | 4 ++--
drivers/gpu/drm/xe/tests/xe_migrate.c | 2 +-
drivers/gpu/drm/xe/xe_bo.h | 23 ++---------------------
drivers/gpu/drm/xe/xe_dma_buf.c | 5 +++--
drivers/gpu/drm/xe/xe_lrc.c | 10 ++--------
7 files changed, 13 insertions(+), 37 deletions(-)
diff --git a/drivers/gpu/drm/i915/display/intel_fb.c b/drivers/gpu/drm/i915/display/intel_fb.c
index e0bac4cf3f4b..f5a96b94cfba 100644
--- a/drivers/gpu/drm/i915/display/intel_fb.c
+++ b/drivers/gpu/drm/i915/display/intel_fb.c
@@ -1892,9 +1892,9 @@ static void intel_user_framebuffer_destroy_vm(struct drm_framebuffer *fb)
struct xe_bo *bo = intel_fb_obj(fb);
/* Unpin our kernel fb first */
- xe_bo_lock_no_vm(bo, NULL);
+ xe_bo_lock(bo, false);
xe_bo_unpin(bo);
- xe_bo_unlock_no_vm(bo);
+ xe_bo_unlock(bo);
}
xe_bo_put(intel_fb_obj(fb));
#endif
diff --git a/drivers/gpu/drm/xe/tests/xe_bo.c b/drivers/gpu/drm/xe/tests/xe_bo.c
index c6025404042d..acc5ad01baaf 100644
--- a/drivers/gpu/drm/xe/tests/xe_bo.c
+++ b/drivers/gpu/drm/xe/tests/xe_bo.c
@@ -143,7 +143,7 @@ static void ccs_test_run_gt(struct xe_device *xe, struct xe_gt *gt,
ret = ccs_test_migrate(gt, bo, true, 0ULL, 0ULL, test);
out_unlock:
- xe_bo_unlock_no_vm(bo);
+ xe_bo_unlock(bo);
xe_bo_put(bo);
}
diff --git a/drivers/gpu/drm/xe/tests/xe_dma_buf.c b/drivers/gpu/drm/xe/tests/xe_dma_buf.c
index 513a3b3362e9..1c3f4bc72b99 100644
--- a/drivers/gpu/drm/xe/tests/xe_dma_buf.c
+++ b/drivers/gpu/drm/xe/tests/xe_dma_buf.c
@@ -148,14 +148,14 @@ static void xe_test_dmabuf_import_same_driver(struct xe_device *xe)
int err;
/* Is everything where we expect it to be? */
- xe_bo_lock_no_vm(import_bo, NULL);
+ xe_bo_lock(import_bo, false);
err = xe_bo_validate(import_bo, NULL, false);
if (err && err != -EINTR && err != -ERESTARTSYS)
KUNIT_FAIL(test,
"xe_bo_validate() failed with err=%d\n", err);
check_residency(test, bo, import_bo, dmabuf);
- xe_bo_unlock_no_vm(import_bo);
+ xe_bo_unlock(import_bo);
}
drm_gem_object_put(import);
} else if (PTR_ERR(import) != -EOPNOTSUPP) {
diff --git a/drivers/gpu/drm/xe/tests/xe_migrate.c b/drivers/gpu/drm/xe/tests/xe_migrate.c
index 8bb081086ca2..f58cd1da1a34 100644
--- a/drivers/gpu/drm/xe/tests/xe_migrate.c
+++ b/drivers/gpu/drm/xe/tests/xe_migrate.c
@@ -183,7 +183,7 @@ static void test_copy(struct xe_migrate *m, struct xe_bo *bo,
xe_bo_vunmap(sysmem);
out_unlock:
- xe_bo_unlock_no_vm(sysmem);
+ xe_bo_unlock(sysmem);
xe_bo_put(sysmem);
}
diff --git a/drivers/gpu/drm/xe/xe_bo.h b/drivers/gpu/drm/xe/xe_bo.h
index a7b9e7084225..9097bcc13209 100644
--- a/drivers/gpu/drm/xe/xe_bo.h
+++ b/drivers/gpu/drm/xe/xe_bo.h
@@ -164,25 +164,6 @@ static inline void xe_bo_unlock_vm_held(struct xe_bo *bo)
}
}
-static inline void xe_bo_lock_no_vm(struct xe_bo *bo,
- struct ww_acquire_ctx *ctx)
-{
- if (bo) {
- XE_WARN_ON(bo->vm || (bo->ttm.type != ttm_bo_type_sg &&
- bo->ttm.base.resv != &bo->ttm.base._resv));
- dma_resv_lock(bo->ttm.base.resv, ctx);
- }
-}
-
-static inline void xe_bo_unlock_no_vm(struct xe_bo *bo)
-{
- if (bo) {
- XE_WARN_ON(bo->vm || (bo->ttm.type != ttm_bo_type_sg &&
- bo->ttm.base.resv != &bo->ttm.base._resv));
- dma_resv_unlock(bo->ttm.base.resv);
- }
-}
-
int xe_bo_pin_external(struct xe_bo *bo);
int xe_bo_pin(struct xe_bo *bo);
void xe_bo_unpin_external(struct xe_bo *bo);
@@ -197,9 +178,9 @@ static inline bool xe_bo_is_pinned(struct xe_bo *bo)
static inline void xe_bo_unpin_map_no_vm(struct xe_bo *bo)
{
if (likely(bo)) {
- xe_bo_lock_no_vm(bo, NULL);
+ xe_bo_lock(bo, false);
xe_bo_unpin(bo);
- xe_bo_unlock_no_vm(bo);
+ xe_bo_unlock(bo);
xe_bo_put(bo);
}
diff --git a/drivers/gpu/drm/xe/xe_dma_buf.c b/drivers/gpu/drm/xe/xe_dma_buf.c
index 975dee1f770f..09343b8b3e96 100644
--- a/drivers/gpu/drm/xe/xe_dma_buf.c
+++ b/drivers/gpu/drm/xe/xe_dma_buf.c
@@ -153,9 +153,10 @@ static int xe_dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
if (!reads)
return 0;
- xe_bo_lock_no_vm(bo, NULL);
+ /* Can we do interruptible lock here? */
+ xe_bo_lock(bo, false);
(void)xe_bo_migrate(bo, XE_PL_TT);
- xe_bo_unlock_no_vm(bo);
+ xe_bo_unlock(bo);
return 0;
}
diff --git a/drivers/gpu/drm/xe/xe_lrc.c b/drivers/gpu/drm/xe/xe_lrc.c
index 434fbb364b4b..6f899b6a4877 100644
--- a/drivers/gpu/drm/xe/xe_lrc.c
+++ b/drivers/gpu/drm/xe/xe_lrc.c
@@ -790,15 +790,9 @@ int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
void xe_lrc_finish(struct xe_lrc *lrc)
{
xe_hw_fence_ctx_finish(&lrc->fence_ctx);
- if (lrc->bo->vm)
- xe_vm_lock(lrc->bo->vm, false);
- else
- xe_bo_lock_no_vm(lrc->bo, NULL);
+ xe_bo_lock(lrc->bo, false);
xe_bo_unpin(lrc->bo);
- if (lrc->bo->vm)
- xe_vm_unlock(lrc->bo->vm);
- else
- xe_bo_unlock_no_vm(lrc->bo);
+ xe_bo_unlock(lrc->bo);
xe_bo_put(lrc->bo);
}
--
2.41.0
next prev parent reply other threads:[~2023-08-31 6:46 UTC|newest]
Thread overview: 8+ messages / expand[flat|nested] mbox.gz Atom feed top
2023-08-31 6:46 [Intel-xe] [PATCH v2 0/6] drm/xe: Convert to drm_exec Thomas Hellström
2023-08-31 6:46 ` [Intel-xe] [PATCH v2 1/6] drm/xe/bo: Simplify xe_bo_lock() Thomas Hellström
2023-08-31 6:46 ` [Intel-xe] [PATCH v2 2/6] drm/xe/vm: Simplify and document xe_vm_lock() Thomas Hellström
2023-08-31 6:46 ` Thomas Hellström [this message]
2023-08-31 6:46 ` [Intel-xe] [PATCH v2 4/6] drm/xe: Rework xe_exec and the VM rebind worker to use the drm_exec helper Thomas Hellström
2023-08-31 6:46 ` [Intel-xe] [PATCH v2 5/6] drm/xe: Convert pagefaulting code to use drm_exec Thomas Hellström
2023-08-31 6:46 ` [Intel-xe] [PATCH v2 6/6] drm/xe: Convert remaining instances of ttm_eu_reserve_buffers to drm_exec Thomas Hellström
2023-08-31 7:05 ` [Intel-xe] ✗ CI.Patch_applied: failure for drm/xe: Convert " Patchwork
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20230831064616.96445-4-thomas.hellstrom@linux.intel.com \
--to=thomas.hellstrom@linux.intel.com \
--cc=intel-xe@lists.freedesktop.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.