All of lore.kernel.org
 help / color / mirror / Atom feed
From: "Thomas Hellström" <thomas.hellstrom@linux.intel.com>
To: intel-xe@lists.freedesktop.org
Subject: [Intel-xe] [PATCH v3 5/6] drm/xe: Convert pagefaulting code to use drm_exec
Date: Thu, 31 Aug 2023 11:29:36 +0200	[thread overview]
Message-ID: <20230831092937.2197-6-thomas.hellstrom@linux.intel.com> (raw)
In-Reply-To: <20230831092937.2197-1-thomas.hellstrom@linux.intel.com>

Replace the calls into ttm_eu_reserve_buffers with the drm_exec helpers.
Also factor out the duplicated lock-and-validate sequence shared by
handle_pagefault() and handle_acc() into a common xe_pf_begin() helper.

Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
---
 drivers/gpu/drm/xe/xe_gt_pagefault.c | 106 ++++++++++++---------------
 drivers/gpu/drm/xe/xe_vm.c           |  15 ++++
 drivers/gpu/drm/xe/xe_vm.h           |   3 +
 3 files changed, 64 insertions(+), 60 deletions(-)

diff --git a/drivers/gpu/drm/xe/xe_gt_pagefault.c b/drivers/gpu/drm/xe/xe_gt_pagefault.c
index 73fc9389a663..e6197ec6f72f 100644
--- a/drivers/gpu/drm/xe/xe_gt_pagefault.c
+++ b/drivers/gpu/drm/xe/xe_gt_pagefault.c
@@ -8,6 +8,7 @@
 #include <linux/bitfield.h>
 #include <linux/circ_buf.h>
 
+#include <drm/drm_exec.h>
 #include <drm/drm_managed.h>
 #include <drm/ttm/ttm_execbuf_util.h>
 
@@ -84,11 +85,6 @@ static bool vma_matches(struct xe_vma *vma, u64 page_addr)
 	return true;
 }
 
-static bool only_needs_bo_lock(struct xe_bo *bo)
-{
-	return bo && bo->vm;
-}
-
 static struct xe_vma *lookup_vma(struct xe_vm *vm, u64 page_addr)
 {
 	struct xe_vma *vma = NULL;
@@ -103,17 +99,44 @@ static struct xe_vma *lookup_vma(struct xe_vm *vm, u64 page_addr)
 	return vma;
 }
 
+static int xe_pf_begin(struct drm_exec *exec, struct xe_vma *vma,
+		       unsigned int num_shared, bool atomic, unsigned int id)
+{
+	struct xe_bo *bo = xe_vma_bo(vma);
+	struct xe_vm *vm = xe_vma_vm(vma);
+	int err;
+
+	err = xe_vm_prepare_vma(exec, vma, num_shared);
+	if (err)
+		return err;
+
+	if (atomic) {
+		if (xe_vma_is_userptr(vma)) {
+			err = -EACCES;
+			return err;
+		}
+
+		/* Migrate to VRAM, move should invalidate the VMA first */
+		err = xe_bo_migrate(bo, XE_PL_VRAM0 + id);
+		if (err)
+			return err;
+	} else if (bo) {
+		/* Create backing store if needed */
+		err = xe_bo_validate(bo, vm, true);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
 static int handle_pagefault(struct xe_gt *gt, struct pagefault *pf)
 {
 	struct xe_device *xe = gt_to_xe(gt);
 	struct xe_tile *tile = gt_to_tile(gt);
+	struct drm_exec exec;
 	struct xe_vm *vm;
 	struct xe_vma *vma = NULL;
-	struct xe_bo *bo;
-	LIST_HEAD(objs);
-	LIST_HEAD(dups);
-	struct ttm_validate_buffer tv_bo, tv_vm;
-	struct ww_acquire_ctx ww;
 	struct dma_fence *fence;
 	bool write_locked;
 	int ret = 0;
@@ -170,35 +193,10 @@ static int handle_pagefault(struct xe_gt *gt, struct pagefault *pf)
 	}
 
 	/* Lock VM and BOs dma-resv */
-	bo = xe_vma_bo(vma);
-	if (!only_needs_bo_lock(bo)) {
-		tv_vm.num_shared = xe->info.tile_count;
-		tv_vm.bo = xe_vm_ttm_bo(vm);
-		list_add(&tv_vm.head, &objs);
-	}
-	if (bo) {
-		tv_bo.bo = &bo->ttm;
-		tv_bo.num_shared = xe->info.tile_count;
-		list_add(&tv_bo.head, &objs);
-	}
-
-	ret = ttm_eu_reserve_buffers(&ww, &objs, false, &dups);
-	if (ret)
-		goto unlock_vm;
-
-	if (atomic) {
-		if (xe_vma_is_userptr(vma)) {
-			ret = -EACCES;
-			goto unlock_dma_resv;
-		}
-
-		/* Migrate to VRAM, move should invalidate the VMA first */
-		ret = xe_bo_migrate(bo, XE_PL_VRAM0 + tile->id);
-		if (ret)
-			goto unlock_dma_resv;
-	} else if (bo) {
-		/* Create backing store if needed */
-		ret = xe_bo_validate(bo, vm, true);
+	drm_exec_init(&exec, 0);
+	drm_exec_until_all_locked(&exec) {
+		ret = xe_pf_begin(&exec, vma, xe->info.tile_count, atomic, tile->id);
+		drm_exec_retry_on_contention(&exec);
 		if (ret)
 			goto unlock_dma_resv;
 	}
@@ -225,7 +223,7 @@ static int handle_pagefault(struct xe_gt *gt, struct pagefault *pf)
 	vma->usm.tile_invalidated &= ~BIT(gt_to_tile(gt)->id);
 
 unlock_dma_resv:
-	ttm_eu_backoff_reservation(&ww, &objs);
+	drm_exec_fini(&exec);
 unlock_vm:
 	if (!ret)
 		vm->usm.last_fault_vma = vma;
@@ -490,13 +488,9 @@ static int handle_acc(struct xe_gt *gt, struct acc *acc)
 {
 	struct xe_device *xe = gt_to_xe(gt);
 	struct xe_tile *tile = gt_to_tile(gt);
+	struct drm_exec exec;
 	struct xe_vm *vm;
 	struct xe_vma *vma;
-	struct xe_bo *bo;
-	LIST_HEAD(objs);
-	LIST_HEAD(dups);
-	struct ttm_validate_buffer tv_bo, tv_vm;
-	struct ww_acquire_ctx ww;
 	int ret = 0;
 
 	/* We only support ACC_TRIGGER at the moment */
@@ -528,23 +522,15 @@ static int handle_acc(struct xe_gt *gt, struct acc *acc)
 		goto unlock_vm;
 
 	/* Lock VM and BOs dma-resv */
-	bo = xe_vma_bo(vma);
-	if (!only_needs_bo_lock(bo)) {
-		tv_vm.num_shared = xe->info.tile_count;
-		tv_vm.bo = xe_vm_ttm_bo(vm);
-		list_add(&tv_vm.head, &objs);
+	drm_exec_init(&exec, 0);
+	drm_exec_until_all_locked(&exec) {
+		ret = xe_pf_begin(&exec, vma, xe->info.tile_count, true, tile->id);
+		drm_exec_retry_on_contention(&exec);
+		if (ret)
+			break;
 	}
-	tv_bo.bo = &bo->ttm;
-	tv_bo.num_shared = xe->info.tile_count;
-	list_add(&tv_bo.head, &objs);
-	ret = ttm_eu_reserve_buffers(&ww, &objs, false, &dups);
-	if (ret)
-		goto unlock_vm;
-
-	/* Migrate to VRAM, move should invalidate the VMA first */
-	ret = xe_bo_migrate(bo, XE_PL_VRAM0 + tile->id);
 
-	ttm_eu_backoff_reservation(&ww, &objs);
+	drm_exec_fini(&exec);
 unlock_vm:
 	up_read(&vm->lock);
 	xe_vm_put(vm);
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index b95a43d0af59..1547467c7d92 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -1085,6 +1085,21 @@ static void xe_vma_destroy(struct xe_vma *vma, struct dma_fence *fence)
 	}
 }
 
+int xe_vm_prepare_vma(struct drm_exec *exec, struct xe_vma *vma,
+		      unsigned int num_shared)
+{
+	struct xe_vm *vm = xe_vma_vm(vma);
+	struct xe_bo *bo = xe_vma_bo(vma);
+	int err;
+
+	XE_WARN_ON(!vm);
+	err = drm_exec_prepare_obj(exec, &xe_vm_ttm_bo(vm)->base, num_shared);
+	if (!err && bo && !bo->vm)
+		err = drm_exec_prepare_obj(exec, &bo->ttm.base, num_shared);
+
+	return err;
+}
+
 static void xe_vma_destroy_unlocked(struct xe_vma *vma)
 {
 	struct ttm_validate_buffer tv[2];
diff --git a/drivers/gpu/drm/xe/xe_vm.h b/drivers/gpu/drm/xe/xe_vm.h
index 4a1dd11f71c5..5608e4e33169 100644
--- a/drivers/gpu/drm/xe/xe_vm.h
+++ b/drivers/gpu/drm/xe/xe_vm.h
@@ -222,6 +222,9 @@ void xe_vm_fence_all_extobjs(struct xe_vm *vm, struct dma_fence *fence,
 
 int xe_analyze_vm(struct drm_printer *p, struct xe_vm *vm, int gt_id);
 
+int xe_vm_prepare_vma(struct drm_exec *exec, struct xe_vma *vma,
+		      unsigned int num_shared);
+
 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM)
 #define vm_dbg drm_dbg
 #else
-- 
2.41.0


  parent reply	other threads:[~2023-08-31  9:30 UTC|newest]

Thread overview: 26+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2023-08-31  9:29 [Intel-xe] [PATCH v3 0/6] drm/xe: Convert to drm_exec Thomas Hellström
2023-08-31  9:29 ` [Intel-xe] [PATCH v3 1/6] drm/xe/bo: Simplify xe_bo_lock() Thomas Hellström
2023-08-31 14:43   ` Thomas Hellström
2023-08-31 17:01     ` Matthew Brost
2023-08-31 17:48       ` Thomas Hellström
2023-08-31 18:33         ` Matthew Brost
2023-09-01 11:59           ` Thomas Hellström
2023-08-31  9:29 ` [Intel-xe] [PATCH v3 2/6] drm/xe/vm: Simplify and document xe_vm_lock() Thomas Hellström
2023-08-31 17:06   ` Matthew Brost
2023-08-31 17:49     ` Thomas Hellström
2023-08-31  9:29 ` [Intel-xe] [PATCH v3 3/6] drm/xe/bo: Remove the lock_no_vm()/unlock_no_vm() interface Thomas Hellström
2023-08-31 17:10   ` Matthew Brost
2023-08-31  9:29 ` [Intel-xe] [PATCH v3 4/6] drm/xe: Rework xe_exec and the VM rebind worker to use the drm_exec helper Thomas Hellström
2023-08-31 17:51   ` Matthew Brost
2023-08-31  9:29 ` Thomas Hellström [this message]
2023-08-31 17:58   ` [Intel-xe] [PATCH v3 5/6] drm/xe: Convert pagefaulting code to use drm_exec Matthew Brost
2023-08-31  9:29 ` [Intel-xe] [PATCH v3 6/6] drm/xe: Convert remaining instances of ttm_eu_reserve_buffers to drm_exec Thomas Hellström
2023-08-31 14:42   ` Thomas Hellström
2023-08-31 18:07   ` Matthew Brost
2023-08-31 10:40 ` [Intel-xe] ✓ CI.Patch_applied: success for drm/xe: Convert to drm_exec (rev2) Patchwork
2023-08-31 10:41 ` [Intel-xe] ✗ CI.checkpatch: warning " Patchwork
2023-08-31 10:42 ` [Intel-xe] ✓ CI.KUnit: success " Patchwork
2023-08-31 10:49 ` [Intel-xe] ✓ CI.Build: " Patchwork
2023-08-31 10:49 ` [Intel-xe] ✗ CI.Hooks: failure " Patchwork
2023-08-31 10:49 ` [Intel-xe] ✗ CI.checksparse: warning " Patchwork
2023-08-31 11:20 ` [Intel-xe] ✗ CI.BAT: failure " Patchwork

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20230831092937.2197-6-thomas.hellstrom@linux.intel.com \
    --to=thomas.hellstrom@linux.intel.com \
    --cc=intel-xe@lists.freedesktop.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.