Intel-XE Archive on lore.kernel.org
 help / color / mirror / Atom feed
From: Matthew Auld <matthew.auld@intel.com>
To: intel-xe@lists.freedesktop.org
Cc: dri-devel@lists.freedesktop.org,
	"Matthew Brost" <matthew.brost@intel.com>,
	"Himal Prasad Ghimiray" <himal.prasad.ghimiray@intel.com>,
	"Thomas Hellström" <thomas.hellstrom@linux.intel.com>
Subject: [PATCH v2 7/7] drm/xe/pt: unify xe_pt_svm_pre_commit with userptr
Date: Fri, 28 Mar 2025 18:10:36 +0000	[thread overview]
Message-ID: <20250328181028.288312-16-matthew.auld@intel.com> (raw)
In-Reply-To: <20250328181028.288312-9-matthew.auld@intel.com>

We now use the same notifier lock for SVM and userptr; with that, we can
combine xe_pt_userptr_pre_commit and xe_pt_svm_pre_commit.

Suggested-by: Matthew Brost <matthew.brost@intel.com>
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
---
 drivers/gpu/drm/xe/xe_pt.c | 95 +++++++++++++-------------------------
 1 file changed, 33 insertions(+), 62 deletions(-)

diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c
index b097c91e2e02..947b82aa19a6 100644
--- a/drivers/gpu/drm/xe/xe_pt.c
+++ b/drivers/gpu/drm/xe/xe_pt.c
@@ -1396,7 +1396,7 @@ static int op_check_userptr(struct xe_vm *vm, struct xe_vma_op *op,
 	return err;
 }
 
-static int xe_pt_userptr_pre_commit(struct xe_migrate_pt_update *pt_update)
+static int xe_pt_svm_userptr_pre_commit(struct xe_migrate_pt_update *pt_update)
 {
 	struct xe_vm *vm = pt_update->vops->vm;
 	struct xe_vma_ops *vops = pt_update->vops;
@@ -1409,55 +1409,40 @@ static int xe_pt_userptr_pre_commit(struct xe_migrate_pt_update *pt_update)
 	if (err)
 		return err;
 
-	down_read(&vm->svm.gpusvm.notifier_lock);
+	drm_gpusvm_notifier_lock(&vm->svm.gpusvm);
 
 	list_for_each_entry(op, &vops->list, link) {
-		err = op_check_userptr(vm, op, pt_update_ops);
-		if (err) {
-			up_read(&vm->svm.gpusvm.notifier_lock);
-			break;
+		if (pt_update_ops->needs_svm_lock) {
+#if IS_ENABLED(CONFIG_DRM_XE_GPUSVM)
+			struct xe_svm_range *range = op->map_range.range;
+
+			if (op->subop == XE_VMA_SUBOP_UNMAP_RANGE)
+				continue;
+
+			xe_svm_range_debug(range, "PRE-COMMIT");
+
+			xe_assert(vm->xe,
+				  xe_vma_is_cpu_addr_mirror(op->map_range.vma));
+			xe_assert(vm->xe, op->subop == XE_VMA_SUBOP_MAP_RANGE);
+
+			if (!xe_svm_range_pages_valid(range)) {
+				xe_svm_range_debug(range, "PRE-COMMIT - RETRY");
+				drm_gpusvm_notifier_unlock(&vm->svm.gpusvm);
+				return -EAGAIN;
+			}
+#endif
+		} else {
+			err = op_check_userptr(vm, op, pt_update_ops);
+			if (err) {
+				drm_gpusvm_notifier_unlock(&vm->svm.gpusvm);
+				break;
+			}
 		}
 	}
 
 	return err;
 }
 
-#if IS_ENABLED(CONFIG_DRM_XE_GPUSVM)
-static int xe_pt_svm_pre_commit(struct xe_migrate_pt_update *pt_update)
-{
-	struct xe_vm *vm = pt_update->vops->vm;
-	struct xe_vma_ops *vops = pt_update->vops;
-	struct xe_vma_op *op;
-	int err;
-
-	err = xe_pt_pre_commit(pt_update);
-	if (err)
-		return err;
-
-	xe_svm_notifier_lock(vm);
-
-	list_for_each_entry(op, &vops->list, link) {
-		struct xe_svm_range *range = op->map_range.range;
-
-		if (op->subop == XE_VMA_SUBOP_UNMAP_RANGE)
-			continue;
-
-		xe_svm_range_debug(range, "PRE-COMMIT");
-
-		xe_assert(vm->xe, xe_vma_is_cpu_addr_mirror(op->map_range.vma));
-		xe_assert(vm->xe, op->subop == XE_VMA_SUBOP_MAP_RANGE);
-
-		if (!xe_svm_range_pages_valid(range)) {
-			xe_svm_range_debug(range, "PRE-COMMIT - RETRY");
-			xe_svm_notifier_unlock(vm);
-			return -EAGAIN;
-		}
-	}
-
-	return 0;
-}
-#endif
-
 struct invalidation_fence {
 	struct xe_gt_tlb_invalidation_fence base;
 	struct xe_gt *gt;
@@ -2255,22 +2240,12 @@ static const struct xe_migrate_pt_update_ops migrate_ops = {
 	.pre_commit = xe_pt_pre_commit,
 };
 
-static const struct xe_migrate_pt_update_ops userptr_migrate_ops = {
+static const struct xe_migrate_pt_update_ops svm_userptr_migrate_ops = {
 	.populate = xe_vm_populate_pgtable,
 	.clear = xe_migrate_clear_pgtable_callback,
-	.pre_commit = xe_pt_userptr_pre_commit,
+	.pre_commit = xe_pt_svm_userptr_pre_commit,
 };
 
-#if IS_ENABLED(CONFIG_DRM_XE_GPUSVM)
-static const struct xe_migrate_pt_update_ops svm_migrate_ops = {
-	.populate = xe_vm_populate_pgtable,
-	.clear = xe_migrate_clear_pgtable_callback,
-	.pre_commit = xe_pt_svm_pre_commit,
-};
-#else
-static const struct xe_migrate_pt_update_ops svm_migrate_ops;
-#endif
-
 /**
  * xe_pt_update_ops_run() - Run PT update operations
  * @tile: Tile of PT update operations
@@ -2296,10 +2271,8 @@ xe_pt_update_ops_run(struct xe_tile *tile, struct xe_vma_ops *vops)
 	struct xe_vma_op *op;
 	int err = 0, i;
 	struct xe_migrate_pt_update update = {
-		.ops = pt_update_ops->needs_svm_lock ?
-			&svm_migrate_ops :
-			pt_update_ops->needs_userptr_lock ?
-			&userptr_migrate_ops :
+		.ops = pt_update_ops->needs_svm_lock || pt_update_ops->needs_userptr_lock ?
+			&svm_userptr_migrate_ops :
 			&migrate_ops,
 		.vops = vops,
 		.tile_id = tile->id,
@@ -2419,10 +2392,8 @@ xe_pt_update_ops_run(struct xe_tile *tile, struct xe_vma_ops *vops)
 				  &ifence->base.base, &mfence->base.base);
 	}
 
-	if (pt_update_ops->needs_svm_lock)
-		xe_svm_notifier_unlock(vm);
-	if (pt_update_ops->needs_userptr_lock)
-		up_read(&vm->svm.gpusvm.notifier_lock);
+	if (pt_update_ops->needs_svm_lock || pt_update_ops->needs_userptr_lock)
+		drm_gpusvm_notifier_unlock(&vm->svm.gpusvm);
 
 	return fence;
 
-- 
2.48.1


  parent reply	other threads:[~2025-03-28 18:11 UTC|newest]

Thread overview: 19+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2025-03-28 18:10 [PATCH v2 0/7] Replace xe_hmm with gpusvm Matthew Auld
2025-03-28 18:10 ` [PATCH v2 1/7] drm/gpusvm: fix hmm_pfn_to_map_order() usage Matthew Auld
2025-03-28 18:10 ` [PATCH v2 2/7] drm/gpusvm: use more selective dma dir in get_pages() Matthew Auld
2025-03-28 18:10 ` [PATCH v2 3/7] drm/gpusvm: pull out drm_gpusvm_pages substructure Matthew Auld
2025-04-03 21:15   ` Matthew Brost
2025-03-28 18:10 ` [PATCH v2 4/7] drm/gpusvm: refactor core API to use pages struct Matthew Auld
2025-03-28 18:10 ` [PATCH v2 5/7] drm/gpusvm: export drm_gpusvm_pages API Matthew Auld
2025-03-28 18:10 ` [PATCH v2 6/7] drm/xe/userptr: replace xe_hmm with gpusvm Matthew Auld
2025-03-28 18:10 ` Matthew Auld [this message]
2025-04-03 21:23   ` [PATCH v2 7/7] drm/xe/pt: unify xe_pt_svm_pre_commit with userptr Matthew Brost
2025-04-04  8:20     ` Matthew Auld
2025-04-03 21:25   ` Matthew Brost
2025-04-04  8:19     ` Matthew Auld
2025-04-04 17:02       ` Matthew Brost
2025-04-07  7:29         ` Matthew Auld
2025-04-23 16:20           ` Matthew Brost
2025-03-28 20:46 ` ✓ CI.Patch_applied: success for Replace xe_hmm with gpusvm (rev2) Patchwork
2025-03-28 20:47 ` ✗ CI.checkpatch: warning " Patchwork
2025-03-28 20:47 ` ✗ CI.KUnit: failure " Patchwork

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20250328181028.288312-16-matthew.auld@intel.com \
    --to=matthew.auld@intel.com \
    --cc=dri-devel@lists.freedesktop.org \
    --cc=himal.prasad.ghimiray@intel.com \
    --cc=intel-xe@lists.freedesktop.org \
    --cc=matthew.brost@intel.com \
    --cc=thomas.hellstrom@linux.intel.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox