Intel-XE Archive on lore.kernel.org
 help / color / mirror / Atom feed
From: Matthew Auld <matthew.auld@intel.com>
To: intel-xe@lists.freedesktop.org
Cc: dri-devel@lists.freedesktop.org,
	"Thomas Hellström" <thomas.hellstrom@linux.intel.com>,
	"Matthew Brost" <matthew.brost@intel.com>
Subject: [PATCH 6/7] drm/gpusvm: support basic_range
Date: Thu, 20 Mar 2025 17:30:03 +0000	[thread overview]
Message-ID: <20250320172956.168358-15-matthew.auld@intel.com> (raw)
In-Reply-To: <20250320172956.168358-9-matthew.auld@intel.com>

The idea is to use this for userptr, where we mostly just need
get/unmap/free pages, plus some kind of common notifier lock at the svm
level (provided by gpusvm). The range itself also maps to a single
notifier, covering the entire range (provided by the user).

TODO: needs proper kernel-doc, assuming this change makes sense.

Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Cc: Matthew Brost <matthew.brost@intel.com>
---
 drivers/gpu/drm/drm_gpusvm.c | 138 +++++++++++++++++++++++++++++------
 include/drm/drm_gpusvm.h     |  23 ++++++
 2 files changed, 137 insertions(+), 24 deletions(-)

diff --git a/drivers/gpu/drm/drm_gpusvm.c b/drivers/gpu/drm/drm_gpusvm.c
index 2beca5a6dc0a..ca571610214c 100644
--- a/drivers/gpu/drm/drm_gpusvm.c
+++ b/drivers/gpu/drm/drm_gpusvm.c
@@ -521,6 +521,41 @@ static const struct mmu_interval_notifier_ops drm_gpusvm_notifier_ops = {
 	.invalidate = drm_gpusvm_notifier_invalidate,
 };
 
+static void __drm_gpusvm_init(struct drm_gpusvm *gpusvm, const char *name,
+			      struct drm_device *drm, struct mm_struct *mm,
+			      void *device_private_page_owner,
+			      unsigned long mm_start, unsigned long mm_range,
+			      unsigned long notifier_size,
+			      const struct drm_gpusvm_ops *ops,
+			      const unsigned long *chunk_sizes, int num_chunks)
+{
+	gpusvm->name = name;
+	gpusvm->drm = drm;
+	gpusvm->mm = mm;
+	gpusvm->device_private_page_owner = device_private_page_owner;
+	gpusvm->mm_start = mm_start;
+	gpusvm->mm_range = mm_range;
+	gpusvm->notifier_size = notifier_size;
+	gpusvm->ops = ops;
+	gpusvm->chunk_sizes = chunk_sizes;
+	gpusvm->num_chunks = num_chunks;
+
+	if (mm)
+		mmgrab(mm);
+	gpusvm->root = RB_ROOT_CACHED;
+	INIT_LIST_HEAD(&gpusvm->notifier_list);
+
+	init_rwsem(&gpusvm->notifier_lock);
+
+	fs_reclaim_acquire(GFP_KERNEL);
+	might_lock(&gpusvm->notifier_lock);
+	fs_reclaim_release(GFP_KERNEL);
+
+#ifdef CONFIG_LOCKDEP
+	gpusvm->lock_dep_map = NULL;
+#endif
+}
+
 /**
  * drm_gpusvm_init() - Initialize the GPU SVM.
  * @gpusvm: Pointer to the GPU SVM structure.
@@ -552,35 +587,32 @@ int drm_gpusvm_init(struct drm_gpusvm *gpusvm,
 	if (!ops->invalidate || !num_chunks)
 		return -EINVAL;
 
-	gpusvm->name = name;
-	gpusvm->drm = drm;
-	gpusvm->mm = mm;
-	gpusvm->device_private_page_owner = device_private_page_owner;
-	gpusvm->mm_start = mm_start;
-	gpusvm->mm_range = mm_range;
-	gpusvm->notifier_size = notifier_size;
-	gpusvm->ops = ops;
-	gpusvm->chunk_sizes = chunk_sizes;
-	gpusvm->num_chunks = num_chunks;
-
-	mmgrab(mm);
-	gpusvm->root = RB_ROOT_CACHED;
-	INIT_LIST_HEAD(&gpusvm->notifier_list);
-
-	init_rwsem(&gpusvm->notifier_lock);
-
-	fs_reclaim_acquire(GFP_KERNEL);
-	might_lock(&gpusvm->notifier_lock);
-	fs_reclaim_release(GFP_KERNEL);
-
-#ifdef CONFIG_LOCKDEP
-	gpusvm->lock_dep_map = NULL;
-#endif
+	__drm_gpusvm_init(gpusvm, name, drm, mm, device_private_page_owner,
+			  mm_start, mm_range, notifier_size, ops, chunk_sizes,
+			  num_chunks);
 
 	return 0;
 }
 EXPORT_SYMBOL_GPL(drm_gpusvm_init);
 
+static bool drm_gpusvm_is_basic(struct drm_gpusvm *svm)
+{
+	return !svm->mm;
+}
+
+void drm_gpusvm_basic_init(struct drm_gpusvm *gpusvm, const char *name,
+			   struct drm_device *drm)
+{
+	__drm_gpusvm_init(gpusvm, name, drm, NULL, NULL, 0, 0, 0, NULL, NULL,
+			  0);
+}
+EXPORT_SYMBOL_GPL(drm_gpusvm_basic_init);
+
+void drm_gpusvm_basic_fini(struct drm_gpusvm *gpusvm)
+{
+}
+EXPORT_SYMBOL_GPL(drm_gpusvm_basic_fini);
+
 /**
  * drm_gpusvm_notifier_find() - Find GPU SVM notifier
  * @gpusvm: Pointer to the GPU SVM structure
@@ -1001,6 +1033,27 @@ static void drm_gpusvm_driver_lock_held(struct drm_gpusvm *gpusvm)
 }
 #endif
 
+void drm_gpusvm_basic_range_init(struct drm_gpusvm *svm,
+				 struct drm_gpusvm_basic_range *range,
+				 struct mmu_interval_notifier *notifier,
+				 unsigned long *notifier_seq)
+{
+	WARN_ON(!drm_gpusvm_is_basic(svm));
+
+	range->gpusvm = svm;
+	range->notifier = notifier;
+	range->notifier_seq = notifier_seq;
+	*notifier_seq = LONG_MAX;
+	memset(&range->pages, 0, sizeof(range->pages));
+}
+EXPORT_SYMBOL_GPL(drm_gpusvm_basic_range_init);
+
+void drm_gpusvm_basic_range_fini(struct drm_gpusvm_basic_range *range)
+{
+	WARN_ON(range->pages.flags.has_dma_mapping);
+}
+EXPORT_SYMBOL_GPL(drm_gpusvm_basic_range_fini);
+
 /**
  * drm_gpusvm_range_find_or_insert() - Find or insert GPU SVM range
  * @gpusvm: Pointer to the GPU SVM structure
@@ -1176,6 +1229,19 @@ static void drm_gpusvm_free_pages(struct drm_gpusvm *gpusvm,
 	}
 }
 
+void drm_gpusvm_basic_range_free_pages(struct drm_gpusvm_basic_range *range)
+{
+	unsigned long npages =
+		npages_in_range(range->notifier->interval_tree.start,
+				range->notifier->interval_tree.last + 1);
+
+	drm_gpusvm_notifier_lock(range->gpusvm);
+	__drm_gpusvm_unmap_pages(range->gpusvm, &range->pages, npages);
+	drm_gpusvm_free_pages(range->gpusvm, &range->pages);
+	drm_gpusvm_notifier_unlock(range->gpusvm);
+}
+EXPORT_SYMBOL_GPL(drm_gpusvm_basic_range_free_pages);
+
 /**
  * drm_gpusvm_range_remove() - Remove GPU SVM range
  * @gpusvm: Pointer to the GPU SVM structure
@@ -1545,6 +1611,20 @@ int drm_gpusvm_range_get_pages(struct drm_gpusvm *gpusvm,
 }
 EXPORT_SYMBOL_GPL(drm_gpusvm_range_get_pages);
 
+int drm_gpusvm_basic_range_get_pages(struct drm_gpusvm_basic_range *range,
+				     const struct drm_gpusvm_ctx *ctx)
+{
+	drm_gpusvm_driver_lock_held(range->gpusvm);
+
+	return drm_gpusvm_get_pages(range->gpusvm, &range->pages,
+				    range->notifier->mm, range->notifier,
+				    range->notifier_seq,
+				    range->notifier->interval_tree.start,
+				    range->notifier->interval_tree.last + 1,
+				    ctx);
+}
+EXPORT_SYMBOL_GPL(drm_gpusvm_basic_range_get_pages);
+
 static void drm_gpusvm_unmap_pages(struct drm_gpusvm *gpusvm,
 				   unsigned long mm_start, unsigned long mm_end,
 				   struct drm_gpusvm_pages *svm_pages,
@@ -1585,6 +1665,16 @@ void drm_gpusvm_range_unmap_pages(struct drm_gpusvm *gpusvm,
 }
 EXPORT_SYMBOL_GPL(drm_gpusvm_range_unmap_pages);
 
+void drm_gpusvm_basic_range_unmap_pages(struct drm_gpusvm_basic_range *range,
+					const struct drm_gpusvm_ctx *ctx)
+{
+	drm_gpusvm_unmap_pages(range->gpusvm,
+			       range->notifier->interval_tree.start,
+			       range->notifier->interval_tree.last + 1,
+			       &range->pages, ctx);
+}
+EXPORT_SYMBOL_GPL(drm_gpusvm_basic_range_unmap_pages);
+
 /**
  * drm_gpusvm_migration_unlock_put_page() - Put a migration page
  * @page: Pointer to the page to put
diff --git a/include/drm/drm_gpusvm.h b/include/drm/drm_gpusvm.h
index c15c733ef0ad..82c4e58fa84c 100644
--- a/include/drm/drm_gpusvm.h
+++ b/include/drm/drm_gpusvm.h
@@ -305,6 +305,29 @@ struct drm_gpusvm_ctx {
 	unsigned int devmem_possible :1;
 };
 
+struct drm_gpusvm_basic_range {
+	struct drm_gpusvm *gpusvm;
+	struct drm_gpusvm_pages pages;
+	struct mmu_interval_notifier *notifier;
+	unsigned long *notifier_seq;
+};
+
+void drm_gpusvm_basic_init(struct drm_gpusvm *gpusvm, const char *name,
+			   struct drm_device *drm);
+void drm_gpusvm_basic_fini(struct drm_gpusvm *gpusvm);
+
+void drm_gpusvm_basic_range_init(struct drm_gpusvm *svm,
+				 struct drm_gpusvm_basic_range *range,
+				 struct mmu_interval_notifier *notifier,
+				 unsigned long *notifier_seq);
+void drm_gpusvm_basic_range_fini(struct drm_gpusvm_basic_range *range);
+
+int drm_gpusvm_basic_range_get_pages(struct drm_gpusvm_basic_range *range,
+				     const struct drm_gpusvm_ctx *ctx);
+void drm_gpusvm_basic_range_unmap_pages(struct drm_gpusvm_basic_range *range,
+					const struct drm_gpusvm_ctx *ctx);
+void drm_gpusvm_basic_range_free_pages(struct drm_gpusvm_basic_range *range);
+
 int drm_gpusvm_init(struct drm_gpusvm *gpusvm,
 		    const char *name, struct drm_device *drm,
 		    struct mm_struct *mm, void *device_private_page_owner,
-- 
2.48.1


  parent reply	other threads:[~2025-03-20 17:32 UTC|newest]

Thread overview: 25+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2025-03-20 17:29 [PATCH 0/7] Replace xe_hmm with gpusvm Matthew Auld
2025-03-20 17:29 ` [PATCH 1/7] drm/gpusvm: fix hmm_pfn_to_map_order() usage Matthew Auld
2025-03-20 19:52   ` Thomas Hellström
2025-03-20 20:40   ` Matthew Brost
2025-03-21 11:42     ` Matthew Auld
2025-03-20 17:29 ` [PATCH 2/7] drm/gpusvm: use more selective dma dir in get_pages() Matthew Auld
2025-03-20 19:31   ` Thomas Hellström
2025-03-20 19:37   ` Matthew Brost
2025-03-20 17:30 ` [PATCH 3/7] drm/gpusvm: mark pages as dirty Matthew Auld
2025-03-20 19:29   ` Thomas Hellström
2025-03-20 19:33     ` Matthew Brost
2025-03-21 11:37       ` Matthew Auld
2025-03-20 17:30 ` [PATCH 4/7] drm/gpusvm: pull out drm_gpusvm_pages substructure Matthew Auld
2025-03-20 20:43   ` Matthew Brost
2025-03-21 11:53     ` Matthew Auld
2025-03-20 17:30 ` [PATCH 5/7] drm/gpusvm: lower get/unmap pages Matthew Auld
2025-03-20 20:59   ` Matthew Brost
2025-03-20 17:30 ` Matthew Auld [this message]
2025-03-20 21:04   ` [PATCH 6/7] drm/gpusvm: support basic_range Matthew Brost
2025-03-20 17:30 ` [PATCH 7/7] drm/xe/userptr: replace xe_hmm with gpusvm Matthew Auld
2025-03-20 20:52   ` Matthew Brost
2025-03-25  9:50     ` Ghimiray, Himal Prasad
2025-03-20 17:37 ` ✓ CI.Patch_applied: success for Replace " Patchwork
2025-03-20 17:37 ` ✗ CI.checkpatch: warning " Patchwork
2025-03-20 17:38 ` ✗ CI.KUnit: failure " Patchwork

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20250320172956.168358-15-matthew.auld@intel.com \
    --to=matthew.auld@intel.com \
    --cc=dri-devel@lists.freedesktop.org \
    --cc=intel-xe@lists.freedesktop.org \
    --cc=matthew.brost@intel.com \
    --cc=thomas.hellstrom@linux.intel.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox