From: Matthew Auld <matthew.auld@intel.com>
To: intel-xe@lists.freedesktop.org
Cc: dri-devel@lists.freedesktop.org,
"Thomas Hellström" <thomas.hellstrom@linux.intel.com>,
"Matthew Brost" <matthew.brost@intel.com>
Subject: [PATCH v4 5/8] drm/gpusvm: export drm_gpusvm_pages API
Date: Mon, 12 May 2025 16:06:43 +0100 [thread overview]
Message-ID: <20250512150637.61462-15-matthew.auld@intel.com> (raw)
In-Reply-To: <20250512150637.61462-10-matthew.auld@intel.com>
Export get/unmap/free pages API. We also need to tweak the SVM init to
allow skipping much of the unneeded setup.
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
---
drivers/gpu/drm/drm_gpusvm.c | 66 ++++++++++++++++++++++++++++--------
include/drm/drm_gpusvm.h | 16 +++++++++
2 files changed, 67 insertions(+), 15 deletions(-)
diff --git a/drivers/gpu/drm/drm_gpusvm.c b/drivers/gpu/drm/drm_gpusvm.c
index f998f3fa69fe..eac7f9b165f9 100644
--- a/drivers/gpu/drm/drm_gpusvm.c
+++ b/drivers/gpu/drm/drm_gpusvm.c
@@ -539,6 +539,12 @@ static const struct mmu_interval_notifier_ops drm_gpusvm_notifier_ops = {
*
* This function initializes the GPU SVM.
*
+ * Note: If only using the simple drm_gpusvm_pages API (get/unmap/free),
+ * then only @gpusvm, @name, and @drm are expected. However, the same base
+ * @gpusvm can also be used with both modes together in which case the full
+ * setup is needed, where the core drm_gpusvm_pages API will simply never use
+ * the other fields.
+ *
* Return: 0 on success, a negative error code on failure.
*/
int drm_gpusvm_init(struct drm_gpusvm *gpusvm,
@@ -549,8 +555,16 @@ int drm_gpusvm_init(struct drm_gpusvm *gpusvm,
const struct drm_gpusvm_ops *ops,
const unsigned long *chunk_sizes, int num_chunks)
{
- if (!ops->invalidate || !num_chunks)
- return -EINVAL;
+ if (mm) {
+ if (!ops->invalidate || !num_chunks)
+ return -EINVAL;
+ mmgrab(mm);
+ } else {
+ /* No full SVM mode, only core drm_gpusvm_pages API. */
+ if (ops || num_chunks || mm_range || notifier_size ||
+ device_private_page_owner)
+ return -EINVAL;
+ }
gpusvm->name = name;
gpusvm->drm = drm;
@@ -563,7 +577,6 @@ int drm_gpusvm_init(struct drm_gpusvm *gpusvm,
gpusvm->chunk_sizes = chunk_sizes;
gpusvm->num_chunks = num_chunks;
- mmgrab(mm);
gpusvm->root = RB_ROOT_CACHED;
INIT_LIST_HEAD(&gpusvm->notifier_list);
@@ -671,7 +684,8 @@ void drm_gpusvm_fini(struct drm_gpusvm *gpusvm)
drm_gpusvm_range_remove(gpusvm, range);
}
- mmdrop(gpusvm->mm);
+ if (gpusvm->mm)
+ mmdrop(gpusvm->mm);
WARN_ON(!RB_EMPTY_ROOT(&gpusvm->root.rb_root));
}
EXPORT_SYMBOL_GPL(drm_gpusvm_fini);
@@ -1184,6 +1198,27 @@ static void __drm_gpusvm_free_pages(struct drm_gpusvm *gpusvm,
}
}
+/**
+ * drm_gpusvm_free_pages() - Free dma-mapping associated with GPU SVM pages
+ * struct
+ * @gpusvm: Pointer to the GPU SVM structure
+ * @svm_pages: Pointer to the GPU SVM pages structure
+ * @npages: Number of mapped pages
+ *
+ * This function unmaps and frees the dma address array associated with a GPU
+ * SVM pages struct.
+ */
+void drm_gpusvm_free_pages(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_pages *svm_pages,
+ unsigned long npages)
+{
+ drm_gpusvm_notifier_lock(gpusvm);
+ __drm_gpusvm_unmap_pages(gpusvm, svm_pages, npages);
+ __drm_gpusvm_free_pages(gpusvm, svm_pages);
+ drm_gpusvm_notifier_unlock(gpusvm);
+}
+EXPORT_SYMBOL_GPL(drm_gpusvm_free_pages);
+
/**
* drm_gpusvm_range_remove() - Remove GPU SVM range
* @gpusvm: Pointer to the GPU SVM structure
@@ -1359,13 +1394,12 @@ static bool drm_gpusvm_pages_valid_unlocked(struct drm_gpusvm *gpusvm,
*
* Return: 0 on success, negative error code on failure.
*/
-static int drm_gpusvm_get_pages(struct drm_gpusvm *gpusvm,
- struct drm_gpusvm_pages *svm_pages,
- struct mm_struct *mm,
- struct mmu_interval_notifier *notifier,
- unsigned long pages_start,
- unsigned long pages_end,
- const struct drm_gpusvm_ctx *ctx)
+int drm_gpusvm_get_pages(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_pages *svm_pages,
+ struct mm_struct *mm,
+ struct mmu_interval_notifier *notifier,
+ unsigned long pages_start, unsigned long pages_end,
+ const struct drm_gpusvm_ctx *ctx)
{
struct hmm_range hmm_range = {
.default_flags = HMM_PFN_REQ_FAULT | (ctx->read_only ? 0 :
@@ -1543,6 +1577,7 @@ static int drm_gpusvm_get_pages(struct drm_gpusvm *gpusvm,
goto retry;
return err;
}
+EXPORT_SYMBOL_GPL(drm_gpusvm_get_pages);
/**
* drm_gpusvm_range_get_pages() - Get pages for a GPU SVM range
@@ -1578,10 +1613,10 @@ EXPORT_SYMBOL_GPL(drm_gpusvm_range_get_pages);
* Must be called in the invalidate() callback of the corresponding notifier for
* IOMMU security model.
*/
-static void drm_gpusvm_unmap_pages(struct drm_gpusvm *gpusvm,
- struct drm_gpusvm_pages *svm_pages,
- unsigned long npages,
- const struct drm_gpusvm_ctx *ctx)
+void drm_gpusvm_unmap_pages(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_pages *svm_pages,
+ unsigned long npages,
+ const struct drm_gpusvm_ctx *ctx)
{
if (ctx->in_notifier)
lockdep_assert_held_write(&gpusvm->notifier_lock);
@@ -1593,6 +1628,7 @@ static void drm_gpusvm_unmap_pages(struct drm_gpusvm *gpusvm,
if (!ctx->in_notifier)
drm_gpusvm_notifier_unlock(gpusvm);
}
+EXPORT_SYMBOL_GPL(drm_gpusvm_unmap_pages);
/**
* drm_gpusvm_range_unmap_pages() - Unmap pages associated with a GPU SVM range
diff --git a/include/drm/drm_gpusvm.h b/include/drm/drm_gpusvm.h
index 1b7ed4f4a8e2..611aaba1ac80 100644
--- a/include/drm/drm_gpusvm.h
+++ b/include/drm/drm_gpusvm.h
@@ -370,6 +370,22 @@ void drm_gpusvm_devmem_init(struct drm_gpusvm_devmem *devmem_allocation,
const struct drm_gpusvm_devmem_ops *ops,
struct drm_pagemap *dpagemap, size_t size);
+int drm_gpusvm_get_pages(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_pages *svm_pages,
+ struct mm_struct *mm,
+ struct mmu_interval_notifier *notifier,
+ unsigned long pages_start, unsigned long pages_end,
+ const struct drm_gpusvm_ctx *ctx);
+
+void drm_gpusvm_unmap_pages(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_pages *svm_pages,
+ unsigned long npages,
+ const struct drm_gpusvm_ctx *ctx);
+
+void drm_gpusvm_free_pages(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_pages *svm_pages,
+ unsigned long npages);
+
#ifdef CONFIG_LOCKDEP
/**
* drm_gpusvm_driver_set_lock() - Set the lock protecting accesses to GPU SVM
--
2.49.0
next prev parent reply other threads:[~2025-05-12 15:07 UTC|newest]
Thread overview: 17+ messages / expand[flat|nested] mbox.gz Atom feed top
2025-05-12 15:06 [PATCH v4 0/8] Replace xe_hmm with gpusvm Matthew Auld
2025-05-12 15:06 ` [PATCH v4 1/8] drm/gpusvm: fix hmm_pfn_to_map_order() usage Matthew Auld
2025-05-12 15:06 ` [PATCH v4 2/8] drm/gpusvm: use more selective dma dir in get_pages() Matthew Auld
2025-05-12 15:06 ` [PATCH v4 3/8] drm/gpusvm: pull out drm_gpusvm_pages substructure Matthew Auld
2025-05-12 15:06 ` [PATCH v4 4/8] drm/gpusvm: refactor core API to use pages struct Matthew Auld
2025-05-12 15:06 ` Matthew Auld [this message]
2025-05-12 15:06 ` [PATCH v4 6/8] drm/xe/vm: split userptr bits into separate file Matthew Auld
2025-06-09 17:05 ` Matthew Brost
2025-05-12 15:06 ` [PATCH v4 7/8] drm/xe/userptr: replace xe_hmm with gpusvm Matthew Auld
2025-06-09 17:09 ` Matthew Brost
2025-05-12 15:06 ` [PATCH v4 8/8] drm/xe/pt: unify xe_pt_svm_pre_commit with userptr Matthew Auld
2025-05-12 16:27 ` ✓ CI.Patch_applied: success for Replace xe_hmm with gpusvm (rev4) Patchwork
2025-05-12 16:27 ` ✗ CI.checkpatch: warning " Patchwork
2025-05-12 16:28 ` ✓ CI.KUnit: success " Patchwork
2025-05-12 16:38 ` ✗ CI.Build: failure " Patchwork
2025-05-13 11:04 ` ✗ CI.Patch_applied: " Patchwork
2025-05-26 21:59 ` Patchwork
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20250512150637.61462-15-matthew.auld@intel.com \
--to=matthew.auld@intel.com \
--cc=dri-devel@lists.freedesktop.org \
--cc=intel-xe@lists.freedesktop.org \
--cc=matthew.brost@intel.com \
--cc=thomas.hellstrom@linux.intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox