From: Rob Clark <robdclark@gmail.com>
To: dri-devel@lists.freedesktop.org
Cc: freedreno@lists.freedesktop.org, linux-arm-msm@vger.kernel.org,
Rob Clark <robdclark@chromium.org>,
Maarten Lankhorst <maarten.lankhorst@linux.intel.com>,
Maxime Ripard <mripard@kernel.org>,
Thomas Zimmermann <tzimmermann@suse.de>,
David Airlie <airlied@gmail.com>, Simona Vetter <simona@ffwll.ch>,
linux-kernel@vger.kernel.org (open list)
Subject: [PATCH v2 04/34] drm/gpuvm: Add drm_gpuvm_sm_unmap_va()
Date: Wed, 19 Mar 2025 07:52:16 -0700
Message-ID: <20250319145425.51935-5-robdclark@gmail.com>
In-Reply-To: <20250319145425.51935-1-robdclark@gmail.com>
From: Rob Clark <robdclark@chromium.org>
Add drm_gpuvm_sm_unmap_va(), which works similarly to
drm_gpuvm_sm_unmap(), except that it operates on a single VA
at a time.
This is needed by drm/msm, where lock ordering prevents reusing
drm_gpuvm_sm_unmap() as-is. This helper lets us reuse the logic
that decides between the remap and unmap cases.
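As a rough sketch of the intended use (not part of this patch; the
msm_* names and op_arg below are hypothetical placeholders), a driver
can do its own iteration and per-VA preparation before handing each VA
to the helper:

  static int
  msm_unmap_range(struct drm_gpuvm *vm, void *op_arg, u64 addr, u64 range)
  {
          struct drm_gpuva *va, *next;
          int ret;

          drm_gpuvm_for_each_va_range_safe(va, next, vm, addr, addr + range) {
                  /* e.g. take per-VA locks or pre-allocate pagetable
                   * memory in whatever order the driver requires before
                   * the sm_step_remap()/sm_step_unmap() callbacks run
                   */
                  ret = msm_prepare_unmap(va);
                  if (ret)
                          return ret;

                  ret = drm_gpuvm_sm_unmap_va(va, op_arg, addr, range);
                  if (ret)
                          return ret;
          }

          return 0;
  }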
Signed-off-by: Rob Clark <robdclark@chromium.org>
---
drivers/gpu/drm/drm_gpuvm.c | 123 ++++++++++++++++++++++++------------
include/drm/drm_gpuvm.h | 2 +
2 files changed, 85 insertions(+), 40 deletions(-)
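For reviewers, a short illustration (made-up addresses, not part of the
patch) of which callback the new helper emits, assuming a single VA
covering [0x100000, 0x140000):

  /*
   * drm_gpuvm_sm_unmap_va(va, priv, 0x100000, 0x40000);
   *   -> sm_step_unmap(): the requested range fully covers the VA
   *
   * drm_gpuvm_sm_unmap_va(va, priv, 0x110000, 0x10000);
   *   -> sm_step_remap() with prev = [0x100000, 0x110000) and
   *      next = [0x120000, 0x140000)
   *
   * drm_gpuvm_sm_unmap_va(va, priv, 0x120000, 0x20000);
   *   -> sm_step_remap() with only prev = [0x100000, 0x120000)
   */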
diff --git a/drivers/gpu/drm/drm_gpuvm.c b/drivers/gpu/drm/drm_gpuvm.c
index 681dc58e9160..7267fcaa8f50 100644
--- a/drivers/gpu/drm/drm_gpuvm.c
+++ b/drivers/gpu/drm/drm_gpuvm.c
@@ -2244,6 +2244,56 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
req_obj, req_offset);
}
+static int
+__drm_gpuvm_sm_unmap_va(struct drm_gpuva *va, const struct drm_gpuvm_ops *ops,
+ void *priv, u64 req_addr, u64 req_range)
+{
+ struct drm_gpuva_op_map prev = {}, next = {};
+ bool prev_split = false, next_split = false;
+ struct drm_gem_object *obj = va->gem.obj;
+ u64 req_end = req_addr + req_range;
+ u64 offset = va->gem.offset;
+ u64 addr = va->va.addr;
+ u64 range = va->va.range;
+ u64 end = addr + range;
+ int ret;
+
+ if (addr < req_addr) {
+ prev.va.addr = addr;
+ prev.va.range = req_addr - addr;
+ prev.gem.obj = obj;
+ prev.gem.offset = offset;
+
+ prev_split = true;
+ }
+
+ if (end > req_end) {
+ next.va.addr = req_end;
+ next.va.range = end - req_end;
+ next.gem.obj = obj;
+ next.gem.offset = offset + (req_end - addr);
+
+ next_split = true;
+ }
+
+ if (prev_split || next_split) {
+ struct drm_gpuva_op_unmap unmap = { .va = va };
+
+ ret = op_remap_cb(ops, priv,
+ prev_split ? &prev : NULL,
+ next_split ? &next : NULL,
+ &unmap);
+ if (ret)
+ return ret;
+ } else {
+ ret = op_unmap_cb(ops, priv, va, false);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
static int
__drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm,
const struct drm_gpuvm_ops *ops, void *priv,
@@ -2257,46 +2307,9 @@ __drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm,
return -EINVAL;
drm_gpuvm_for_each_va_range_safe(va, next, gpuvm, req_addr, req_end) {
- struct drm_gpuva_op_map prev = {}, next = {};
- bool prev_split = false, next_split = false;
- struct drm_gem_object *obj = va->gem.obj;
- u64 offset = va->gem.offset;
- u64 addr = va->va.addr;
- u64 range = va->va.range;
- u64 end = addr + range;
-
- if (addr < req_addr) {
- prev.va.addr = addr;
- prev.va.range = req_addr - addr;
- prev.gem.obj = obj;
- prev.gem.offset = offset;
-
- prev_split = true;
- }
-
- if (end > req_end) {
- next.va.addr = req_end;
- next.va.range = end - req_end;
- next.gem.obj = obj;
- next.gem.offset = offset + (req_end - addr);
-
- next_split = true;
- }
-
- if (prev_split || next_split) {
- struct drm_gpuva_op_unmap unmap = { .va = va };
-
- ret = op_remap_cb(ops, priv,
- prev_split ? &prev : NULL,
- next_split ? &next : NULL,
- &unmap);
- if (ret)
- return ret;
- } else {
- ret = op_unmap_cb(ops, priv, va, false);
- if (ret)
- return ret;
- }
+ ret = __drm_gpuvm_sm_unmap_va(va, ops, priv, req_addr, req_range);
+ if (ret)
+ return ret;
}
return 0;
@@ -2394,6 +2407,36 @@ drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm, void *priv,
}
EXPORT_SYMBOL_GPL(drm_gpuvm_sm_unmap);
+/**
+ * drm_gpuvm_sm_unmap_va() - like drm_gpuvm_sm_unmap() but operating on a single VA
+ * @va: the &drm_gpuva to unmap from
+ * @priv: pointer to a driver private data structure
+ * @req_addr: the start address of the range to unmap
+ * @req_range: the range of the mappings to unmap
+ *
+ * This function operates on a single VA. It utilizes the
+ * &drm_gpuvm_ops to call back into the driver providing the operations to
+ * unmap and, if required, split the existent mapping.
+ *
+ * There can be at most one unmap or remap operation, depending on whether the
+ * requested unmap range fully covers or partially covers the specified VA.
+ *
+ * Returns: 0 on success or a negative error code
+ */
+int
+drm_gpuvm_sm_unmap_va(struct drm_gpuva *va, void *priv,
+ u64 req_addr, u64 req_range)
+{
+ const struct drm_gpuvm_ops *ops = va->vm->ops;
+
+ if (unlikely(!(ops && ops->sm_step_remap &&
+ ops->sm_step_unmap)))
+ return -EINVAL;
+
+ return __drm_gpuvm_sm_unmap_va(va, ops, priv, req_addr, req_range);
+}
+EXPORT_SYMBOL_GPL(drm_gpuvm_sm_unmap_va);
+
static struct drm_gpuva_op *
gpuva_op_alloc(struct drm_gpuvm *gpuvm)
{
diff --git a/include/drm/drm_gpuvm.h b/include/drm/drm_gpuvm.h
index 13ab087a45fa..e1f488eb714e 100644
--- a/include/drm/drm_gpuvm.h
+++ b/include/drm/drm_gpuvm.h
@@ -1213,6 +1213,8 @@ int drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, void *priv,
int drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm, void *priv,
u64 addr, u64 range);
+int drm_gpuvm_sm_unmap_va(struct drm_gpuva *va, void *priv,
+ u64 req_addr, u64 req_range);
void drm_gpuva_map(struct drm_gpuvm *gpuvm,
struct drm_gpuva *va,
--
2.48.1