From: Rob Clark <robdclark@gmail.com>
To: dri-devel@lists.freedesktop.org
Cc: freedreno@lists.freedesktop.org, linux-arm-msm@vger.kernel.org,
Rob Clark <robdclark@chromium.org>,
Rob Clark <robdclark@gmail.com>,
Abhinav Kumar <quic_abhinavk@quicinc.com>,
Dmitry Baryshkov <lumag@kernel.org>, Sean Paul <sean@poorly.run>,
Marijn Suijten <marijn.suijten@somainline.org>,
David Airlie <airlied@gmail.com>, Simona Vetter <simona@ffwll.ch>,
linux-kernel@vger.kernel.org (open list)
Subject: [PATCH v2 24/34] drm/msm: Split msm_gem_vma_new()
Date: Wed, 19 Mar 2025 07:52:36 -0700
Message-ID: <20250319145425.51935-25-robdclark@gmail.com>
In-Reply-To: <20250319145425.51935-1-robdclark@gmail.com>

From: Rob Clark <robdclark@chromium.org>

Split memory allocation out from vma initialization. Async vm-bind
runs in the fence signalling path, where blocking memory allocation is
not allowed (it can recurse into reclaim, which may in turn wait on
fences), so it will need to work from pre-allocated memory.

Signed-off-by: Rob Clark <robdclark@chromium.org>
---
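A rough sketch of how the split is intended to be used (illustrative
only: msm_vm_bind_prealloc() and msm_vm_bind_run() are hypothetical
names, the real async VM_BIND path is added by later patches in this
series):

    /* Process context (e.g. ioctl): blocking allocation is fine here. */
    static struct msm_gem_vma *msm_vm_bind_prealloc(void)
    {
            return kzalloc(sizeof(struct msm_gem_vma), GFP_KERNEL);
    }

    /*
     * Fence signalling path: no memory allocation allowed, only
     * initialization of the pre-allocated vma under the VM lock.
     */
    static struct drm_gpuva *
    msm_vm_bind_run(struct msm_gem_vma *vma, struct drm_gpuvm *gpuvm,
                    struct drm_gem_object *obj, u64 offset,
                    u64 range_start, u64 range_end)
    {
            struct msm_gem_vm *vm = to_msm_vm(gpuvm);
            struct drm_gpuva *va;

            mutex_lock(&vm->vm_lock);
            va = __vma_init(vma, gpuvm, obj, offset, range_start, range_end);
            mutex_unlock(&vm->vm_lock);

            return va;
    }
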
drivers/gpu/drm/msm/msm_gem_vma.c | 67 ++++++++++++++++++++++---------
1 file changed, 49 insertions(+), 18 deletions(-)
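For context (not part of this patch): fence signalling critical
sections are typically annotated with dma_fence_begin/end_signalling()
so that lockdep flags blocking allocations, which is why the async
path has to work from pre-allocated memory. A minimal sketch, assuming
a hypothetical drm_sched run_job() style callback:

    static struct dma_fence *msm_vm_bind_run_job(struct drm_sched_job *job)
    {
            bool cookie;

            /*
             * Everything between begin/end_signalling() is a fence
             * signalling critical section: lockdep will complain about
             * GFP_KERNEL allocations here, since reclaim may in turn
             * wait on fences.
             */
            cookie = dma_fence_begin_signalling();

            /* ... apply VM updates using only pre-allocated memory ... */

            dma_fence_end_signalling(cookie);

            return NULL; /* illustrative; a real job returns its fence */
    }
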
diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c
index baa5c6a0ff22..7d40b151aa95 100644
--- a/drivers/gpu/drm/msm/msm_gem_vma.c
+++ b/drivers/gpu/drm/msm/msm_gem_vma.c
@@ -71,40 +71,54 @@ msm_gem_vma_map(struct drm_gpuva *vma, int prot, struct sg_table *sgt)
return ret;
}
-/* Close an iova. Warn if it is still in use */
-void msm_gem_vma_close(struct drm_gpuva *vma)
+static void __vma_close(struct drm_gpuva *vma)
{
struct msm_gem_vm *vm = to_msm_vm(vma->vm);
struct msm_gem_vma *msm_vma = to_msm_vma(vma);
GEM_WARN_ON(msm_vma->mapped);
+ GEM_WARN_ON(!mutex_is_locked(&vm->vm_lock));
spin_lock(&vm->mm_lock);
if (vma->va.addr && vm->managed)
drm_mm_remove_node(&msm_vma->node);
spin_unlock(&vm->mm_lock);
- dma_resv_lock(drm_gpuvm_resv(vma->vm), NULL);
- mutex_lock(&vm->vm_lock);
drm_gpuva_remove(vma);
drm_gpuva_unlink(vma);
- mutex_unlock(&vm->vm_lock);
- dma_resv_unlock(drm_gpuvm_resv(vma->vm));
kfree(vma);
}
-/* Create a new vma and allocate an iova for it */
-struct drm_gpuva *
-msm_gem_vma_new(struct drm_gpuvm *_vm, struct drm_gem_object *obj,
- u64 offset, u64 range_start, u64 range_end)
+/* Close an iova. Warn if it is still in use */
+void msm_gem_vma_close(struct drm_gpuva *vma)
+{
+ struct msm_gem_vm *vm = to_msm_vm(vma->vm);
+
+ /*
+ * Only used in a legacy (kernel managed) VM; if userspace is managing
+ * the VM, the legacy paths should be disallowed:
+ */
+ GEM_WARN_ON(!vm->managed);
+
+ dma_resv_lock(drm_gpuvm_resv(vma->vm), NULL);
+ mutex_lock(&vm->vm_lock);
+ __vma_close(vma);
+ mutex_unlock(&vm->vm_lock);
+ dma_resv_unlock(drm_gpuvm_resv(vma->vm));
+}
+
+static struct drm_gpuva *
+__vma_init(struct msm_gem_vma *vma, struct drm_gpuvm *_vm,
+ struct drm_gem_object *obj, u64 offset,
+ u64 range_start, u64 range_end)
{
struct msm_gem_vm *vm = to_msm_vm(_vm);
struct drm_gpuvm_bo *vm_bo;
- struct msm_gem_vma *vma;
int ret;
- vma = kzalloc(sizeof(*vma), GFP_KERNEL);
+ GEM_WARN_ON(!mutex_is_locked(&vm->vm_lock));
+
if (!vma)
return ERR_PTR(-ENOMEM);
@@ -128,9 +142,7 @@ msm_gem_vma_new(struct drm_gpuvm *_vm, struct drm_gem_object *obj,
drm_gpuva_init(&vma->base, range_start, range_end - range_start, obj, offset);
vma->mapped = false;
- mutex_lock(&vm->vm_lock);
ret = drm_gpuva_insert(&vm->base, &vma->base);
- mutex_unlock(&vm->vm_lock);
if (ret)
goto err_free_range;
@@ -140,17 +152,13 @@ msm_gem_vma_new(struct drm_gpuvm *_vm, struct drm_gem_object *obj,
goto err_va_remove;
}
- mutex_lock(&vm->vm_lock);
drm_gpuva_link(&vma->base, vm_bo);
- mutex_unlock(&vm->vm_lock);
GEM_WARN_ON(drm_gpuvm_bo_put(vm_bo));
return &vma->base;
err_va_remove:
- mutex_lock(&vm->vm_lock);
drm_gpuva_remove(&vma->base);
- mutex_unlock(&vm->vm_lock);
err_free_range:
if (vm->managed)
drm_mm_remove_node(&vma->node);
@@ -159,6 +167,29 @@ msm_gem_vma_new(struct drm_gpuvm *_vm, struct drm_gem_object *obj,
return ERR_PTR(ret);
}
+/* Create a new vma and allocate an iova for it */
+struct drm_gpuva *
+msm_gem_vma_new(struct drm_gpuvm *_vm, struct drm_gem_object *obj,
+ u64 offset, u64 range_start, u64 range_end)
+{
+ struct msm_gem_vm *vm = to_msm_vm(_vm);
+ struct msm_gem_vma *vma;
+ struct drm_gpuva *va;
+
+ /*
+ * Only used in a legacy (kernel managed) VM; if userspace is managing
+ * the VM, the legacy paths should be disallowed:
+ */
+ GEM_WARN_ON(!vm->managed);
+
+ vma = kzalloc(sizeof(*vma), GFP_KERNEL);
+
+ mutex_lock(&vm->vm_lock);
+ va = __vma_init(vma, _vm, obj, offset, range_start, range_end);
+ mutex_unlock(&vm->vm_lock);
+
+ return va;
+}
+
static const struct drm_gpuvm_ops msm_gpuvm_ops = {
.vm_free = msm_gem_vm_free,
};
--
2.48.1