From: Rob Clark <robdclark@gmail.com>
To: dri-devel@lists.freedesktop.org
Cc: freedreno@lists.freedesktop.org, linux-arm-msm@vger.kernel.org,
Connor Abbott <cwabbott0@gmail.com>,
Rob Clark <robdclark@chromium.org>,
Rob Clark <robdclark@gmail.com>, Sean Paul <sean@poorly.run>,
Konrad Dybcio <konradybcio@kernel.org>,
Abhinav Kumar <quic_abhinavk@quicinc.com>,
Dmitry Baryshkov <lumag@kernel.org>,
Marijn Suijten <marijn.suijten@somainline.org>,
David Airlie <airlied@gmail.com>, Simona Vetter <simona@ffwll.ch>,
linux-kernel@vger.kernel.org (open list)
Subject: [PATCH v4 21/40] drm/msm: Lazily create context VM
Date: Wed, 14 May 2025 10:53:35 -0700 [thread overview]
Message-ID: <20250514175527.42488-22-robdclark@gmail.com> (raw)
In-Reply-To: <20250514175527.42488-1-robdclark@gmail.com>
From: Rob Clark <robdclark@chromium.org>
In the next commit, a way for userspace to opt in to a userspace-managed
VM is added. For this to work, we need to defer creation of the VM
until it is needed.
Signed-off-by: Rob Clark <robdclark@chromium.org>
---
drivers/gpu/drm/msm/adreno/a6xx_gpu.c | 3 ++-
drivers/gpu/drm/msm/adreno/adreno_gpu.c | 14 +++++++-----
drivers/gpu/drm/msm/msm_drv.c | 29 ++++++++++++++++++++-----
drivers/gpu/drm/msm/msm_gem_submit.c | 2 +-
drivers/gpu/drm/msm/msm_gpu.h | 9 +++++++-
5 files changed, 43 insertions(+), 14 deletions(-)
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index 7f7dcdd1f97d..bfc11f6bda97 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -112,6 +112,7 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu,
{
bool sysprof = refcount_read(&a6xx_gpu->base.base.sysprof_active) > 1;
struct msm_context *ctx = submit->queue->ctx;
+ struct drm_gpuvm *vm = msm_context_vm(submit->dev, ctx);
struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
phys_addr_t ttbr;
u32 asid;
@@ -120,7 +121,7 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu,
if (ctx->seqno == ring->cur_ctx_seqno)
return;
- if (msm_iommu_pagetable_params(to_msm_vm(ctx->vm)->mmu, &ttbr, &asid))
+ if (msm_iommu_pagetable_params(to_msm_vm(vm)->mmu, &ttbr, &asid))
return;
if (adreno_gpu->info->family >= ADRENO_7XX_GEN1) {
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index cb4ee277721d..7e50de5c5110 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -373,6 +373,8 @@ int adreno_get_param(struct msm_gpu *gpu, struct msm_context *ctx,
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct drm_device *drm = gpu->dev;
+ /* Note ctx can be NULL when called from rd_open(): */
+ struct drm_gpuvm *vm = ctx ? msm_context_vm(drm, ctx) : NULL;
/* No pointer params yet */
if (*len != 0)
@@ -418,8 +420,8 @@ int adreno_get_param(struct msm_gpu *gpu, struct msm_context *ctx,
*value = 0;
return 0;
case MSM_PARAM_FAULTS:
- if (ctx->vm)
- *value = gpu->global_faults + to_msm_vm(ctx->vm)->faults;
+ if (vm)
+ *value = gpu->global_faults + to_msm_vm(vm)->faults;
else
*value = gpu->global_faults;
return 0;
@@ -427,14 +429,14 @@ int adreno_get_param(struct msm_gpu *gpu, struct msm_context *ctx,
*value = gpu->suspend_count;
return 0;
case MSM_PARAM_VA_START:
- if (ctx->vm == gpu->vm)
+ if (vm == gpu->vm)
return UERR(EINVAL, drm, "requires per-process pgtables");
- *value = ctx->vm->mm_start;
+ *value = vm->mm_start;
return 0;
case MSM_PARAM_VA_SIZE:
- if (ctx->vm == gpu->vm)
+ if (vm == gpu->vm)
return UERR(EINVAL, drm, "requires per-process pgtables");
- *value = ctx->vm->mm_range;
+ *value = vm->mm_range;
return 0;
case MSM_PARAM_HIGHEST_BANK_BIT:
*value = adreno_gpu->ubwc_config.highest_bank_bit;
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 5909720be48d..ac8a5b072afe 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -214,10 +214,29 @@ static void load_gpu(struct drm_device *dev)
mutex_unlock(&init_lock);
}
+/**
+ * msm_context_vm - lazily create the context's VM
+ *
+ * @dev: the drm device
+ * @ctx: the context
+ *
+ * The VM is lazily created, so that userspace has a chance to opt-in to having
+ * a userspace managed VM before the VM is created.
+ *
+ * Note that this does not return a reference to the VM. Once the VM is created,
+ * it exists for the lifetime of the context.
+ */
+struct drm_gpuvm *msm_context_vm(struct drm_device *dev, struct msm_context *ctx)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ if (!ctx->vm)
+ ctx->vm = msm_gpu_create_private_vm(priv->gpu, current);
+ return ctx->vm;
+}
+
static int context_init(struct drm_device *dev, struct drm_file *file)
{
static atomic_t ident = ATOMIC_INIT(0);
- struct msm_drm_private *priv = dev->dev_private;
struct msm_context *ctx;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
@@ -230,7 +249,6 @@ static int context_init(struct drm_device *dev, struct drm_file *file)
kref_init(&ctx->ref);
msm_submitqueue_init(dev, ctx);
- ctx->vm = msm_gpu_create_private_vm(priv->gpu, current);
file->driver_priv = ctx;
ctx->seqno = atomic_inc_return(&ident);
@@ -409,7 +427,7 @@ static int msm_ioctl_gem_info_iova(struct drm_device *dev,
* Don't pin the memory here - just get an address so that userspace can
* be productive
*/
- return msm_gem_get_iova(obj, ctx->vm, iova);
+ return msm_gem_get_iova(obj, msm_context_vm(dev, ctx), iova);
}
static int msm_ioctl_gem_info_set_iova(struct drm_device *dev,
@@ -418,18 +436,19 @@ static int msm_ioctl_gem_info_set_iova(struct drm_device *dev,
{
struct msm_drm_private *priv = dev->dev_private;
struct msm_context *ctx = file->driver_priv;
+ struct drm_gpuvm *vm = msm_context_vm(dev, ctx);
if (!priv->gpu)
return -EINVAL;
/* Only supported if per-process address space is supported: */
- if (priv->gpu->vm == ctx->vm)
+ if (priv->gpu->vm == vm)
return UERR(EOPNOTSUPP, dev, "requires per-process pgtables");
if (should_fail(&fail_gem_iova, obj->size))
return -ENOMEM;
- return msm_gem_set_iova(obj, ctx->vm, iova);
+ return msm_gem_set_iova(obj, vm, iova);
}
static int msm_ioctl_gem_info_set_metadata(struct drm_gem_object *obj,
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index c4569e7b5a02..7a9bd20363dd 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -63,7 +63,7 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
kref_init(&submit->ref);
submit->dev = dev;
- submit->vm = queue->ctx->vm;
+ submit->vm = msm_context_vm(dev, queue->ctx);
submit->gpu = gpu;
submit->cmd = (void *)&submit->bos[nr_bos];
submit->queue = queue;
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index bfaec80e5f2d..d1530de96315 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -370,7 +370,12 @@ struct msm_context {
*/
bool closed;
- /** @vm: the per-process GPU address-space */
+ /**
+ * @vm:
+ *
+ * The per-process GPU address-space. Do not access directly, use
+ * msm_context_vm().
+ */
struct drm_gpuvm *vm;
/** @kref: the reference count */
@@ -455,6 +460,8 @@ struct msm_context {
atomic64_t ctx_mem;
};
+struct drm_gpuvm *msm_context_vm(struct drm_device *dev, struct msm_context *ctx);
+
/**
* msm_gpu_convert_priority - Map userspace priority to ring # and sched priority
*
--
2.49.0
next prev parent reply other threads:[~2025-05-14 17:57 UTC|newest]
Thread overview: 65+ messages / expand[flat|nested] mbox.gz Atom feed top
2025-05-14 17:53 [PATCH v4 00/40] drm/msm: sparse / "VM_BIND" support Rob Clark
2025-05-14 17:53 ` [PATCH v4 01/40] drm/gpuvm: Don't require obj lock in destructor path Rob Clark
2025-05-15 8:54 ` Danilo Krummrich
2025-05-15 9:06 ` Danilo Krummrich
2025-05-15 17:35 ` Rob Clark
2025-05-15 17:55 ` Danilo Krummrich
2025-05-15 21:57 ` Rob Clark
2025-05-16 9:01 ` Danilo Krummrich
2025-05-16 16:20 ` Rob Clark
2025-05-20 21:25 ` Dave Airlie
2025-05-20 21:52 ` Rob Clark
2025-05-20 22:31 ` Dave Airlie
2025-05-20 22:56 ` Rob Clark
2025-05-23 2:51 ` Rob Clark
2025-05-23 6:28 ` Danilo Krummrich
2025-05-14 17:53 ` [PATCH v4 02/40] drm/gpuvm: Allow VAs to hold soft reference to BOs Rob Clark
2025-05-15 9:00 ` Danilo Krummrich
2025-05-15 14:59 ` Rob Clark
2025-05-15 15:30 ` Danilo Krummrich
2025-05-15 17:34 ` Rob Clark
2025-05-15 17:51 ` Danilo Krummrich
2025-05-15 20:10 ` Rob Clark
2025-05-14 17:53 ` [PATCH v4 03/40] drm/gem: Add ww_acquire_ctx support to drm_gem_lru_scan() Rob Clark
2025-05-14 17:53 ` [PATCH v4 04/40] drm/sched: Add enqueue credit limit Rob Clark
2025-05-14 17:53 ` [PATCH v4 05/40] iommu/io-pgtable-arm: Add quirk to quiet WARN_ON() Rob Clark
2025-05-15 14:33 ` Will Deacon
2025-05-15 14:48 ` Rob Clark
2025-05-20 11:31 ` Will Deacon
2025-05-20 13:06 ` Robin Murphy
2025-05-20 14:06 ` Will Deacon
2025-05-14 17:53 ` [PATCH v4 06/40] drm/msm: Rename msm_file_private -> msm_context Rob Clark
2025-05-14 17:53 ` [PATCH v4 07/40] drm/msm: Improve msm_context comments Rob Clark
2025-05-14 17:53 ` [PATCH v4 08/40] drm/msm: Rename msm_gem_address_space -> msm_gem_vm Rob Clark
2025-05-14 17:53 ` [PATCH v4 09/40] drm/msm: Remove vram carveout support Rob Clark
2025-05-14 17:53 ` [PATCH v4 10/40] drm/msm: Collapse vma allocation and initialization Rob Clark
2025-05-14 17:53 ` [PATCH v4 11/40] drm/msm: Collapse vma close and delete Rob Clark
2025-05-14 17:53 ` [PATCH v4 12/40] drm/msm: Don't close VMAs on purge Rob Clark
2025-05-14 17:53 ` [PATCH v4 13/40] drm/msm: drm_gpuvm conversion Rob Clark
2025-05-14 17:53 ` [PATCH v4 14/40] drm/msm: Convert vm locking Rob Clark
2025-05-14 17:53 ` [PATCH v4 15/40] drm/msm: Use drm_gpuvm types more Rob Clark
2025-05-14 17:53 ` [PATCH v4 16/40] drm/msm: Split out helper to get iommu prot flags Rob Clark
2025-05-14 17:53 ` [PATCH v4 17/40] drm/msm: Add mmu support for non-zero offset Rob Clark
2025-05-14 17:53 ` [PATCH v4 18/40] drm/msm: Add PRR support Rob Clark
2025-05-14 17:53 ` [PATCH v4 19/40] drm/msm: Rename msm_gem_vma_purge() -> _unmap() Rob Clark
2025-05-14 17:53 ` [PATCH v4 20/40] drm/msm: Drop queued submits on lastclose() Rob Clark
2025-05-14 17:53 ` Rob Clark [this message]
2025-05-14 17:53 ` [PATCH v4 22/40] drm/msm: Add opt-in for VM_BIND Rob Clark
2025-05-14 17:53 ` [PATCH v4 23/40] drm/msm: Mark VM as unusable on GPU hangs Rob Clark
2025-05-14 17:53 ` [PATCH v4 24/40] drm/msm: Add _NO_SHARE flag Rob Clark
2025-05-14 17:53 ` [PATCH v4 25/40] drm/msm: Crashdump prep for sparse mappings Rob Clark
2025-05-14 17:53 ` [PATCH v4 26/40] drm/msm: rd dumping " Rob Clark
2025-05-14 17:53 ` [PATCH v4 27/40] drm/msm: Crashdec support for sparse Rob Clark
2025-05-14 17:53 ` [PATCH v4 28/40] drm/msm: rd dumping " Rob Clark
2025-05-14 17:53 ` [PATCH v4 29/40] drm/msm: Extract out syncobj helpers Rob Clark
2025-05-14 17:53 ` [PATCH v4 30/40] drm/msm: Use DMA_RESV_USAGE_BOOKKEEP/KERNEL Rob Clark
2025-05-14 17:53 ` [PATCH v4 31/40] drm/msm: Add VM_BIND submitqueue Rob Clark
2025-05-14 17:53 ` [PATCH v4 32/40] drm/msm: Support IO_PGTABLE_QUIRK_NO_WARN_ON Rob Clark
2025-05-14 17:53 ` [PATCH v4 33/40] drm/msm: Support pgtable preallocation Rob Clark
2025-05-14 17:53 ` [PATCH v4 34/40] drm/msm: Split out map/unmap ops Rob Clark
2025-05-14 17:53 ` [PATCH v4 35/40] drm/msm: Add VM_BIND ioctl Rob Clark
2025-05-14 17:53 ` [PATCH v4 36/40] drm/msm: Add VM logging for VM_BIND updates Rob Clark
2025-05-14 17:53 ` [PATCH v4 37/40] drm/msm: Add VMA unmap reason Rob Clark
2025-05-14 17:53 ` [PATCH v4 38/40] drm/msm: Add mmu prealloc tracepoint Rob Clark
2025-05-14 17:53 ` [PATCH v4 39/40] drm/msm: use trylock for debugfs Rob Clark
2025-05-14 17:53 ` [PATCH v4 40/40] drm/msm: Bump UAPI version Rob Clark
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20250514175527.42488-22-robdclark@gmail.com \
--to=robdclark@gmail.com \
--cc=airlied@gmail.com \
--cc=cwabbott0@gmail.com \
--cc=dri-devel@lists.freedesktop.org \
--cc=freedreno@lists.freedesktop.org \
--cc=konradybcio@kernel.org \
--cc=linux-arm-msm@vger.kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=lumag@kernel.org \
--cc=marijn.suijten@somainline.org \
--cc=quic_abhinavk@quicinc.com \
--cc=robdclark@chromium.org \
--cc=sean@poorly.run \
--cc=simona@ffwll.ch \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox