From: Rob Clark <robdclark@gmail.com>
To: dri-devel@lists.freedesktop.org
Cc: Daniel Vetter <daniel@ffwll.ch>,
freedreno@lists.freedesktop.org, linux-arm-msm@vger.kernel.org,
Rob Clark <robdclark@chromium.org>,
Rob Clark <robdclark@gmail.com>,
Abhinav Kumar <quic_abhinavk@quicinc.com>,
Dmitry Baryshkov <dmitry.baryshkov@linaro.org>,
Sean Paul <sean@poorly.run>, David Airlie <airlied@gmail.com>,
linux-kernel@vger.kernel.org (open list)
Subject: [PATCH v2 03/23] drm/msm/gem: Tidy up VMA API
Date: Mon, 20 Mar 2023 07:43:25 -0700 [thread overview]
Message-ID: <20230320144356.803762-4-robdclark@gmail.com> (raw)
In-Reply-To: <20230320144356.803762-1-robdclark@gmail.com>
From: Rob Clark <robdclark@chromium.org>
Stop open coding VMA construction, which will be needed in the next
commit. And since the VMA already has a ptr to the address space, stop
passing that around everywhere. (Also, an aspace always has an mmu so
we can drop a couple pointless NULL checks.)
Signed-off-by: Rob Clark <robdclark@chromium.org>
---
drivers/gpu/drm/msm/msm_gem.c | 18 +++++-----
drivers/gpu/drm/msm/msm_gem.h | 18 ++++------
drivers/gpu/drm/msm/msm_gem_submit.c | 2 +-
drivers/gpu/drm/msm/msm_gem_vma.c | 51 ++++++++++++++++++----------
drivers/gpu/drm/msm/msm_ringbuffer.c | 2 +-
5 files changed, 51 insertions(+), 40 deletions(-)
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 1dee0d18abbb..6734aecf0703 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -309,12 +309,10 @@ static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
msm_gem_assert_locked(obj);
- vma = kzalloc(sizeof(*vma), GFP_KERNEL);
+ vma = msm_gem_vma_new(aspace);
if (!vma)
return ERR_PTR(-ENOMEM);
- vma->aspace = aspace;
-
list_add_tail(&vma->list, &msm_obj->vmas);
return vma;
@@ -361,9 +359,9 @@ put_iova_spaces(struct drm_gem_object *obj, bool close)
list_for_each_entry(vma, &msm_obj->vmas, list) {
if (vma->aspace) {
- msm_gem_purge_vma(vma->aspace, vma);
+ msm_gem_vma_purge(vma);
if (close)
- msm_gem_close_vma(vma->aspace, vma);
+ msm_gem_vma_close(vma);
}
}
}
@@ -399,7 +397,7 @@ static struct msm_gem_vma *get_vma_locked(struct drm_gem_object *obj,
if (IS_ERR(vma))
return vma;
- ret = msm_gem_init_vma(aspace, vma, obj->size,
+ ret = msm_gem_vma_init(vma, obj->size,
range_start, range_end);
if (ret) {
del_vma(vma);
@@ -437,7 +435,7 @@ int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma)
if (IS_ERR(pages))
return PTR_ERR(pages);
- ret = msm_gem_map_vma(vma->aspace, vma, prot, msm_obj->sgt, obj->size);
+ ret = msm_gem_vma_map(vma, prot, msm_obj->sgt, obj->size);
if (ret)
msm_gem_unpin_locked(obj);
@@ -539,8 +537,8 @@ static int clear_iova(struct drm_gem_object *obj,
if (msm_gem_vma_inuse(vma))
return -EBUSY;
- msm_gem_purge_vma(vma->aspace, vma);
- msm_gem_close_vma(vma->aspace, vma);
+ msm_gem_vma_purge(vma);
+ msm_gem_vma_close(vma);
del_vma(vma);
return 0;
@@ -589,7 +587,7 @@ void msm_gem_unpin_iova(struct drm_gem_object *obj,
msm_gem_lock(obj);
vma = lookup_vma(obj, aspace);
if (!GEM_WARN_ON(!vma)) {
- msm_gem_unpin_vma(vma);
+ msm_gem_vma_unpin(vma);
msm_gem_unpin_locked(obj);
}
msm_gem_unlock(obj);
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index c4844cf3a585..d3219c523034 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -69,19 +69,15 @@ struct msm_gem_vma {
struct msm_fence_context *fctx[MSM_GPU_MAX_RINGS];
};
-int msm_gem_init_vma(struct msm_gem_address_space *aspace,
- struct msm_gem_vma *vma, int size,
+struct msm_gem_vma *msm_gem_vma_new(struct msm_gem_address_space *aspace);
+int msm_gem_vma_init(struct msm_gem_vma *vma, int size,
u64 range_start, u64 range_end);
bool msm_gem_vma_inuse(struct msm_gem_vma *vma);
-void msm_gem_purge_vma(struct msm_gem_address_space *aspace,
- struct msm_gem_vma *vma);
-void msm_gem_unpin_vma(struct msm_gem_vma *vma);
-void msm_gem_unpin_vma_fenced(struct msm_gem_vma *vma, struct msm_fence_context *fctx);
-int msm_gem_map_vma(struct msm_gem_address_space *aspace,
- struct msm_gem_vma *vma, int prot,
- struct sg_table *sgt, int size);
-void msm_gem_close_vma(struct msm_gem_address_space *aspace,
- struct msm_gem_vma *vma);
+void msm_gem_vma_purge(struct msm_gem_vma *vma);
+void msm_gem_vma_unpin(struct msm_gem_vma *vma);
+void msm_gem_vma_unpin_fenced(struct msm_gem_vma *vma, struct msm_fence_context *fctx);
+int msm_gem_vma_map(struct msm_gem_vma *vma, int prot, struct sg_table *sgt, int size);
+void msm_gem_vma_close(struct msm_gem_vma *vma);
struct msm_gem_object {
struct drm_gem_object base;
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 2570c018b0cb..1d8e7c2a8024 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -249,7 +249,7 @@ static void submit_cleanup_bo(struct msm_gem_submit *submit, int i,
submit->bos[i].flags &= ~cleanup_flags;
if (flags & BO_VMA_PINNED)
- msm_gem_unpin_vma(submit->bos[i].vma);
+ msm_gem_vma_unpin(submit->bos[i].vma);
if (flags & BO_OBJ_PINNED)
msm_gem_unpin_locked(obj);
diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c
index c471aebcdbab..2827679dc39a 100644
--- a/drivers/gpu/drm/msm/msm_gem_vma.c
+++ b/drivers/gpu/drm/msm/msm_gem_vma.c
@@ -56,9 +56,9 @@ bool msm_gem_vma_inuse(struct msm_gem_vma *vma)
}
/* Actually unmap memory for the vma */
-void msm_gem_purge_vma(struct msm_gem_address_space *aspace,
- struct msm_gem_vma *vma)
+void msm_gem_vma_purge(struct msm_gem_vma *vma)
{
+ struct msm_gem_address_space *aspace = vma->aspace;
unsigned size = vma->node.size;
/* Print a message if we try to purge a vma in use */
@@ -68,14 +68,13 @@ void msm_gem_purge_vma(struct msm_gem_address_space *aspace,
if (!vma->mapped)
return;
- if (aspace->mmu)
- aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, size);
+ aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, size);
vma->mapped = false;
}
/* Remove reference counts for the mapping */
-void msm_gem_unpin_vma(struct msm_gem_vma *vma)
+void msm_gem_vma_unpin(struct msm_gem_vma *vma)
{
if (GEM_WARN_ON(!vma->inuse))
return;
@@ -84,21 +83,21 @@ void msm_gem_unpin_vma(struct msm_gem_vma *vma)
}
/* Replace pin reference with fence: */
-void msm_gem_unpin_vma_fenced(struct msm_gem_vma *vma, struct msm_fence_context *fctx)
+void msm_gem_vma_unpin_fenced(struct msm_gem_vma *vma, struct msm_fence_context *fctx)
{
vma->fctx[fctx->index] = fctx;
vma->fence[fctx->index] = fctx->last_fence;
vma->fence_mask |= BIT(fctx->index);
- msm_gem_unpin_vma(vma);
+ msm_gem_vma_unpin(vma);
}
/* Map and pin vma: */
int
-msm_gem_map_vma(struct msm_gem_address_space *aspace,
- struct msm_gem_vma *vma, int prot,
+msm_gem_vma_map(struct msm_gem_vma *vma, int prot,
struct sg_table *sgt, int size)
{
- int ret = 0;
+ struct msm_gem_address_space *aspace = vma->aspace;
+ int ret;
if (GEM_WARN_ON(!vma->iova))
return -EINVAL;
@@ -111,9 +110,10 @@ msm_gem_map_vma(struct msm_gem_address_space *aspace,
vma->mapped = true;
- if (aspace && aspace->mmu)
- ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt,
- size, prot);
+ if (!aspace)
+ return 0;
+
+ ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt, size, prot);
if (ret) {
vma->mapped = false;
@@ -124,9 +124,10 @@ msm_gem_map_vma(struct msm_gem_address_space *aspace,
}
/* Close an iova. Warn if it is still in use */
-void msm_gem_close_vma(struct msm_gem_address_space *aspace,
- struct msm_gem_vma *vma)
+void msm_gem_vma_close(struct msm_gem_vma *vma)
{
+ struct msm_gem_address_space *aspace = vma->aspace;
+
GEM_WARN_ON(msm_gem_vma_inuse(vma) || vma->mapped);
spin_lock(&aspace->lock);
@@ -139,13 +140,29 @@ void msm_gem_close_vma(struct msm_gem_address_space *aspace,
msm_gem_address_space_put(aspace);
}
+struct msm_gem_vma *msm_gem_vma_new(struct msm_gem_address_space *aspace)
+{
+ struct msm_gem_vma *vma;
+
+ vma = kzalloc(sizeof(*vma), GFP_KERNEL);
+ if (!vma)
+ return NULL;
+
+ vma->aspace = aspace;
+
+ return vma;
+}
+
/* Initialize a new vma and allocate an iova for it */
-int msm_gem_init_vma(struct msm_gem_address_space *aspace,
- struct msm_gem_vma *vma, int size,
+int msm_gem_vma_init(struct msm_gem_vma *vma, int size,
u64 range_start, u64 range_end)
{
+ struct msm_gem_address_space *aspace = vma->aspace;
int ret;
+ if (GEM_WARN_ON(!aspace))
+ return -EINVAL;
+
if (GEM_WARN_ON(vma->iova))
return -EBUSY;
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c
index a80447c8764e..44a22b283730 100644
--- a/drivers/gpu/drm/msm/msm_ringbuffer.c
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.c
@@ -24,7 +24,7 @@ static struct dma_fence *msm_job_run(struct drm_sched_job *job)
struct drm_gem_object *obj = &submit->bos[i].obj->base;
msm_gem_lock(obj);
- msm_gem_unpin_vma_fenced(submit->bos[i].vma, fctx);
+ msm_gem_vma_unpin_fenced(submit->bos[i].vma, fctx);
msm_gem_unpin_locked(obj);
msm_gem_unlock(obj);
submit->bos[i].flags &= ~(BO_VMA_PINNED | BO_OBJ_PINNED);
--
2.39.2
next prev parent reply other threads:[~2023-03-20 14:45 UTC|newest]
Thread overview: 33+ messages / expand[flat|nested] mbox.gz Atom feed top
2023-03-20 14:43 [PATCH v2 00/23] drm/msm+PM+icc: Make job_run() reclaim-safe Rob Clark
2023-03-20 14:43 ` [PATCH v2 01/23] drm/msm: Pre-allocate hw_fence Rob Clark
2023-03-20 16:52 ` Christian König
2023-03-20 20:32 ` Rob Clark
2023-03-20 14:43 ` [PATCH v2 02/23] drm/msm: Move submit bo flags update from obj lock Rob Clark
2023-03-20 14:43 ` Rob Clark [this message]
2023-03-20 14:43 ` [PATCH v2 04/23] drm/msm: Decouple vma tracking " Rob Clark
2023-03-20 14:43 ` [PATCH v2 05/23] drm/msm/gem: Simplify vmap vs LRU tracking Rob Clark
2023-03-20 14:43 ` [PATCH v2 06/23] drm/gem: Export drm_gem_lru_move_tail_locked() Rob Clark
2023-03-20 14:43 ` [PATCH v2 07/23] drm/msm/gem: Move update_lru() Rob Clark
2023-03-20 14:43 ` [PATCH v2 08/23] drm/msm/gem: Protect pin_count/madv by LRU lock Rob Clark
2023-03-20 14:43 ` [PATCH v2 09/23] drm/msm/gem: Avoid obj lock in job_run() Rob Clark
2023-03-20 14:43 ` [PATCH v2 10/23] drm/msm: Switch idr_lock to spinlock Rob Clark
2023-03-20 14:43 ` [PATCH v2 11/23] drm/msm: Use idr_preload() Rob Clark
2023-03-20 14:43 ` [PATCH v2 12/23] drm/msm/gpu: Move fw loading out of hw_init() path Rob Clark
2023-03-20 14:43 ` [PATCH v2 13/23] drm/msm/gpu: Move BO allocation out of hw_init Rob Clark
2023-03-20 14:43 ` [PATCH v2 14/23] drm/msm/a6xx: Move ioremap out of hw_init path Rob Clark
2023-03-20 14:43 ` [PATCH v2 15/23] PM / devfreq: Drop unneed locking to appease lockdep Rob Clark
2023-03-20 14:43 ` [PATCH v2 16/23] PM / devfreq: Teach lockdep about locking order Rob Clark
2023-03-20 14:43 ` [PATCH v2 17/23] PM / QoS: Fix constraints alloc vs reclaim locking Rob Clark
2023-03-27 17:53 ` Rafael J. Wysocki
2023-03-27 19:52 ` Rob Clark
2023-03-20 14:43 ` [PATCH v2 18/23] PM / QoS: Decouple request alloc from dev_pm_qos_mtx Rob Clark
2023-03-20 20:13 ` kernel test robot
2023-03-20 20:34 ` kernel test robot
2023-03-21 4:53 ` Dan Carpenter
2023-03-20 14:43 ` [PATCH v2 19/23] PM / QoS: Teach lockdep about dev_pm_qos_mtx locking order Rob Clark
2023-03-20 14:43 ` [PATCH v2 20/23] soc: qcom: smd-rpm: Use GFP_ATOMIC in write path Rob Clark
2023-03-20 14:43 ` [PATCH v2 21/23] interconnect: Fix locking for runpm vs reclaim Rob Clark
2023-03-20 14:43 ` [PATCH v2 22/23] interconnect: Teach lockdep about icc_bw_lock order Rob Clark
2023-03-20 14:43 ` [PATCH v2 23/23] drm/sched: Add (optional) fence signaling annotation Rob Clark
2023-03-21 2:55 ` Luben Tuikov
2023-04-07 17:41 ` (subset) [PATCH v2 00/23] drm/msm+PM+icc: Make job_run() reclaim-safe Bjorn Andersson
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20230320144356.803762-4-robdclark@gmail.com \
--to=robdclark@gmail.com \
--cc=airlied@gmail.com \
--cc=daniel@ffwll.ch \
--cc=dmitry.baryshkov@linaro.org \
--cc=dri-devel@lists.freedesktop.org \
--cc=freedreno@lists.freedesktop.org \
--cc=linux-arm-msm@vger.kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=quic_abhinavk@quicinc.com \
--cc=robdclark@chromium.org \
--cc=sean@poorly.run \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox