From: Rob Clark <robdclark@gmail.com>
To: dri-devel@lists.freedesktop.org
Cc: freedreno@lists.freedesktop.org, linux-arm-msm@vger.kernel.org,
Connor Abbott <cwabbott0@gmail.com>,
Rob Clark <robdclark@chromium.org>,
Rob Clark <robdclark@gmail.com>, Sean Paul <sean@poorly.run>,
Konrad Dybcio <konradybcio@kernel.org>,
Abhinav Kumar <quic_abhinavk@quicinc.com>,
Dmitry Baryshkov <lumag@kernel.org>,
Marijn Suijten <marijn.suijten@somainline.org>,
David Airlie <airlied@gmail.com>, Simona Vetter <simona@ffwll.ch>,
linux-kernel@vger.kernel.org (open list)
Subject: [PATCH v3 15/33] drm/msm: Add mmu support for non-zero offset
Date: Mon, 28 Apr 2025 13:54:22 -0700 [thread overview]
Message-ID: <20250428205619.227835-16-robdclark@gmail.com> (raw)
In-Reply-To: <20250428205619.227835-1-robdclark@gmail.com>
From: Rob Clark <robdclark@chromium.org>
Non-zero offset only needs to be supported for the io-pgtable MMU; the
other cases are either used only for kernel-managed mappings (where the
offset is always zero) or on devices which do not support sparse
bindings.
Signed-off-by: Rob Clark <robdclark@chromium.org>
---
drivers/gpu/drm/msm/adreno/a2xx_gpummu.c | 5 ++++-
drivers/gpu/drm/msm/msm_gem.c | 4 ++--
drivers/gpu/drm/msm/msm_gem.h | 4 ++--
drivers/gpu/drm/msm/msm_gem_vma.c | 13 +++++++------
drivers/gpu/drm/msm/msm_iommu.c | 22 ++++++++++++++++++++--
drivers/gpu/drm/msm/msm_mmu.h | 2 +-
6 files changed, 36 insertions(+), 14 deletions(-)
diff --git a/drivers/gpu/drm/msm/adreno/a2xx_gpummu.c b/drivers/gpu/drm/msm/adreno/a2xx_gpummu.c
index 39641551eeb6..6124336af2ec 100644
--- a/drivers/gpu/drm/msm/adreno/a2xx_gpummu.c
+++ b/drivers/gpu/drm/msm/adreno/a2xx_gpummu.c
@@ -29,13 +29,16 @@ static void a2xx_gpummu_detach(struct msm_mmu *mmu)
}
static int a2xx_gpummu_map(struct msm_mmu *mmu, uint64_t iova,
- struct sg_table *sgt, size_t len, int prot)
+ struct sg_table *sgt, size_t off, size_t len,
+ int prot)
{
struct a2xx_gpummu *gpummu = to_a2xx_gpummu(mmu);
unsigned idx = (iova - GPUMMU_VA_START) / GPUMMU_PAGE_SIZE;
struct sg_dma_page_iter dma_iter;
unsigned prot_bits = 0;
+ WARN_ON(off != 0);
+
if (prot & IOMMU_WRITE)
prot_bits |= 1;
if (prot & IOMMU_READ)
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index ecafc6b4a6b4..9cca5997f45c 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -435,7 +435,7 @@ static struct drm_gpuva *get_vma_locked(struct drm_gem_object *obj,
vma = lookup_vma(obj, vm);
if (!vma) {
- vma = msm_gem_vma_new(vm, obj, range_start, range_end);
+ vma = msm_gem_vma_new(vm, obj, 0, range_start, range_end);
} else {
GEM_WARN_ON(vma->va.addr < range_start);
GEM_WARN_ON((vma->va.addr + obj->size) > range_end);
@@ -477,7 +477,7 @@ int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct drm_gpuva *vma)
if (IS_ERR(pages))
return PTR_ERR(pages);
- return msm_gem_vma_map(vma, prot, msm_obj->sgt, obj->size);
+ return msm_gem_vma_map(vma, prot, msm_obj->sgt);
}
void msm_gem_unpin_locked(struct drm_gem_object *obj)
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index 3a853fcb8944..0d755b9d5f26 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -110,9 +110,9 @@ struct msm_gem_vma {
struct drm_gpuva *
msm_gem_vma_new(struct drm_gpuvm *vm, struct drm_gem_object *obj,
- u64 range_start, u64 range_end);
+ u64 offset, u64 range_start, u64 range_end);
void msm_gem_vma_purge(struct drm_gpuva *vma);
-int msm_gem_vma_map(struct drm_gpuva *vma, int prot, struct sg_table *sgt, int size);
+int msm_gem_vma_map(struct drm_gpuva *vma, int prot, struct sg_table *sgt);
void msm_gem_vma_close(struct drm_gpuva *vma);
struct msm_gem_object {
diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c
index 3acff002af39..bbb2ae1cdf45 100644
--- a/drivers/gpu/drm/msm/msm_gem_vma.c
+++ b/drivers/gpu/drm/msm/msm_gem_vma.c
@@ -38,8 +38,7 @@ void msm_gem_vma_purge(struct drm_gpuva *vma)
/* Map and pin vma: */
int
-msm_gem_vma_map(struct drm_gpuva *vma, int prot,
- struct sg_table *sgt, int size)
+msm_gem_vma_map(struct drm_gpuva *vma, int prot, struct sg_table *sgt)
{
struct msm_gem_vma *msm_vma = to_msm_vma(vma);
struct msm_gem_vm *vm = to_msm_vm(vma->vm);
@@ -62,8 +61,9 @@ msm_gem_vma_map(struct drm_gpuva *vma, int prot,
* Revisit this if we can come up with a scheme to pre-alloc pages
* for the pgtable in map/unmap ops.
*/
- ret = vm->mmu->funcs->map(vm->mmu, vma->va.addr, sgt, size, prot);
-
+ ret = vm->mmu->funcs->map(vm->mmu, vma->va.addr, sgt,
+ vma->gem.offset, vma->va.range,
+ prot);
if (ret) {
msm_vma->mapped = false;
}
@@ -93,7 +93,7 @@ void msm_gem_vma_close(struct drm_gpuva *vma)
/* Create a new vma and allocate an iova for it */
struct drm_gpuva *
msm_gem_vma_new(struct drm_gpuvm *_vm, struct drm_gem_object *obj,
- u64 range_start, u64 range_end)
+ u64 offset, u64 range_start, u64 range_end)
{
struct msm_gem_vm *vm = to_msm_vm(_vm);
struct drm_gpuvm_bo *vm_bo;
@@ -107,6 +107,7 @@ msm_gem_vma_new(struct drm_gpuvm *_vm, struct drm_gem_object *obj,
return ERR_PTR(-ENOMEM);
if (vm->managed) {
+ BUG_ON(offset != 0);
ret = drm_mm_insert_node_in_range(&vm->mm, &vma->node,
obj->size, PAGE_SIZE, 0,
range_start, range_end, 0);
@@ -120,7 +121,7 @@ msm_gem_vma_new(struct drm_gpuvm *_vm, struct drm_gem_object *obj,
GEM_WARN_ON((range_end - range_start) > obj->size);
- drm_gpuva_init(&vma->base, range_start, range_end - range_start, obj, 0);
+ drm_gpuva_init(&vma->base, range_start, range_end - range_start, obj, offset);
vma->mapped = false;
ret = drm_gpuva_insert(&vm->base, &vma->base);
diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
index e70088a91283..2fd48e66bc98 100644
--- a/drivers/gpu/drm/msm/msm_iommu.c
+++ b/drivers/gpu/drm/msm/msm_iommu.c
@@ -113,7 +113,8 @@ static int msm_iommu_pagetable_unmap(struct msm_mmu *mmu, u64 iova,
}
static int msm_iommu_pagetable_map(struct msm_mmu *mmu, u64 iova,
- struct sg_table *sgt, size_t len, int prot)
+ struct sg_table *sgt, size_t off, size_t len,
+ int prot)
{
struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);
struct io_pgtable_ops *ops = pagetable->pgtbl_ops;
@@ -125,6 +126,19 @@ static int msm_iommu_pagetable_map(struct msm_mmu *mmu, u64 iova,
size_t size = sg->length;
phys_addr_t phys = sg_phys(sg);
+ if (!len)
+ break;
+
+ if (size <= off) {
+ off -= size;
+ continue;
+ }
+
+ phys += off;
+ size -= off;
+ size = min_t(size_t, size, len);
+ off = 0;
+
while (size) {
size_t pgsize, count, mapped = 0;
int ret;
@@ -140,6 +154,7 @@ static int msm_iommu_pagetable_map(struct msm_mmu *mmu, u64 iova,
phys += mapped;
addr += mapped;
size -= mapped;
+ len -= mapped;
if (ret) {
msm_iommu_pagetable_unmap(mmu, iova, addr - iova);
@@ -400,11 +415,14 @@ static void msm_iommu_detach(struct msm_mmu *mmu)
}
static int msm_iommu_map(struct msm_mmu *mmu, uint64_t iova,
- struct sg_table *sgt, size_t len, int prot)
+ struct sg_table *sgt, size_t off, size_t len,
+ int prot)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
size_t ret;
+ WARN_ON(off != 0);
+
/* The arm-smmu driver expects the addresses to be sign extended */
if (iova & BIT_ULL(48))
iova |= GENMASK_ULL(63, 49);
diff --git a/drivers/gpu/drm/msm/msm_mmu.h b/drivers/gpu/drm/msm/msm_mmu.h
index c33247e459d6..c874852b7331 100644
--- a/drivers/gpu/drm/msm/msm_mmu.h
+++ b/drivers/gpu/drm/msm/msm_mmu.h
@@ -12,7 +12,7 @@
struct msm_mmu_funcs {
void (*detach)(struct msm_mmu *mmu);
int (*map)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt,
- size_t len, int prot);
+ size_t off, size_t len, int prot);
int (*unmap)(struct msm_mmu *mmu, uint64_t iova, size_t len);
void (*destroy)(struct msm_mmu *mmu);
void (*resume_translation)(struct msm_mmu *mmu);
--
2.49.0
next prev parent reply other threads:[~2025-04-28 20:57 UTC|newest]
Thread overview: 39+ messages / expand[flat|nested] mbox.gz Atom feed top
2025-04-28 20:54 [PATCH v3 00/33] drm/msm: sparse / "VM_BIND" support Rob Clark
2025-04-28 20:54 ` [PATCH v3 01/33] drm/gpuvm: Don't require obj lock in destructor path Rob Clark
2025-04-28 20:54 ` [PATCH v3 02/33] drm/gpuvm: Allow VAs to hold soft reference to BOs Rob Clark
2025-04-28 20:54 ` [PATCH v3 03/33] iommu/io-pgtable-arm: Add quirk to quiet WARN_ON() Rob Clark
2025-04-29 12:28 ` Jason Gunthorpe
2025-04-29 13:58 ` Rob Clark
2025-04-29 14:05 ` Jason Gunthorpe
2025-04-29 12:38 ` Robin Murphy
2025-04-29 13:59 ` Rob Clark
2025-04-28 20:54 ` [PATCH v3 04/33] drm/msm: Rename msm_file_private -> msm_context Rob Clark
2025-04-28 20:54 ` [PATCH v3 05/33] drm/msm: Improve msm_context comments Rob Clark
2025-04-28 20:54 ` [PATCH v3 06/33] drm/msm: Rename msm_gem_address_space -> msm_gem_vm Rob Clark
2025-04-28 20:54 ` [PATCH v3 07/33] drm/msm: Remove vram carveout support Rob Clark
2025-04-28 20:54 ` [PATCH v3 08/33] drm/msm: Collapse vma allocation and initialization Rob Clark
2025-04-28 20:54 ` [PATCH v3 09/33] drm/msm: Collapse vma close and delete Rob Clark
2025-04-28 20:54 ` [PATCH v3 10/33] drm/msm: Don't close VMAs on purge Rob Clark
2025-04-28 20:54 ` [PATCH v3 11/33] drm/msm: drm_gpuvm conversion Rob Clark
2025-04-28 20:54 ` [PATCH v3 12/33] drm/msm: Convert vm locking Rob Clark
2025-04-28 20:54 ` [PATCH v3 13/33] drm/msm: Use drm_gpuvm types more Rob Clark
2025-04-28 20:54 ` [PATCH v3 14/33] drm/msm: Split out helper to get iommu prot flags Rob Clark
2025-04-28 20:54 ` Rob Clark [this message]
2025-04-28 20:54 ` [PATCH v3 16/33] drm/msm: Add PRR support Rob Clark
2025-04-28 20:54 ` [PATCH v3 17/33] drm/msm: Rename msm_gem_vma_purge() -> _unmap() Rob Clark
2025-04-28 20:54 ` [PATCH v3 18/33] drm/msm: Lazily create context VM Rob Clark
2025-04-28 20:54 ` [PATCH v3 19/33] drm/msm: Add opt-in for VM_BIND Rob Clark
2025-04-28 20:54 ` [PATCH v3 20/33] drm/msm: Mark VM as unusable on GPU hangs Rob Clark
2025-04-28 20:54 ` [PATCH v3 21/33] drm/msm: Add _NO_SHARE flag Rob Clark
2025-04-28 20:54 ` [PATCH v3 22/33] drm/msm: Crashdump prep for sparse mappings Rob Clark
2025-04-28 20:54 ` [PATCH v3 23/33] drm/msm: rd dumping " Rob Clark
2025-04-28 20:54 ` [PATCH v3 24/33] drm/msm: Crashdec support for sparse Rob Clark
2025-04-28 20:54 ` [PATCH v3 25/33] drm/msm: rd dumping " Rob Clark
2025-04-28 20:54 ` [PATCH v3 26/33] drm/msm: Extract out syncobj helpers Rob Clark
2025-04-28 20:54 ` [PATCH v3 27/33] drm/msm: Use DMA_RESV_USAGE_BOOKKEEP/KERNEL Rob Clark
2025-04-28 20:54 ` [PATCH v3 28/33] drm/msm: Add VM_BIND submitqueue Rob Clark
2025-04-28 20:54 ` [PATCH v3 29/33] drm/msm: Support IO_PGTABLE_QUIRK_NO_WARN_ON Rob Clark
2025-04-28 20:54 ` [PATCH v3 30/33] drm/msm: Support pgtable preallocation Rob Clark
2025-04-28 20:54 ` [PATCH v3 31/33] drm/msm: Split out map/unmap ops Rob Clark
2025-04-28 20:54 ` [PATCH v3 32/33] drm/msm: Add VM_BIND ioctl Rob Clark
2025-04-28 20:54 ` [PATCH v3 33/33] drm/msm: Bump UAPI version Rob Clark
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20250428205619.227835-16-robdclark@gmail.com \
--to=robdclark@gmail.com \
--cc=airlied@gmail.com \
--cc=cwabbott0@gmail.com \
--cc=dri-devel@lists.freedesktop.org \
--cc=freedreno@lists.freedesktop.org \
--cc=konradybcio@kernel.org \
--cc=linux-arm-msm@vger.kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=lumag@kernel.org \
--cc=marijn.suijten@somainline.org \
--cc=quic_abhinavk@quicinc.com \
--cc=robdclark@chromium.org \
--cc=sean@poorly.run \
--cc=simona@ffwll.ch \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox