From: John Stultz <john.stultz@linaro.org>
To: lkml <linux-kernel@vger.kernel.org>
Cc: dri-devel@lists.freedesktop.org,
"Sandeep Patil" <sspatil@google.com>,
"Chris Goldsworthy" <cgoldswo@codeaurora.org>,
"Ezequiel Garcia" <ezequiel@collabora.com>,
"Suren Baghdasaryan" <surenb@google.com>,
"James Jones" <jajones@nvidia.com>,
"Robin Murphy" <robin.murphy@arm.com>,
"Liam Mark" <lmark@codeaurora.org>,
"Bing Song" <bing.song@nxp.com>,
"Laura Abbott" <labbott@kernel.org>,
"Hridya Valsaraju" <hridya@google.com>,
"Ørjan Eide" <orjan.eide@arm.com>,
linux-media@vger.kernel.org,
"Daniel Mentz" <danielmentz@google.com>
Subject: [PATCH 3/3] dma-buf: cma_heap: Add a cma-uncached heap re-using the cma heap
Date: Wed, 20 Jan 2021 21:09:37 +0000
Message-ID: <20210120210937.15069-4-john.stultz@linaro.org>
In-Reply-To: <20210120210937.15069-1-john.stultz@linaro.org>
From: Bing Song <bing.song@nxp.com>
This adds a heap that allocates CMA buffers mapped as
write-combined, so they are not cached by the CPU. Since the CPU
mappings are uncached, the dma-buf map/unmap paths can also skip CPU
cache maintenance by passing DMA_ATTR_SKIP_CPU_SYNC.
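For illustration, here is a minimal userspace sketch of allocating
from the new heap and mapping the buffer. This is a hypothetical
example, not part of this patch, and it assumes a CMA region named
"reserved", so the heap shows up as /dev/dma_heap/reserved-uncached:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/dma-heap.h>

int main(void)
{
	struct dma_heap_allocation_data alloc = {
		.len = 4096,
		.fd_flags = O_RDWR | O_CLOEXEC,
	};
	void *buf;
	int heap;

	/* Heap name is the CMA region name plus "-uncached";
	 * "reserved" is only an assumption for this example. */
	heap = open("/dev/dma_heap/reserved-uncached", O_RDONLY | O_CLOEXEC);
	if (heap < 0 || ioctl(heap, DMA_HEAP_IOCTL_ALLOC, &alloc) < 0) {
		perror("dma-heap alloc");
		return 1;
	}

	/* The CPU mapping is write-combined, so the buffer can be
	 * filled and handed to a device with no explicit cache flush. */
	buf = mmap(NULL, alloc.len, PROT_READ | PROT_WRITE, MAP_SHARED,
		   alloc.fd, 0);
	if (buf == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	memset(buf, 0, alloc.len);

	munmap(buf, alloc.len);
	close(alloc.fd);
	close(heap);
	return 0;
}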
Cc: Daniel Vetter <daniel@ffwll.ch>
Cc: Sumit Semwal <sumit.semwal@linaro.org>
Cc: Liam Mark <lmark@codeaurora.org>
Cc: Laura Abbott <labbott@kernel.org>
Cc: Brian Starkey <Brian.Starkey@arm.com>
Cc: Hridya Valsaraju <hridya@google.com>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Sandeep Patil <sspatil@google.com>
Cc: Daniel Mentz <danielmentz@google.com>
Cc: Chris Goldsworthy <cgoldswo@codeaurora.org>
Cc: Ørjan Eide <orjan.eide@arm.com>
Cc: Robin Murphy <robin.murphy@arm.com>
Cc: Ezequiel Garcia <ezequiel@collabora.com>
Cc: Simon Ser <contact@emersion.fr>
Cc: James Jones <jajones@nvidia.com>
Cc: linux-media@vger.kernel.org
Cc: dri-devel@lists.freedesktop.org
Signed-off-by: Bing Song <bing.song@nxp.com>
Signed-off-by: John Stultz <john.stultz@linaro.org>
---
drivers/dma-buf/heaps/cma_heap.c | 119 +++++++++++++++++++++++++++----
1 file changed, 107 insertions(+), 12 deletions(-)
diff --git a/drivers/dma-buf/heaps/cma_heap.c b/drivers/dma-buf/heaps/cma_heap.c
index 364fc2f3e499..1b8c6eb0a8ea 100644
--- a/drivers/dma-buf/heaps/cma_heap.c
+++ b/drivers/dma-buf/heaps/cma_heap.c
@@ -38,6 +38,7 @@ struct cma_heap_buffer {
pgoff_t pagecount;
int vmap_cnt;
void *vaddr;
+ bool uncached;
};
struct dma_heap_attachment {
@@ -45,6 +46,7 @@ struct dma_heap_attachment {
struct sg_table table;
struct list_head list;
bool mapped;
+ bool uncached;
};
static int cma_heap_attach(struct dma_buf *dmabuf,
@@ -70,6 +72,7 @@ static int cma_heap_attach(struct dma_buf *dmabuf,
a->dev = attachment->dev;
INIT_LIST_HEAD(&a->list);
a->mapped = false;
+ a->uncached = buffer->uncached;
attachment->priv = a;
@@ -99,8 +102,12 @@ static struct sg_table *cma_heap_map_dma_buf(struct dma_buf_attachment *attachme
{
struct dma_heap_attachment *a = attachment->priv;
struct sg_table *table = &a->table;
+ int attr = 0;
int ret;
+ if (a->uncached)
+ attr = DMA_ATTR_SKIP_CPU_SYNC;
+
- ret = dma_map_sgtable(attachment->dev, table, direction, 0);
+ ret = dma_map_sgtable(attachment->dev, table, direction, attr);
if (ret)
return ERR_PTR(-ENOMEM);
@@ -113,7 +120,10 @@ static void cma_heap_unmap_dma_buf(struct dma_buf_attachment *attachment,
enum dma_data_direction direction)
{
struct dma_heap_attachment *a = attachment->priv;
+ int attr = 0;
+ if (a->uncached)
+ attr = DMA_ATTR_SKIP_CPU_SYNC;
a->mapped = false;
- dma_unmap_sgtable(attachment->dev, table, direction, 0);
+ dma_unmap_sgtable(attachment->dev, table, direction, attr);
}
@@ -128,10 +138,12 @@ static int cma_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
invalidate_kernel_vmap_range(buffer->vaddr, buffer->len);
mutex_lock(&buffer->lock);
- list_for_each_entry(a, &buffer->attachments, list) {
- if (!a->mapped)
- continue;
- dma_sync_sgtable_for_cpu(a->dev, &a->table, direction);
+ if (!buffer->uncached) {
+ list_for_each_entry(a, &buffer->attachments, list) {
+ if (!a->mapped)
+ continue;
+ dma_sync_sgtable_for_cpu(a->dev, &a->table, direction);
+ }
}
mutex_unlock(&buffer->lock);
@@ -148,10 +160,12 @@ static int cma_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
flush_kernel_vmap_range(buffer->vaddr, buffer->len);
mutex_lock(&buffer->lock);
- list_for_each_entry(a, &buffer->attachments, list) {
- if (!a->mapped)
- continue;
- dma_sync_sgtable_for_device(a->dev, &a->table, direction);
+ if (!buffer->uncached) {
+ list_for_each_entry(a, &buffer->attachments, list) {
+ if (!a->mapped)
+ continue;
+ dma_sync_sgtable_for_device(a->dev, &a->table, direction);
+ }
}
mutex_unlock(&buffer->lock);
@@ -183,6 +197,9 @@ static int cma_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0)
return -EINVAL;
+ if (buffer->uncached)
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+
vma->vm_ops = &dma_heap_vm_ops;
vma->vm_private_data = buffer;
@@ -191,9 +208,13 @@ static int cma_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
static void *cma_heap_do_vmap(struct cma_heap_buffer *buffer)
{
+ pgprot_t pgprot = PAGE_KERNEL;
void *vaddr;
- vaddr = vmap(buffer->pages, buffer->pagecount, VM_MAP, PAGE_KERNEL);
+ if (buffer->uncached)
+ pgprot = pgprot_writecombine(PAGE_KERNEL);
+
+ vaddr = vmap(buffer->pages, buffer->pagecount, VM_MAP, pgprot);
if (!vaddr)
return ERR_PTR(-ENOMEM);
@@ -271,10 +292,11 @@ static const struct dma_buf_ops cma_heap_buf_ops = {
.release = cma_heap_dma_buf_release,
};
-static int cma_heap_allocate(struct dma_heap *heap,
+static int cma_heap_do_allocate(struct dma_heap *heap,
unsigned long len,
unsigned long fd_flags,
- unsigned long heap_flags)
+ unsigned long heap_flags,
+ bool uncached)
{
struct cma_heap *cma_heap = dma_heap_get_drvdata(heap);
struct cma_heap_buffer *buffer;
@@ -283,8 +305,9 @@ static int cma_heap_allocate(struct dma_heap *heap,
pgoff_t pagecount = size >> PAGE_SHIFT;
unsigned long align = get_order(size);
struct page *cma_pages;
+ struct sg_table table;
struct dma_buf *dmabuf;
- int ret = -ENOMEM;
+ int ret = -ENOMEM, ret_sg_table;
pgoff_t pg;
buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
@@ -294,6 +317,7 @@ static int cma_heap_allocate(struct dma_heap *heap,
INIT_LIST_HEAD(&buffer->attachments);
mutex_init(&buffer->lock);
buffer->len = size;
+ buffer->uncached = uncached;
if (align > CONFIG_CMA_ALIGNMENT)
align = CONFIG_CMA_ALIGNMENT;
@@ -356,6 +380,18 @@ static int cma_heap_allocate(struct dma_heap *heap,
return ret;
}
+ if (buffer->uncached) {
+ ret_sg_table = sg_alloc_table(&table, 1, GFP_KERNEL);
+ if (ret_sg_table)
+ return ret_sg_table;
+
+ sg_set_page(table.sgl, cma_pages, size, 0);
+
+ /* Map/unmap once to flush any dirty CPU cache lines for the buffer */
+ if (!dma_map_sgtable(dma_heap_get_dev(heap), &table, DMA_BIDIRECTIONAL, 0))
+ dma_unmap_sgtable(dma_heap_get_dev(heap), &table, DMA_BIDIRECTIONAL, 0);
+ sg_free_table(&table);
+ }
+
return ret;
free_pages:
@@ -368,14 +404,45 @@ static int cma_heap_allocate(struct dma_heap *heap,
return ret;
}
+static int cma_heap_allocate(struct dma_heap *heap,
+ unsigned long len,
+ unsigned long fd_flags,
+ unsigned long heap_flags)
+{
+ return cma_heap_do_allocate(heap, len, fd_flags, heap_flags, false);
+}
+
+static int cma_uncached_heap_allocate(struct dma_heap *heap,
+ unsigned long len,
+ unsigned long fd_flags,
+ unsigned long heap_flags)
+{
+ return cma_heap_do_allocate(heap, len, fd_flags, heap_flags, true);
+}
+
+/* Dummy function to be used until we can call coerce_mask_and_coherent */
+static int cma_uncached_heap_not_initialized(struct dma_heap *heap,
+ unsigned long len,
+ unsigned long fd_flags,
+ unsigned long heap_flags)
+{
+ return -EBUSY;
+}
+
static const struct dma_heap_ops cma_heap_ops = {
.allocate = cma_heap_allocate,
};
+static struct dma_heap_ops cma_uncached_heap_ops = {
+ .allocate = cma_uncached_heap_not_initialized,
+};
+
static int __add_cma_heap(struct cma *cma, void *data)
{
struct cma_heap *cma_heap;
struct dma_heap_export_info exp_info;
+ const char *postfixed = "-uncached";
+ char *cma_name;
cma_heap = kzalloc(sizeof(*cma_heap), GFP_KERNEL);
if (!cma_heap)
@@ -394,6 +461,34 @@ static int __add_cma_heap(struct cma *cma, void *data)
return ret;
}
+ cma_heap = kzalloc(sizeof(*cma_heap), GFP_KERNEL);
+ if (!cma_heap)
+ return -ENOMEM;
+ cma_heap->cma = cma;
+
+ cma_name = kzalloc(strlen(cma_get_name(cma)) + strlen(postfixed) + 1, GFP_KERNEL);
+ if (!cma_name) {
+ kfree(cma_heap);
+ return -ENOMEM;
+ }
+
+ exp_info.name = strcat(strcpy(cma_name, cma_get_name(cma)), postfixed);
+ exp_info.ops = &cma_uncached_heap_ops;
+ exp_info.priv = cma_heap;
+
+ cma_heap->heap = dma_heap_add(&exp_info);
+ if (IS_ERR(cma_heap->heap)) {
+ int ret = PTR_ERR(cma_heap->heap);
+
+ kfree(cma_heap);
+ kfree(cma_name);
+ return ret;
+ }
+
+ dma_coerce_mask_and_coherent(dma_heap_get_dev(cma_heap->heap), DMA_BIT_MASK(64));
+ mb(); /* make sure we only set allocate after dma_mask is set */
+ cma_uncached_heap_ops.allocate = cma_uncached_heap_allocate;
+
return 0;
}
--
2.17.1