From: Yong Zhi <yong.zhi@intel.com>
To: linux-media@vger.kernel.org, sakari.ailus@linux.intel.com
Cc: jian.xu.zheng@intel.com, tfiga@chromium.org,
rajmohan.mani@intel.com, tuukka.toivonen@intel.com,
Yong Zhi <yong.zhi@intel.com>
Subject: [PATCH 03/12] intel-ipu3: Add DMA API implementation
Date: Mon, 5 Jun 2017 15:39:08 -0500
Message-ID: <1496695157-19926-4-git-send-email-yong.zhi@intel.com>
In-Reply-To: <1496695157-19926-1-git-send-email-yong.zhi@intel.com>
Add an MMU-based DMA mapping driver for the IPU3. It implements dma_map_ops
(alloc/free, mmap, map_sg/unmap_sg, cache sync and get_sgtable) on top of the
IPU3 IOMMU domain and the IOVA allocator, so that the rest of the IPU3 driver
can use the regular DMA API for its buffers.
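
A minimal usage sketch follows for reviewers. It is illustrative only and not
part of this patch: it assumes the device's dma_ops pointer has already been
set to ipu3_dmamap_ops (e.g. by the IPU3 MMU driver), and the function name
and buffer size below are hypothetical.

        /* Illustrative sketch only -- not part of this patch. */
        #include <linux/dma-mapping.h>

        static int ipu3_dma_example(struct device *dev)
        {
                dma_addr_t iova;
                void *vaddr;

                /* Routed to ipu3_dmamap_alloc(): pages + IOVA + kernel mapping */
                vaddr = dma_alloc_attrs(dev, 2 * PAGE_SIZE, &iova, GFP_KERNEL, 0);
                if (!vaddr)
                        return -ENOMEM;

                /* ... program "iova" into the hardware, use "vaddr" from the CPU ... */

                /* Routed to ipu3_dmamap_free(): unmap, release the IOVA, free pages */
                dma_free_attrs(dev, 2 * PAGE_SIZE, vaddr, iova, 0);

                return 0;
        }
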
Signed-off-by: Yong Zhi <yong.zhi@intel.com>
---
drivers/media/pci/intel/ipu3/Kconfig | 6 +
drivers/media/pci/intel/ipu3/Makefile | 1 +
drivers/media/pci/intel/ipu3/ipu3-dmamap.c | 408 +++++++++++++++++++++++++++++
drivers/media/pci/intel/ipu3/ipu3-dmamap.h | 20 ++
4 files changed, 435 insertions(+)
create mode 100644 drivers/media/pci/intel/ipu3/ipu3-dmamap.c
create mode 100644 drivers/media/pci/intel/ipu3/ipu3-dmamap.h
diff --git a/drivers/media/pci/intel/ipu3/Kconfig b/drivers/media/pci/intel/ipu3/Kconfig
index ab2edcb..2030be7 100644
--- a/drivers/media/pci/intel/ipu3/Kconfig
+++ b/drivers/media/pci/intel/ipu3/Kconfig
@@ -26,3 +26,9 @@ config INTEL_IPU3_MMU
Say Y here if you have Skylake/Kaby Lake SoC with IPU3.
Say N if un-sure.
+
+config INTEL_IPU3_DMAMAP
+ bool "Intel ipu3 DMA mapping driver"
+ select IOMMU_IOVA
+ ---help---
+ This is the DMA mapping driver for the IPU3 IOMMU domain.
diff --git a/drivers/media/pci/intel/ipu3/Makefile b/drivers/media/pci/intel/ipu3/Makefile
index 2b669df..2c2a035 100644
--- a/drivers/media/pci/intel/ipu3/Makefile
+++ b/drivers/media/pci/intel/ipu3/Makefile
@@ -1,2 +1,3 @@
obj-$(CONFIG_VIDEO_IPU3_CIO2) += ipu3-cio2.o
obj-$(CONFIG_INTEL_IPU3_MMU) += ipu3-mmu.o
+obj-$(CONFIG_INTEL_IPU3_DMAMAP) += ipu3-dmamap.o
diff --git a/drivers/media/pci/intel/ipu3/ipu3-dmamap.c b/drivers/media/pci/intel/ipu3/ipu3-dmamap.c
new file mode 100644
index 0000000..74704d9
--- /dev/null
+++ b/drivers/media/pci/intel/ipu3/ipu3-dmamap.c
@@ -0,0 +1,408 @@
+/*
+ * Copyright (c) 2017 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/dma-mapping.h>
+#include <linux/highmem.h>
+#include <linux/iommu.h>
+#include <linux/iova.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+#include <asm/cacheflush.h>
+
+#include "ipu3-mmu.h"
+
+/* Start of code adapted from arch/arm/mm/dma-mapping.c */
+static void ipu3_dmamap_clear_buffer(struct page *page, size_t size,
+ unsigned long attrs)
+{
+ /*
+ * Ensure that the allocated pages are zeroed, and that any data
+ * lurking in the kernel direct-mapped region is invalidated.
+ */
+ if (PageHighMem(page)) {
+ while (size > 0) {
+ void *ptr = kmap_atomic(page);
+
+ memset(ptr, 0, PAGE_SIZE);
+ if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
+ clflush_cache_range(ptr, PAGE_SIZE);
+ kunmap_atomic(ptr);
+ page++;
+ size -= PAGE_SIZE;
+ }
+ } else {
+ void *ptr = page_address(page);
+
+ memset(ptr, 0, size);
+ if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
+ clflush_cache_range(ptr, size);
+ }
+}
+
+/**
+ * ipu3_dmamap_alloc_buffer - allocate buffer pages based on attributes
+ * @dev: struct device pointer
+ * @size: size of the buffer in bytes
+ * @gfp: GFP flags for the page allocation
+ * @attrs: DMA attributes as defined in linux/dma-mapping.h
+ *
+ * This is a helper for physical page allocation.
+ *
+ * Return: array of pages backing the buffer on success,
+ * or NULL on failure.
+ *
+ * The array must be freed with ipu3_dmamap_free_buffer().
+ */
+static struct page **ipu3_dmamap_alloc_buffer(struct device *dev, size_t size,
+ gfp_t gfp, unsigned long attrs)
+{
+ struct page **pages;
+ int count = size >> PAGE_SHIFT;
+ int array_size = count * sizeof(struct page *);
+ int i = 0;
+
+ /* Allocate memory for the array of page pointers */
+ if (array_size <= PAGE_SIZE)
+ pages = kzalloc(array_size, GFP_KERNEL);
+ else
+ pages = vzalloc(array_size);
+ if (!pages)
+ return NULL;
+
+ gfp |= __GFP_NOWARN;
+
+ while (count) {
+ int j, order = __fls(count);
+
+ pages[i] = alloc_pages(gfp, order);
+ while (!pages[i] && order)
+ pages[i] = alloc_pages(gfp, --order);
+ if (!pages[i])
+ goto error;
+
+ if (order) {
+ split_page(pages[i], order);
+ j = 1 << order;
+ while (--j)
+ pages[i + j] = pages[i] + j;
+ }
+ /* Zero and invalidate */
+ ipu3_dmamap_clear_buffer(pages[i], PAGE_SIZE << order, attrs);
+ i += 1 << order;
+ count -= 1 << order;
+ }
+
+ return pages;
+
+error:
+ while (i--)
+ if (pages[i])
+ __free_pages(pages[i], 0);
+ if (array_size <= PAGE_SIZE)
+ kfree(pages);
+ else
+ vfree(pages);
+
+ return NULL;
+}
+
+/*
+ * Free a buffer allocated by ipu3_dmamap_alloc_buffer()
+ */
+static int ipu3_dmamap_free_buffer(struct device *dev, struct page **pages,
+ size_t size, unsigned long attrs)
+{
+ int count = size >> PAGE_SHIFT;
+ int array_size = count * sizeof(struct page *);
+ int i;
+
+ for (i = 0; i < count; i++) {
+ if (pages[i]) {
+ ipu3_dmamap_clear_buffer(pages[i], PAGE_SIZE, attrs);
+ __free_pages(pages[i], 0);
+ }
+ }
+
+ if (array_size <= PAGE_SIZE)
+ kfree(pages);
+ else
+ vfree(pages);
+ return 0;
+}
+
+/* End of code adapted from arch/arm/mm/dma-mapping.c */
+static void ipu3_dmamap_sync_single_for_cpu(struct device *dev,
+ dma_addr_t dma_handle, size_t size,
+ enum dma_data_direction dir)
+{
+ struct ipu3_mmu *mmu = to_ipu3_mmu(dev);
+ phys_addr_t paddr = iommu_iova_to_phys(mmu->domain, dma_handle);
+
+ clflush_cache_range(phys_to_virt(paddr), size);
+}
+
+/*
+ * Synchronize an sg list for CPU access. clflush both writes back and
+ * invalidates, so the same helper also serves the *_for_device direction.
+ */
+static void ipu3_dmamap_sync_sg_for_cpu(struct device *dev,
+ struct scatterlist *sglist, int nents,
+ enum dma_data_direction dir)
+{
+ struct ipu3_mmu *mmu = to_ipu3_mmu(dev);
+ struct scatterlist *sg;
+ int i;
+
+ for_each_sg(sglist, sg, nents, i) {
+ clflush_cache_range(
+ phys_to_virt(iommu_iova_to_phys(mmu->domain,
+ sg_dma_address(sg))), sg->length);
+ }
+}
+
+/**
+ * ipu3_dmamap_alloc - allocate and map a buffer into KVA
+ * @dev: struct device pointer
+ * @size: size of the buffer in bytes
+ * @dma_handle: DMA address (IOVA) of the buffer, returned to the caller
+ * @gfp: GFP flags for the allocation
+ * @attrs: DMA attributes as defined in linux/dma-mapping.h
+ *
+ * Return: kernel virtual address of the buffer on success, NULL on failure.
+ */
+static void *ipu3_dmamap_alloc(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp,
+ unsigned long attrs)
+{
+ struct ipu3_mmu *mmu = to_ipu3_mmu(dev);
+ struct page **pages;
+ struct iova *iova;
+ struct vm_struct *area;
+ int i;
+ int rval;
+
+ size = PAGE_ALIGN(size);
+
+ iova = alloc_iova(&mmu->iova_domain, size >> PAGE_SHIFT,
+ dma_get_mask(dev) >> PAGE_SHIFT, 0);
+ if (!iova)
+ return NULL;
+
+ pages = ipu3_dmamap_alloc_buffer(dev, size, gfp, attrs);
+ if (!pages)
+ goto out_free_iova;
+
+ /* Call the IOMMU driver to set up the page table */
+ for (i = 0; iova->pfn_lo + i <= iova->pfn_hi; i++) {
+ rval = iommu_map(mmu->domain,
+ (iova->pfn_lo + i) << PAGE_SHIFT,
+ page_to_phys(pages[i]), PAGE_SIZE, 0);
+ if (rval)
+ goto out_unmap;
+ }
+ /* Now grab a virtual region */
+ area = __get_vm_area(size, 0, VMALLOC_START, VMALLOC_END);
+ if (!area)
+ goto out_unmap;
+
+ area->pages = pages;
+ /* And map it in KVA */
+ if (map_vm_area(area, PAGE_KERNEL, pages))
+ goto out_vunmap;
+
+ *dma_handle = iova->pfn_lo << PAGE_SHIFT;
+
+ return area->addr;
+
+out_vunmap:
+ vunmap(area->addr);
+
+out_unmap:
+ /* Unmap whatever was mapped before freeing the pages */
+ for (i--; i >= 0; i--) {
+ iommu_unmap(mmu->domain, (iova->pfn_lo + i) << PAGE_SHIFT,
+ PAGE_SIZE);
+ }
+ ipu3_dmamap_free_buffer(dev, pages, size, attrs);
+
+out_free_iova:
+ __free_iova(&mmu->iova_domain, iova);
+
+ return NULL;
+}
+
+/*
+ * Counterpart of ipu3_dmamap_alloc
+ */
+static void ipu3_dmamap_free(struct device *dev, size_t size, void *vaddr,
+ dma_addr_t dma_handle, unsigned long attrs)
+{
+ struct ipu3_mmu *mmu = to_ipu3_mmu(dev);
+ struct vm_struct *area = find_vm_area(vaddr);
+ struct iova *iova = find_iova(&mmu->iova_domain,
+ dma_handle >> PAGE_SHIFT);
+
+ if (WARN_ON(!area) || WARN_ON(!iova))
+ return;
+
+ if (WARN_ON(!area->pages))
+ return;
+
+ size = PAGE_ALIGN(size);
+
+ iommu_unmap(mmu->domain, iova->pfn_lo << PAGE_SHIFT,
+ (iova->pfn_hi - iova->pfn_lo + 1) << PAGE_SHIFT);
+
+ __free_iova(&mmu->iova_domain, iova);
+
+ ipu3_dmamap_free_buffer(dev, area->pages, size, attrs);
+
+ vunmap(vaddr);
+}
+
+/*
+ * Insert each page into user VMA
+ */
+static int ipu3_dmamap_mmap(struct device *dev, struct vm_area_struct *vma,
+ void *addr, dma_addr_t iova, size_t size,
+ unsigned long attrs)
+{
+ struct vm_struct *area = find_vm_area(addr);
+ size_t count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+ size_t i;
+ int rval;
+
+ if (!area)
+ return -EFAULT;
+
+ if (vma->vm_start & ~PAGE_MASK)
+ return -EINVAL;
+
+ if (size > area->size)
+ return -EFAULT;
+
+ for (i = 0; i < count; i++) {
+ rval = vm_insert_page(vma, vma->vm_start + (i << PAGE_SHIFT),
+ area->pages[i]);
+ if (rval)
+ return rval;
+ }
+
+ return 0;
+}
+
+static void ipu3_dmamap_unmap_sg(struct device *dev, struct scatterlist *sglist,
+ int nents, enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ struct ipu3_mmu *mmu = to_ipu3_mmu(dev);
+ struct iova *iova = find_iova(&mmu->iova_domain,
+ sg_dma_address(sglist) >> PAGE_SHIFT);
+
+ if (!nents || WARN_ON(!iova))
+ return;
+
+ if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
+ ipu3_dmamap_sync_sg_for_cpu(dev, sglist, nents,
+ DMA_BIDIRECTIONAL);
+
+ iommu_unmap(mmu->domain, iova->pfn_lo << PAGE_SHIFT,
+ (iova->pfn_hi - iova->pfn_lo + 1) << PAGE_SHIFT);
+
+ __free_iova(&mmu->iova_domain, iova);
+}
+
+static int ipu3_dmamap_map_sg(struct device *dev, struct scatterlist *sglist,
+ int nents, enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ struct ipu3_mmu *mmu = to_ipu3_mmu(dev);
+ struct scatterlist *sg;
+ struct iova *iova;
+ size_t size = 0;
+ uint32_t iova_addr;
+ int i;
+
+ for_each_sg(sglist, sg, nents, i)
+ size += PAGE_ALIGN(sg->length) >> PAGE_SHIFT;
+
+ dev_dbg(dev, "dmamap: mapping sg %d entries, %zu pages\n", nents, size);
+
+ iova = alloc_iova(&mmu->iova_domain, size,
+ dma_get_mask(dev) >> PAGE_SHIFT, 0);
+ if (!iova)
+ return 0;
+
+ dev_dbg(dev, "dmamap: iova low pfn %lu, high pfn %lu\n", iova->pfn_lo,
+ iova->pfn_hi);
+
+ iova_addr = iova->pfn_lo;
+
+ for_each_sg(sglist, sg, nents, i) {
+ int rval;
+
+ dev_dbg(dev,
+ "dmamap: entry %d: iova 0x%8.8x, phys 0x%16.16llx\n",
+ i, iova_addr << PAGE_SHIFT, page_to_phys(sg_page(sg)));
+ rval = iommu_map(mmu->domain, iova_addr << PAGE_SHIFT,
+ page_to_phys(sg_page(sg)),
+ PAGE_ALIGN(sg->length), 0);
+ if (rval)
+ goto out_fail;
+ sg_dma_address(sg) = iova_addr << PAGE_SHIFT;
+#ifdef CONFIG_NEED_SG_DMA_LENGTH
+ sg_dma_len(sg) = sg->length;
+#endif /* CONFIG_NEED_SG_DMA_LENGTH */
+
+ iova_addr += PAGE_ALIGN(sg->length) >> PAGE_SHIFT;
+ }
+
+ if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
+ ipu3_dmamap_sync_sg_for_cpu(dev, sglist, nents,
+ DMA_BIDIRECTIONAL);
+
+ return nents;
+
+out_fail:
+ /* Nothing mapped yet: just release the IOVA; otherwise undo via unmap_sg */
+ if (i)
+ ipu3_dmamap_unmap_sg(dev, sglist, i, dir, attrs);
+ else
+ __free_iova(&mmu->iova_domain, iova);
+
+ return 0;
+}
+
+/*
+ * Create scatter-list for the already allocated DMA buffer
+ */
+static int ipu3_dmamap_get_sgtable(struct device *dev, struct sg_table *sgt,
+ void *cpu_addr, dma_addr_t handle,
+ size_t size, unsigned long attrs)
+{
+ struct vm_struct *area = find_vm_area(cpu_addr);
+ int n_pages;
+ int ret;
+
+ if (!area || (WARN_ON(!area->pages)))
+ return -ENOMEM;
+
+ n_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
+
+ ret = sg_alloc_table_from_pages(sgt, area->pages, n_pages, 0, size,
+ GFP_KERNEL);
+ if (ret)
+ dev_dbg(dev, "failed to get sgt table\n");
+
+ return ret;
+}
+
+const struct dma_map_ops ipu3_dmamap_ops = {
+ .alloc = ipu3_dmamap_alloc,
+ .free = ipu3_dmamap_free,
+ .mmap = ipu3_dmamap_mmap,
+ .map_sg = ipu3_dmamap_map_sg,
+ .unmap_sg = ipu3_dmamap_unmap_sg,
+ .sync_single_for_cpu = ipu3_dmamap_sync_single_for_cpu,
+ .sync_single_for_device = ipu3_dmamap_sync_single_for_cpu,
+ .sync_sg_for_cpu = ipu3_dmamap_sync_sg_for_cpu,
+ .sync_sg_for_device = ipu3_dmamap_sync_sg_for_cpu,
+ .get_sgtable = ipu3_dmamap_get_sgtable,
+};
+EXPORT_SYMBOL_GPL(ipu3_dmamap_ops);
diff --git a/drivers/media/pci/intel/ipu3/ipu3-dmamap.h b/drivers/media/pci/intel/ipu3/ipu3-dmamap.h
new file mode 100644
index 0000000..714bac0
--- /dev/null
+++ b/drivers/media/pci/intel/ipu3/ipu3-dmamap.h
@@ -0,0 +1,20 @@
+/*
+ * Copyright (c) 2017 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __IPU3_DMAMAP_H
+#define __IPU3_DMAMAP_H
+
+extern const struct dma_map_ops ipu3_dmamap_ops;
+
+#endif
--
2.7.4