linux-arm-kernel.lists.infradead.org archive mirror
 help / color / mirror / Atom feed
* [PATCH 1/2] [RFC] arm: iommu: Refactoring common code
@ 2014-03-13 20:23 Ritesh Harjani
  2014-03-19  2:58 ` Ritesh Harjani
  0 siblings, 1 reply; 3+ messages in thread
From: Ritesh Harjani @ 2014-03-13 20:23 UTC (permalink / raw)
  To: linux-arm-kernel

Hi Everyone,

Please find below a patch that refactors the common code out of
arch/arm/mm/dma-mapping.c into lib/iommu-helper.c.

This is just an initial version of the patch, sent to gather feedback and
to confirm whether this is how we want to plan refactoring the iommu code
into lib/iommu-helper.

Please let me know of any changes or suggestions you have for this.



Move the common code for buffer allocation and mapping for iommu
out of arch/arm and into lib/iommu-helper.

Also move some related functions out into lib/iommu-helper.c.

Signed-off-by: Ritesh Harjani <ritesh.harjani@gmail.com>
---
 arch/arm/mm/dma-mapping.c    | 121 +++++---------------------------------
 include/linux/iommu-helper.h |  13 ++++
 lib/iommu-helper.c           | 137 +++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 165 insertions(+), 106 deletions(-)

diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 11b3914..ae301c8e 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -26,6 +26,7 @@
 #include <linux/io.h>
 #include <linux/vmalloc.h>
 #include <linux/sizes.h>
+#include <linux/iommu-helper.h>

 #include <asm/memory.h>
 #include <asm/highmem.h>
@@ -1119,91 +1120,26 @@ static struct page
**__iommu_alloc_buffer(struct device *dev, size_t size,
 {
        struct page **pages;
        int count = size >> PAGE_SHIFT;
-       int array_size = count * sizeof(struct page *);
        int i = 0;

-       if (array_size <= PAGE_SIZE)
-               pages = kzalloc(array_size, gfp);
-       else
-               pages = vzalloc(array_size);
-       if (!pages)
-               return NULL;
-
-       if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs))
-       {
-               unsigned long order = get_order(size);
-               struct page *page;
-
-               page = dma_alloc_from_contiguous(dev, count, order);
-               if (!page)
-                       goto error;
-
-               __dma_clear_buffer(page, size);
-
-               for (i = 0; i < count; i++)
-                       pages[i] = page + i;
-
-               return pages;
-       }
+       pages = iommu_alloc_buffer(dev, size, gfp, attrs);

-       /*
-        * IOMMU can map any pages, so himem can also be used here
-        */
-       gfp |= __GFP_NOWARN | __GFP_HIGHMEM;
-
-       while (count) {
-               int j, order = __fls(count);
-
-               pages[i] = alloc_pages(gfp, order);
-               while (!pages[i] && order)
-                       pages[i] = alloc_pages(gfp, --order);
-               if (!pages[i])
-                       goto error;
-
-               if (order) {
-                       split_page(pages[i], order);
-                       j = 1 << order;
-                       while (--j)
-                               pages[i + j] = pages[i] + j;
+       if (!pages) {
+               return NULL;
+       } else {
+               while (count--) {
+                       __dma_clear_buffer(pages[i], PAGE_SIZE);
+                       i++;
                }
-
-               __dma_clear_buffer(pages[i], PAGE_SIZE << order);
-               i += 1 << order;
-               count -= 1 << order;
        }
-
        return pages;
-error:
-       while (i--)
-               if (pages[i])
-                       __free_pages(pages[i], 0);
-       if (array_size <= PAGE_SIZE)
-               kfree(pages);
-       else
-               vfree(pages);
-       return NULL;
+
 }

 static int __iommu_free_buffer(struct device *dev, struct page **pages,
                               size_t size, struct dma_attrs *attrs)
 {
-       int count = size >> PAGE_SHIFT;
-       int array_size = count * sizeof(struct page *);
-       int i;
-
-       if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs)) {
-               dma_release_from_contiguous(dev, pages[0], count);
-       } else {
-               for (i = 0; i < count; i++)
-                       if (pages[i])
-                               __free_pages(pages[i], 0);
-       }
-
-       if (array_size <= PAGE_SIZE)
-               kfree(pages);
-       else
-               vfree(pages);
-       return 0;
+       return iommu_free_buffer(dev, pages, size, attrs);
 }

 /*
@@ -1246,51 +1182,24 @@ static dma_addr_t
 __iommu_create_mapping(struct device *dev, struct page **pages, size_t size)
 {
        struct dma_iommu_mapping *mapping = dev->archdata.mapping;
-       unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        dma_addr_t dma_addr, iova;
-       int i, ret = DMA_ERROR_CODE;

        dma_addr = __alloc_iova(mapping, size);
        if (dma_addr == DMA_ERROR_CODE)
                return dma_addr;

-       iova = dma_addr;
-       for (i = 0; i < count; ) {
-               unsigned int next_pfn = page_to_pfn(pages[i]) + 1;
-               phys_addr_t phys = page_to_phys(pages[i]);
-               unsigned int len, j;
-
-               for (j = i + 1; j < count; j++, next_pfn++)
-                       if (page_to_pfn(pages[j]) != next_pfn)
-                               break;
+       iova = iommu_mapper(mapping->domain, pages, dma_addr, size);
+       if (iova == DMA_ERROR_CODE)
+               __free_iova(mapping, dma_addr, size);

-               len = (j - i) << PAGE_SHIFT;
-               ret = iommu_map(mapping->domain, iova, phys, len,
-                               IOMMU_READ|IOMMU_WRITE);
-               if (ret < 0)
-                       goto fail;
-               iova += len;
-               i = j;
-       }
-       return dma_addr;
-fail:
-       iommu_unmap(mapping->domain, dma_addr, iova-dma_addr);
-       __free_iova(mapping, dma_addr, size);
-       return DMA_ERROR_CODE;
+       return iova;
 }

 static int __iommu_remove_mapping(struct device *dev, dma_addr_t
iova, size_t size)
 {
        struct dma_iommu_mapping *mapping = dev->archdata.mapping;

-       /*
-        * add optional in-page offset from iova to size and align
-        * result to page size
-        */
-       size = PAGE_ALIGN((iova & ~PAGE_MASK) + size);
-       iova &= PAGE_MASK;
-
-       iommu_unmap(mapping->domain, iova, size);
+       iommu_unmapper(mapping->domain, iova, size);
        __free_iova(mapping, iova, size);
        return 0;
 }
diff --git a/include/linux/iommu-helper.h b/include/linux/iommu-helper.h
index 86bdeff..d580508 100644
--- a/include/linux/iommu-helper.h
+++ b/include/linux/iommu-helper.h
@@ -2,6 +2,7 @@
 #define _LINUX_IOMMU_HELPER_H

 #include <linux/kernel.h>
+#include <linux/iommu.h>

 static inline unsigned long iommu_device_max_index(unsigned long size,
                                                   unsigned long offset,
@@ -31,4 +32,16 @@ static inline unsigned long
iommu_num_pages(unsigned long addr,
        return DIV_ROUND_UP(size, io_page_size);
 }

+extern struct page **iommu_alloc_buffer(struct device *dev, size_t size,
+                                         gfp_t gfp, struct dma_attrs *attrs);
+
+extern int iommu_free_buffer(struct device *dev, struct page **pages,
+                              size_t size, struct dma_attrs *attrs);
+
+extern dma_addr_t iommu_mapper(struct iommu_domain *domain,
+                       struct page **pages, dma_addr_t iova, size_t size);
+
+extern void iommu_unmapper(struct iommu_domain *domain, dma_addr_t iova,
+                       size_t size);
+
 #endif
diff --git a/lib/iommu-helper.c b/lib/iommu-helper.c
index c27e269..b6ea51b 100644
--- a/lib/iommu-helper.c
+++ b/lib/iommu-helper.c
@@ -5,6 +5,13 @@
 #include <linux/export.h>
 #include <linux/bitmap.h>
 #include <linux/bug.h>
+#include <linux/dma-mapping.h>
+#include <linux/dma-contiguous.h>
+#include <linux/slab.h>
+#include <linux/iommu.h>
+#include <linux/io.h>
+#include <linux/vmalloc.h>
+#include <linux/sizes.h>

 int iommu_is_span_boundary(unsigned int index, unsigned int nr,
                           unsigned long shift,
@@ -39,3 +46,133 @@ again:
        return -1;
 }
 EXPORT_SYMBOL(iommu_area_alloc);
+
+struct page **iommu_alloc_buffer(struct device *dev, size_t size,
+                                         gfp_t gfp, struct dma_attrs *attrs)
+{
+       struct page **pages;
+       int count = size >> PAGE_SHIFT;
+       int array_size = count * sizeof(struct page *);
+       int i = 0;
+
+       if (array_size <= PAGE_SIZE)
+               pages = kzalloc(array_size, gfp);
+       else
+               pages = vzalloc(array_size);
+       if (!pages)
+               return NULL;
+
+       if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs)) {
+               unsigned long order = get_order(size);
+               struct page *page;
+
+               page = dma_alloc_from_contiguous(dev, count, order);
+               if (!page)
+                       goto error;
+
+               for (i = 0; i < count; i++)
+                       pages[i] = page + i;
+
+               return pages;
+       }
+
+       /*
+        * IOMMU can map any pages, so himem can also be used here
+        */
+       gfp |= __GFP_NOWARN | __GFP_HIGHMEM;
+
+       while (count) {
+               int j, order = __fls(count);
+
+               pages[i] = alloc_pages(gfp, order);
+               while (!pages[i] && order)
+                       pages[i] = alloc_pages(gfp, --order);
+               if (!pages[i])
+                       goto error;
+
+               if (order) {
+                       split_page(pages[i], order);
+                       j = 1 << order;
+                       while (--j)
+                               pages[i + j] = pages[i] + j;
+               }
+
+               i += 1 << order;
+               count -= 1 << order;
+       }
+
+       return pages;
+error:
+       while (i--)
+               if (pages[i])
+                       __free_pages(pages[i], 0);
+       if (array_size <= PAGE_SIZE)
+               kfree(pages);
+       else
+               vfree(pages);
+       return NULL;
+}
+
+int iommu_free_buffer(struct device *dev, struct page **pages,
+                              size_t size, struct dma_attrs *attrs)
+{
+       int count = size >> PAGE_SHIFT;
+       int array_size = count * sizeof(struct page *);
+       int i;
+
+       if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs)) {
+               dma_release_from_contiguous(dev, pages[0], count);
+       } else {
+               for (i = 0; i < count; i++)
+                       if (pages[i])
+                               __free_pages(pages[i], 0);
+       }
+
+       if (array_size <= PAGE_SIZE)
+               kfree(pages);
+       else
+               vfree(pages);
+       return 0;
+}
+
+dma_addr_t iommu_mapper(struct iommu_domain *domain, struct page **pages,
+                       dma_addr_t iova, size_t size)
+{
+       dma_addr_t dma_addr = iova;
+       unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+       int i, ret = DMA_ERROR_CODE;
+
+       for (i = 0; i < count; ) {
+               unsigned int next_pfn = page_to_pfn(pages[i]) + 1;
+               phys_addr_t phys = page_to_phys(pages[i]);
+               unsigned int len, j;
+
+               for (j = i + 1; j < count; j++, next_pfn++)
+                       if (page_to_pfn(pages[j]) != next_pfn)
+                               break;
+
+               len = (j - i) << PAGE_SHIFT;
+               ret = iommu_map(domain, iova, phys, len,
+                               IOMMU_READ|IOMMU_WRITE);
+               if (ret < 0)
+                       goto fail;
+               iova += len;
+               i = j;
+       }
+       return dma_addr;
+fail:
+       iommu_unmap(domain, dma_addr, iova-dma_addr);
+       return DMA_ERROR_CODE;
+}
+
+void iommu_unmapper(struct iommu_domain *domain, dma_addr_t iova, size_t size)
+{
+       /*
+        * add optional in-page offset from iova to size and align
+        * result to page size
+        */
+       size = PAGE_ALIGN((iova & ~PAGE_MASK) + size);
+       iova &= PAGE_MASK;
+
+       iommu_unmap(domain, iova, size);
+}
--
1.8.1.3

^ permalink raw reply related	[flat|nested] 3+ messages in thread

* [PATCH 1/2] [RFC] arm: iommu: Refactoring common code
  2014-03-13 20:23 [PATCH 1/2] [RFC] arm: iommu: Refactoring common code Ritesh Harjani
@ 2014-03-19  2:58 ` Ritesh Harjani
  2014-03-19  9:57   ` Catalin Marinas
  0 siblings, 1 reply; 3+ messages in thread
From: Ritesh Harjani @ 2014-03-19  2:58 UTC (permalink / raw)
  To: linux-arm-kernel

Hi Catalin, Will, Arnd, Russell,


Could I get some suggestions/comments on this to take the discussion forward?

Thanks
Ritesh


On Fri, Mar 14, 2014 at 1:53 AM, Ritesh Harjani
<ritesh.harjani@gmail.com> wrote:
> Hi Everyone,
>
> Please find the following patch as refactoring of the common code out
> from arch/arm/mm/dma-mapping.c to lib/iommu-helper.c
>
> This is just an initial version of patch to get more details and to
> know if this is how we want to plan refactoring iommu code to
> lib/iommu-helper.
>
> Please let me know the changes/suggestion which you think in this ?
>
>
>
> Taking out the common code of buffer allocation and mapping
> for iommu from arch/arm to lib/iommu-helper file.
>
> Rearranging some functions outside to lib/iommu-helper.c
>
> Signed-off-by: Ritesh Harjani <ritesh.harjani@gmail.com>
> ---
>  arch/arm/mm/dma-mapping.c    | 121 +++++---------------------------------
>  include/linux/iommu-helper.h |  13 ++++
>  lib/iommu-helper.c           | 137 +++++++++++++++++++++++++++++++++++++++++++
>  3 files changed, 165 insertions(+), 106 deletions(-)
>
> diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
> index 11b3914..ae301c8e 100644
> --- a/arch/arm/mm/dma-mapping.c
> +++ b/arch/arm/mm/dma-mapping.c
> @@ -26,6 +26,7 @@
>  #include <linux/io.h>
>  #include <linux/vmalloc.h>
>  #include <linux/sizes.h>
> +#include <linux/iommu-helper.h>
>
>  #include <asm/memory.h>
>  #include <asm/highmem.h>
> @@ -1119,91 +1120,26 @@ static struct page
> **__iommu_alloc_buffer(struct device *dev, size_t size,
>  {
>         struct page **pages;
>         int count = size >> PAGE_SHIFT;
> -       int array_size = count * sizeof(struct page *);
>         int i = 0;
>
> -       if (array_size <= PAGE_SIZE)
> -               pages = kzalloc(array_size, gfp);
> -       else
> -               pages = vzalloc(array_size);
> -       if (!pages)
> -               return NULL;
> -
> -       if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs))
> -       {
> -               unsigned long order = get_order(size);
> -               struct page *page;
> -
> -               page = dma_alloc_from_contiguous(dev, count, order);
> -               if (!page)
> -                       goto error;
> -
> -               __dma_clear_buffer(page, size);
> -
> -               for (i = 0; i < count; i++)
> -                       pages[i] = page + i;
> -
> -               return pages;
> -       }
> +       pages = iommu_alloc_buffer(dev, size, gfp, attrs);
>
> -       /*
> -        * IOMMU can map any pages, so himem can also be used here
> -        */
> -       gfp |= __GFP_NOWARN | __GFP_HIGHMEM;
> -
> -       while (count) {
> -               int j, order = __fls(count);
> -
> -               pages[i] = alloc_pages(gfp, order);
> -               while (!pages[i] && order)
> -                       pages[i] = alloc_pages(gfp, --order);
> -               if (!pages[i])
> -                       goto error;
> -
> -               if (order) {
> -                       split_page(pages[i], order);
> -                       j = 1 << order;
> -                       while (--j)
> -                               pages[i + j] = pages[i] + j;
> +       if (!pages) {
> +               return NULL;
> +       } else {
> +               while (count--) {
> +                       __dma_clear_buffer(pages[i], PAGE_SIZE);
> +                       i++;
>                 }
> -
> -               __dma_clear_buffer(pages[i], PAGE_SIZE << order);
> -               i += 1 << order;
> -               count -= 1 << order;
>         }
> -
>         return pages;
> -error:
> -       while (i--)
> -               if (pages[i])
> -                       __free_pages(pages[i], 0);
> -       if (array_size <= PAGE_SIZE)
> -               kfree(pages);
> -       else
> -               vfree(pages);
> -       return NULL;
> +
>  }
>
>  static int __iommu_free_buffer(struct device *dev, struct page **pages,
>                                size_t size, struct dma_attrs *attrs)
>  {
> -       int count = size >> PAGE_SHIFT;
> -       int array_size = count * sizeof(struct page *);
> -       int i;
> -
> -       if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs)) {
> -               dma_release_from_contiguous(dev, pages[0], count);
> -       } else {
> -               for (i = 0; i < count; i++)
> -                       if (pages[i])
> -                               __free_pages(pages[i], 0);
> -       }
> -
> -       if (array_size <= PAGE_SIZE)
> -               kfree(pages);
> -       else
> -               vfree(pages);
> -       return 0;
> +       return iommu_free_buffer(dev, pages, size, attrs);
>  }
>
>  /*
> @@ -1246,51 +1182,24 @@ static dma_addr_t
>  __iommu_create_mapping(struct device *dev, struct page **pages, size_t size)
>  {
>         struct dma_iommu_mapping *mapping = dev->archdata.mapping;
> -       unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
>         dma_addr_t dma_addr, iova;
> -       int i, ret = DMA_ERROR_CODE;
>
>         dma_addr = __alloc_iova(mapping, size);
>         if (dma_addr == DMA_ERROR_CODE)
>                 return dma_addr;
>
> -       iova = dma_addr;
> -       for (i = 0; i < count; ) {
> -               unsigned int next_pfn = page_to_pfn(pages[i]) + 1;
> -               phys_addr_t phys = page_to_phys(pages[i]);
> -               unsigned int len, j;
> -
> -               for (j = i + 1; j < count; j++, next_pfn++)
> -                       if (page_to_pfn(pages[j]) != next_pfn)
> -                               break;
> +       iova = iommu_mapper(mapping->domain, pages, dma_addr, size);
> +       if (iova == DMA_ERROR_CODE)
> +               __free_iova(mapping, dma_addr, size);
>
> -               len = (j - i) << PAGE_SHIFT;
> -               ret = iommu_map(mapping->domain, iova, phys, len,
> -                               IOMMU_READ|IOMMU_WRITE);
> -               if (ret < 0)
> -                       goto fail;
> -               iova += len;
> -               i = j;
> -       }
> -       return dma_addr;
> -fail:
> -       iommu_unmap(mapping->domain, dma_addr, iova-dma_addr);
> -       __free_iova(mapping, dma_addr, size);
> -       return DMA_ERROR_CODE;
> +       return iova;
>  }
>
>  static int __iommu_remove_mapping(struct device *dev, dma_addr_t
> iova, size_t size)
>  {
>         struct dma_iommu_mapping *mapping = dev->archdata.mapping;
>
> -       /*
> -        * add optional in-page offset from iova to size and align
> -        * result to page size
> -        */
> -       size = PAGE_ALIGN((iova & ~PAGE_MASK) + size);
> -       iova &= PAGE_MASK;
> -
> -       iommu_unmap(mapping->domain, iova, size);
> +       iommu_unmapper(mapping->domain, iova, size);
>         __free_iova(mapping, iova, size);
>         return 0;
>  }
> diff --git a/include/linux/iommu-helper.h b/include/linux/iommu-helper.h
> index 86bdeff..d580508 100644
> --- a/include/linux/iommu-helper.h
> +++ b/include/linux/iommu-helper.h
> @@ -2,6 +2,7 @@
>  #define _LINUX_IOMMU_HELPER_H
>
>  #include <linux/kernel.h>
> +#include <linux/iommu.h>
>
>  static inline unsigned long iommu_device_max_index(unsigned long size,
>                                                    unsigned long offset,
> @@ -31,4 +32,16 @@ static inline unsigned long
> iommu_num_pages(unsigned long addr,
>         return DIV_ROUND_UP(size, io_page_size);
>  }
>
> +extern struct page **iommu_alloc_buffer(struct device *dev, size_t size,
> +                                         gfp_t gfp, struct dma_attrs *attrs);
> +
> +extern int iommu_free_buffer(struct device *dev, struct page **pages,
> +                              size_t size, struct dma_attrs *attrs);
> +
> +extern dma_addr_t iommu_mapper(struct iommu_domain *domain,
> +                       struct page **pages, dma_addr_t iova, size_t size);
> +
> +extern void iommu_unmapper(struct iommu_domain *domain, dma_addr_t iova,
> +                       size_t size);
> +
>  #endif
> diff --git a/lib/iommu-helper.c b/lib/iommu-helper.c
> index c27e269..b6ea51b 100644
> --- a/lib/iommu-helper.c
> +++ b/lib/iommu-helper.c
> @@ -5,6 +5,13 @@
>  #include <linux/export.h>
>  #include <linux/bitmap.h>
>  #include <linux/bug.h>
> +#include <linux/dma-mapping.h>
> +#include <linux/dma-contiguous.h>
> +#include <linux/slab.h>
> +#include <linux/iommu.h>
> +#include <linux/io.h>
> +#include <linux/vmalloc.h>
> +#include <linux/sizes.h>
>
>  int iommu_is_span_boundary(unsigned int index, unsigned int nr,
>                            unsigned long shift,
> @@ -39,3 +46,133 @@ again:
>         return -1;
>  }
>  EXPORT_SYMBOL(iommu_area_alloc);
> +
> +struct page **iommu_alloc_buffer(struct device *dev, size_t size,
> +                                         gfp_t gfp, struct dma_attrs *attrs)
> +{
> +       struct page **pages;
> +       int count = size >> PAGE_SHIFT;
> +       int array_size = count * sizeof(struct page *);
> +       int i = 0;
> +
> +       if (array_size <= PAGE_SIZE)
> +               pages = kzalloc(array_size, gfp);
> +       else
> +               pages = vzalloc(array_size);
> +       if (!pages)
> +               return NULL;
> +
> +       if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs)) {
> +               unsigned long order = get_order(size);
> +               struct page *page;
> +
> +               page = dma_alloc_from_contiguous(dev, count, order);
> +               if (!page)
> +                       goto error;
> +
> +               for (i = 0; i < count; i++)
> +                       pages[i] = page + i;
> +
> +               return pages;
> +       }
> +
> +       /*
> +        * IOMMU can map any pages, so himem can also be used here
> +        */
> +       gfp |= __GFP_NOWARN | __GFP_HIGHMEM;
> +
> +       while (count) {
> +               int j, order = __fls(count);
> +
> +               pages[i] = alloc_pages(gfp, order);
> +               while (!pages[i] && order)
> +                       pages[i] = alloc_pages(gfp, --order);
> +               if (!pages[i])
> +                       goto error;
> +
> +               if (order) {
> +                       split_page(pages[i], order);
> +                       j = 1 << order;
> +                       while (--j)
> +                               pages[i + j] = pages[i] + j;
> +               }
> +
> +               i += 1 << order;
> +               count -= 1 << order;
> +       }
> +
> +       return pages;
> +error:
> +       while (i--)
> +               if (pages[i])
> +                       __free_pages(pages[i], 0);
> +       if (array_size <= PAGE_SIZE)
> +               kfree(pages);
> +       else
> +               vfree(pages);
> +       return NULL;
> +}
> +
> +int iommu_free_buffer(struct device *dev, struct page **pages,
> +                              size_t size, struct dma_attrs *attrs)
> +{
> +       int count = size >> PAGE_SHIFT;
> +       int array_size = count * sizeof(struct page *);
> +       int i;
> +
> +       if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs)) {
> +               dma_release_from_contiguous(dev, pages[0], count);
> +       } else {
> +               for (i = 0; i < count; i++)
> +                       if (pages[i])
> +                               __free_pages(pages[i], 0);
> +       }
> +
> +       if (array_size <= PAGE_SIZE)
> +               kfree(pages);
> +       else
> +               vfree(pages);
> +       return 0;
> +}
> +
> +dma_addr_t iommu_mapper(struct iommu_domain *domain, struct page **pages,
> +                       dma_addr_t iova, size_t size)
> +{
> +       dma_addr_t dma_addr = iova;
> +       unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
> +       int i, ret = DMA_ERROR_CODE;
> +
> +       for (i = 0; i < count; ) {
> +               unsigned int next_pfn = page_to_pfn(pages[i]) + 1;
> +               phys_addr_t phys = page_to_phys(pages[i]);
> +               unsigned int len, j;
> +
> +               for (j = i + 1; j < count; j++, next_pfn++)
> +                       if (page_to_pfn(pages[j]) != next_pfn)
> +                               break;
> +
> +               len = (j - i) << PAGE_SHIFT;
> +               ret = iommu_map(domain, iova, phys, len,
> +                               IOMMU_READ|IOMMU_WRITE);
> +               if (ret < 0)
> +                       goto fail;
> +               iova += len;
> +               i = j;
> +       }
> +       return dma_addr;
> +fail:
> +       iommu_unmap(domain, dma_addr, iova-dma_addr);
> +       return DMA_ERROR_CODE;
> +}
> +
> +void iommu_unmapper(struct iommu_domain *domain, dma_addr_t iova, size_t size)
> +{
> +       /*
> +        * add optional in-page offset from iova to size and align
> +        * result to page size
> +        */
> +       size = PAGE_ALIGN((iova & ~PAGE_MASK) + size);
> +       iova &= PAGE_MASK;
> +
> +       iommu_unmap(domain, iova, size);
> +}
> --
> 1.8.1.3

^ permalink raw reply	[flat|nested] 3+ messages in thread

* [PATCH 1/2] [RFC] arm: iommu: Refactoring common code
  2014-03-19  2:58 ` Ritesh Harjani
@ 2014-03-19  9:57   ` Catalin Marinas
  0 siblings, 0 replies; 3+ messages in thread
From: Catalin Marinas @ 2014-03-19  9:57 UTC (permalink / raw)
  To: linux-arm-kernel

On Wed, Mar 19, 2014 at 02:58:26AM +0000, Ritesh Harjani wrote:
> Some suggestions/comments on this to take this discussion forward ?

Just a bit of patience ;). It's likely that you won't get much feedback
before 3.15-rc1 as people are busy preparing for the merging window.

-- 
Catalin

^ permalink raw reply	[flat|nested] 3+ messages in thread

end of thread, other threads:[~2014-03-19  9:57 UTC | newest]

Thread overview: 3+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2014-03-13 20:23 [PATCH 1/2] [RFC] arm: iommu: Refactoring common code Ritesh Harjani
2014-03-19  2:58 ` Ritesh Harjani
2014-03-19  9:57   ` Catalin Marinas

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).