From: Ajay Kumar <ajaykumar.rs@samsung.com>
To: linux-arm-kernel@lists.infradead.org,
iommu@lists.linux-foundation.org, joro@8bytes.org,
will@kernel.org, robin.murphy@arm.com
Cc: alim.akhtar@samsung.com, pankaj.dubey@samsung.com,
ajaykumar.rs1989@gmail.com,
Marek Szyprowski <m.szyprowski@samsung.com>,
Ajay Kumar <ajaykumar.rs@samsung.com>
Subject: [PATCH V2 5/6] iommu: dma-iommu: add support for DMA_ATTR_LOW_ADDRESS
Date: Wed, 11 May 2022 17:45:43 +0530 [thread overview]
Message-ID: <20220511121544.5998-6-ajaykumar.rs@samsung.com> (raw)
In-Reply-To: <20220511121544.5998-1-ajaykumar.rs@samsung.com>
From: Marek Szyprowski <m.szyprowski@samsung.com>
Implement support for the DMA_ATTR_LOW_ADDRESS DMA attribute. If it has
been set, call alloc_iova_first_fit() instead of alloc_iova_fast() to
allocate the new IOVA from the beginning of the address space.
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Signed-off-by: Ajay Kumar <ajaykumar.rs@samsung.com>
---
drivers/iommu/dma-iommu.c | 50 +++++++++++++++++++++++++++++----------
1 file changed, 38 insertions(+), 12 deletions(-)
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index cb235b40303c..553c5b863e19 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -601,6 +601,18 @@ static int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
}
#define DMA_ALLOC_IOVA_COHERENT BIT(0)
+#define DMA_ALLOC_IOVA_FIRST_FIT BIT(1)
+
+static unsigned int dma_attrs_to_alloc_flags(unsigned long attrs, bool coherent)
+{
+ unsigned int flags = 0;
+
+ if (coherent)
+ flags |= DMA_ALLOC_IOVA_COHERENT;
+ if (attrs & DMA_ATTR_LOW_ADDRESS)
+ flags |= DMA_ALLOC_IOVA_FIRST_FIT;
+ return flags;
+}
static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
struct device *dev, size_t size, unsigned int flags)
@@ -625,13 +637,23 @@ static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
dma_limit = min(dma_limit, (u64)domain->geometry.aperture_end);
/* Try to get PCI devices a SAC address */
- if (dma_limit > DMA_BIT_MASK(32) && !iommu_dma_forcedac && dev_is_pci(dev))
- iova = alloc_iova_fast(iovad, iova_len,
- DMA_BIT_MASK(32) >> shift, false);
+ if (dma_limit > DMA_BIT_MASK(32) && !iommu_dma_forcedac && dev_is_pci(dev)) {
+ if (unlikely(flags & DMA_ALLOC_IOVA_FIRST_FIT))
+ iova = alloc_iova_first_fit(iovad, iova_len,
+ DMA_BIT_MASK(32) >> shift);
+ else
+ iova = alloc_iova_fast(iovad, iova_len,
+ DMA_BIT_MASK(32) >> shift, false);
+ }
- if (iova == IOVA_BAD_ADDR)
- iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift,
- true);
+ if (iova == IOVA_BAD_ADDR) {
+ if (unlikely(flags & DMA_ALLOC_IOVA_FIRST_FIT))
+ iova = alloc_iova_first_fit(iovad, iova_len,
+ dma_limit >> shift);
+ else
+ iova = alloc_iova_fast(iovad, iova_len,
+ dma_limit >> shift, true);
+ }
if (iova != IOVA_BAD_ADDR)
return (dma_addr_t)iova << shift;
@@ -779,6 +801,7 @@ static struct page **__iommu_dma_alloc_noncontiguous(struct device *dev,
struct iova_domain *iovad = &cookie->iovad;
bool coherent = dev_is_dma_coherent(dev);
int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
+ unsigned int flags = dma_attrs_to_alloc_flags(attrs, true);
unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
struct page **pages;
dma_addr_t iova;
@@ -804,7 +827,7 @@ static struct page **__iommu_dma_alloc_noncontiguous(struct device *dev,
return NULL;
size = iova_align(iovad, size);
- iova = iommu_dma_alloc_iova(domain, dev, size, DMA_ALLOC_IOVA_COHERENT);
+ iova = iommu_dma_alloc_iova(domain, dev, size, flags);
if (iova == DMA_MAPPING_ERROR)
goto out_free_pages;
@@ -964,6 +987,7 @@ static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
phys_addr_t phys = page_to_phys(page) + offset;
bool coherent = dev_is_dma_coherent(dev);
int prot = dma_info_to_prot(dir, coherent, attrs);
+ unsigned int flags = dma_attrs_to_alloc_flags(attrs, false);
struct iommu_domain *domain = iommu_get_dma_domain(dev);
struct iommu_dma_cookie *cookie = domain->iova_cookie;
struct iova_domain *iovad = &cookie->iovad;
@@ -1005,7 +1029,7 @@ static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
arch_sync_dma_for_device(phys, size, dir);
- iova = __iommu_dma_map(dev, phys, size, prot, 0);
+ iova = __iommu_dma_map(dev, phys, size, prot, flags);
if (iova == DMA_MAPPING_ERROR && is_swiotlb_buffer(dev, phys))
swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);
return iova;
@@ -1152,6 +1176,7 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
struct iova_domain *iovad = &cookie->iovad;
struct scatterlist *s, *prev = NULL;
int prot = dma_info_to_prot(dir, dev_is_dma_coherent(dev), attrs);
+ unsigned int flags = dma_attrs_to_alloc_flags(attrs, false);
dma_addr_t iova;
size_t iova_len = 0;
unsigned long mask = dma_get_seg_boundary(dev);
@@ -1209,7 +1234,7 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
prev = s;
}
- iova = iommu_dma_alloc_iova(domain, dev, iova_len, 0);
+ iova = iommu_dma_alloc_iova(domain, dev, iova_len, flags);
if (iova == DMA_MAPPING_ERROR) {
ret = -ENOMEM;
goto out_restore_sg;
@@ -1268,7 +1293,8 @@ static dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
size_t size, enum dma_data_direction dir, unsigned long attrs)
{
return __iommu_dma_map(dev, phys, size,
- dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO, 0);
+ dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO,
+ dma_attrs_to_alloc_flags(attrs, false));
}
static void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
@@ -1357,6 +1383,7 @@ static void *iommu_dma_alloc(struct device *dev, size_t size,
{
bool coherent = dev_is_dma_coherent(dev);
int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
+ unsigned int flags = dma_attrs_to_alloc_flags(attrs, true);
struct page *page = NULL;
void *cpu_addr;
@@ -1377,8 +1404,7 @@ static void *iommu_dma_alloc(struct device *dev, size_t size,
if (!cpu_addr)
return NULL;
- *handle = __iommu_dma_map(dev, page_to_phys(page), size, ioprot,
- DMA_ALLOC_IOVA_COHERENT);
+ *handle = __iommu_dma_map(dev, page_to_phys(page), size, ioprot, flags);
if (*handle == DMA_MAPPING_ERROR) {
__iommu_dma_free(dev, size, cpu_addr);
return NULL;
--
2.17.1
_______________________________________________
linux-arm-kernel mailing list
linux-arm-kernel@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-arm-kernel
next prev parent reply other threads:[~2022-05-12 3:41 UTC|newest]
Thread overview: 13+ messages / expand[flat|nested] mbox.gz Atom feed top
[not found] <CGME20220511121425epcas5p256b55554b32dc58566827818a817ac44@epcas5p2.samsung.com>
2022-05-11 12:15 ` [PATCH V2 0/6] IOMMU-DMA - support DMA_ATTR_LOW_ADDRESS attribute Ajay Kumar
[not found] ` <CGME20220511121429epcas5p2d91f585a555e51b1c11e9e02c1b8dc15@epcas5p2.samsung.com>
2022-05-11 12:15 ` [PATCH V2 1/6] dma-mapping: add " Ajay Kumar
[not found] ` <CGME20220511121433epcas5p3de77375a85edf1aa78c0976a7107fdfa@epcas5p3.samsung.com>
2022-05-11 12:15 ` [PATCH V2 2/6] iommu: iova: properly handle 0 as a valid IOVA address Ajay Kumar
2022-05-23 17:30 ` Robin Murphy
2022-05-30 13:27 ` Ajay Kumar
2022-06-02 15:58 ` Marek Szyprowski
2022-06-06 12:38 ` Robin Murphy
2022-06-17 14:30 ` Marek Szyprowski
[not found] ` <CGME20220511121437epcas5p29d2210065b47346840c9c6ac14b0e585@epcas5p2.samsung.com>
2022-05-11 12:15 ` [PATCH V2 3/6] iommu: iova: add support for 'first-fit' algorithm Ajay Kumar
[not found] ` <CGME20220511121439epcas5p493bf4b94c89c8a63fdc0586c89cea8df@epcas5p4.samsung.com>
2022-05-11 12:15 ` [PATCH V2 4/6] iommu: dma-iommu: refactor iommu_dma_alloc_iova() Ajay Kumar
[not found] ` <CGME20220511121442epcas5p26a997a19e8cc1de8eb23123500fb24fb@epcas5p2.samsung.com>
2022-05-11 12:15 ` Ajay Kumar [this message]
[not found] ` <CGME20220511121445epcas5p377ef245c4f5a0bf282245877d2b922c8@epcas5p3.samsung.com>
2022-05-11 12:15 ` [PATCH V2 6/6] media: platform: s5p-mfc: use DMA_ATTR_LOW_ADDRESS Ajay Kumar
2022-05-23 15:54 ` [PATCH V2 0/6] IOMMU-DMA - support DMA_ATTR_LOW_ADDRESS attribute Ajay Kumar
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20220511121544.5998-6-ajaykumar.rs@samsung.com \
--to=ajaykumar.rs@samsung.com \
--cc=ajaykumar.rs1989@gmail.com \
--cc=alim.akhtar@samsung.com \
--cc=iommu@lists.linux-foundation.org \
--cc=joro@8bytes.org \
--cc=linux-arm-kernel@lists.infradead.org \
--cc=m.szyprowski@samsung.com \
--cc=pankaj.dubey@samsung.com \
--cc=robin.murphy@arm.com \
--cc=will@kernel.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).