From: Leon Romanovsky <leon@kernel.org>
To: Marek Szyprowski <m.szyprowski@samsung.com>,
Robin Murphy <robin.murphy@arm.com>,
Russell King <linux@armlinux.org.uk>,
Juergen Gross <jgross@suse.com>,
Stefano Stabellini <sstabellini@kernel.org>,
Oleksandr Tyshchenko <oleksandr_tyshchenko@epam.com>,
Richard Henderson <richard.henderson@linaro.org>,
Matt Turner <mattst88@gmail.com>,
Thomas Bogendoerfer <tsbogend@alpha.franken.de>,
"James E.J. Bottomley" <James.Bottomley@HansenPartnership.com>,
Helge Deller <deller@gmx.de>,
Madhavan Srinivasan <maddy@linux.ibm.com>,
Michael Ellerman <mpe@ellerman.id.au>,
Nicholas Piggin <npiggin@gmail.com>,
Christophe Leroy <christophe.leroy@csgroup.eu>,
Geoff Levand <geoff@infradead.org>,
"David S. Miller" <davem@davemloft.net>,
Andreas Larsson <andreas@gaisler.com>,
Thomas Gleixner <tglx@linutronix.de>,
Ingo Molnar <mingo@redhat.com>, Borislav Petkov <bp@alien8.de>,
Dave Hansen <dave.hansen@linux.intel.com>,
x86@kernel.org, "H. Peter Anvin" <hpa@zytor.com>
Cc: iommu@lists.linux.dev, linux-kernel@vger.kernel.org,
linux-arm-kernel@lists.infradead.org,
xen-devel@lists.xenproject.org, linux-alpha@vger.kernel.org,
linux-mips@vger.kernel.org, linux-parisc@vger.kernel.org,
linuxppc-dev@lists.ozlabs.org, sparclinux@vger.kernel.org,
Jason Gunthorpe <jgg@ziepe.ca>, Jason Gunthorpe <jgg@nvidia.com>
Subject: [PATCH v5 04/14] ARM: dma-mapping: Switch to physical address mapping callbacks
Date: Wed, 15 Oct 2025 12:12:50 +0300
Message-ID: <20251015-remove-map-page-v5-4-3bbfe3a25cdf@kernel.org>
In-Reply-To: <20251015-remove-map-page-v5-0-3bbfe3a25cdf@kernel.org>

From: Leon Romanovsky <leonro@nvidia.com>

Combine the resource and page mapping routines into a single function
that handles both flows in the same manner. This conversion allows us
to remove the .map_resource/.unmap_resource callbacks completely.

Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
---
arch/arm/mm/dma-mapping.c | 100 +++++++++++-----------------------------------
1 file changed, 23 insertions(+), 77 deletions(-)
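
For easier review, here is the new mapping routine reconstructed from the
hunks below into one piece. This is a hand-assembled sketch rather than a
copy of the resulting file, but it uses only names that appear in the diff:
DMA_ATTR_MMIO now selects the behaviour that .map_resource used to provide
(IOMMU_MMIO protection via __dma_info_to_prot() and no CPU cache
maintenance), while page-backed memory keeps the existing sync and IOVA
handling.

static dma_addr_t arm_iommu_map_phys(struct device *dev, phys_addr_t phys,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
	int len = PAGE_ALIGN(size + offset_in_page(phys));
	phys_addr_t addr = phys & PAGE_MASK;
	dma_addr_t dma_addr;
	int ret, prot;

	/* MMIO space is not RAM, so skip CPU cache maintenance for it */
	if (!dev->dma_coherent &&
	    !(attrs & (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_MMIO)))
		arch_sync_dma_for_device(phys, size, dir);

	dma_addr = __alloc_iova(mapping, len);
	if (dma_addr == DMA_MAPPING_ERROR)
		return dma_addr;

	/* __dma_info_to_prot() adds IOMMU_MMIO when DMA_ATTR_MMIO is set */
	prot = __dma_info_to_prot(dir, attrs);

	ret = iommu_map(mapping->domain, dma_addr, addr, len, prot, GFP_KERNEL);
	if (ret < 0)
		goto fail;

	return dma_addr + offset_in_page(phys);
fail:
	__free_iova(mapping, dma_addr, len);
	return DMA_MAPPING_ERROR;
}
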
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index b0310d6762d5..a4c765d24692 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -732,6 +732,9 @@ static int __dma_info_to_prot(enum dma_data_direction dir, unsigned long attrs)
if (attrs & DMA_ATTR_PRIVILEGED)
prot |= IOMMU_PRIV;
+ if (attrs & DMA_ATTR_MMIO)
+ prot |= IOMMU_MMIO;
+
switch (dir) {
case DMA_BIDIRECTIONAL:
return prot | IOMMU_READ | IOMMU_WRITE;
@@ -1350,25 +1353,27 @@ static void arm_iommu_sync_sg_for_device(struct device *dev,
}
/**
- * arm_iommu_map_page
+ * arm_iommu_map_phys
* @dev: valid struct device pointer
- * @page: page that buffer resides in
- * @offset: offset into page for start of buffer
+ * @phys: physical address that buffer resides in
* @size: size of buffer to map
* @dir: DMA transfer direction
+ * @attrs: DMA mapping attributes
*
* IOMMU aware version of arm_dma_map_page()
*/
-static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size, enum dma_data_direction dir,
- unsigned long attrs)
+static dma_addr_t arm_iommu_map_phys(struct device *dev, phys_addr_t phys,
+ size_t size, enum dma_data_direction dir, unsigned long attrs)
{
struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
+ int len = PAGE_ALIGN(size + offset_in_page(phys));
+ phys_addr_t addr = phys & PAGE_MASK;
dma_addr_t dma_addr;
- int ret, prot, len = PAGE_ALIGN(size + offset);
+ int ret, prot;
- if (!dev->dma_coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
- arch_sync_dma_for_device(page_to_phys(page), offset, size, dir);
+ if (!dev->dma_coherent &&
+ !(attrs & (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_MMIO)))
+ arch_sync_dma_for_device(phys, size, dir);
dma_addr = __alloc_iova(mapping, len);
if (dma_addr == DMA_MAPPING_ERROR)
@@ -1376,12 +1381,11 @@ static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
prot = __dma_info_to_prot(dir, attrs);
- ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len,
- prot, GFP_KERNEL);
+ ret = iommu_map(mapping->domain, dma_addr, addr, len, prot, GFP_KERNEL);
if (ret < 0)
goto fail;
- return dma_addr + offset;
+ return dma_addr + offset_in_page(phys);
fail:
__free_iova(mapping, dma_addr, len);
return DMA_MAPPING_ERROR;
@@ -1393,10 +1397,11 @@ static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
* @handle: DMA address of buffer
* @size: size of buffer (same as passed to dma_map_page)
* @dir: DMA transfer direction (same as passed to dma_map_page)
+ * @attrs: DMA mapping attributes
*
- * IOMMU aware version of arm_dma_unmap_page()
+ * IOMMU aware version of arm_dma_unmap_phys()
*/
-static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
+static void arm_iommu_unmap_phys(struct device *dev, dma_addr_t handle,
size_t size, enum dma_data_direction dir, unsigned long attrs)
{
struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
@@ -1407,7 +1412,8 @@ static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
if (!iova)
return;
- if (!dev->dma_coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
+ if (!dev->dma_coherent &&
+ !(attrs & (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_MMIO))) {
phys_addr_t phys = iommu_iova_to_phys(mapping->domain, iova);
arch_sync_dma_for_cpu(phys + offset, size, dir);
@@ -1417,63 +1423,6 @@ static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
__free_iova(mapping, iova, len);
}
-/**
- * arm_iommu_map_resource - map a device resource for DMA
- * @dev: valid struct device pointer
- * @phys_addr: physical address of resource
- * @size: size of resource to map
- * @dir: DMA transfer direction
- */
-static dma_addr_t arm_iommu_map_resource(struct device *dev,
- phys_addr_t phys_addr, size_t size,
- enum dma_data_direction dir, unsigned long attrs)
-{
- struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
- dma_addr_t dma_addr;
- int ret, prot;
- phys_addr_t addr = phys_addr & PAGE_MASK;
- unsigned int offset = phys_addr & ~PAGE_MASK;
- size_t len = PAGE_ALIGN(size + offset);
-
- dma_addr = __alloc_iova(mapping, len);
- if (dma_addr == DMA_MAPPING_ERROR)
- return dma_addr;
-
- prot = __dma_info_to_prot(dir, attrs) | IOMMU_MMIO;
-
- ret = iommu_map(mapping->domain, dma_addr, addr, len, prot, GFP_KERNEL);
- if (ret < 0)
- goto fail;
-
- return dma_addr + offset;
-fail:
- __free_iova(mapping, dma_addr, len);
- return DMA_MAPPING_ERROR;
-}
-
-/**
- * arm_iommu_unmap_resource - unmap a device DMA resource
- * @dev: valid struct device pointer
- * @dma_handle: DMA address to resource
- * @size: size of resource to map
- * @dir: DMA transfer direction
- */
-static void arm_iommu_unmap_resource(struct device *dev, dma_addr_t dma_handle,
- size_t size, enum dma_data_direction dir,
- unsigned long attrs)
-{
- struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
- dma_addr_t iova = dma_handle & PAGE_MASK;
- unsigned int offset = dma_handle & ~PAGE_MASK;
- size_t len = PAGE_ALIGN(size + offset);
-
- if (!iova)
- return;
-
- iommu_unmap(mapping->domain, iova, len);
- __free_iova(mapping, iova, len);
-}
-
static void arm_iommu_sync_single_for_cpu(struct device *dev,
dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
@@ -1510,8 +1459,8 @@ static const struct dma_map_ops iommu_ops = {
.mmap = arm_iommu_mmap_attrs,
.get_sgtable = arm_iommu_get_sgtable,
- .map_page = arm_iommu_map_page,
- .unmap_page = arm_iommu_unmap_page,
+ .map_phys = arm_iommu_map_phys,
+ .unmap_phys = arm_iommu_unmap_phys,
.sync_single_for_cpu = arm_iommu_sync_single_for_cpu,
.sync_single_for_device = arm_iommu_sync_single_for_device,
@@ -1519,9 +1468,6 @@ static const struct dma_map_ops iommu_ops = {
.unmap_sg = arm_iommu_unmap_sg,
.sync_sg_for_cpu = arm_iommu_sync_sg_for_cpu,
.sync_sg_for_device = arm_iommu_sync_sg_for_device,
-
- .map_resource = arm_iommu_map_resource,
- .unmap_resource = arm_iommu_unmap_resource,
};
/**
--
2.51.0