From: Leon Romanovsky <leon@kernel.org>
To: Marek Szyprowski <m.szyprowski@samsung.com>,
Robin Murphy <robin.murphy@arm.com>,
Russell King <linux@armlinux.org.uk>,
Juergen Gross <jgross@suse.com>,
Stefano Stabellini <sstabellini@kernel.org>,
Oleksandr Tyshchenko <oleksandr_tyshchenko@epam.com>,
Richard Henderson <richard.henderson@linaro.org>,
Matt Turner <mattst88@gmail.com>,
Thomas Bogendoerfer <tsbogend@alpha.franken.de>,
"James E.J. Bottomley" <James.Bottomley@HansenPartnership.com>,
Helge Deller <deller@gmx.de>,
Madhavan Srinivasan <maddy@linux.ibm.com>,
Michael Ellerman <mpe@ellerman.id.au>,
Nicholas Piggin <npiggin@gmail.com>,
Christophe Leroy <christophe.leroy@csgroup.eu>,
Geoff Levand <geoff@infradead.org>,
"David S. Miller" <davem@davemloft.net>,
Andreas Larsson <andreas@gaisler.com>,
Thomas Gleixner <tglx@linutronix.de>,
Ingo Molnar <mingo@redhat.com>, Borislav Petkov <bp@alien8.de>,
Dave Hansen <dave.hansen@linux.intel.com>,
x86@kernel.org, "H. Peter Anvin" <hpa@zytor.com>
Cc: iommu@lists.linux.dev, linux-kernel@vger.kernel.org,
linux-arm-kernel@lists.infradead.org,
xen-devel@lists.xenproject.org, linux-alpha@vger.kernel.org,
linux-mips@vger.kernel.org, linux-parisc@vger.kernel.org,
linuxppc-dev@lists.ozlabs.org, sparclinux@vger.kernel.org,
Jason Gunthorpe <jgg@ziepe.ca>, Jason Gunthorpe <jgg@nvidia.com>
Subject: [PATCH v5 05/14] xen: swiotlb: Switch to physical address mapping callbacks
Date: Wed, 15 Oct 2025 12:12:51 +0300
Message-ID: <20251015-remove-map-page-v5-5-3bbfe3a25cdf@kernel.org>
In-Reply-To: <20251015-remove-map-page-v5-0-3bbfe3a25cdf@kernel.org>

From: Leon Romanovsky <leonro@nvidia.com>

Combine the resource and page mapping routines into a single function
and remove the .map_resource/.unmap_resource callbacks completely.
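
With DMA_ATTR_MMIO set, the new xen_swiotlb_map_phys() takes over the
role of the removed xen_swiotlb_direct_map_resource(); without the
attribute it behaves like the old xen_swiotlb_map_page(). In outline
(abridged from the diff below, for illustration only):

    static dma_addr_t xen_swiotlb_map_phys(struct device *dev, phys_addr_t phys,
                                           size_t size, enum dma_data_direction dir,
                                           unsigned long attrs)
    {
            if (attrs & DMA_ATTR_MMIO) {
                    /* former .map_resource path: no swiotlb bouncing */
                    if (unlikely(!dma_capable(dev, phys, size, false)))
                            return DMA_MAPPING_ERROR;
                    return phys;
            }
            /* former .map_page path: translate and bounce if needed */
            ...
    }
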
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
---
drivers/xen/swiotlb-xen.c | 63 ++++++++++++++++++++++-------------------------
1 file changed, 29 insertions(+), 34 deletions(-)
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index dd7747a2de87..ccf25027bec1 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -200,17 +200,32 @@ xen_swiotlb_free_coherent(struct device *dev, size_t size, void *vaddr,
* physical address to use is returned.
*
* Once the device is given the dma address, the device owns this memory until
- * either xen_swiotlb_unmap_page or xen_swiotlb_dma_sync_single is performed.
+ * either xen_swiotlb_unmap_phys or xen_swiotlb_dma_sync_single is performed.
*/
-static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction dir,
+static dma_addr_t xen_swiotlb_map_phys(struct device *dev, phys_addr_t phys,
+ size_t size, enum dma_data_direction dir,
unsigned long attrs)
{
- phys_addr_t map, phys = page_to_phys(page) + offset;
- dma_addr_t dev_addr = xen_phys_to_dma(dev, phys);
+ dma_addr_t dev_addr;
+ phys_addr_t map;
BUG_ON(dir == DMA_NONE);
+
+ if (attrs & DMA_ATTR_MMIO) {
+ if (unlikely(!dma_capable(dev, phys, size, false))) {
+ dev_err_once(
+ dev,
+ "DMA addr %pa+%zu overflow (mask %llx, bus limit %llx).\n",
+ &phys, size, *dev->dma_mask,
+ dev->bus_dma_limit);
+ WARN_ON_ONCE(1);
+ return DMA_MAPPING_ERROR;
+ }
+ return phys;
+ }
+
+ dev_addr = xen_phys_to_dma(dev, phys);
+
/*
* If the address happens to be in the device's DMA window,
* we can safely return the device addr and not worry about bounce
@@ -257,13 +272,13 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
/*
* Unmap a single streaming mode DMA translation. The dma_addr and size must
- * match what was provided for in a previous xen_swiotlb_map_page call. All
+ * match what was provided for in a previous xen_swiotlb_map_phys call. All
* other usages are undefined.
*
* After this call, reads by the cpu to the buffer are guaranteed to see
* whatever the device wrote there.
*/
-static void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
+static void xen_swiotlb_unmap_phys(struct device *hwdev, dma_addr_t dev_addr,
size_t size, enum dma_data_direction dir, unsigned long attrs)
{
phys_addr_t paddr = xen_dma_to_phys(hwdev, dev_addr);
@@ -325,7 +340,7 @@ xen_swiotlb_sync_single_for_device(struct device *dev, dma_addr_t dma_addr,
/*
* Unmap a set of streaming mode DMA translations. Again, cpu read rules
- * concerning calls here are the same as for swiotlb_unmap_page() above.
+ * concerning calls here are the same as for swiotlb_unmap_phys() above.
*/
static void
xen_swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
@@ -337,7 +352,7 @@ xen_swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
BUG_ON(dir == DMA_NONE);
for_each_sg(sgl, sg, nelems, i)
- xen_swiotlb_unmap_page(hwdev, sg->dma_address, sg_dma_len(sg),
+ xen_swiotlb_unmap_phys(hwdev, sg->dma_address, sg_dma_len(sg),
dir, attrs);
}
@@ -352,8 +367,8 @@ xen_swiotlb_map_sg(struct device *dev, struct scatterlist *sgl, int nelems,
BUG_ON(dir == DMA_NONE);
for_each_sg(sgl, sg, nelems, i) {
- sg->dma_address = xen_swiotlb_map_page(dev, sg_page(sg),
- sg->offset, sg->length, dir, attrs);
+ sg->dma_address = xen_swiotlb_map_phys(dev, sg_phys(sg),
+ sg->length, dir, attrs);
if (sg->dma_address == DMA_MAPPING_ERROR)
goto out_unmap;
sg_dma_len(sg) = sg->length;
@@ -392,25 +407,6 @@ xen_swiotlb_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
}
}
-static dma_addr_t xen_swiotlb_direct_map_resource(struct device *dev,
- phys_addr_t paddr,
- size_t size,
- enum dma_data_direction dir,
- unsigned long attrs)
-{
- dma_addr_t dma_addr = paddr;
-
- if (unlikely(!dma_capable(dev, dma_addr, size, false))) {
- dev_err_once(dev,
- "DMA addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
- &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
- WARN_ON_ONCE(1);
- return DMA_MAPPING_ERROR;
- }
-
- return dma_addr;
-}
-
/*
* Return whether the given device DMA address mask can be supported
* properly. For example, if your device can only drive the low 24-bits
@@ -437,13 +433,12 @@ const struct dma_map_ops xen_swiotlb_dma_ops = {
.sync_sg_for_device = xen_swiotlb_sync_sg_for_device,
.map_sg = xen_swiotlb_map_sg,
.unmap_sg = xen_swiotlb_unmap_sg,
- .map_page = xen_swiotlb_map_page,
- .unmap_page = xen_swiotlb_unmap_page,
+ .map_phys = xen_swiotlb_map_phys,
+ .unmap_phys = xen_swiotlb_unmap_phys,
.dma_supported = xen_swiotlb_dma_supported,
.mmap = dma_common_mmap,
.get_sgtable = dma_common_get_sgtable,
.alloc_pages_op = dma_common_alloc_pages,
.free_pages = dma_common_free_pages,
.max_mapping_size = swiotlb_max_mapping_size,
- .map_resource = xen_swiotlb_direct_map_resource,
};
--
2.51.0