From: Leon Romanovsky <leon@kernel.org>
To: Marek Szyprowski <m.szyprowski@samsung.com>
Cc: Leon Romanovsky <leonro@nvidia.com>,
Jason Gunthorpe <jgg@nvidia.com>,
Andreas Larsson <andreas@gaisler.com>,
Borislav Petkov <bp@alien8.de>,
Dave Hansen <dave.hansen@linux.intel.com>,
"David S. Miller" <davem@davemloft.net>,
Geoff Levand <geoff@infradead.org>, Helge Deller <deller@gmx.de>,
Ingo Molnar <mingo@redhat.com>,
iommu@lists.linux.dev,
"James E.J. Bottomley" <James.Bottomley@HansenPartnership.com>,
Jason Wang <jasowang@redhat.com>, Juergen Gross <jgross@suse.com>,
linux-alpha@vger.kernel.org, linux-kernel@vger.kernel.org,
linux-mips@vger.kernel.org, linux-parisc@vger.kernel.org,
linuxppc-dev@lists.ozlabs.org,
Madhavan Srinivasan <maddy@linux.ibm.com>,
Matt Turner <mattst88@gmail.com>,
Michael Ellerman <mpe@ellerman.id.au>,
"Michael S. Tsirkin" <mst@redhat.com>,
Richard Henderson <richard.henderson@linaro.org>,
sparclinux@vger.kernel.org,
Stefano Stabellini <sstabellini@kernel.org>,
Thomas Bogendoerfer <tsbogend@alpha.franken.de>,
Thomas Gleixner <tglx@linutronix.de>,
virtualization@lists.linux.dev, x86@kernel.org,
xen-devel@lists.xenproject.org,
Magnus Lindholm <linmag7@gmail.com>
Subject: [PATCH v1 7/9] vdpa: Convert to physical address DMA mapping
Date: Sun, 28 Sep 2025 18:02:27 +0300
Message-ID: <fafaec3eb3830aa726b86ac7b145763c8be25a8a.1759071169.git.leon@kernel.org>
In-Reply-To: <cover.1759071169.git.leon@kernel.org>
From: Leon Romanovsky <leonro@nvidia.com>
Use the physical address directly in the DMA mapping flow instead of
deriving it from a struct page and offset, and reject DMA_ATTR_MMIO
mappings, which cannot be served by the VDUSE bounce buffer.
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
---
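Note for reviewers (not part of the commit log): the bounce-buffer logic
itself is untouched; only the callback entry point changes from a
(page, offset) pair to a phys_addr_t, plus an explicit rejection of
DMA_ATTR_MMIO, which cannot go through the bounce buffer. A minimal
sketch of that shape, where do_bounce_map() is a hypothetical stand-in
for the existing vduse bounce path:

	/* Old shape: the callback derived the physical address itself. */
	static dma_addr_t sketch_map_page(struct vduse_iova_domain *domain,
					  struct page *page, unsigned long offset,
					  size_t size, enum dma_data_direction dir,
					  unsigned long attrs)
	{
		phys_addr_t pa = page_to_phys(page) + offset;

		return do_bounce_map(domain, pa, size, dir, attrs);
	}

	/*
	 * New shape: the DMA core passes the physical address directly.
	 * MMIO targets (DMA_ATTR_MMIO) cannot be bounced, so fail early.
	 */
	static dma_addr_t sketch_map_phys(struct vduse_iova_domain *domain,
					  phys_addr_t pa, size_t size,
					  enum dma_data_direction dir,
					  unsigned long attrs)
	{
		if (attrs & DMA_ATTR_MMIO)
			return DMA_MAPPING_ERROR;

		return do_bounce_map(domain, pa, size, dir, attrs);
	}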
drivers/vdpa/vdpa_user/iova_domain.c | 11 +++++------
drivers/vdpa/vdpa_user/iova_domain.h | 8 ++++----
drivers/vdpa/vdpa_user/vduse_dev.c | 18 ++++++++++--------
3 files changed, 19 insertions(+), 18 deletions(-)
diff --git a/drivers/vdpa/vdpa_user/iova_domain.c b/drivers/vdpa/vdpa_user/iova_domain.c
index 58116f89d8da..c0ecf01003cd 100644
--- a/drivers/vdpa/vdpa_user/iova_domain.c
+++ b/drivers/vdpa/vdpa_user/iova_domain.c
@@ -396,17 +396,16 @@ void vduse_domain_sync_single_for_cpu(struct vduse_iova_domain *domain,
read_unlock(&domain->bounce_lock);
}
-dma_addr_t vduse_domain_map_page(struct vduse_iova_domain *domain,
- struct page *page, unsigned long offset,
- size_t size, enum dma_data_direction dir,
+dma_addr_t vduse_domain_map_phys(struct vduse_iova_domain *domain,
+ phys_addr_t pa, size_t size,
+ enum dma_data_direction dir,
unsigned long attrs)
{
struct iova_domain *iovad = &domain->stream_iovad;
unsigned long limit = domain->bounce_size - 1;
- phys_addr_t pa = page_to_phys(page) + offset;
dma_addr_t iova = vduse_domain_alloc_iova(iovad, size, limit);
- if (!iova)
+ if (!iova || (attrs & DMA_ATTR_MMIO))
return DMA_MAPPING_ERROR;
if (vduse_domain_init_bounce_map(domain))
@@ -430,7 +429,7 @@ dma_addr_t vduse_domain_map_page(struct vduse_iova_domain *domain,
return DMA_MAPPING_ERROR;
}
-void vduse_domain_unmap_page(struct vduse_iova_domain *domain,
+void vduse_domain_unmap_phys(struct vduse_iova_domain *domain,
dma_addr_t dma_addr, size_t size,
enum dma_data_direction dir, unsigned long attrs)
{
diff --git a/drivers/vdpa/vdpa_user/iova_domain.h b/drivers/vdpa/vdpa_user/iova_domain.h
index 7f3f0928ec78..7c4546fd856a 100644
--- a/drivers/vdpa/vdpa_user/iova_domain.h
+++ b/drivers/vdpa/vdpa_user/iova_domain.h
@@ -53,12 +53,12 @@ void vduse_domain_sync_single_for_cpu(struct vduse_iova_domain *domain,
dma_addr_t dma_addr, size_t size,
enum dma_data_direction dir);
-dma_addr_t vduse_domain_map_page(struct vduse_iova_domain *domain,
- struct page *page, unsigned long offset,
- size_t size, enum dma_data_direction dir,
+dma_addr_t vduse_domain_map_phys(struct vduse_iova_domain *domain,
+ phys_addr_t phys, size_t size,
+ enum dma_data_direction dir,
unsigned long attrs);
-void vduse_domain_unmap_page(struct vduse_iova_domain *domain,
+void vduse_domain_unmap_phys(struct vduse_iova_domain *domain,
dma_addr_t dma_addr, size_t size,
enum dma_data_direction dir, unsigned long attrs);
diff --git a/drivers/vdpa/vdpa_user/vduse_dev.c b/drivers/vdpa/vdpa_user/vduse_dev.c
index 04620bb77203..75aa3c9f83fb 100644
--- a/drivers/vdpa/vdpa_user/vduse_dev.c
+++ b/drivers/vdpa/vdpa_user/vduse_dev.c
@@ -834,25 +834,27 @@ static void vduse_dev_sync_single_for_cpu(struct device *dev,
vduse_domain_sync_single_for_cpu(domain, dma_addr, size, dir);
}
-static dma_addr_t vduse_dev_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction dir,
+static dma_addr_t vduse_dev_map_phys(struct device *dev, phys_addr_t phys,
+ size_t size, enum dma_data_direction dir,
unsigned long attrs)
{
struct vduse_dev *vdev = dev_to_vduse(dev);
struct vduse_iova_domain *domain = vdev->domain;
- return vduse_domain_map_page(domain, page, offset, size, dir, attrs);
+ if (attrs & DMA_ATTR_MMIO)
+ return DMA_MAPPING_ERROR;
+
+ return vduse_domain_map_phys(domain, phys, size, dir, attrs);
}
-static void vduse_dev_unmap_page(struct device *dev, dma_addr_t dma_addr,
+static void vduse_dev_unmap_phys(struct device *dev, dma_addr_t dma_addr,
size_t size, enum dma_data_direction dir,
unsigned long attrs)
{
struct vduse_dev *vdev = dev_to_vduse(dev);
struct vduse_iova_domain *domain = vdev->domain;
- return vduse_domain_unmap_page(domain, dma_addr, size, dir, attrs);
+ return vduse_domain_unmap_phys(domain, dma_addr, size, dir, attrs);
}
static void *vduse_dev_alloc_coherent(struct device *dev, size_t size,
@@ -896,8 +898,8 @@ static size_t vduse_dev_max_mapping_size(struct device *dev)
static const struct dma_map_ops vduse_dev_dma_ops = {
.sync_single_for_device = vduse_dev_sync_single_for_device,
.sync_single_for_cpu = vduse_dev_sync_single_for_cpu,
- .map_page = vduse_dev_map_page,
- .unmap_page = vduse_dev_unmap_page,
+ .map_phys = vduse_dev_map_phys,
+ .unmap_phys = vduse_dev_unmap_phys,
.alloc = vduse_dev_alloc_coherent,
.free = vduse_dev_free_coherent,
.max_mapping_size = vduse_dev_max_mapping_size,
--
2.51.0