From: "Aneesh Kumar K.V (Arm)" <aneesh.kumar@kernel.org>
To: iommu@lists.linux.dev, linux-arm-kernel@lists.infradead.org,
linux-kernel@vger.kernel.org, linux-coco@lists.linux.dev
Cc: "Aneesh Kumar K.V (Arm)" <aneesh.kumar@kernel.org>,
Robin Murphy <robin.murphy@arm.com>,
Marek Szyprowski <m.szyprowski@samsung.com>,
Will Deacon <will@kernel.org>, Marc Zyngier <maz@kernel.org>,
Steven Price <steven.price@arm.com>,
Suzuki K Poulose <Suzuki.Poulose@arm.com>,
Catalin Marinas <catalin.marinas@arm.com>,
Jiri Pirko <jiri@resnulli.us>, Jason Gunthorpe <jgg@ziepe.ca>,
Mostafa Saleh <smostafa@google.com>,
Petr Tesarik <ptesarik@suse.com>,
Alexey Kardashevskiy <aik@amd.com>,
Dan Williams <dan.j.williams@intel.com>,
Xu Yilun <yilun.xu@linux.intel.com>,
linuxppc-dev@lists.ozlabs.org, linux-s390@vger.kernel.org,
Madhavan Srinivasan <maddy@linux.ibm.com>,
Michael Ellerman <mpe@ellerman.id.au>,
Nicholas Piggin <npiggin@gmail.com>,
"Christophe Leroy (CS GROUP)" <chleroy@kernel.org>,
Alexander Gordeev <agordeev@linux.ibm.com>,
Gerald Schaefer <gerald.schaefer@linux.ibm.com>,
Heiko Carstens <hca@linux.ibm.com>,
Vasily Gorbik <gor@linux.ibm.com>,
Christian Borntraeger <borntraeger@linux.ibm.com>,
Sven Schnelle <svens@linux.ibm.com>,
x86@kernel.org
Subject: [PATCH v4 07/13] dma-direct: make dma_direct_map_phys() honor DMA_ATTR_CC_SHARED
Date: Tue, 12 May 2026 14:34:02 +0530 [thread overview]
Message-ID: <20260512090408.794195-8-aneesh.kumar@kernel.org> (raw)
In-Reply-To: <20260512090408.794195-1-aneesh.kumar@kernel.org>
Teach dma_direct_map_phys() to select the DMA address encoding based on
DMA_ATTR_CC_SHARED.
Use phys_to_dma_unencrypted() for decrypted mappings and
phys_to_dma_encrypted() otherwise. If a device requires unencrypted DMA
but the source physical address is still encrypted, force the mapping
through swiotlb so the DMA address and backing memory attributes remain
consistent.
Update the arm64, x86, s390 and powerpc secure-guest setup to stop
passing the SWIOTLB_FORCE flag, since forced bouncing is now driven by
the mapping attributes instead.
Signed-off-by: Aneesh Kumar K.V (Arm) <aneesh.kumar@kernel.org>
---
Changes from v3:
* Handle DMA_ATTR_MMIO
---
arch/arm64/mm/init.c | 4 +--
arch/powerpc/platforms/pseries/svm.c | 2 +-
arch/s390/mm/init.c | 2 +-
arch/x86/kernel/pci-dma.c | 4 +--
kernel/dma/direct.c | 4 ++-
kernel/dma/direct.h | 38 +++++++++++++---------------
6 files changed, 24 insertions(+), 30 deletions(-)
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 97987f850a33..acf67c7064db 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -338,10 +338,8 @@ void __init arch_mm_preinit(void)
unsigned int flags = SWIOTLB_VERBOSE;
bool swiotlb = max_pfn > PFN_DOWN(arm64_dma_phys_limit);
- if (is_realm_world()) {
+ if (is_realm_world())
swiotlb = true;
- flags |= SWIOTLB_FORCE;
- }
if (IS_ENABLED(CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC) && !swiotlb) {
/*
diff --git a/arch/powerpc/platforms/pseries/svm.c b/arch/powerpc/platforms/pseries/svm.c
index 384c9dc1899a..7a403dbd35ee 100644
--- a/arch/powerpc/platforms/pseries/svm.c
+++ b/arch/powerpc/platforms/pseries/svm.c
@@ -29,7 +29,7 @@ static int __init init_svm(void)
* need to use the SWIOTLB buffer for DMA even if dma_capable() says
* otherwise.
*/
- ppc_swiotlb_flags |= SWIOTLB_ANY | SWIOTLB_FORCE;
+ ppc_swiotlb_flags |= SWIOTLB_ANY;
/* Share the SWIOTLB buffer with the host. */
swiotlb_update_mem_attributes();
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 1f72efc2a579..843dbd445124 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -149,7 +149,7 @@ static void __init pv_init(void)
virtio_set_mem_acc_cb(virtio_require_restricted_mem_acc);
/* make sure bounce buffers are shared */
- swiotlb_init(true, SWIOTLB_FORCE | SWIOTLB_VERBOSE);
+ swiotlb_init(true, SWIOTLB_VERBOSE);
swiotlb_update_mem_attributes();
}
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index 6267363e0189..75cf8f6ae8cd 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -59,10 +59,8 @@ static void __init pci_swiotlb_detect(void)
* bounce buffers as the hypervisor can't access arbitrary VM memory
* that is not explicitly shared with it.
*/
- if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) {
+ if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
x86_swiotlb_enable = true;
- x86_swiotlb_flags |= SWIOTLB_FORCE;
- }
}
#else
static inline void __init pci_swiotlb_detect(void)
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index ac315dd046c4..5aaa813c5509 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -691,8 +691,10 @@ size_t dma_direct_max_mapping_size(struct device *dev)
{
/* If SWIOTLB is active, use its maximum mapping size */
if (is_swiotlb_active(dev) &&
- (dma_addressing_limited(dev) || is_swiotlb_force_bounce(dev)))
+ (dma_addressing_limited(dev) || is_swiotlb_force_bounce(dev) ||
+ force_dma_unencrypted(dev)))
return swiotlb_max_mapping_size(dev);
+
return SIZE_MAX;
}
diff --git a/kernel/dma/direct.h b/kernel/dma/direct.h
index e05dc7649366..4e35264ab6f8 100644
--- a/kernel/dma/direct.h
+++ b/kernel/dma/direct.h
@@ -89,36 +89,32 @@ static inline dma_addr_t dma_direct_map_phys(struct device *dev,
dma_addr_t dma_addr;
if (is_swiotlb_force_bounce(dev)) {
- if (!(attrs & DMA_ATTR_CC_SHARED)) {
- if (attrs & (DMA_ATTR_MMIO | DMA_ATTR_REQUIRE_COHERENT))
- return DMA_MAPPING_ERROR;
+ if (attrs & (DMA_ATTR_MMIO | DMA_ATTR_REQUIRE_COHERENT))
+ return DMA_MAPPING_ERROR;
- return swiotlb_map(dev, phys, size, dir, attrs);
- }
- } else if (attrs & DMA_ATTR_CC_SHARED) {
- return DMA_MAPPING_ERROR;
+ return swiotlb_map(dev, phys, size, dir, attrs);
}
- if (attrs & DMA_ATTR_MMIO) {
- dma_addr = phys;
- if (unlikely(!dma_capable(dev, dma_addr, size, false, attrs)))
- goto err_overflow;
- } else if (attrs & DMA_ATTR_CC_SHARED) {
+ if (attrs & DMA_ATTR_CC_SHARED)
dma_addr = phys_to_dma_unencrypted(dev, phys);
+ else
+ dma_addr = phys_to_dma_encrypted(dev, phys);
+
+ if (attrs & DMA_ATTR_MMIO) {
if (unlikely(!dma_capable(dev, dma_addr, size, false, attrs)))
goto err_overflow;
- } else {
- dma_addr = phys_to_dma(dev, phys);
- if (unlikely(!dma_capable(dev, dma_addr, size, true, attrs)) ||
- dma_kmalloc_needs_bounce(dev, size, dir)) {
- if (is_swiotlb_active(dev) &&
- !(attrs & DMA_ATTR_REQUIRE_COHERENT))
- return swiotlb_map(dev, phys, size, dir, attrs);
+ goto dma_mapped;
+ }
- goto err_overflow;
- }
+ if (unlikely(!dma_capable(dev, dma_addr, size, true, attrs)) ||
+ dma_kmalloc_needs_bounce(dev, size, dir)) {
+ if (is_swiotlb_active(dev) &&
+ !(attrs & DMA_ATTR_REQUIRE_COHERENT))
+ return swiotlb_map(dev, phys, size, dir, attrs);
+ goto err_overflow;
}
+dma_mapped:
if (!dev_is_dma_coherent(dev) &&
!(attrs & (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_MMIO))) {
arch_sync_dma_for_device(phys, size, dir);
--
2.43.0
next prev parent reply other threads:[~2026-05-12 9:05 UTC|newest]
Thread overview: 14+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-05-12 9:03 [PATCH v4 00/13] dma-mapping: Use DMA_ATTR_CC_SHARED through direct, pool and swiotlb paths Aneesh Kumar K.V (Arm)
2026-05-12 9:03 ` [PATCH v4 01/13] dma-direct: swiotlb: handle swiotlb alloc/free outside __dma_direct_alloc_pages Aneesh Kumar K.V (Arm)
2026-05-12 9:03 ` [PATCH v4 02/13] dma-direct: use DMA_ATTR_CC_SHARED in alloc/free paths Aneesh Kumar K.V (Arm)
2026-05-12 9:03 ` [PATCH v4 03/13] dma-pool: track decrypted atomic pools and select them via attrs Aneesh Kumar K.V (Arm)
2026-05-12 9:03 ` [PATCH v4 04/13] dma: swiotlb: track pool encryption state and honor DMA_ATTR_CC_SHARED Aneesh Kumar K.V (Arm)
2026-05-12 9:04 ` [PATCH v4 05/13] dma-mapping: make dma_pgprot() " Aneesh Kumar K.V (Arm)
2026-05-12 9:04 ` [PATCH v4 06/13] dma-direct: pass attrs to dma_capable() for DMA_ATTR_CC_SHARED checks Aneesh Kumar K.V (Arm)
2026-05-12 9:04 ` Aneesh Kumar K.V (Arm) [this message]
2026-05-12 9:04 ` [PATCH v4 08/13] dma-direct: set decrypted flag for remapped DMA allocations Aneesh Kumar K.V (Arm)
2026-05-12 9:04 ` [PATCH v4 09/13] dma-direct: select DMA address encoding from DMA_ATTR_CC_SHARED Aneesh Kumar K.V (Arm)
2026-05-12 9:04 ` [PATCH v4 10/13] dma-pool: fix page leak in atomic_pool_expand() cleanup Aneesh Kumar K.V (Arm)
2026-05-12 9:04 ` [PATCH v4 11/13] dma-direct: rename ret to cpu_addr in alloc helpers Aneesh Kumar K.V (Arm)
2026-05-12 9:04 ` [PATCH v4 12/13] dma-direct: return struct page from dma_direct_alloc_from_pool() Aneesh Kumar K.V (Arm)
2026-05-12 9:04 ` [PATCH v4 13/13] x86/amd-gart: preserve the direct DMA address until GART mapping succeeds Aneesh Kumar K.V (Arm)
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260512090408.794195-8-aneesh.kumar@kernel.org \
--to=aneesh.kumar@kernel.org \
--cc=Suzuki.Poulose@arm.com \
--cc=agordeev@linux.ibm.com \
--cc=aik@amd.com \
--cc=borntraeger@linux.ibm.com \
--cc=catalin.marinas@arm.com \
--cc=chleroy@kernel.org \
--cc=dan.j.williams@intel.com \
--cc=gerald.schaefer@linux.ibm.com \
--cc=gor@linux.ibm.com \
--cc=hca@linux.ibm.com \
--cc=iommu@lists.linux.dev \
--cc=jgg@ziepe.ca \
--cc=jiri@resnulli.us \
--cc=linux-arm-kernel@lists.infradead.org \
--cc=linux-coco@lists.linux.dev \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-s390@vger.kernel.org \
--cc=linuxppc-dev@lists.ozlabs.org \
--cc=m.szyprowski@samsung.com \
--cc=maddy@linux.ibm.com \
--cc=maz@kernel.org \
--cc=mpe@ellerman.id.au \
--cc=npiggin@gmail.com \
--cc=ptesarik@suse.com \
--cc=robin.murphy@arm.com \
--cc=smostafa@google.com \
--cc=steven.price@arm.com \
--cc=svens@linux.ibm.com \
--cc=will@kernel.org \
--cc=x86@kernel.org \
--cc=yilun.xu@linux.intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox