From: "Aneesh Kumar K.V (Arm)" <aneesh.kumar@kernel.org>
To: iommu@lists.linux.dev, linux-arm-kernel@lists.infradead.org,
linux-kernel@vger.kernel.org, linux-coco@lists.linux.dev
Cc: "Aneesh Kumar K.V (Arm)" <aneesh.kumar@kernel.org>,
Robin Murphy <robin.murphy@arm.com>,
Marek Szyprowski <m.szyprowski@samsung.com>,
Will Deacon <will@kernel.org>, Marc Zyngier <maz@kernel.org>,
Steven Price <steven.price@arm.com>,
Suzuki K Poulose <Suzuki.Poulose@arm.com>,
Catalin Marinas <catalin.marinas@arm.com>,
Jiri Pirko <jiri@resnulli.us>, Jason Gunthorpe <jgg@ziepe.ca>,
Mostafa Saleh <smostafa@google.com>,
Petr Tesarik <ptesarik@suse.com>,
Alexey Kardashevskiy <aik@amd.com>,
Dan Williams <dan.j.williams@intel.com>,
Xu Yilun <yilun.xu@linux.intel.com>,
linuxppc-dev@lists.ozlabs.org, linux-s390@vger.kernel.org,
Madhavan Srinivasan <maddy@linux.ibm.com>,
Michael Ellerman <mpe@ellerman.id.au>,
Nicholas Piggin <npiggin@gmail.com>,
"Christophe Leroy (CS GROUP)" <chleroy@kernel.org>,
Alexander Gordeev <agordeev@linux.ibm.com>,
Gerald Schaefer <gerald.schaefer@linux.ibm.com>,
Heiko Carstens <hca@linux.ibm.com>,
Vasily Gorbik <gor@linux.ibm.com>,
Christian Borntraeger <borntraeger@linux.ibm.com>,
Sven Schnelle <svens@linux.ibm.com>,
x86@kernel.org
Subject: [PATCH v4 06/13] dma-direct: pass attrs to dma_capable() for DMA_ATTR_CC_SHARED checks
Date: Tue, 12 May 2026 14:34:01 +0530 [thread overview]
Message-ID: <20260512090408.794195-7-aneesh.kumar@kernel.org> (raw)
In-Reply-To: <20260512090408.794195-1-aneesh.kumar@kernel.org>
Teach dma_capable() about DMA_ATTR_CC_SHARED so the capability
check can reject encrypted DMA addresses for devices that require
unencrypted/shared DMA.
Also propagate DMA_ATTR_CC_SHARED in swiotlb_map() when the selected
SWIOTLB pool is decrypted so the capability check sees the correct DMA
address attribute.
Signed-off-by: Aneesh Kumar K.V (Arm) <aneesh.kumar@kernel.org>
---
arch/x86/kernel/amd_gart_64.c | 30 ++++++++++++++++--------------
drivers/xen/swiotlb-xen.c | 10 +++++++---
include/linux/dma-direct.h | 9 ++++++++-
kernel/dma/direct.h | 6 +++---
kernel/dma/swiotlb.c | 8 +++++---
5 files changed, 39 insertions(+), 24 deletions(-)
diff --git a/arch/x86/kernel/amd_gart_64.c b/arch/x86/kernel/amd_gart_64.c
index e8000a56732e..b5f1f031d45b 100644
--- a/arch/x86/kernel/amd_gart_64.c
+++ b/arch/x86/kernel/amd_gart_64.c
@@ -180,22 +180,23 @@ static void iommu_full(struct device *dev, size_t size, int dir)
}
static inline int
-need_iommu(struct device *dev, unsigned long addr, size_t size)
+need_iommu(struct device *dev, unsigned long addr, size_t size, unsigned long attrs)
{
- return force_iommu || !dma_capable(dev, addr, size, true);
+ return force_iommu || !dma_capable(dev, addr, size, true, attrs);
}
static inline int
-nonforced_iommu(struct device *dev, unsigned long addr, size_t size)
+nonforced_iommu(struct device *dev, unsigned long addr, size_t size,
+ unsigned long attrs)
{
- return !dma_capable(dev, addr, size, true);
+ return !dma_capable(dev, addr, size, true, attrs);
}
/* Map a single continuous physical area into the IOMMU.
* Caller needs to check if the iommu is needed and flush.
*/
static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
- size_t size, int dir, unsigned long align_mask)
+ size_t size, int dir, unsigned long align_mask, unsigned long attrs)
{
unsigned long npages = iommu_num_pages(phys_mem, size, PAGE_SIZE);
unsigned long iommu_page;
@@ -206,7 +207,7 @@ static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
iommu_page = alloc_iommu(dev, npages, align_mask);
if (iommu_page == -1) {
- if (!nonforced_iommu(dev, phys_mem, size))
+ if (!nonforced_iommu(dev, phys_mem, size, attrs))
return phys_mem;
if (panic_on_overflow)
panic("dma_map_area overflow %lu bytes\n", size);
@@ -231,10 +232,10 @@ static dma_addr_t gart_map_phys(struct device *dev, phys_addr_t paddr,
if (unlikely(attrs & DMA_ATTR_MMIO))
return DMA_MAPPING_ERROR;
- if (!need_iommu(dev, paddr, size))
+ if (!need_iommu(dev, paddr, size, attrs))
return paddr;
- bus = dma_map_area(dev, paddr, size, dir, 0);
+ bus = dma_map_area(dev, paddr, size, dir, 0, attrs);
flush_gart();
return bus;
@@ -289,7 +290,7 @@ static void gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
/* Fallback for dma_map_sg in case of overflow */
static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
- int nents, int dir)
+ int nents, int dir, unsigned long attrs)
{
struct scatterlist *s;
int i;
@@ -301,8 +302,8 @@ static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
for_each_sg(sg, s, nents, i) {
unsigned long addr = sg_phys(s);
- if (nonforced_iommu(dev, addr, s->length)) {
- addr = dma_map_area(dev, addr, s->length, dir, 0);
+ if (nonforced_iommu(dev, addr, s->length, attrs)) {
+ addr = dma_map_area(dev, addr, s->length, dir, 0, attrs);
if (addr == DMA_MAPPING_ERROR) {
if (i > 0)
gart_unmap_sg(dev, sg, i, dir, 0);
@@ -401,7 +402,7 @@ static int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents,
s->dma_address = addr;
BUG_ON(s->length == 0);
- nextneed = need_iommu(dev, addr, s->length);
+ nextneed = need_iommu(dev, addr, s->length, attrs);
/* Handle the previous not yet processed entries */
if (i > start) {
@@ -449,7 +450,7 @@ static int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents,
/* When it was forced or merged try again in a dumb way */
if (force_iommu || iommu_merge) {
- out = dma_map_sg_nonforce(dev, sg, nents, dir);
+ out = dma_map_sg_nonforce(dev, sg, nents, dir, attrs);
if (out > 0)
return out;
}
@@ -473,7 +474,8 @@ gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr,
return vaddr;
*dma_addr = dma_map_area(dev, virt_to_phys(vaddr), size,
- DMA_BIDIRECTIONAL, (1UL << get_order(size)) - 1);
+ DMA_BIDIRECTIONAL,
+ (1UL << get_order(size)) - 1, attrs);
flush_gart();
if (unlikely(*dma_addr == DMA_MAPPING_ERROR))
goto out_free;
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 2cbf2b588f5b..fa6734461d4c 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -212,7 +212,7 @@ static dma_addr_t xen_swiotlb_map_phys(struct device *dev, phys_addr_t phys,
BUG_ON(dir == DMA_NONE);
if (attrs & DMA_ATTR_MMIO) {
- if (unlikely(!dma_capable(dev, phys, size, false))) {
+ if (unlikely(!dma_capable(dev, phys, size, false, attrs))) {
dev_err_once(
dev,
"DMA addr %pa+%zu overflow (mask %llx, bus limit %llx).\n",
@@ -231,7 +231,7 @@ static dma_addr_t xen_swiotlb_map_phys(struct device *dev, phys_addr_t phys,
* we can safely return the device addr and not worry about bounce
* buffering it.
*/
- if (dma_capable(dev, dev_addr, size, true) &&
+ if (dma_capable(dev, dev_addr, size, true, attrs) &&
!dma_kmalloc_needs_bounce(dev, size, dir) &&
!range_straddles_page_boundary(phys, size) &&
!xen_arch_need_swiotlb(dev, phys, dev_addr) &&
@@ -248,12 +248,16 @@ static dma_addr_t xen_swiotlb_map_phys(struct device *dev, phys_addr_t phys,
return DMA_MAPPING_ERROR;
phys = map;
+	/* This always returns an encrypted addr */
dev_addr = xen_phys_to_dma(dev, map);
+ if (WARN_ON(dev->dma_io_tlb_mem->unencrypted))
+ attrs |= DMA_ATTR_CC_SHARED;
+
/*
* Ensure that the address returned is DMA'ble
*/
- if (unlikely(!dma_capable(dev, dev_addr, size, true))) {
+ if (unlikely(!dma_capable(dev, dev_addr, size, true, attrs))) {
__swiotlb_tbl_unmap_single(dev, map, size, dir,
attrs | DMA_ATTR_SKIP_CPU_SYNC,
swiotlb_find_pool(dev, map));
diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h
index 94fad4e7c11e..9dbe198b2c4a 100644
--- a/include/linux/dma-direct.h
+++ b/include/linux/dma-direct.h
@@ -135,12 +135,19 @@ static inline bool force_dma_unencrypted(struct device *dev)
#endif /* CONFIG_ARCH_HAS_FORCE_DMA_UNENCRYPTED */
static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size,
- bool is_ram)
+ bool is_ram, unsigned long attrs)
{
dma_addr_t end = addr + size - 1;
if (addr == DMA_MAPPING_ERROR)
return false;
+	/*
+	 * Reject the address if its attribute says encrypted (no
+	 * DMA_ATTR_CC_SHARED) but the device requires an
+	 * unencrypted/shared DMA address.
+	 */
+ if (!(attrs & DMA_ATTR_CC_SHARED) && force_dma_unencrypted(dev))
+ return false;
+
if (is_ram && !IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT) &&
min(addr, end) < phys_to_dma(dev, PFN_PHYS(min_low_pfn)))
return false;
diff --git a/kernel/dma/direct.h b/kernel/dma/direct.h
index 7140c208c123..e05dc7649366 100644
--- a/kernel/dma/direct.h
+++ b/kernel/dma/direct.h
@@ -101,15 +101,15 @@ static inline dma_addr_t dma_direct_map_phys(struct device *dev,
if (attrs & DMA_ATTR_MMIO) {
dma_addr = phys;
- if (unlikely(!dma_capable(dev, dma_addr, size, false)))
+ if (unlikely(!dma_capable(dev, dma_addr, size, false, attrs)))
goto err_overflow;
} else if (attrs & DMA_ATTR_CC_SHARED) {
dma_addr = phys_to_dma_unencrypted(dev, phys);
- if (unlikely(!dma_capable(dev, dma_addr, size, false)))
+ if (unlikely(!dma_capable(dev, dma_addr, size, false, attrs)))
goto err_overflow;
} else {
dma_addr = phys_to_dma(dev, phys);
- if (unlikely(!dma_capable(dev, dma_addr, size, true)) ||
+ if (unlikely(!dma_capable(dev, dma_addr, size, true, attrs)) ||
dma_kmalloc_needs_bounce(dev, size, dir)) {
if (is_swiotlb_active(dev) &&
!(attrs & DMA_ATTR_REQUIRE_COHERENT))
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index 065663be282c..9f87ebe42797 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -1646,12 +1646,14 @@ dma_addr_t swiotlb_map(struct device *dev, phys_addr_t paddr, size_t size,
/*
* Use the allocated io_tlb_mem encryption type to determine dma addr.
*/
- if (dev->dma_io_tlb_mem->unencrypted)
+ if (dev->dma_io_tlb_mem->unencrypted) {
dma_addr = phys_to_dma_unencrypted(dev, swiotlb_addr);
- else
+ attrs |= DMA_ATTR_CC_SHARED;
+ } else {
dma_addr = phys_to_dma_encrypted(dev, swiotlb_addr);
+ }
- if (unlikely(!dma_capable(dev, dma_addr, size, true))) {
+ if (unlikely(!dma_capable(dev, dma_addr, size, true, attrs))) {
__swiotlb_tbl_unmap_single(dev, swiotlb_addr, size, dir,
attrs | DMA_ATTR_SKIP_CPU_SYNC,
swiotlb_find_pool(dev, swiotlb_addr));
--
2.43.0
next prev parent reply other threads:[~2026-05-12 9:05 UTC|newest]
Thread overview: 35+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-05-12 9:03 [PATCH v4 00/13] dma-mapping: Use DMA_ATTR_CC_SHARED through direct, pool and swiotlb paths Aneesh Kumar K.V (Arm)
2026-05-12 9:03 ` [PATCH v4 01/13] dma-direct: swiotlb: handle swiotlb alloc/free outside __dma_direct_alloc_pages Aneesh Kumar K.V (Arm)
2026-05-13 13:57 ` Mostafa Saleh
2026-05-14 4:54 ` Aneesh Kumar K.V
2026-05-12 9:03 ` [PATCH v4 02/13] dma-direct: use DMA_ATTR_CC_SHARED in alloc/free paths Aneesh Kumar K.V (Arm)
2026-05-13 13:58 ` Mostafa Saleh
2026-05-14 5:01 ` Aneesh Kumar K.V
2026-05-12 9:03 ` [PATCH v4 03/13] dma-pool: track decrypted atomic pools and select them via attrs Aneesh Kumar K.V (Arm)
2026-05-13 14:00 ` Mostafa Saleh
2026-05-14 7:00 ` Aneesh Kumar K.V
2026-05-14 8:06 ` Mostafa Saleh
2026-05-12 9:03 ` [PATCH v4 04/13] dma: swiotlb: track pool encryption state and honor DMA_ATTR_CC_SHARED Aneesh Kumar K.V (Arm)
2026-05-13 14:27 ` Mostafa Saleh
2026-05-13 17:24 ` Jason Gunthorpe
2026-05-14 6:24 ` Aneesh Kumar K.V
2026-05-14 11:48 ` Mostafa Saleh
2026-05-14 12:35 ` Jason Gunthorpe
2026-05-14 14:43 ` Mostafa Saleh
2026-05-15 22:51 ` Jason Gunthorpe
2026-05-14 5:54 ` Aneesh Kumar K.V
2026-05-14 12:02 ` Mostafa Saleh
2026-05-14 12:48 ` Aneesh Kumar K.V
2026-05-14 14:21 ` Mostafa Saleh
2026-05-14 14:43 ` Aneesh Kumar K.V
2026-05-14 14:37 ` Jason Gunthorpe
2026-05-14 15:43 ` Mostafa Saleh
2026-05-12 9:04 ` [PATCH v4 05/13] dma-mapping: make dma_pgprot() " Aneesh Kumar K.V (Arm)
2026-05-12 9:04 ` Aneesh Kumar K.V (Arm) [this message]
2026-05-12 9:04 ` [PATCH v4 07/13] dma-direct: make dma_direct_map_phys() " Aneesh Kumar K.V (Arm)
2026-05-12 9:04 ` [PATCH v4 08/13] dma-direct: set decrypted flag for remapped DMA allocations Aneesh Kumar K.V (Arm)
2026-05-12 9:04 ` [PATCH v4 09/13] dma-direct: select DMA address encoding from DMA_ATTR_CC_SHARED Aneesh Kumar K.V (Arm)
2026-05-12 9:04 ` [PATCH v4 10/13] dma-pool: fix page leak in atomic_pool_expand() cleanup Aneesh Kumar K.V (Arm)
2026-05-12 9:04 ` [PATCH v4 11/13] dma-direct: rename ret to cpu_addr in alloc helpers Aneesh Kumar K.V (Arm)
2026-05-12 9:04 ` [PATCH v4 12/13] dma-direct: return struct page from dma_direct_alloc_from_pool() Aneesh Kumar K.V (Arm)
2026-05-12 9:04 ` [PATCH v4 13/13] x86/amd-gart: preserve the direct DMA address until GART mapping succeeds Aneesh Kumar K.V (Arm)
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260512090408.794195-7-aneesh.kumar@kernel.org \
--to=aneesh.kumar@kernel.org \
--cc=Suzuki.Poulose@arm.com \
--cc=agordeev@linux.ibm.com \
--cc=aik@amd.com \
--cc=borntraeger@linux.ibm.com \
--cc=catalin.marinas@arm.com \
--cc=chleroy@kernel.org \
--cc=dan.j.williams@intel.com \
--cc=gerald.schaefer@linux.ibm.com \
--cc=gor@linux.ibm.com \
--cc=hca@linux.ibm.com \
--cc=iommu@lists.linux.dev \
--cc=jgg@ziepe.ca \
--cc=jiri@resnulli.us \
--cc=linux-arm-kernel@lists.infradead.org \
--cc=linux-coco@lists.linux.dev \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-s390@vger.kernel.org \
--cc=linuxppc-dev@lists.ozlabs.org \
--cc=m.szyprowski@samsung.com \
--cc=maddy@linux.ibm.com \
--cc=maz@kernel.org \
--cc=mpe@ellerman.id.au \
--cc=npiggin@gmail.com \
--cc=ptesarik@suse.com \
--cc=robin.murphy@arm.com \
--cc=smostafa@google.com \
--cc=steven.price@arm.com \
--cc=svens@linux.ibm.com \
--cc=will@kernel.org \
--cc=x86@kernel.org \
--cc=yilun.xu@linux.intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.