From: "Aneesh Kumar K.V (Arm)" <aneesh.kumar@kernel.org>
To: iommu@lists.linux.dev, linux-arm-kernel@lists.infradead.org,
linux-kernel@vger.kernel.org, linux-coco@lists.linux.dev
Cc: "Aneesh Kumar K.V (Arm)" <aneesh.kumar@kernel.org>,
Robin Murphy <robin.murphy@arm.com>,
Marek Szyprowski <m.szyprowski@samsung.com>,
Will Deacon <will@kernel.org>, Marc Zyngier <maz@kernel.org>,
Steven Price <steven.price@arm.com>,
Suzuki K Poulose <Suzuki.Poulose@arm.com>,
Catalin Marinas <catalin.marinas@arm.com>,
Jiri Pirko <jiri@resnulli.us>, Jason Gunthorpe <jgg@ziepe.ca>,
Mostafa Saleh <smostafa@google.com>,
Petr Tesarik <ptesarik@suse.com>,
Alexey Kardashevskiy <aik@amd.com>,
Dan Williams <dan.j.williams@intel.com>,
Xu Yilun <yilun.xu@linux.intel.com>,
linuxppc-dev@lists.ozlabs.org, linux-s390@vger.kernel.org,
Madhavan Srinivasan <maddy@linux.ibm.com>,
Michael Ellerman <mpe@ellerman.id.au>,
Nicholas Piggin <npiggin@gmail.com>,
"Christophe Leroy (CS GROUP)" <chleroy@kernel.org>,
Alexander Gordeev <agordeev@linux.ibm.com>,
Gerald Schaefer <gerald.schaefer@linux.ibm.com>,
Heiko Carstens <hca@linux.ibm.com>,
Vasily Gorbik <gor@linux.ibm.com>,
Christian Borntraeger <borntraeger@linux.ibm.com>,
Sven Schnelle <svens@linux.ibm.com>,
x86@kernel.org
Subject: [PATCH v4 09/13] dma-direct: select DMA address encoding from DMA_ATTR_CC_SHARED
Date: Tue, 12 May 2026 14:34:04 +0530 [thread overview]
Message-ID: <20260512090408.794195-10-aneesh.kumar@kernel.org> (raw)
In-Reply-To: <20260512090408.794195-1-aneesh.kumar@kernel.org>
Make the dma-direct helpers derive the DMA address encoding from
DMA_ATTR_CC_SHARED instead of implicitly relying on
force_dma_unencrypted() inside phys_to_dma_direct().
Pass an explicit unencrypted/decrypted state into phys_to_dma_direct(),
make the alloc paths return DMA addresses that match the requested
buffer encryption state. Also, only call dma_set_decrypted() when
DMA_ATTR_CC_SHARED is actually set.
Signed-off-by: Aneesh Kumar K.V (Arm) <aneesh.kumar@kernel.org>
---
kernel/dma/direct.c | 48 ++++++++++++++++++++++++++++-----------------
1 file changed, 30 insertions(+), 18 deletions(-)
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index f5da6e992d83..1e9f9ff7b9d3 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -24,11 +24,11 @@
u64 zone_dma_limit __ro_after_init = DMA_BIT_MASK(24);
static inline dma_addr_t phys_to_dma_direct(struct device *dev,
- phys_addr_t phys)
+ phys_addr_t phys, bool unencrypted)
{
- if (force_dma_unencrypted(dev))
+ if (unencrypted)
return phys_to_dma_unencrypted(dev, phys);
- return phys_to_dma(dev, phys);
+ return phys_to_dma_encrypted(dev, phys);
}
static inline struct page *dma_direct_to_page(struct device *dev,
@@ -39,8 +39,9 @@ static inline struct page *dma_direct_to_page(struct device *dev,
u64 dma_direct_get_required_mask(struct device *dev)
{
+ bool require_decrypted = force_dma_unencrypted(dev);
phys_addr_t phys = (phys_addr_t)(max_pfn - 1) << PAGE_SHIFT;
- u64 max_dma = phys_to_dma_direct(dev, phys);
+ u64 max_dma = phys_to_dma_direct(dev, phys, require_decrypted);
return (1ULL << (fls64(max_dma) - 1)) * 2 - 1;
}
@@ -69,7 +70,8 @@ static gfp_t dma_direct_optimal_gfp_mask(struct device *dev, u64 *phys_limit)
bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
{
- dma_addr_t dma_addr = phys_to_dma_direct(dev, phys);
+ bool require_decrypted = force_dma_unencrypted(dev);
+ dma_addr_t dma_addr = phys_to_dma_direct(dev, phys, require_decrypted);
if (dma_addr == DMA_MAPPING_ERROR)
return false;
@@ -79,17 +81,18 @@ bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
static int dma_set_decrypted(struct device *dev, void *vaddr, size_t size)
{
- if (!force_dma_unencrypted(dev))
- return 0;
- return set_memory_decrypted((unsigned long)vaddr, PFN_UP(size));
+ int ret;
+
+ ret = set_memory_decrypted((unsigned long)vaddr, PFN_UP(size));
+ if (ret)
+ pr_warn_ratelimited("leaking DMA memory that can't be decrypted\n");
+ return ret;
}
static int dma_set_encrypted(struct device *dev, void *vaddr, size_t size)
{
int ret;
- if (!force_dma_unencrypted(dev))
- return 0;
ret = set_memory_encrypted((unsigned long)vaddr, PFN_UP(size));
if (ret)
pr_warn_ratelimited("leaking DMA memory that can't be re-encrypted\n");
@@ -177,7 +180,8 @@ static void *dma_direct_alloc_from_pool(struct device *dev, size_t size,
dma_coherent_ok);
if (!page)
return NULL;
- *dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
+ *dma_handle = phys_to_dma_direct(dev, page_to_phys(page),
+ !!(attrs & DMA_ATTR_CC_SHARED));
return ret;
}
@@ -193,9 +197,11 @@ static void *dma_direct_alloc_no_mapping(struct device *dev, size_t size,
/* remove any dirty cache lines on the kernel alias */
if (!PageHighMem(page))
arch_dma_prep_coherent(page, size);
-
- /* return the page pointer as the opaque cookie */
- *dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
+ /*
+ * return the page pointer as the opaque cookie.
+ * Never used for unencrypted allocation
+ */
+ *dma_handle = phys_to_dma_encrypted(dev, page_to_phys(page));
return page;
}
@@ -340,7 +346,8 @@ void *dma_direct_alloc(struct device *dev, size_t size,
ret = uncached_cpu_addr;
}
- *dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
+ *dma_handle = phys_to_dma_direct(dev, page_to_phys(page),
+ !!(attrs & DMA_ATTR_CC_SHARED));
return ret;
@@ -457,11 +464,12 @@ struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
return NULL;
ret = page_address(page);
- if (dma_set_decrypted(dev, ret, size))
+ if ((attrs & DMA_ATTR_CC_SHARED) && dma_set_decrypted(dev, ret, size))
goto out_leak_pages;
setup_page:
memset(ret, 0, size);
- *dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
+ *dma_handle = phys_to_dma_direct(dev, page_to_phys(page),
+ !!(attrs & DMA_ATTR_CC_SHARED));
return page;
out_leak_pages:
return NULL;
@@ -471,8 +479,12 @@ void dma_direct_free_pages(struct device *dev, size_t size,
struct page *page, dma_addr_t dma_addr,
enum dma_data_direction dir)
{
+ /*
+ * if the device had requested for an unencrypted buffer,
+ * convert it to encrypted on free
+ */
+ bool mark_mem_encrypted = force_dma_unencrypted(dev);
void *vaddr = page_address(page);
- bool mark_mem_encrypted = true;
/* If cpu_addr is not from an atomic pool, dma_free_from_pool() fails */
if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
--
2.43.0
next prev parent reply other threads:[~2026-05-12 9:06 UTC|newest]
Thread overview: 34+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-05-12 9:03 [PATCH v4 00/13] dma-mapping: Use DMA_ATTR_CC_SHARED through direct, pool and swiotlb paths Aneesh Kumar K.V (Arm)
2026-05-12 9:03 ` [PATCH v4 01/13] dma-direct: swiotlb: handle swiotlb alloc/free outside __dma_direct_alloc_pages Aneesh Kumar K.V (Arm)
2026-05-13 13:57 ` Mostafa Saleh
2026-05-14 4:54 ` Aneesh Kumar K.V
2026-05-12 9:03 ` [PATCH v4 02/13] dma-direct: use DMA_ATTR_CC_SHARED in alloc/free paths Aneesh Kumar K.V (Arm)
2026-05-13 13:58 ` Mostafa Saleh
2026-05-14 5:01 ` Aneesh Kumar K.V
2026-05-12 9:03 ` [PATCH v4 03/13] dma-pool: track decrypted atomic pools and select them via attrs Aneesh Kumar K.V (Arm)
2026-05-13 14:00 ` Mostafa Saleh
2026-05-14 7:00 ` Aneesh Kumar K.V
2026-05-14 8:06 ` Mostafa Saleh
2026-05-12 9:03 ` [PATCH v4 04/13] dma: swiotlb: track pool encryption state and honor DMA_ATTR_CC_SHARED Aneesh Kumar K.V (Arm)
2026-05-13 14:27 ` Mostafa Saleh
2026-05-13 17:24 ` Jason Gunthorpe
2026-05-14 6:24 ` Aneesh Kumar K.V
2026-05-14 11:48 ` Mostafa Saleh
2026-05-14 12:35 ` Jason Gunthorpe
2026-05-14 14:43 ` Mostafa Saleh
2026-05-14 5:54 ` Aneesh Kumar K.V
2026-05-14 12:02 ` Mostafa Saleh
2026-05-14 12:48 ` Aneesh Kumar K.V
2026-05-14 14:21 ` Mostafa Saleh
2026-05-14 14:43 ` Aneesh Kumar K.V
2026-05-14 14:37 ` Jason Gunthorpe
2026-05-14 15:43 ` Mostafa Saleh
2026-05-12 9:04 ` [PATCH v4 05/13] dma-mapping: make dma_pgprot() " Aneesh Kumar K.V (Arm)
2026-05-12 9:04 ` [PATCH v4 06/13] dma-direct: pass attrs to dma_capable() for DMA_ATTR_CC_SHARED checks Aneesh Kumar K.V (Arm)
2026-05-12 9:04 ` [PATCH v4 07/13] dma-direct: make dma_direct_map_phys() honor DMA_ATTR_CC_SHARED Aneesh Kumar K.V (Arm)
2026-05-12 9:04 ` [PATCH v4 08/13] dma-direct: set decrypted flag for remapped DMA allocations Aneesh Kumar K.V (Arm)
2026-05-12 9:04 ` Aneesh Kumar K.V (Arm) [this message]
2026-05-12 9:04 ` [PATCH v4 10/13] dma-pool: fix page leak in atomic_pool_expand() cleanup Aneesh Kumar K.V (Arm)
2026-05-12 9:04 ` [PATCH v4 11/13] dma-direct: rename ret to cpu_addr in alloc helpers Aneesh Kumar K.V (Arm)
2026-05-12 9:04 ` [PATCH v4 12/13] dma-direct: return struct page from dma_direct_alloc_from_pool() Aneesh Kumar K.V (Arm)
2026-05-12 9:04 ` [PATCH v4 13/13] x86/amd-gart: preserve the direct DMA address until GART mapping succeeds Aneesh Kumar K.V (Arm)
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260512090408.794195-10-aneesh.kumar@kernel.org \
--to=aneesh.kumar@kernel.org \
--cc=Suzuki.Poulose@arm.com \
--cc=agordeev@linux.ibm.com \
--cc=aik@amd.com \
--cc=borntraeger@linux.ibm.com \
--cc=catalin.marinas@arm.com \
--cc=chleroy@kernel.org \
--cc=dan.j.williams@intel.com \
--cc=gerald.schaefer@linux.ibm.com \
--cc=gor@linux.ibm.com \
--cc=hca@linux.ibm.com \
--cc=iommu@lists.linux.dev \
--cc=jgg@ziepe.ca \
--cc=jiri@resnulli.us \
--cc=linux-arm-kernel@lists.infradead.org \
--cc=linux-coco@lists.linux.dev \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-s390@vger.kernel.org \
--cc=linuxppc-dev@lists.ozlabs.org \
--cc=m.szyprowski@samsung.com \
--cc=maddy@linux.ibm.com \
--cc=maz@kernel.org \
--cc=mpe@ellerman.id.au \
--cc=npiggin@gmail.com \
--cc=ptesarik@suse.com \
--cc=robin.murphy@arm.com \
--cc=smostafa@google.com \
--cc=steven.price@arm.com \
--cc=svens@linux.ibm.com \
--cc=will@kernel.org \
--cc=x86@kernel.org \
--cc=yilun.xu@linux.intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.