Subject: [PATCH] dma-direct: swiotlb: Skip encryption toggles for swiotlb allocations
From: Aneesh Kumar K.V (Arm) <aneesh.kumar@kernel.org>
Date: 2026-01-02 15:54 UTC
  To: iommu, linux-kernel, linux-coco
  Cc: Marek Szyprowski, Robin Murphy, steven.price, Suzuki K Poulose,
	Aneesh Kumar K.V (Arm)

Swiotlb backing pages are already mapped decrypted via
swiotlb_update_mem_attributes(), so dma-direct does not need to call
set_memory_decrypted() during allocation or re-encrypt the memory on
free.
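
For reference, that pool-wide transition happens once when the pool is
set up. A minimal sketch of the idea (simplified, field names
approximate; see kernel/dma/swiotlb.c for the real code):

	void __init swiotlb_update_mem_attributes(void)
	{
		struct io_tlb_mem *mem = &io_tlb_default_mem;
		unsigned long bytes;

		if (!mem->nslabs || mem->late_alloc)
			return;
		bytes = PAGE_ALIGN(mem->nslabs << IO_TLB_SHIFT);
		/* decrypt the whole bounce-buffer pool in one go */
		set_memory_decrypted((unsigned long)phys_to_virt(mem->start),
				     bytes >> PAGE_SHIFT);
	}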

Handle swiotlb-backed buffers explicitly: for lowmem pages, obtain the
DMA address and zero the buffer through the linear mapping, and skip
the decrypt/encrypt transitions when allocating from or freeing back to
the swiotlb pool (membership is detected via swiotlb_find_pool()).
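
A condensed view of both halves of the change (illustrative only; the
diff below has the full context):

	/* alloc: pool pages are already decrypted, skip the toggle */
	if (is_swiotlb_for_alloc(dev))
		page = dma_direct_alloc_swiotlb(dev, size, dma_handle);

	/* free: skip re-encryption for buffers that live in the pool */
	if (swiotlb_find_pool(dev, dma_to_phys(dev, dma_addr)))
		mark_mem_encrypted = false;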

Signed-off-by: Aneesh Kumar K.V (Arm) <aneesh.kumar@kernel.org>
---
 kernel/dma/direct.c | 56 +++++++++++++++++++++++++++++++++++++--------
 1 file changed, 46 insertions(+), 10 deletions(-)

diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index faf1e41afde8..c4ef4457bd74 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -104,15 +104,27 @@ static void __dma_direct_free_pages(struct device *dev, struct page *page,
 	dma_free_contiguous(dev, page, size);
 }
 
-static struct page *dma_direct_alloc_swiotlb(struct device *dev, size_t size)
+static struct page *dma_direct_alloc_swiotlb(struct device *dev, size_t size,
+					     dma_addr_t *dma_handle)
 {
-	struct page *page = swiotlb_alloc(dev, size);
+	void *lm_addr;
+	struct page *page;
+
+	page = swiotlb_alloc(dev, size);
+	if (!page)
+		return NULL;
 
-	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
+	if (!dma_coherent_ok(dev, page_to_phys(page), size)) {
 		swiotlb_free(dev, page, size);
 		return NULL;
 	}
+	/* If HighMem, let the caller take care of creating a mapping */
+	if (PageHighMem(page))
+		return page;
 
+	lm_addr = page_address(page);
+	memset(lm_addr, 0, size);
+	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
 	return page;
 }
 
@@ -125,9 +137,6 @@ static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
 
 	WARN_ON_ONCE(!PAGE_ALIGNED(size));
 
-	if (is_swiotlb_for_alloc(dev))
-		return dma_direct_alloc_swiotlb(dev, size);
-
 	gfp |= dma_direct_optimal_gfp_mask(dev, &phys_limit);
 	page = dma_alloc_contiguous(dev, size, gfp);
 	if (page) {
@@ -204,6 +213,7 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
 {
 	bool remap = false, set_uncached = false;
+	bool mark_mem_decrypt = true;
 	bool allow_highmem = true;
 	struct page *page;
 	void *ret;
@@ -251,6 +261,14 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 	    dma_direct_use_pool(dev, gfp))
 		return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);
 
+	if (is_swiotlb_for_alloc(dev)) {
+		page = dma_direct_alloc_swiotlb(dev, size, dma_handle);
+		if (page) {
+			mark_mem_decrypt = false;
+			goto setup_page;
+		}
+		return NULL;
+	}
 
 	if (force_dma_unencrypted(dev))
 		/*
@@ -266,6 +284,7 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 	if (!page)
 		return NULL;
 
+setup_page:
 	/*
 	 * dma_alloc_contiguous can return highmem pages depending on a
 	 * combination the cma= arguments and per-arch setup.  These need to be
@@ -295,7 +314,7 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 		ret = page_address(page);
 	}
 
-	if (force_dma_unencrypted(dev)) {
+	if (mark_mem_decrypt && force_dma_unencrypted(dev)) {
 		void *lm_addr;
 
 		lm_addr = page_address(page);
@@ -316,7 +335,7 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 	return ret;
 
 out_encrypt_pages:
-	if (dma_set_encrypted(dev, page_address(page), size))
+	if (mark_mem_decrypt && dma_set_encrypted(dev, page_address(page), size))
 		return NULL;
 out_free_pages:
 	__dma_direct_free_pages(dev, page, size);
@@ -328,6 +347,7 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 void dma_direct_free(struct device *dev, size_t size,
 		void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs)
 {
+	bool mark_mem_encrypted = true;
 	unsigned int page_order = get_order(size);
 
 	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
@@ -356,6 +376,9 @@ void dma_direct_free(struct device *dev, size_t size,
 	    dma_free_from_pool(dev, cpu_addr, PAGE_ALIGN(size)))
 		return;
 
+	if (swiotlb_find_pool(dev, dma_to_phys(dev, dma_addr)))
+		mark_mem_encrypted = false;
+
 	if (is_vmalloc_addr(cpu_addr)) {
 		vunmap(cpu_addr);
 	} else {
@@ -363,7 +386,7 @@ void dma_direct_free(struct device *dev, size_t size,
 			arch_dma_clear_uncached(cpu_addr, size);
 	}
 
-	if (force_dma_unencrypted(dev)) {
+	if (mark_mem_encrypted && force_dma_unencrypted(dev)) {
 		void *lm_addr;
 
 		lm_addr = phys_to_virt(dma_to_phys(dev, dma_addr));
@@ -385,6 +408,15 @@ struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
 	if (force_dma_unencrypted(dev) && dma_direct_use_pool(dev, gfp))
 		return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);
 
+	if (is_swiotlb_for_alloc(dev)) {
+		page = dma_direct_alloc_swiotlb(dev, size, dma_handle);
+		if (page && PageHighMem(page)) {
+			swiotlb_free(dev, page, size);
+			return NULL;
+		}
+		return page;
+	}
+
 	page = __dma_direct_alloc_pages(dev, size, gfp, false);
 	if (!page)
 		return NULL;
@@ -404,13 +436,17 @@ void dma_direct_free_pages(struct device *dev, size_t size,
 		enum dma_data_direction dir)
 {
 	void *vaddr = page_address(page);
+	bool mark_mem_encrypted = true;
 
 	/* If cpu_addr is not from an atomic pool, dma_free_from_pool() fails */
 	if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
 	    dma_free_from_pool(dev, vaddr, size))
 		return;
 
-	if (dma_set_encrypted(dev, vaddr, size))
+	if (swiotlb_find_pool(dev, page_to_phys(page)))
+		mark_mem_encrypted = false;
+
+	if (mark_mem_encrypted && dma_set_encrypted(dev, vaddr, size))
 		return;
 	__dma_direct_free_pages(dev, page, size);
 }
-- 
2.43.0


Thread overview: 11+ messages
2026-01-02 15:54 [PATCH] dma-direct: swiotlb: Skip encryption toggles for swiotlb allocations Aneesh Kumar K.V (Arm)
2026-01-08 11:01 ` Robin Murphy
2026-01-09  2:51   ` Aneesh Kumar K.V
2026-01-12 13:25     ` Robin Murphy
2026-01-12 15:42       ` Aneesh Kumar K.V
2026-01-14  9:49         ` Aneesh Kumar K.V
2026-01-19  9:52           ` Marek Szyprowski
2026-01-19 14:28             ` Robin Murphy
2026-01-19 15:53               ` Aneesh Kumar K.V
2026-01-19 16:37                 ` Robin Murphy
2026-01-20  9:33                   ` Aneesh Kumar K.V
