From mboxrd@z Thu Jan  1 00:00:00 1970
From: "Aneesh Kumar K.V (Arm)"
To: iommu@lists.linux.dev, linux-arm-kernel@lists.infradead.org,
	linux-kernel@vger.kernel.org, linux-coco@lists.linux.dev
Cc: "Aneesh Kumar K.V (Arm)", Robin Murphy, Marek Szyprowski,
	Will Deacon, Marc Zyngier, Steven Price, Suzuki K Poulose,
	Catalin Marinas, Jiri Pirko, Jason Gunthorpe, Mostafa Saleh,
	Petr Tesarik, Alexey Kardashevskiy, Dan Williams, Xu Yilun,
	linuxppc-dev@lists.ozlabs.org, linux-s390@vger.kernel.org,
	Madhavan Srinivasan, Michael Ellerman, Nicholas Piggin,
	"Christophe Leroy (CS GROUP)", Alexander Gordeev, Gerald Schaefer,
	Heiko Carstens, Vasily Gorbik, Christian Borntraeger,
	Sven Schnelle, x86@kernel.org
Subject: [PATCH v4 09/13] dma-direct: select DMA address encoding from DMA_ATTR_CC_SHARED
Date: Tue, 12 May 2026 14:34:04 +0530
Message-ID: <20260512090408.794195-10-aneesh.kumar@kernel.org>
X-Mailer: git-send-email 2.43.0
In-Reply-To: <20260512090408.794195-1-aneesh.kumar@kernel.org>
References: <20260512090408.794195-1-aneesh.kumar@kernel.org>

Make the dma-direct helpers derive the DMA address encoding from
DMA_ATTR_CC_SHARED instead of implicitly relying on
force_dma_unencrypted() inside phys_to_dma_direct().

Pass an explicit unencrypted state into phys_to_dma_direct() and make
the alloc paths return DMA addresses that match the requested buffer
encryption state. Also, only call dma_set_decrypted() when
DMA_ATTR_CC_SHARED is actually set.
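To illustrate the resulting call convention (condensed from the diff
below):

	/* before: DMA address encoding chosen implicitly per device */
	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));

	/* after: encoding chosen explicitly per allocation */
	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page),
					 !!(attrs & DMA_ATTR_CC_SHARED));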
Signed-off-by: Aneesh Kumar K.V (Arm)
---
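For reviewers, a minimal sketch of the intended consumer-side usage.
This is hypothetical driver code, not part of this patch; it assumes
only dma_alloc_attrs() and the DMA_ATTR_CC_SHARED attribute introduced
earlier in this series:

	#include <linux/dma-mapping.h>

	/*
	 * Hypothetical example: a driver in a confidential guest asks
	 * for a buffer that is explicitly shared (decrypted) with the
	 * host.  With this patch, *dma_handle gets the unencrypted
	 * encoding only when DMA_ATTR_CC_SHARED is set; every other
	 * allocation gets the encrypted encoding.
	 */
	static void *alloc_host_shared(struct device *dev, size_t size,
				       dma_addr_t *dma_handle)
	{
		return dma_alloc_attrs(dev, size, dma_handle, GFP_KERNEL,
				       DMA_ATTR_CC_SHARED);
	}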
 kernel/dma/direct.c | 48 ++++++++++++++++++++++++++++++------------------
 1 file changed, 30 insertions(+), 18 deletions(-)

diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index f5da6e992d83..1e9f9ff7b9d3 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -24,11 +24,11 @@ u64 zone_dma_limit __ro_after_init = DMA_BIT_MASK(24);
 
 static inline dma_addr_t phys_to_dma_direct(struct device *dev,
-		phys_addr_t phys)
+		phys_addr_t phys, bool unencrypted)
 {
-	if (force_dma_unencrypted(dev))
+	if (unencrypted)
 		return phys_to_dma_unencrypted(dev, phys);
-	return phys_to_dma(dev, phys);
+	return phys_to_dma_encrypted(dev, phys);
 }
 
 static inline struct page *dma_direct_to_page(struct device *dev,
@@ -39,8 +39,9 @@ static inline struct page *dma_direct_to_page(struct device *dev,
 
 u64 dma_direct_get_required_mask(struct device *dev)
 {
+	bool require_decrypted = force_dma_unencrypted(dev);
 	phys_addr_t phys = (phys_addr_t)(max_pfn - 1) << PAGE_SHIFT;
-	u64 max_dma = phys_to_dma_direct(dev, phys);
+	u64 max_dma = phys_to_dma_direct(dev, phys, require_decrypted);
 
 	return (1ULL << (fls64(max_dma) - 1)) * 2 - 1;
 }
@@ -69,7 +70,8 @@ static gfp_t dma_direct_optimal_gfp_mask(struct device *dev, u64 *phys_limit)
 
 bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
 {
-	dma_addr_t dma_addr = phys_to_dma_direct(dev, phys);
+	bool require_decrypted = force_dma_unencrypted(dev);
+	dma_addr_t dma_addr = phys_to_dma_direct(dev, phys, require_decrypted);
 
 	if (dma_addr == DMA_MAPPING_ERROR)
 		return false;
@@ -79,17 +81,18 @@ bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
 
 static int dma_set_decrypted(struct device *dev, void *vaddr, size_t size)
 {
-	if (!force_dma_unencrypted(dev))
-		return 0;
-	return set_memory_decrypted((unsigned long)vaddr, PFN_UP(size));
+	int ret;
+
+	ret = set_memory_decrypted((unsigned long)vaddr, PFN_UP(size));
+	if (ret)
+		pr_warn_ratelimited("leaking DMA memory that can't be decrypted\n");
+	return ret;
 }
 
 static int dma_set_encrypted(struct device *dev, void *vaddr, size_t size)
 {
 	int ret;
 
-	if (!force_dma_unencrypted(dev))
-		return 0;
 	ret = set_memory_encrypted((unsigned long)vaddr, PFN_UP(size));
 	if (ret)
 		pr_warn_ratelimited("leaking DMA memory that can't be re-encrypted\n");
@@ -177,7 +180,8 @@ static void *dma_direct_alloc_from_pool(struct device *dev, size_t size,
 			dma_coherent_ok);
 	if (!page)
 		return NULL;
-	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
+	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page),
+					 !!(attrs & DMA_ATTR_CC_SHARED));
 	return ret;
 }
 
@@ -193,9 +197,11 @@ static void *dma_direct_alloc_no_mapping(struct device *dev, size_t size,
 	/* remove any dirty cache lines on the kernel alias */
 	if (!PageHighMem(page))
 		arch_dma_prep_coherent(page, size);
-
-	/* return the page pointer as the opaque cookie */
-	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
+	/*
+	 * Return the page pointer as the opaque cookie.
+	 * Never used for unencrypted allocations.
+	 */
+	*dma_handle = phys_to_dma_encrypted(dev, page_to_phys(page));
 	return page;
 }
 
@@ -340,7 +346,8 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 		ret = uncached_cpu_addr;
 	}
 
-	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
+	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page),
+					 !!(attrs & DMA_ATTR_CC_SHARED));
 	return ret;
 
 out_encrypt_pages:
@@ -457,11 +464,12 @@ struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
 		return NULL;
 
 	ret = page_address(page);
-	if (dma_set_decrypted(dev, ret, size))
+	if ((attrs & DMA_ATTR_CC_SHARED) && dma_set_decrypted(dev, ret, size))
 		goto out_leak_pages;
 setup_page:
 	memset(ret, 0, size);
-	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
+	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page),
+					 !!(attrs & DMA_ATTR_CC_SHARED));
 	return page;
 out_leak_pages:
 	return NULL;
@@ -471,8 +479,12 @@ void dma_direct_free_pages(struct device *dev, size_t size,
 		struct page *page, dma_addr_t dma_addr,
 		enum dma_data_direction dir)
 {
+	/*
+	 * If the device had requested an unencrypted buffer,
+	 * convert it back to encrypted on free.
+	 */
+	bool mark_mem_encrypted = force_dma_unencrypted(dev);
 	void *vaddr = page_address(page);
-	bool mark_mem_encrypted = true;
 
 	/* If cpu_addr is not from an atomic pool, dma_free_from_pool() fails */
 	if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
-- 
2.43.0