From mboxrd@z Thu Jan 1 00:00:00 1970
From: "Aneesh Kumar K.V (Arm)" <aneesh.kumar@kernel.org>
To: iommu@lists.linux.dev, linux-kernel@vger.kernel.org
Cc: "Aneesh Kumar K.V (Arm)", Robin Murphy, Marek Szyprowski,
	Will Deacon, Marc Zyngier, Steven Price, Suzuki K Poulose,
	Catalin Marinas, Jiri Pirko, Jason Gunthorpe, Mostafa Saleh,
	Petr Tesarik, Alexey Kardashevskiy, Dan Williams, Xu Yilun
Subject: [PATCH v3 9/9] dma-direct: select DMA address encoding from DMA_ATTR_CC_SHARED
Date: Mon, 27 Apr 2026 11:25:09 +0530
Message-ID: <20260427055509.898190-10-aneesh.kumar@kernel.org>
X-Mailer: git-send-email 2.43.0
In-Reply-To: <20260427055509.898190-1-aneesh.kumar@kernel.org>
References: <20260427055509.898190-1-aneesh.kumar@kernel.org>
MIME-Version: 1.0
Content-Transfer-Encoding: 8bit

Make the dma-direct helpers derive the DMA address encoding from
DMA_ATTR_CC_SHARED instead of implicitly relying on
force_dma_unencrypted() inside phys_to_dma_direct().

Pass an explicit unencrypted state into phys_to_dma_direct() and make
the allocation paths return DMA addresses that match the requested
buffer encryption state. Also, only call dma_set_decrypted() when
DMA_ATTR_CC_SHARED is actually set.
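For illustration, below is a minimal caller sketch. It is hypothetical
and not part of this patch; it assumes the DMA_ATTR_CC_SHARED
definition introduced earlier in this series and uses only the
standard dma_alloc_attrs() interface:

	#include <linux/dma-mapping.h>

	/*
	 * Hypothetical example: allocate a buffer that is shared
	 * (decrypted) with the host. With this patch applied, the
	 * returned handle is encoded via phys_to_dma_unencrypted()
	 * because DMA_ATTR_CC_SHARED is set; without the attribute
	 * it is encoded via phys_to_dma_encrypted().
	 */
	static void *cc_alloc_shared(struct device *dev, size_t size,
				     dma_addr_t *dma_handle)
	{
		return dma_alloc_attrs(dev, size, dma_handle,
				       GFP_KERNEL, DMA_ATTR_CC_SHARED);
	}

Freeing goes through dma_free_attrs() as usual; the free path derives
the need to re-encrypt from force_dma_unencrypted(), as shown in
dma_direct_free_pages() below.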
Signed-off-by: Aneesh Kumar K.V (Arm) <aneesh.kumar@kernel.org>
---
 kernel/dma/direct.c | 48 ++++++++++++++++++++++++++++-----------------
 1 file changed, 30 insertions(+), 18 deletions(-)

diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index f874be2d85c2..cafa6b5ed1c6 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -24,11 +24,11 @@
 u64 zone_dma_limit __ro_after_init = DMA_BIT_MASK(24);
 
 static inline dma_addr_t phys_to_dma_direct(struct device *dev,
-		phys_addr_t phys)
+		phys_addr_t phys, bool unencrypted)
 {
-	if (force_dma_unencrypted(dev))
+	if (unencrypted)
 		return phys_to_dma_unencrypted(dev, phys);
-	return phys_to_dma(dev, phys);
+	return phys_to_dma_encrypted(dev, phys);
 }
 
 static inline struct page *dma_direct_to_page(struct device *dev,
@@ -39,8 +39,9 @@ static inline struct page *dma_direct_to_page(struct device *dev,
 
 u64 dma_direct_get_required_mask(struct device *dev)
 {
+	bool require_decrypted = force_dma_unencrypted(dev);
 	phys_addr_t phys = (phys_addr_t)(max_pfn - 1) << PAGE_SHIFT;
-	u64 max_dma = phys_to_dma_direct(dev, phys);
+	u64 max_dma = phys_to_dma_direct(dev, phys, require_decrypted);
 
 	return (1ULL << (fls64(max_dma) - 1)) * 2 - 1;
 }
@@ -69,7 +70,8 @@ static gfp_t dma_direct_optimal_gfp_mask(struct device *dev, u64 *phys_limit)
 
 bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
 {
-	dma_addr_t dma_addr = phys_to_dma_direct(dev, phys);
+	bool require_decrypted = force_dma_unencrypted(dev);
+	dma_addr_t dma_addr = phys_to_dma_direct(dev, phys, require_decrypted);
 
 	if (dma_addr == DMA_MAPPING_ERROR)
 		return false;
@@ -79,17 +81,18 @@ bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
 
 static int dma_set_decrypted(struct device *dev, void *vaddr, size_t size)
 {
-	if (!force_dma_unencrypted(dev))
-		return 0;
-	return set_memory_decrypted((unsigned long)vaddr, PFN_UP(size));
+	int ret;
+
+	ret = set_memory_decrypted((unsigned long)vaddr, PFN_UP(size));
+	if (ret)
+		pr_warn_ratelimited("leaking DMA memory that can't be decrypted\n");
+	return ret;
 }
 
 static int dma_set_encrypted(struct device *dev, void *vaddr, size_t size)
 {
 	int ret;
 
-	if (!force_dma_unencrypted(dev))
-		return 0;
 	ret = set_memory_encrypted((unsigned long)vaddr, PFN_UP(size));
 	if (ret)
 		pr_warn_ratelimited("leaking DMA memory that can't be re-encrypted\n");
@@ -177,7 +180,8 @@ static void *dma_direct_alloc_from_pool(struct device *dev, size_t size,
 			dma_coherent_ok);
 	if (!page)
 		return NULL;
-	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
+	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page),
+					 !!(attrs & DMA_ATTR_CC_SHARED));
 	return ret;
 }
 
@@ -193,9 +197,11 @@ static void *dma_direct_alloc_no_mapping(struct device *dev, size_t size,
 	/* remove any dirty cache lines on the kernel alias */
 	if (!PageHighMem(page))
 		arch_dma_prep_coherent(page, size);
-
-	/* return the page pointer as the opaque cookie */
-	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
+	/*
+	 * return the page pointer as the opaque cookie.
+	 * Never used for unencrypted allocations.
+	 */
+	*dma_handle = phys_to_dma_encrypted(dev, page_to_phys(page));
 	return page;
 }
 
@@ -337,7 +343,8 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 			goto out_encrypt_pages;
 	}
 
-	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
+	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page),
+					 !!(attrs & DMA_ATTR_CC_SHARED));
 	return ret;
 
 out_encrypt_pages:
@@ -447,11 +454,12 @@ struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
 		return NULL;
 
 	ret = page_address(page);
-	if (dma_set_decrypted(dev, ret, size))
+	if ((attrs & DMA_ATTR_CC_SHARED) && dma_set_decrypted(dev, ret, size))
 		goto out_leak_pages;
 setup_page:
 	memset(ret, 0, size);
-	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
+	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page),
+					 !!(attrs & DMA_ATTR_CC_SHARED));
 	return page;
 out_leak_pages:
 	return NULL;
@@ -461,8 +469,12 @@ void dma_direct_free_pages(struct device *dev, size_t size,
 		struct page *page, dma_addr_t dma_addr,
 		enum dma_data_direction dir)
 {
+	/*
+	 * if the device had requested an unencrypted buffer,
+	 * convert it back to encrypted on free
+	 */
+	bool mark_mem_encrypted = force_dma_unencrypted(dev);
 	void *vaddr = page_address(page);
-	bool mark_mem_encrypted = true;
 
 	/* If cpu_addr is not from an atomic pool, dma_free_from_pool() fails */
 	if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
-- 
2.43.0