From: Aneesh Kumar K.V <aneesh.kumar@kernel.org>
To: Catalin Marinas <catalin.marinas@arm.com>
Cc: iommu@lists.linux.dev, linux-kernel@vger.kernel.org,
Robin Murphy <robin.murphy@arm.com>,
Marek Szyprowski <m.szyprowski@samsung.com>,
Will Deacon <will@kernel.org>, Marc Zyngier <maz@kernel.org>,
Steven Price <steven.price@arm.com>,
Suzuki K Poulose <Suzuki.Poulose@arm.com>,
Jiri Pirko <jiri@resnulli.us>, Jason Gunthorpe <jgg@ziepe.ca>,
Mostafa Saleh <smostafa@google.com>,
Petr Tesarik <ptesarik@suse.com>,
Alexey Kardashevskiy <aik@amd.com>,
Dan Williams <dan.j.williams@intel.com>,
Xu Yilun <yilun.xu@linux.intel.com>,
Christoph Hellwig <hch@lst.de>
Subject: Re: [PATCH v3 2/9] dma-direct: use DMA_ATTR_CC_SHARED in alloc/free paths
Date: Mon, 11 May 2026 11:08:00 +0530 [thread overview]
Message-ID: <yq5afr3ytsjr.fsf@kernel.org> (raw)
In-Reply-To: <af2sTsMX1Ap_EOiU@arm.com>
Catalin Marinas <catalin.marinas@arm.com> writes:
> On Mon, Apr 27, 2026 at 11:25:02AM +0530, Aneesh Kumar K.V (Arm) wrote:
>> @@ -365,10 +389,14 @@ void dma_direct_free(struct device *dev, size_t size,
>> struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
>> dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
>> {
>> + unsigned long attrs = 0;
>> struct page *page;
>> void *ret;
>>
>> - if (force_dma_unencrypted(dev) && dma_direct_use_pool(dev, gfp))
>> + if (force_dma_unencrypted(dev))
>> + attrs |= DMA_ATTR_CC_SHARED;
>> +
>> + if ((attrs & DMA_ATTR_CC_SHARED) && dma_direct_use_pool(dev, gfp))
>> return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);
>
> I was looking at Sashiko's reports, which flagged the wrong type being
> returned here. Not something your patch introduces but I think it should
> be fixed rather than continue to propagate it. It's been around since
> 5.10, commit 5b138c534fda ("dma-direct: factor out a
> dma_direct_alloc_from_pool helper"). This code path isn't used much I
> guess.
>
I can add this change as one of the patches in the series:
modified kernel/dma/direct.c
@@ -165,24 +165,24 @@
return !gfpflags_allow_blocking(gfp) && !is_swiotlb_for_alloc(dev);
}
-static void *dma_direct_alloc_from_pool(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
+static struct page *dma_direct_alloc_from_pool(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, void **cpu_addr, gfp_t gfp,
+ unsigned long attrs)
{
struct page *page;
u64 phys_limit;
- void *ret;
if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_DMA_COHERENT_POOL)))
return NULL;
gfp |= dma_direct_optimal_gfp_mask(dev, &phys_limit);
- page = dma_alloc_from_pool(dev, size, &ret, gfp, attrs,
- dma_coherent_ok);
+ page = dma_alloc_from_pool(dev, size, cpu_addr, gfp, attrs,
+ dma_coherent_ok);
if (!page)
return NULL;
*dma_handle = phys_to_dma_direct(dev, page_to_phys(page),
!!(attrs & DMA_ATTR_CC_SHARED));
- return ret;
+ return page;
}
static void *dma_direct_alloc_no_mapping(struct device *dev, size_t size,
@@ -212,7 +212,7 @@
bool mark_mem_decrypt = false;
bool allow_highmem = true;
struct page *page;
- void *ret;
+ void *cpu_addr;
/*
* DMA_ATTR_CC_SHARED is not a caller-visible dma_alloc_*()
@@ -278,9 +278,12 @@
* the atomic pools instead if we aren't allowed block.
*/
if ((remap || (attrs & DMA_ATTR_CC_SHARED)) &&
- dma_direct_use_pool(dev, gfp))
- return dma_direct_alloc_from_pool(dev, size, dma_handle,
- gfp, attrs);
+ dma_direct_use_pool(dev, gfp)) {
+ page = dma_direct_alloc_from_pool(dev, size,
+ dma_handle, &cpu_addr,
+ gfp, attrs);
+ return page ? cpu_addr : NULL;
+ }
if (is_swiotlb_for_alloc(dev)) {
page = dma_direct_alloc_swiotlb(dev, size, attrs);
@@ -318,12 +321,12 @@
arch_dma_prep_coherent(page, size);
/* create a coherent mapping */
- ret = dma_common_contiguous_remap(page, size, prot,
+ cpu_addr = dma_common_contiguous_remap(page, size, prot,
__builtin_return_address(0));
- if (!ret)
+ if (!cpu_addr)
goto out_free_pages;
} else {
- ret = page_address(page);
+ cpu_addr = page_address(page);
}
if (mark_mem_decrypt) {
@@ -334,18 +337,18 @@
goto out_leak_pages;
}
- memset(ret, 0, size);
+ memset(cpu_addr, 0, size);
if (set_uncached) {
arch_dma_prep_coherent(page, size);
- ret = arch_dma_set_uncached(ret, size);
- if (IS_ERR(ret))
+ cpu_addr = arch_dma_set_uncached(cpu_addr, size);
+ if (IS_ERR(cpu_addr))
goto out_encrypt_pages;
}
*dma_handle = phys_to_dma_direct(dev, page_to_phys(page),
!!(attrs & DMA_ATTR_CC_SHARED));
- return ret;
+ return cpu_addr;
out_encrypt_pages:
if (mark_mem_decrypt && dma_set_encrypted(dev, page_address(page), size))
@@ -427,14 +430,14 @@
{
unsigned long attrs = 0;
struct page *page;
- void *ret;
+ void *cpu_addr;
if (force_dma_unencrypted(dev))
attrs |= DMA_ATTR_CC_SHARED;
if ((attrs & DMA_ATTR_CC_SHARED) && dma_direct_use_pool(dev, gfp))
return dma_direct_alloc_from_pool(dev, size, dma_handle,
- gfp, attrs);
+ &cpu_addr, gfp, attrs);
if (is_swiotlb_for_alloc(dev)) {
page = dma_direct_alloc_swiotlb(dev, size, attrs);
@@ -445,7 +448,7 @@
swiotlb_free(dev, page, size);
return NULL;
}
- ret = page_address(page);
+ cpu_addr = page_address(page);
goto setup_page;
}
@@ -453,11 +456,11 @@
if (!page)
return NULL;
- ret = page_address(page);
- if ((attrs & DMA_ATTR_CC_SHARED) && dma_set_decrypted(dev, ret, size))
+ cpu_addr = page_address(page);
+ if ((attrs & DMA_ATTR_CC_SHARED) && dma_set_decrypted(dev, cpu_addr, size))
goto out_leak_pages;
setup_page:
- memset(ret, 0, size);
+ memset(cpu_addr, 0, size);
*dma_handle = phys_to_dma_direct(dev, page_to_phys(page),
!!(attrs & DMA_ATTR_CC_SHARED));
return page;
next prev parent reply other threads:[~2026-05-11 5:38 UTC|newest]
Thread overview: 19+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-04-27 5:55 [PATCH v3 0/9] dma-mapping: Use DMA_ATTR_CC_SHARED through direct, pool and swiotlb paths Aneesh Kumar K.V (Arm)
2026-04-27 5:55 ` [PATCH v3 1/9] dma-direct: swiotlb: handle swiotlb alloc/free outside __dma_direct_alloc_pages Aneesh Kumar K.V (Arm)
2026-04-27 5:55 ` [PATCH v3 2/9] dma-direct: use DMA_ATTR_CC_SHARED in alloc/free paths Aneesh Kumar K.V (Arm)
2026-05-08 9:26 ` Catalin Marinas
2026-05-11 5:38 ` Aneesh Kumar K.V [this message]
2026-04-27 5:55 ` [PATCH v3 3/9] dma-pool: track decrypted atomic pools and select them via attrs Aneesh Kumar K.V (Arm)
2026-04-27 5:55 ` [PATCH v3 4/9] dma: swiotlb: track pool encryption state and honor DMA_ATTR_CC_SHARED Aneesh Kumar K.V (Arm)
2026-05-08 16:49 ` Catalin Marinas
2026-05-11 5:14 ` Aneesh Kumar K.V
2026-04-27 5:55 ` [PATCH v3 5/9] dma-mapping: make dma_pgprot() " Aneesh Kumar K.V (Arm)
2026-04-27 5:55 ` [PATCH v3 6/9] dma-direct: pass attrs to dma_capable() for DMA_ATTR_CC_SHARED checks Aneesh Kumar K.V (Arm)
2026-04-27 5:55 ` [PATCH v3 7/9] dma-direct: make dma_direct_map_phys() honor DMA_ATTR_CC_SHARED Aneesh Kumar K.V (Arm)
2026-04-27 5:55 ` [PATCH v3 8/9] dma-direct: set decrypted flag for remapped DMA allocations Aneesh Kumar K.V (Arm)
2026-04-27 5:55 ` [PATCH v3 9/9] dma-direct: select DMA address encoding from DMA_ATTR_CC_SHARED Aneesh Kumar K.V (Arm)
2026-05-08 17:28 ` [PATCH v3 0/9] dma-mapping: Use DMA_ATTR_CC_SHARED through direct, pool and swiotlb paths Catalin Marinas
2026-05-10 0:36 ` Jason Gunthorpe
2026-05-11 11:13 ` Mostafa Saleh
2026-05-12 12:42 ` Jason Gunthorpe
2026-05-11 11:18 ` Aneesh Kumar K.V
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=yq5afr3ytsjr.fsf@kernel.org \
--to=aneesh.kumar@kernel.org \
--cc=Suzuki.Poulose@arm.com \
--cc=aik@amd.com \
--cc=catalin.marinas@arm.com \
--cc=dan.j.williams@intel.com \
--cc=hch@lst.de \
--cc=iommu@lists.linux.dev \
--cc=jgg@ziepe.ca \
--cc=jiri@resnulli.us \
--cc=linux-kernel@vger.kernel.org \
--cc=m.szyprowski@samsung.com \
--cc=maz@kernel.org \
--cc=ptesarik@suse.com \
--cc=robin.murphy@arm.com \
--cc=smostafa@google.com \
--cc=steven.price@arm.com \
--cc=will@kernel.org \
--cc=yilun.xu@linux.intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.