* [PATCH v4 12/13] dma-direct: return struct page from dma_direct_alloc_from_pool()
[not found] <20260512090408.794195-1-aneesh.kumar@kernel.org>
@ 2026-05-12 9:04 ` Aneesh Kumar K.V (Arm)
0 siblings, 0 replies; only message in thread
From: Aneesh Kumar K.V (Arm) @ 2026-05-12 9:04 UTC (permalink / raw)
To: iommu, linux-arm-kernel, linux-kernel, linux-coco
Cc: Aneesh Kumar K.V (Arm), Robin Murphy, Marek Szyprowski,
Will Deacon, Marc Zyngier, Steven Price, Suzuki K Poulose,
Catalin Marinas, Jiri Pirko, Jason Gunthorpe, Mostafa Saleh,
Petr Tesarik, Alexey Kardashevskiy, Dan Williams, Xu Yilun,
linuxppc-dev, linux-s390, Madhavan Srinivasan, Michael Ellerman,
Nicholas Piggin, Christophe Leroy (CS GROUP), Alexander Gordeev,
Gerald Schaefer, Heiko Carstens, Vasily Gorbik,
Christian Borntraeger, Sven Schnelle, x86, stable
Commit 5b138c534fda ("dma-direct: factor out a dma_direct_alloc_from_pool
helper") changed dma_direct_alloc_from_pool() to return the CPU address
from dma_alloc_from_pool(). That is what dma_direct_alloc() needs, but
dma_direct_alloc_pages() also uses the helper and expects a struct page *.
Fix this by making dma_direct_alloc_from_pool() return the struct page *
again, and pass the CPU address back through an out-parameter for the
dma_direct_alloc() caller.
Fixes: 5b138c534fda ("dma-direct: factor out a dma_direct_alloc_from_pool helper")
Cc: stable@vger.kernel.org
Signed-off-by: Aneesh Kumar K.V (Arm) <aneesh.kumar@kernel.org>
---
kernel/dma/direct.c | 21 ++++++++++++---------
1 file changed, 12 insertions(+), 9 deletions(-)
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 902008e40ea1..5103a04df99f 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -165,24 +165,24 @@ static bool dma_direct_use_pool(struct device *dev, gfp_t gfp)
return !gfpflags_allow_blocking(gfp) && !is_swiotlb_for_alloc(dev);
}
-static void *dma_direct_alloc_from_pool(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
+static struct page *dma_direct_alloc_from_pool(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, void **cpu_addr, gfp_t gfp,
+ unsigned long attrs)
{
struct page *page;
u64 phys_limit;
- void *ret;
if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_DMA_COHERENT_POOL)))
return NULL;
gfp |= dma_direct_optimal_gfp_mask(dev, &phys_limit);
- page = dma_alloc_from_pool(dev, size, &ret, gfp, attrs,
+ page = dma_alloc_from_pool(dev, size, cpu_addr, gfp, attrs,
dma_coherent_ok);
if (!page)
return NULL;
*dma_handle = phys_to_dma_direct(dev, page_to_phys(page),
!!(attrs & DMA_ATTR_CC_SHARED));
- return ret;
+ return page;
}
static void *dma_direct_alloc_no_mapping(struct device *dev, size_t size,
@@ -278,9 +278,12 @@ void *dma_direct_alloc(struct device *dev, size_t size,
* the atomic pools instead if we aren't allowed block.
*/
if ((remap || (attrs & DMA_ATTR_CC_SHARED)) &&
- dma_direct_use_pool(dev, gfp))
- return dma_direct_alloc_from_pool(dev, size, dma_handle,
- gfp, attrs);
+ dma_direct_use_pool(dev, gfp)) {
+ page = dma_direct_alloc_from_pool(dev, size,
+ dma_handle, &cpu_addr,
+ gfp, attrs);
+ return page ? cpu_addr : NULL;
+ }
if (is_swiotlb_for_alloc(dev)) {
page = dma_direct_alloc_swiotlb(dev, size, attrs);
@@ -443,7 +446,7 @@ struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
if ((attrs & DMA_ATTR_CC_SHARED) && dma_direct_use_pool(dev, gfp))
return dma_direct_alloc_from_pool(dev, size, dma_handle,
- gfp, attrs);
+ &cpu_addr, gfp, attrs);
if (is_swiotlb_for_alloc(dev)) {
page = dma_direct_alloc_swiotlb(dev, size, attrs);
--
2.43.0
^ permalink raw reply related [flat|nested] only message in thread