From: Mostafa Saleh <smostafa@google.com>
To: iommu@lists.linux.dev, linux-kernel@vger.kernel.org
Cc: robin.murphy@arm.com, m.szyprowski@samsung.com, will@kernel.org,
maz@kernel.org, suzuki.poulose@arm.com, catalin.marinas@arm.com,
jiri@resnulli.us, jgg@ziepe.ca, aneesh.kumar@kernel.org,
Mostafa Saleh <smostafa@google.com>
Subject: [RFC PATCH v3 4/5] dma-mapping: Encapsulate memory state during allocation
Date: Wed, 8 Apr 2026 19:47:41 +0000 [thread overview]
Message-ID: <20260408194750.2280873-5-smostafa@google.com> (raw)
In-Reply-To: <20260408194750.2280873-1-smostafa@google.com>
Introduce a new dma-direct internal type, dma_page, which wraps a
"struct page" pointer together with one bit indicating whether the
memory has been decrypted or not.
This makes it possible to pass that state, encapsulated, through the
allocation functions; it is currently set from swiotlb_alloc().
No functional changes.
Signed-off-by: Mostafa Saleh <smostafa@google.com>
---
kernel/dma/direct.c | 58 +++++++++++++++++++++++++++++++++++----------
1 file changed, 46 insertions(+), 12 deletions(-)
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index de63e0449700..204bc566480c 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -16,6 +16,33 @@
#include <linux/pci-p2pdma.h>
#include "direct.h"
+/*
+ * Represent DMA allocation and 1 bit flag for it's state
+ */
+struct dma_page {
+ unsigned long val;
+};
+
+#define DMA_PAGE_DECRYPTED_FLAG BIT(0)
+
+#define DMA_PAGE_NULL ((struct dma_page){ .val = 0 })
+
+static inline struct dma_page page_to_dma_page(struct page *page, bool decrypted)
+{
+ struct dma_page dma_page;
+
+ dma_page.val = (unsigned long)page;
+ if (decrypted)
+ dma_page.val |= DMA_PAGE_DECRYPTED_FLAG;
+
+ return dma_page;
+}
+
+static inline struct page *dma_page_to_page(struct dma_page dma_page)
+{
+ return (struct page *)(dma_page.val & ~DMA_PAGE_DECRYPTED_FLAG);
+}
+
/*
* Most architectures use ZONE_DMA for the first 16 Megabytes, but some use
* it for entirely different regions. In that case the arch code needs to
@@ -103,20 +130,21 @@ static void __dma_direct_free_pages(struct device *dev, struct page *page,
dma_free_contiguous(dev, page, size);
}
-static struct page *dma_direct_alloc_swiotlb(struct device *dev, size_t size)
+static struct dma_page dma_direct_alloc_swiotlb(struct device *dev, size_t size)
{
- struct page *page = swiotlb_alloc(dev, size, NULL);
+ enum swiotlb_page_state state;
+ struct page *page = swiotlb_alloc(dev, size, &state);
if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
swiotlb_free(dev, page, size);
- return NULL;
+ return DMA_PAGE_NULL;
}
- return page;
+ return page_to_dma_page(page, state == SWIOTLB_PAGE_DECRYPTED);
}
-static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
- gfp_t gfp, bool allow_highmem)
+static struct dma_page __dma_direct_alloc_pages(struct device *dev, size_t size,
+ gfp_t gfp, bool allow_highmem)
{
int node = dev_to_node(dev);
struct page *page;
@@ -132,7 +160,7 @@ static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
if (page) {
if (dma_coherent_ok(dev, page_to_phys(page), size) &&
(allow_highmem || !PageHighMem(page)))
- return page;
+ return page_to_dma_page(page, false);
dma_free_contiguous(dev, page, size);
}
@@ -148,10 +176,10 @@ static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
else if (IS_ENABLED(CONFIG_ZONE_DMA) && !(gfp & GFP_DMA))
gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
else
- return NULL;
+ return DMA_PAGE_NULL;
}
- return page;
+ return page_to_dma_page(page, false);
}
/*
@@ -184,9 +212,11 @@ static void *dma_direct_alloc_from_pool(struct device *dev, size_t size,
static void *dma_direct_alloc_no_mapping(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp)
{
+ struct dma_page dma_page;
struct page *page;
- page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO, true);
+ dma_page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO, true);
+ page = dma_page_to_page(dma_page);
if (!page)
return NULL;
@@ -203,6 +233,7 @@ void *dma_direct_alloc(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
bool remap = false, set_uncached = false, decrypt = force_dma_unencrypted(dev);
+ struct dma_page dma_page;
struct page *page;
void *ret;
@@ -253,7 +284,8 @@ void *dma_direct_alloc(struct device *dev, size_t size,
* we always manually zero the memory once we are done, and only allow
* high mem if pages doesn't need decryption.
*/
- page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO, !decrypt);
+ dma_page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO, !decrypt);
+ page = dma_page_to_page(dma_page);
if (!page)
return NULL;
@@ -352,13 +384,15 @@ void dma_direct_free(struct device *dev, size_t size,
struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
{
+ struct dma_page dma_page;
struct page *page;
void *ret;
if (force_dma_unencrypted(dev) && dma_direct_use_pool(dev, gfp))
return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);
- page = __dma_direct_alloc_pages(dev, size, gfp, false);
+ dma_page = __dma_direct_alloc_pages(dev, size, gfp, false);
+ page = dma_page_to_page(dma_page);
if (!page)
return NULL;
--
2.53.0.1213.gd9a14994de-goog
next prev parent reply other threads:[~2026-04-08 19:47 UTC|newest]
Thread overview: 9+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-04-08 19:47 [RFC PATCH v3 0/5] dma-mapping: Fixes for memory encryption Mostafa Saleh
2026-04-08 19:47 ` [RFC PATCH v3 1/5] swiotlb: Return state of memory from swiotlb_alloc() Mostafa Saleh
2026-04-08 19:47 ` [RFC PATCH v3 2/5] dma-mapping: Move encryption in __dma_direct_free_pages() Mostafa Saleh
2026-04-10 17:45 ` Jason Gunthorpe
2026-04-08 19:47 ` [RFC PATCH v3 3/5] dma-mapping: Decrypt memory on remap Mostafa Saleh
2026-04-08 19:47 ` Mostafa Saleh [this message]
2026-04-10 18:05 ` [RFC PATCH v3 4/5] dma-mapping: Encapsulate memory state during allocation Jason Gunthorpe
2026-04-08 19:47 ` [RFC PATCH v3 5/5] dma-mapping: Fix memory decryption issues Mostafa Saleh
2026-04-10 17:43 ` [RFC PATCH v3 0/5] dma-mapping: Fixes for memory encryption Jason Gunthorpe
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260408194750.2280873-5-smostafa@google.com \
--to=smostafa@google.com \
--cc=aneesh.kumar@kernel.org \
--cc=catalin.marinas@arm.com \
--cc=iommu@lists.linux.dev \
--cc=jgg@ziepe.ca \
--cc=jiri@resnulli.us \
--cc=linux-kernel@vger.kernel.org \
--cc=m.szyprowski@samsung.com \
--cc=maz@kernel.org \
--cc=robin.murphy@arm.com \
--cc=suzuki.poulose@arm.com \
--cc=will@kernel.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox