From: Samiullah Khawaja <skhawaja@google.com>
To: Marek Szyprowski <m.szyprowski@samsung.com>,
Will Deacon <will@kernel.org>, Jason Gunthorpe <jgg@ziepe.ca>
Cc: Samiullah Khawaja <skhawaja@google.com>,
Pasha Tatashin <pasha.tatashin@soleen.com>,
Mike Rapoport <rppt@kernel.org>,
Pratyush Yadav <pratyush@kernel.org>,
Alexander Graf <graf@amazon.com>,
Robin Murphy <robin.murphy@arm.com>,
Kevin Tian <kevin.tian@intel.com>,
iommu@lists.linux.dev, kexec@lists.infradead.org,
linux-mm@kvack.org, linux-kernel@vger.kernel.org,
David Matlack <dmatlack@google.com>,
Andrew Morton <akpm@linux-foundation.org>,
Pranjal Shrivastava <praan@google.com>,
Vipin Sharma <vipinsh@google.com>
Subject: [RFC PATCH 3/4] dma-direct: Add API to preserve/restore allocations
Date: Tue, 5 May 2026 00:27:36 +0000 [thread overview]
Message-ID: <20260505002737.2213734-4-skhawaja@google.com> (raw)
In-Reply-To: <20260505002737.2213734-1-skhawaja@google.com>
Add an API to preserve/restore the DMA direct allocation for liveupdate.
The underlying memory is preserved/restored using KHO. During restore
the memory is set up based on the device configuration, gfp flags and
allocation attributes. Once restored, the driver can use the usual
dma_free* API to deallocate the restored DMA allocation.
This API will be used to add support in the dma_alloc* APIs to
preserve/restore the DMA allocations.
Signed-off-by: Samiullah Khawaja <skhawaja@google.com>
---
include/linux/dma-direct.h | 29 +++++++
kernel/dma/Kconfig | 3 +
kernel/dma/direct.c | 163 +++++++++++++++++++++++++++++++++++++
3 files changed, 195 insertions(+)
diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h
index c249912456f9..0efe2bc1a815 100644
--- a/include/linux/dma-direct.h
+++ b/include/linux/dma-direct.h
@@ -141,6 +141,35 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size,
u64 dma_direct_get_required_mask(struct device *dev);
void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
gfp_t gfp, unsigned long attrs);
+
+#ifdef CONFIG_DMA_LIVEUPDATE
+int dma_direct_preserve_allocation(struct device *dev, void *cpu_addr,
+ size_t size, dma_addr_t dma_handle,
+ unsigned long attrs, u64 *state);
+void dma_direct_unpreserve_allocation(struct device *dev, u64 state);
+void *dma_direct_restore_allocation(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp,
+ unsigned long attrs, u64 state);
+#else
+static inline int dma_direct_preserve_allocation(struct device *dev, void *cpu_addr,
+ size_t size, dma_addr_t dma_handle,
+ unsigned long attrs, u64 *state)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void dma_direct_unpreserve_allocation(struct device *dev, u64 state)
+{
+}
+
+static inline void *dma_direct_restore_allocation(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp,
+ unsigned long attrs, u64 state)
+{
+ return NULL;
+}
+#endif
+
void dma_direct_free(struct device *dev, size_t size, void *cpu_addr,
dma_addr_t dma_addr, unsigned long attrs);
struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
diff --git a/kernel/dma/Kconfig b/kernel/dma/Kconfig
index bfef21b4a9ae..d92852942c6c 100644
--- a/kernel/dma/Kconfig
+++ b/kernel/dma/Kconfig
@@ -265,3 +265,6 @@ config DMA_MAP_BENCHMARK
performance of dma_(un)map_page.
See tools/testing/selftests/dma/dma_map_benchmark.c
+
+config DMA_LIVEUPDATE
+ bool "Enable preservation of DMA direct allocations"
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index ec887f443741..c2b98f91900a 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -6,6 +6,8 @@
*/
#include <linux/memblock.h> /* for max_pfn */
#include <linux/export.h>
+#include <linux/kexec_handover.h>
+#include <linux/kho/abi/dma_alloc.h>
#include <linux/mm.h>
#include <linux/dma-map-ops.h>
#include <linux/scatterlist.h>
@@ -307,6 +309,167 @@ void *dma_direct_alloc(struct device *dev, size_t size,
return NULL;
}
+#ifdef CONFIG_DMA_LIVEUPDATE
+int dma_direct_preserve_allocation(struct device *dev, void *cpu_addr,
+ size_t size, dma_addr_t dma_handle,
+ unsigned long attrs, u64 *state)
+{
+ struct dma_alloc_ser *ser;
+ int ret;
+
+ if (!kho_is_enabled())
+ return -EOPNOTSUPP;
+
+ if (IS_ENABLED(CONFIG_DMA_CMA))
+ return -EOPNOTSUPP;
+
+ if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
+ !force_dma_unencrypted(dev) && !is_swiotlb_for_alloc(dev))
+ return -EOPNOTSUPP;
+
+ if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_ALLOC) &&
+ !dev_is_dma_coherent(dev) &&
+ !is_swiotlb_for_alloc(dev))
+ return -EOPNOTSUPP;
+
+ if (IS_ENABLED(CONFIG_DMA_GLOBAL_POOL) &&
+ !dev_is_dma_coherent(dev))
+ return -EOPNOTSUPP;
+
+ if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
+ dma_is_from_pool(dev, cpu_addr, PAGE_ALIGN(size)))
+ return -EOPNOTSUPP;
+
+ ser = kho_alloc_preserve(sizeof(*ser));
+ if (IS_ERR(ser))
+ return PTR_ERR(ser);
+
+ ser->page_phys = dma_to_phys(dev, dma_handle);
+ ser->force_decrypted = force_dma_unencrypted(dev);
+ ser->size = size;
+
+ ret = kho_preserve_pages(phys_to_page(ser->page_phys),
+ size >> PAGE_SHIFT);
+ if (ret) {
+ kho_unpreserve_free(ser);
+ return ret;
+ }
+
+ *state = virt_to_phys(ser);
+ return 0;
+}
+
+void dma_direct_unpreserve_allocation(struct device *dev, u64 state)
+{
+ struct dma_alloc_ser *ser;
+
+ if (!kho_is_enabled())
+ return;
+
+ ser = phys_to_virt(state);
+ kho_unpreserve_pages(phys_to_page(ser->page_phys),
+ ser->size >> PAGE_SHIFT);
+ kho_unpreserve_free(ser);
+}
+
+void *dma_direct_restore_allocation(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp,
+ unsigned long attrs, u64 state)
+{
+ bool remap = false, set_uncached = false;
+ struct dma_alloc_ser *ser = NULL;
+ struct page *page;
+ void *cpu_addr;
+
+ if (!kho_is_enabled())
+ return NULL;
+
+ ser = phys_to_virt(state);
+ page = phys_to_page(ser->page_phys);
+
+ if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
+ !force_dma_unencrypted(dev) && !is_swiotlb_for_alloc(dev))
+ return NULL;
+
+ if (!dev_is_dma_coherent(dev)) {
+ if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_ALLOC) &&
+ !is_swiotlb_for_alloc(dev))
+ return NULL;
+
+ if (IS_ENABLED(CONFIG_DMA_GLOBAL_POOL))
+ return NULL;
+
+ set_uncached = IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED);
+ remap = IS_ENABLED(CONFIG_DMA_DIRECT_REMAP);
+ if (!set_uncached && !remap)
+ return NULL;
+ }
+
+ if (PageHighMem(page)) {
+ remap = true;
+ set_uncached = false;
+ }
+
+ /*
+ * Remapping will be blocking so return error. The preserved memory
+ * might be already decrypted in the previous kernel, but the decryption
+ * call is not guaranteed to be non-blocking so return error always if
+ * decryption is required.
+ */
+ if ((remap || force_dma_unencrypted(dev)) &&
+ dma_direct_use_pool(dev, gfp))
+ return NULL;
+
+ /*
+ * Encryption scheme changed between two kernels and this might cause
+ * issues if device/driver is not handling it properly.
+ */
+ WARN_ON_ONCE(ser->force_decrypted != force_dma_unencrypted(dev));
+
+ /*
+ * arch_dma_prep_coherent() should make sure that any cache lines from
+ * the previous kernel, if the device was coherent previously or cached
+ * mapping in this kernel during init are not problematic for
+ * non-coherent allocations.
+ */
+ if (remap) {
+ pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);
+
+ if (force_dma_unencrypted(dev))
+ prot = pgprot_decrypted(prot);
+
+ arch_dma_prep_coherent(page, size);
+
+ cpu_addr = dma_common_contiguous_remap(page, size, prot,
+ __builtin_return_address(0));
+ if (!cpu_addr)
+ return NULL;
+ } else {
+ cpu_addr = page_address(page);
+ if (dma_set_decrypted(dev, cpu_addr, size))
+ return NULL;
+ }
+
+ if (set_uncached) {
+ arch_dma_prep_coherent(page, size);
+ cpu_addr = arch_dma_set_uncached(cpu_addr, size);
+ if (IS_ERR(cpu_addr))
+ return NULL;
+ }
+
+ *dma_handle = phys_to_dma_direct(dev, ser->page_phys);
+
+ /*
+ * Cannot free the restored pages on error here as these might be in use
+ * by a device with direct allocation in the previous kernel.
+ */
+ WARN_ON(!kho_restore_pages(ser->page_phys,
+ ser->size >> PAGE_SHIFT));
+ kho_restore_free(ser);
+ return cpu_addr;
+}
+#endif
+
void dma_direct_free(struct device *dev, size_t size,
void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs)
{
--
2.54.0.545.g6539524ca2-goog
next prev parent reply other threads:[~2026-05-05 0:27 UTC|newest]
Thread overview: 5+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-05-05 0:27 [RFC PATCH 0/4] dma-mapping: Add preservation of direct allocations Samiullah Khawaja
2026-05-05 0:27 ` [RFC PATCH 1/4] dma: Add DMA allocation preservation KHO ABI Samiullah Khawaja
2026-05-05 0:27 ` [RFC PATCH 2/4] dma/pool: Add an API to check if DMA allocation is from pool Samiullah Khawaja
2026-05-05 0:27 ` Samiullah Khawaja [this message]
2026-05-05 0:27 ` [RFC PATCH 4/4] dma-mapping: Add API to preserve/restore DMA allocation Samiullah Khawaja
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260505002737.2213734-4-skhawaja@google.com \
--to=skhawaja@google.com \
--cc=akpm@linux-foundation.org \
--cc=dmatlack@google.com \
--cc=graf@amazon.com \
--cc=iommu@lists.linux.dev \
--cc=jgg@ziepe.ca \
--cc=kevin.tian@intel.com \
--cc=kexec@lists.infradead.org \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-mm@kvack.org \
--cc=m.szyprowski@samsung.com \
--cc=pasha.tatashin@soleen.com \
--cc=praan@google.com \
--cc=pratyush@kernel.org \
--cc=robin.murphy@arm.com \
--cc=rppt@kernel.org \
--cc=vipinsh@google.com \
--cc=will@kernel.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox