From: Mostafa Saleh <smostafa@google.com>
To: linux-arm-kernel@lists.infradead.org,
linux-kernel@vger.kernel.org, kvmarm@lists.linux.dev,
iommu@lists.linux.dev
Cc: catalin.marinas@arm.com, will@kernel.org, maz@kernel.org,
oliver.upton@linux.dev, joey.gouly@arm.com,
suzuki.poulose@arm.com, yuzenghui@huawei.com, joro@8bytes.org,
jean-philippe@linaro.org, jgg@ziepe.ca, mark.rutland@arm.com,
qperret@google.com, tabba@google.com, vdonnefort@google.com,
sebastianene@google.com, keirf@google.com,
Mostafa Saleh <smostafa@google.com>
Subject: [PATCH v6 06/25] iommu/io-pgtable-arm: Rework to use the iommu-pages API
Date: Fri, 1 May 2026 11:19:08 +0000 [thread overview]
Message-ID: <20260501111928.259252-7-smostafa@google.com> (raw)
In-Reply-To: <20260501111928.259252-1-smostafa@google.com>
To prepare for supporting io-pgtable-arm in the pKVM hypervisor,
we need to abstract away standard kernel allocations, frees, virt/phys
conversions, and DMA API mapping.
Introduce a set of generic wrappers in iommu-pages.h:
- iommu_alloc_data
- iommu_free_data
- iommu_virt_to_phys
- iommu_phys_to_virt
- iommu_pages_dma_map
- iommu_pages_dma_mapping_error
- iommu_pages_dma_unmap
Update io-pgtable-arm.c to use these new wrappers everywhere instead of
the standard kernel kmalloc_obj, kfree, virt_to_phys, dma_map_single,
etc. This abstraction makes it easy to replace them with
hypervisor-specific implementations in a later patch.
Signed-off-by: Mostafa Saleh <smostafa@google.com>
---
drivers/iommu/io-pgtable-arm.c | 37 ++++++++++++++++------------------
drivers/iommu/iommu-pages.h | 36 +++++++++++++++++++++++++++++++++
2 files changed, 53 insertions(+), 20 deletions(-)
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index 0208e5897c29..e765021308f9 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -15,7 +15,6 @@
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/types.h>
-#include <linux/dma-mapping.h>
#include <asm/barrier.h>
@@ -143,7 +142,7 @@
#define ARM_MALI_LPAE_MEMATTR_WRITE_ALLOC 0x8DULL
/* IOPTE accessors */
-#define iopte_deref(pte,d) __va(iopte_to_paddr(pte, d))
+#define iopte_deref(pte, d) iommu_phys_to_virt(iopte_to_paddr(pte, d))
#define iopte_type(pte) \
(((pte) >> ARM_LPAE_PTE_TYPE_SHIFT) & ARM_LPAE_PTE_TYPE_MASK)
@@ -245,7 +244,7 @@ static inline bool arm_lpae_concat_mandatory(struct io_pgtable_cfg *cfg,
static dma_addr_t __arm_lpae_dma_addr(void *pages)
{
- return (dma_addr_t)virt_to_phys(pages);
+ return (dma_addr_t)iommu_virt_to_phys(pages);
}
static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
@@ -272,15 +271,15 @@ static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
return NULL;
if (!cfg->coherent_walk) {
- dma = dma_map_single(dev, pages, size, DMA_TO_DEVICE);
- if (dma_mapping_error(dev, dma))
+ dma = iommu_pages_dma_map(dev, pages, size);
+ if (iommu_pages_dma_mapping_error(dev, dma))
goto out_free;
/*
* We depend on the IOMMU being able to work with any physical
* address directly, so if the DMA layer suggests otherwise by
* translating or truncating them, that bodes very badly...
*/
- if (dma != virt_to_phys(pages))
+ if (dma != iommu_virt_to_phys(pages))
goto out_unmap;
}
@@ -288,7 +287,7 @@ static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
out_unmap:
dev_err(dev, "Cannot accommodate DMA translation for IOMMU page tables\n");
- dma_unmap_single(dev, dma, size, DMA_TO_DEVICE);
+ iommu_pages_dma_unmap(dev, dma, size);
out_free:
if (cfg->free)
@@ -304,8 +303,7 @@ static void __arm_lpae_free_pages(void *pages, size_t size,
void *cookie)
{
if (!cfg->coherent_walk)
- dma_unmap_single(cfg->iommu_dev, __arm_lpae_dma_addr(pages),
- size, DMA_TO_DEVICE);
+ iommu_pages_dma_unmap(cfg->iommu_dev, __arm_lpae_dma_addr(pages), size);
if (cfg->free)
cfg->free(cookie, pages, size);
@@ -316,8 +314,7 @@ static void __arm_lpae_free_pages(void *pages, size_t size,
static void __arm_lpae_sync_pte(arm_lpae_iopte *ptep, int num_entries,
struct io_pgtable_cfg *cfg)
{
- dma_sync_single_for_device(cfg->iommu_dev, __arm_lpae_dma_addr(ptep),
- sizeof(*ptep) * num_entries, DMA_TO_DEVICE);
+ iommu_pages_flush_incoherent(cfg->iommu_dev, ptep, 0, sizeof(*ptep) * num_entries);
}
static void __arm_lpae_clear_pte(arm_lpae_iopte *ptep, struct io_pgtable_cfg *cfg, int num_entries)
@@ -395,7 +392,7 @@ static arm_lpae_iopte arm_lpae_install_table(arm_lpae_iopte *table,
arm_lpae_iopte old, new;
struct io_pgtable_cfg *cfg = &data->iop.cfg;
- new = paddr_to_iopte(__pa(table), data) | ARM_LPAE_PTE_TYPE_TABLE;
+ new = paddr_to_iopte(iommu_virt_to_phys(table), data) | ARM_LPAE_PTE_TYPE_TABLE;
if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
new |= ARM_LPAE_PTE_NSTABLE;
@@ -616,7 +613,7 @@ static void arm_lpae_free_pgtable(struct io_pgtable *iop)
struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);
__arm_lpae_free_pgtable(data, data->start_level, data->pgd);
- kfree(data);
+ iommu_free_data(data);
}
static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
@@ -930,7 +927,7 @@ arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
if (cfg->oas > ARM_LPAE_MAX_ADDR_BITS)
return NULL;
- data = kmalloc_obj(*data);
+ data = iommu_alloc_data(sizeof(*data), GFP_KERNEL);
if (!data)
return NULL;
@@ -1053,11 +1050,11 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
wmb();
/* TTBR */
- cfg->arm_lpae_s1_cfg.ttbr = virt_to_phys(data->pgd);
+ cfg->arm_lpae_s1_cfg.ttbr = iommu_virt_to_phys(data->pgd);
return &data->iop;
out_free_data:
- kfree(data);
+ iommu_free_data(data);
return NULL;
}
@@ -1149,11 +1146,11 @@ arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
wmb();
/* VTTBR */
- cfg->arm_lpae_s2_cfg.vttbr = virt_to_phys(data->pgd);
+ cfg->arm_lpae_s2_cfg.vttbr = iommu_virt_to_phys(data->pgd);
return &data->iop;
out_free_data:
- kfree(data);
+ iommu_free_data(data);
return NULL;
}
@@ -1223,7 +1220,7 @@ arm_mali_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
/* Ensure the empty pgd is visible before TRANSTAB can be written */
wmb();
- cfg->arm_mali_lpae_cfg.transtab = virt_to_phys(data->pgd) |
+ cfg->arm_mali_lpae_cfg.transtab = iommu_virt_to_phys(data->pgd) |
ARM_MALI_LPAE_TTBR_READ_INNER |
ARM_MALI_LPAE_TTBR_ADRMODE_TABLE;
if (cfg->coherent_walk)
@@ -1232,7 +1229,7 @@ arm_mali_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
return &data->iop;
out_free_data:
- kfree(data);
+ iommu_free_data(data);
return NULL;
}
diff --git a/drivers/iommu/iommu-pages.h b/drivers/iommu/iommu-pages.h
index ae9da4f571f6..e1945193ad7f 100644
--- a/drivers/iommu/iommu-pages.h
+++ b/drivers/iommu/iommu-pages.h
@@ -7,6 +7,7 @@
#ifndef __IOMMU_PAGES_H
#define __IOMMU_PAGES_H
+#include <linux/dma-mapping.h>
#include <linux/iommu.h>
/**
@@ -145,4 +146,39 @@ void iommu_pages_stop_incoherent_list(struct iommu_pages_list *list,
void iommu_pages_free_incoherent(void *virt, struct device *dma_dev);
#endif
+static inline void *iommu_alloc_data(size_t size, gfp_t gfp)
+{
+ return kmalloc(size, gfp);
+}
+
+static inline void iommu_free_data(void *p)
+{
+ kfree(p);
+}
+
+static inline phys_addr_t iommu_virt_to_phys(void *virt)
+{
+ return virt_to_phys(virt);
+}
+
+static inline void *iommu_phys_to_virt(phys_addr_t phys)
+{
+ return phys_to_virt(phys);
+}
+
+static inline dma_addr_t iommu_pages_dma_map(struct device *dev, void *virt, size_t size)
+{
+ return dma_map_single(dev, virt, size, DMA_TO_DEVICE);
+}
+
+static inline int iommu_pages_dma_mapping_error(struct device *dev, dma_addr_t dma)
+{
+ return dma_mapping_error(dev, dma);
+}
+
+static inline void iommu_pages_dma_unmap(struct device *dev, dma_addr_t dma, size_t size)
+{
+ dma_unmap_single(dev, dma, size, DMA_TO_DEVICE);
+}
+
#endif /* __IOMMU_PAGES_H */
--
2.54.0.545.g6539524ca2-goog
next prev parent reply other threads:[~2026-05-01 11:20 UTC|newest]
Thread overview: 38+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-05-01 11:19 [PATCH v6 00/25] KVM: arm64: SMMUv3 driver for pKVM (trap and emulate) Mostafa Saleh
2026-05-01 11:19 ` [PATCH v6 01/25] KVM: arm64: Generalize trace clock Mostafa Saleh
2026-05-01 11:19 ` [PATCH v6 02/25] KVM: arm64: Donate MMIO to the hypervisor Mostafa Saleh
2026-05-01 11:19 ` [PATCH v6 03/25] iommu/arm-smmu-v3: Split code with hyp Mostafa Saleh
2026-05-01 12:44 ` Jason Gunthorpe
2026-05-04 12:13 ` Mostafa Saleh
2026-05-01 11:19 ` [PATCH v6 04/25] iommu/arm-smmu-v3: Move TLB range invalidation into common code Mostafa Saleh
2026-05-01 12:41 ` Jason Gunthorpe
2026-05-04 12:15 ` Mostafa Saleh
2026-05-01 11:19 ` [PATCH v6 05/25] iommu/arm-smmu-v3: Move IDR parsing to common functions Mostafa Saleh
2026-05-01 12:47 ` Jason Gunthorpe
2026-05-04 12:16 ` Mostafa Saleh
2026-05-01 11:19 ` Mostafa Saleh [this message]
2026-05-01 12:24 ` [PATCH v6 06/25] iommu/io-pgtable-arm: Rework to use the iommu-pages API Jason Gunthorpe
2026-05-04 12:19 ` Mostafa Saleh
2026-05-01 11:19 ` [PATCH v6 07/25] KVM: arm64: iommu: Introduce IOMMU driver infrastructure Mostafa Saleh
2026-05-01 11:19 ` [PATCH v6 08/25] KVM: arm64: iommu: Shadow host stage-2 page table Mostafa Saleh
2026-05-01 13:00 ` Jason Gunthorpe
2026-05-04 12:28 ` Mostafa Saleh
2026-05-01 11:19 ` [PATCH v6 09/25] KVM: arm64: iommu: Add memory pool Mostafa Saleh
2026-05-01 11:19 ` [PATCH v6 10/25] KVM: arm64: iommu: Support DABT for IOMMU Mostafa Saleh
2026-05-01 11:19 ` [PATCH v6 11/25] iommu/arm-smmu-v3-kvm: Add SMMUv3 driver Mostafa Saleh
2026-05-01 11:19 ` [PATCH v6 12/25] iommu/arm-smmu-v3-kvm: Add the kernel driver Mostafa Saleh
2026-05-01 11:19 ` [PATCH v6 13/25] iommu/arm-smmu-v3-kvm: Probe SMMU HW Mostafa Saleh
2026-05-01 12:51 ` Jason Gunthorpe
2026-05-04 12:30 ` Mostafa Saleh
2026-05-01 11:19 ` [PATCH v6 14/25] iommu/arm-smmu-v3-kvm: Add MMIO emulation Mostafa Saleh
2026-05-01 11:19 ` [PATCH v6 15/25] iommu/arm-smmu-v3-kvm: Shadow the command queue Mostafa Saleh
2026-05-01 11:19 ` [PATCH v6 16/25] iommu/arm-smmu-v3-kvm: Add CMDQ functions Mostafa Saleh
2026-05-01 11:19 ` [PATCH v6 17/25] iommu/arm-smmu-v3-kvm: Emulate CMDQ for host Mostafa Saleh
2026-05-01 11:19 ` [PATCH v6 18/25] iommu/arm-smmu-v3-kvm: Shadow stream table Mostafa Saleh
2026-05-01 11:19 ` [PATCH v6 19/25] iommu/arm-smmu-v3-kvm: Shadow STEs Mostafa Saleh
2026-05-01 11:19 ` [PATCH v6 20/25] iommu/arm-smmu-v3-kvm: Share other queues Mostafa Saleh
2026-05-01 11:19 ` [PATCH v6 21/25] iommu/arm-smmu-v3-kvm: Emulate GBPA Mostafa Saleh
2026-05-01 11:19 ` [PATCH v6 22/25] iommu/io-pgtable-arm: Support io-pgtable-arm in the hypervisor Mostafa Saleh
2026-05-01 11:19 ` [PATCH v6 23/25] iommu/arm-smmu-v3-kvm: Shadow the CPU stage-2 page table Mostafa Saleh
2026-05-01 11:19 ` [PATCH v6 24/25] iommu/arm-smmu-v3-kvm: Enable nesting Mostafa Saleh
2026-05-01 11:19 ` [PATCH v6 25/25] KVM: arm64: Add documentation for pKVM DMA isolation Mostafa Saleh
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260501111928.259252-7-smostafa@google.com \
--to=smostafa@google.com \
--cc=catalin.marinas@arm.com \
--cc=iommu@lists.linux.dev \
--cc=jean-philippe@linaro.org \
--cc=jgg@ziepe.ca \
--cc=joey.gouly@arm.com \
--cc=joro@8bytes.org \
--cc=keirf@google.com \
--cc=kvmarm@lists.linux.dev \
--cc=linux-arm-kernel@lists.infradead.org \
--cc=linux-kernel@vger.kernel.org \
--cc=mark.rutland@arm.com \
--cc=maz@kernel.org \
--cc=oliver.upton@linux.dev \
--cc=qperret@google.com \
--cc=sebastianene@google.com \
--cc=suzuki.poulose@arm.com \
--cc=tabba@google.com \
--cc=vdonnefort@google.com \
--cc=will@kernel.org \
--cc=yuzenghui@huawei.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox