From: Mostafa Saleh <smostafa@google.com>
To: linux-arm-kernel@lists.infradead.org,
linux-kernel@vger.kernel.org, kvmarm@lists.linux.dev,
iommu@lists.linux.dev
Cc: catalin.marinas@arm.com, will@kernel.org, maz@kernel.org,
oliver.upton@linux.dev, joey.gouly@arm.com,
suzuki.poulose@arm.com, yuzenghui@huawei.com, joro@8bytes.org,
jean-philippe@linaro.org, jgg@ziepe.ca, mark.rutland@arm.com,
qperret@google.com, tabba@google.com, vdonnefort@google.com,
sebastianene@google.com, keirf@google.com,
Mostafa Saleh <smostafa@google.com>
Subject: [PATCH v6 23/25] iommu/arm-smmu-v3-kvm: Shadow the CPU stage-2 page table
Date: Fri, 1 May 2026 11:19:25 +0000
Message-ID: <20260501111928.259252-24-smostafa@google.com>
In-Reply-To: <20260501111928.259252-1-smostafa@google.com>
Based on the callbacks from the hypervisor, update the SMMUv3
identity-mapped page table: map or unmap the affected range and issue
the corresponding TLB invalidations.
Signed-off-by: Mostafa Saleh <smostafa@google.com>
---
.../iommu/arm/arm-smmu-v3/pkvm/arm-smmu-v3.c | 197 +++++++++++++++++-
1 file changed, 195 insertions(+), 2 deletions(-)
diff --git a/drivers/iommu/arm/arm-smmu-v3/pkvm/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/pkvm/arm-smmu-v3.c
index 1ed5ccce7849..b73a2462f0dd 100644
--- a/drivers/iommu/arm/arm-smmu-v3/pkvm/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/pkvm/arm-smmu-v3.c
@@ -13,6 +13,9 @@
#include "arm_smmu_v3.h"
+#include <linux/io-pgtable.h>
+#include "../../../io-pgtable-arm.h"
+
size_t __ro_after_init kvm_hyp_arm_smmu_v3_count;
struct hyp_arm_smmu_v3_device *kvm_hyp_arm_smmu_v3_smmus;
@@ -59,6 +62,9 @@ struct hyp_arm_smmu_v3_device *kvm_hyp_arm_smmu_v3_smmus;
__ret; \
})
+/* Protected by host_mmu.lock from core code. */
+static struct io_pgtable *idmap_pgtable;
+
static bool is_cmdq_enabled(struct hyp_arm_smmu_v3_device *smmu)
{
return FIELD_GET(CR0_CMDQEN, smmu->cr0);
@@ -210,7 +216,6 @@ static int smmu_sync_cmd(struct hyp_arm_smmu_v3_device *smmu)
smmu_cmdq_empty(&smmu->cmdq));
}
-__maybe_unused
static int smmu_send_cmd(struct hyp_arm_smmu_v3_device *smmu,
struct arm_smmu_cmdq_ent *cmd)
{
@@ -222,6 +227,78 @@ static int smmu_send_cmd(struct hyp_arm_smmu_v3_device *smmu,
return smmu_sync_cmd(smmu);
}
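+/*
+ * Callback for arm_smmu_tlb_inv_build(): queue each generated invalidation
+ * command on this SMMU's command queue.
+ */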
+static void __smmu_add_cmd(void *__opaque, struct arm_smmu_cmdq_batch *unused,
+ struct arm_smmu_cmdq_ent *cmd)
+{
+ struct hyp_arm_smmu_v3_device *smmu = (struct hyp_arm_smmu_v3_device *)__opaque;
+
+ WARN_ON(smmu_add_cmd(smmu, cmd));
+}
+
+static int smmu_tlb_inv_range_smmu(struct hyp_arm_smmu_v3_device *smmu,
+ struct arm_smmu_cmdq_ent *cmd,
+ unsigned long iova, size_t size, size_t granule)
+{
+ arm_smmu_tlb_inv_build(cmd, iova, size, granule,
+ PAGE_SHIFT, smmu->features & ARM_SMMU_FEAT_RANGE_INV,
+ smmu, __smmu_add_cmd, NULL);
+ return smmu_sync_cmd(smmu);
+}
+
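+/*
+ * Invalidate an IOVA range on every SMMU: a ranged stage-2 IPA invalidation
+ * followed by TLBI NH_ALL for VMID 0, so any stage-1 TLB entries formed under
+ * the old stage-2 mapping are dropped as well.
+ */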
+static void smmu_tlb_inv_range(unsigned long iova, size_t size, size_t granule,
+ bool leaf)
+{
+ struct arm_smmu_cmdq_ent cmd_s1 = {
+ .opcode = CMDQ_OP_TLBI_NH_ALL,
+ .tlbi = {
+ .vmid = 0,
+ },
+ };
+ struct hyp_arm_smmu_v3_device *smmu;
+
+ for_each_smmu(smmu) {
+ struct arm_smmu_cmdq_ent cmd = {
+ .opcode = CMDQ_OP_TLBI_S2_IPA,
+ .tlbi = {
+ .leaf = leaf,
+ .vmid = 0,
+ },
+ };
+
+ hyp_spin_lock(&smmu->lock);
+ /*
+ * Don't bother if the SMMU is disabled; this matters mostly once RPM is
+ * supported, to avoid touching the SMMU MMIO while it is disabled.
+ * The hypervisor also asserts that CMDQEN is set before the SMMU is
+ * enabled, as otherwise the host could prevent the hypervisor from
+ * issuing TLB invalidations.
+ */
+ if (is_smmu_enabled(smmu)) {
+ WARN_ON(smmu_tlb_inv_range_smmu(smmu, &cmd, iova, size, granule));
+ WARN_ON(smmu_send_cmd(smmu, &cmd_s1));
+ }
+ hyp_spin_unlock(&smmu->lock);
+ }
+}
+
+static void smmu_tlb_flush_walk(unsigned long iova, size_t size,
+ size_t granule, void *cookie)
+{
+ smmu_tlb_inv_range(iova, size, granule, false);
+}
+
+static void smmu_tlb_add_page(struct iommu_iotlb_gather *gather,
+ unsigned long iova, size_t granule,
+ void *cookie)
+{
+ smmu_tlb_inv_range(iova, granule, granule, true);
+}
+
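+/* TLB callbacks for the identity-map io-pgtable, wired up in smmu_init_pgt(). */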
+static const struct iommu_flush_ops smmu_tlb_ops = {
+ .tlb_flush_walk = smmu_tlb_flush_walk,
+ .tlb_add_page = smmu_tlb_add_page,
+};
+
/* Put the device in a state that can be probed by the host driver. */
static void smmu_deinit_device(struct hyp_arm_smmu_v3_device *smmu)
{
@@ -495,6 +572,37 @@ static int smmu_init_device(struct hyp_arm_smmu_v3_device *smmu)
return ret;
}
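+/*
+ * Build a single stage-2 identity-map page table whose address sizes, page
+ * sizes and walk coherency are the intersection of what all SMMUs support.
+ */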
+static int smmu_init_pgt(void)
+{
+ /* Default values, overridden below based on the SMMUs' common features. */
+ struct io_pgtable_cfg cfg = (struct io_pgtable_cfg) {
+ .tlb = &smmu_tlb_ops,
+ .pgsize_bitmap = -1,
+ .ias = 48,
+ .oas = 48,
+ .coherent_walk = true,
+ };
+ struct hyp_arm_smmu_v3_device *smmu;
+ struct io_pgtable_ops *ops;
+
+ for_each_smmu(smmu) {
+ cfg.ias = min(cfg.ias, smmu->oas);
+ cfg.oas = min(cfg.oas, smmu->oas);
+ cfg.pgsize_bitmap &= smmu->pgsize_bitmap;
+ cfg.coherent_walk &= !!(smmu->features & ARM_SMMU_FEAT_COHERENCY);
+ }
+
+ /* At least PAGE_SIZE must be supported by all SMMUs. */
+ if ((cfg.pgsize_bitmap & PAGE_SIZE) == 0)
+ return -EINVAL;
+
+ ops = kvm_alloc_io_pgtable_ops(ARM_64_LPAE_S2, &cfg, NULL);
+ if (!ops)
+ return -ENOMEM;
+ idmap_pgtable = io_pgtable_ops_to_pgtable(ops);
+ return 0;
+}
+
/* Called while the host is still trusted. */
static int smmu_init(void)
{
@@ -520,7 +628,10 @@ static int smmu_init(void)
BUILD_BUG_ON(sizeof(hyp_spinlock_t) != sizeof(u32));
- return 0;
+ ret = smmu_init_pgt();
+ if (ret)
+ goto out_reclaim_smmu;
+ return ret;
out_reclaim_smmu:
while (smmu != kvm_hyp_arm_smmu_v3_smmus)
@@ -950,8 +1061,90 @@ static bool smmu_dabt_handler(struct user_pt_regs *regs, u64 esr, u64 addr)
return false;
}
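+/*
+ * Return the largest supported page size that fits in @size and matches the
+ * alignment of @paddr, so block mappings are used where possible.
+ */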
+static size_t smmu_pgsize_idmap(size_t size, u64 paddr, size_t pgsize_bitmap)
+{
+ size_t pgsizes;
+
+ /* Remove page sizes that are larger than the current size */
+ pgsizes = pgsize_bitmap & GENMASK_ULL(__fls(size), 0);
+
+ /* Remove page sizes that the address is not aligned to. */
+ if (likely(paddr))
+ pgsizes &= GENMASK_ULL(__ffs(paddr), 0);
+
+ WARN_ON(!pgsizes);
+
+ /* Return the largest page size that fits. */
+ return BIT(__fls(pgsizes));
+}
+
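+/*
+ * Hypervisor callback: mirror a host stage-2 change into the SMMU identity
+ * map. A non-zero @prot maps [start, end); @prot == 0 unmaps it.
+ */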
static int smmu_host_stage2_idmap(phys_addr_t start, phys_addr_t end, int prot)
{
+ size_t pgsize = PAGE_SIZE, pgcount, size;
+ struct io_pgtable *pgtable = idmap_pgtable;
+ int ret = 0;
+
+ end = min(end, BIT(pgtable->cfg.oas));
+ if (start >= end)
+ return 0;
+
+ size = end - start;
+ if (prot) {
+ size_t mapped;
+
+ if (!(prot & IOMMU_MMIO))
+ prot |= IOMMU_CACHE;
+
+ while (size) {
+ mapped = 0;
+ /*
+ * Page sizes are handled differently for memory and MMIO:
+ * - memory: map everything with PAGE_SIZE. This is guaranteed to
+ *   succeed, as enough pages were allocated to cover all of memory,
+ *   and it avoids block mappings: io-pgtable-arm no longer supports
+ *   the split_blk_unmap logic, so blocks can't be broken into tables
+ *   once mapped.
+ * - MMIO: unlike memory, pKVM only allocates 1G for all of MMIO,
+ *   while the MMIO space can be large as it is assumed to cover the
+ *   whole IAS that is not memory, so block mappings have to be used.
+ *   That is fine for MMIO as it is never donated at the moment, so it
+ *   never needs to be unmapped at run time, which would trigger the
+ *   block-splitting logic.
+ */
+ if (prot & IOMMU_MMIO)
+ pgsize = smmu_pgsize_idmap(size, start, pgtable->cfg.pgsize_bitmap);
+
+ pgcount = size / pgsize;
+ ret = pgtable->ops.map_pages(&pgtable->ops, start, start,
+ pgsize, pgcount, prot, 0, &mapped);
+ size -= mapped;
+ start += mapped;
+ /* Map failures don't impact security, so tolerate them. */
+ if (!mapped || ret)
+ break;
+ }
+ } else {
+ struct iommu_iotlb_gather gather;
+ size_t unmapped;
+
+ while (size) {
+ pgcount = size / pgsize;
+ iommu_iotlb_gather_init(&gather);
+ unmapped = pgtable->ops.unmap_pages(&pgtable->ops, start,
+ pgsize, pgcount, &gather);
+ size -= unmapped;
+ start += unmapped;
+ if (!unmapped)
+ break;
+ }
+ }
+
+ if (ret)
+ return ret;
+
+ if (WARN_ON(size))
+ return -EINVAL;
+
return 0;
}
--
2.54.0.545.g6539524ca2-goog