From: Jason Gunthorpe <jgg@nvidia.com>
To: Jonathan Corbet <corbet@lwn.net>,
iommu@lists.linux.dev, Joerg Roedel <joro@8bytes.org>,
Justin Stitt <justinstitt@google.com>,
Kevin Tian <kevin.tian@intel.com>,
linux-doc@vger.kernel.org, linux-kselftest@vger.kernel.org,
llvm@lists.linux.dev, Bill Wendling <morbo@google.com>,
Nathan Chancellor <nathan@kernel.org>,
Nick Desaulniers <nick.desaulniers+lkml@gmail.com>,
Miguel Ojeda <ojeda@kernel.org>,
Robin Murphy <robin.murphy@arm.com>,
Shuah Khan <shuah@kernel.org>,
Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>,
Will Deacon <will@kernel.org>
Cc: Alexey Kardashevskiy <aik@amd.com>,
Alejandro Jimenez <alejandro.j.jimenez@oracle.com>,
James Gowans <jgowans@amazon.com>,
Michael Roth <michael.roth@amd.com>,
Pasha Tatashin <pasha.tatashin@soleen.com>,
patches@lists.linux.dev
Subject: [PATCH v6 05/15] iommupt: Add iova_to_phys op
Date: Tue, 7 Oct 2025 13:11:50 -0300 [thread overview]
Message-ID: <5-v6-0fb54a1d9850+36b-iommu_pt_jgg@nvidia.com> (raw)
In-Reply-To: <0-v6-0fb54a1d9850+36b-iommu_pt_jgg@nvidia.com>
iova_to_phys is a performance path for the DMA API and iommufd; implement
it using an unrolled, get_user_pages()-like function waterfall scheme.
The implementation itself is fairly trivial.
Tested-by: Alejandro Jimenez <alejandro.j.jimenez@oracle.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
---
drivers/iommu/generic_pt/iommu_pt.h | 105 ++++++++++++++++++++++++++++
include/linux/generic_pt/iommu.h | 19 +++--
2 files changed, 119 insertions(+), 5 deletions(-)
diff --git a/drivers/iommu/generic_pt/iommu_pt.h b/drivers/iommu/generic_pt/iommu_pt.h
index 564f2d3a6e11e1..5ff1b887928a46 100644
--- a/drivers/iommu/generic_pt/iommu_pt.h
+++ b/drivers/iommu/generic_pt/iommu_pt.h
@@ -17,6 +17,111 @@
#define DOMAIN_NS(op) CONCATENATE(CONCATENATE(pt_iommu_, PTPFX), op)
+static int make_range_ul(struct pt_common *common, struct pt_range *range,
+ unsigned long iova, unsigned long len)
+{
+ unsigned long last;
+
+ if (unlikely(len == 0))
+ return -EINVAL;
+
+ if (check_add_overflow(iova, len - 1, &last))
+ return -EOVERFLOW;
+
+ *range = pt_make_range(common, iova, last);
+ if (sizeof(iova) > sizeof(range->va)) {
+ if (unlikely(range->va != iova || range->last_va != last))
+ return -EOVERFLOW;
+ }
+ return 0;
+}
+
+static __maybe_unused int make_range_u64(struct pt_common *common,
+ struct pt_range *range, u64 iova,
+ u64 len)
+{
+ if (unlikely(iova > ULONG_MAX || len > ULONG_MAX))
+ return -EOVERFLOW;
+ return make_range_ul(common, range, iova, len);
+}
+
+/*
+ * Some APIs use unsigned long, while others use dma_addr_t as the type. Dispatch
+ * to the correct validation based on the type.
+ */
+#define make_range_no_check(common, range, iova, len) \
+ ({ \
+ int ret; \
+ if (sizeof(iova) > sizeof(unsigned long) || \
+ sizeof(len) > sizeof(unsigned long)) \
+ ret = make_range_u64(common, range, iova, len); \
+ else \
+ ret = make_range_ul(common, range, iova, len); \
+ ret; \
+ })
+
+#define make_range(common, range, iova, len) \
+ ({ \
+ int ret = make_range_no_check(common, range, iova, len); \
+ if (!ret) \
+ ret = pt_check_range(range); \
+ ret; \
+ })
+
+static __always_inline int __do_iova_to_phys(struct pt_range *range, void *arg,
+ unsigned int level,
+ struct pt_table_p *table,
+ pt_level_fn_t descend_fn)
+{
+ struct pt_state pts = pt_init(range, level, table);
+ pt_oaddr_t *res = arg;
+
+ switch (pt_load_single_entry(&pts)) {
+ case PT_ENTRY_EMPTY:
+ return -ENOENT;
+ case PT_ENTRY_TABLE:
+ return pt_descend(&pts, arg, descend_fn);
+ case PT_ENTRY_OA:
+ *res = pt_entry_oa_exact(&pts);
+ return 0;
+ }
+ return -ENOENT;
+}
+PT_MAKE_LEVELS(__iova_to_phys, __do_iova_to_phys);
+
+/**
+ * iova_to_phys() - Return the output address for the given IOVA
+ * @domain: Domain to query
+ * @iova: IO virtual address to query
+ *
+ * Determine the output address from the given IOVA. @iova may have any
+ * alignment, the returned physical address will be adjusted with any sub-page offset.
+ *
+ * Context: The caller must hold a read range lock that includes @iova.
+ *
+ * Return: 0 if there is no translation for the given iova.
+ */
+phys_addr_t DOMAIN_NS(iova_to_phys)(struct iommu_domain *domain,
+ dma_addr_t iova)
+{
+ struct pt_iommu *iommu_table =
+ container_of(domain, struct pt_iommu, domain);
+ struct pt_range range;
+ pt_oaddr_t res;
+ int ret;
+
+ ret = make_range(common_from_iommu(iommu_table), &range, iova, 1);
+ if (ret)
+ return ret;
+
+ ret = pt_walk_range(&range, __iova_to_phys, &res);
+ /* PHYS_ADDR_MAX would be a better error code */
+ if (ret)
+ return 0;
+ return res;
+}
+EXPORT_SYMBOL_NS_GPL(DOMAIN_NS(iova_to_phys), "GENERIC_PT_IOMMU");
+
struct pt_iommu_collect_args {
struct iommu_pages_list free_list;
};
diff --git a/include/linux/generic_pt/iommu.h b/include/linux/generic_pt/iommu.h
index dc731fe003d153..5622856e199881 100644
--- a/include/linux/generic_pt/iommu.h
+++ b/include/linux/generic_pt/iommu.h
@@ -116,11 +116,13 @@ struct pt_iommu_cfg {
};
/* Generate the exported function signatures from iommu_pt.h */
-#define IOMMU_PROTOTYPES(fmt) \
- int pt_iommu_##fmt##_init(struct pt_iommu_##fmt *table, \
- const struct pt_iommu_##fmt##_cfg *cfg, \
- gfp_t gfp); \
- void pt_iommu_##fmt##_hw_info(struct pt_iommu_##fmt *table, \
+#define IOMMU_PROTOTYPES(fmt) \
+ phys_addr_t pt_iommu_##fmt##_iova_to_phys(struct iommu_domain *domain, \
+ dma_addr_t iova); \
+ int pt_iommu_##fmt##_init(struct pt_iommu_##fmt *table, \
+ const struct pt_iommu_##fmt##_cfg *cfg, \
+ gfp_t gfp); \
+ void pt_iommu_##fmt##_hw_info(struct pt_iommu_##fmt *table, \
struct pt_iommu_##fmt##_hw_info *info)
#define IOMMU_FORMAT(fmt, member) \
struct pt_iommu_##fmt { \
@@ -129,6 +131,13 @@ struct pt_iommu_cfg {
}; \
IOMMU_PROTOTYPES(fmt)
+/*
+ * A driver uses IOMMU_PT_DOMAIN_OPS to populate the iommu_domain_ops for the
+ * iommu_pt
+ */
+#define IOMMU_PT_DOMAIN_OPS(fmt) \
+ .iova_to_phys = &pt_iommu_##fmt##_iova_to_phys,
+
/*
* The driver should setup its domain struct like
* union {
--
2.43.0
next prev parent reply other threads:[~2025-10-07 16:12 UTC|newest]
Thread overview: 30+ messages / expand[flat|nested] mbox.gz Atom feed top
2025-10-07 16:11 [PATCH v6 00/15] Consolidate iommu page table implementations (AMD) Jason Gunthorpe
2025-10-07 16:11 ` [PATCH v6 01/15] genpt: Generic Page Table base API Jason Gunthorpe
2025-10-08 14:37 ` Jason Gunthorpe
2025-10-15 2:03 ` Tian, Kevin
2025-10-21 16:46 ` Jason Gunthorpe
2025-10-07 16:11 ` [PATCH v6 02/15] genpt: Add Documentation/ files Jason Gunthorpe
2025-10-07 16:11 ` [PATCH v6 03/15] iommupt: Add the basic structure of the iommu implementation Jason Gunthorpe
2025-10-15 7:25 ` Tian, Kevin
2025-10-21 14:07 ` Jason Gunthorpe
2025-10-07 16:11 ` [PATCH v6 04/15] iommupt: Add the AMD IOMMU v1 page table format Jason Gunthorpe
2025-10-07 16:11 ` Jason Gunthorpe [this message]
2025-10-07 16:11 ` [PATCH v6 06/15] iommupt: Add unmap_pages op Jason Gunthorpe
2025-10-07 16:11 ` [PATCH v6 07/15] iommupt: Add map_pages op Jason Gunthorpe
2025-10-16 6:52 ` Tian, Kevin
2025-10-21 17:19 ` Jason Gunthorpe
2025-10-07 16:11 ` [PATCH v6 08/15] iommupt: Add read_and_clear_dirty op Jason Gunthorpe
2025-10-07 16:11 ` [PATCH v6 09/15] iommupt: Add a kunit test for Generic Page Table Jason Gunthorpe
2025-10-16 6:54 ` Tian, Kevin
2025-10-07 16:11 ` [PATCH v6 10/15] iommupt: Add a mock pagetable format for iommufd selftest to use Jason Gunthorpe
2025-10-07 16:11 ` [PATCH v6 11/15] iommufd: Change the selftest to use iommupt instead of xarray Jason Gunthorpe
2025-10-16 6:54 ` Tian, Kevin
2025-10-07 16:11 ` [PATCH v6 12/15] iommupt: Add the x86 64 bit page table format Jason Gunthorpe
2025-10-16 6:55 ` Tian, Kevin
2025-10-21 14:09 ` Jason Gunthorpe
2025-10-07 16:11 ` [PATCH v6 13/15] iommu/amd: Use the generic iommu page table Jason Gunthorpe
2025-10-07 16:11 ` [PATCH v6 14/15] iommu/amd: Remove AMD io_pgtable support Jason Gunthorpe
2025-10-07 16:12 ` [PATCH v6 15/15] iommupt: Add a kunit test for the IOMMU implementation Jason Gunthorpe
2025-10-16 6:55 ` Tian, Kevin
2025-10-07 17:44 ` [PATCH v6 00/15] Consolidate iommu page table implementations (AMD) Pasha Tatashin
2025-10-07 17:54 ` Jason Gunthorpe
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=5-v6-0fb54a1d9850+36b-iommu_pt_jgg@nvidia.com \
--to=jgg@nvidia.com \
--cc=aik@amd.com \
--cc=alejandro.j.jimenez@oracle.com \
--cc=corbet@lwn.net \
--cc=iommu@lists.linux.dev \
--cc=jgowans@amazon.com \
--cc=joro@8bytes.org \
--cc=justinstitt@google.com \
--cc=kevin.tian@intel.com \
--cc=linux-doc@vger.kernel.org \
--cc=linux-kselftest@vger.kernel.org \
--cc=llvm@lists.linux.dev \
--cc=michael.roth@amd.com \
--cc=morbo@google.com \
--cc=nathan@kernel.org \
--cc=nick.desaulniers+lkml@gmail.com \
--cc=ojeda@kernel.org \
--cc=pasha.tatashin@soleen.com \
--cc=patches@lists.linux.dev \
--cc=robin.murphy@arm.com \
--cc=shuah@kernel.org \
--cc=suravee.suthikulpanit@amd.com \
--cc=will@kernel.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).