From: Jason Gunthorpe <jgg@nvidia.com>
To: Jonathan Corbet <corbet@lwn.net>,
iommu@lists.linux.dev, Joerg Roedel <joro@8bytes.org>,
Justin Stitt <justinstitt@google.com>,
Kevin Tian <kevin.tian@intel.com>,
linux-doc@vger.kernel.org, linux-kselftest@vger.kernel.org,
llvm@lists.linux.dev, Bill Wendling <morbo@google.com>,
Nathan Chancellor <nathan@kernel.org>,
Nick Desaulniers <nick.desaulniers+lkml@gmail.com>,
Miguel Ojeda <ojeda@kernel.org>,
Robin Murphy <robin.murphy@arm.com>,
Shuah Khan <shuah@kernel.org>,
Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>,
Will Deacon <will@kernel.org>
Cc: Alexey Kardashevskiy <aik@amd.com>,
Alejandro Jimenez <alejandro.j.jimenez@oracle.com>,
James Gowans <jgowans@amazon.com>,
Michael Roth <michael.roth@amd.com>,
Pasha Tatashin <pasha.tatashin@soleen.com>,
patches@lists.linux.dev
Subject: [PATCH v3 06/15] iommupt: Add unmap_pages op
Date: Mon, 16 Jun 2025 15:06:09 -0300
Message-ID: <6-v3-a93aab628dbc+521-iommu_pt_jgg@nvidia.com>
In-Reply-To: <0-v3-a93aab628dbc+521-iommu_pt_jgg@nvidia.com>
unmap_pages removes mappings, and any fully contained interior tables, from
the given range. This follows the now-standard iommu_domain API definition
where it does not split up larger page sizes into smaller ones. The caller
must perform unmap only on ranges created by map, or it must have otherwise
determined safe cut points (e.g. iommufd/vfio use iova_to_phys to scan for
them).
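As a hedged usage sketch (not part of this patch), a caller that created a
mapping with a single map call unmaps it with a matching range; the IOVA
constant below is purely illustrative:

    #include <linux/iommu.h>
    #include <linux/sizes.h>

    /*
     * Illustrative only: EXAMPLE_IOVA is a made-up address. The core
     * iommu_map()/iommu_unmap() helpers are the usual entry points and
     * end up invoking the ops this series adds.
     */
    #define EXAMPLE_IOVA 0x40200000UL

    static int example_map_unmap(struct iommu_domain *domain, phys_addr_t paddr)
    {
            size_t unmapped;
            int ret;

            ret = iommu_map(domain, EXAMPLE_IOVA, paddr, SZ_2M,
                            IOMMU_READ | IOMMU_WRITE, GFP_KERNEL);
            if (ret)
                    return ret;

            /* Unmap must cover the whole mapping; partially unmapping a
             * large IOPTE is not supported. */
            unmapped = iommu_unmap(domain, EXAMPLE_IOVA, SZ_2M);
            return unmapped == SZ_2M ? 0 : -EINVAL;
    }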
Future work will provide a 'cut' operation that explicitly performs the page
size split when the HW can support it.
unmap is implemented with a recursive descent of the tree. If the caller
provides a VA range that spans an entire table item then the table memory
can be freed as well.
If an entire table item can be freed then this version will also check the
leaf-only level of the tree to ensure that all entries are present,
generating -EINVAL otherwise. Many of the existing drivers don't do this
extra check.
This version sits under the iommu_domain_ops as unmap_pages() but does not
require the external page size calculation. The implementation is actually
unmap_range() and can do arbitrary ranges, internally handling all the
validation and supporting any arrangement of page sizes. A future series
can optimize __iommu_unmap() to take advantage of this.
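One possible shape for that optimization, sketched here purely as an
assumption about how the core code could use the range support (none of the
names below come from this series): because the implementation only cares
about pgsize * pgcount, the whole request could be handed over in a single
call instead of iterating iommu_pgsize() sized chunks:

    /* Assumed future core-path sketch, not part of this series */
    static size_t example_unmap_whole_range(struct iommu_domain *domain,
                                            unsigned long iova, size_t size,
                                            struct iommu_iotlb_gather *gather)
    {
            const struct iommu_domain_ops *ops = domain->ops;

            /* The iommu_pt implementation internally validates the range
             * and handles any mixture of page sizes, so no per-chunk
             * page size computation is needed here. */
            return ops->unmap_pages(domain, iova, size, 1, gather);
    }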
Freed page table memory is batched up in the gather and will be freed in
the driver's iotlb_sync() callback after the IOTLB flush completes.
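For illustration, a driver's sync callback would then look roughly like the
hedged sketch below; the flush helper is a hypothetical placeholder, only the
freelist handling reflects what this patch relies on:

    static void example_iotlb_sync(struct iommu_domain *domain,
                                   struct iommu_iotlb_gather *gather)
    {
            /* Hypothetical HW invalidation of the gathered IOVA range */
            example_flush_iotlb_range(domain, gather->start,
                                      gather->end - gather->start + 1);

            /*
             * After the IOTLB flush nothing can reference the old table
             * memory any more, so the pages batched onto the gather by
             * gather_range_pages() can be released.
             */
            iommu_put_pages_list(&gather->freelist);
    }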
Tested-by: Alejandro Jimenez <alejandro.j.jimenez@oracle.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
---
drivers/iommu/generic_pt/iommu_pt.h | 155 ++++++++++++++++++++++++++++
include/linux/generic_pt/iommu.h | 10 +-
2 files changed, 163 insertions(+), 2 deletions(-)
diff --git a/drivers/iommu/generic_pt/iommu_pt.h b/drivers/iommu/generic_pt/iommu_pt.h
index bae87ff34b8043..2729422d12dfcd 100644
--- a/drivers/iommu/generic_pt/iommu_pt.h
+++ b/drivers/iommu/generic_pt/iommu_pt.h
@@ -14,6 +14,29 @@
#include <linux/iommu.h>
#include "../iommu-pages.h"
#include <linux/export.h>
+#include <linux/cleanup.h>
+#include <linux/dma-mapping.h>
+
+static void gather_range_pages(struct iommu_iotlb_gather *iotlb_gather,
+ struct pt_iommu *iommu_table, pt_vaddr_t iova,
+ pt_vaddr_t len,
+ struct iommu_pages_list *free_list)
+{
+ struct pt_common *common = common_from_iommu(iommu_table);
+
+ if (pt_feature(common, PT_FEAT_FLUSH_RANGE_NO_GAPS) &&
+ iommu_iotlb_gather_is_disjoint(iotlb_gather, iova, len)) {
+ iommu_iotlb_sync(&iommu_table->domain, iotlb_gather);
+ /*
+ * Note that the sync frees the gather's free list, so we must
+ * not have any pages on that list that are covered by iova/len
+ */
+ } else if (pt_feature(common, PT_FEAT_FLUSH_RANGE)) {
+ iommu_iotlb_gather_add_range(iotlb_gather, iova, len);
+ }
+
+ iommu_pages_list_splice(free_list, &iotlb_gather->freelist);
+}
#define DOMAIN_NS(op) CONCATENATE(CONCATENATE(pt_iommu_, PTPFX), op)
@@ -167,6 +190,138 @@ static inline struct pt_table_p *table_alloc_top(struct pt_common *common,
log2_to_int(pt_top_memsize_lg2(common, top_of_table)));
}
+struct pt_unmap_args {
+ struct iommu_pages_list free_list;
+ pt_vaddr_t unmapped;
+};
+
+static __maybe_unused int __unmap_range(struct pt_range *range, void *arg,
+ unsigned int level,
+ struct pt_table_p *table)
+{
+ struct pt_state pts = pt_init(range, level, table);
+ struct pt_unmap_args *unmap = arg;
+ unsigned int num_oas = 0;
+ unsigned int start_index;
+ int ret = 0;
+
+ _pt_iter_first(&pts);
+ start_index = pts.index;
+ pts.type = pt_load_entry_raw(&pts);
+ /*
+ * A starting index is in the middle of a contiguous entry
+ *
+ * The IOMMU API does not require drivers to support unmapping parts of
+ * large pages. Long ago VFIO would try to split maps but the current
+ * version never does.
+ *
+ * Instead when unmap reaches a partial unmap of the start of a large
+ * IOPTE it should remove the entire IOPTE and return that size to the
+ * caller.
+ */
+ if (pts.type == PT_ENTRY_OA) {
+ if (log2_mod(range->va, pt_entry_oa_lg2sz(&pts)))
+ return -EINVAL;
+ goto start_oa;
+ }
+
+ do {
+ if (pts.type != PT_ENTRY_OA) {
+ bool fully_covered;
+
+ if (pts.type != PT_ENTRY_TABLE) {
+ ret = -EINVAL;
+ break;
+ }
+
+ if (pts.index != start_index)
+ pt_index_to_va(&pts);
+ pts.table_lower = pt_table_ptr(&pts);
+
+ fully_covered = pt_item_fully_covered(
+ &pts, pt_table_item_lg2sz(&pts));
+
+ ret = pt_descend(&pts, arg, __unmap_range);
+ if (ret)
+ break;
+
+ /*
+ * If the unmapping range fully covers the table then we
+ * can free it as well. The clear is delayed until we
+ * succeed in clearing the lower table levels.
+ */
+ if (fully_covered) {
+ iommu_pages_list_add(&unmap->free_list,
+ pts.table_lower);
+ pt_clear_entry(&pts, ilog2(1));
+ }
+ pts.index++;
+ } else {
+ unsigned int num_contig_lg2;
+start_oa:
+ /*
+ * If the caller's requested range ends within a single
+ * entry then the entire entry is unmapped and the length
+ * returned will be larger than requested.
+ */
+ num_contig_lg2 = pt_entry_num_contig_lg2(&pts);
+ pt_clear_entry(&pts, num_contig_lg2);
+ num_oas += log2_to_int(num_contig_lg2);
+ pts.index += log2_to_int(num_contig_lg2);
+ }
+ if (pts.index >= pts.end_index)
+ break;
+ pts.type = pt_load_entry_raw(&pts);
+ } while (true);
+
+ unmap->unmapped += log2_mul(num_oas, pt_table_item_lg2sz(&pts));
+ return ret;
+}
+
+/**
+ * unmap_pages() - Make a range of IOVA empty/not present
+ * @domain: Domain to manipulate
+ * @iova: IO virtual address to start
+ * @pgsize: Length of each page
+ * @pgcount: Length of the range in pgsize units starting from @iova
+ * @iotlb_gather: Gather struct that must be flushed on return
+ *
+ * unmap_pages() will remove a translation created by map_pages(). It cannot
+ * subdivide a mapping created by map_pages(), so it should be called with IOVA
+ * ranges that match those passed to map_pages(). The IOVA range can aggregate
+ * contiguous map_pages() calls so long as no individual range is split.
+ *
+ * Context: The caller must hold a write range lock that includes
+ * the whole range.
+ *
+ * Returns: Number of bytes of VA unmapped. iova + res will be the point
+ * where unmapping stopped.
+ */
+size_t DOMAIN_NS(unmap_pages)(struct iommu_domain *domain, unsigned long iova,
+ size_t pgsize, size_t pgcount,
+ struct iommu_iotlb_gather *iotlb_gather)
+{
+ struct pt_iommu *iommu_table =
+ container_of(domain, struct pt_iommu, domain);
+ struct pt_unmap_args unmap = { .free_list = IOMMU_PAGES_LIST_INIT(
+ unmap.free_list) };
+ pt_vaddr_t len = pgsize * pgcount;
+ struct pt_range range;
+ int ret;
+
+ ret = make_range(common_from_iommu(iommu_table), &range, iova, len);
+ if (ret)
+ return 0;
+
+ pt_walk_range(&range, __unmap_range, &unmap);
+
+ gather_range_pages(iotlb_gather, iommu_table, iova, len,
+ &unmap.free_list);
+
+ return unmap.unmapped;
+}
+EXPORT_SYMBOL_NS_GPL(DOMAIN_NS(unmap_pages), "GENERIC_PT_IOMMU");
+
static void NS(get_info)(struct pt_iommu *iommu_table,
struct pt_iommu_info *info)
{
diff --git a/include/linux/generic_pt/iommu.h b/include/linux/generic_pt/iommu.h
index 1fe5106997f833..382596b70e394e 100644
--- a/include/linux/generic_pt/iommu.h
+++ b/include/linux/generic_pt/iommu.h
@@ -9,6 +9,7 @@
#include <linux/iommu.h>
#include <linux/mm_types.h>
+struct iommu_iotlb_gather;
struct pt_iommu_ops;
/**
@@ -119,6 +120,10 @@ struct pt_iommu_cfg {
#define IOMMU_PROTOTYPES(fmt) \
phys_addr_t pt_iommu_##fmt##_iova_to_phys(struct iommu_domain *domain, \
dma_addr_t iova); \
+ size_t pt_iommu_##fmt##_unmap_pages( \
+ struct iommu_domain *domain, unsigned long iova, \
+ size_t pgsize, size_t pgcount, \
+ struct iommu_iotlb_gather *iotlb_gather); \
int pt_iommu_##fmt##_init(struct pt_iommu_##fmt *table, \
const struct pt_iommu_##fmt##_cfg *cfg, \
gfp_t gfp); \
@@ -135,8 +140,9 @@ struct pt_iommu_cfg {
* A driver uses IOMMU_PT_DOMAIN_OPS to populate the iommu_domain_ops for the
* iommu_pt
*/
-#define IOMMU_PT_DOMAIN_OPS(fmt) \
- .iova_to_phys = &pt_iommu_##fmt##_iova_to_phys,
+#define IOMMU_PT_DOMAIN_OPS(fmt) \
+ .iova_to_phys = &pt_iommu_##fmt##_iova_to_phys, \
+ .unmap_pages = &pt_iommu_##fmt##_unmap_pages
/*
* The driver should setup its domain struct like
--
2.43.0