Archive-only list for patches
 help / color / mirror / Atom feed
From: Jason Gunthorpe <jgg@nvidia.com>
To: Alex Williamson <alex.williamson@redhat.com>,
	iommu@lists.linux.dev, kvm@vger.kernel.org
Cc: patches@lists.linux.dev
Subject: [PATCH] vfio/type1: Remove Fine Grained Superpages detection
Date: Tue,  8 Apr 2025 14:39:52 -0300	[thread overview]
Message-ID: <0-v1-0eed68063e59+93d-vfio_fgsp_jgg@nvidia.com> (raw)

VFIO is looking to enable an optimization where it can rely on the
unmap operation not splitting and returning the size of a larger IOPTE.

However since commits:
  d50651636fb ("iommu/io-pgtable-arm-v7s: Remove split on unmap behavior")
  33729a5fc0ca ("iommu/io-pgtable-arm: Remove split on unmap behavior")

there are no iommu drivers that do split on unmap anymore. Instead, all
iommu drivers are expected to unmap the whole contiguous page and return
its size.

Thus, there is no purpose in vfio_test_domain_fgsp() as it is only
checking whether the iommu supports 2*PAGE_SIZE as a contiguous page.

Currently only AMD v1 supports such a page size so all this logic only
activates on AMD v1.

Remove vfio_test_domain_fgsp() and just rely on a direct 2*PAGE_SIZE check
instead so there is no behavior change.

Maybe the iommu_iova_to_phys() scan should always be activated; it
shouldn't have a performance downside now that split is gone.

Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
---
 drivers/vfio/vfio_iommu_type1.c | 71 +++++++++------------------------
 1 file changed, 19 insertions(+), 52 deletions(-)

diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 0ac56072af9f23..529561bbbef98a 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -80,7 +80,6 @@ struct vfio_domain {
 	struct iommu_domain	*domain;
 	struct list_head	next;
 	struct list_head	group_list;
-	bool			fgsp : 1;	/* Fine-grained super pages */
 	bool			enforce_cache_coherency : 1;
 };
 
@@ -1056,6 +1055,7 @@ static long vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma,
 	LIST_HEAD(unmapped_region_list);
 	struct iommu_iotlb_gather iotlb_gather;
 	int unmapped_region_cnt = 0;
+	bool scan_for_contig;
 	long unlocked = 0;
 
 	if (!dma->size)
@@ -1079,9 +1079,15 @@ static long vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma,
 		cond_resched();
 	}
 
+	/*
+	 * For historical reasons this has only triggered on AMDv1 page tables,
+	 * though these days it should work everywhere.
+	 */
+	scan_for_contig = !(domain->domain->pgsize_bitmap & (2 * PAGE_SIZE));
 	iommu_iotlb_gather_init(&iotlb_gather);
 	while (iova < end) {
-		size_t unmapped, len;
+		size_t len = PAGE_SIZE;
+		size_t unmapped;
 		phys_addr_t phys, next;
 
 		phys = iommu_iova_to_phys(domain->domain, iova);
@@ -1094,12 +1100,18 @@ static long vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma,
 		 * To optimize for fewer iommu_unmap() calls, each of which
 		 * may require hardware cache flushing, try to find the
 		 * largest contiguous physical memory chunk to unmap.
+		 *
+		 * If the iova is part of a contiguous page > PAGE_SIZE then
+		 * unmap will unmap the whole contiguous page and return its
+		 * size.
 		 */
-		for (len = PAGE_SIZE;
-		     !domain->fgsp && iova + len < end; len += PAGE_SIZE) {
-			next = iommu_iova_to_phys(domain->domain, iova + len);
-			if (next != phys + len)
-				break;
+		if (scan_for_contig) {
+			for (; iova + len < end; len += PAGE_SIZE) {
+				next = iommu_iova_to_phys(domain->domain,
+							  iova + len);
+				if (next != phys + len)
+					break;
+			}
 		}
 
 		/*
@@ -1833,49 +1845,6 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu,
 	return ret;
 }
 
-/*
- * We change our unmap behavior slightly depending on whether the IOMMU
- * supports fine-grained superpages.  IOMMUs like AMD-Vi will use a superpage
- * for practically any contiguous power-of-two mapping we give it.  This means
- * we don't need to look for contiguous chunks ourselves to make unmapping
- * more efficient.  On IOMMUs with coarse-grained super pages, like Intel VT-d
- * with discrete 2M/1G/512G/1T superpages, identifying contiguous chunks
- * significantly boosts non-hugetlbfs mappings and doesn't seem to hurt when
- * hugetlbfs is in use.
- */
-static void vfio_test_domain_fgsp(struct vfio_domain *domain, struct list_head *regions)
-{
-	int ret, order = get_order(PAGE_SIZE * 2);
-	struct vfio_iova *region;
-	struct page *pages;
-	dma_addr_t start;
-
-	pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
-	if (!pages)
-		return;
-
-	list_for_each_entry(region, regions, list) {
-		start = ALIGN(region->start, PAGE_SIZE * 2);
-		if (start >= region->end || (region->end - start < PAGE_SIZE * 2))
-			continue;
-
-		ret = iommu_map(domain->domain, start, page_to_phys(pages), PAGE_SIZE * 2,
-				IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE,
-				GFP_KERNEL_ACCOUNT);
-		if (!ret) {
-			size_t unmapped = iommu_unmap(domain->domain, start, PAGE_SIZE);
-
-			if (unmapped == PAGE_SIZE)
-				iommu_unmap(domain->domain, start + PAGE_SIZE, PAGE_SIZE);
-			else
-				domain->fgsp = true;
-		}
-		break;
-	}
-
-	__free_pages(pages, order);
-}
-
 static struct vfio_iommu_group *find_iommu_group(struct vfio_domain *domain,
 						 struct iommu_group *iommu_group)
 {
@@ -2314,8 +2283,6 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
 		}
 	}
 
-	vfio_test_domain_fgsp(domain, &iova_copy);
-
 	/* replay mappings on new domains */
 	ret = vfio_iommu_replay(iommu, domain);
 	if (ret)

base-commit: 5a7ff05a5717e2ac4f4f83bcdd9033f246e9946b
-- 
2.43.0


             reply	other threads:[~2025-04-08 17:39 UTC|newest]

Thread overview: 3+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2025-04-08 17:39 Jason Gunthorpe [this message]
2025-04-08 19:23 ` [PATCH] vfio/type1: Remove Fine Grained Superpages detection Alex Williamson
2025-04-09 15:50   ` Jason Gunthorpe

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=0-v1-0eed68063e59+93d-vfio_fgsp_jgg@nvidia.com \
    --to=jgg@nvidia.com \
    --cc=alex.williamson@redhat.com \
    --cc=iommu@lists.linux.dev \
    --cc=kvm@vger.kernel.org \
    --cc=patches@lists.linux.dev \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox