From: Mostafa Saleh <smostafa@google.com>
To: linux-mm@kvack.org, iommu@lists.linux.dev,
linux-kernel@vger.kernel.org, linux-doc@vger.kernel.org
Cc: corbet@lwn.net, joro@8bytes.org, will@kernel.org,
robin.murphy@arm.com, akpm@linux-foundation.org, vbabka@suse.cz,
surenb@google.com, mhocko@suse.com, jackmanb@google.com,
hannes@cmpxchg.org, ziy@nvidia.com, david@redhat.com,
lorenzo.stoakes@oracle.com, Liam.Howlett@oracle.com,
rppt@kernel.org, xiaqinxin@huawei.com, baolu.lu@linux.intel.com,
rdunlap@infradead.org, Mostafa Saleh <smostafa@google.com>
Subject: [PATCH v4 3/4] iommu: debug-pagealloc: Track IOMMU pages
Date: Thu, 11 Dec 2025 12:59:27 +0000
Message-ID: <20251211125928.3258905-4-smostafa@google.com>
In-Reply-To: <20251211125928.3258905-1-smostafa@google.com>
Using the newly added calls, use a per-page atomic refcount to track how
many times each page is mapped in any of the IOMMUs.

For unmap we need iova_to_phys() to get the physical address of the
pages, as unmap only provides the IOVA.

We use the smallest page size supported by the domain as the tracking
granularity. This is important as pages can be mapped with one size and
unmapped with another (as in the map_sg() case).
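To illustrate the granularity arithmetic, here is a minimal user-space
sketch (illustration only, not part of the patch): debug_page_size() is a
stand-in for the iommu_debug_page_size() helper added below, the example
pgsize_bitmap value and the GCC/Clang __builtin_ctzl() are assumptions.

  #include <assert.h>
  #include <stddef.h>

  /* Sketch: track refcounts at the smallest page size the domain supports. */
  static size_t debug_page_size(unsigned long pgsize_bitmap)
  {
      /* lowest set bit == smallest supported page size */
      return 1UL << __builtin_ctzl(pgsize_bitmap);
  }

  int main(void)
  {
      /* example: a domain advertising 4K, 2M and 1G page sizes */
      unsigned long pgsize_bitmap = (1UL << 12) | (1UL << 21) | (1UL << 30);
      size_t granule = debug_page_size(pgsize_bitmap);

      /*
       * One 2M mapping bumps 512 per-page refcounts; unmapping the same
       * range 4K at a time drops exactly 512, so the counts balance no
       * matter which page size the driver actually used for the map.
       */
      assert(granule == 4096);
      assert((1UL << 21) / granule == 512);
      return 0;
  }

With a mixed bitmap, map and unmap of the same range always touch the same
set of smallest-granule refcounts, which is what keeps the counting immune
to map/unmap size mismatches.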
Signed-off-by: Mostafa Saleh <smostafa@google.com>
---
drivers/iommu/iommu-debug-pagealloc.c | 91 +++++++++++++++++++++++++++
1 file changed, 91 insertions(+)
diff --git a/drivers/iommu/iommu-debug-pagealloc.c b/drivers/iommu/iommu-debug-pagealloc.c
index 1d343421da98..4639cf9518e6 100644
--- a/drivers/iommu/iommu-debug-pagealloc.c
+++ b/drivers/iommu/iommu-debug-pagealloc.c
@@ -29,19 +29,110 @@ struct page_ext_operations page_iommu_debug_ops = {
.need = need_iommu_debug,
};
+static struct page_ext *get_iommu_page_ext(phys_addr_t phys)
+{
+	struct page *page = phys_to_page(phys);
+	struct page_ext *page_ext = page_ext_get(page);
+
+	return page_ext;
+}
+
+static struct iommu_debug_metadata *get_iommu_data(struct page_ext *page_ext)
+{
+	return page_ext_data(page_ext, &page_iommu_debug_ops);
+}
+
+static void iommu_debug_inc_page(phys_addr_t phys)
+{
+	struct page_ext *page_ext = get_iommu_page_ext(phys);
+	struct iommu_debug_metadata *d = get_iommu_data(page_ext);
+
+	WARN_ON(atomic_inc_return_relaxed(&d->ref) <= 0);
+	page_ext_put(page_ext);
+}
+
+static void iommu_debug_dec_page(phys_addr_t phys)
+{
+	struct page_ext *page_ext = get_iommu_page_ext(phys);
+	struct iommu_debug_metadata *d = get_iommu_data(page_ext);
+
+	WARN_ON(atomic_dec_return_relaxed(&d->ref) < 0);
+	page_ext_put(page_ext);
+}
+
+/*
+ * The IOMMU page size doesn't have to match the CPU page size. So, we use
+ * the smallest IOMMU page size to refcount the pages in the vmemmap.
+ * That is important as both map and unmap have to use the same page size
+ * to update the refcount to avoid double counting the same page.
+ * And as we can't know from iommu_unmap() which page size was originally
+ * used for map, we just use the minimum supported one for both.
+ */
+static size_t iommu_debug_page_size(struct iommu_domain *domain)
+{
+	return 1UL << __ffs(domain->pgsize_bitmap);
+}
+
void __iommu_debug_map(struct iommu_domain *domain, phys_addr_t phys, size_t size)
{
+	size_t off, end;
+	size_t page_size = iommu_debug_page_size(domain);
+
+	if (WARN_ON(!phys || check_add_overflow(phys, size, &end)))
+		return;
+
+	for (off = 0 ; off < size ; off += page_size) {
+		if (!pfn_valid(__phys_to_pfn(phys + off)))
+			continue;
+		iommu_debug_inc_page(phys + off);
+	}
+}
+
+static void __iommu_debug_update_iova(struct iommu_domain *domain,
+				      unsigned long iova, size_t size, bool inc)
+{
+	size_t off, end;
+	size_t page_size = iommu_debug_page_size(domain);
+
+	if (WARN_ON(check_add_overflow(iova, size, &end)))
+		return;
+
+	for (off = 0 ; off < size ; off += page_size) {
+		phys_addr_t phys = iommu_iova_to_phys(domain, iova + off);
+
+		if (!phys || !pfn_valid(__phys_to_pfn(phys)))
+			continue;
+
+		if (inc)
+			iommu_debug_inc_page(phys);
+		else
+			iommu_debug_dec_page(phys);
+	}
}
void __iommu_debug_unmap_begin(struct iommu_domain *domain,
unsigned long iova, size_t size)
{
+	__iommu_debug_update_iova(domain, iova, size, false);
}
void __iommu_debug_unmap_end(struct iommu_domain *domain,
unsigned long iova, size_t size,
size_t unmapped)
{
+	if (unmapped == size)
+		return;
+
+	/*
+	 * If the unmap failed or was partial, re-increment the refcount for
+	 * the part that is still mapped; if it unmapped a larger size,
+	 * decrement the extra part.
+	 */
+	if (unmapped < size)
+		__iommu_debug_update_iova(domain, iova + unmapped,
+					  size - unmapped, true);
+	else
+		__iommu_debug_update_iova(domain, iova + size,
+					  unmapped - size, false);
}
void iommu_debug_init(void)
--
2.52.0.223.gf5cc29aaa4-goog