From: Jason Gunthorpe <jgg@nvidia.com>
To: Alim Akhtar <alim.akhtar@samsung.com>,
Alyssa Rosenzweig <alyssa@rosenzweig.io>,
Albert Ou <aou@eecs.berkeley.edu>,
asahi@lists.linux.dev, Lu Baolu <baolu.lu@linux.intel.com>,
David Woodhouse <dwmw2@infradead.org>,
Heiko Stuebner <heiko@sntech.de>,
iommu@lists.linux.dev, Jernej Skrabec <jernej.skrabec@gmail.com>,
Jonathan Hunter <jonathanh@nvidia.com>,
Joerg Roedel <joro@8bytes.org>,
Krzysztof Kozlowski <krzk@kernel.org>,
linux-arm-kernel@lists.infradead.org,
linux-riscv@lists.infradead.org,
linux-rockchip@lists.infradead.org,
linux-samsung-soc@vger.kernel.org, linux-sunxi@lists.linux.dev,
linux-tegra@vger.kernel.org,
Marek Szyprowski <m.szyprowski@samsung.com>,
Hector Martin <marcan@marcan.st>,
Palmer Dabbelt <palmer@dabbelt.com>,
Paul Walmsley <paul.walmsley@sifive.com>,
Robin Murphy <robin.murphy@arm.com>,
Samuel Holland <samuel@sholland.org>,
Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>,
Sven Peter <sven@svenpeter.dev>,
Thierry Reding <thierry.reding@gmail.com>,
Tomasz Jeznach <tjeznach@rivosinc.com>,
Krishna Reddy <vdumpa@nvidia.com>, Chen-Yu Tsai <wens@csie.org>,
Will Deacon <will@kernel.org>
Cc: Bagas Sanjaya <bagasdotme@gmail.com>,
Joerg Roedel <jroedel@suse.de>,
Pasha Tatashin <pasha.tatashin@soleen.com>,
patches@lists.linux.dev, David Rientjes <rientjes@google.com>,
Matthew Wilcox <willy@infradead.org>
Subject: [PATCH v3 14/23] iommu/pages: Move from struct page to struct ioptdesc and folio
Date: Tue, 25 Feb 2025 15:39:31 -0400 [thread overview]
Message-ID: <14-v3-e797f4dc6918+93057-iommu_pages_jgg@nvidia.com> (raw)
In-Reply-To: <0-v3-e797f4dc6918+93057-iommu_pages_jgg@nvidia.com>
This brings the iommu page table allocator into the modern world of having
its own private page descriptor and not re-using fields from struct page
for its own purpose. It follows the basic pattern of struct ptdesc which
did this transformation for the CPU page table allocator.
Currently iommu-pages is pretty basic so this isn't a huge benefit,
however I see a coming need for features that the CPU allocator has, like
sub PAGE_SIZE allocations, and RCU freeing. This provides the base
infrastructure to implement those cleanly.
Remove numa_node_id() calls from the inlines and instead use NUMA_NO_NODE
which will get switched to numa_mem_id(), which seems to be the right ID
to use for memory allocations.
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
---
drivers/iommu/iommu-pages.c | 54 ++++++++++++++++++++++++++-----------
drivers/iommu/iommu-pages.h | 43 ++++++++++++++++++++++++++---
2 files changed, 78 insertions(+), 19 deletions(-)
diff --git a/drivers/iommu/iommu-pages.c b/drivers/iommu/iommu-pages.c
index 6eacb6a34586a6..3077df642adb1f 100644
--- a/drivers/iommu/iommu-pages.c
+++ b/drivers/iommu/iommu-pages.c
@@ -7,6 +7,21 @@
#include <linux/gfp.h>
#include <linux/mm.h>
+#define IOPTDESC_MATCH(pg_elm, elm) \
+ static_assert(offsetof(struct page, pg_elm) == \
+ offsetof(struct ioptdesc, elm))
+IOPTDESC_MATCH(flags, __page_flags);
+IOPTDESC_MATCH(lru, iopt_freelist_elm); /* Ensure bit 0 is clear */
+IOPTDESC_MATCH(mapping, __page_mapping);
+IOPTDESC_MATCH(private, _private);
+IOPTDESC_MATCH(page_type, __page_type);
+IOPTDESC_MATCH(_refcount, __page_refcount);
+#ifdef CONFIG_MEMCG
+IOPTDESC_MATCH(memcg_data, memcg_data);
+#endif
+#undef IOPTDESC_MATCH
+static_assert(sizeof(struct ioptdesc) <= sizeof(struct page));
+
/**
* iommu_alloc_pages_node - Allocate a zeroed page of a given order from
* specific NUMA node
@@ -20,10 +35,17 @@
void *iommu_alloc_pages_node(int nid, gfp_t gfp, unsigned int order)
{
const unsigned long pgcnt = 1UL << order;
- struct page *page;
+ struct folio *folio;
- page = alloc_pages_node(nid, gfp | __GFP_ZERO | __GFP_COMP, order);
- if (unlikely(!page))
+ /*
+ * __folio_alloc_node() does not handle NUMA_NO_NODE like
+ * alloc_pages_node() did.
+ */
+ if (nid == NUMA_NO_NODE)
+ nid = numa_mem_id();
+
+ folio = __folio_alloc_node(gfp | __GFP_ZERO, order, nid);
+ if (unlikely(!folio))
return NULL;
/*
@@ -35,21 +57,21 @@ void *iommu_alloc_pages_node(int nid, gfp_t gfp, unsigned int order)
* This is necessary for the proper accounting as IOMMU state can be
* rather large, i.e. multiple gigabytes in size.
*/
- mod_node_page_state(page_pgdat(page), NR_IOMMU_PAGES, pgcnt);
- mod_lruvec_page_state(page, NR_SECONDARY_PAGETABLE, pgcnt);
+ mod_node_page_state(folio_pgdat(folio), NR_IOMMU_PAGES, pgcnt);
+ lruvec_stat_mod_folio(folio, NR_SECONDARY_PAGETABLE, pgcnt);
- return page_address(page);
+ return folio_address(folio);
}
EXPORT_SYMBOL_GPL(iommu_alloc_pages_node);
-static void __iommu_free_page(struct page *page)
+static void __iommu_free_desc(struct ioptdesc *iopt)
{
- unsigned int order = folio_order(page_folio(page));
- const unsigned long pgcnt = 1UL << order;
+ struct folio *folio = ioptdesc_folio(iopt);
+ const unsigned long pgcnt = 1UL << folio_order(folio);
- mod_node_page_state(page_pgdat(page), NR_IOMMU_PAGES, -pgcnt);
- mod_lruvec_page_state(page, NR_SECONDARY_PAGETABLE, -pgcnt);
- put_page(page);
+ mod_node_page_state(folio_pgdat(folio), NR_IOMMU_PAGES, -pgcnt);
+ lruvec_stat_mod_folio(folio, NR_SECONDARY_PAGETABLE, -pgcnt);
+ folio_put(folio);
}
/**
@@ -62,7 +84,7 @@ void iommu_free_pages(void *virt)
{
if (!virt)
return;
- __iommu_free_page(virt_to_page(virt));
+ __iommu_free_desc(virt_to_ioptdesc(virt));
}
EXPORT_SYMBOL_GPL(iommu_free_pages);
@@ -74,9 +96,9 @@ EXPORT_SYMBOL_GPL(iommu_free_pages);
*/
void iommu_put_pages_list(struct iommu_pages_list *list)
{
- struct page *p, *tmp;
+ struct ioptdesc *iopt, *tmp;
- list_for_each_entry_safe(p, tmp, &list->pages, lru)
- __iommu_free_page(p);
+ list_for_each_entry_safe(iopt, tmp, &list->pages, iopt_freelist_elm)
+ __iommu_free_desc(iopt);
}
EXPORT_SYMBOL_GPL(iommu_put_pages_list);
diff --git a/drivers/iommu/iommu-pages.h b/drivers/iommu/iommu-pages.h
index 8dc0202bf108e4..f4578f252e2580 100644
--- a/drivers/iommu/iommu-pages.h
+++ b/drivers/iommu/iommu-pages.h
@@ -9,6 +9,43 @@
#include <linux/iommu.h>
+/**
+ * struct ioptdesc - Memory descriptor for IOMMU page tables
+ * @iopt_freelist_elm: List element for a struct iommu_pages_list
+ *
+ * This struct overlays struct page for now. Do not modify without a good
+ * understanding of the issues.
+ */
+struct ioptdesc {
+ unsigned long __page_flags;
+
+ struct list_head iopt_freelist_elm;
+ unsigned long __page_mapping;
+ pgoff_t __index;
+ void *_private;
+
+ unsigned int __page_type;
+ atomic_t __page_refcount;
+#ifdef CONFIG_MEMCG
+ unsigned long memcg_data;
+#endif
+};
+
+static inline struct ioptdesc *folio_ioptdesc(struct folio *folio)
+{
+ return (struct ioptdesc *)folio;
+}
+
+static inline struct folio *ioptdesc_folio(struct ioptdesc *iopt)
+{
+ return (struct folio *)iopt;
+}
+
+static inline struct ioptdesc *virt_to_ioptdesc(void *virt)
+{
+ return folio_ioptdesc(virt_to_folio(virt));
+}
+
void *iommu_alloc_pages_node(int nid, gfp_t gfp, unsigned int order);
void iommu_free_pages(void *virt);
void iommu_put_pages_list(struct iommu_pages_list *list);
@@ -21,7 +58,7 @@ void iommu_put_pages_list(struct iommu_pages_list *list);
static inline void iommu_pages_list_add(struct iommu_pages_list *list,
void *virt)
{
- list_add_tail(&virt_to_page(virt)->lru, &list->pages);
+ list_add_tail(&virt_to_ioptdesc(virt)->iopt_freelist_elm, &list->pages);
}
/**
@@ -56,7 +93,7 @@ static inline bool iommu_pages_list_empty(struct iommu_pages_list *list)
*/
static inline void *iommu_alloc_pages(gfp_t gfp, int order)
{
- return iommu_alloc_pages_node(numa_node_id(), gfp, order);
+ return iommu_alloc_pages_node(NUMA_NO_NODE, gfp, order);
}
/**
@@ -79,7 +116,7 @@ static inline void *iommu_alloc_page_node(int nid, gfp_t gfp)
*/
static inline void *iommu_alloc_page(gfp_t gfp)
{
- return iommu_alloc_pages_node(numa_node_id(), gfp, 0);
+ return iommu_alloc_pages_node(NUMA_NO_NODE, gfp, 0);
}
#endif /* __IOMMU_PAGES_H */
--
2.43.0
_______________________________________________
linux-riscv mailing list
linux-riscv@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-riscv
next prev parent reply other threads:[~2025-02-25 19:50 UTC|newest]
Thread overview: 55+ messages / expand[flat|nested] mbox.gz Atom feed top
2025-02-25 19:39 [PATCH v3 00/23] iommu: Further abstract iommu-pages Jason Gunthorpe
2025-02-25 19:39 ` [PATCH v3 01/23] iommu/terga: Do not use struct page as the handle for as->pd memory Jason Gunthorpe
2025-02-25 19:39 ` [PATCH v3 02/23] iommu/tegra: Do not use struct page as the handle for pts Jason Gunthorpe
2025-02-25 19:39 ` [PATCH v3 03/23] iommu/pages: Remove __iommu_alloc_pages()/__iommu_free_pages() Jason Gunthorpe
2025-02-26 6:25 ` Baolu Lu
2025-03-12 11:43 ` Mostafa Saleh
2025-02-25 19:39 ` [PATCH v3 04/23] iommu/pages: Make iommu_put_pages_list() work with high order allocations Jason Gunthorpe
2025-02-26 6:28 ` Baolu Lu
2025-02-25 19:39 ` [PATCH v3 05/23] iommu/pages: Remove the order argument to iommu_free_pages() Jason Gunthorpe
2025-02-26 6:32 ` Baolu Lu
2025-03-12 11:43 ` Mostafa Saleh
2025-02-25 19:39 ` [PATCH v3 06/23] iommu/pages: Remove iommu_free_page() Jason Gunthorpe
2025-02-26 6:34 ` Baolu Lu
2025-03-12 11:44 ` Mostafa Saleh
2025-02-25 19:39 ` [PATCH v3 07/23] iommu/pages: De-inline the substantial functions Jason Gunthorpe
2025-02-26 6:43 ` Baolu Lu
2025-03-12 12:45 ` Mostafa Saleh
2025-02-25 19:39 ` [PATCH v3 08/23] iommu/vtd: Use virt_to_phys() Jason Gunthorpe
2025-03-10 2:21 ` Baolu Lu
2025-02-25 19:39 ` [PATCH v3 09/23] iommu/pages: Formalize the freelist API Jason Gunthorpe
2025-02-26 6:56 ` Baolu Lu
2025-02-26 17:31 ` Jason Gunthorpe
2025-02-27 5:11 ` Baolu Lu
2025-02-25 19:39 ` [PATCH v3 10/23] iommu/riscv: Convert to use struct iommu_pages_list Jason Gunthorpe
2025-02-25 19:39 ` [PATCH v3 11/23] iommu/amd: " Jason Gunthorpe
2025-02-25 19:39 ` [PATCH v3 12/23] iommu: Change iommu_iotlb_gather to use iommu_page_list Jason Gunthorpe
2025-02-26 7:02 ` Baolu Lu
2025-02-25 19:39 ` [PATCH v3 13/23] iommu/pages: Remove iommu_put_pages_list_old and the _Generic Jason Gunthorpe
2025-02-26 7:04 ` Baolu Lu
2025-02-25 19:39 ` Jason Gunthorpe [this message]
2025-02-26 12:42 ` [PATCH v3 14/23] iommu/pages: Move from struct page to struct ioptdesc and folio Baolu Lu
2025-02-26 13:51 ` Jason Gunthorpe
2025-02-27 5:17 ` Baolu Lu
2025-02-27 5:17 ` Baolu Lu
2025-02-25 19:39 ` [PATCH v3 15/23] iommu/pages: Move the __GFP_HIGHMEM checks into the common code Jason Gunthorpe
2025-03-12 12:45 ` Mostafa Saleh
2025-02-25 19:39 ` [PATCH v3 16/23] iommu/pages: Allow sub page sizes to be passed into the allocator Jason Gunthorpe
2025-02-26 12:22 ` Baolu Lu
2025-02-25 19:39 ` [PATCH v3 17/23] iommu/amd: Change rlookup, irq_lookup, and alias to use kvalloc() Jason Gunthorpe
2025-02-25 19:39 ` [PATCH v3 18/23] iommu/amd: Use roundup_pow_two() instead of get_order() Jason Gunthorpe
2025-02-25 19:39 ` [PATCH v3 19/23] iommu/riscv: Update to use iommu_alloc_pages_node_lg2() Jason Gunthorpe
2025-02-25 19:39 ` [PATCH v3 20/23] iommu: Update various drivers to pass in lg2sz instead of order to iommu pages Jason Gunthorpe
2025-02-26 12:24 ` Baolu Lu
2025-03-12 12:59 ` Mostafa Saleh
2025-03-17 13:35 ` Jason Gunthorpe
2025-03-18 10:46 ` Mostafa Saleh
2025-03-18 10:57 ` Robin Murphy
2025-02-25 19:39 ` [PATCH v3 21/23] iommu/pages: Remove iommu_alloc_page/pages() Jason Gunthorpe
2025-02-26 9:15 ` Marek Szyprowski
2025-02-25 19:39 ` [PATCH v3 22/23] iommu/pages: Remove iommu_alloc_page_node() Jason Gunthorpe
2025-02-26 12:26 ` Baolu Lu
2025-02-25 19:39 ` [PATCH v3 23/23] iommu/pages: Remove iommu_alloc_pages_node() Jason Gunthorpe
2025-02-26 12:30 ` Baolu Lu
2025-02-25 20:18 ` [PATCH v3 00/23] iommu: Further abstract iommu-pages Nicolin Chen
2025-02-25 23:17 ` Alejandro Jimenez
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=14-v3-e797f4dc6918+93057-iommu_pages_jgg@nvidia.com \
--to=jgg@nvidia.com \
--cc=alim.akhtar@samsung.com \
--cc=alyssa@rosenzweig.io \
--cc=aou@eecs.berkeley.edu \
--cc=asahi@lists.linux.dev \
--cc=bagasdotme@gmail.com \
--cc=baolu.lu@linux.intel.com \
--cc=dwmw2@infradead.org \
--cc=heiko@sntech.de \
--cc=iommu@lists.linux.dev \
--cc=jernej.skrabec@gmail.com \
--cc=jonathanh@nvidia.com \
--cc=joro@8bytes.org \
--cc=jroedel@suse.de \
--cc=krzk@kernel.org \
--cc=linux-arm-kernel@lists.infradead.org \
--cc=linux-riscv@lists.infradead.org \
--cc=linux-rockchip@lists.infradead.org \
--cc=linux-samsung-soc@vger.kernel.org \
--cc=linux-sunxi@lists.linux.dev \
--cc=linux-tegra@vger.kernel.org \
--cc=m.szyprowski@samsung.com \
--cc=marcan@marcan.st \
--cc=palmer@dabbelt.com \
--cc=pasha.tatashin@soleen.com \
--cc=patches@lists.linux.dev \
--cc=paul.walmsley@sifive.com \
--cc=rientjes@google.com \
--cc=robin.murphy@arm.com \
--cc=samuel@sholland.org \
--cc=suravee.suthikulpanit@amd.com \
--cc=sven@svenpeter.dev \
--cc=thierry.reding@gmail.com \
--cc=tjeznach@rivosinc.com \
--cc=vdumpa@nvidia.com \
--cc=wens@csie.org \
--cc=will@kernel.org \
--cc=willy@infradead.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox