From: Jason Gunthorpe <jgg@nvidia.com>
To: iommu@lists.linux.dev, Joerg Roedel <joro@8bytes.org>,
Robin Murphy <robin.murphy@arm.com>,
Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>,
Will Deacon <will@kernel.org>
Cc: Alejandro Jimenez <alejandro.j.jimenez@oracle.com>,
Joao Martins <joao.m.martins@oracle.com>,
Joerg Roedel <jroedel@suse.de>,
patches@lists.linux.dev, Vasant Hegde <vasant.hegde@amd.com>
Subject: [PATCH 09/14] iommu/amd: Narrow the use of struct protection_domain to invalidation
Date: Wed, 21 Aug 2024 14:37:15 -0300
Message-ID: <9-v1-cdaaddf80abb+14190-amd_iopgtbl_jgg@nvidia.com>
In-Reply-To: <0-v1-cdaaddf80abb+14190-amd_iopgtbl_jgg@nvidia.com>

The AMD io_pgtable code does not implement the tlb ops callbacks; instead
it invokes the invalidation ops directly on the struct protection_domain.

Narrow the use of struct protection_domain to only those few code paths.
Make everything else properly use struct amd_io_pgtable, which is the
correct modular type for this code.
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
---
drivers/iommu/amd/io_pgtable.c | 35 +++++++++++++++++--------------
drivers/iommu/amd/io_pgtable_v2.c | 11 ++++++----
2 files changed, 26 insertions(+), 20 deletions(-)
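
Note for reviewers: the conversion used in the few remaining invalidation
paths works because struct protection_domain embeds the struct
amd_io_pgtable as its iop member, so container_of() recovers the owning
domain from the pgtable pointer. Below is a minimal standalone sketch of
that relationship; the struct members shown and the flush helper are
simplified stand-ins for illustration, not the real driver definitions:

/*
 * Simplified sketch: the domain embeds the io_pgtable state, so a helper
 * that is handed only the pgtable can still reach the owning domain via
 * container_of() when it must invalidate.
 */
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct amd_io_pgtable_sketch {
	int mode;
	unsigned long long *root;
};

struct protection_domain_sketch {
	/* lock, dev_list, ... omitted */
	struct amd_io_pgtable_sketch iop;
};

/* Stand-in for the real flush/invalidate calls made on the domain. */
static void flush_domain(struct protection_domain_sketch *domain)
{
	(void)domain;
}

/* Page-table code is handed only the pgtable... */
static void grow_table(struct amd_io_pgtable_sketch *pgtable)
{
	/* ...and converts back to the domain only where it must flush. */
	struct protection_domain_sketch *domain =
		container_of(pgtable, struct protection_domain_sketch, iop);

	pgtable->mode += 1;
	flush_domain(domain);
}

int main(void)
{
	struct protection_domain_sketch dom = { .iop = { .mode = 3 } };

	grow_table(&dom.iop);
	return dom.iop.mode == 4 ? 0 : 1;
}

This mirrors what increase_address_space() and the map paths do in the
diff below: the conversion back to struct protection_domain happens only
at the points that actually need to flush.
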
diff --git a/drivers/iommu/amd/io_pgtable.c b/drivers/iommu/amd/io_pgtable.c
index d56be842c0b71e..cb8f23e878028a 100644
--- a/drivers/iommu/amd/io_pgtable.c
+++ b/drivers/iommu/amd/io_pgtable.c
@@ -137,31 +137,33 @@ static void free_sub_pt(u64 *root, int mode, struct list_head *freelist)
* another level increases the size of the address space by 9 bits to a size up
* to 64 bits.
*/
-static bool increase_address_space(struct protection_domain *domain,
+static bool increase_address_space(struct amd_io_pgtable *pgtable,
unsigned long address,
gfp_t gfp)
{
+ struct protection_domain *domain =
+ container_of(pgtable, struct protection_domain, iop);
unsigned long flags;
bool ret = true;
u64 *pte;
- pte = iommu_alloc_page_node(domain->iop.pgtbl.cfg.amd.nid, gfp);
+ pte = iommu_alloc_page_node(pgtable->pgtbl.cfg.amd.nid, gfp);
if (!pte)
return false;
spin_lock_irqsave(&domain->lock, flags);
- if (address <= PM_LEVEL_SIZE(domain->iop.mode))
+ if (address <= PM_LEVEL_SIZE(pgtable->mode))
goto out;
ret = false;
- if (WARN_ON_ONCE(domain->iop.mode == PAGE_MODE_6_LEVEL))
+ if (WARN_ON_ONCE(pgtable->mode == PAGE_MODE_6_LEVEL))
goto out;
- *pte = PM_LEVEL_PDE(domain->iop.mode, iommu_virt_to_phys(domain->iop.root));
+ *pte = PM_LEVEL_PDE(pgtable->mode, iommu_virt_to_phys(pgtable->root));
- domain->iop.root = pte;
- domain->iop.mode += 1;
+ pgtable->root = pte;
+ pgtable->mode += 1;
amd_iommu_update_and_flush_device_table(domain);
amd_iommu_domain_flush_complete(domain);
@@ -175,7 +177,7 @@ static bool increase_address_space(struct protection_domain *domain,
return ret;
}
-static u64 *alloc_pte(struct protection_domain *domain,
+static u64 *alloc_pte(struct amd_io_pgtable *pgtable,
unsigned long address,
unsigned long page_size,
u64 **pte_page,
@@ -187,18 +189,18 @@ static u64 *alloc_pte(struct protection_domain *domain,
BUG_ON(!is_power_of_2(page_size));
- while (address > PM_LEVEL_SIZE(domain->iop.mode)) {
+ while (address > PM_LEVEL_SIZE(pgtable->mode)) {
/*
* Return an error if there is no memory to update the
* page-table.
*/
- if (!increase_address_space(domain, address, gfp))
+ if (!increase_address_space(pgtable, address, gfp))
return NULL;
}
- level = domain->iop.mode - 1;
- pte = &domain->iop.root[PM_LEVEL_INDEX(level, address)];
+ level = pgtable->mode - 1;
+ pte = &pgtable->root[PM_LEVEL_INDEX(level, address)];
address = PAGE_SIZE_ALIGN(address, page_size);
end_lvl = PAGE_SIZE_LEVEL(page_size);
@@ -233,8 +235,8 @@ static u64 *alloc_pte(struct protection_domain *domain,
if (!IOMMU_PTE_PRESENT(__pte) ||
pte_level == PAGE_MODE_NONE) {
- page = iommu_alloc_page_node(
- domain->iop.pgtbl.cfg.amd.nid, gfp);
+ page = iommu_alloc_page_node(pgtable->pgtbl.cfg.amd.nid,
+ gfp);
if (!page)
return NULL;
@@ -348,7 +350,7 @@ static int iommu_v1_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
phys_addr_t paddr, size_t pgsize, size_t pgcount,
int prot, gfp_t gfp, size_t *mapped)
{
- struct protection_domain *dom = io_pgtable_ops_to_domain(ops);
+ struct amd_io_pgtable *pgtable = io_pgtable_ops_to_data(ops);
LIST_HEAD(freelist);
bool updated = false;
u64 __pte, *pte;
@@ -365,7 +367,7 @@ static int iommu_v1_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
while (pgcount > 0) {
count = PAGE_SIZE_PTE_COUNT(pgsize);
- pte = alloc_pte(dom, iova, pgsize, NULL, gfp, &updated);
+ pte = alloc_pte(pgtable, iova, pgsize, NULL, gfp, &updated);
ret = -ENOMEM;
if (!pte)
@@ -402,6 +404,7 @@ static int iommu_v1_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
out:
if (updated) {
+ struct protection_domain *dom = io_pgtable_ops_to_domain(ops);
unsigned long flags;
spin_lock_irqsave(&dom->lock, flags);
diff --git a/drivers/iommu/amd/io_pgtable_v2.c b/drivers/iommu/amd/io_pgtable_v2.c
index 1e3be8c5312b87..ed2c1faae6d580 100644
--- a/drivers/iommu/amd/io_pgtable_v2.c
+++ b/drivers/iommu/amd/io_pgtable_v2.c
@@ -233,8 +233,8 @@ static int iommu_v2_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
phys_addr_t paddr, size_t pgsize, size_t pgcount,
int prot, gfp_t gfp, size_t *mapped)
{
- struct protection_domain *pdom = io_pgtable_ops_to_domain(ops);
- struct io_pgtable_cfg *cfg = &pdom->iop.pgtbl.cfg;
+ struct amd_io_pgtable *pgtable = io_pgtable_ops_to_data(ops);
+ struct io_pgtable_cfg *cfg = &pgtable->pgtbl.cfg;
u64 *pte;
unsigned long map_size;
unsigned long mapped_size = 0;
@@ -251,7 +251,7 @@ static int iommu_v2_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
while (mapped_size < size) {
map_size = get_alloc_page_size(pgsize);
- pte = v2_alloc_pte(cfg->amd.nid, pdom->iop.pgd,
+ pte = v2_alloc_pte(cfg->amd.nid, pgtable->pgd,
iova, map_size, gfp, &updated);
if (!pte) {
ret = -EINVAL;
@@ -266,8 +266,11 @@ static int iommu_v2_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
}
out:
- if (updated)
+ if (updated) {
+ struct protection_domain *pdom = io_pgtable_ops_to_domain(ops);
+
amd_iommu_domain_flush_pages(pdom, o_iova, size);
+ }
if (mapped)
*mapped += mapped_size;
--
2.46.0