From: Vasant Hegde <vasant.hegde@amd.com>
To: <iommu@lists.linux.dev>, <joro@8bytes.org>
Cc: <will@kernel.org>, <robin.murphy@arm.com>,
<suravee.suthikulpanit@amd.com>,
Vasant Hegde <vasant.hegde@amd.com>
Subject: [PATCH v2 03/10] iommu/amd: Use xarray to track protection_domain->iommu list
Date: Tue, 10 Sep 2024 06:58:05 +0000 [thread overview]
Message-ID: <20240910065812.6091-4-vasant.hegde@amd.com> (raw)
In-Reply-To: <20240910065812.6091-1-vasant.hegde@amd.com>
Use an xarray to track the IOMMUs attached to a protection domain instead of
a static array of size MAX_IOMMUS.
Signed-off-by: Vasant Hegde <vasant.hegde@amd.com>
---
drivers/iommu/amd/amd_iommu_types.h | 8 ++-
drivers/iommu/amd/iommu.c | 85 ++++++++++++++++++++++-------
2 files changed, 72 insertions(+), 21 deletions(-)
diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h
index 418c14aa4ec9..0f030a61ceb2 100644
--- a/drivers/iommu/amd/amd_iommu_types.h
+++ b/drivers/iommu/amd/amd_iommu_types.h
@@ -565,6 +565,12 @@ struct pdom_dev_data {
struct list_head list;
};
+/* Keeps track of the IOMMUs attached to protection domain */
+struct pdom_iommu_info {
+	struct amd_iommu *iommu; /* IOMMU attached to protection domain */
+ u32 refcnt; /* Count of attached dev/pasid per domain/IOMMU */
+};
+
/*
* This structure contains generic data for IOMMU protection domains
* independent of their use.
@@ -578,7 +584,7 @@ struct protection_domain {
u16 id; /* the domain id written to the device table */
enum protection_domain_mode pd_mode; /* Track page table type */
bool dirty_tracking; /* dirty tracking is enabled in the domain */
- unsigned dev_iommu[MAX_IOMMUS]; /* per-IOMMU reference count */
+ struct xarray iommu_array; /* per-IOMMU reference count */
struct mmu_notifier mn; /* mmu notifier for the SVA domain */
struct list_head dev_data_list; /* List of pdom_dev_data */
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index d870aeb654a7..b9b060219596 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -1254,18 +1254,15 @@ static int iommu_completion_wait(struct amd_iommu *iommu)
static void domain_flush_complete(struct protection_domain *domain)
{
- int i;
-
- for (i = 0; i < amd_iommu_get_num_iommus(); ++i) {
- if (domain && !domain->dev_iommu[i])
- continue;
+ struct pdom_iommu_info *pdom_iommu_info;
+ unsigned long i;
- /*
- * Devices of this domain are behind this IOMMU
- * We need to wait for completion of all commands.
- */
- iommu_completion_wait(amd_iommus[i]);
- }
+ /*
+ * Devices of this domain are behind this IOMMU
+ * We need to wait for completion of all commands.
+ */
+ xa_for_each(&domain->iommu_array, i, pdom_iommu_info)
+ iommu_completion_wait(pdom_iommu_info->iommu);
}
static int iommu_flush_dte(struct amd_iommu *iommu, u16 devid)
@@ -1447,21 +1444,20 @@ static int domain_flush_pages_v2(struct protection_domain *pdom,
static int domain_flush_pages_v1(struct protection_domain *pdom,
u64 address, size_t size)
{
+ struct pdom_iommu_info *pdom_iommu_info;
struct iommu_cmd cmd;
- int ret = 0, i;
+ int ret = 0;
+ unsigned long i;
build_inv_iommu_pages(&cmd, address, size,
pdom->id, IOMMU_NO_PASID, false);
- for (i = 0; i < amd_iommu_get_num_iommus(); ++i) {
- if (!pdom->dev_iommu[i])
- continue;
-
+ xa_for_each(&pdom->iommu_array, i, pdom_iommu_info) {
/*
* Devices of this domain are behind this IOMMU
* We need a TLB flush
*/
- ret |= iommu_queue_command(amd_iommus[i], &cmd);
+ ret |= iommu_queue_command(pdom_iommu_info->iommu, &cmd);
}
return ret;
@@ -2016,6 +2012,50 @@ static void destroy_gcr3_table(struct iommu_dev_data *dev_data,
free_gcr3_table(gcr3_info);
}
+static int pdom_attach_iommu(struct amd_iommu *iommu,
+ struct protection_domain *pdom)
+{
+ struct pdom_iommu_info *pdom_iommu_info, *curr;
+
+ pdom_iommu_info = xa_load(&pdom->iommu_array, iommu->index);
+ if (pdom_iommu_info) {
+ pdom_iommu_info->refcnt++;
+ return 0;
+ }
+
+ pdom_iommu_info = kzalloc(sizeof(*pdom_iommu_info), GFP_ATOMIC);
+ if (!pdom_iommu_info)
+ return -ENOMEM;
+
+ pdom_iommu_info->iommu = iommu;
+ pdom_iommu_info->refcnt = 1;
+
+ curr = xa_cmpxchg(&pdom->iommu_array, iommu->index,
+ NULL, pdom_iommu_info, GFP_ATOMIC);
+ if (curr) {
+ kfree(pdom_iommu_info);
+ return -ENOSPC;
+ }
+
+ return 0;
+}
+
+static void pdom_detach_iommu(struct amd_iommu *iommu,
+ struct protection_domain *pdom)
+{
+ struct pdom_iommu_info *pdom_iommu_info;
+
+ pdom_iommu_info = xa_load(&pdom->iommu_array, iommu->index);
+ if (!pdom_iommu_info)
+ return;
+
+ pdom_iommu_info->refcnt--;
+ if (pdom_iommu_info->refcnt == 0) {
+ xa_erase(&pdom->iommu_array, iommu->index);
+ kfree(pdom_iommu_info);
+ }
+}
+
static int do_attach(struct iommu_dev_data *dev_data,
struct protection_domain *domain)
{
@@ -2032,13 +2072,17 @@ static int do_attach(struct iommu_dev_data *dev_data,
cfg->amd.nid = dev_to_node(dev_data->dev);
/* Do reference counting */
- domain->dev_iommu[iommu->index] += 1;
+ ret = pdom_attach_iommu(iommu, domain);
+ if (ret)
+ return ret;
/* Setup GCR3 table */
if (pdom_is_sva_capable(domain)) {
ret = init_gcr3_table(dev_data, domain);
- if (ret)
+ if (ret) {
+ pdom_detach_iommu(iommu, domain);
return ret;
+ }
}
return ret;
@@ -2064,7 +2108,7 @@ static void do_detach(struct iommu_dev_data *dev_data)
list_del(&dev_data->list);
/* decrease reference counters - needs to happen after the flushes */
- domain->dev_iommu[iommu->index] -= 1;
+ pdom_detach_iommu(iommu, domain);
}
/*
@@ -2273,6 +2317,7 @@ struct protection_domain *protection_domain_alloc(unsigned int type, int nid)
spin_lock_init(&domain->lock);
INIT_LIST_HEAD(&domain->dev_list);
INIT_LIST_HEAD(&domain->dev_data_list);
+ xa_init(&domain->iommu_array);
domain->iop.pgtbl.cfg.amd.nid = nid;
switch (type) {
--
2.31.1
next prev parent reply other threads:[~2024-09-10 6:59 UTC|newest]
Thread overview: 28+ messages / expand[flat|nested] mbox.gz Atom feed top
2024-09-10 6:58 [PATCH v2 00/10] iommu/amd: Improve domain allocator and device attach code path Vasant Hegde
2024-09-10 6:58 ` [PATCH v2 01/10] iommu/amd: Use ida interface to manage protection domain ID Vasant Hegde
2024-10-15 8:38 ` Joerg Roedel
2024-09-10 6:58 ` [PATCH v2 02/10] iommu/amd: Remove protection_domain.dev_cnt variable Vasant Hegde
2024-10-15 8:39 ` Joerg Roedel
2024-09-10 6:58 ` Vasant Hegde [this message]
2024-10-15 8:39 ` [PATCH v2 03/10] iommu/amd: xarray to track protection_domain->iommu list Joerg Roedel
2024-09-10 6:58 ` [PATCH v2 04/10] iommu/amd: Remove unused amd_iommus variable Vasant Hegde
2024-10-15 8:39 ` Joerg Roedel
2024-09-10 6:58 ` [PATCH v2 05/10] iommu/amd: Do not detach devices in domain free path Vasant Hegde
2024-10-15 8:40 ` Joerg Roedel
2024-09-10 6:58 ` [PATCH v2 06/10] iommu/amd: Reduce domain lock scope in attach device path Vasant Hegde
2024-10-15 8:38 ` Joerg Roedel
2024-10-15 12:30 ` Vasant Hegde
2024-10-15 16:12 ` Jason Gunthorpe
2024-10-15 16:45 ` Vasant Hegde
2024-10-15 17:01 ` Jason Gunthorpe
2024-10-16 10:13 ` Vasant Hegde
2024-09-10 6:58 ` [PATCH v2 07/10] iommu/amd: Rearrange attach device code Vasant Hegde
2024-10-15 8:41 ` Joerg Roedel
2024-09-10 6:58 ` [PATCH v2 08/10] iommu/amd: Convert dev_data lock from spinlock to mutex Vasant Hegde
2024-10-15 8:43 ` Joerg Roedel
2024-09-10 6:58 ` [PATCH v2 09/10] iommu/amd: Reorder attach device code Vasant Hegde
2024-10-15 8:44 ` Joerg Roedel
2024-10-15 16:42 ` Vasant Hegde
2024-09-10 6:58 ` [PATCH v2 10/10] iommu/amd: Improve amd_iommu_release_device() Vasant Hegde
2024-10-15 8:45 ` Joerg Roedel
2024-10-04 14:24 ` [PATCH v2 00/10] iommu/amd: Improve domain allocator and device attach code path Vasant Hegde
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20240910065812.6091-4-vasant.hegde@amd.com \
--to=vasant.hegde@amd.com \
--cc=iommu@lists.linux.dev \
--cc=joro@8bytes.org \
--cc=robin.murphy@arm.com \
--cc=suravee.suthikulpanit@amd.com \
--cc=will@kernel.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox