Linux IOMMU Development
 help / color / mirror / Atom feed
From: Vasant Hegde <vasant.hegde@amd.com>
To: <iommu@lists.linux.dev>, <joro@8bytes.org>
Cc: <will@kernel.org>, <robin.murphy@arm.com>,
	<suravee.suthikulpanit@amd.com>,
	Vasant Hegde <vasant.hegde@amd.com>
Subject: [PATCH v2 01/10] iommu/amd: Use ida interface to manage protection domain ID
Date: Tue, 10 Sep 2024 06:58:03 +0000	[thread overview]
Message-ID: <20240910065812.6091-2-vasant.hegde@amd.com> (raw)
In-Reply-To: <20240910065812.6091-1-vasant.hegde@amd.com>

Replace the custom bitmap-based protection domain ID allocator with the
IDA interface. The IDA handles ID allocation, range limits and internal
locking itself, removing the need for the open-coded allocation bitmap
and the pd_bitmap_lock spinlock.

Signed-off-by: Vasant Hegde <vasant.hegde@amd.com>
---
 drivers/iommu/amd/amd_iommu.h       |  2 +
 drivers/iommu/amd/amd_iommu_types.h |  3 --
 drivers/iommu/amd/init.c            | 29 +++++----------
 drivers/iommu/amd/iommu.c           | 58 ++++++++++++++---------------
 4 files changed, 40 insertions(+), 52 deletions(-)

diff --git a/drivers/iommu/amd/amd_iommu.h b/drivers/iommu/amd/amd_iommu.h
index 6386fa4556d9..6f28f435ed62 100644
--- a/drivers/iommu/amd/amd_iommu.h
+++ b/drivers/iommu/amd/amd_iommu.h
@@ -46,6 +46,8 @@ extern int amd_iommu_gpt_level;
 extern unsigned long amd_iommu_pgsize_bitmap;
 
 /* Protection domain ops */
+int amd_iommu_pdom_id_alloc(int min, int max);
+void amd_iommu_pdom_id_destroy(void);
 struct protection_domain *protection_domain_alloc(unsigned int type, int nid);
 void protection_domain_free(struct protection_domain *domain);
 struct iommu_domain *amd_iommu_domain_alloc_sva(struct device *dev,
diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h
index 601fb4ee6900..d32a86434de3 100644
--- a/drivers/iommu/amd/amd_iommu_types.h
+++ b/drivers/iommu/amd/amd_iommu_types.h
@@ -912,9 +912,6 @@ struct unity_map_entry {
 /* size of the dma_ops aperture as power of 2 */
 extern unsigned amd_iommu_aperture_order;
 
-/* allocation bitmap for domain ids */
-extern unsigned long *amd_iommu_pd_alloc_bitmap;
-
 extern bool amd_iommu_force_isolation;
 
 /* Max levels of glxval supported */
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index 43131c3a2172..a1156d928bc4 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -194,12 +194,6 @@ bool amd_iommu_force_isolation __read_mostly;
 
 unsigned long amd_iommu_pgsize_bitmap __ro_after_init = AMD_IOMMU_PGSIZES;
 
-/*
- * AMD IOMMU allows up to 2^16 different protection domains. This is a bitmap
- * to know which ones are already in use.
- */
-unsigned long *amd_iommu_pd_alloc_bitmap;
-
 enum iommu_init_state {
 	IOMMU_START_STATE,
 	IOMMU_IVRS_DETECTED,
@@ -1082,7 +1076,11 @@ static bool __copy_device_table(struct amd_iommu *iommu)
 		if (dte_v && dom_id) {
 			pci_seg->old_dev_tbl_cpy[devid].data[0] = old_devtb[devid].data[0];
 			pci_seg->old_dev_tbl_cpy[devid].data[1] = old_devtb[devid].data[1];
-			__set_bit(dom_id, amd_iommu_pd_alloc_bitmap);
+			if (amd_iommu_pdom_id_alloc(dom_id, dom_id) != dom_id) {
+				pr_err("Failed to reserve domain ID 0x%x\n", dom_id);
+				memunmap(old_devtb);
+				return false;
+			}
 			/* If gcr3 table existed, mask it out */
 			if (old_devtb[devid].data[0] & DTE_FLAG_GV) {
 				tmp = DTE_GCR3_VAL_B(~0ULL) << DTE_GCR3_SHIFT_B;
@@ -2994,9 +2992,7 @@ static bool __init check_ioapic_information(void)
 
 static void __init free_dma_resources(void)
 {
-	iommu_free_pages(amd_iommu_pd_alloc_bitmap,
-			 get_order(MAX_DOMAIN_ID / 8));
-	amd_iommu_pd_alloc_bitmap = NULL;
+	amd_iommu_pdom_id_destroy();
 
 	free_unity_maps();
 }
@@ -3064,19 +3060,14 @@ static int __init early_amd_iommu_init(void)
 	amd_iommu_target_ivhd_type = get_highest_supported_ivhd_type(ivrs_base);
 	DUMP_printk("Using IVHD type %#x\n", amd_iommu_target_ivhd_type);
 
-	/* Device table - directly used by all IOMMUs */
-	ret = -ENOMEM;
-
-	amd_iommu_pd_alloc_bitmap = iommu_alloc_pages(GFP_KERNEL,
-						      get_order(MAX_DOMAIN_ID / 8));
-	if (amd_iommu_pd_alloc_bitmap == NULL)
-		goto out;
-
 	/*
 	 * never allocate domain 0 because its used as the non-allocated and
 	 * error value placeholder
 	 */
-	__set_bit(0, amd_iommu_pd_alloc_bitmap);
+	if (amd_iommu_pdom_id_alloc(0, 0) != 0) {
+		ret = -ENOMEM;
+		goto out;
+	}
 
 	/*
 	 * now the data structures are allocated and basically initialized
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index 154f90e7b98e..fc1969011ff9 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -18,6 +18,7 @@
 #include <linux/scatterlist.h>
 #include <linux/dma-map-ops.h>
 #include <linux/dma-direct.h>
+#include <linux/idr.h>
 #include <linux/iommu-helper.h>
 #include <linux/delay.h>
 #include <linux/amd-iommu.h>
@@ -52,8 +53,6 @@
 #define HT_RANGE_START		(0xfd00000000ULL)
 #define HT_RANGE_END		(0xffffffffffULL)
 
-static DEFINE_SPINLOCK(pd_bitmap_lock);
-
 LIST_HEAD(ioapic_map);
 LIST_HEAD(hpet_map);
 LIST_HEAD(acpihid_map);
@@ -70,6 +69,12 @@ struct iommu_cmd {
 	u32 data[4];
 };
 
+/*
+ * AMD IOMMU allows up to 2^16 different protection domains. This IDA
+ * tracks which domain IDs are already in use.
+ */
+static DEFINE_IDA(pdom_ids);
+
 struct kmem_cache *amd_iommu_irq_cache;
 
 static void detach_device(struct device *dev);
@@ -1640,31 +1645,19 @@ int amd_iommu_complete_ppr(struct device *dev, u32 pasid, int status, int tag)
  *
  ****************************************************************************/
 
-static u16 domain_id_alloc(void)
+int amd_iommu_pdom_id_alloc(int min, int max)
 {
-	unsigned long flags;
-	int id;
-
-	spin_lock_irqsave(&pd_bitmap_lock, flags);
-	id = find_first_zero_bit(amd_iommu_pd_alloc_bitmap, MAX_DOMAIN_ID);
-	BUG_ON(id == 0);
-	if (id > 0 && id < MAX_DOMAIN_ID)
-		__set_bit(id, amd_iommu_pd_alloc_bitmap);
-	else
-		id = 0;
-	spin_unlock_irqrestore(&pd_bitmap_lock, flags);
-
-	return id;
+	return ida_alloc_range(&pdom_ids, min, max, GFP_ATOMIC);
 }
 
-static void domain_id_free(int id)
+void amd_iommu_pdom_id_destroy(void)
 {
-	unsigned long flags;
+	ida_destroy(&pdom_ids);
+}
 
-	spin_lock_irqsave(&pd_bitmap_lock, flags);
-	if (id > 0 && id < MAX_DOMAIN_ID)
-		__clear_bit(id, amd_iommu_pd_alloc_bitmap);
-	spin_unlock_irqrestore(&pd_bitmap_lock, flags);
+static void pdom_id_free(int id)
+{
+	ida_free(&pdom_ids, id);
 }
 
 static void free_gcr3_tbl_level1(u64 *tbl)
@@ -1709,7 +1702,7 @@ static void free_gcr3_table(struct gcr3_tbl_info *gcr3_info)
 	gcr3_info->glx = 0;
 
 	/* Free per device domain ID */
-	domain_id_free(gcr3_info->domid);
+	pdom_id_free(gcr3_info->domid);
 
 	iommu_free_page(gcr3_info->gcr3_tbl);
 	gcr3_info->gcr3_tbl = NULL;
@@ -1736,6 +1729,7 @@ static int setup_gcr3_table(struct gcr3_tbl_info *gcr3_info,
 {
 	int levels = get_gcr3_levels(pasids);
 	int nid = iommu ? dev_to_node(&iommu->dev->dev) : NUMA_NO_NODE;
+	int domid;
 
 	if (levels > amd_iommu_max_glx_val)
 		return -EINVAL;
@@ -1744,11 +1738,14 @@ static int setup_gcr3_table(struct gcr3_tbl_info *gcr3_info,
 		return -EBUSY;
 
 	/* Allocate per device domain ID */
-	gcr3_info->domid = domain_id_alloc();
+	domid = amd_iommu_pdom_id_alloc(1, MAX_DOMAIN_ID - 1);
+	if (domid <= 0)
+		return -ENOSPC;
+	gcr3_info->domid = domid;
 
 	gcr3_info->gcr3_tbl = iommu_alloc_page_node(nid, GFP_ATOMIC);
 	if (gcr3_info->gcr3_tbl == NULL) {
-		domain_id_free(gcr3_info->domid);
+		pdom_id_free(domid);
 		return -ENOMEM;
 	}
 
@@ -2259,7 +2256,7 @@ void protection_domain_free(struct protection_domain *domain)
 	WARN_ON(!list_empty(&domain->dev_list));
 	if (domain->domain.type & __IOMMU_DOMAIN_PAGING)
 		free_io_pgtable_ops(&domain->iop.pgtbl.ops);
-	domain_id_free(domain->id);
+	pdom_id_free(domain->id);
 	kfree(domain);
 }
 
@@ -2267,15 +2264,16 @@ struct protection_domain *protection_domain_alloc(unsigned int type, int nid)
 {
 	struct io_pgtable_ops *pgtbl_ops;
 	struct protection_domain *domain;
-	int pgtable;
+	int pgtable, domid;
 
 	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
 	if (!domain)
 		return NULL;
 
-	domain->id = domain_id_alloc();
-	if (!domain->id)
+	domid = amd_iommu_pdom_id_alloc(1, MAX_DOMAIN_ID - 1);
+	if (domid <= 0)
 		goto err_free;
+	domain->id = domid;
 
 	spin_lock_init(&domain->lock);
 	INIT_LIST_HEAD(&domain->dev_list);
@@ -2319,7 +2317,7 @@ struct protection_domain *protection_domain_alloc(unsigned int type, int nid)
 
 	return domain;
 err_id:
-	domain_id_free(domain->id);
+	pdom_id_free(domain->id);
 err_free:
 	kfree(domain);
 	return NULL;
-- 
2.31.1


  reply	other threads:[~2024-09-10  6:58 UTC|newest]

Thread overview: 28+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2024-09-10  6:58 [PATCH v2 00/10] iommu/amd: Improve domain allocator and device attach code path Vasant Hegde
2024-09-10  6:58 ` Vasant Hegde [this message]
2024-10-15  8:38   ` [PATCH v2 01/10] iommu/amd: Use ida interface to manage protection domain ID Joerg Roedel
2024-09-10  6:58 ` [PATCH v2 02/10] iommu/amd: Remove protection_domain.dev_cnt variable Vasant Hegde
2024-10-15  8:39   ` Joerg Roedel
2024-09-10  6:58 ` [PATCH v2 03/10] iommu/amd: xarray to track protection_domain->iommu list Vasant Hegde
2024-10-15  8:39   ` Joerg Roedel
2024-09-10  6:58 ` [PATCH v2 04/10] iommu/amd: Remove unused amd_iommus variable Vasant Hegde
2024-10-15  8:39   ` Joerg Roedel
2024-09-10  6:58 ` [PATCH v2 05/10] iommu/amd: Do not detach devices in domain free path Vasant Hegde
2024-10-15  8:40   ` Joerg Roedel
2024-09-10  6:58 ` [PATCH v2 06/10] iommu/amd: Reduce domain lock scope in attach device path Vasant Hegde
2024-10-15  8:38   ` Joerg Roedel
2024-10-15 12:30     ` Vasant Hegde
2024-10-15 16:12       ` Jason Gunthorpe
2024-10-15 16:45         ` Vasant Hegde
2024-10-15 17:01           ` Jason Gunthorpe
2024-10-16 10:13             ` Vasant Hegde
2024-09-10  6:58 ` [PATCH v2 07/10] iommu/amd: Rearrange attach device code Vasant Hegde
2024-10-15  8:41   ` Joerg Roedel
2024-09-10  6:58 ` [PATCH v2 08/10] iommu/amd: Convert dev_data lock from spinlock to mutex Vasant Hegde
2024-10-15  8:43   ` Joerg Roedel
2024-09-10  6:58 ` [PATCH v2 09/10] iommu/amd: Reorder attach device code Vasant Hegde
2024-10-15  8:44   ` Joerg Roedel
2024-10-15 16:42     ` Vasant Hegde
2024-09-10  6:58 ` [PATCH v2 10/10] iommu/amd: Improve amd_iommu_release_device() Vasant Hegde
2024-10-15  8:45   ` Joerg Roedel
2024-10-04 14:24 ` [PATCH v2 00/10] iommu/amd: Improve domain allocator and device attach code path Vasant Hegde

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20240910065812.6091-2-vasant.hegde@amd.com \
    --to=vasant.hegde@amd.com \
    --cc=iommu@lists.linux.dev \
    --cc=joro@8bytes.org \
    --cc=robin.murphy@arm.com \
    --cc=suravee.suthikulpanit@amd.com \
    --cc=will@kernel.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox