From: Vasant Hegde via iommu <iommu@lists.linux-foundation.org>
To: <iommu@lists.linux-foundation.org>, <joro@8bytes.org>
Cc: Vasant Hegde <vasant.hegde@amd.com>
Subject: [RFC PATCH 03/36] iommu/amd: Introduce per PCI segment rlookup table
Date: Fri, 11 Mar 2022 15:18:21 +0530 [thread overview]
Message-ID: <20220311094854.31595-4-vasant.hegde@amd.com> (raw)
In-Reply-To: <20220311094854.31595-1-vasant.hegde@amd.com>
From: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
This will replace the global rlookup table (amd_iommu_rlookup_table).
Also add helper functions to set/get the rlookup table for a given device.
Co-developed-by: Vasant Hegde <vasant.hegde@amd.com>
Signed-off-by: Vasant Hegde <vasant.hegde@amd.com>
Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
---
drivers/iommu/amd/amd_iommu.h | 1 +
drivers/iommu/amd/amd_iommu_types.h | 8 ++++++
drivers/iommu/amd/init.c | 23 +++++++++++++++
drivers/iommu/amd/iommu.c | 44 +++++++++++++++++++++++++++++
4 files changed, 76 insertions(+)
diff --git a/drivers/iommu/amd/amd_iommu.h b/drivers/iommu/amd/amd_iommu.h
index fa9a4eac45de..4c22683011cf 100644
--- a/drivers/iommu/amd/amd_iommu.h
+++ b/drivers/iommu/amd/amd_iommu.h
@@ -16,6 +16,7 @@ extern irqreturn_t amd_iommu_int_handler(int irq, void *data);
extern void amd_iommu_apply_erratum_63(u16 devid);
extern void amd_iommu_restart_event_logging(struct amd_iommu *iommu);
extern int amd_iommu_init_api(void);
+extern void amd_iommu_set_rlookup_table(struct amd_iommu *iommu, u16 devid);
#ifdef CONFIG_AMD_IOMMU_DEBUGFS
void amd_iommu_debugfs_setup(struct amd_iommu *iommu);
diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h
index 90d3a953bf97..0eeac1d23786 100644
--- a/drivers/iommu/amd/amd_iommu_types.h
+++ b/drivers/iommu/amd/amd_iommu_types.h
@@ -486,6 +486,7 @@ struct amd_iommu_fault {
};
+struct amd_iommu;
struct iommu_domain;
struct irq_domain;
struct amd_irte_ops;
@@ -549,6 +550,13 @@ struct amd_iommu_pci_seg {
* page table root pointer.
*/
struct dev_table_entry *dev_table;
+
+ /*
+ * The rlookup iommu table is used to find the IOMMU which is
+ * responsible for a specific device. It is indexed by the PCI
+ * device id.
+ */
+ struct amd_iommu **rlookup_table;
};
/*
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index 0fd1071bfc85..a2efc02ba80a 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -663,6 +663,26 @@ static inline void free_dev_table(struct amd_iommu_pci_seg *pci_seg)
pci_seg->dev_table = NULL;
}
+/* Allocate per PCI segment IOMMU rlookup table. */
+static inline int __init alloc_rlookup_table(struct amd_iommu_pci_seg *pci_seg)
+{
+ pci_seg->rlookup_table = (void *)__get_free_pages(
+ GFP_KERNEL | __GFP_ZERO,
+ get_order(rlookup_table_size));
+ if (pci_seg->rlookup_table == NULL)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static inline void free_rlookup_table(struct amd_iommu_pci_seg *pci_seg)
+{
+ free_pages((unsigned long)pci_seg->rlookup_table,
+ get_order(rlookup_table_size));
+ pci_seg->rlookup_table = NULL;
+}
+
+
/*
* Allocates the command buffer. This buffer is per AMD IOMMU. We can
* write commands to that buffer later and the IOMMU will execute them
@@ -1489,6 +1509,8 @@ static struct amd_iommu_pci_seg *__init alloc_pci_segment(u16 id)
if (alloc_dev_table(pci_seg))
return NULL;
+ if (alloc_rlookup_table(pci_seg))
+ return NULL;
return pci_seg;
}
@@ -1511,6 +1533,7 @@ static void __init free_pci_segment(void)
for_each_pci_segment_safe(pci_seg, next) {
list_del(&pci_seg->list);
+ free_rlookup_table(pci_seg);
free_dev_table(pci_seg);
kfree(pci_seg);
}
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index bf607cce3234..fa473ee5be28 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -146,6 +146,50 @@ struct dev_table_entry *get_dev_table(struct amd_iommu *iommu)
return dev_table;
}
+static inline u16 get_device_segment(struct device *dev)
+{
+ u16 seg;
+
+ if (dev_is_pci(dev)) {
+ struct pci_dev *pdev = to_pci_dev(dev);
+
+ seg = pci_domain_nr(pdev->bus);
+ } else {
+ u32 devid = get_acpihid_device_id(dev, NULL);
+
+ seg = (devid >> 16) & 0xffff;
+ }
+
+ return seg;
+}
+
+/* Writes the specific IOMMU for a device into the PCI segment rlookup table */
+void amd_iommu_set_rlookup_table(struct amd_iommu *iommu, u16 devid)
+{
+ struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
+
+ pci_seg->rlookup_table[devid] = iommu;
+}
+
+static struct amd_iommu *__rlookup_amd_iommu(u16 seg, u16 devid)
+{
+ struct amd_iommu_pci_seg *pci_seg;
+
+ for_each_pci_segment(pci_seg) {
+ if (pci_seg->id == seg)
+ return pci_seg->rlookup_table[devid];
+ }
+ return NULL;
+}
+
+static struct amd_iommu *rlookup_amd_iommu(struct device *dev)
+{
+ u16 seg = get_device_segment(dev);
+ u16 devid = get_device_id(dev);
+
+ return __rlookup_amd_iommu(seg, devid);
+}
+
static struct protection_domain *to_pdomain(struct iommu_domain *dom)
{
return container_of(dom, struct protection_domain, domain);
--
2.27.0
_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu
next prev parent reply other threads:[~2022-03-11 9:50 UTC|newest]
Thread overview: 37+ messages / expand[flat|nested] mbox.gz Atom feed top
2022-03-11 9:48 [RFC PATCH 00/36] iommu/amd: Add multiple PCI segments support Vasant Hegde via iommu
2022-03-11 9:48 ` [RFC PATCH 01/36] iommu/amd: Introduce pci segment structure Vasant Hegde via iommu
2022-03-11 9:48 ` [RFC PATCH 02/36] iommu/amd: Introduce per PCI segment device table Vasant Hegde via iommu
2022-03-11 9:48 ` Vasant Hegde via iommu [this message]
2022-03-11 9:48 ` [RFC PATCH 04/36] iommu/amd: Introduce per PCI segment irq_lookup_table Vasant Hegde via iommu
2022-03-11 9:48 ` [RFC PATCH 05/36] iommu/amd: Introduce per PCI segment dev_data_list Vasant Hegde via iommu
2022-03-11 9:48 ` [RFC PATCH 06/36] iommu/amd: Introduce per PCI segment old_dev_tbl_cpy Vasant Hegde via iommu
2022-03-11 9:48 ` [RFC PATCH 07/36] iommu/amd: Introduce per PCI segment alias_table Vasant Hegde via iommu
2022-03-11 9:48 ` [RFC PATCH 08/36] iommu/amd: Introduce per PCI segment unity map list Vasant Hegde via iommu
2022-03-11 9:48 ` [RFC PATCH 09/36] iommu/amd: Introduce per PCI segment last_bdf Vasant Hegde via iommu
2022-03-11 9:48 ` [RFC PATCH 10/36] iommu/amd: Introduce per PCI segment device table size Vasant Hegde via iommu
2022-03-11 9:48 ` [RFC PATCH 11/36] iommu/amd: Introduce per PCI segment alias " Vasant Hegde via iommu
2022-03-11 9:48 ` [RFC PATCH 12/36] iommu/amd: Introduce per PCI segment rlookup " Vasant Hegde via iommu
2022-03-11 9:48 ` [RFC PATCH 13/36] iommu/amd: Convert to use per PCI segment irq_lookup_table Vasant Hegde via iommu
2022-03-11 9:48 ` [RFC PATCH 14/36] iommu/amd: Convert to use rlookup_amd_iommu helper function Vasant Hegde via iommu
2022-03-11 9:48 ` [RFC PATCH 15/36] iommu/amd: Update irq_remapping_alloc to use IOMMU lookup " Vasant Hegde via iommu
2022-03-11 9:48 ` [RFC PATCH 16/36] iommu/amd: Introduce struct amd_ir_data.iommu Vasant Hegde via iommu
2022-03-11 9:48 ` [RFC PATCH 17/36] iommu/amd: Update amd_irte_ops functions Vasant Hegde via iommu
2022-03-11 9:48 ` [RFC PATCH 18/36] iommu/amd: Update alloc_irq_table and alloc_irq_index Vasant Hegde via iommu
2022-03-11 9:48 ` [RFC PATCH 19/36] iommu/amd: Convert to use per PCI segment rlookup_table Vasant Hegde via iommu
2022-03-11 9:48 ` [RFC PATCH 20/36] iommu/amd: Update set_dte_entry and clear_dte_entry Vasant Hegde via iommu
2022-03-11 9:48 ` [RFC PATCH 21/36] iommu/amd: Update iommu_ignore_device Vasant Hegde via iommu
2022-03-11 9:48 ` [RFC PATCH 22/36] iommu/amd: Update dump_dte_entry Vasant Hegde via iommu
2022-03-11 9:48 ` [RFC PATCH 23/36] iommu/amd: Update set_dte_irq_entry Vasant Hegde via iommu
2022-03-11 9:48 ` [RFC PATCH 24/36] iommu/amd: Update (un)init_device_table_dma() Vasant Hegde via iommu
2022-03-11 9:48 ` [RFC PATCH 25/36] iommu/amd: Update set_dev_entry_bit() and get_dev_entry_bit() Vasant Hegde via iommu
2022-03-11 9:48 ` [RFC PATCH 26/36] iommu/amd: Remove global amd_iommu_dev_table Vasant Hegde via iommu
2022-03-11 9:48 ` [RFC PATCH 27/36] iommu/amd: Remove global amd_iommu_alias_table Vasant Hegde via iommu
2022-03-11 9:48 ` [RFC PATCH 28/36] iommu/amd: Remove global amd_iommu_last_bdf Vasant Hegde via iommu
2022-03-11 9:48 ` [RFC PATCH 29/36] iommu/amd: Flush upto last_bdf only Vasant Hegde via iommu
2022-03-11 9:48 ` [RFC PATCH 30/36] iommu/amd: Introduce get_device_sbdf_id() helper function Vasant Hegde via iommu
2022-03-11 9:48 ` [RFC PATCH 31/36] iommu/amd: Include PCI segment ID when initialize IOMMU Vasant Hegde via iommu
2022-03-11 9:48 ` [RFC PATCH 32/36] iommu/amd: Specify PCI segment ID when getting pci device Vasant Hegde via iommu
2022-03-11 9:48 ` [RFC PATCH 33/36] iommu/amd: Add PCI segment support for ivrs_ioapic, ivrs_hpet, ivrs_acpihid commands Vasant Hegde via iommu
2022-03-11 9:48 ` [RFC PATCH 34/36] iommu/amd: Print PCI segment ID in error log messages Vasant Hegde via iommu
2022-03-11 9:48 ` [RFC PATCH 35/36] iommu/amd: Update device_state structure to include PCI seg ID Vasant Hegde via iommu
2022-03-11 9:48 ` [RFC PATCH 36/36] iommu/amd: Update amd_iommu_fault " Vasant Hegde via iommu
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20220311094854.31595-4-vasant.hegde@amd.com \
--to=iommu@lists.linux-foundation.org \
--cc=joro@8bytes.org \
--cc=vasant.hegde@amd.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox