From: Vasant Hegde via iommu <iommu@lists.linux-foundation.org>
To: <iommu@lists.linux-foundation.org>
Cc: Vasant Hegde <vasant.hegde@amd.com>
Subject: [PATCH v2 11/37] iommu/amd: Introduce per PCI segment device table size
Date: Mon, 25 Apr 2022 17:03:49 +0530 [thread overview]
Message-ID: <20220425113415.24087-12-vasant.hegde@amd.com> (raw)
In-Reply-To: <20220425113415.24087-1-vasant.hegde@amd.com>
With multiple PCI segment support, the number of BDFs supported by each
segment may differ. Hence introduce a per-segment device table size,
which depends on last_bdf. This will replace the global
"device_table_size" variable.
Co-developed-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
Signed-off-by: Vasant Hegde <vasant.hegde@amd.com>
---
drivers/iommu/amd/amd_iommu_types.h | 3 +++
drivers/iommu/amd/init.c | 18 ++++++++++--------
2 files changed, 13 insertions(+), 8 deletions(-)
diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h
index e39e7db54e69..aa666d0723ba 100644
--- a/drivers/iommu/amd/amd_iommu_types.h
+++ b/drivers/iommu/amd/amd_iommu_types.h
@@ -547,6 +547,9 @@ struct amd_iommu_pci_seg {
/* Largest PCI device id we expect translation requests for */
u16 last_bdf;
+ /* Size of the device table */
+ u32 dev_table_size;
+
/*
* device table virtual address
*
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index 71f39551a83a..f8da686182b5 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -414,6 +414,7 @@ static void iommu_set_cwwb_range(struct amd_iommu *iommu)
static void iommu_set_device_table(struct amd_iommu *iommu)
{
u64 entry;
+ u32 dev_table_size = iommu->pci_seg->dev_table_size;
BUG_ON(iommu->mmio_base == NULL);
@@ -651,7 +652,7 @@ static int __init find_last_devid_acpi(struct acpi_table_header *table, u16 pci_
static inline int __init alloc_dev_table(struct amd_iommu_pci_seg *pci_seg)
{
pci_seg->dev_table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO | GFP_DMA32,
- get_order(dev_table_size));
+ get_order(pci_seg->dev_table_size));
if (!pci_seg->dev_table)
return -ENOMEM;
@@ -661,7 +662,7 @@ static inline int __init alloc_dev_table(struct amd_iommu_pci_seg *pci_seg)
static inline void free_dev_table(struct amd_iommu_pci_seg *pci_seg)
{
free_pages((unsigned long)pci_seg->dev_table,
- get_order(dev_table_size));
+ get_order(pci_seg->dev_table_size));
pci_seg->dev_table = NULL;
}
@@ -1034,7 +1035,7 @@ static bool __copy_device_table(struct amd_iommu *iommu)
entry = (((u64) hi) << 32) + lo;
old_devtb_size = ((entry & ~PAGE_MASK) + 1) << 12;
- if (old_devtb_size != dev_table_size) {
+ if (old_devtb_size != pci_seg->dev_table_size) {
pr_err("The device table size of IOMMU:%d is not expected!\n",
iommu->index);
return false;
@@ -1053,15 +1054,15 @@ static bool __copy_device_table(struct amd_iommu *iommu)
}
old_devtb = (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT) && is_kdump_kernel())
? (__force void *)ioremap_encrypted(old_devtb_phys,
- dev_table_size)
- : memremap(old_devtb_phys, dev_table_size, MEMREMAP_WB);
+ pci_seg->dev_table_size)
+ : memremap(old_devtb_phys, pci_seg->dev_table_size, MEMREMAP_WB);
if (!old_devtb)
return false;
gfp_flag = GFP_KERNEL | __GFP_ZERO | GFP_DMA32;
pci_seg->old_dev_tbl_cpy = (void *)__get_free_pages(gfp_flag,
- get_order(dev_table_size));
+ get_order(pci_seg->dev_table_size));
if (pci_seg->old_dev_tbl_cpy == NULL) {
pr_err("Failed to allocate memory for copying old device table!\n");
memunmap(old_devtb);
@@ -1580,6 +1581,7 @@ static struct amd_iommu_pci_seg *__init alloc_pci_segment(u16 id,
pci_seg->last_bdf = last_bdf;
DUMP_printk("PCI segment : 0x%0x, last bdf : 0x%04x\n", id, last_bdf);
+ pci_seg->dev_table_size = tbl_size(DEV_TABLE_ENTRY_SIZE);
pci_seg->id = id;
init_llist_head(&pci_seg->dev_data_list);
@@ -2675,7 +2677,7 @@ static void early_enable_iommus(void)
for_each_pci_segment(pci_seg) {
if (pci_seg->old_dev_tbl_cpy != NULL) {
free_pages((unsigned long)pci_seg->old_dev_tbl_cpy,
- get_order(dev_table_size));
+ get_order(pci_seg->dev_table_size));
pci_seg->old_dev_tbl_cpy = NULL;
}
}
@@ -2689,7 +2691,7 @@ static void early_enable_iommus(void)
for_each_pci_segment(pci_seg) {
free_pages((unsigned long)pci_seg->dev_table,
- get_order(dev_table_size));
+ get_order(pci_seg->dev_table_size));
pci_seg->dev_table = pci_seg->old_dev_tbl_cpy;
}
--
2.27.0
_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu
next prev parent reply other threads:[~2022-04-25 11:37 UTC|newest]
Thread overview: 53+ messages / expand[flat|nested] mbox.gz Atom feed top
2022-04-25 11:33 [PATCH v2 00/37] iommu/amd: Add multiple PCI segments support Vasant Hegde via iommu
2022-04-25 11:33 ` [PATCH v2 01/37] iommu/amd: Update struct iommu_dev_data defination Vasant Hegde via iommu
2022-04-28 9:55 ` Joerg Roedel
2022-04-29 14:34 ` Vasant Hegde via iommu
2022-04-25 11:33 ` [PATCH v2 02/37] iommu/amd: Introduce pci segment structure Vasant Hegde via iommu
2022-04-28 9:54 ` Joerg Roedel
2022-04-29 14:41 ` Vasant Hegde via iommu
2022-04-25 11:33 ` [PATCH v2 03/37] iommu/amd: Introduce per PCI segment device table Vasant Hegde via iommu
2022-04-25 11:33 ` [PATCH v2 04/37] iommu/amd: Introduce per PCI segment rlookup table Vasant Hegde via iommu
2022-04-25 11:33 ` [PATCH v2 05/37] iommu/amd: Introduce per PCI segment irq_lookup_table Vasant Hegde via iommu
2022-04-25 11:33 ` [PATCH v2 06/37] iommu/amd: Introduce per PCI segment dev_data_list Vasant Hegde via iommu
2022-04-25 11:33 ` [PATCH v2 07/37] iommu/amd: Introduce per PCI segment old_dev_tbl_cpy Vasant Hegde via iommu
2022-04-25 11:33 ` [PATCH v2 08/37] iommu/amd: Introduce per PCI segment alias_table Vasant Hegde via iommu
2022-04-25 11:33 ` [PATCH v2 09/37] iommu/amd: Introduce per PCI segment unity map list Vasant Hegde via iommu
2022-04-25 11:33 ` [PATCH v2 10/37] iommu/amd: Introduce per PCI segment last_bdf Vasant Hegde via iommu
2022-04-28 10:10 ` Joerg Roedel
2022-04-29 14:45 ` Vasant Hegde via iommu
2022-05-02 10:54 ` Joerg Roedel
2022-05-05 9:09 ` Vasant Hegde via iommu
2022-04-25 11:33 ` Vasant Hegde via iommu [this message]
2022-04-28 10:14 ` [PATCH v2 11/37] iommu/amd: Introduce per PCI segment device table size Joerg Roedel
2022-04-25 11:33 ` [PATCH v2 12/37] iommu/amd: Introduce per PCI segment alias " Vasant Hegde via iommu
2022-04-25 11:33 ` [PATCH v2 13/37] iommu/amd: Introduce per PCI segment rlookup " Vasant Hegde via iommu
2022-04-25 11:33 ` [PATCH v2 14/37] iommu/amd: Convert to use per PCI segment irq_lookup_table Vasant Hegde via iommu
2022-04-25 11:33 ` [PATCH v2 15/37] iommu/amd: Convert to use rlookup_amd_iommu helper function Vasant Hegde via iommu
2022-04-25 11:33 ` [PATCH v2 16/37] iommu/amd: Update irq_remapping_alloc to use IOMMU lookup " Vasant Hegde via iommu
2022-04-25 11:33 ` [PATCH v2 17/37] iommu/amd: Introduce struct amd_ir_data.iommu Vasant Hegde via iommu
2022-04-25 11:33 ` [PATCH v2 18/37] iommu/amd: Update amd_irte_ops functions Vasant Hegde via iommu
2022-04-25 11:33 ` [PATCH v2 19/37] iommu/amd: Update alloc_irq_table and alloc_irq_index Vasant Hegde via iommu
2022-04-25 11:33 ` [PATCH v2 20/37] iommu/amd: Convert to use per PCI segment rlookup_table Vasant Hegde via iommu
2022-04-25 11:33 ` [PATCH v2 21/37] iommu/amd: Update set_dte_entry and clear_dte_entry Vasant Hegde via iommu
2022-04-25 11:34 ` [PATCH v2 22/37] iommu/amd: Update iommu_ignore_device Vasant Hegde via iommu
2022-04-25 11:34 ` [PATCH v2 23/37] iommu/amd: Update dump_dte_entry Vasant Hegde via iommu
2022-04-25 11:34 ` [PATCH v2 24/37] iommu/amd: Update set_dte_irq_entry Vasant Hegde via iommu
2022-04-25 11:34 ` [PATCH v2 25/37] iommu/amd: Update (un)init_device_table_dma() Vasant Hegde via iommu
2022-04-25 11:34 ` [PATCH v2 26/37] iommu/amd: Update set_dev_entry_bit() and get_dev_entry_bit() Vasant Hegde via iommu
2022-04-25 11:34 ` [PATCH v2 27/37] iommu/amd: Remove global amd_iommu_dev_table Vasant Hegde via iommu
2022-04-28 10:15 ` Joerg Roedel
2022-04-29 14:39 ` Vasant Hegde via iommu
2022-04-25 11:34 ` [PATCH v2 28/37] iommu/amd: Remove global amd_iommu_alias_table Vasant Hegde via iommu
2022-04-25 11:34 ` [PATCH v2 29/37] iommu/amd: Remove global amd_iommu_last_bdf Vasant Hegde via iommu
2022-04-25 11:34 ` [PATCH v2 30/37] iommu/amd: Flush upto last_bdf only Vasant Hegde via iommu
2022-04-25 11:34 ` [PATCH v2 31/37] iommu/amd: Introduce get_device_sbdf_id() helper function Vasant Hegde via iommu
2022-04-25 11:34 ` [PATCH v2 32/37] iommu/amd: Include PCI segment ID when initialize IOMMU Vasant Hegde via iommu
2022-04-25 11:34 ` [PATCH v2 33/37] iommu/amd: Specify PCI segment ID when getting pci device Vasant Hegde via iommu
2022-04-25 11:34 ` [PATCH v2 34/37] iommu/amd: Add PCI segment support for ivrs_ioapic, ivrs_hpet, ivrs_acpihid commands Vasant Hegde via iommu
2022-04-25 11:34 ` [PATCH v2 35/37] iommu/amd: Print PCI segment ID in error log messages Vasant Hegde via iommu
2022-04-25 11:34 ` [PATCH v2 36/37] iommu/amd: Update device_state structure to include PCI seg ID Vasant Hegde via iommu
2022-04-25 11:34 ` [PATCH v2 37/37] iommu/amd: Update amd_iommu_fault " Vasant Hegde via iommu
2022-04-28 10:19 ` Joerg Roedel
2022-04-29 14:37 ` Vasant Hegde via iommu
2022-04-28 10:22 ` [PATCH v2 00/37] iommu/amd: Add multiple PCI segments support Joerg Roedel
2022-04-29 14:35 ` Vasant Hegde via iommu
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20220425113415.24087-12-vasant.hegde@amd.com \
--to=iommu@lists.linux-foundation.org \
--cc=vasant.hegde@amd.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox