From: Nicolin Chen <nicolinc@nvidia.com>
To: <peter.maydell@linaro.org>, <shannon.zhaosl@gmail.com>,
<mst@redhat.com>, <imammedo@redhat.com>, <anisinha@redhat.com>,
<eric.auger@redhat.com>, <peterx@redhat.com>
Cc: <qemu-arm@nongnu.org>, <qemu-devel@nongnu.org>, <jgg@nvidia.com>,
<shameerali.kolothum.thodi@huawei.com>, <jasowang@redhat.com>
Subject: [PATCH RFCv1 09/10] hw/arm/virt-acpi-build: Build IORT with multiple SMMU nodes
Date: Tue, 25 Jun 2024 17:28:36 -0700
Message-ID: <074b51a5a42875304d7d8d223d0bc644fa7a4736.1719361174.git.nicolinc@nvidia.com>
In-Reply-To: <cover.1719361174.git.nicolinc@nvidia.com>
There can be multiple PCI buses behind different SMMU nodes, and each bus/SMMU
pair must also be associated in the IORT table when building the ID mappings.

Create multiple SMMUv3 nodes if needed, and store their table offsets in an
array so that the ID mappings can reference the corresponding node.
Signed-off-by: Nicolin Chen <nicolinc@nvidia.com>
---
hw/arm/virt-acpi-build.c | 36 ++++++++++++++++++++++++++----------
1 file changed, 26 insertions(+), 10 deletions(-)
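[Illustration, not part of the patch: a minimal standalone sketch of the
addressing scheme this series assumes, where each nested SMMU instance derives
its MMIO base and IRQs from a fixed per-instance stride, and the IORT offset
of every SMMUv3 node is remembered in an array so that later ID mappings can
point at the right node. The concrete values below (base address, SPI number,
stride, IRQ count, node sizes) are placeholders chosen for the sketch, not
taken from the patch itself.]

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Placeholder values for illustration; in QEMU the real ones come from the
 * virt machine's memmap/irqmap and the IORT entry-size macros. */
#define SMMU_IO_LEN            0x20000ULL  /* assumed per-instance MMIO stride */
#define NUM_SMMU_IRQS          4           /* Event, PRI, GERR, Sync */
#define SMMU_V3_ENTRY_SIZE     68          /* SMMUv3 node without ID mappings */
#define ID_MAPPING_ENTRY_SIZE  20

int main(void)
{
    uint64_t base = 0x09050000ULL;  /* hypothetical VIRT_NESTED_SMMU base */
    int irq_base = 0x100;           /* hypothetical SPI number of SMMU 0 */
    int num_smmus = 3;              /* e.g. three host SMMUv3 instances */
    size_t table_len = 48;          /* pretend the ITS group node ends here */
    size_t *smmu_offset = calloc(num_smmus, sizeof(*smmu_offset));

    for (int i = 0; i < num_smmus; i++) {
        /* Remember where this SMMUv3 node starts inside the IORT so that
         * the root-complex and RMR ID mappings can reference it later. */
        smmu_offset[i] = table_len;
        table_len += SMMU_V3_ENTRY_SIZE + ID_MAPPING_ENTRY_SIZE;

        printf("SMMU%d: IORT offset %zu, base 0x%llx, event IRQ %d\n",
               i, smmu_offset[i],
               (unsigned long long)(base + i * SMMU_IO_LEN),
               irq_base + i * NUM_SMMU_IRQS);
    }

    free(smmu_offset);
    return 0;
}

Under these assumptions each instance gets a disjoint MMIO window and IRQ
block, and the RC/RMR ID mappings simply index smmu_offset[] with the same
loop counter used to emit the SMMUv3 nodes.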
diff --git a/hw/arm/virt-acpi-build.c b/hw/arm/virt-acpi-build.c
index 91f53f90ca..6d8b9aea42 100644
--- a/hw/arm/virt-acpi-build.c
+++ b/hw/arm/virt-acpi-build.c
@@ -295,7 +295,7 @@ static int iort_idmap_compare(gconstpointer a, gconstpointer b)
}
static void
-build_iort_rmr_nodes(GArray *table_data, GArray *smmu_idmaps, int smmu_offset, uint32_t *id) {
+build_iort_rmr_nodes(GArray *table_data, GArray *smmu_idmaps, size_t *smmu_offset, uint32_t *id) {
AcpiIortIdMapping *range;
int i;
@@ -323,7 +323,7 @@ build_iort_rmr_nodes(GArray *table_data, GArray *smmu_idmaps, int smmu_offset, u
build_append_int_noprefix(table_data, 1 , 4);
/* Reference to Memory Range Descriptors */
build_append_int_noprefix(table_data, 28 + ID_MAPPING_ENTRY_SIZE, 4);
- build_iort_id_mapping(table_data, bdf, range->id_count, smmu_offset, 1);
+ build_iort_id_mapping(table_data, bdf, range->id_count, smmu_offset[i], 1);
/* Table 19 Memory Range Descriptor */
@@ -345,25 +345,42 @@ static void
build_iort(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
{
int i, nb_nodes, rc_mapping_count;
- size_t node_size, smmu_offset = 0;
+ size_t node_size, *smmu_offset;
AcpiIortIdMapping *idmap;
uint32_t id = 0;
GArray *smmu_idmaps = g_array_new(false, true, sizeof(AcpiIortIdMapping));
GArray *its_idmaps = g_array_new(false, true, sizeof(AcpiIortIdMapping));
AcpiIortIdMappingVM idmap_vm = { .vms = vms, .smmu_idmaps = smmu_idmaps, };
+ int irq_offset = NUM_SMMU_IRQS;
+ hwaddr offset = SMMU_IO_LEN;
+ int irq, num_smmus = 0;
+ hwaddr base;
AcpiTable table = { .sig = "IORT", .rev = 5, .oem_id = vms->oem_id,
.oem_table_id = vms->oem_table_id };
/* Table 2 The IORT */
acpi_table_begin(&table, table_data);
+ if (vms->num_nested_smmus) {
+ irq = vms->irqmap[VIRT_NESTED_SMMU] + ARM_SPI_BASE;
+ base = vms->memmap[VIRT_NESTED_SMMU].base;
+ num_smmus = vms->num_nested_smmus;
+ } else if (virt_has_smmuv3(vms)) {
+ irq = vms->irqmap[VIRT_SMMU] + ARM_SPI_BASE;
+ base = vms->memmap[VIRT_SMMU].base;
+ num_smmus = 1;
+ }
+ smmu_offset = g_new0(size_t, num_smmus);
+
+ nb_nodes = 2; /* RC, ITS */
+ nb_nodes += num_smmus; /* SMMU nodes */
+
if (virt_has_smmuv3(vms)) {
AcpiIortIdMapping next_range = {0};
object_child_foreach_recursive(object_get_root(),
iort_host_bridges, &idmap_vm);
- nb_nodes = 3; /* RC, ITS, SMMUv3 */
/* Sort the smmu idmap by input_base */
g_array_sort(smmu_idmaps, iort_idmap_compare);
@@ -394,7 +411,6 @@ build_iort(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
rc_mapping_count = smmu_idmaps->len + its_idmaps->len;
} else {
- nb_nodes = 2; /* RC, ITS */
rc_mapping_count = 1;
}
/* Number of IORT Nodes */
@@ -416,10 +432,9 @@ build_iort(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
/* GIC ITS Identifier Array */
build_append_int_noprefix(table_data, 0 /* MADT translation_id */, 4);
- if (virt_has_smmuv3(vms)) {
- int irq = vms->irqmap[VIRT_SMMU] + ARM_SPI_BASE;
+ for (i = 0; i < num_smmus; i++) {
+ smmu_offset[i] = table_data->len - table.table_offset;
- smmu_offset = table_data->len - table.table_offset;
/* Table 9 SMMUv3 Format */
build_append_int_noprefix(table_data, 4 /* SMMUv3 */, 1); /* Type */
node_size = SMMU_V3_ENTRY_SIZE + ID_MAPPING_ENTRY_SIZE;
@@ -430,12 +445,13 @@ build_iort(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
/* Reference to ID Array */
build_append_int_noprefix(table_data, SMMU_V3_ENTRY_SIZE, 4);
/* Base address */
- build_append_int_noprefix(table_data, vms->memmap[VIRT_SMMU].base, 8);
+ build_append_int_noprefix(table_data, base + i * offset, 8);
/* Flags */
build_append_int_noprefix(table_data, 1 /* COHACC Override */, 4);
build_append_int_noprefix(table_data, 0, 4); /* Reserved */
build_append_int_noprefix(table_data, 0, 8); /* VATOS address */
/* Model */
+ irq += irq_offset;
build_append_int_noprefix(table_data, 0 /* Generic SMMU-v3 */, 4);
build_append_int_noprefix(table_data, irq, 4); /* Event */
build_append_int_noprefix(table_data, irq + 1, 4); /* PRI */
@@ -487,7 +503,7 @@ build_iort(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
range = &g_array_index(smmu_idmaps, AcpiIortIdMapping, i);
/* output IORT node is the smmuv3 node */
build_iort_id_mapping(table_data, range->input_base,
- range->id_count, smmu_offset, 0);
+ range->id_count, smmu_offset[i], 0);
}
/* bypassed RIDs connect to ITS group node directly: RC -> ITS */
--
2.43.0