From: Jonathan Cameron via <qemu-devel@nongnu.org>
To: <qemu-devel@nongnu.org>, Fan Ni <fan.ni@samsung.com>,
Peter Maydell <peter.maydell@linaro.org>, <mst@redhat.com>
Cc: linux-cxl@vger.kernel.org, linuxarm@huawei.com,
qemu-arm@nongnu.org,
"Yuquan Wang" <wangyuquan1236@phytium.com.cn>,
"Itaru Kitayama" <itaru.kitayama@linux.dev>,
"Philippe Mathieu-Daudé" <philmd@linaro.org>
Subject: [PATCH v13 4/5] hw/arm/virt: Basic CXL enablement on pci_expander_bridge instances pxb-cxl
Date: Tue, 13 May 2025 12:14:54 +0100
Message-ID: <20250513111455.128266-5-Jonathan.Cameron@huawei.com>
In-Reply-To: <20250513111455.128266-1-Jonathan.Cameron@huawei.com>

Code is based on the i386/pc CXL enablement.

The memory layout reserves space for 16 host bridge register regions
(64 KiB each) after GIC_REDIST2 in the extended memmap.
The CXL Fixed Memory Windows (CFMWs) are placed above the extended memmap.

Only create the CEDT table if cxl=on is set for the machine.
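
As a rough sketch, an invocation exercising this path might look like the
following (device IDs, the backend type/size, bus numbering and the FMW size
are purely illustrative, not something this series mandates):

  qemu-system-aarch64 -M virt,gic-version=3,cxl=on -cpu max \
      -m 4G,maxmem=8G,slots=8 \
      -object memory-backend-ram,id=vmem0,share=on,size=256M \
      -device pxb-cxl,bus_nr=12,bus=pcie.0,id=cxl.1 \
      -device cxl-rp,port=0,bus=cxl.1,id=root_port0,chassis=0,slot=2 \
      -device cxl-type3,bus=root_port0,volatile-memdev=vmem0,id=cxl-vmem0 \
      -M cxl-fmw.0.targets.0=cxl.1,cxl-fmw.0.size=4G

With cxl=on set, the CEDT should then show up alongside the other ACPI
tables in the guest (e.g. under /sys/firmware/acpi/tables/ on Linux);
without it, no CEDT is built.
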
Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
---
include/hw/arm/virt.h | 4 ++++
hw/arm/virt-acpi-build.c | 34 ++++++++++++++++++++++++++++++++++
hw/arm/virt.c | 29 +++++++++++++++++++++++++++++
3 files changed, 67 insertions(+)
diff --git a/include/hw/arm/virt.h b/include/hw/arm/virt.h
index 9a1b0f53d2..4375819ea0 100644
--- a/include/hw/arm/virt.h
+++ b/include/hw/arm/virt.h
@@ -36,6 +36,7 @@
#include "hw/arm/boot.h"
#include "hw/arm/bsa.h"
#include "hw/block/flash.h"
+#include "hw/cxl/cxl.h"
#include "system/kvm.h"
#include "hw/intc/arm_gicv3_common.h"
#include "qom/object.h"
@@ -85,6 +86,7 @@ enum {
/* indices of IO regions located after the RAM */
enum {
VIRT_HIGH_GIC_REDIST2 = VIRT_LOWMEMMAP_LAST,
+ VIRT_CXL_HOST,
VIRT_HIGH_PCIE_ECAM,
VIRT_HIGH_PCIE_MMIO,
};
@@ -140,6 +142,7 @@ struct VirtMachineState {
bool secure;
bool highmem;
bool highmem_compact;
+ bool highmem_cxl;
bool highmem_ecam;
bool highmem_mmio;
bool highmem_redists;
@@ -174,6 +177,7 @@ struct VirtMachineState {
char *oem_id;
char *oem_table_id;
bool ns_el2_virt_timer_irq;
+ CXLState cxl_devices_state;
};
#define VIRT_ECAM_ID(high) (high ? VIRT_HIGH_PCIE_ECAM : VIRT_PCIE_ECAM)
diff --git a/hw/arm/virt-acpi-build.c b/hw/arm/virt-acpi-build.c
index 7e8e0f0298..589e221b89 100644
--- a/hw/arm/virt-acpi-build.c
+++ b/hw/arm/virt-acpi-build.c
@@ -39,10 +39,12 @@
#include "hw/acpi/aml-build.h"
#include "hw/acpi/utils.h"
#include "hw/acpi/pci.h"
+#include "hw/acpi/cxl.h"
#include "hw/acpi/memory_hotplug.h"
#include "hw/acpi/generic_event_device.h"
#include "hw/acpi/tpm.h"
#include "hw/acpi/hmat.h"
+#include "hw/cxl/cxl.h"
#include "hw/pci/pcie_host.h"
#include "hw/pci/pci.h"
#include "hw/pci/pci_bus.h"
@@ -119,10 +121,29 @@ static void acpi_dsdt_add_flash(Aml *scope, const MemMapEntry *flash_memmap)
aml_append(scope, dev);
}
+static void build_acpi0017(Aml *table)
+{
+ Aml *dev, *scope, *method;
+
+ scope = aml_scope("_SB");
+ dev = aml_device("CXLM");
+ aml_append(dev, aml_name_decl("_HID", aml_string("ACPI0017")));
+
+ method = aml_method("_STA", 0, AML_NOTSERIALIZED);
+ aml_append(method, aml_return(aml_int(0x0B)));
+ aml_append(dev, method);
+ build_cxl_dsm_method(dev);
+
+ aml_append(scope, dev);
+ aml_append(table, scope);
+}
+
static void acpi_dsdt_add_pci(Aml *scope, const MemMapEntry *memmap,
uint32_t irq, VirtMachineState *vms)
{
int ecam_id = VIRT_ECAM_ID(vms->highmem_ecam);
+ bool cxl_present = false;
+ PCIBus *bus = vms->bus;
struct GPEXConfig cfg = {
.mmio32 = memmap[VIRT_PCIE_MMIO],
.pio = memmap[VIRT_PCIE_PIO],
@@ -136,6 +157,14 @@ static void acpi_dsdt_add_pci(Aml *scope, const MemMapEntry *memmap,
}
acpi_dsdt_add_gpex(scope, &cfg);
+ QLIST_FOREACH(bus, &vms->bus->child, sibling) {
+ if (pci_bus_is_cxl(bus)) {
+ cxl_present = true;
+ }
+ }
+ if (cxl_present) {
+ build_acpi0017(scope);
+ }
}
static void acpi_dsdt_add_gpio(Aml *scope, const MemMapEntry *gpio_memmap,
@@ -963,6 +992,11 @@ void virt_acpi_build(VirtMachineState *vms, AcpiBuildTables *tables)
}
}
+ if (vms->cxl_devices_state.is_enabled) {
+ cxl_build_cedt(table_offsets, tables_blob, tables->linker,
+ vms->oem_id, vms->oem_table_id, &vms->cxl_devices_state);
+ }
+
if (ms->nvdimms_state->is_enabled) {
nvdimm_build_acpi(table_offsets, tables_blob, tables->linker,
ms->nvdimms_state, ms->ram_slots, vms->oem_id,
diff --git a/hw/arm/virt.c b/hw/arm/virt.c
index 9a6cd085a3..e06d293edc 100644
--- a/hw/arm/virt.c
+++ b/hw/arm/virt.c
@@ -57,6 +57,7 @@
#include "qemu/error-report.h"
#include "qemu/module.h"
#include "hw/pci-host/gpex.h"
+#include "hw/pci-bridge/pci_expander_bridge.h"
#include "hw/virtio/virtio-pci.h"
#include "hw/core/sysbus-fdt.h"
#include "hw/platform-bus.h"
@@ -86,6 +87,8 @@
#include "hw/virtio/virtio-md-pci.h"
#include "hw/virtio/virtio-iommu.h"
#include "hw/char/pl011.h"
+#include "hw/cxl/cxl.h"
+#include "hw/cxl/cxl_host.h"
#include "qemu/guest-random.h"
static GlobalProperty arm_virt_compat[] = {
@@ -220,6 +223,7 @@ static const MemMapEntry base_memmap[] = {
static MemMapEntry extended_memmap[] = {
/* Additional 64 MB redist region (can contain up to 512 redistributors) */
[VIRT_HIGH_GIC_REDIST2] = { 0x0, 64 * MiB },
+ [VIRT_CXL_HOST] = { 0x0, 64 * KiB * 16 }, /* 16 UID */
[VIRT_HIGH_PCIE_ECAM] = { 0x0, 256 * MiB },
/* Second PCIe window */
[VIRT_HIGH_PCIE_MMIO] = { 0x0, DEFAULT_HIGH_PCIE_MMIO_SIZE },
@@ -1621,6 +1625,17 @@ static void create_pcie(VirtMachineState *vms)
}
}
+static void create_cxl_host_reg_region(VirtMachineState *vms)
+{
+ MemoryRegion *sysmem = get_system_memory();
+ MemoryRegion *mr = &vms->cxl_devices_state.host_mr;
+
+ memory_region_init(mr, OBJECT(vms), "cxl_host_reg",
+ vms->memmap[VIRT_CXL_HOST].size);
+ memory_region_add_subregion(sysmem, vms->memmap[VIRT_CXL_HOST].base, mr);
+ vms->highmem_cxl = true;
+}
+
static void create_platform_bus(VirtMachineState *vms)
{
DeviceState *dev;
@@ -1737,6 +1752,12 @@ void virt_machine_done(Notifier *notifier, void *data)
struct arm_boot_info *info = &vms->bootinfo;
AddressSpace *as = arm_boot_address_space(cpu, info);
+ cxl_hook_up_pxb_registers(vms->bus, &vms->cxl_devices_state,
+ &error_fatal);
+
+ if (vms->cxl_devices_state.is_enabled) {
+ cxl_fmws_link_targets(&error_fatal);
+ }
/*
* If the user provided a dtb, we assume the dynamic sysbus nodes
* already are integrated there. This corresponds to a use case where
@@ -1783,6 +1804,7 @@ static inline bool *virt_get_high_memmap_enabled(VirtMachineState *vms,
{
bool *enabled_array[] = {
&vms->highmem_redists,
+ &vms->highmem_cxl,
&vms->highmem_ecam,
&vms->highmem_mmio,
};
@@ -1890,6 +1912,9 @@ static void virt_set_memmap(VirtMachineState *vms, int pa_bits)
if (device_memory_size > 0) {
machine_memory_devices_init(ms, device_memory_base, device_memory_size);
}
+
+ cxl_fmws_set_memmap(ROUND_UP(vms->highest_gpa + 1, 256 * MiB),
+ BIT_ULL(pa_bits));
}
static VirtGICType finalize_gic_version_do(const char *accel_name,
@@ -2340,6 +2365,8 @@ static void machvirt_init(MachineState *machine)
memory_region_add_subregion(sysmem, vms->memmap[VIRT_MEM].base,
machine->ram);
+ cxl_fmws_update_mmio();
+
virt_flash_fdt(vms, sysmem, secure_sysmem ?: sysmem);
create_gic(vms, sysmem);
@@ -2395,6 +2422,7 @@ static void machvirt_init(MachineState *machine)
create_rtc(vms);
create_pcie(vms);
+ create_cxl_host_reg_region(vms);
if (has_ged && aarch64 && firmware_loaded && virt_is_acpi_enabled(vms)) {
vms->acpi_dev = create_acpi_ged(vms);
@@ -3365,6 +3393,7 @@ static void virt_instance_init(Object *obj)
vms->oem_id = g_strndup(ACPI_BUILD_APPNAME6, 6);
vms->oem_table_id = g_strndup(ACPI_BUILD_APPNAME8, 8);
+ cxl_machine_init(obj, &vms->cxl_devices_state);
}
static const TypeInfo virt_machine_info = {
--
2.43.0