Linux PCI subsystem development
 help / color / mirror / Atom feed
From: Szymon Durawa <szymon.durawa@linux.intel.com>
To: helgaas@kernel.org
Cc: Szymon Durawa <szymon.durawa@linux.intel.com>,
	Bjorn Helgaas <bhelgaas@google.com>,
	Dan Williams <dan.j.williams@intel.com>,
	Lukas Wunner <lukas@wunner.de>,
	linux-pci@vger.kernel.org,
	Nirmal Patel <nirmal.patel@linux.intel.com>,
	Mariusz Tkaczyk <mariusz.tkaczyk@linux.intel.com>
Subject: [PATCH v3 7/8] PCI: vmd: Add support for second rootbus under VMD
Date: Fri, 22 Nov 2024 09:52:14 +0100	[thread overview]
Message-ID: <20241122085215.424736-8-szymon.durawa@linux.intel.com> (raw)
In-Reply-To: <20241122085215.424736-1-szymon.durawa@linux.intel.com>

Starting from Intel Arrow Lake, the VMD enhancement introduces second
root bus support with a fixed root bus number (0x80). This means that all
3 MMIO BARs exposed by VMD are now shared between both buses (the current
BUS0 and the new BUS1).

Add the new BUS1 enumeration and divide the MMIO space so it is shared
between both root buses. Due to enumeration issues with a root bus
hardwired to a fixed non-zero value, this patch only works together with
the workaround proposed in the next patch. Without that workaround, the
user won't see the devices attached to the BUS1 root bus.

Suggested-by: Nirmal Patel <nirmal.patel@linux.intel.com>
Reviewed-by: Mariusz Tkaczyk <mariusz.tkaczyk@linux.intel.com>
Signed-off-by: Szymon Durawa <szymon.durawa@linux.intel.com>
---
 drivers/pci/controller/vmd.c | 208 ++++++++++++++++++++++++++++++-----
 1 file changed, 180 insertions(+), 28 deletions(-)

diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c
index 6d8397b5ebee..6cd14c28fd4e 100755
--- a/drivers/pci/controller/vmd.c
+++ b/drivers/pci/controller/vmd.c
@@ -26,6 +26,7 @@
 #define VMD_RESTRICT_0_BUS_START 0
 #define VMD_RESTRICT_1_BUS_START 128
 #define VMD_RESTRICT_2_BUS_START 224
+#define VMD_RESTRICT_3_BUS_START 225
 
 #define PCI_REG_VMCAP		0x40
 #define BUS_RESTRICT_CAP(vmcap)	(vmcap & 0x1)
@@ -38,15 +39,33 @@
 #define MB2_SHADOW_OFFSET	0x2000
 #define MB2_SHADOW_SIZE		16
 
+#define VMD_PRIMARY_BUS0 0x00
+#define VMD_PRIMARY_BUS1 0x80
+#define VMD_BUSRANGE0 0xc8
+#define VMD_BUSRANGE1 0xcc
+#define VMD_MEMBAR1_OFFSET 0xd0
+#define VMD_MEMBAR2_OFFSET1 0xd8
+#define VMD_MEMBAR2_OFFSET2 0xdc
+#define VMD_BUS_END(busr) ((busr >> 8) & 0xff)
+#define VMD_BUS_START(busr) (busr & 0x00ff)
+
+/*
+ * Add VMD resources for BUS1, it will share the same MMIO space with
+ * previous VMD resources.
+ */
 enum vmd_resource {
 	VMD_RES_CFGBAR = 0, /* VMD Bus0 Config BAR */
 	VMD_RES_MBAR_1, /* VMD Bus0 Resource MemBAR 1 */
 	VMD_RES_MBAR_2, /* VMD Bus0 Resource MemBAR 2 */
+	VMD_RES_BUS1_CFGBAR, /* VMD Bus1 Config BAR */
+	VMD_RES_BUS1_MBAR_1, /* VMD Bus1 Resource MemBAR 1 */
+	VMD_RES_BUS1_MBAR_2, /* VMD Bus1 Resource MemBAR 2 */
 	VMD_RES_COUNT
 };
 
 enum vmd_rootbus {
 	VMD_BUS_0 = 0,
+	VMD_BUS_1,
 	VMD_BUS_COUNT
 };
 
@@ -90,6 +109,12 @@ enum vmd_features {
 	 * proper power management of the SoC.
 	 */
 	VMD_FEAT_BIOS_PM_QUIRK		= (1 << 5),
+
+	/*
+	 * Starting from Intel Arrow Lake, VMD devices have their VMD rootports
+	 * connected to additional BUS1 rootport.
+	 */
+	VMD_FEAT_HAS_BUS1_ROOTBUS	= (1 << 6)
 };
 
 #define VMD_BIOS_PM_QUIRK_LTR	0x1003	/* 3145728 ns */
@@ -97,7 +122,8 @@ enum vmd_features {
 #define VMD_FEATS_CLIENT	(VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP |	\
 				 VMD_FEAT_HAS_BUS_RESTRICTIONS |	\
 				 VMD_FEAT_OFFSET_FIRST_VECTOR |		\
-				 VMD_FEAT_BIOS_PM_QUIRK)
+				 VMD_FEAT_BIOS_PM_QUIRK |		\
+				 VMD_FEAT_HAS_BUS1_ROOTBUS)
 
 static DEFINE_IDA(vmd_instance_ida);
 
@@ -155,6 +181,7 @@ struct vmd_dev {
 	u8			first_vec;
 	char			*name;
 	int			instance;
+	bool			bus1_rootbus;
 };
 
 static inline struct vmd_dev *vmd_from_bus(struct pci_bus *bus)
@@ -532,11 +559,16 @@ static inline void vmd_acpi_end(void) { }
 
 static void vmd_domain_reset(struct vmd_dev *vmd)
 {
-	u16 bus, max_buses = resource_size(&vmd->resources[VMD_RES_CFGBAR]);
+	u16 bus, bus_cnt = resource_size(&vmd->resources[VMD_RES_CFGBAR]);
 	u8 dev, functions, fn, hdr_type;
 	char __iomem *base;
 
-	for (bus = 0; bus < max_buses; bus++) {
+	if (vmd->bus1_rootbus) {
+		bus_cnt += resource_size(&vmd->resources[VMD_RES_BUS1_CFGBAR]);
+		bus_cnt += 2;
+	}
+
+	for (bus = 0; bus < bus_cnt; bus++) {
 		for (dev = 0; dev < 32; dev++) {
 			base = vmd->cfgbar + PCIE_ECAM_OFFSET(bus,
 						PCI_DEVFN(dev, 0), 0);
@@ -582,12 +614,24 @@ static void vmd_attach_resources(struct vmd_dev *vmd)
 {
 	vmd->dev->resource[VMD_MEMBAR1].child = &vmd->resources[VMD_RES_MBAR_1];
 	vmd->dev->resource[VMD_MEMBAR2].child = &vmd->resources[VMD_RES_MBAR_2];
+
+	if (vmd->bus1_rootbus) {
+		vmd->resources[VMD_RES_MBAR_1].sibling =
+			&vmd->resources[VMD_RES_BUS1_MBAR_1];
+		vmd->resources[VMD_RES_MBAR_2].sibling =
+			&vmd->resources[VMD_RES_BUS1_MBAR_2];
+	}
 }
 
 static void vmd_detach_resources(struct vmd_dev *vmd)
 {
 	vmd->dev->resource[VMD_MEMBAR1].child = NULL;
 	vmd->dev->resource[VMD_MEMBAR2].child = NULL;
+
+	if (vmd->bus1_rootbus) {
+		vmd->resources[VMD_RES_MBAR_1].sibling = NULL;
+		vmd->resources[VMD_RES_MBAR_2].sibling = NULL;
+	}
 }
 
 /*
@@ -660,7 +704,7 @@ static int vmd_get_phys_offsets(struct vmd_dev *vmd, bool native_hint,
 	return 0;
 }
 
-static int vmd_get_bus_number_start(struct vmd_dev *vmd)
+static int vmd_get_bus_number_start(struct vmd_dev *vmd, unsigned long features)
 {
 	struct pci_dev *dev = vmd->dev;
 	u16 reg;
@@ -679,6 +723,19 @@ static int vmd_get_bus_number_start(struct vmd_dev *vmd)
 		case 2:
 			vmd->busn_start[VMD_BUS_0] = VMD_RESTRICT_2_BUS_START;
 			break;
+		case 3:
+			if (!(features & VMD_FEAT_HAS_BUS1_ROOTBUS)) {
+				pci_err(dev, "VMD Bus Restriction detected type %d, but BUS1 Rootbus is not supported, aborting.\n",
+					BUS_RESTRICT_CFG(reg));
+				return -ENODEV;
+			}
+
+			/* BUS0 start number */
+			vmd->busn_start[VMD_BUS_0] = VMD_RESTRICT_2_BUS_START;
+			/* BUS1 start number */
+			vmd->busn_start[VMD_BUS_1] = VMD_RESTRICT_3_BUS_START;
+			vmd->bus1_rootbus = true;
+			break;
 		default:
 			pci_err(dev, "Unknown Bus Offset Setting (%d)\n",
 				BUS_RESTRICT_CFG(reg));
@@ -805,6 +862,30 @@ static void vmd_configure_cfgbar(struct vmd_dev *vmd)
 		       (resource_size(res) >> 20) - 1,
 		.flags = IORESOURCE_BUS | IORESOURCE_PCI_FIXED,
 	};
+
+	if (vmd->bus1_rootbus) {
+		u16 bus0_range = 0;
+		u16 bus1_range = 0;
+
+		pci_read_config_word(vmd->dev, VMD_BUSRANGE0, &bus0_range);
+		pci_read_config_word(vmd->dev, VMD_BUSRANGE1, &bus1_range);
+
+		/*
+		 * Resize BUS0 CFGBAR range to make space for BUS1
+		 * owned devices by adjusting range end with value stored in
+		 * VMD_BUSRANGE0 register.
+		 */
+		vmd->resources[VMD_RES_CFGBAR].start = VMD_BUS_START(bus0_range);
+		vmd->resources[VMD_RES_CFGBAR].end = VMD_BUS_END(bus0_range);
+
+		vmd->resources[VMD_RES_BUS1_CFGBAR] = (struct resource){
+			.name = "VMD CFGBAR BUS1",
+			.start = VMD_BUS_START(bus1_range),
+			.end = VMD_BUS_END(bus1_range),
+			.flags = IORESOURCE_BUS | IORESOURCE_PCI_FIXED,
+			.parent = res,
+		};
+	}
 }
 
 /*
@@ -834,8 +915,9 @@ static void vmd_configure_membar(struct vmd_dev *vmd,
 		flags &= ~IORESOURCE_MEM_64;
 
 	vmd->resources[resource_number] = (struct resource){
-		.name = kasprintf(GFP_KERNEL, "VMD MEMBAR%d",
-				  membar_number / 2),
+		.name = kasprintf(
+			GFP_KERNEL, "VMD MEMBAR%d %s", membar_number / 2,
+			resource_number > VMD_RES_MBAR_2 ? "BUS1" : ""),
 		.start = res->start + start_offset,
 		.end = res->end - end_offset,
 		.flags = flags,
@@ -846,41 +928,80 @@ static void vmd_configure_membar(struct vmd_dev *vmd,
 static void vmd_configure_membar1_membar2(struct vmd_dev *vmd,
 					  resource_size_t mbar2_ofs)
 {
-	vmd_configure_membar(vmd, 1, VMD_MEMBAR1, 0, 0);
-	vmd_configure_membar(vmd, 2, VMD_MEMBAR2, mbar2_ofs, 0);
+	if (vmd->bus1_rootbus) {
+		u32 bus1_mbar1_ofs = 0;
+		u64 bus1_mbar2_ofs = 0;
+		u32 reg;
+
+		pci_read_config_dword(vmd->dev, VMD_MEMBAR1_OFFSET,
+				      &bus1_mbar1_ofs);
+
+		pci_read_config_dword(vmd->dev, VMD_MEMBAR2_OFFSET1, &reg);
+		bus1_mbar2_ofs = reg;
+
+		pci_read_config_dword(vmd->dev, VMD_MEMBAR2_OFFSET2, &reg);
+		bus1_mbar2_ofs |= (u64)reg << 32;
+
+		/*
+		 * Resize BUS MEMBAR1 and MEMBAR2 ranges to make space
+		 * for BUS1 owned devices by adjusting range end with values
+		 * stored in VMD_MEMBAR1_OFFSET and VMD_MEMBAR2_OFFSET registers
+		 */
+		vmd_configure_membar(vmd, VMD_RES_MBAR_1, VMD_MEMBAR1, 0,
+				     bus1_mbar1_ofs);
+		vmd_configure_membar(vmd, VMD_RES_MBAR_2, VMD_MEMBAR2,
+				     mbar2_ofs, bus1_mbar2_ofs - mbar2_ofs);
+
+		vmd_configure_membar(vmd, VMD_RES_BUS1_MBAR_1, VMD_MEMBAR1,
+				     bus1_mbar1_ofs, 0);
+		vmd_configure_membar(vmd, VMD_RES_BUS1_MBAR_2, VMD_MEMBAR2,
+				     mbar2_ofs + bus1_mbar2_ofs, 0);
+	} else {
+		vmd_configure_membar(vmd, VMD_RES_MBAR_1, VMD_MEMBAR1, 0, 0);
+		vmd_configure_membar(vmd, VMD_RES_MBAR_2, VMD_MEMBAR2,
+				     mbar2_ofs, 0);
+	}
 }
 
-static int vmd_create_bus(struct vmd_dev *vmd, struct pci_sysdata *sd,
-			  resource_size_t *offset)
+static int vmd_create_bus(struct vmd_dev *vmd, enum vmd_rootbus bus_number,
+			  struct pci_sysdata *sd, resource_size_t *offset)
 {
+	u8 cfgbar = bus_number * 3;
+	u8 membar1 = cfgbar + 1;
+	u8 membar2 = cfgbar + 2;
+	struct pci_bus *vmd_bus;
 	LIST_HEAD(resources);
 
-	pci_add_resource(&resources, &vmd->resources[VMD_RES_CFGBAR]);
-	pci_add_resource_offset(&resources, &vmd->resources[VMD_RES_MBAR_1],
+	pci_add_resource(&resources, &vmd->resources[cfgbar]);
+	pci_add_resource_offset(&resources, &vmd->resources[membar1],
 				offset[0]);
-	pci_add_resource_offset(&resources, &vmd->resources[VMD_RES_MBAR_2],
+	pci_add_resource_offset(&resources, &vmd->resources[membar2],
 				offset[1]);
 
-	vmd->bus[VMD_BUS_0] = pci_create_root_bus(&vmd->dev->dev,
-						  vmd->busn_start[VMD_BUS_0],
-						  &vmd_ops, sd, &resources);
-	if (!vmd->bus[VMD_BUS_0]) {
+	vmd_bus = pci_create_root_bus(&vmd->dev->dev,
+				      vmd->busn_start[bus_number], &vmd_ops, sd,
+				      &resources);
+
+	if (!vmd_bus) {
 		pci_free_resource_list(&resources);
-		vmd_remove_irq_domain(vmd);
+
+		if (bus_number == VMD_PRIMARY_BUS0)
+			vmd_remove_irq_domain(vmd);
 		return -ENODEV;
 	}
 
-	vmd_copy_host_bridge_flags(
-		pci_find_host_bridge(vmd->dev->bus),
-		to_pci_host_bridge(vmd->bus[VMD_BUS_0]->bridge));
+	vmd_copy_host_bridge_flags(pci_find_host_bridge(vmd->dev->bus),
+				   to_pci_host_bridge(vmd_bus->bridge));
 
 	vmd_attach_resources(vmd);
 	if (vmd->irq_domain)
-		dev_set_msi_domain(&vmd->bus[VMD_BUS_0]->dev, vmd->irq_domain);
+		dev_set_msi_domain(&vmd_bus->dev, vmd->irq_domain);
 	else
-		dev_set_msi_domain(&vmd->bus[VMD_BUS_0]->dev,
+		dev_set_msi_domain(&vmd_bus->dev,
 				   dev_get_msi_domain(&vmd->dev->dev));
 
+	vmd->bus[bus_number] = vmd_bus;
+
 	return 0;
 }
 
@@ -893,7 +1014,9 @@ static void vmd_bus_enumeration(struct pci_bus *bus, unsigned long features)
 	vmd_acpi_begin();
 
 	pci_scan_child_bus(bus);
-	vmd_domain_reset(vmd_from_bus(bus));
+
+	if (bus->primary == VMD_PRIMARY_BUS0)
+		vmd_domain_reset(vmd_from_bus(bus));
 
 	/*
 	 * When Intel VMD is enabled, the OS does not discover the Root Ports
@@ -961,7 +1084,7 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
 	 * limits the bus range to between 0-127, 128-255, or 224-255
 	 */
 	if (features & VMD_FEAT_HAS_BUS_RESTRICTIONS) {
-		ret = vmd_get_bus_number_start(vmd);
+		ret = vmd_get_bus_number_start(vmd, features);
 		if (ret)
 			return ret;
 	}
@@ -1021,7 +1144,7 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
 		vmd_set_msi_remapping(vmd, false);
 	}
 
-	ret = vmd_create_bus(vmd, sd, offset);
+	ret = vmd_create_bus(vmd, VMD_BUS_0, sd, offset);
 
 	if (ret) {
 		pci_err(vmd->dev, "Can't create bus: %d\n", ret);
@@ -1034,6 +1157,27 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
 
 	vmd_bus_enumeration(vmd->bus[VMD_BUS_0], features);
 
+	if (vmd->bus1_rootbus) {
+		ret = vmd_create_bus(vmd, VMD_BUS_1, sd, offset);
+		if (ret) {
+			pci_err(vmd->dev, "Can't create BUS1: %d\n", ret);
+			return ret;
+		}
+
+		/*
+		 * Primary bus is not set by pci_create_root_bus(), it is
+		 * updated here
+		 */
+		vmd->bus[VMD_BUS_1]->primary = VMD_PRIMARY_BUS1;
+
+		WARN(sysfs_create_link(&vmd->dev->dev.kobj,
+				       &vmd->bus[VMD_BUS_1]->dev.kobj,
+				       "domain1"),
+		     "Can't create symlink to domain1\n");
+
+		vmd_bus_enumeration(vmd->bus[VMD_BUS_1], features);
+	}
+
 	return 0;
 }
 
@@ -1113,10 +1257,18 @@ static void vmd_remove(struct pci_dev *dev)
 	sysfs_remove_link(&vmd->dev->dev.kobj, "domain");
 	pci_remove_root_bus(vmd->bus[VMD_BUS_0]);
 
-	/* CFGBAR is static, does not require releasing memory */
+	/* CFGBARs are static, do not require releasing memory */
 	kfree(vmd->resources[VMD_RES_MBAR_1].name);
 	kfree(vmd->resources[VMD_RES_MBAR_2].name);
 
+	if (vmd->bus1_rootbus) {
+		pci_stop_root_bus(vmd->bus[VMD_BUS_1]);
+		sysfs_remove_link(&vmd->dev->dev.kobj, "domain1");
+		pci_remove_root_bus(vmd->bus[VMD_BUS_1]);
+		kfree(vmd->resources[VMD_RES_BUS1_MBAR_1].name);
+		kfree(vmd->resources[VMD_RES_BUS1_MBAR_2].name);
+	}
+
 	vmd_cleanup_srcu(vmd);
 	vmd_detach_resources(vmd);
 	vmd_remove_irq_domain(vmd);
@@ -1202,4 +1354,4 @@ module_pci_driver(vmd_drv);
 MODULE_AUTHOR("Intel Corporation");
 MODULE_DESCRIPTION("Volume Management Device driver");
 MODULE_LICENSE("GPL v2");
-MODULE_VERSION("0.6");
+MODULE_VERSION("0.7");
-- 
2.39.3


  parent reply	other threads:[~2024-11-22  7:51 UTC|newest]

Thread overview: 14+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2024-11-22  8:52 [PATCH v3 0/8] VMD add second rootbus support Szymon Durawa
2024-11-22  8:52 ` [PATCH v3 1/8] PCI: vmd: Add vmd_bus_enumeration() Szymon Durawa
2024-11-22  8:52 ` [PATCH v3 2/8] PCI: vmd: Add vmd_configure_cfgbar() Szymon Durawa
2024-11-22  8:52 ` [PATCH v3 3/8] PCI: vmd: Add vmd_configure_membar() and vmd_configure_membar1_membar2() Szymon Durawa
2024-11-22  8:52 ` [PATCH v3 4/8] PCI: vmd: Add vmd_create_bus() Szymon Durawa
2024-11-22  8:52 ` [PATCH v3 5/8] PCI: vmd: Replace hardcoded values with enum and defines Szymon Durawa
2025-05-05 17:33   ` Bjorn Helgaas
2024-11-22  8:52 ` [PATCH v3 6/8] PCI: vmd: Convert bus and busn_start to an array Szymon Durawa
2024-11-22  8:52 ` Szymon Durawa [this message]
2025-05-03 13:20   ` [PATCH v3 7/8] PCI: vmd: Add support for second rootbus under VMD Manivannan Sadhasivam
2025-05-05 17:31   ` Bjorn Helgaas
2024-11-22  8:52 ` [PATCH v3 8/8] PCI: vmd: Add workaround for bus number hardwired to fixed non-zero value Szymon Durawa
2025-05-05 17:31   ` Bjorn Helgaas
2025-01-27 20:59 ` [PATCH v3 0/8] VMD add second rootbus support Durawa, Szymon

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20241122085215.424736-8-szymon.durawa@linux.intel.com \
    --to=szymon.durawa@linux.intel.com \
    --cc=bhelgaas@google.com \
    --cc=dan.j.williams@intel.com \
    --cc=helgaas@kernel.org \
    --cc=linux-pci@vger.kernel.org \
    --cc=lukas@wunner.de \
    --cc=mariusz.tkaczyk@linux.intel.com \
    --cc=nirmal.patel@linux.intel.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox