* [PATCH v6 0/3] Add support for Xilinx XDMA Soft IP as Root Port.
@ 2023-08-18 9:35 Thippeswamy Havalige
2023-08-18 9:35 ` [PATCH v6 1/3] PCI: xilinx-cpm: Move interrupt bit definitions to common header Thippeswamy Havalige
` (2 more replies)
0 siblings, 3 replies; 10+ messages in thread
From: Thippeswamy Havalige @ 2023-08-18 9:35 UTC (permalink / raw)
To: linux-arm-kernel, linux-kernel, linux-pci, devicetree
Cc: bhelgaas, lpieralisi, robh, kw, michal.simek,
krzysztof.kozlowski+dt, bharat.kumar.gogada, Thippeswamy Havalige
This patch series adds support for the Xilinx XDMA Soft IP as a Root Port.
The Xilinx XDMA Soft IP supports 32-bit and 64-bit BARs.
As a Root Port it supports MSI and legacy (INTx) interrupts.
For code reuse, the existing CPM4 error interrupt bit definitions are moved
to a common header.
Signed-off-by: Thippeswamy Havalige <thippeswamy.havalige@amd.com>
Signed-off-by: Bharat Kumar Gogada <bharat.kumar.gogada@amd.com>
---
Thippeswamy Havalige (3):
PCI: xilinx-cpm: Move interrupt bit definitions to common header
dt-bindings: PCI: xilinx-xdma: Add YAML schemas for Xilinx XDMA PCIe
Root Port Bridge
PCI: xilinx-xdma: Add Xilinx XDMA Root Port driver
.../devicetree/bindings/pci/xlnx,xdma-host.yaml | 114 +++
drivers/pci/controller/Kconfig | 11 +
drivers/pci/controller/Makefile | 1 +
drivers/pci/controller/pcie-xilinx-common.h | 31 +
drivers/pci/controller/pcie-xilinx-cpm.c | 38 +-
drivers/pci/controller/pcie-xilinx-dma-pl.c | 802 +++++++++++++++++++++
6 files changed, 966 insertions(+), 31 deletions(-)
create mode 100644 Documentation/devicetree/bindings/pci/xlnx,xdma-host.yaml
create mode 100644 drivers/pci/controller/pcie-xilinx-common.h
create mode 100644 drivers/pci/controller/pcie-xilinx-dma-pl.c
--
1.8.3.1
* [PATCH v6 1/3] PCI: xilinx-cpm: Move interrupt bit definitions to common header
2023-08-18 9:35 [PATCH v6 0/3] Add support for Xilinx XDMA Soft IP as Root Port Thippeswamy Havalige
@ 2023-08-18 9:35 ` Thippeswamy Havalige
2023-08-18 9:35 ` [PATCH v6 2/3] dt-bindings: PCI: xilinx-xdma: Add YAML schemas for Xilinx XDMA PCIe Root Port Bridge Thippeswamy Havalige
2023-08-18 9:35 ` [PATCH v6 3/3] PCI: xilinx-xdma: Add Xilinx XDMA Root Port driver Thippeswamy Havalige
2 siblings, 0 replies; 10+ messages in thread
From: Thippeswamy Havalige @ 2023-08-18 9:35 UTC (permalink / raw)
To: linux-arm-kernel, linux-kernel, linux-pci, devicetree
Cc: bhelgaas, lpieralisi, robh, kw, michal.simek,
krzysztof.kozlowski+dt, bharat.kumar.gogada, Thippeswamy Havalige
Rename Xilinx interrupt bit definitions so they are not CPM-specific. Move
the definitions to pcie-xilinx-common.h where they can be shared between
pcie-xilinx-cpm and the new xilinx-xdma driver.
Signed-off-by: Thippeswamy Havalige <thippeswamy.havalige@amd.com>
Signed-off-by: Bharat Kumar Gogada <bharat.kumar.gogada@amd.com>
---
changes in v6:
- subject line and commit message
changes in v5:
- None
changes in v4:
- None
changes in v3:
- changed licensing year to 2023
---
drivers/pci/controller/pcie-xilinx-common.h | 30 +++++++++++++++++++++++
drivers/pci/controller/pcie-xilinx-cpm.c | 38 ++++++-----------------------
2 files changed, 37 insertions(+), 31 deletions(-)
create mode 100644 drivers/pci/controller/pcie-xilinx-common.h
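Note: as a quick illustration of how the shared header is meant to be
consumed by a controller driver (a hypothetical consumer, not part of this
patch; the IMR() wrapper mirrors the one already used in pcie-xilinx-cpm.c):

	#include "pcie-xilinx-common.h"

	#define IMR(x)	BIT(XILINX_PCIE_INTR_ ##x)

	/* Build a controller-specific error mask from the shared bit numbers */
	#define MY_PCIE_ERR_MASK	(IMR(CORRECTABLE) | IMR(NONFATAL) | IMR(FATAL))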
diff --git a/drivers/pci/controller/pcie-xilinx-common.h b/drivers/pci/controller/pcie-xilinx-common.h
new file mode 100644
index 0000000..e97d272
--- /dev/null
+++ b/drivers/pci/controller/pcie-xilinx-common.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * (C) Copyright 2023, Xilinx, Inc.
+ */
+
+#include <linux/pci.h>
+#include <linux/pci-ecam.h>
+#include <linux/platform_device.h>
+
+/* Interrupt registers definitions */
+#define XILINX_PCIE_INTR_LINK_DOWN 0
+#define XILINX_PCIE_INTR_HOT_RESET 3
+#define XILINX_PCIE_INTR_CFG_PCIE_TIMEOUT 4
+#define XILINX_PCIE_INTR_CFG_TIMEOUT 8
+#define XILINX_PCIE_INTR_CORRECTABLE 9
+#define XILINX_PCIE_INTR_NONFATAL 10
+#define XILINX_PCIE_INTR_FATAL 11
+#define XILINX_PCIE_INTR_CFG_ERR_POISON 12
+#define XILINX_PCIE_INTR_PME_TO_ACK_RCVD 15
+#define XILINX_PCIE_INTR_INTX 16
+#define XILINX_PCIE_INTR_PM_PME_RCVD 17
+#define XILINX_PCIE_INTR_SLV_UNSUPP 20
+#define XILINX_PCIE_INTR_SLV_UNEXP 21
+#define XILINX_PCIE_INTR_SLV_COMPL 22
+#define XILINX_PCIE_INTR_SLV_ERRP 23
+#define XILINX_PCIE_INTR_SLV_CMPABT 24
+#define XILINX_PCIE_INTR_SLV_ILLBUR 25
+#define XILINX_PCIE_INTR_MST_DECERR 26
+#define XILINX_PCIE_INTR_MST_SLVERR 27
+#define XILINX_PCIE_INTR_SLV_PCIE_TIMEOUT 28
diff --git a/drivers/pci/controller/pcie-xilinx-cpm.c b/drivers/pci/controller/pcie-xilinx-cpm.c
index 4a787a9..a0f5e1d 100644
--- a/drivers/pci/controller/pcie-xilinx-cpm.c
+++ b/drivers/pci/controller/pcie-xilinx-cpm.c
@@ -16,11 +16,9 @@
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
-#include <linux/pci.h>
-#include <linux/platform_device.h>
-#include <linux/pci-ecam.h>
#include "../pci.h"
+#include "pcie-xilinx-common.h"
/* Register definitions */
#define XILINX_CPM_PCIE_REG_IDR 0x00000E10
@@ -38,29 +36,7 @@
#define XILINX_CPM_PCIE_IR_ENABLE 0x000002A8
#define XILINX_CPM_PCIE_IR_LOCAL BIT(0)
-/* Interrupt registers definitions */
-#define XILINX_CPM_PCIE_INTR_LINK_DOWN 0
-#define XILINX_CPM_PCIE_INTR_HOT_RESET 3
-#define XILINX_CPM_PCIE_INTR_CFG_PCIE_TIMEOUT 4
-#define XILINX_CPM_PCIE_INTR_CFG_TIMEOUT 8
-#define XILINX_CPM_PCIE_INTR_CORRECTABLE 9
-#define XILINX_CPM_PCIE_INTR_NONFATAL 10
-#define XILINX_CPM_PCIE_INTR_FATAL 11
-#define XILINX_CPM_PCIE_INTR_CFG_ERR_POISON 12
-#define XILINX_CPM_PCIE_INTR_PME_TO_ACK_RCVD 15
-#define XILINX_CPM_PCIE_INTR_INTX 16
-#define XILINX_CPM_PCIE_INTR_PM_PME_RCVD 17
-#define XILINX_CPM_PCIE_INTR_SLV_UNSUPP 20
-#define XILINX_CPM_PCIE_INTR_SLV_UNEXP 21
-#define XILINX_CPM_PCIE_INTR_SLV_COMPL 22
-#define XILINX_CPM_PCIE_INTR_SLV_ERRP 23
-#define XILINX_CPM_PCIE_INTR_SLV_CMPABT 24
-#define XILINX_CPM_PCIE_INTR_SLV_ILLBUR 25
-#define XILINX_CPM_PCIE_INTR_MST_DECERR 26
-#define XILINX_CPM_PCIE_INTR_MST_SLVERR 27
-#define XILINX_CPM_PCIE_INTR_SLV_PCIE_TIMEOUT 28
-
-#define IMR(x) BIT(XILINX_CPM_PCIE_INTR_ ##x)
+#define IMR(x) BIT(XILINX_PCIE_INTR_ ##x)
#define XILINX_CPM_PCIE_IMR_ALL_MASK \
( \
@@ -323,7 +299,7 @@ static void xilinx_cpm_pcie_event_flow(struct irq_desc *desc)
}
#define _IC(x, s) \
- [XILINX_CPM_PCIE_INTR_ ## x] = { __stringify(x), s }
+ [XILINX_PCIE_INTR_ ## x] = { __stringify(x), s }
static const struct {
const char *sym;
@@ -359,9 +335,9 @@ static irqreturn_t xilinx_cpm_pcie_intr_handler(int irq, void *dev_id)
d = irq_domain_get_irq_data(port->cpm_domain, irq);
switch (d->hwirq) {
- case XILINX_CPM_PCIE_INTR_CORRECTABLE:
- case XILINX_CPM_PCIE_INTR_NONFATAL:
- case XILINX_CPM_PCIE_INTR_FATAL:
+ case XILINX_PCIE_INTR_CORRECTABLE:
+ case XILINX_PCIE_INTR_NONFATAL:
+ case XILINX_PCIE_INTR_FATAL:
cpm_pcie_clear_err_interrupts(port);
fallthrough;
@@ -466,7 +442,7 @@ static int xilinx_cpm_setup_irq(struct xilinx_cpm_pcie *port)
}
port->intx_irq = irq_create_mapping(port->cpm_domain,
- XILINX_CPM_PCIE_INTR_INTX);
+ XILINX_PCIE_INTR_INTX);
if (!port->intx_irq) {
dev_err(dev, "Failed to map INTx interrupt\n");
return -ENXIO;
--
1.8.3.1
* [PATCH v6 2/3] dt-bindings: PCI: xilinx-xdma: Add YAML schemas for Xilinx XDMA PCIe Root Port Bridge
2023-08-18 9:35 [PATCH v6 0/3] Add support for Xilinx XDMA Soft IP as Root Port Thippeswamy Havalige
2023-08-18 9:35 ` [PATCH v6 1/3] PCI: xilinx-cpm: Move interrupt bit definitions to common header Thippeswamy Havalige
@ 2023-08-18 9:35 ` Thippeswamy Havalige
2023-08-18 9:35 ` [PATCH v6 3/3] PCI: xilinx-xdma: Add Xilinx XDMA Root Port driver Thippeswamy Havalige
2 siblings, 0 replies; 10+ messages in thread
From: Thippeswamy Havalige @ 2023-08-18 9:35 UTC (permalink / raw)
To: linux-arm-kernel, linux-kernel, linux-pci, devicetree
Cc: bhelgaas, lpieralisi, robh, kw, michal.simek,
krzysztof.kozlowski+dt, bharat.kumar.gogada, Thippeswamy Havalige,
Krzysztof Kozlowski
Add a YAML DT schema for the Xilinx XDMA Soft IP PCIe Root Port Bridge
device tree binding.
Signed-off-by: Thippeswamy Havalige <thippeswamy.havalige@amd.com>
Signed-off-by: Bharat Kumar Gogada <bharat.kumar.gogada@amd.com>
Reviewed-by: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
Acked-by: Rob Herring <robh@kernel.org>
---
changes in v6:
- None
changes in v5:
- Modified uppercase hex values to lower case.
changes in v4:
- Removed unnecessary space.
changes in v3:
- Fixed compatible string issue.
- Modified ranges property description to maxItems.
- Modified address-cell property of interrupt-controller child node.
changes in v2:
- None
---
.../devicetree/bindings/pci/xlnx,xdma-host.yaml | 114 +++++++++++++++++++++
1 file changed, 114 insertions(+)
create mode 100644 Documentation/devicetree/bindings/pci/xlnx,xdma-host.yaml
diff --git a/Documentation/devicetree/bindings/pci/xlnx,xdma-host.yaml b/Documentation/devicetree/bindings/pci/xlnx,xdma-host.yaml
new file mode 100644
index 0000000..0aa00b8
--- /dev/null
+++ b/Documentation/devicetree/bindings/pci/xlnx,xdma-host.yaml
@@ -0,0 +1,114 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pci/xlnx,xdma-host.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Xilinx XDMA PL PCIe Root Port Bridge
+
+maintainers:
+ - Thippeswamy Havalige <thippeswamy.havalige@amd.com>
+
+allOf:
+ - $ref: /schemas/pci/pci-bus.yaml#
+
+properties:
+ compatible:
+ const: xlnx,xdma-host-3.00
+
+ reg:
+ maxItems: 1
+
+ ranges:
+ maxItems: 2
+
+ interrupts:
+ items:
+ - description: interrupt asserted when miscellaneous interrupt is received.
+ - description: msi0 interrupt asserted when an MSI is received.
+ - description: msi1 interrupt asserted when an MSI is received.
+
+ interrupt-names:
+ items:
+ - const: misc
+ - const: msi0
+ - const: msi1
+
+ interrupt-map-mask:
+ items:
+ - const: 0
+ - const: 0
+ - const: 0
+ - const: 7
+
+ interrupt-map:
+ maxItems: 4
+
+ "#interrupt-cells":
+ const: 1
+
+ interrupt-controller:
+ description: identifies the node as an interrupt controller
+ type: object
+ properties:
+ interrupt-controller: true
+
+ "#address-cells":
+ const: 0
+
+ "#interrupt-cells":
+ const: 1
+
+ required:
+ - interrupt-controller
+ - "#address-cells"
+ - "#interrupt-cells"
+
+ additionalProperties: false
+
+required:
+ - compatible
+ - reg
+ - ranges
+ - interrupts
+ - interrupt-map
+ - interrupt-map-mask
+ - "#interrupt-cells"
+ - interrupt-controller
+
+unevaluatedProperties: false
+
+examples:
+
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ soc {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ pcie@a0000000 {
+ compatible = "xlnx,xdma-host-3.00";
+ reg = <0x0 0xa0000000 0x0 0x10000000>;
+ ranges = <0x2000000 0x0 0xb0000000 0x0 0xb0000000 0x0 0x1000000>,
+ <0x43000000 0x5 0x0 0x5 0x0 0x0 0x1000000>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+ #interrupt-cells = <1>;
+ device_type = "pci";
+ interrupt-parent = <&gic>;
+ interrupts = <GIC_SPI 89 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 90 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 91 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "misc", "msi0", "msi1";
+ interrupt-map-mask = <0x0 0x0 0x0 0x7>;
+ interrupt-map = <0 0 0 1 &pcie_intc_0 0>,
+ <0 0 0 2 &pcie_intc_0 1>,
+ <0 0 0 3 &pcie_intc_0 2>,
+ <0 0 0 4 &pcie_intc_0 3>;
+ pcie_intc_0: interrupt-controller {
+ #address-cells = <0>;
+ #interrupt-cells = <1>;
+ interrupt-controller;
+ };
+ };
+ };
--
1.8.3.1
* [PATCH v6 3/3] PCI: xilinx-xdma: Add Xilinx XDMA Root Port driver
2023-08-18 9:35 [PATCH v6 0/3] Add support for Xilinx XDMA Soft IP as Root Port Thippeswamy Havalige
2023-08-18 9:35 ` [PATCH v6 1/3] PCI: xilinx-cpm: Move interrupt bit definitions to common header Thippeswamy Havalige
2023-08-18 9:35 ` [PATCH v6 2/3] dt-bindings: PCI: xilinx-xdma: Add YAML schemas for Xilinx XDMA PCIe Root Port Bridge Thippeswamy Havalige
@ 2023-08-18 9:35 ` Thippeswamy Havalige
2023-08-21 20:14 ` Bjorn Helgaas
2023-11-08 15:31 ` Bjorn Helgaas
2 siblings, 2 replies; 10+ messages in thread
From: Thippeswamy Havalige @ 2023-08-18 9:35 UTC (permalink / raw)
To: linux-arm-kernel, linux-kernel, linux-pci, devicetree
Cc: bhelgaas, lpieralisi, robh, kw, michal.simek,
krzysztof.kozlowski+dt, bharat.kumar.gogada, Thippeswamy Havalige
Add support for Xilinx XDMA Soft IP core as Root Port.
The Zynq UltraScale+ MPSoCs devices support XDMA soft IP module in
programmable logic.
The integrated XDMA soft IP block has integrated bridge function that
can act as PCIe Root Port.
Signed-off-by: Thippeswamy Havalige <thippeswamy.havalige@amd.com>
Signed-off-by: Bharat Kumar Gogada <bharat.kumar.gogada@amd.com>
---
changes in v6:
- Replaced chained irq's with regular interrupts.
- Modified interrupt names.
- Aligned to 80 columns
changes in v5:
- Added detailed comments for link_up check.
- Modified upper case hex values to lower case.
changes in v4:
- Fixed unsigned integer to integer.
- Fixed return type to EINVAL.
changes in v3:
- Changed license to 2023.
- Added bitfield header to avoid implicit warning.
- Fixed indentation issue.
- Fixed code-style.
changes in v2:
- Removed unnecessary inclusion of header files.
- Added a subset of interrupt error bits to common header files.
- Added pci_is_root_bus function.
- Removed kerneldoc comments of private function.
- Modified of_get_next_child API to of_get_child_by_name.
- Modified of_address_to_resource API to platform_get_resource.
- Modified return value.
---
drivers/pci/controller/Kconfig | 11 +
drivers/pci/controller/Makefile | 1 +
drivers/pci/controller/pcie-xilinx-common.h | 1 +
drivers/pci/controller/pcie-xilinx-dma-pl.c | 802 ++++++++++++++++++++++++++++
4 files changed, 815 insertions(+)
create mode 100644 drivers/pci/controller/pcie-xilinx-dma-pl.c
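To try the driver, the new option can be enabled together with its
dependencies, e.g. (an illustrative .config fragment; the exact baseline
defconfig will vary):

	CONFIG_ARCH_ZYNQMP=y
	CONFIG_PCI=y
	CONFIG_PCI_MSI=y
	CONFIG_PCIE_XILINX_DMA_PL=y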
diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig
index 0859be8..b9e8fb7 100644
--- a/drivers/pci/controller/Kconfig
+++ b/drivers/pci/controller/Kconfig
@@ -324,6 +324,17 @@ config PCIE_XILINX
Say 'Y' here if you want kernel to support the Xilinx AXI PCIe
Host Bridge driver.
+config PCIE_XILINX_DMA_PL
+ bool "Xilinx DMA PL PCIe host bridge support"
+ depends on ARCH_ZYNQMP || COMPILE_TEST
+ depends on PCI_MSI
+ select PCI_HOST_COMMON
+ help
+ Add support for the Xilinx PL DMA PCIe host bridge.
+ The controller is a soft IP which can act as a Root Port.
+ If you know your system provides Xilinx PCIe host controller
+ bridge DMA as soft IP say Y; if you are not sure, say N.
+
config PCIE_XILINX_NWL
bool "Xilinx NWL PCIe controller"
depends on ARCH_ZYNQMP || COMPILE_TEST
diff --git a/drivers/pci/controller/Makefile b/drivers/pci/controller/Makefile
index 37c8663..f2b19e6 100644
--- a/drivers/pci/controller/Makefile
+++ b/drivers/pci/controller/Makefile
@@ -17,6 +17,7 @@ obj-$(CONFIG_PCI_HOST_THUNDER_PEM) += pci-thunder-pem.o
obj-$(CONFIG_PCIE_XILINX) += pcie-xilinx.o
obj-$(CONFIG_PCIE_XILINX_NWL) += pcie-xilinx-nwl.o
obj-$(CONFIG_PCIE_XILINX_CPM) += pcie-xilinx-cpm.o
+obj-$(CONFIG_PCIE_XILINX_DMA_PL) += pcie-xilinx-dma-pl.o
obj-$(CONFIG_PCI_V3_SEMI) += pci-v3-semi.o
obj-$(CONFIG_PCI_XGENE) += pci-xgene.o
obj-$(CONFIG_PCI_XGENE_MSI) += pci-xgene-msi.o
diff --git a/drivers/pci/controller/pcie-xilinx-common.h b/drivers/pci/controller/pcie-xilinx-common.h
index e97d272..1832770 100644
--- a/drivers/pci/controller/pcie-xilinx-common.h
+++ b/drivers/pci/controller/pcie-xilinx-common.h
@@ -19,6 +19,7 @@
#define XILINX_PCIE_INTR_PME_TO_ACK_RCVD 15
#define XILINX_PCIE_INTR_INTX 16
#define XILINX_PCIE_INTR_PM_PME_RCVD 17
+#define XILINX_PCIE_INTR_MSI 17
#define XILINX_PCIE_INTR_SLV_UNSUPP 20
#define XILINX_PCIE_INTR_SLV_UNEXP 21
#define XILINX_PCIE_INTR_SLV_COMPL 22
diff --git a/drivers/pci/controller/pcie-xilinx-dma-pl.c b/drivers/pci/controller/pcie-xilinx-dma-pl.c
new file mode 100644
index 0000000..c9673e9
--- /dev/null
+++ b/drivers/pci/controller/pcie-xilinx-dma-pl.c
@@ -0,0 +1,802 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * PCIe host controller driver for Xilinx XDMA PCIe Bridge
+ *
+ * Copyright (C) 2023 Xilinx, Inc. All rights reserved.
+ */
+#include <linux/bitfield.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/of_address.h>
+#include <linux/of_pci.h>
+
+#include "../pci.h"
+#include "pcie-xilinx-common.h"
+
+/* Register definitions */
+#define XILINX_PCIE_DMA_REG_IDR 0x00000138
+#define XILINX_PCIE_DMA_REG_IMR 0x0000013c
+#define XILINX_PCIE_DMA_REG_PSCR 0x00000144
+#define XILINX_PCIE_DMA_REG_RPSC 0x00000148
+#define XILINX_PCIE_DMA_REG_MSIBASE1 0x0000014c
+#define XILINX_PCIE_DMA_REG_MSIBASE2 0x00000150
+#define XILINX_PCIE_DMA_REG_RPEFR 0x00000154
+#define XILINX_PCIE_DMA_REG_IDRN 0x00000160
+#define XILINX_PCIE_DMA_REG_IDRN_MASK 0x00000164
+#define XILINX_PCIE_DMA_REG_MSI_LOW 0x00000170
+#define XILINX_PCIE_DMA_REG_MSI_HI 0x00000174
+#define XILINX_PCIE_DMA_REG_MSI_LOW_MASK 0x00000178
+#define XILINX_PCIE_DMA_REG_MSI_HI_MASK 0x0000017c
+
+#define IMR(x) BIT(XILINX_PCIE_INTR_ ##x)
+
+#define XILINX_PCIE_INTR_IMR_ALL_MASK \
+ ( \
+ IMR(LINK_DOWN) | \
+ IMR(HOT_RESET) | \
+ IMR(CFG_TIMEOUT) | \
+ IMR(CORRECTABLE) | \
+ IMR(NONFATAL) | \
+ IMR(FATAL) | \
+ IMR(INTX) | \
+ IMR(MSI) | \
+ IMR(SLV_UNSUPP) | \
+ IMR(SLV_UNEXP) | \
+ IMR(SLV_COMPL) | \
+ IMR(SLV_ERRP) | \
+ IMR(SLV_CMPABT) | \
+ IMR(SLV_ILLBUR) | \
+ IMR(MST_DECERR) | \
+ IMR(MST_SLVERR) \
+ )
+
+#define XILINX_PCIE_DMA_IMR_ALL_MASK 0x0ff30fe9
+#define XILINX_PCIE_DMA_IDR_ALL_MASK 0xffffffff
+#define XILINX_PCIE_DMA_IDRN_MASK GENMASK(19, 16)
+
+/* Root Port Error Register definitions */
+#define XILINX_PCIE_DMA_RPEFR_ERR_VALID BIT(18)
+#define XILINX_PCIE_DMA_RPEFR_REQ_ID GENMASK(15, 0)
+#define XILINX_PCIE_DMA_RPEFR_ALL_MASK 0xffffffff
+
+/* Root Port Interrupt Register definitions */
+#define XILINX_PCIE_DMA_IDRN_SHIFT 16
+
+/* Root Port Status/control Register definitions */
+#define XILINX_PCIE_DMA_REG_RPSC_BEN BIT(0)
+
+/* Phy Status/Control Register definitions */
+#define XILINX_PCIE_DMA_REG_PSCR_LNKUP BIT(11)
+
+/* Number of MSI IRQs */
+#define XILINX_NUM_MSI_IRQS 64
+
+struct xilinx_msi {
+ struct irq_domain *msi_domain;
+ unsigned long *bitmap;
+ struct irq_domain *dev_domain;
+ struct mutex lock; /* protect bitmap variable */
+ int irq_msi0;
+ int irq_msi1;
+};
+
+/**
+ * struct pl_dma_pcie - PCIe port information
+ * @dev: Device pointer
+ * @reg_base: IO Mapped Register Base
+ * @irq: Interrupt number
+ * @cfg: Holds mappings of config space window
+ * @phys_reg_base: Physical address of reg base
+ * @intx_domain: Legacy IRQ domain pointer
+ * @pldma_domain: PL DMA IRQ domain pointer
+ * @resources: Bus Resources
+ * @msi: MSI information
+ * @intx_irq: INTx error interrupt number
+ * @lock: Lock protecting shared register access
+ */
+struct pl_dma_pcie {
+ struct device *dev;
+ void __iomem *reg_base;
+ int irq;
+ struct pci_config_window *cfg;
+ phys_addr_t phys_reg_base;
+ struct irq_domain *intx_domain;
+ struct irq_domain *pldma_domain;
+ struct list_head resources;
+ struct xilinx_msi msi;
+ int intx_irq;
+ raw_spinlock_t lock;
+};
+
+static inline u32 pcie_read(struct pl_dma_pcie *port, u32 reg)
+{
+ return readl(port->reg_base + reg);
+}
+
+static inline void pcie_write(struct pl_dma_pcie *port, u32 val, u32 reg)
+{
+ writel(val, port->reg_base + reg);
+}
+
+static inline bool xilinx_pl_dma_pcie_link_up(struct pl_dma_pcie *port)
+{
+ return (pcie_read(port, XILINX_PCIE_DMA_REG_PSCR) &
+ XILINX_PCIE_DMA_REG_PSCR_LNKUP) ? 1 : 0;
+}
+
+static void xilinx_pl_dma_pcie_clear_err_interrupts(struct pl_dma_pcie *port)
+{
+ unsigned long val = pcie_read(port, XILINX_PCIE_DMA_REG_RPEFR);
+
+ if (val & XILINX_PCIE_DMA_RPEFR_ERR_VALID) {
+ dev_dbg(port->dev, "Requester ID %lu\n",
+ val & XILINX_PCIE_DMA_RPEFR_REQ_ID);
+ pcie_write(port, XILINX_PCIE_DMA_RPEFR_ALL_MASK,
+ XILINX_PCIE_DMA_REG_RPEFR);
+ }
+}
+
+static bool xilinx_pl_dma_pcie_valid_device(struct pci_bus *bus,
+ unsigned int devfn)
+{
+ struct pl_dma_pcie *port = bus->sysdata;
+
+ /* Check if link is up when trying to access downstream ports */
+ if (!pci_is_root_bus(bus)) {
+ /*
+ * Checking whether link is up here is a last line of defence,
+ * if the link goes down after we check for link-up, we have a
+ * problem: if a PIO request is initiated while link-down, the
+ * whole controller hangs, and even after link comes up again,
+ * previous PIO requests won't work, and a reset of the whole
+ * PCIe controller is needed. Henceforth we need link-up check
+ * here to avoid sending PIO request when link is down. This
+ * check is racy by definition and does not make controller hang
+ * if the link goes down after this check is performed.
+ */
+ if (!xilinx_pl_dma_pcie_link_up(port))
+ return false;
+ } else if (devfn > 0)
+ /* Only one device down on each root port */
+ return false;
+
+ return true;
+}
+
+static void __iomem *xilinx_pl_dma_pcie_map_bus(struct pci_bus *bus,
+ unsigned int devfn, int where)
+{
+ struct pl_dma_pcie *port = bus->sysdata;
+
+ if (!xilinx_pl_dma_pcie_valid_device(bus, devfn))
+ return NULL;
+
+ return port->reg_base + PCIE_ECAM_OFFSET(bus->number, devfn, where);
+}
+
+/* PCIe operations */
+static struct pci_ecam_ops xilinx_pl_dma_pcie_ops = {
+ .pci_ops = {
+ .map_bus = xilinx_pl_dma_pcie_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
+ }
+};
+
+static void xilinx_pl_dma_pcie_enable_msi(struct pl_dma_pcie *port)
+{
+ phys_addr_t msi_addr = port->phys_reg_base;
+
+ pcie_write(port, upper_32_bits(msi_addr), XILINX_PCIE_DMA_REG_MSIBASE1);
+ pcie_write(port, lower_32_bits(msi_addr), XILINX_PCIE_DMA_REG_MSIBASE2);
+}
+
+static void xilinx_mask_intx_irq(struct irq_data *data)
+{
+ struct pl_dma_pcie *port = irq_data_get_irq_chip_data(data);
+ unsigned long flags;
+ u32 mask, val;
+
+ mask = BIT(data->hwirq + XILINX_PCIE_DMA_IDRN_SHIFT);
+ raw_spin_lock_irqsave(&port->lock, flags);
+ val = pcie_read(port, XILINX_PCIE_DMA_REG_IDRN_MASK);
+ pcie_write(port, (val & (~mask)), XILINX_PCIE_DMA_REG_IDRN_MASK);
+ raw_spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static void xilinx_unmask_intx_irq(struct irq_data *data)
+{
+ struct pl_dma_pcie *port = irq_data_get_irq_chip_data(data);
+ unsigned long flags;
+ u32 mask, val;
+
+ mask = BIT(data->hwirq + XILINX_PCIE_DMA_IDRN_SHIFT);
+ raw_spin_lock_irqsave(&port->lock, flags);
+ val = pcie_read(port, XILINX_PCIE_DMA_REG_IDRN_MASK);
+ pcie_write(port, (val | mask), XILINX_PCIE_DMA_REG_IDRN_MASK);
+ raw_spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static struct irq_chip xilinx_leg_irq_chip = {
+ .name = "pl_dma:INTx",
+ .irq_mask = xilinx_mask_intx_irq,
+ .irq_unmask = xilinx_unmask_intx_irq,
+};
+
+static int xilinx_pl_dma_pcie_intx_map(struct irq_domain *domain,
+ unsigned int irq, irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &xilinx_leg_irq_chip, handle_level_irq);
+ irq_set_chip_data(irq, domain->host_data);
+ irq_set_status_flags(irq, IRQ_LEVEL);
+
+ return 0;
+}
+
+/* INTx IRQ Domain operations */
+static const struct irq_domain_ops intx_domain_ops = {
+ .map = xilinx_pl_dma_pcie_intx_map,
+};
+
+static irqreturn_t xilinx_pl_dma_pcie_msi_handler_high(int irq, void *args)
+{
+ struct xilinx_msi *msi;
+ unsigned long status;
+ u32 bit, virq;
+ struct pl_dma_pcie *port = args;
+
+ msi = &port->msi;
+
+ while ((status = pcie_read(port, XILINX_PCIE_DMA_REG_MSI_HI)) != 0) {
+ for_each_set_bit(bit, &status, 32) {
+ pcie_write(port, 1 << bit, XILINX_PCIE_DMA_REG_MSI_HI);
+ bit = bit + 32;
+ virq = irq_find_mapping(msi->dev_domain, bit);
+ if (virq)
+ generic_handle_irq(virq);
+ }
+ }
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t xilinx_pl_dma_pcie_msi_handler_low(int irq, void *args)
+{
+ struct pl_dma_pcie *port = args;
+ struct xilinx_msi *msi;
+ unsigned long status;
+ u32 bit, virq;
+
+ msi = &port->msi;
+
+ while ((status = pcie_read(port, XILINX_PCIE_DMA_REG_MSI_LOW)) != 0) {
+ for_each_set_bit(bit, &status, 32) {
+ pcie_write(port, 1 << bit, XILINX_PCIE_DMA_REG_MSI_LOW);
+ virq = irq_find_mapping(msi->dev_domain, bit);
+ if (virq)
+ generic_handle_irq(virq);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t xilinx_pl_dma_pcie_event_flow(int irq, void *args)
+{
+ struct pl_dma_pcie *port = args;
+ unsigned long val;
+ int i;
+
+ val = pcie_read(port, XILINX_PCIE_DMA_REG_IDR);
+ val &= pcie_read(port, XILINX_PCIE_DMA_REG_IMR);
+ for_each_set_bit(i, &val, 32)
+ generic_handle_domain_irq(port->pldma_domain, i);
+
+ pcie_write(port, val, XILINX_PCIE_DMA_REG_IDR);
+
+ return IRQ_HANDLED;
+}
+
+#define _IC(x, s) \
+ [XILINX_PCIE_INTR_ ## x] = { __stringify(x), s }
+
+static const struct {
+ const char *sym;
+ const char *str;
+} intr_cause[32] = {
+ _IC(LINK_DOWN, "Link Down"),
+ _IC(HOT_RESET, "Hot reset"),
+ _IC(CFG_TIMEOUT, "ECAM access timeout"),
+ _IC(CORRECTABLE, "Correctable error message"),
+ _IC(NONFATAL, "Non fatal error message"),
+ _IC(FATAL, "Fatal error message"),
+ _IC(SLV_UNSUPP, "Slave unsupported request"),
+ _IC(SLV_UNEXP, "Slave unexpected completion"),
+ _IC(SLV_COMPL, "Slave completion timeout"),
+ _IC(SLV_ERRP, "Slave Error Poison"),
+ _IC(SLV_CMPABT, "Slave Completer Abort"),
+ _IC(SLV_ILLBUR, "Slave Illegal Burst"),
+ _IC(MST_DECERR, "Master decode error"),
+ _IC(MST_SLVERR, "Master slave error"),
+};
+
+static irqreturn_t xilinx_pl_dma_pcie_intr_handler(int irq, void *dev_id)
+{
+ struct pl_dma_pcie *port = (struct pl_dma_pcie *)dev_id;
+ struct device *dev = port->dev;
+ struct irq_data *d;
+
+ d = irq_domain_get_irq_data(port->pldma_domain, irq);
+ switch (d->hwirq) {
+ case XILINX_PCIE_INTR_CORRECTABLE:
+ case XILINX_PCIE_INTR_NONFATAL:
+ case XILINX_PCIE_INTR_FATAL:
+ xilinx_pl_dma_pcie_clear_err_interrupts(port);
+ fallthrough;
+
+ default:
+ if (intr_cause[d->hwirq].str)
+ dev_warn(dev, "%s\n", intr_cause[d->hwirq].str);
+ else
+ dev_warn(dev, "Unknown IRQ %ld\n", d->hwirq);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static struct irq_chip xilinx_msi_irq_chip = {
+ .name = "pl_dma:PCIe MSI",
+ .irq_enable = pci_msi_unmask_irq,
+ .irq_disable = pci_msi_mask_irq,
+ .irq_mask = pci_msi_mask_irq,
+ .irq_unmask = pci_msi_unmask_irq,
+};
+
+static struct msi_domain_info xilinx_msi_domain_info = {
+ .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_MULTI_PCI_MSI),
+ .chip = &xilinx_msi_irq_chip,
+};
+
+static void xilinx_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
+{
+ struct pl_dma_pcie *pcie = irq_data_get_irq_chip_data(data);
+ phys_addr_t msi_addr = pcie->phys_reg_base;
+
+ msg->address_lo = lower_32_bits(msi_addr);
+ msg->address_hi = upper_32_bits(msi_addr);
+ msg->data = data->hwirq;
+}
+
+static int xilinx_msi_set_affinity(struct irq_data *irq_data,
+ const struct cpumask *mask, bool force)
+{
+ return -EINVAL;
+}
+
+static struct irq_chip xilinx_irq_chip = {
+ .name = "pl_dma:MSI",
+ .irq_compose_msi_msg = xilinx_compose_msi_msg,
+ .irq_set_affinity = xilinx_msi_set_affinity,
+};
+
+static int xilinx_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *args)
+{
+ struct pl_dma_pcie *pcie = domain->host_data;
+ struct xilinx_msi *msi = &pcie->msi;
+ int bit, i;
+
+ mutex_lock(&msi->lock);
+ bit = bitmap_find_free_region(msi->bitmap, XILINX_NUM_MSI_IRQS,
+ get_count_order(nr_irqs));
+ if (bit < 0) {
+ mutex_unlock(&msi->lock);
+ return -ENOSPC;
+ }
+
+ for (i = 0; i < nr_irqs; i++) {
+ irq_domain_set_info(domain, virq + i, bit + i, &xilinx_irq_chip,
+ domain->host_data, handle_simple_irq,
+ NULL, NULL);
+ }
+ mutex_unlock(&msi->lock);
+ return 0;
+}
+
+static void xilinx_irq_domain_free(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs)
+{
+ struct irq_data *data = irq_domain_get_irq_data(domain, virq);
+ struct pl_dma_pcie *pcie = irq_data_get_irq_chip_data(data);
+ struct xilinx_msi *msi = &pcie->msi;
+
+ mutex_lock(&msi->lock);
+ bitmap_release_region(msi->bitmap, data->hwirq,
+ get_count_order(nr_irqs));
+ mutex_unlock(&msi->lock);
+}
+
+static const struct irq_domain_ops dev_msi_domain_ops = {
+ .alloc = xilinx_irq_domain_alloc,
+ .free = xilinx_irq_domain_free,
+};
+
+static void xilinx_pl_dma_pcie_free_irq_domains(struct pl_dma_pcie *port)
+{
+ struct xilinx_msi *msi = &port->msi;
+
+ if (port->intx_domain) {
+ irq_domain_remove(port->intx_domain);
+ port->intx_domain = NULL;
+ }
+
+ if (msi->dev_domain) {
+ irq_domain_remove(msi->dev_domain);
+ msi->dev_domain = NULL;
+ }
+
+ if (msi->msi_domain) {
+ irq_domain_remove(msi->msi_domain);
+ msi->msi_domain = NULL;
+ }
+}
+
+static int xilinx_pl_dma_pcie_init_msi_irq_domain(struct pl_dma_pcie *port)
+{
+ struct device *dev = port->dev;
+ struct xilinx_msi *msi = &port->msi;
+ int size = BITS_TO_LONGS(XILINX_NUM_MSI_IRQS) * sizeof(long);
+ struct fwnode_handle *fwnode = of_node_to_fwnode(port->dev->of_node);
+
+ msi->dev_domain = irq_domain_add_linear(NULL, XILINX_NUM_MSI_IRQS,
+ &dev_msi_domain_ops, port);
+ if (!msi->dev_domain)
+ goto out;
+
+ msi->msi_domain = pci_msi_create_irq_domain(fwnode,
+ &xilinx_msi_domain_info,
+ msi->dev_domain);
+ if (!msi->msi_domain)
+ goto out;
+
+ mutex_init(&msi->lock);
+ msi->bitmap = kzalloc(size, GFP_KERNEL);
+ if (!msi->bitmap)
+ goto out;
+
+ raw_spin_lock_init(&port->lock);
+ xilinx_pl_dma_pcie_enable_msi(port);
+
+ return 0;
+
+out:
+ xilinx_pl_dma_pcie_free_irq_domains(port);
+ dev_err(dev, "Failed to allocate MSI IRQ domains\n");
+ return -ENOMEM;
+}
+
+/* INTx error interrupts are Xilinx controller specific interrupt, used to
+ * notify user about error's such as cfg timeout, slave unsupported requests,
+ * fatal and non fatal error etc.
+ */
+
+static irqreturn_t xilinx_pl_dma_pcie_intx_flow(int irq, void *args)
+{
+ unsigned long val;
+ int i;
+ struct pl_dma_pcie *port = args;
+
+ val = FIELD_GET(XILINX_PCIE_DMA_IDRN_MASK,
+ pcie_read(port, XILINX_PCIE_DMA_REG_IDRN));
+
+ for_each_set_bit(i, &val, PCI_NUM_INTX)
+ generic_handle_domain_irq(port->intx_domain, i);
+ return IRQ_HANDLED;
+}
+
+static void xilinx_pl_dma_pcie_mask_event_irq(struct irq_data *d)
+{
+ struct pl_dma_pcie *port = irq_data_get_irq_chip_data(d);
+ u32 val;
+
+ raw_spin_lock(&port->lock);
+ val = pcie_read(port, XILINX_PCIE_DMA_REG_IMR);
+ val &= ~BIT(d->hwirq);
+ pcie_write(port, val, XILINX_PCIE_DMA_REG_IMR);
+ raw_spin_unlock(&port->lock);
+}
+
+static void xilinx_pl_dma_pcie_unmask_event_irq(struct irq_data *d)
+{
+ struct pl_dma_pcie *port = irq_data_get_irq_chip_data(d);
+ u32 val;
+
+ raw_spin_lock(&port->lock);
+ val = pcie_read(port, XILINX_PCIE_DMA_REG_IMR);
+ val |= BIT(d->hwirq);
+ pcie_write(port, val, XILINX_PCIE_DMA_REG_IMR);
+ raw_spin_unlock(&port->lock);
+}
+
+static struct irq_chip xilinx_pl_dma_pcie_event_irq_chip = {
+ .name = "pl_dma:RC-Event",
+ .irq_mask = xilinx_pl_dma_pcie_mask_event_irq,
+ .irq_unmask = xilinx_pl_dma_pcie_unmask_event_irq,
+};
+
+static int xilinx_pl_dma_pcie_event_map(struct irq_domain *domain,
+ unsigned int irq, irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &xilinx_pl_dma_pcie_event_irq_chip,
+ handle_level_irq);
+ irq_set_chip_data(irq, domain->host_data);
+ irq_set_status_flags(irq, IRQ_LEVEL);
+
+ return 0;
+}
+
+static const struct irq_domain_ops event_domain_ops = {
+ .map = xilinx_pl_dma_pcie_event_map,
+};
+
+/**
+ * xilinx_pl_dma_pcie_init_irq_domain - Initialize IRQ domain
+ * @port: PCIe port information
+ *
+ * Return: '0' on success and error value on failure
+ */
+static int xilinx_pl_dma_pcie_init_irq_domain(struct pl_dma_pcie *port)
+{
+ struct device *dev = port->dev;
+ struct device_node *node = dev->of_node;
+ struct device_node *pcie_intc_node;
+ int ret;
+
+ /* Setup INTx */
+ pcie_intc_node = of_get_child_by_name(node, "interrupt-controller");
+ if (!pcie_intc_node) {
+ dev_err(dev, "No PCIe Intc node found\n");
+ return -EINVAL;
+ }
+
+ port->pldma_domain = irq_domain_add_linear(pcie_intc_node, 32,
+ &event_domain_ops, port);
+ if (!port->pldma_domain)
+ return -ENOMEM;
+
+ irq_domain_update_bus_token(port->pldma_domain, DOMAIN_BUS_NEXUS);
+
+ port->intx_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
+ &intx_domain_ops, port);
+ if (!port->intx_domain) {
+ dev_err(dev, "Failed to get a INTx IRQ domain\n");
+ return PTR_ERR(port->intx_domain);
+ }
+
+ irq_domain_update_bus_token(port->intx_domain, DOMAIN_BUS_WIRED);
+
+ ret = xilinx_pl_dma_pcie_init_msi_irq_domain(port);
+ if (ret != 0) {
+ irq_domain_remove(port->intx_domain);
+ return -ENOMEM;
+ }
+
+ of_node_put(pcie_intc_node);
+ raw_spin_lock_init(&port->lock);
+
+ return 0;
+}
+
+static int xilinx_pl_dma_pcie_setup_irq(struct pl_dma_pcie *port)
+{
+ struct device *dev = port->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ int i, irq, err;
+
+ port->irq = platform_get_irq(pdev, 0);
+ if (port->irq < 0)
+ return port->irq;
+
+ for (i = 0; i < ARRAY_SIZE(intr_cause); i++) {
+ int err;
+
+ if (!intr_cause[i].str)
+ continue;
+
+ irq = irq_create_mapping(port->pldma_domain, i);
+ if (!irq) {
+ dev_err(dev, "Failed to map interrupt\n");
+ return -ENXIO;
+ }
+
+ err = devm_request_irq(dev, irq, xilinx_pl_dma_pcie_intr_handler,
+ IRQF_SHARED | IRQF_NO_THREAD, intr_cause[i].sym, port);
+ if (err) {
+ dev_err(dev, "Failed to request IRQ %d\n", irq);
+ return err;
+ }
+ }
+
+ port->intx_irq = irq_create_mapping(port->pldma_domain,
+ XILINX_PCIE_INTR_INTX);
+ if (!port->intx_irq) {
+ dev_err(dev, "Failed to map INTx interrupt\n");
+ return -ENXIO;
+ }
+
+ err = devm_request_irq(dev, port->intx_irq, xilinx_pl_dma_pcie_intx_flow,
+ IRQF_SHARED | IRQF_NO_THREAD, NULL, port);
+ if (err) {
+ dev_err(dev, "Failed to request INTx IRQ %d\n", irq);
+ return err;
+ }
+ err = devm_request_irq(dev, port->irq, xilinx_pl_dma_pcie_event_flow,
+ IRQF_SHARED | IRQF_NO_THREAD, NULL, port);
+ if (err) {
+ dev_err(dev, "Failed to request event IRQ %d\n", irq);
+ return err;
+ }
+ return 0;
+}
+
+static void xilinx_pl_dma_pcie_init_port(struct pl_dma_pcie *port)
+{
+ if (xilinx_pl_dma_pcie_link_up(port))
+ dev_info(port->dev, "PCIe Link is UP\n");
+ else
+ dev_info(port->dev, "PCIe Link is DOWN\n");
+
+ /* Disable all interrupts */
+ pcie_write(port, ~XILINX_PCIE_DMA_IDR_ALL_MASK,
+ XILINX_PCIE_DMA_REG_IMR);
+
+ /* Clear pending interrupts */
+ pcie_write(port, pcie_read(port, XILINX_PCIE_DMA_REG_IDR) &
+ XILINX_PCIE_DMA_IMR_ALL_MASK,
+ XILINX_PCIE_DMA_REG_IDR);
+
+ /* Needed for MSI DECODE MODE */
+ pcie_write(port, XILINX_PCIE_DMA_IDR_ALL_MASK, XILINX_PCIE_DMA_REG_MSI_LOW_MASK);
+ pcie_write(port, XILINX_PCIE_DMA_IDR_ALL_MASK, XILINX_PCIE_DMA_REG_MSI_HI_MASK);
+
+ /* Set the Bridge enable bit */
+ pcie_write(port, pcie_read(port, XILINX_PCIE_DMA_REG_RPSC) |
+ XILINX_PCIE_DMA_REG_RPSC_BEN,
+ XILINX_PCIE_DMA_REG_RPSC);
+}
+
+static int xilinx_request_msi_irq(struct pl_dma_pcie *port)
+{
+ struct device *dev = port->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ int ret;
+
+ port->msi.irq_msi0 = platform_get_irq_byname(pdev, "msi0");
+ if (port->msi.irq_msi0 <= 0) {
+ dev_err(dev, "Unable to find msi0 IRQ line\n");
+ return port->msi.irq_msi0;
+ }
+ ret = devm_request_irq(dev, port->msi.irq_msi0, xilinx_pl_dma_pcie_msi_handler_low,
+ IRQF_SHARED | IRQF_NO_THREAD, "xlnx-pcie-dma-pl",
+ port);
+ if (ret) {
+ dev_err(dev, "Failed to register interrupt\n");
+ return ret;
+ }
+ port->msi.irq_msi1 = platform_get_irq_byname(pdev, "msi1");
+ if (port->msi.irq_msi1 <= 0) {
+ dev_err(dev, "Unable to find msi1 IRQ line\n");
+ return port->msi.irq_msi1;
+ }
+ ret = devm_request_irq(dev, port->msi.irq_msi1, xilinx_pl_dma_pcie_msi_handler_high,
+ IRQF_SHARED | IRQF_NO_THREAD, "xlnx-pcie-dma-pl",
+ port);
+ if (ret) {
+ dev_err(dev, "Failed to register interrupt\n");
+ return ret;
+ }
+ return 0;
+}
+
+static int xilinx_pl_dma_pcie_parse_dt(struct pl_dma_pcie *port,
+ struct resource *bus_range)
+{
+ struct device *dev = port->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct resource *res;
+ int err;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev, "Missing \"reg\" property\n");
+ return -ENXIO;
+ }
+ port->phys_reg_base = res->start;
+
+ port->cfg = pci_ecam_create(dev, res, bus_range, &xilinx_pl_dma_pcie_ops);
+ if (IS_ERR(port->cfg))
+ return PTR_ERR(port->cfg);
+
+ port->reg_base = port->cfg->win;
+
+ err = xilinx_request_msi_irq(port);
+ if (err) {
+ pci_ecam_free(port->cfg);
+ return err;
+ }
+
+ return 0;
+}
+
+static int xilinx_pl_dma_pcie_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct pl_dma_pcie *port;
+ struct pci_host_bridge *bridge;
+ struct resource_entry *bus;
+ int err;
+
+ bridge = devm_pci_alloc_host_bridge(dev, sizeof(*port));
+ if (!bridge)
+ return -ENODEV;
+
+ port = pci_host_bridge_priv(bridge);
+
+ port->dev = dev;
+
+ bus = resource_list_first_type(&bridge->windows, IORESOURCE_BUS);
+ if (!bus)
+ return -ENODEV;
+
+ err = xilinx_pl_dma_pcie_parse_dt(port, bus->res);
+ if (err) {
+ dev_err(dev, "Parsing DT failed\n");
+ return err;
+ }
+
+ xilinx_pl_dma_pcie_init_port(port);
+
+ err = xilinx_pl_dma_pcie_init_irq_domain(port);
+ if (err)
+ goto err_irq_domain;
+
+ err = xilinx_pl_dma_pcie_setup_irq(port);
+
+ bridge->sysdata = port;
+ bridge->ops = &xilinx_pl_dma_pcie_ops.pci_ops;
+
+ err = pci_host_probe(bridge);
+ if (err < 0)
+ goto err_host_bridge;
+
+ return 0;
+
+err_host_bridge:
+ xilinx_pl_dma_pcie_free_irq_domains(port);
+
+err_irq_domain:
+ pci_ecam_free(port->cfg);
+ return err;
+}
+
+static const struct of_device_id xilinx_pl_dma_pcie_of_match[] = {
+ {
+ .compatible = "xlnx,xdma-host-3.00",
+ },
+ {}
+};
+
+static struct platform_driver xilinx_pl_dma_pcie_driver = {
+ .driver = {
+ .name = "xilinx-xdma-pcie",
+ .of_match_table = xilinx_pl_dma_pcie_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = xilinx_pl_dma_pcie_probe,
+};
+
+builtin_platform_driver(xilinx_pl_dma_pcie_driver);
--
1.8.3.1
* Re: [PATCH v6 3/3] PCI: xilinx-xdma: Add Xilinx XDMA Root Port driver
2023-08-18 9:35 ` [PATCH v6 3/3] PCI: xilinx-xdma: Add Xilinx XDMA Root Port driver Thippeswamy Havalige
@ 2023-08-21 20:14 ` Bjorn Helgaas
2023-08-28 9:09 ` Havalige, Thippeswamy
2023-11-08 15:31 ` Bjorn Helgaas
1 sibling, 1 reply; 10+ messages in thread
From: Bjorn Helgaas @ 2023-08-21 20:14 UTC (permalink / raw)
To: Thippeswamy Havalige
Cc: linux-arm-kernel, linux-kernel, linux-pci, devicetree, bhelgaas,
lpieralisi, robh, kw, michal.simek, krzysztof.kozlowski+dt,
bharat.kumar.gogada
On Fri, Aug 18, 2023 at 03:05:07PM +0530, Thippeswamy Havalige wrote:
> Add support for Xilinx XDMA Soft IP core as Root Port.
>
> The Zynq UltraScale+ MPSoCs devices support XDMA soft IP module in
> programmable logic.
>
> The integrated XDMA soft IP block has integrated bridge function that
> can act as PCIe Root Port.
>
> Signed-off-by: Thippeswamy Havalige <thippeswamy.havalige@amd.com>
> Signed-off-by: Bharat Kumar Gogada <bharat.kumar.gogada@amd.com>
> ---
> changes in v6:
> - Replaced chained irq's with regular interrupts.
Thanks a million for working this out!
Trivial comments below, wait a couple days before reposting in case
there are other comments.
> +static inline bool xilinx_pl_dma_pcie_link_up(struct pl_dma_pcie *port)
> +{
> + return (pcie_read(port, XILINX_PCIE_DMA_REG_PSCR) &
> + XILINX_PCIE_DMA_REG_PSCR_LNKUP) ? 1 : 0;
This function returns bool, so I think true/false would be more
appropriate than 1/0.
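E.g. something along these lines (untested sketch, relying on the implicit
conversion of the masked value to bool):

	static inline bool xilinx_pl_dma_pcie_link_up(struct pl_dma_pcie *port)
	{
		return pcie_read(port, XILINX_PCIE_DMA_REG_PSCR) &
		       XILINX_PCIE_DMA_REG_PSCR_LNKUP;
	}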
> +static bool xilinx_pl_dma_pcie_valid_device(struct pci_bus *bus,
> + unsigned int devfn)
> +{
> + struct pl_dma_pcie *port = bus->sysdata;
> +
> + /* Check if link is up when trying to access downstream ports */
> + if (!pci_is_root_bus(bus)) {
> + /*
> + * Checking whether link is up here is a last line of defence,
> + * if the link goes down after we check for link-up, we have a
> + * problem: if a PIO request is initiated while link-down, the
> + * whole controller hangs, and even after link comes up again,
> + * previous PIO requests won't work, and a reset of the whole
> + * PCIe controller is needed. Henceforth we need link-up check
> + * here to avoid sending PIO request when link is down. This
> + * check is racy by definition and does not make controller hang
> + * if the link goes down after this check is performed.
This comment doesn't make sense to me. "If PIO request initiated
while link-down, controller hangs ... This check is racy and does not
make controller hang if link goes down." Which is it?
My *guess* is that this check narrows the window but doesn't close it,
so if xilinx_pl_dma_pcie_link_up() finds the link up, but the link
goes down before pci_generic_config_read() initiates the PIO request,
the controller hangs, and a reset is required.
> + */
> + if (!xilinx_pl_dma_pcie_link_up(port))
> + return false;
> + } else if (devfn > 0)
> + /* Only one device down on each root port */
> + return false;
> +
> + return true;
> +}
> +/* INTx error interrupts are Xilinx controller specific interrupt, used to
> + * notify user about error's such as cfg timeout, slave unsupported requests,
s/error's/errors/
> + * fatal and non fatal error etc.
> + err = devm_request_irq(dev, irq, xilinx_pl_dma_pcie_intr_handler,
> + IRQF_SHARED | IRQF_NO_THREAD, intr_cause[i].sym, port);
Rewrap to fit in 80 columns.
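Something like this would fit (sketch only, whitespace approximate):

		err = devm_request_irq(dev, irq,
				       xilinx_pl_dma_pcie_intr_handler,
				       IRQF_SHARED | IRQF_NO_THREAD,
				       intr_cause[i].sym, port);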
> + /* Needed for MSI DECODE MODE */
> + pcie_write(port, XILINX_PCIE_DMA_IDR_ALL_MASK, XILINX_PCIE_DMA_REG_MSI_LOW_MASK);
> + pcie_write(port, XILINX_PCIE_DMA_IDR_ALL_MASK, XILINX_PCIE_DMA_REG_MSI_HI_MASK);
Rewrap.
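e.g. (sketch only, whitespace approximate):

	pcie_write(port, XILINX_PCIE_DMA_IDR_ALL_MASK,
		   XILINX_PCIE_DMA_REG_MSI_LOW_MASK);
	pcie_write(port, XILINX_PCIE_DMA_IDR_ALL_MASK,
		   XILINX_PCIE_DMA_REG_MSI_HI_MASK);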
Bjorn
* RE: [PATCH v6 3/3] PCI: xilinx-xdma: Add Xilinx XDMA Root Port driver
2023-08-21 20:14 ` Bjorn Helgaas
@ 2023-08-28 9:09 ` Havalige, Thippeswamy
2023-08-28 9:44 ` Havalige, Thippeswamy
2023-08-28 12:01 ` Havalige, Thippeswamy
0 siblings, 2 replies; 10+ messages in thread
From: Havalige, Thippeswamy @ 2023-08-28 9:09 UTC (permalink / raw)
To: Bjorn Helgaas
Cc: linux-arm-kernel@lists.infradead.org,
linux-kernel@vger.kernel.org, linux-pci@vger.kernel.org,
devicetree@vger.kernel.org, bhelgaas@google.com,
lpieralisi@kernel.org, robh@kernel.org, kw@linux.com,
Simek, Michal, krzysztof.kozlowski+dt@linaro.org,
Gogada, Bharat Kumar
Hi Bjorn,
Thanks, I'll take all your comments and update them in the next patch.
Regards,
Thippeswamy H
> -----Original Message-----
> From: Bjorn Helgaas <helgaas@kernel.org>
> Sent: Tuesday, August 22, 2023 1:44 AM
> To: Havalige, Thippeswamy <thippeswamy.havalige@amd.com>
> Cc: linux-arm-kernel@lists.infradead.org; linux-kernel@vger.kernel.org; linux-
> pci@vger.kernel.org; devicetree@vger.kernel.org; bhelgaas@google.com;
> lpieralisi@kernel.org; robh@kernel.org; kw@linux.com; Simek, Michal
> <michal.simek@amd.com>; krzysztof.kozlowski+dt@linaro.org; Gogada,
> Bharat Kumar <bharat.kumar.gogada@amd.com>
> Subject: Re: [PATCH v6 3/3] PCI: xilinx-xdma: Add Xilinx XDMA Root Port driver
>
> On Fri, Aug 18, 2023 at 03:05:07PM +0530, Thippeswamy Havalige wrote:
> > Add support for Xilinx XDMA Soft IP core as Root Port.
> >
> > The Zynq UltraScale+ MPSoCs devices support XDMA soft IP module in
> > programmable logic.
> >
> > The integrated XDMA soft IP block has integrated bridge function that
> > can act as PCIe Root Port.
> >
> > Signed-off-by: Thippeswamy Havalige <thippeswamy.havalige@amd.com>
> > Signed-off-by: Bharat Kumar Gogada <bharat.kumar.gogada@amd.com>
> > ---
> > changes in v6:
> > - Replaced chained irq's with regular interrupts.
>
> Thanks a million for working this out!
>
> Trivial comments below, wait a couple days before reposting in case there are
> other comments.
>
> > +static inline bool xilinx_pl_dma_pcie_link_up(struct pl_dma_pcie
> > +*port) {
> > + return (pcie_read(port, XILINX_PCIE_DMA_REG_PSCR) &
> > + XILINX_PCIE_DMA_REG_PSCR_LNKUP) ? 1 : 0;
>
> This function returns bool, so I think true/false would be more appropriate
> than 1/0.
>
> > +static bool xilinx_pl_dma_pcie_valid_device(struct pci_bus *bus,
> > + unsigned int devfn)
> > +{
> > + struct pl_dma_pcie *port = bus->sysdata;
> > +
> > + /* Check if link is up when trying to access downstream ports */
> > + if (!pci_is_root_bus(bus)) {
> > + /*
> > + * Checking whether link is up here is a last line of defence,
> > + * if the link goes down after we check for link-up, we have a
> > + * problem: if a PIO request is initiated while link-down, the
> > + * whole controller hangs, and even after link comes up again,
> > + * previous PIO requests won't work, and a reset of the whole
> > + * PCIe controller is needed. Henceforth we need link-up
> check
> > + * here to avoid sending PIO request when link is down. This
> > + * check is racy by definition and does not make controller
> hang
> > + * if the link goes down after this check is performed.
>
> This comment doesn't make sense to me. "If PIO request initiated while link-
> down, controller hangs ... This check is racy and does not make controller
> hang if link goes down." Which is it?
>
> My *guess* is that this check narrows the window but doesn't close it, so if
> xilinx_pl_dma_pcie_link_up() finds the link up, but the link goes down before
> pci_generic_config_read() initiates the PIO request, the controller hangs, and
> a reset is required.
>
> > + */
> > + if (!xilinx_pl_dma_pcie_link_up(port))
> > + return false;
> > + } else if (devfn > 0)
> > + /* Only one device down on each root port */
> > + return false;
> > +
> > + return true;
> > +}
>
> > +/* INTx error interrupts are Xilinx controller specific interrupt,
> > +used to
> > + * notify user about error's such as cfg timeout, slave unsupported
> > +requests,
>
> s/error's/errors/
>
> > + * fatal and non fatal error etc.
>
> > + err = devm_request_irq(dev, irq,
> xilinx_pl_dma_pcie_intr_handler,
> > + IRQF_SHARED | IRQF_NO_THREAD,
> intr_cause[i].sym, port);
>
> Rewrap to fit in 80 columns.
>
> > + /* Needed for MSI DECODE MODE */
> > + pcie_write(port, XILINX_PCIE_DMA_IDR_ALL_MASK,
> XILINX_PCIE_DMA_REG_MSI_LOW_MASK);
> > + pcie_write(port, XILINX_PCIE_DMA_IDR_ALL_MASK,
> > +XILINX_PCIE_DMA_REG_MSI_HI_MASK);
>
> Rewrap.
>
> Bjorn
* RE: [PATCH v6 3/3] PCI: xilinx-xdma: Add Xilinx XDMA Root Port driver
2023-08-28 9:09 ` Havalige, Thippeswamy
@ 2023-08-28 9:44 ` Havalige, Thippeswamy
2023-08-28 12:01 ` Havalige, Thippeswamy
1 sibling, 0 replies; 10+ messages in thread
From: Havalige, Thippeswamy @ 2023-08-28 9:44 UTC (permalink / raw)
To: Bjorn Helgaas
Cc: linux-arm-kernel@lists.infradead.org,
linux-kernel@vger.kernel.org, linux-pci@vger.kernel.org,
devicetree@vger.kernel.org, bhelgaas@google.com,
lpieralisi@kernel.org, robh@kernel.org, kw@linux.com,
Simek, Michal, krzysztof.kozlowski+dt@linaro.org,
Gogada, Bharat Kumar
Hi Bjorn,
> -----Original Message-----
> From: Havalige, Thippeswamy
> Sent: Monday, August 28, 2023 2:39 PM
> To: Bjorn Helgaas <helgaas@kernel.org>
> Cc: linux-arm-kernel@lists.infradead.org; linux-kernel@vger.kernel.org; linux-
> pci@vger.kernel.org; devicetree@vger.kernel.org; bhelgaas@google.com;
> lpieralisi@kernel.org; robh@kernel.org; kw@linux.com; Simek, Michal
> <michal.simek@amd.com>; krzysztof.kozlowski+dt@linaro.org; Gogada,
> Bharat Kumar <bharat.kumar.gogada@amd.com>
> Subject: RE: [PATCH v6 3/3] PCI: xilinx-xdma: Add Xilinx XDMA Root Port driver
>
> Hi Bjorn,
>
> Thanks, I'll take all your comments and update them in the next patch.
>
> Regards,
> Thippeswamy H
>
> > -----Original Message-----
> > From: Bjorn Helgaas <helgaas@kernel.org>
> > Sent: Tuesday, August 22, 2023 1:44 AM
> > To: Havalige, Thippeswamy <thippeswamy.havalige@amd.com>
> > Cc: linux-arm-kernel@lists.infradead.org;
> > linux-kernel@vger.kernel.org; linux- pci@vger.kernel.org;
> > devicetree@vger.kernel.org; bhelgaas@google.com;
> > lpieralisi@kernel.org; robh@kernel.org; kw@linux.com; Simek, Michal
> > <michal.simek@amd.com>; krzysztof.kozlowski+dt@linaro.org; Gogada,
> > Bharat Kumar <bharat.kumar.gogada@amd.com>
> > Subject: Re: [PATCH v6 3/3] PCI: xilinx-xdma: Add Xilinx XDMA Root
> > Port driver
> >
> > On Fri, Aug 18, 2023 at 03:05:07PM +0530, Thippeswamy Havalige wrote:
> > > Add support for Xilinx XDMA Soft IP core as Root Port.
> > >
> > > The Zynq UltraScale+ MPSoCs devices support XDMA soft IP module in
> > > programmable logic.
> > >
> > > The integrated XDMA soft IP block has integrated bridge function
> > > that can act as PCIe Root Port.
> > >
> > > Signed-off-by: Thippeswamy Havalige <thippeswamy.havalige@amd.com>
> > > Signed-off-by: Bharat Kumar Gogada <bharat.kumar.gogada@amd.com>
> > > ---
> > > changes in v6:
> > > - Replaced chained irq's with regular interrupts.
> >
> > Thanks a million for working this out!
> >
> > Trivial comments below, wait a couple days before reposting in case
> > there are other comments.
> >
> > > +static inline bool xilinx_pl_dma_pcie_link_up(struct pl_dma_pcie
> > > +*port) {
> > > + return (pcie_read(port, XILINX_PCIE_DMA_REG_PSCR) &
> > > + XILINX_PCIE_DMA_REG_PSCR_LNKUP) ? 1 : 0;
> >
> > This function returns bool, so I think true/false would be more
> > appropriate than 1/0.
> >
> > > +static bool xilinx_pl_dma_pcie_valid_device(struct pci_bus *bus,
> > > + unsigned int devfn)
> > > +{
> > > + struct pl_dma_pcie *port = bus->sysdata;
> > > +
> > > + /* Check if link is up when trying to access downstream ports */
> > > + if (!pci_is_root_bus(bus)) {
> > > + /*
> > > + * Checking whether link is up here is a last line of defence,
> > > + * if the link goes down after we check for link-up, we have a
> > > + * problem: if a PIO request is initiated while link-down, the
> > > + * whole controller hangs, and even after link comes up again,
> > > + * previous PIO requests won't work, and a reset of the whole
> > > + * PCIe controller is needed. Henceforth we need link-up
> > check
> > > + * here to avoid sending PIO request when link is down. This
> > > + * check is racy by definition and does not make controller
> > hang
> > > + * if the link goes down after this check is performed.
> >
> > This comment doesn't make sense to me. "If PIO request initiated
> > while link- down, controller hangs ... This check is racy and does not
> > make controller hang if link goes down." Which is it?
In the link-down scenario, we will treat the current device as an invalid device.
> > My *guess* is that this check narrows the window but doesn't close it,
> > so if
> > xilinx_pl_dma_pcie_link_up() finds the link up, but the link goes down
> > before
> > pci_generic_config_read() initiates the PIO request, the controller
> > hangs, and a reset is required.
> >
> > > + */
> > > + if (!xilinx_pl_dma_pcie_link_up(port))
> > > + return false;
> > > + } else if (devfn > 0)
> > > + /* Only one device down on each root port */
> > > + return false;
> > > +
> > > + return true;
> > > +}
> >
> > > +/* INTx error interrupts are Xilinx controller specific interrupt,
> > > +used to
> > > + * notify user about error's such as cfg timeout, slave unsupported
> > > +requests,
> >
> > s/error's/errors/
> >
> > > + * fatal and non fatal error etc.
> >
> > > + err = devm_request_irq(dev, irq,
> > xilinx_pl_dma_pcie_intr_handler,
> > > + IRQF_SHARED | IRQF_NO_THREAD,
> > intr_cause[i].sym, port);
> >
> > Rewrap to fit in 80 columns.
> >
> > > + /* Needed for MSI DECODE MODE */
> > > + pcie_write(port, XILINX_PCIE_DMA_IDR_ALL_MASK,
> > XILINX_PCIE_DMA_REG_MSI_LOW_MASK);
> > > + pcie_write(port, XILINX_PCIE_DMA_IDR_ALL_MASK,
> > > +XILINX_PCIE_DMA_REG_MSI_HI_MASK);
> >
> > Rewrap.
> >
> > Bjorn
* RE: [PATCH v6 3/3] PCI: xilinx-xdma: Add Xilinx XDMA Root Port driver
2023-08-28 9:09 ` Havalige, Thippeswamy
2023-08-28 9:44 ` Havalige, Thippeswamy
@ 2023-08-28 12:01 ` Havalige, Thippeswamy
2023-08-28 21:11 ` Bjorn Helgaas
1 sibling, 1 reply; 10+ messages in thread
From: Havalige, Thippeswamy @ 2023-08-28 12:01 UTC (permalink / raw)
To: Bjorn Helgaas
Cc: linux-arm-kernel@lists.infradead.org,
linux-kernel@vger.kernel.org, linux-pci@vger.kernel.org,
devicetree@vger.kernel.org, bhelgaas@google.com,
lpieralisi@kernel.org, robh@kernel.org, kw@linux.com,
Simek, Michal, krzysztof.kozlowski+dt@linaro.org,
Gogada, Bharat Kumar
Hi Bjorn,
> -----Original Message-----
> From: Havalige, Thippeswamy
> Sent: Monday, August 28, 2023 2:39 PM
> To: Bjorn Helgaas <helgaas@kernel.org>
> Cc: linux-arm-kernel@lists.infradead.org; linux-kernel@vger.kernel.org; linux-
> pci@vger.kernel.org; devicetree@vger.kernel.org; bhelgaas@google.com;
> lpieralisi@kernel.org; robh@kernel.org; kw@linux.com; Simek, Michal
> <michal.simek@amd.com>; krzysztof.kozlowski+dt@linaro.org; Gogada,
> Bharat Kumar <bharat.kumar.gogada@amd.com>
> Subject: RE: [PATCH v6 3/3] PCI: xilinx-xdma: Add Xilinx XDMA Root Port driver
>
> Hi Bjorn,
>
> Thanks, I'll take all your comments and update them in the next patch.
>
> Regards,
> Thippeswamy H
>
> > -----Original Message-----
> > From: Bjorn Helgaas <helgaas@kernel.org>
> > Sent: Tuesday, August 22, 2023 1:44 AM
> > To: Havalige, Thippeswamy <thippeswamy.havalige@amd.com>
> > Cc: linux-arm-kernel@lists.infradead.org;
> > linux-kernel@vger.kernel.org; linux- pci@vger.kernel.org;
> > devicetree@vger.kernel.org; bhelgaas@google.com;
> > lpieralisi@kernel.org; robh@kernel.org; kw@linux.com; Simek, Michal
> > <michal.simek@amd.com>; krzysztof.kozlowski+dt@linaro.org; Gogada,
> > Bharat Kumar <bharat.kumar.gogada@amd.com>
> > Subject: Re: [PATCH v6 3/3] PCI: xilinx-xdma: Add Xilinx XDMA Root
> > Port driver
> >
> > On Fri, Aug 18, 2023 at 03:05:07PM +0530, Thippeswamy Havalige wrote:
> > > Add support for Xilinx XDMA Soft IP core as Root Port.
> > >
> > > The Zynq UltraScale+ MPSoCs devices support XDMA soft IP module in
> > > programmable logic.
> > >
> > > The integrated XDMA soft IP block has integrated bridge function
> > > that can act as PCIe Root Port.
> > >
> > > Signed-off-by: Thippeswamy Havalige <thippeswamy.havalige@amd.com>
> > > Signed-off-by: Bharat Kumar Gogada <bharat.kumar.gogada@amd.com>
> > > ---
> > > changes in v6:
> > > - Replaced chained irq's with regular interrupts.
> >
> > Thanks a million for working this out!
> >
> > Trivial comments below, wait a couple days before reposting in case
> > there are other comments.
> >
> > > +static inline bool xilinx_pl_dma_pcie_link_up(struct pl_dma_pcie
> > > +*port) {
> > > + return (pcie_read(port, XILINX_PCIE_DMA_REG_PSCR) &
> > > + XILINX_PCIE_DMA_REG_PSCR_LNKUP) ? 1 : 0;
> >
> > This function returns bool, so I think true/false would be more
> > appropriate than 1/0.
> >
> > > +static bool xilinx_pl_dma_pcie_valid_device(struct pci_bus *bus,
> > > + unsigned int devfn)
> > > +{
> > > + struct pl_dma_pcie *port = bus->sysdata;
> > > +
> > > + /* Check if link is up when trying to access downstream ports */
> > > + if (!pci_is_root_bus(bus)) {
> > > + /*
> > > + * Checking whether link is up here is a last line of defence,
> > > + * if the link goes down after we check for link-up, we have a
> > > + * problem: if a PIO request is initiated while link-down, the
> > > + * whole controller hangs, and even after link comes up again,
> > > + * previous PIO requests won't work, and a reset of the whole
> > > + * PCIe controller is needed. Henceforth we need link-up
> > check
> > > + * here to avoid sending PIO request when link is down. This
> > > + * check is racy by definition and does not make controller
> > hang
> > > + * if the link goes down after this check is performed.
> >
> > This comment doesn't make sense to me. "If PIO request initiated
> > while link- down, controller hangs ... This check is racy and does not
> > make controller hang if link goes down." Which is it?
- Here, the link-up check treats the device as invalid.
Please find the comment that I will update in the next patch, and
please let me know if any changes are needed.
/*
* Checking whether the link is up. Here is the last line of
* defence. If the link goes down after we check for link-up,
* we have a problem. If a PIO request is initiated while link
* is down, the whole controller hangs. Even after link comes up
* again, previous PIO requests won't work, and a reset of the
* whole PCIe controller is needed. Henceforth we need link-up
* check here to treat device as invalid and avoid sending PIO
* request when link is down and this check is inherently racy
* by definition.
*/
> >
> > My *guess* is that this check narrows the window but doesn't close it,
> > so if
> > xilinx_pl_dma_pcie_link_up() finds the link up, but the link goes down
> > before
> > pci_generic_config_read() initiates the PIO request, the controller
> > hangs, and a reset is required.
> >
> > > + */
> > > + if (!xilinx_pl_dma_pcie_link_up(port))
> > > + return false;
> > > + } else if (devfn > 0)
> > > + /* Only one device down on each root port */
> > > + return false;
> > > +
> > > + return true;
> > > +}
> >
> > > +/* INTx error interrupts are Xilinx controller specific interrupt, used to
> > > + * notify user about error's such as cfg timeout, slave unsupported requests,
> >
> > s/error's/errors/
> >
> > > + * fatal and non fatal error etc.
> >
> > > + err = devm_request_irq(dev, irq, xilinx_pl_dma_pcie_intr_handler,
> > > + IRQF_SHARED | IRQF_NO_THREAD, intr_cause[i].sym, port);
> >
> > Rewrap to fit in 80 columns.
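> >
> > E.g., one possible wrapping (assuming the usual tab indentation):
> >
> >   err = devm_request_irq(dev, irq, xilinx_pl_dma_pcie_intr_handler,
> >                          IRQF_SHARED | IRQF_NO_THREAD,
> >                          intr_cause[i].sym, port);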
> >
> > > + /* Needed for MSI DECODE MODE */
> > > + pcie_write(port, XILINX_PCIE_DMA_IDR_ALL_MASK, XILINX_PCIE_DMA_REG_MSI_LOW_MASK);
> > > + pcie_write(port, XILINX_PCIE_DMA_IDR_ALL_MASK, XILINX_PCIE_DMA_REG_MSI_HI_MASK);
> >
> > Rewrap.
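> >
> > E.g.:
> >
> >   pcie_write(port, XILINX_PCIE_DMA_IDR_ALL_MASK,
> >              XILINX_PCIE_DMA_REG_MSI_LOW_MASK);
> >   pcie_write(port, XILINX_PCIE_DMA_IDR_ALL_MASK,
> >              XILINX_PCIE_DMA_REG_MSI_HI_MASK);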
> >
> > Bjorn
^ permalink raw reply [flat|nested] 10+ messages in thread
* Re: [PATCH v6 3/3] PCI: xilinx-xdma: Add Xilinx XDMA Root Port driver
2023-08-28 12:01 ` Havalige, Thippeswamy
@ 2023-08-28 21:11 ` Bjorn Helgaas
0 siblings, 0 replies; 10+ messages in thread
From: Bjorn Helgaas @ 2023-08-28 21:11 UTC (permalink / raw)
To: Havalige, Thippeswamy
Cc: linux-arm-kernel@lists.infradead.org,
linux-kernel@vger.kernel.org, linux-pci@vger.kernel.org,
devicetree@vger.kernel.org, bhelgaas@google.com,
lpieralisi@kernel.org, robh@kernel.org, kw@linux.com,
Simek, Michal, krzysztof.kozlowski+dt@linaro.org,
Gogada, Bharat Kumar
On Mon, Aug 28, 2023 at 12:01:52PM +0000, Havalige, Thippeswamy wrote:
> > > On Fri, Aug 18, 2023 at 03:05:07PM +0530, Thippeswamy Havalige wrote:
> > > > ...
> > > > +static bool xilinx_pl_dma_pcie_valid_device(struct pci_bus *bus,
> > > > + unsigned int devfn)
> > > > +{
> > > > + struct pl_dma_pcie *port = bus->sysdata;
> > > > +
> > > > + /* Check if link is up when trying to access downstream ports */
> > > > + if (!pci_is_root_bus(bus)) {
> > > > + /*
> > > > + * Checking whether link is up here is a last line of defence,
> > > > + * if the link goes down after we check for link-up, we have a
> > > > + * problem: if a PIO request is initiated while link-down, the
> > > > + * whole controller hangs, and even after link comes up again,
> > > > + * previous PIO requests won't work, and a reset of the whole
> > > > + * PCIe controller is needed. Henceforth we need link-up check
> > > > + * here to avoid sending PIO request when link is down. This
> > > > + * check is racy by definition and does not make controller hang
> > > > + * if the link goes down after this check is performed.
> > >
> > > This comment doesn't make sense to me. "If PIO request initiated
> > > while link-down, controller hangs ... This check is racy and does not
> > > make controller hang if link goes down." Which is it?
> - Here, checking link-up is what lets us treat the device as invalid when
>   the link is down.
>
> Please find below the comment I will update in the next patch, and please
> let me know if any changes are needed.
>
> /*
>  * Checking whether the link is up here is the last line of
>  * defence. If the link goes down after we check for link-up,
>  * we have a problem. If a PIO request is initiated while the link
>  * is down, the whole controller hangs. Even after the link comes up
>  * again, previous PIO requests won't work, and a reset of the
>  * whole PCIe controller is needed. Hence we need the link-up
>  * check here to treat the device as invalid and avoid sending a PIO
>  * request when the link is down; this check is inherently racy
>  * by definition.
>  */
> > >
> > > My *guess* is that this check narrows the window but doesn't close
> > > it, so if xilinx_pl_dma_pcie_link_up() finds the link up, but the
> > > link goes down before pci_generic_config_read() initiates the PIO
> > > request, the controller hangs, and a reset is required.
Sorry, I dragged this out by not giving you a more useful suggestion
to begin with. Since advk_pcie_valid_device() has the same issue,
copying its comment was a great place to start.
But I think advk_pcie_valid_device(), altera_pcie_valid_device(),
nwl_pcie_valid_device(), xilinx_pcie_valid_device(), and now
xilinx_pl_dma_pcie_valid_device() all have the same race, but none of
them really address it in the comment.
I'm looking for explicit acknowledgement that we can't reliably
prevent this unrecoverable error, e.g., something like this:
Sending a PIO request to a downstream device when the link is down
causes an unrecoverable error, and a reset of the entire PCIe
controller will be needed. We can reduce the likelihood of that
unrecoverable error by checking whether the link is up, but can't
completely prevent it because the link may go down between the
link-up check and the PIO request.
Bjorn
^ permalink raw reply [flat|nested] 10+ messages in thread
* Re: [PATCH v6 3/3] PCI: xilinx-xdma: Add Xilinx XDMA Root Port driver
2023-08-18 9:35 ` [PATCH v6 3/3] PCI: xilinx-xdma: Add Xilinx XDMA Root Port driver Thippeswamy Havalige
2023-08-21 20:14 ` Bjorn Helgaas
@ 2023-11-08 15:31 ` Bjorn Helgaas
1 sibling, 0 replies; 10+ messages in thread
From: Bjorn Helgaas @ 2023-11-08 15:31 UTC (permalink / raw)
To: Thippeswamy Havalige
Cc: linux-arm-kernel, linux-kernel, linux-pci, devicetree, bhelgaas,
lpieralisi, robh, kw, michal.simek, krzysztof.kozlowski+dt,
bharat.kumar.gogada
On Fri, Aug 18, 2023 at 03:05:07PM +0530, Thippeswamy Havalige wrote:
> Add support for Xilinx XDMA Soft IP core as Root Port.
>
> The Zynq UltraScale+ MPSoCs devices support XDMA soft IP module in
> programmable logic.
>
> The integrated XDMA soft IP block has integrated bridge function that
> can act as PCIe Root Port.
>
> Signed-off-by: Thippeswamy Havalige <thippeswamy.havalige@amd.com>
> Signed-off-by: Bharat Kumar Gogada <bharat.kumar.gogada@amd.com>
> ---
> changes in v6:
> - Replaced chained irq's with regular interrupts.
> - Modified interrupt names.
> - Aligned to 80 columns
> changes in v5:
> - Added detailed comments for link_up check.
> - Modified upper case hex values to lower case.
> ...
I really appreciate the chained IRQ to regular IRQ conversion here,
and I'm just pulling out the diff between v5 and v6 here because it's
a good example that may be useful to anybody who has the opportunity
to do similar conversions for other drivers.
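Schematically, the conversion replaces a chained flow handler with an
ordinary handler registered via devm_request_irq() (foo_* below are
placeholder names, not the actual driver symbols):

  static void foo_intx_flow(struct irq_desc *desc)
  {
  	struct foo_port *port = irq_desc_get_handler_data(desc);
  	struct irq_chip *chip = irq_desc_get_chip(desc);

  	chained_irq_enter(chip, desc);
  	/* demultiplex and call generic_handle_domain_irq() here */
  	chained_irq_exit(chip, desc);
  }

  irq_set_chained_handler_and_data(irq, foo_intx_flow, port);

becomes:

  static irqreturn_t foo_intx_flow(int irq, void *args)
  {
  	struct foo_port *port = args;

  	/* demultiplex and call generic_handle_domain_irq() here */
  	return IRQ_HANDLED;
  }

  err = devm_request_irq(dev, irq, foo_intx_flow,
  			 IRQF_SHARED | IRQF_NO_THREAD, NULL, port);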
These are the v5 and v6 xilinx-xdma patches:
v5 https://lore.kernel.org/r/20230628092812.1592644-4-thippeswamy.havalige@amd.com
v6 https://lore.kernel.org/r/20230818093507.24435-4-thippeswamy.havalige@amd.com
And here's the diff between them:
diff --git a/drivers/pci/controller/pcie-xilinx-dma-pl.c b/drivers/pci/controller/pcie-xilinx-dma-pl.c
index 938c6934839c..c9673e905f4b 100644
--- a/drivers/pci/controller/pcie-xilinx-dma-pl.c
+++ b/drivers/pci/controller/pcie-xilinx-dma-pl.c
@@ -8,7 +8,6 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
-#include <linux/irqchip/chained_irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/msi.h>
@@ -96,9 +95,8 @@ struct xilinx_msi {
* @pldma_domain: PL DMA IRQ domain pointer
* @resources: Bus Resources
* @msi: MSI information
- * @irq_misc: Legacy and error interrupt number
- * @intx_irq: legacy interrupt number
- * @lock: lock protecting shared register access
+ * @intx_irq: INTx error interrupt number
+ * @lock: Lock protecting shared register access
*/
struct pl_dma_pcie {
struct device *dev;
@@ -110,7 +108,6 @@ struct pl_dma_pcie {
struct irq_domain *pldma_domain;
struct list_head resources;
struct xilinx_msi msi;
- int irq_misc;
int intx_irq;
raw_spinlock_t lock;
};
@@ -143,19 +140,23 @@ static void xilinx_pl_dma_pcie_clear_err_interrupts(struct pl_dma_pcie *port)
}
}
-static bool xilinx_pl_dma_pcie_valid_device(struct pci_bus *bus, unsigned int devfn)
+static bool xilinx_pl_dma_pcie_valid_device(struct pci_bus *bus,
+ unsigned int devfn)
{
struct pl_dma_pcie *port = bus->sysdata;
/* Check if link is up when trying to access downstream ports */
if (!pci_is_root_bus(bus)) {
/*
- * If the link goes down after we check for link-up, we have a problem:
- * if a PIO request is initiated while link-down, the whole controller
- * hangs, and even after link comes up again, previous PIO requests
- * won't work, and a reset of the whole PCIe controller is needed.
- * Henceforth we need link-up check here to avoid sending PIO request
- * when link is down.
+ * Checking whether link is up here is a last line of defence,
+ * if the link goes down after we check for link-up, we have a
+ * problem: if a PIO request is initiated while link-down, the
+ * whole controller hangs, and even after link comes up again,
+ * previous PIO requests won't work, and a reset of the whole
+ * PCIe controller is needed. Henceforth we need link-up check
+ * here to avoid sending PIO request when link is down. This
+ * check is racy by definition and does not make controller hang
+ * if the link goes down after this check is performed.
*/
if (!xilinx_pl_dma_pcie_link_up(port))
return false;
@@ -178,7 +179,7 @@ static void __iomem *xilinx_pl_dma_pcie_map_bus(struct pci_bus *bus,
}
/* PCIe operations */
-static const struct pci_ecam_ops xilinx_pl_dma_pcie_ops = {
+static struct pci_ecam_ops xilinx_pl_dma_pcie_ops = {
.pci_ops = {
.map_bus = xilinx_pl_dma_pcie_map_bus,
.read = pci_generic_config_read,
@@ -221,13 +222,13 @@ static void xilinx_unmask_intx_irq(struct irq_data *data)
}
static struct irq_chip xilinx_leg_irq_chip = {
- .name = "INTx",
+ .name = "pl_dma:INTx",
.irq_mask = xilinx_mask_intx_irq,
.irq_unmask = xilinx_unmask_intx_irq,
};
-static int xilinx_pl_dma_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
- irq_hw_number_t hwirq)
+static int xilinx_pl_dma_pcie_intx_map(struct irq_domain *domain,
+ unsigned int irq, irq_hw_number_t hwirq)
{
irq_set_chip_and_handler(irq, &xilinx_leg_irq_chip, handle_level_irq);
irq_set_chip_data(irq, domain->host_data);
@@ -241,55 +242,54 @@ static const struct irq_domain_ops intx_domain_ops = {
.map = xilinx_pl_dma_pcie_intx_map,
};
-static void xilinx_pl_dma_pcie_handle_msi_irq(struct pl_dma_pcie *port,
- u32 status_reg)
+static irqreturn_t xilinx_pl_dma_pcie_msi_handler_high(int irq, void *args)
{
struct xilinx_msi *msi;
unsigned long status;
u32 bit, virq;
+ struct pl_dma_pcie *port = args;
+
+ msi = &port->msi;
+
+ while ((status = pcie_read(port, XILINX_PCIE_DMA_REG_MSI_HI)) != 0) {
+ for_each_set_bit(bit, &status, 32) {
+ pcie_write(port, 1 << bit, XILINX_PCIE_DMA_REG_MSI_HI);
+ bit = bit + 32;
+ virq = irq_find_mapping(msi->dev_domain, bit);
+ if (virq)
+ generic_handle_irq(virq);
+ }
+ }
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t xilinx_pl_dma_pcie_msi_handler_low(int irq, void *args)
+{
+ struct pl_dma_pcie *port = args;
+ struct xilinx_msi *msi;
+ unsigned long status;
+ u32 bit, virq;
msi = &port->msi;
- while ((status = pcie_read(port, status_reg)) != 0) {
+ while ((status = pcie_read(port, XILINX_PCIE_DMA_REG_MSI_LOW)) != 0) {
for_each_set_bit(bit, &status, 32) {
- pcie_write(port, 1 << bit, status_reg);
- if (status_reg == XILINX_PCIE_DMA_REG_MSI_HI)
- bit = bit + 32;
+ pcie_write(port, 1 << bit, XILINX_PCIE_DMA_REG_MSI_LOW);
virq = irq_find_mapping(msi->dev_domain, bit);
if (virq)
generic_handle_irq(virq);
}
}
+
+ return IRQ_HANDLED;
}
-static void xilinx_pl_dma_pcie_msi_handler_high(struct irq_desc *desc)
+static irqreturn_t xilinx_pl_dma_pcie_event_flow(int irq, void *args)
{
- struct pl_dma_pcie *port = irq_desc_get_handler_data(desc);
- struct irq_chip *chip = irq_desc_get_chip(desc);
-
- chained_irq_enter(chip, desc);
- xilinx_pl_dma_pcie_handle_msi_irq(port, XILINX_PCIE_DMA_REG_MSI_HI);
- chained_irq_exit(chip, desc);
-}
-
-static void xilinx_pl_dma_pcie_msi_handler_low(struct irq_desc *desc)
-{
- struct pl_dma_pcie *port = irq_desc_get_handler_data(desc);
- struct irq_chip *chip = irq_desc_get_chip(desc);
-
- chained_irq_enter(chip, desc);
- xilinx_pl_dma_pcie_handle_msi_irq(port, XILINX_PCIE_DMA_REG_MSI_LOW);
- chained_irq_exit(chip, desc);
-}
-
-static void xilinx_pl_dma_pcie_event_flow(struct irq_desc *desc)
-{
- struct pl_dma_pcie *port = irq_desc_get_handler_data(desc);
- struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct pl_dma_pcie *port = args;
unsigned long val;
int i;
- chained_irq_enter(chip, desc);
val = pcie_read(port, XILINX_PCIE_DMA_REG_IDR);
val &= pcie_read(port, XILINX_PCIE_DMA_REG_IMR);
for_each_set_bit(i, &val, 32)
@@ -297,7 +297,7 @@ static void xilinx_pl_dma_pcie_event_flow(struct irq_desc *desc)
pcie_write(port, val, XILINX_PCIE_DMA_REG_IDR);
- chained_irq_exit(chip, desc);
+ return IRQ_HANDLED;
}
#define _IC(x, s) \
@@ -313,8 +313,6 @@ static const struct {
_IC(CORRECTABLE, "Correctable error message"),
_IC(NONFATAL, "Non fatal error message"),
_IC(FATAL, "Fatal error message"),
- _IC(INTX, "INTX error message"),
- _IC(MSI, "MSI message received"),
_IC(SLV_UNSUPP, "Slave unsupported request"),
_IC(SLV_UNEXP, "Slave unexpected completion"),
_IC(SLV_COMPL, "Slave completion timeout"),
@@ -350,7 +348,7 @@ static irqreturn_t xilinx_pl_dma_pcie_intr_handler(int irq, void *dev_id)
}
static struct irq_chip xilinx_msi_irq_chip = {
- .name = "pl_dma_pciepcie:msi",
+ .name = "pl_dma:PCIe MSI",
.irq_enable = pci_msi_unmask_irq,
.irq_disable = pci_msi_mask_irq,
.irq_mask = pci_msi_mask_irq,
@@ -380,7 +378,7 @@ static int xilinx_msi_set_affinity(struct irq_data *irq_data,
}
static struct irq_chip xilinx_irq_chip = {
- .name = "Xilinx MSI",
+ .name = "pl_dma:MSI",
.irq_compose_msi_msg = xilinx_compose_msi_msg,
.irq_set_affinity = xilinx_msi_set_affinity,
};
@@ -427,12 +425,6 @@ static const struct irq_domain_ops dev_msi_domain_ops = {
.free = xilinx_irq_domain_free,
};
-static void xilinx_pl_dma_pcie_free_interrupts(struct pl_dma_pcie *port)
-{
- irq_set_chained_handler_and_data(port->msi.irq_msi0, NULL, NULL);
- irq_set_chained_handler_and_data(port->msi.irq_msi1, NULL, NULL);
-}
-
static void xilinx_pl_dma_pcie_free_irq_domains(struct pl_dma_pcie *port)
{
struct xilinx_msi *msi = &port->msi;
@@ -487,22 +479,23 @@ static int xilinx_pl_dma_pcie_init_msi_irq_domain(struct pl_dma_pcie *port)
return -ENOMEM;
}
-static void xilinx_pl_dma_pcie_intx_flow(struct irq_desc *desc)
+/* INTx error interrupts are Xilinx controller specific interrupt, used to
+ * notify user about error's such as cfg timeout, slave unsupported requests,
+ * fatal and non fatal error etc.
+ */
+
+static irqreturn_t xilinx_pl_dma_pcie_intx_flow(int irq, void *args)
{
- struct pl_dma_pcie *port = irq_desc_get_handler_data(desc);
- struct irq_chip *chip = irq_desc_get_chip(desc);
unsigned long val;
int i;
-
- chained_irq_enter(chip, desc);
+ struct pl_dma_pcie *port = args;
val = FIELD_GET(XILINX_PCIE_DMA_IDRN_MASK,
pcie_read(port, XILINX_PCIE_DMA_REG_IDRN));
for_each_set_bit(i, &val, PCI_NUM_INTX)
generic_handle_domain_irq(port->intx_domain, i);
-
- chained_irq_exit(chip, desc);
+ return IRQ_HANDLED;
}
static void xilinx_pl_dma_pcie_mask_event_irq(struct irq_data *d)
@@ -530,7 +523,7 @@ static void xilinx_pl_dma_pcie_unmask_event_irq(struct irq_data *d)
}
static struct irq_chip xilinx_pl_dma_pcie_event_irq_chip = {
- .name = "RC-Event",
+ .name = "pl_dma:RC-Event",
.irq_mask = xilinx_pl_dma_pcie_mask_event_irq,
.irq_unmask = xilinx_pl_dma_pcie_unmask_event_irq,
};
@@ -602,7 +595,7 @@ static int xilinx_pl_dma_pcie_setup_irq(struct pl_dma_pcie *port)
{
struct device *dev = port->dev;
struct platform_device *pdev = to_platform_device(dev);
- int i, irq;
+ int i, irq, err;
port->irq = platform_get_irq(pdev, 0);
if (port->irq < 0)
@@ -621,7 +614,7 @@ static int xilinx_pl_dma_pcie_setup_irq(struct pl_dma_pcie *port)
}
err = devm_request_irq(dev, irq, xilinx_pl_dma_pcie_intr_handler,
- 0, intr_cause[i].sym, port);
+ IRQF_SHARED | IRQF_NO_THREAD, intr_cause[i].sym, port);
if (err) {
dev_err(dev, "Failed to request IRQ %d\n", irq);
return err;
@@ -635,14 +628,18 @@ static int xilinx_pl_dma_pcie_setup_irq(struct pl_dma_pcie *port)
return -ENXIO;
}
- /* Plug the INTx chained handler */
- irq_set_chained_handler_and_data(port->intx_irq,
- xilinx_pl_dma_pcie_intx_flow, port);
-
- /* Plug the main event chained handler */
- irq_set_chained_handler_and_data(port->irq,
- xilinx_pl_dma_pcie_event_flow, port);
-
+ err = devm_request_irq(dev, port->intx_irq, xilinx_pl_dma_pcie_intx_flow,
+ IRQF_SHARED | IRQF_NO_THREAD, NULL, port);
+ if (err) {
+ dev_err(dev, "Failed to request INTx IRQ %d\n", irq);
+ return err;
+ }
+ err = devm_request_irq(dev, port->irq, xilinx_pl_dma_pcie_event_flow,
+ IRQF_SHARED | IRQF_NO_THREAD, NULL, port);
+ if (err) {
+ dev_err(dev, "Failed to request event IRQ %d\n", irq);
+ return err;
+ }
return 0;
}
@@ -666,7 +663,7 @@ static void xilinx_pl_dma_pcie_init_port(struct pl_dma_pcie *port)
pcie_write(port, XILINX_PCIE_DMA_IDR_ALL_MASK, XILINX_PCIE_DMA_REG_MSI_LOW_MASK);
pcie_write(port, XILINX_PCIE_DMA_IDR_ALL_MASK, XILINX_PCIE_DMA_REG_MSI_HI_MASK);
- /*set the Bridge enable bit */
+ /* Set the Bridge enable bit */
pcie_write(port, pcie_read(port, XILINX_PCIE_DMA_REG_RPSC) |
XILINX_PCIE_DMA_REG_RPSC_BEN,
XILINX_PCIE_DMA_REG_RPSC);
@@ -676,29 +673,32 @@ static int xilinx_request_msi_irq(struct pl_dma_pcie *port)
{
struct device *dev = port->dev;
struct platform_device *pdev = to_platform_device(dev);
+ int ret;
port->msi.irq_msi0 = platform_get_irq_byname(pdev, "msi0");
if (port->msi.irq_msi0 <= 0) {
dev_err(dev, "Unable to find msi0 IRQ line\n");
return port->msi.irq_msi0;
}
-
- irq_set_chained_handler_and_data(port->msi.irq_msi0,
- xilinx_pl_dma_pcie_msi_handler_low,
- port);
-
+ ret = devm_request_irq(dev, port->msi.irq_msi0, xilinx_pl_dma_pcie_msi_handler_low,
+ IRQF_SHARED | IRQF_NO_THREAD, "xlnx-pcie-dma-pl",
+ port);
+ if (ret) {
+ dev_err(dev, "Failed to register interrupt\n");
+ return ret;
+ }
port->msi.irq_msi1 = platform_get_irq_byname(pdev, "msi1");
if (port->msi.irq_msi1 <= 0) {
- irq_set_chained_handler_and_data(port->msi.irq_msi0,
- NULL, NULL);
dev_err(dev, "Unable to find msi1 IRQ line\n");
return port->msi.irq_msi1;
}
-
- irq_set_chained_handler_and_data(port->msi.irq_msi1,
- xilinx_pl_dma_pcie_msi_handler_high,
- port);
-
+ ret = devm_request_irq(dev, port->msi.irq_msi1, xilinx_pl_dma_pcie_msi_handler_high,
+ IRQF_SHARED | IRQF_NO_THREAD, "xlnx-pcie-dma-pl",
+ port);
+ if (ret) {
+ dev_err(dev, "Failed to register interrupt\n");
+ return ret;
+ }
return 0;
}
@@ -712,7 +712,7 @@ static int xilinx_pl_dma_pcie_parse_dt(struct pl_dma_pcie *port,
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
- dev_err(dev, "missing \"reg\" property\n");
+ dev_err(dev, "Missing \"reg\" property\n");
return -ENXIO;
}
port->phys_reg_base = res->start;
@@ -767,7 +767,7 @@ static int xilinx_pl_dma_pcie_probe(struct platform_device *pdev)
err = xilinx_pl_dma_pcie_setup_irq(port);
bridge->sysdata = port;
- bridge->ops = (struct pci_ops *)&xilinx_pl_dma_pcie_ops.pci_ops;
+ bridge->ops = &xilinx_pl_dma_pcie_ops.pci_ops;
err = pci_host_probe(bridge);
if (err < 0)
@@ -780,7 +780,6 @@ static int xilinx_pl_dma_pcie_probe(struct platform_device *pdev)
err_irq_domain:
pci_ecam_free(port->cfg);
- xilinx_pl_dma_pcie_free_interrupts(port);
return err;
}
^ permalink raw reply related [flat|nested] 10+ messages in thread
end of thread, other threads:[~2023-11-08 15:31 UTC | newest]
Thread overview: 10+ messages:
2023-08-18 9:35 [PATCH v6 0/3] Add support for Xilinx XDMA Soft IP as Root Port Thippeswamy Havalige
2023-08-18 9:35 ` [PATCH v6 1/3] PCI: xilinx-cpm: Move interrupt bit definitions to common header Thippeswamy Havalige
2023-08-18 9:35 ` [PATCH v6 2/3] dt-bindings: PCI: xilinx-xdma: Add YAML schemas for Xilinx XDMA PCIe Root Port Bridge Thippeswamy Havalige
2023-08-18 9:35 ` [PATCH v6 3/3] PCI: xilinx-xdma: Add Xilinx XDMA Root Port driver Thippeswamy Havalige
2023-08-21 20:14 ` Bjorn Helgaas
2023-08-28 9:09 ` Havalige, Thippeswamy
2023-08-28 9:44 ` Havalige, Thippeswamy
2023-08-28 12:01 ` Havalige, Thippeswamy
2023-08-28 21:11 ` Bjorn Helgaas
2023-11-08 15:31 ` Bjorn Helgaas