* [PATCH 01/52] mips: octeon: Add misc cvmx-* header files
2022-03-30 10:06 [PATCH 00/52] mips: octeon: Add ethernet support Stefan Roese
@ 2022-03-30 10:06 ` Stefan Roese
2022-03-30 10:06 ` [PATCH 02/52] mips: octeon: Add cvmx-ilk-defs.h header file Stefan Roese
` (48 subsequent siblings)
49 siblings, 0 replies; 52+ messages in thread
From: Stefan Roese @ 2022-03-30 10:06 UTC (permalink / raw)
To: u-boot; +Cc: awilliams, cchavva
From: Aaron Williams <awilliams@marvell.com>
Import misc cvmx-* header files from 2013 U-Boot. They will be used
by the later added drivers to support networking on the MIPS Octeon II /
III platforms.
Signed-off-by: Aaron Williams <awilliams@marvell.com>
Signed-off-by: Stefan Roese <sr@denx.de>
---
arch/mips/mach-octeon/include/mach/cvmx-agl.h | 45 +
.../mach-octeon/include/mach/cvmx-config.h | 128 ++
arch/mips/mach-octeon/include/mach/cvmx-fau.h | 581 +++++++++
.../mips/mach-octeon/include/mach/cvmx-mdio.h | 516 ++++++++
.../include/mach/cvmx-pki-cluster.h | 343 ++++++
arch/mips/mach-octeon/include/mach/cvmx-pko.h | 213 ++++
.../include/mach/cvmx-pko3-resources.h | 36 +
.../mips/mach-octeon/include/mach/cvmx-pko3.h | 1052 +++++++++++++++++
.../mach-octeon/include/mach/cvmx-range.h | 23 +
9 files changed, 2937 insertions(+)
create mode 100644 arch/mips/mach-octeon/include/mach/cvmx-agl.h
create mode 100644 arch/mips/mach-octeon/include/mach/cvmx-config.h
create mode 100644 arch/mips/mach-octeon/include/mach/cvmx-fau.h
create mode 100644 arch/mips/mach-octeon/include/mach/cvmx-mdio.h
create mode 100644 arch/mips/mach-octeon/include/mach/cvmx-pki-cluster.h
create mode 100644 arch/mips/mach-octeon/include/mach/cvmx-pko.h
create mode 100644 arch/mips/mach-octeon/include/mach/cvmx-pko3-resources.h
create mode 100644 arch/mips/mach-octeon/include/mach/cvmx-pko3.h
create mode 100644 arch/mips/mach-octeon/include/mach/cvmx-range.h
diff --git a/arch/mips/mach-octeon/include/mach/cvmx-agl.h b/arch/mips/mach-octeon/include/mach/cvmx-agl.h
new file mode 100644
index 000000000000..4afb3a48bfdc
--- /dev/null
+++ b/arch/mips/mach-octeon/include/mach/cvmx-agl.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018-2022 Marvell International Ltd.
+ *
+ * Functions for AGL (RGMII) common initialization and configuration.
+ */
+
+#ifndef __CVMX_AGL_H__
+#define __CVMX_AGL_H__
+
+/*
+ * @param port AGL port to enable
+ *
+ * @return Zero on success, negative on failure
+ */
+int cvmx_agl_enable(int port);
+
+cvmx_helper_link_info_t cvmx_agl_link_get(int port);
+
+/*
+ * Set MII/RGMII link based on mode.
+ *
+ * @param port interface port to set the link.
+ * @param link_info Link status
+ *
+ * @return 0 on success and 1 on failure
+ */
+int cvmx_agl_link_set(int port, cvmx_helper_link_info_t link_info);
+
+/**
+ * Disables the sending of flow control (pause) frames on the specified
+ * AGL (RGMII) port(s).
+ *
+ * @param interface Which interface (0 or 1)
+ * @param port_mask Mask (4bits) of which ports on the interface to disable
+ * backpressure on.
+ * 1 => disable backpressure
+ * 0 => enable backpressure
+ *
+ * @return 0 on success
+ * -1 on error
+ */
+int cvmx_agl_set_backpressure_override(u32 interface, u32 port_mask);
+
+#endif /* __CVMX_AGL_H__ */
diff --git a/arch/mips/mach-octeon/include/mach/cvmx-config.h b/arch/mips/mach-octeon/include/mach/cvmx-config.h
new file mode 100644
index 000000000000..4f66a3cce524
--- /dev/null
+++ b/arch/mips/mach-octeon/include/mach/cvmx-config.h
@@ -0,0 +1,128 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018-2022 Marvell International Ltd.
+ */
+
+#ifndef __CVMX_CONFIG_H__
+#define __CVMX_CONFIG_H__
+
+/************************* Config Specific Defines ************************/
+#define CVMX_LLM_NUM_PORTS 1
+
+/**< PKO queues per port for interface 0 (ports 0-15) */
+#define CVMX_PKO_QUEUES_PER_PORT_INTERFACE0 1
+
+/**< PKO queues per port for interface 1 (ports 16-31) */
+#define CVMX_PKO_QUEUES_PER_PORT_INTERFACE1 1
+
+/**< PKO queues per port for interface 4 (AGL) */
+#define CVMX_PKO_QUEUES_PER_PORT_INTERFACE4 1
+
+/**< Limit on the number of PKO ports enabled for interface 0 */
+#define CVMX_PKO_MAX_PORTS_INTERFACE0 CVMX_HELPER_PKO_MAX_PORTS_INTERFACE0
+
+/**< Limit on the number of PKO ports enabled for interface 1 */
+#define CVMX_PKO_MAX_PORTS_INTERFACE1 CVMX_HELPER_PKO_MAX_PORTS_INTERFACE1
+
+/**< PKO queues per port for PCI (ports 32-35) */
+#define CVMX_PKO_QUEUES_PER_PORT_PCI 1
+
+/**< PKO queues per port for Loop devices (ports 36-39) */
+#define CVMX_PKO_QUEUES_PER_PORT_LOOP 1
+
+/**< PKO queues per port for SRIO0 devices (ports 40-41) */
+#define CVMX_PKO_QUEUES_PER_PORT_SRIO0 1
+
+/**< PKO queues per port for SRIO1 devices (ports 42-43) */
+#define CVMX_PKO_QUEUES_PER_PORT_SRIO1 1
+
+/************************* FPA allocation *********************************/
+/* Pool sizes in bytes, must be multiple of a cache line */
+#define CVMX_FPA_POOL_0_SIZE (16 * CVMX_CACHE_LINE_SIZE)
+#define CVMX_FPA_POOL_1_SIZE (1 * CVMX_CACHE_LINE_SIZE)
+#define CVMX_FPA_POOL_2_SIZE (8 * CVMX_CACHE_LINE_SIZE)
+#define CVMX_FPA_POOL_3_SIZE (2 * CVMX_CACHE_LINE_SIZE)
+#define CVMX_FPA_POOL_4_SIZE (0 * CVMX_CACHE_LINE_SIZE)
+#define CVMX_FPA_POOL_5_SIZE (0 * CVMX_CACHE_LINE_SIZE)
+#define CVMX_FPA_POOL_6_SIZE (8 * CVMX_CACHE_LINE_SIZE)
+#define CVMX_FPA_POOL_7_SIZE (0 * CVMX_CACHE_LINE_SIZE)
+
+/* Pools in use */
+/**< Packet buffers */
+#define CVMX_FPA_PACKET_POOL (0)
+#ifndef CVMX_FPA_PACKET_POOL_SIZE
+#define CVMX_FPA_PACKET_POOL_SIZE CVMX_FPA_POOL_0_SIZE
+#endif
+
+/**< Work queue entries */
+#define CVMX_FPA_WQE_POOL (1)
+#define CVMX_FPA_WQE_POOL_SIZE CVMX_FPA_POOL_1_SIZE
+
+/**< PKO queue command buffers */
+#define CVMX_FPA_OUTPUT_BUFFER_POOL (2)
+#define CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE CVMX_FPA_POOL_2_SIZE
+
+/**< BCH queue command buffers */
+#define CVMX_FPA_BCH_POOL (6)
+#define CVMX_FPA_BCH_POOL_SIZE CVMX_FPA_POOL_6_SIZE
+
+/************************* FAU allocation ********************************/
+/* The fetch and add registers are allocated here. They are arranged
+ * in order of descending size so that all alignment constraints are
+ * automatically met.
+ * The enums are linked so that the following enum continues allocating
+ * where the previous one left off, so the numbering within each
+ * enum always starts with zero. The macros take care of the address
+ * increment size, so the values entered always increase by 1.
+ * FAU registers are accessed with byte addresses.
+ */
+
+#define CVMX_FAU_REG_64_ADDR(x) (((x) << 3) + CVMX_FAU_REG_64_START)
+typedef enum {
+ CVMX_FAU_REG_64_START = 0,
+ /**< FAU registers for the position in PKO command buffers */
+ CVMX_FAU_REG_OQ_ADDR_INDEX = CVMX_FAU_REG_64_ADDR(0),
+ /* Array of 36 */
+ CVMX_FAU_REG_64_END = CVMX_FAU_REG_64_ADDR(36),
+} cvmx_fau_reg_64_t;
+
+#define CVMX_FAU_REG_32_ADDR(x) (((x) << 2) + CVMX_FAU_REG_32_START)
+typedef enum {
+ CVMX_FAU_REG_32_START = CVMX_FAU_REG_64_END,
+ CVMX_FAU_REG_32_END = CVMX_FAU_REG_32_ADDR(0),
+} cvmx_fau_reg_32_t;
+
+#define CVMX_FAU_REG_16_ADDR(x) (((x) << 1) + CVMX_FAU_REG_16_START)
+typedef enum {
+ CVMX_FAU_REG_16_START = CVMX_FAU_REG_32_END,
+ CVMX_FAU_REG_16_END = CVMX_FAU_REG_16_ADDR(0),
+} cvmx_fau_reg_16_t;
+
+#define CVMX_FAU_REG_8_ADDR(x) ((x) + CVMX_FAU_REG_8_START)
+typedef enum {
+ CVMX_FAU_REG_8_START = CVMX_FAU_REG_16_END,
+ CVMX_FAU_REG_8_END = CVMX_FAU_REG_8_ADDR(0),
+} cvmx_fau_reg_8_t;
+
+/* The name CVMX_FAU_REG_AVAIL_BASE is provided to indicate the first available
+ * FAU address that is not allocated in cvmx-config.h. This is 64 bit aligned.
+ */
+#define CVMX_FAU_REG_AVAIL_BASE ((CVMX_FAU_REG_8_END + 0x7) & (~0x7ULL))
+#define CVMX_FAU_REG_END (2048)
+
+/********************** scratch memory allocation *************************/
+/* Scratchpad memory allocation. Note that these are byte memory addresses.
+ * Some uses of scratchpad (IOBDMA for example) require the use of 8-byte
+ * aligned addresses, so proper alignment needs to be taken into account.
+ */
+
+/**< Pre allocation for PKO queue command buffers */
+#define CVMX_SCR_OQ_BUF_PRE_ALLOC (0)
+
+/**< Generic scratch iobdma area */
+#define CVMX_SCR_SCRATCH (8)
+
+/**< First location available after cvmx-config.h allocated region. */
+#define CVMX_SCR_REG_AVAIL_BASE (16)
+
+#endif /* __CVMX_CONFIG_H__ */
diff --git a/arch/mips/mach-octeon/include/mach/cvmx-fau.h b/arch/mips/mach-octeon/include/mach/cvmx-fau.h
new file mode 100644
index 000000000000..d795ff6e9b06
--- /dev/null
+++ b/arch/mips/mach-octeon/include/mach/cvmx-fau.h
@@ -0,0 +1,581 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018-2022 Marvell International Ltd.
+ *
+ * Interface to the hardware Fetch and Add Unit.
+ */
+
+#ifndef __CVMX_FAU_H__
+#define __CVMX_FAU_H__
+
+extern u8 *cvmx_fau_regs_ptr;
+
+/**
+ * Initializes fau, on devices with FAU hw this is a noop.
+ */
+int cvmx_fau_init(void);
+
+/**
+ * Return the location of emulated FAU register
+ */
+static inline u8 *__cvmx_fau_sw_addr(int reg)
+{
+ if (cvmx_unlikely(!cvmx_fau_regs_ptr))
+ cvmx_fau_init();
+ return (cvmx_fau_regs_ptr + reg);
+}
+
+/**
+ * Perform an atomic 64 bit add
+ *
+ * @param reg FAU atomic register to access. 0 <= reg < 2048.
+ * - Step by 8 for 64 bit access.
+ * @param value Signed value to add.
+ * Note: Only the low 22 bits are available.
+ * @return Value of the register before the update
+ */
+static inline int64_t cvmx_fau_fetch_and_add64(cvmx_fau_reg64_t reg,
+ int64_t value)
+{
+ if (octeon_has_feature(OCTEON_FEATURE_FAU))
+ return cvmx_hwfau_fetch_and_add64(reg, value);
+
+ return __atomic_fetch_add(CASTPTR(int64_t, __cvmx_fau_sw_addr(reg)),
+ value, __ATOMIC_SEQ_CST);
+}
+
+/**
+ * Perform an atomic 32 bit add
+ *
+ * @param reg FAU atomic register to access. 0 <= reg < 2048.
+ * - Step by 4 for 32 bit access.
+ * @param value Signed value to add.
+ * Note: Only the low 22 bits are available.
+ * @return Value of the register before the update
+ */
+static inline int32_t cvmx_fau_fetch_and_add32(cvmx_fau_reg32_t reg,
+ int32_t value)
+{
+ if (octeon_has_feature(OCTEON_FEATURE_FAU))
+ return cvmx_hwfau_fetch_and_add32(reg, value);
+
+ reg ^= SWIZZLE_32;
+ return __atomic_fetch_add(CASTPTR(int32_t, __cvmx_fau_sw_addr(reg)),
+ value, __ATOMIC_SEQ_CST);
+}
+
+/**
+ * Perform an atomic 16 bit add
+ *
+ * @param reg FAU atomic register to access. 0 <= reg < 2048.
+ * - Step by 2 for 16 bit access.
+ * @param value Signed value to add.
+ * @return Value of the register before the update
+ */
+static inline int16_t cvmx_fau_fetch_and_add16(cvmx_fau_reg16_t reg,
+ int16_t value)
+{
+ if (octeon_has_feature(OCTEON_FEATURE_FAU))
+ return cvmx_hwfau_fetch_and_add16(reg, value);
+
+ reg ^= SWIZZLE_16;
+ return __atomic_fetch_add(CASTPTR(int16_t, __cvmx_fau_sw_addr(reg)),
+ value, __ATOMIC_SEQ_CST);
+}
+
+/**
+ * Perform an atomic 8 bit add
+ *
+ * @param reg FAU atomic register to access. 0 <= reg < 2048.
+ * @param value Signed value to add.
+ * @return Value of the register before the update
+ */
+static inline int8_t cvmx_fau_fetch_and_add8(cvmx_fau_reg8_t reg, int8_t value)
+{
+ if (octeon_has_feature(OCTEON_FEATURE_FAU))
+ return cvmx_hwfau_fetch_and_add8(reg, value);
+
+ reg ^= SWIZZLE_8;
+ return __atomic_fetch_add(CASTPTR(int8_t, __cvmx_fau_sw_addr(reg)),
+ value, __ATOMIC_SEQ_CST);
+}
+
+/**
+ * Perform an atomic 64 bit add after the current tag switch
+ * completes
+ *
+ * @param reg FAU atomic register to access. 0 <= reg < 2048.
+ * - Step by 8 for 64 bit access.
+ * @param value Signed value to add.
+ * Note: Only the low 22 bits are available.
+ * @return If a timeout occurs, the error bit will be set. Otherwise
+ * the value of the register before the update will be
+ * returned
+ */
+static inline cvmx_fau_tagwait64_t
+cvmx_fau_tagwait_fetch_and_add64(cvmx_fau_reg64_t reg, int64_t value)
+{
+ if (octeon_has_feature(OCTEON_FEATURE_FAU))
+ return cvmx_hwfau_tagwait_fetch_and_add64(reg, value);
+
+ /* not implemented yet.*/
+ return (cvmx_fau_tagwait64_t){ 1, 0 };
+}
+
+/**
+ * Perform an atomic 32 bit add after the current tag switch
+ * completes
+ *
+ * @param reg FAU atomic register to access. 0 <= reg < 2048.
+ * - Step by 4 for 32 bit access.
+ * @param value Signed value to add.
+ * Note: Only the low 22 bits are available.
+ * @return If a timeout occurs, the error bit will be set. Otherwise
+ * the value of the register before the update will be
+ * returned
+ */
+static inline cvmx_fau_tagwait32_t
+cvmx_fau_tagwait_fetch_and_add32(cvmx_fau_reg32_t reg, int32_t value)
+{
+ if (octeon_has_feature(OCTEON_FEATURE_FAU))
+ return cvmx_hwfau_tagwait_fetch_and_add32(reg, value);
+
+ /* not implemented yet.*/
+ return (cvmx_fau_tagwait32_t){ 1, 0 };
+}
+
+/**
+ * Perform an atomic 16 bit add after the current tag switch
+ * completes
+ *
+ * @param reg FAU atomic register to access. 0 <= reg < 2048.
+ * - Step by 2 for 16 bit access.
+ * @param value Signed value to add.
+ * @return If a timeout occurs, the error bit will be set. Otherwise
+ * the value of the register before the update will be
+ * returned
+ */
+static inline cvmx_fau_tagwait16_t
+cvmx_fau_tagwait_fetch_and_add16(cvmx_fau_reg16_t reg, int16_t value)
+{
+ if (octeon_has_feature(OCTEON_FEATURE_FAU))
+ return cvmx_hwfau_tagwait_fetch_and_add16(reg, value);
+
+ /* not implemented yet.*/
+ return (cvmx_fau_tagwait16_t){ 1, 0 };
+}
+
+/**
+ * Perform an atomic 8 bit add after the current tag switch
+ * completes
+ *
+ * @param reg FAU atomic register to access. 0 <= reg < 2048.
+ * @param value Signed value to add.
+ * @return If a timeout occurs, the error bit will be set. Otherwise
+ * the value of the register before the update will be
+ * returned
+ */
+static inline cvmx_fau_tagwait8_t
+cvmx_fau_tagwait_fetch_and_add8(cvmx_fau_reg8_t reg, int8_t value)
+{
+ if (octeon_has_feature(OCTEON_FEATURE_FAU))
+ return cvmx_hwfau_tagwait_fetch_and_add8(reg, value);
+
+ /* not implemented yet.*/
+ return (cvmx_fau_tagwait8_t){ 1, 0 };
+}
+
+/**
+ * Perform an async atomic 64 bit add. The old value is
+ * placed in the scratch memory at byte address scraddr.
+ *
+ * @param scraddr Scratch memory byte address to put response in.
+ * Must be 8 byte aligned.
+ * @param reg FAU atomic register to access. 0 <= reg < 2048.
+ * - Step by 8 for 64 bit access.
+ * @param value Signed value to add.
+ * Note: Only the low 22 bits are available.
+ * @return Placed in the scratch pad register
+ */
+static inline void
+cvmx_fau_async_fetch_and_add64(u64 scraddr, cvmx_fau_reg64_t reg, int64_t value)
+{
+ if (octeon_has_feature(OCTEON_FEATURE_FAU)) {
+ cvmx_hwfau_async_fetch_and_add64(scraddr, reg, value);
+ return;
+ }
+ cvmx_scratch_write64(
+ scraddr,
+ __atomic_fetch_add(CASTPTR(int64_t, __cvmx_fau_sw_addr(reg)),
+ value, __ATOMIC_SEQ_CST));
+}
+
+/**
+ * Perform an async atomic 32 bit add. The old value is
+ * placed in the scratch memory at byte address scraddr.
+ *
+ * @param scraddr Scratch memory byte address to put response in.
+ * Must be 8 byte aligned.
+ * @param reg FAU atomic register to access. 0 <= reg < 2048.
+ * - Step by 4 for 32 bit access.
+ * @param value Signed value to add.
+ * Note: Only the low 22 bits are available.
+ * @return Placed in the scratch pad register
+ */
+static inline void
+cvmx_fau_async_fetch_and_add32(u64 scraddr, cvmx_fau_reg32_t reg, int32_t value)
+{
+ if (octeon_has_feature(OCTEON_FEATURE_FAU)) {
+ cvmx_hwfau_async_fetch_and_add32(scraddr, reg, value);
+ return;
+ }
+ cvmx_scratch_write64(
+ scraddr,
+ __atomic_fetch_add(CASTPTR(int32_t, __cvmx_fau_sw_addr(reg)),
+ value, __ATOMIC_SEQ_CST));
+}
+
+/**
+ * Perform an async atomic 16 bit add. The old value is
+ * placed in the scratch memory at byte address scraddr.
+ *
+ * @param scraddr Scratch memory byte address to put response in.
+ * Must be 8 byte aligned.
+ * @param reg FAU atomic register to access. 0 <= reg < 2048.
+ * - Step by 2 for 16 bit access.
+ * @param value Signed value to add.
+ * @return Placed in the scratch pad register
+ */
+static inline void
+cvmx_fau_async_fetch_and_add16(u64 scraddr, cvmx_fau_reg16_t reg, int16_t value)
+{
+ if (octeon_has_feature(OCTEON_FEATURE_FAU)) {
+ cvmx_hwfau_async_fetch_and_add16(scraddr, reg, value);
+ return;
+ }
+ cvmx_scratch_write64(
+ scraddr,
+ __atomic_fetch_add(CASTPTR(int16_t, __cvmx_fau_sw_addr(reg)),
+ value, __ATOMIC_SEQ_CST));
+}
+
+/**
+ * Perform an async atomic 8 bit add. The old value is
+ * placed in the scratch memory at byte address scraddr.
+ *
+ * @param scraddr Scratch memory byte address to put response in.
+ * Must be 8 byte aligned.
+ * @param reg FAU atomic register to access. 0 <= reg < 2048.
+ * @param value Signed value to add.
+ * @return Placed in the scratch pad register
+ */
+static inline void
+cvmx_fau_async_fetch_and_add8(u64 scraddr, cvmx_fau_reg8_t reg, int8_t value)
+{
+ if (octeon_has_feature(OCTEON_FEATURE_FAU)) {
+ cvmx_hwfau_async_fetch_and_add8(scraddr, reg, value);
+ return;
+ }
+ cvmx_scratch_write64(
+ scraddr,
+ __atomic_fetch_add(CASTPTR(int8_t, __cvmx_fau_sw_addr(reg)),
+ value, __ATOMIC_SEQ_CST));
+}
+
+/**
+ * Perform an async atomic 64 bit add after the current tag
+ * switch completes.
+ *
+ * @param scraddr Scratch memory byte address to put response in.
+ * Must be 8 byte aligned.
+ * If a timeout occurs, the error bit (63) will be set. Otherwise
+ * the value of the register before the update will be
+ * returned
+ * @param reg FAU atomic register to access. 0 <= reg < 2048.
+ * - Step by 8 for 64 bit access.
+ * @param value Signed value to add.
+ * Note: Only the low 22 bits are available.
+ * @return Placed in the scratch pad register
+ */
+static inline void cvmx_fau_async_tagwait_fetch_and_add64(u64 scraddr,
+ cvmx_fau_reg64_t reg,
+ int64_t value)
+{
+ if (octeon_has_feature(OCTEON_FEATURE_FAU)) {
+ cvmx_hwfau_async_tagwait_fetch_and_add64(scraddr, reg, value);
+ return;
+ }
+
+ /* Broken. Where is the tag wait? */
+ cvmx_scratch_write64(
+ scraddr,
+ __atomic_fetch_add(CASTPTR(int64_t, __cvmx_fau_sw_addr(reg)),
+ value, __ATOMIC_SEQ_CST));
+}
+
+/**
+ * Perform an async atomic 32 bit add after the current tag
+ * switch completes.
+ *
+ * @param scraddr Scratch memory byte address to put response in.
+ * Must be 8 byte aligned.
+ * If a timeout occurs, the error bit (63) will be set. Otherwise
+ * the value of the register before the update will be
+ * returned
+ * @param reg FAU atomic register to access. 0 <= reg < 2048.
+ * - Step by 4 for 32 bit access.
+ * @param value Signed value to add.
+ * Note: Only the low 22 bits are available.
+ * @return Placed in the scratch pad register
+ */
+static inline void cvmx_fau_async_tagwait_fetch_and_add32(u64 scraddr,
+ cvmx_fau_reg32_t reg,
+ int32_t value)
+{
+ if (octeon_has_feature(OCTEON_FEATURE_FAU)) {
+ cvmx_hwfau_async_tagwait_fetch_and_add32(scraddr, reg, value);
+ return;
+ }
+ /* Broken. Where is the tag wait? */
+ cvmx_scratch_write64(
+ scraddr,
+ __atomic_fetch_add(CASTPTR(int32_t, __cvmx_fau_sw_addr(reg)),
+ value, __ATOMIC_SEQ_CST));
+}
+
+/**
+ * Perform an async atomic 16 bit add after the current tag
+ * switch completes.
+ *
+ * @param scraddr Scratch memory byte address to put response in.
+ * Must be 8 byte aligned.
+ * If a timeout occurs, the error bit (63) will be set. Otherwise
+ * the value of the register before the update will be
+ * returned
+ * @param reg FAU atomic register to access. 0 <= reg < 2048.
+ * - Step by 2 for 16 bit access.
+ * @param value Signed value to add.
+ * @return Placed in the scratch pad register
+ */
+static inline void cvmx_fau_async_tagwait_fetch_and_add16(u64 scraddr,
+ cvmx_fau_reg16_t reg,
+ int16_t value)
+{
+ if (octeon_has_feature(OCTEON_FEATURE_FAU)) {
+ cvmx_hwfau_async_tagwait_fetch_and_add16(scraddr, reg, value);
+ return;
+ }
+ /* Broken. Where is the tag wait? */
+ cvmx_scratch_write64(
+ scraddr,
+ __atomic_fetch_add(CASTPTR(int16_t, __cvmx_fau_sw_addr(reg)),
+ value, __ATOMIC_SEQ_CST));
+}
+
+/**
+ * Perform an async atomic 8 bit add after the current tag
+ * switch completes.
+ *
+ * @param scraddr Scratch memory byte address to put response in.
+ * Must be 8 byte aligned.
+ * If a timeout occurs, the error bit (63) will be set. Otherwise
+ * the value of the register before the update will be
+ * returned
+ * @param reg FAU atomic register to access. 0 <= reg < 2048.
+ * @param value Signed value to add.
+ * @return Placed in the scratch pad register
+ */
+static inline void cvmx_fau_async_tagwait_fetch_and_add8(u64 scraddr,
+ cvmx_fau_reg8_t reg,
+ int8_t value)
+{
+ if (octeon_has_feature(OCTEON_FEATURE_FAU)) {
+ cvmx_hwfau_async_tagwait_fetch_and_add8(scraddr, reg, value);
+ return;
+ }
+ /* Broken. Where is the tag wait? */
+ cvmx_scratch_write64(
+ scraddr,
+ __atomic_fetch_add(CASTPTR(int8_t, __cvmx_fau_sw_addr(reg)),
+ value, __ATOMIC_SEQ_CST));
+}
+
+/**
+ * Perform an atomic 64 bit add
+ *
+ * @param reg FAU atomic register to access. 0 <= reg < 2048.
+ * - Step by 8 for 64 bit access.
+ * @param value Signed value to add.
+ */
+static inline void cvmx_fau_atomic_add64(cvmx_fau_reg64_t reg, int64_t value)
+{
+ if (octeon_has_feature(OCTEON_FEATURE_FAU)) {
+ cvmx_hwfau_atomic_add64(reg, value);
+ return;
+ }
+ /* Ignored fetch values should be optimized away */
+ __atomic_add_fetch(CASTPTR(int64_t, __cvmx_fau_sw_addr(reg)), value,
+ __ATOMIC_SEQ_CST);
+}
+
+/**
+ * Perform an atomic 32 bit add
+ *
+ * @param reg FAU atomic register to access. 0 <= reg < 2048.
+ * - Step by 4 for 32 bit access.
+ * @param value Signed value to add.
+ */
+static inline void cvmx_fau_atomic_add32(cvmx_fau_reg32_t reg, int32_t value)
+{
+ if (octeon_has_feature(OCTEON_FEATURE_FAU)) {
+ cvmx_hwfau_atomic_add32(reg, value);
+ return;
+ }
+ reg ^= SWIZZLE_32;
+ /* Ignored fetch values should be optimized away */
+ __atomic_add_fetch(CASTPTR(int32_t, __cvmx_fau_sw_addr(reg)), value,
+ __ATOMIC_SEQ_CST);
+}
+
+/**
+ * Perform an atomic 16 bit add
+ *
+ * @param reg FAU atomic register to access. 0 <= reg < 2048.
+ * - Step by 2 for 16 bit access.
+ * @param value Signed value to add.
+ */
+static inline void cvmx_fau_atomic_add16(cvmx_fau_reg16_t reg, int16_t value)
+{
+ if (octeon_has_feature(OCTEON_FEATURE_FAU)) {
+ cvmx_hwfau_atomic_add16(reg, value);
+ return;
+ }
+ reg ^= SWIZZLE_16;
+ /* Ignored fetch values should be optimized away */
+ __atomic_add_fetch(CASTPTR(int16_t, __cvmx_fau_sw_addr(reg)), value,
+ __ATOMIC_SEQ_CST);
+}
+
+/**
+ * Perform an atomic 8 bit add
+ *
+ * @param reg FAU atomic register to access. 0 <= reg < 2048.
+ * @param value Signed value to add.
+ */
+static inline void cvmx_fau_atomic_add8(cvmx_fau_reg8_t reg, int8_t value)
+{
+ if (octeon_has_feature(OCTEON_FEATURE_FAU)) {
+ cvmx_hwfau_atomic_add8(reg, value);
+ return;
+ }
+ reg ^= SWIZZLE_8;
+ /* Ignored fetch values should be optimized away */
+ __atomic_add_fetch(CASTPTR(int8_t, __cvmx_fau_sw_addr(reg)), value,
+ __ATOMIC_SEQ_CST);
+}
+
+/**
+ * Perform an atomic 64 bit write
+ *
+ * @param reg FAU atomic register to access. 0 <= reg < 2048.
+ * - Step by 8 for 64 bit access.
+ * @param value Signed value to write.
+ */
+static inline void cvmx_fau_atomic_write64(cvmx_fau_reg64_t reg, int64_t value)
+{
+ if (octeon_has_feature(OCTEON_FEATURE_FAU)) {
+ cvmx_hwfau_atomic_write64(reg, value);
+ return;
+ }
+ __atomic_store_n(CASTPTR(int64_t, __cvmx_fau_sw_addr(reg)), value,
+ __ATOMIC_SEQ_CST);
+}
+
+/**
+ * Perform an atomic 32 bit write
+ *
+ * @param reg FAU atomic register to access. 0 <= reg < 2048.
+ * - Step by 4 for 32 bit access.
+ * @param value Signed value to write.
+ */
+static inline void cvmx_fau_atomic_write32(cvmx_fau_reg32_t reg, int32_t value)
+{
+ if (octeon_has_feature(OCTEON_FEATURE_FAU)) {
+ cvmx_hwfau_atomic_write32(reg, value);
+ return;
+ }
+ reg ^= SWIZZLE_32;
+ __atomic_store_n(CASTPTR(int32_t, __cvmx_fau_sw_addr(reg)), value,
+ __ATOMIC_SEQ_CST);
+}
+
+/**
+ * Perform an atomic 16 bit write
+ *
+ * @param reg FAU atomic register to access. 0 <= reg < 2048.
+ * - Step by 2 for 16 bit access.
+ * @param value Signed value to write.
+ */
+static inline void cvmx_fau_atomic_write16(cvmx_fau_reg16_t reg, int16_t value)
+{
+ if (octeon_has_feature(OCTEON_FEATURE_FAU)) {
+ cvmx_hwfau_atomic_write16(reg, value);
+ return;
+ }
+ reg ^= SWIZZLE_16;
+ __atomic_store_n(CASTPTR(int16_t, __cvmx_fau_sw_addr(reg)), value,
+ __ATOMIC_SEQ_CST);
+}
+
+/**
+ * Perform an atomic 8 bit write
+ *
+ * @param reg FAU atomic register to access. 0 <= reg < 2048.
+ * @param value Signed value to write.
+ */
+static inline void cvmx_fau_atomic_write8(cvmx_fau_reg8_t reg, int8_t value)
+{
+ if (octeon_has_feature(OCTEON_FEATURE_FAU)) {
+ cvmx_hwfau_atomic_write8(reg, value);
+ return;
+ }
+ reg ^= SWIZZLE_8;
+ __atomic_store_n(CASTPTR(int8_t, __cvmx_fau_sw_addr(reg)), value,
+ __ATOMIC_SEQ_CST);
+}
+
+/** Allocates 64bit FAU register.
+ * @param reserve base address to reserve
+ * @return value is the base address of allocated FAU register
+ */
+int cvmx_fau64_alloc(int reserve);
+
+/** Allocates 32bit FAU register.
+ * @param reserve base address to reserve
+ * @return value is the base address of allocated FAU register
+ */
+int cvmx_fau32_alloc(int reserve);
+
+/** Allocates 16bit FAU register.
+ * @param reserve base address to reserve
+ * @return value is the base address of allocated FAU register
+ */
+int cvmx_fau16_alloc(int reserve);
+
+/** Allocates 8bit FAU register.
+ * @param reserve base address to reserve
+ * @return value is the base address of allocated FAU register
+ */
+int cvmx_fau8_alloc(int reserve);
+
+/** Frees the specified FAU register.
+ * @param address base address of register to release.
+ * @return 0 on success; -1 on failure
+ */
+int cvmx_fau_free(int address);
+
+/** Display the fau registers array
+ */
+void cvmx_fau_show(void);
+
+#endif /* __CVMX_FAU_H__ */
diff --git a/arch/mips/mach-octeon/include/mach/cvmx-mdio.h b/arch/mips/mach-octeon/include/mach/cvmx-mdio.h
new file mode 100644
index 000000000000..9bc138fa2770
--- /dev/null
+++ b/arch/mips/mach-octeon/include/mach/cvmx-mdio.h
@@ -0,0 +1,516 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018-2022 Marvell International Ltd.
+ *
+ * Interface to the SMI/MDIO hardware, including support for both IEEE 802.3
+ * clause 22 and clause 45 operations.
+ */
+
+#ifndef __CVMX_MDIO_H__
+#define __CVMX_MDIO_H__
+
+/**
+ * PHY register 0 from the 802.3 spec
+ */
+#define CVMX_MDIO_PHY_REG_CONTROL 0
+
+typedef union {
+ u16 u16;
+ struct {
+ u16 reset : 1;
+ u16 loopback : 1;
+ u16 speed_lsb : 1;
+ u16 autoneg_enable : 1;
+ u16 power_down : 1;
+ u16 isolate : 1;
+ u16 restart_autoneg : 1;
+ u16 duplex : 1;
+ u16 collision_test : 1;
+ u16 speed_msb : 1;
+ u16 unidirectional_enable : 1;
+ u16 reserved_0_4 : 5;
+ } s;
+} cvmx_mdio_phy_reg_control_t;
+
+/**
+ * PHY register 1 from the 802.3 spec
+ */
+#define CVMX_MDIO_PHY_REG_STATUS 1
+typedef union {
+ u16 u16;
+ struct {
+ u16 capable_100base_t4 : 1;
+ u16 capable_100base_x_full : 1;
+ u16 capable_100base_x_half : 1;
+ u16 capable_10_full : 1;
+ u16 capable_10_half : 1;
+ u16 capable_100base_t2_full : 1;
+ u16 capable_100base_t2_half : 1;
+ u16 capable_extended_status : 1;
+ u16 capable_unidirectional : 1;
+ u16 capable_mf_preamble_suppression : 1;
+ u16 autoneg_complete : 1;
+ u16 remote_fault : 1;
+ u16 capable_autoneg : 1;
+ u16 link_status : 1;
+ u16 jabber_detect : 1;
+ u16 capable_extended_registers : 1;
+
+ } s;
+} cvmx_mdio_phy_reg_status_t;
+
+/**
+ * PHY register 2 from the 802.3 spec
+ */
+#define CVMX_MDIO_PHY_REG_ID1 2
+typedef union {
+ u16 u16;
+ struct {
+ u16 oui_bits_3_18;
+ } s;
+} cvmx_mdio_phy_reg_id1_t;
+
+/**
+ * PHY register 3 from the 802.3 spec
+ */
+#define CVMX_MDIO_PHY_REG_ID2 3
+typedef union {
+ u16 u16;
+ struct {
+ u16 oui_bits_19_24 : 6;
+ u16 model : 6;
+ u16 revision : 4;
+ } s;
+} cvmx_mdio_phy_reg_id2_t;
+
+/**
+ * PHY register 4 from the 802.3 spec
+ */
+#define CVMX_MDIO_PHY_REG_AUTONEG_ADVER 4
+typedef union {
+ u16 u16;
+ struct {
+ u16 next_page : 1;
+ u16 reserved_14 : 1;
+ u16 remote_fault : 1;
+ u16 reserved_12 : 1;
+ u16 asymmetric_pause : 1;
+ u16 pause : 1;
+ u16 advert_100base_t4 : 1;
+ u16 advert_100base_tx_full : 1;
+ u16 advert_100base_tx_half : 1;
+ u16 advert_10base_tx_full : 1;
+ u16 advert_10base_tx_half : 1;
+ u16 selector : 5;
+ } s;
+} cvmx_mdio_phy_reg_autoneg_adver_t;
+
+/**
+ * PHY register 5 from the 802.3 spec
+ */
+#define CVMX_MDIO_PHY_REG_LINK_PARTNER_ABILITY 5
+typedef union {
+ u16 u16;
+ struct {
+ u16 next_page : 1;
+ u16 ack : 1;
+ u16 remote_fault : 1;
+ u16 reserved_12 : 1;
+ u16 asymmetric_pause : 1;
+ u16 pause : 1;
+ u16 advert_100base_t4 : 1;
+ u16 advert_100base_tx_full : 1;
+ u16 advert_100base_tx_half : 1;
+ u16 advert_10base_tx_full : 1;
+ u16 advert_10base_tx_half : 1;
+ u16 selector : 5;
+ } s;
+} cvmx_mdio_phy_reg_link_partner_ability_t;
+
+/**
+ * PHY register 6 from the 802.3 spec
+ */
+#define CVMX_MDIO_PHY_REG_AUTONEG_EXPANSION 6
+typedef union {
+ u16 u16;
+ struct {
+ u16 reserved_5_15 : 11;
+ u16 parallel_detection_fault : 1;
+ u16 link_partner_next_page_capable : 1;
+ u16 local_next_page_capable : 1;
+ u16 page_received : 1;
+ u16 link_partner_autoneg_capable : 1;
+
+ } s;
+} cvmx_mdio_phy_reg_autoneg_expansion_t;
+
+/**
+ * PHY register 9 from the 802.3 spec
+ */
+#define CVMX_MDIO_PHY_REG_CONTROL_1000 9
+typedef union {
+ u16 u16;
+ struct {
+ u16 test_mode : 3;
+ u16 manual_master_slave : 1;
+ u16 master : 1;
+ u16 port_type : 1;
+ u16 advert_1000base_t_full : 1;
+ u16 advert_1000base_t_half : 1;
+ u16 reserved_0_7 : 8;
+ } s;
+} cvmx_mdio_phy_reg_control_1000_t;
+
+/**
+ * PHY register 10 from the 802.3 spec
+ */
+#define CVMX_MDIO_PHY_REG_STATUS_1000 10
+typedef union {
+ u16 u16;
+ struct {
+ u16 master_slave_fault : 1;
+ u16 is_master : 1;
+ u16 local_receiver_ok : 1;
+ u16 remote_receiver_ok : 1;
+ u16 remote_capable_1000base_t_full : 1;
+ u16 remote_capable_1000base_t_half : 1;
+ u16 reserved_8_9 : 2;
+ u16 idle_error_count : 8;
+ } s;
+} cvmx_mdio_phy_reg_status_1000_t;
+
+/**
+ * PHY register 15 from the 802.3 spec
+ */
+#define CVMX_MDIO_PHY_REG_EXTENDED_STATUS 15
+typedef union {
+ u16 u16;
+ struct {
+ u16 capable_1000base_x_full : 1;
+ u16 capable_1000base_x_half : 1;
+ u16 capable_1000base_t_full : 1;
+ u16 capable_1000base_t_half : 1;
+ u16 reserved_0_11 : 12;
+ } s;
+} cvmx_mdio_phy_reg_extended_status_t;
+
+/**
+ * PHY register 13 from the 802.3 spec
+ */
+#define CVMX_MDIO_PHY_REG_MMD_CONTROL 13
+typedef union {
+ u16 u16;
+ struct {
+ u16 function : 2;
+ u16 reserved_5_13 : 9;
+ u16 devad : 5;
+ } s;
+} cvmx_mdio_phy_reg_mmd_control_t;
+
+/**
+ * PHY register 14 from the 802.3 spec
+ */
+#define CVMX_MDIO_PHY_REG_MMD_ADDRESS_DATA 14
+typedef union {
+ u16 u16;
+ struct {
+ u16 address_data : 16;
+ } s;
+} cvmx_mdio_phy_reg_mmd_address_data_t;
+
+/* Operating request encodings. */
+#define MDIO_CLAUSE_22_WRITE 0
+#define MDIO_CLAUSE_22_READ 1
+
+#define MDIO_CLAUSE_45_ADDRESS 0
+#define MDIO_CLAUSE_45_WRITE 1
+#define MDIO_CLAUSE_45_READ_INC 2
+#define MDIO_CLAUSE_45_READ 3
+
+/* MMD identifiers, mostly for accessing devices within XENPAK modules. */
+#define CVMX_MMD_DEVICE_PMA_PMD 1
+#define CVMX_MMD_DEVICE_WIS 2
+#define CVMX_MMD_DEVICE_PCS 3
+#define CVMX_MMD_DEVICE_PHY_XS 4
+#define CVMX_MMD_DEVICE_DTS_XS 5
+#define CVMX_MMD_DEVICE_TC 6
+#define CVMX_MMD_DEVICE_CL22_EXT 29
+#define CVMX_MMD_DEVICE_VENDOR_1 30
+#define CVMX_MMD_DEVICE_VENDOR_2 31
+
+#define CVMX_MDIO_TIMEOUT 100000 /* 100 millisec */
+
+static inline int cvmx_mdio_bus_id_to_node(int bus_id)
+{
+ if (OCTEON_IS_MODEL(OCTEON_CN78XX))
+ return (bus_id >> 2) & CVMX_NODE_MASK;
+ else
+ return 0;
+}
+
+static inline int cvmx_mdio_bus_id_to_bus(int bus_id)
+{
+ if (OCTEON_IS_MODEL(OCTEON_CN78XX))
+ return bus_id & 3;
+ else
+ return bus_id;
+}
+
+/* Helper function to put MDIO interface into clause 45 mode */
+static inline void __cvmx_mdio_set_clause45_mode(int bus_id)
+{
+	int node = cvmx_mdio_bus_id_to_node(bus_id);
+	int bus = cvmx_mdio_bus_id_to_bus(bus_id);
+	cvmx_smix_clk_t clk;
+
+	/* Read-modify-write SMIX_CLK: clause 45 framing with preamble */
+	clk.u64 = csr_rd_node(node, CVMX_SMIX_CLK(bus));
+	clk.s.mode = 1;
+	clk.s.preamble = 1;
+	csr_wr_node(node, CVMX_SMIX_CLK(bus), clk.u64);
+}
+
+/* Helper function to put MDIO interface into clause 22 mode */
+static inline void __cvmx_mdio_set_clause22_mode(int bus_id)
+{
+	int node = cvmx_mdio_bus_id_to_node(bus_id);
+	int bus = cvmx_mdio_bus_id_to_bus(bus_id);
+	cvmx_smix_clk_t clk;
+
+	/* Read-modify-write SMIX_CLK: select clause 22 framing */
+	clk.u64 = csr_rd_node(node, CVMX_SMIX_CLK(bus));
+	clk.s.mode = 0;
+	csr_wr_node(node, CVMX_SMIX_CLK(bus), clk.u64);
+}
+
+/**
+ * @INTERNAL
+ * Poll SMIX_RD_DAT until the pending bit clears or the MDIO
+ * timeout expires. This sequence is needed after every SMI read
+ * command, so it is factored out into one place.
+ *
+ * @param bus_id SMI/MDIO bus to read
+ *
+ * @return Value of SMIX_RD_DAT. The pending bit is still set
+ *         if the operation timed out.
+ */
+static inline cvmx_smix_rd_dat_t __cvmx_mdio_read_rd_dat(int bus_id)
+{
+	int node = cvmx_mdio_bus_id_to_node(bus_id);
+	int bus = cvmx_mdio_bus_id_to_bus(bus_id);
+	cvmx_smix_rd_dat_t rd_dat;
+	u64 start = get_timer(0);
+
+	for (;;) {
+		mdelay(1);
+		rd_dat.u64 = csr_rd_node(node, CVMX_SMIX_RD_DAT(bus));
+		if (!rd_dat.s.pending)
+			break;
+		/* CVMX_MDIO_TIMEOUT is in us, get_timer() counts in ms */
+		if (get_timer(start) > (CVMX_MDIO_TIMEOUT / 1000))
+			break;
+	}
+
+	return rd_dat;
+}
+
+/**
+ * Perform an IEEE 802.3 clause 22 MII read. Used to read PHY
+ * registers controlling auto negotiation.
+ *
+ * @param bus_id MDIO bus number. Zero on most chips, but some chips (ex CN56XX)
+ *     support multiple busses.
+ * @param phy_id The MII phy id
+ * @param location Register location to read
+ *
+ * @return Result from the read or -1 on failure
+ */
+static inline int cvmx_mdio_read(int bus_id, int phy_id, int location)
+{
+	int node = cvmx_mdio_bus_id_to_node(bus_id);
+	int bus = cvmx_mdio_bus_id_to_bus(bus_id);
+	cvmx_smix_cmd_t cmd;
+	cvmx_smix_rd_dat_t rd;
+
+	/* Chips that support clause 45 must be switched back to clause 22 */
+	if (octeon_has_feature(OCTEON_FEATURE_MDIO_CLAUSE_45))
+		__cvmx_mdio_set_clause22_mode(bus_id);
+
+	/* Issue the read command */
+	cmd.u64 = 0;
+	cmd.s.phy_op = MDIO_CLAUSE_22_READ;
+	cmd.s.phy_adr = phy_id;
+	cmd.s.reg_adr = location;
+	csr_wr_node(node, CVMX_SMIX_CMD(bus), cmd.u64);
+
+	/* Poll for completion; val indicates the data word is valid */
+	rd = __cvmx_mdio_read_rd_dat(bus_id);
+
+	return rd.s.val ? rd.s.dat : -1;
+}
+
+/**
+ * Perform an IEEE 802.3 clause 22 MII write. Used to write PHY
+ * registers controlling auto negotiation.
+ *
+ * @param bus_id MDIO bus number. Zero on most chips, but some chips (ex CN56XX)
+ *     support multiple busses.
+ * @param phy_id The MII phy id
+ * @param location Register location to write
+ * @param val Value to write
+ *
+ * @return -1 on error
+ *     0 on success
+ */
+static inline int cvmx_mdio_write(int bus_id, int phy_id, int location, int val)
+{
+	int node = cvmx_mdio_bus_id_to_node(bus_id);
+	int bus = cvmx_mdio_bus_id_to_bus(bus_id);
+	cvmx_smix_wr_dat_t wr;
+	cvmx_smix_cmd_t cmd;
+
+	/* Chips that support clause 45 must be switched back to clause 22 */
+	if (octeon_has_feature(OCTEON_FEATURE_MDIO_CLAUSE_45))
+		__cvmx_mdio_set_clause22_mode(bus_id);
+
+	/* Stage the data word before issuing the command */
+	wr.u64 = 0;
+	wr.s.dat = val;
+	csr_wr_node(node, CVMX_SMIX_WR_DAT(bus), wr.u64);
+
+	cmd.u64 = 0;
+	cmd.s.phy_op = MDIO_CLAUSE_22_WRITE;
+	cmd.s.phy_adr = phy_id;
+	cmd.s.reg_adr = location;
+	csr_wr_node(node, CVMX_SMIX_CMD(bus), cmd.u64);
+
+	/* Wait for the hardware to consume the staged data word */
+	if (CVMX_WAIT_FOR_FIELD64_NODE(node, CVMX_SMIX_WR_DAT(bus),
+				       cvmx_smix_wr_dat_t, pending, ==, 0,
+				       CVMX_MDIO_TIMEOUT))
+		return -1;
+
+	return 0;
+}
+
+/**
+ * Perform an IEEE 802.3 clause 45 MII read. Used to read PHY
+ * registers controlling auto negotiation.
+ *
+ * A clause 45 read is a two-phase operation: an address cycle
+ * selecting the register within the MMD, then a read cycle.
+ *
+ * @param bus_id MDIO bus number. Zero on most chips, but some chips (ex CN56XX)
+ *     support multiple busses.
+ * @param phy_id The MII phy id
+ * @param device MDIO Manageable Device (MMD) id
+ * @param location Register location to read
+ *
+ * @return Result from the read or -1 on failure
+ */
+static inline int cvmx_mdio_45_read(int bus_id, int phy_id, int device,
+				    int location)
+{
+	int node = cvmx_mdio_bus_id_to_node(bus_id);
+	int bus = cvmx_mdio_bus_id_to_bus(bus_id);
+	cvmx_smix_cmd_t cmd;
+	cvmx_smix_rd_dat_t rd;
+	cvmx_smix_wr_dat_t wr;
+
+	if (!octeon_has_feature(OCTEON_FEATURE_MDIO_CLAUSE_45))
+		return -1;
+
+	__cvmx_mdio_set_clause45_mode(bus_id);
+
+	/* Address phase: stage the register address, then send it */
+	wr.u64 = 0;
+	wr.s.dat = location;
+	csr_wr_node(node, CVMX_SMIX_WR_DAT(bus), wr.u64);
+
+	cmd.u64 = 0;
+	cmd.s.phy_op = MDIO_CLAUSE_45_ADDRESS;
+	cmd.s.phy_adr = phy_id;
+	cmd.s.reg_adr = device;
+	csr_wr_node(node, CVMX_SMIX_CMD(bus), cmd.u64);
+
+	if (CVMX_WAIT_FOR_FIELD64_NODE(node, CVMX_SMIX_WR_DAT(bus),
+				       cvmx_smix_wr_dat_t, pending, ==, 0,
+				       CVMX_MDIO_TIMEOUT)) {
+		debug("cvmx_mdio_45_read: bus_id %d phy_id %2d device %2d register %2d TIME OUT(address)\n",
+		      bus_id, phy_id, device, location);
+		return -1;
+	}
+
+	/* Read phase */
+	cmd.u64 = 0;
+	cmd.s.phy_op = MDIO_CLAUSE_45_READ;
+	cmd.s.phy_adr = phy_id;
+	cmd.s.reg_adr = device;
+	csr_wr_node(node, CVMX_SMIX_CMD(bus), cmd.u64);
+
+	rd = __cvmx_mdio_read_rd_dat(bus_id);
+	if (rd.s.pending) {
+		debug("cvmx_mdio_45_read: bus_id %d phy_id %2d device %2d register %2d TIME OUT(data)\n",
+		      bus_id, phy_id, device, location);
+		return -1;
+	}
+
+	if (rd.s.val)
+		return rd.s.dat;
+
+	debug("cvmx_mdio_45_read: bus_id %d phy_id %2d device %2d register %2d INVALID READ\n",
+	      bus_id, phy_id, device, location);
+	return -1;
+}
+
+/**
+ * Perform an IEEE 802.3 clause 45 MII write. Used to write PHY
+ * registers controlling auto negotiation.
+ *
+ * A clause 45 write is a two-phase operation: an address cycle
+ * selecting the register within the MMD, then a write cycle.
+ *
+ * @param bus_id MDIO bus number. Zero on most chips, but some chips (ex CN56XX)
+ *     support multiple busses.
+ * @param phy_id The MII phy id
+ * @param device MDIO Manageable Device (MMD) id
+ * @param location Register location to write
+ * @param val Value to write
+ *
+ * @return -1 on error
+ *     0 on success
+ */
+static inline int cvmx_mdio_45_write(int bus_id, int phy_id, int device,
+				     int location, int val)
+{
+	int node = cvmx_mdio_bus_id_to_node(bus_id);
+	int bus = cvmx_mdio_bus_id_to_bus(bus_id);
+	cvmx_smix_cmd_t cmd;
+	cvmx_smix_wr_dat_t wr;
+
+	if (!octeon_has_feature(OCTEON_FEATURE_MDIO_CLAUSE_45))
+		return -1;
+
+	__cvmx_mdio_set_clause45_mode(bus_id);
+
+	/* Address phase: stage the register address, then send it */
+	wr.u64 = 0;
+	wr.s.dat = location;
+	csr_wr_node(node, CVMX_SMIX_WR_DAT(bus), wr.u64);
+
+	cmd.u64 = 0;
+	cmd.s.phy_op = MDIO_CLAUSE_45_ADDRESS;
+	cmd.s.phy_adr = phy_id;
+	cmd.s.reg_adr = device;
+	csr_wr_node(node, CVMX_SMIX_CMD(bus), cmd.u64);
+
+	if (CVMX_WAIT_FOR_FIELD64_NODE(node, CVMX_SMIX_WR_DAT(bus),
+				       cvmx_smix_wr_dat_t, pending, ==, 0,
+				       CVMX_MDIO_TIMEOUT))
+		return -1;
+
+	/* Write phase: stage the data word, then send it */
+	wr.u64 = 0;
+	wr.s.dat = val;
+	csr_wr_node(node, CVMX_SMIX_WR_DAT(bus), wr.u64);
+
+	cmd.u64 = 0;
+	cmd.s.phy_op = MDIO_CLAUSE_45_WRITE;
+	cmd.s.phy_adr = phy_id;
+	cmd.s.reg_adr = device;
+	csr_wr_node(node, CVMX_SMIX_CMD(bus), cmd.u64);
+
+	if (CVMX_WAIT_FOR_FIELD64_NODE(node, CVMX_SMIX_WR_DAT(bus),
+				       cvmx_smix_wr_dat_t, pending, ==, 0,
+				       CVMX_MDIO_TIMEOUT))
+		return -1;
+
+	return 0;
+}
+
+#endif
diff --git a/arch/mips/mach-octeon/include/mach/cvmx-pki-cluster.h b/arch/mips/mach-octeon/include/mach/cvmx-pki-cluster.h
new file mode 100644
index 000000000000..4d5a9d4ec829
--- /dev/null
+++ b/arch/mips/mach-octeon/include/mach/cvmx-pki-cluster.h
@@ -0,0 +1,343 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018-2022 Marvell International Ltd.
+ */
+
+/* L4_PORT_CHECK_DISABLE_LF tag */
+/*
+ * PKI cluster parse-engine microcode image. This table is autogenerated
+ * from ipemainc.elf -- do not edit the values by hand; regenerate the
+ * whole file instead.
+ */
+const int cvmx_pki_cluster_code_length = 997;
+const u64 cvmx_pki_cluster_code_default[] = {
+ 0x000000000a000000ull, 0x0000413a68024070ull, 0x0000813800200020ull,
+ 0x900081b800200020ull, 0x0004da00ffff0001ull, 0x000455ab68010b0eull,
+ 0x00045fba46010000ull, 0x9046898120002000ull, 0x0004418068010028ull,
+ 0x90665300680100f0ull, 0x0004413f68004070ull, 0x00065380680100f0ull,
+ 0x00045a346803a0f0ull, 0x000401b448000001ull, 0x00045cb968030870ull,
+ 0x0007debd00100010ull, 0x0000813b80008000ull, 0x000441bb68004070ull,
+ 0xd001c00000000000ull, 0xd021c00000000000ull, 0x00045f80680100f0ull,
+ 0x0004c639ff000200ull, 0x0004403f72010000ull, 0x0001c00000000000ull,
+ 0x0001c00000000000ull, 0x000041ba68034078ull, 0x0000512268030870ull,
+ 0x000041bc68034070ull, 0x00005d3a68030870ull, 0x00045cb942080000ull,
+ 0x0004552a4e09312dull, 0x00045cb968082868ull, 0x0004410246090000ull,
+ 0x0000813800800080ull, 0x000401a486000005ull, 0x000615ab74000123ull,
+ 0x0007122448000004ull, 0x0000813901000000ull, 0x000481b800010001ull,
+ 0x000685b800020002ull, 0xa006823800010001ull, 0x0006c639ff000400ull,
+ 0x00085f3e68010a00ull, 0xa0885f3e68010f01ull, 0x00085f3e68010405ull,
+ 0x00085f3e68010906ull, 0xa0485f3e68010e07ull, 0xa061c00000000000ull,
+ 0xa4085f3e68010b28ull, 0xa421c00000000000ull, 0x00095f3e68010940ull,
+ 0xa066403e72010000ull, 0x000941be68034039ull, 0x00085f3e68010305ull,
+ 0xa4685f3e68010028ull, 0x00095f3e68030030ull, 0x00095f3e68010416ull,
+ 0x0001c00000000000ull, 0x00065cb942080000ull, 0xa046552a4e09312dull,
+ 0xa446c639ff000500ull, 0x0006debd00010001ull, 0x0006403e72010001ull,
+ 0x0001c00000000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
+ 0x0001c00000000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
+ 0x0001c00000000000ull, 0x00065cb942080000ull, 0x0006552a4e09312dull,
+ 0x00065cb968082868ull, 0x0006410246090000ull, 0x9060813901000000ull,
+ 0x0004c639ff000800ull, 0x0004400072010000ull, 0x0001c00000000000ull,
+ 0x0001c00000000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
+ 0x0001c00000000000ull, 0x0001c00000000000ull, 0x00045cb942080000ull,
+ 0x9084552a4e09312dull, 0x90a4c639ff000900ull, 0x00045f80680100f0ull,
+ 0x0004403f72010001ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
+ 0x0001c00000000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
+ 0x0001c00000000000ull, 0x00045cb942080000ull, 0x9004552a4e09312dull,
+ 0x0004c639ff000a00ull, 0x0004400072010000ull, 0x00048181ff00ff00ull,
+ 0x0007820101000100ull, 0x0006898100ff00ffull, 0x00048301ffff0180ull,
+ 0x0008d5ab10001000ull, 0x0004d4a900010001ull, 0x0001c00000000000ull,
+ 0x00045cb942080000ull, 0x9024552a4e09312dull, 0x0004c639ff000b00ull,
+ 0x90445f80680100f0ull, 0x000459b368020070ull, 0x000401024000000cull,
+ 0x0006823fffffffffull, 0x00088281ffffffffull, 0x000ad5ab20002000ull,
+ 0x0001c00000000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
+ 0x0001c00000000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
+ 0x0004403f72010001ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
+ 0x0001c00000000000ull, 0x000c8b3fffffc200ull, 0x000c8b01ffff0001ull,
+ 0x000ddebd00020002ull, 0x00045cb942080000ull, 0x0004552a4e09312dull,
+ 0x00045cb968082868ull, 0x0004410246090000ull, 0x0000813901000000ull,
+ 0x000481b800080008ull, 0x9846c639ff001200ull, 0x9861c00000000000ull,
+ 0x00064180680100f0ull, 0x0006400372010000ull, 0x0001c00000000000ull,
+ 0x0001c00000000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
+ 0x0001c00000000000ull, 0x0001c00000000000ull, 0x000683891f000200ull,
+ 0x000ed52a00800080ull, 0x000e5e3c68020070ull, 0x00065cb942080000ull,
+ 0x0006552a4e09312dull, 0x00065cb968082868ull, 0x0006410246090000ull,
+ 0x0000813d00020002ull, 0x0004893901000000ull, 0x9004893800040004ull,
+ 0x9024c639ff001300ull, 0x00044180680100f0ull, 0x9044400372010001ull,
+ 0x0001c00000000000ull, 0x00045f3e68010044ull, 0x0004debd00040004ull,
+ 0x0001c00000000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
+ 0x000483891f000200ull, 0x000ed52a00800080ull, 0x000e5e3c68020070ull,
+ 0x00045cb942080000ull, 0x0004552a4e09312dull, 0x00045cb968082868ull,
+ 0x0004410246090000ull, 0x000581b902000000ull, 0x9826c639ff001800ull,
+ 0x9801c00000000000ull, 0x00064180680100f0ull, 0x0006400172030000ull,
+ 0x0001c00000000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
+ 0x0001c00000000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
+ 0x000682091f000200ull, 0x000883aa00800080ull, 0x000ed52a00400040ull,
+ 0x000e5e3c68020870ull, 0x000fd52a00800080ull, 0x000f5e3c68020070ull,
+ 0x000983891f000000ull, 0x000f54a968090148ull, 0x000f59b368020870ull,
+ 0x00065cb942080000ull, 0x0006552a4e09312dull, 0x00065cb968082868ull,
+ 0x0006410246090000ull, 0x000081b902000000ull, 0x9826c639ff001900ull,
+ 0x9801c00000000000ull, 0x00064180680100f0ull, 0x0006400172030001ull,
+ 0x0001c00000000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
+ 0x0001c00000000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
+ 0x000682091f000200ull, 0x000883aa00800080ull, 0x000ed52a00400040ull,
+ 0x000e5e3c68020870ull, 0x000fd52a00800080ull, 0x000f5e3c68020070ull,
+ 0x000983891f000000ull, 0x000f54a968090148ull, 0x000f59b368020870ull,
+ 0x00065cb942080000ull, 0x0006552a4e09312dull, 0x00065cb968082868ull,
+ 0x0006410246090000ull, 0x000081b902000000ull, 0x9826c639ff001a00ull,
+ 0x9801c00000000000ull, 0x00064180680100f0ull, 0x0006400172030000ull,
+ 0x0001c00000000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
+ 0x0001c00000000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
+ 0x000682091f000200ull, 0x000883aa00800080ull, 0x000ed52a00400040ull,
+ 0x000e5e3c68020870ull, 0x000fd52a00800080ull, 0x000f5e3c68020070ull,
+ 0x000983891f000000ull, 0x000f54a968090148ull, 0x000f59b368020870ull,
+ 0x00065cb942080000ull, 0x0006552a4e09312dull, 0x00065cb968082868ull,
+ 0x0006410246090000ull, 0x000081b902000000ull, 0x9826c639ff001b00ull,
+ 0x9801c00000000000ull, 0x00064180680100f0ull, 0x0006400172030001ull,
+ 0x0001c00000000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
+ 0x0001c00000000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
+ 0x000682091f000200ull, 0x000883aa00800080ull, 0x000ed52a00400040ull,
+ 0x000e5e3c68020870ull, 0x000fd52a00800080ull, 0x000f5e3c68020070ull,
+ 0x000983891f000000ull, 0x000f54a968090148ull, 0x000f59b368020870ull,
+ 0x00065cb942080000ull, 0x0006552a4e09312dull, 0x00065cb968082868ull,
+ 0x0006410246090000ull, 0x9000813902000000ull, 0x000481b800400040ull,
+ 0x00068981ffff8847ull, 0x00068581ffff8848ull, 0x0006debd00080008ull,
+ 0x0006c639ff001e00ull, 0x0006010240000002ull, 0x9801c00000000000ull,
+ 0x9821c00000000000ull, 0x00065f80680100f0ull, 0x0006403f72010000ull,
+ 0x0001c00000000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
+ 0x0001c00000000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
+ 0x00065cb942080000ull, 0x0006552a4e09312dull, 0x00065cb968082868ull,
+ 0x0006010240000004ull, 0x0006823902000000ull, 0x00065f3e68010629ull,
+ 0xac28828101000100ull, 0x000b010240000004ull, 0xa42b820101000100ull,
+ 0x0009010240000004ull, 0xac29828101000100ull, 0x000b010240000004ull,
+ 0xa42b820101000100ull, 0x0009010240000004ull, 0xac29828101000100ull,
+ 0x000b010240000004ull, 0x0006823904000000ull, 0x0008d4a907c00200ull,
+ 0x0008593268020070ull, 0x0008dcb902000200ull, 0x9000813902000000ull,
+ 0x0001c00000000000ull, 0x00040181840005ffull, 0x0006010240000008ull,
+ 0x9801c00000000000ull, 0x0006debd00200020ull, 0x00048181ffff0806ull,
+ 0x0006d4a907c00180ull, 0x00048201ffff8035ull, 0x00068581ffff8035ull,
+ 0x0008d4a907c001c0ull, 0x0006dcb97c007c00ull, 0x00048201ffff0800ull,
+ 0x00088601ffff86ddull, 0x00068581ffff0800ull, 0x00068581ffff86ddull,
+ 0x0008d4a907c00200ull, 0x0009dcb97c007c00ull, 0x0007823d00200020ull,
+ 0x000685bd00200020ull, 0x0008d4a907c00140ull, 0x0004010240000002ull,
+ 0x0006593268020070ull, 0x000042a486020000ull, 0x000a15ab74000124ull,
+ 0x9000813904000000ull, 0x0001c00000000000ull, 0x00048181f0004000ull,
+ 0x9886593268020070ull, 0x0006d4a907c00200ull, 0x00068201ff000000ull,
+ 0xa40815ab74000345ull, 0x0009debd01000100ull, 0xa429418068010038ull,
+ 0x00095a3468010870ull, 0x0009028386000005ull, 0x000a068186000014ull,
+ 0xacca15ab74000343ull, 0xacebc639ff002200ull, 0x000b5f80680100f0ull,
+ 0xac8b403f72010000ull, 0x000b8203000f0005ull, 0x000b5a3468010070ull,
+ 0x0009d4a907c00240ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
+ 0x0001c00000000000ull, 0x000b5cb942080000ull, 0xad0b552a4e09312dull,
+ 0xad2bc639ff002700ull, 0x000b5f80680100f0ull, 0xac6b403f72010001ull,
+ 0x0001c00000000000ull, 0x000b82013fff0000ull, 0x0009d52a00010001ull,
+ 0x0009d4a9f8006800ull, 0x0009593268020870ull, 0x0006418068030230ull,
+ 0x000b5cb942080000ull, 0x000b552a4e09312dull, 0x0006410240030000ull,
+ 0x9c01c00000000000ull, 0x0001c00000000000ull, 0x00078201f0006000ull,
+ 0x0008593268020070ull, 0x0008d4a907c00280ull, 0xa069d4a907c00000ull,
+ 0x00085a3468010874ull, 0x0008818100ff0000ull, 0x000615ab74000345ull,
+ 0x00075a3468010078ull, 0x9c8741b9680040f0ull, 0x9ca7c603ff001f00ull,
+ 0x00075f80680100f0ull, 0x0007403f72010001ull, 0x0001c00000000000ull,
+ 0x0001c00000000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
+ 0x0001c00000000000ull, 0x0001c00000000000ull, 0x0007418342080000ull,
+ 0x9cc7552a4e09312dull, 0x9ce7c603ff002000ull, 0x00075f80680100f0ull,
+ 0x0007403f72010000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
+ 0x0001c00000000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
+ 0x0001c00000000000ull, 0x0007418342080000ull, 0x9d07552a4e09312dull,
+ 0x9d27c603ff002100ull, 0x00075f80680100f0ull, 0x0007403f72010001ull,
+ 0x0001c00000000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
+ 0x0001c00000000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
+ 0x0007418342080000ull, 0x0007552a4e09312dull, 0x9d475c80680300f0ull,
+ 0x9d67c639ff002200ull, 0x00075f80680100f0ull, 0x0007403f72010000ull,
+ 0x0001c00000000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
+ 0x0001c00000000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
+ 0x00075cb942080000ull, 0x0007552a4e09312dull, 0x9d8741b9680040f0ull,
+ 0x9da7c603ff002400ull, 0x00075f80680100f0ull, 0x0001c00000000000ull,
+ 0x0001c00000000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
+ 0x0001c00000000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
+ 0x0001c00000000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
+ 0x0007403f72010000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
+ 0x0001c00000000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
+ 0x0001c00000000000ull, 0x0007418342080000ull, 0x9dc7552a4e09312dull,
+ 0x9de7c603ff002500ull, 0x00075f80680100f0ull, 0x0007403f72010001ull,
+ 0x0001c00000000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
+ 0x0001c00000000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
+ 0x0007418342080000ull, 0x0007552a4e09312dull, 0x0007010240000020ull,
+ 0x9c01c00000000000ull, 0x9c27c603ff002600ull, 0x00075f80680100f0ull,
+ 0x0007403f72010000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
+ 0x0001c00000000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
+ 0x0001c00000000000ull, 0x0007418342080000ull, 0x0007552a4e09312dull,
+ 0x9c475c80680300f0ull, 0x9c67c639ff002700ull, 0x00075f80680100f0ull,
+ 0x0007403f72010001ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
+ 0x0001c00000000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
+ 0x0001c00000000000ull, 0x00075cb942080000ull, 0x0007552a4e09312dull,
+ 0x0007010240000008ull, 0xa80782b400ff0000ull, 0x000ad4a907c002c0ull,
+ 0x000a5a3468010078ull, 0x000a410244010000ull, 0xa80782b400ff003cull,
+ 0x000ad4a907c002c0ull, 0x000a5a3468010078ull, 0x000a410244010000ull,
+ 0xa80782b400ff002bull, 0x000ad4a907c002c0ull, 0x000a5a3468010078ull,
+ 0x000a410244010000ull, 0xa80782b400ff002cull, 0x000ad4a9ffc06ac0ull,
+ 0x000a593268020870ull, 0x000ad52a00010001ull, 0x000a5a3468010078ull,
+ 0x000a010240000008ull, 0x0007debd01000100ull, 0x000481bd01000100ull,
+ 0x0006c639ff002300ull, 0x000641aa68034000ull, 0x000641a968034846ull,
+ 0x0006403472030001ull, 0x0004822907000200ull, 0x000915ab74000341ull,
+ 0x000082aa00010001ull, 0x000a86ab00ff0045ull, 0x000adcb978007800ull,
+ 0x0000822907000200ull, 0x00088a3908000000ull, 0x00065cb942080000ull,
+ 0x0006552a4e09312dull, 0x00065cb968082868ull, 0x0006410246090000ull,
+ 0x000042a486020000ull, 0x000a15ab74000343ull, 0x000081b940004000ull,
+ 0x000685a907c00000ull, 0x000782b807000100ull, 0x000a41b268004070ull,
+ 0x000a410040030000ull, 0x000a41ba68004078ull, 0x000a410240030000ull,
+ 0xa801c00000000000ull, 0xa821c00000000000ull, 0x000a4180680100f0ull,
+ 0x000ac639ff003900ull, 0x000a400372010001ull, 0x0001c00000000000ull,
+ 0x0001c00000000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
+ 0x0001c00000000000ull, 0x0001c00000000000ull, 0x000a83891f000000ull,
+ 0x000f542868090a48ull, 0x000f583068020070ull, 0x000a5cb942080000ull,
+ 0x000a552a4e09312dull, 0x000a5cb968082868ull, 0x000a410246090000ull,
+ 0x982881b400ff0011ull, 0x9881c00000000000ull, 0x00064180680100f0ull,
+ 0x00068283ffff12b5ull, 0x000a8a8108000800ull, 0x000ad4a9f8009800ull,
+ 0x00068303ffff17c1ull, 0x000c8b01c0000000ull, 0xb0ac5bb768010a58ull,
+ 0x000cd4a9f800b800ull, 0x000c8281ffff6558ull, 0x000adbb701000100ull,
+ 0x000c8281ffff86ddull, 0x000a8681ffff0800ull, 0x000adbb702000200ull,
+ 0x000682a9c8009800ull, 0x000adebd02000200ull, 0x000a593268020870ull,
+ 0x000a010240000008ull, 0x9c21c00000000000ull, 0x0007813400ff002full,
+ 0x90048201ffff6558ull, 0x00098381ffff0800ull, 0x00088281b0002000ull,
+ 0x000a593268020870ull, 0x000ad4a9f800a800ull, 0x000adebd02000200ull,
+ 0x000e593268020870ull, 0x000ed4a9f800a000ull, 0x000e010240000004ull,
+ 0x000e828180008000ull, 0x000a010240000004ull, 0x000e828120002000ull,
+ 0x000a010240000004ull, 0x000e828110001000ull, 0x000a010240000004ull,
+ 0x000082bd02000200ull, 0xa80ac639ff002800ull, 0xa861c00000000000ull,
+ 0x000a418368010526ull, 0xa84a418368010878ull, 0x000a5bb768030078ull,
+ 0x000a400172030000ull, 0x000a5b00680100f0ull, 0x000041b468034878ull,
+ 0x00005fbf68030878ull, 0x00068229c8009800ull, 0x0008010248000008ull,
+ 0xa001c00000000000ull, 0x000843a486020000ull, 0x00088101ffff0000ull,
+ 0x000415ab74000464ull, 0x000e15ab74000461ull, 0x0008010240000008ull,
+ 0x000c41b76800425aull, 0x000c410240030000ull, 0x000a010240000008ull,
+ 0x000a5cb942080000ull, 0x000a552a4e09312dull, 0x000a5cb968082868ull,
+ 0x000a410246090000ull, 0x0000422486020000ull, 0x000815ab74000461ull,
+ 0x000081b940004000ull, 0x000685a9f8000000ull, 0x000782b807000200ull,
+ 0x000a41b268004078ull, 0x000a410040030000ull, 0x000a41ba68004078ull,
+ 0x000a410240030000ull, 0xa801c00000000000ull, 0xa821c00000000000ull,
+ 0x000a4180680100f0ull, 0x000ac639ff003900ull, 0x000a400372010001ull,
+ 0x0001c00000000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
+ 0x0001c00000000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
+ 0x000a83891f000000ull, 0x000f542868090a48ull, 0x000f583068020070ull,
+ 0x000a5cb942080000ull, 0x000a552a4e09312dull, 0x000a5cb968082868ull,
+ 0x000a410246090000ull, 0x000081a9f800b800ull, 0x000689b701000100ull,
+ 0x000685a9f8009800ull, 0x000685a9f800a800ull, 0x00078229f800b800ull,
+ 0x000601024000000cull, 0x9801c00000000000ull, 0x00088a3702000200ull,
+ 0x00088629f800a000ull, 0x00068101ffff8100ull, 0x0004010240000004ull,
+ 0x9801c00000000000ull, 0x0009dcb910001000ull, 0x00068101ffff86ddull,
+ 0x00048501ffff0800ull, 0x0005dcb978003800ull, 0x0006010240000002ull,
+ 0x000081a9f8000000ull, 0x9007813910000000ull, 0x0001c00000000000ull,
+ 0x00048181f0004000ull, 0x988658b168020070ull, 0x0006d428001f0008ull,
+ 0x00068201ff000000ull, 0xa40815ab74000545ull, 0x0009debd04000400ull,
+ 0xa429418068010038ull, 0x00095a3468010870ull, 0x0009028386000005ull,
+ 0xac8a068186000014ull, 0x000a15ab74000543ull, 0x000b5a3468010070ull,
+ 0xac6b8303000f0005ull, 0x000dd428001f0009ull, 0x000b83013fff0000ull,
+ 0x000dd42803e001a0ull, 0x000d58b168020870ull, 0x000ddcb960006000ull,
+ 0x0006418068030230ull, 0x0006410240030000ull, 0x9c01c00000000000ull,
+ 0x0001c00000000000ull, 0x00078201f0006000ull, 0x000858b168020070ull,
+ 0xa068d428001f000aull, 0x00085a3468010874ull, 0x0008818100ff0000ull,
+ 0x000615ab74000545ull, 0x00075a3468010078ull, 0x0007010240000028ull,
+ 0xa80782b400ff0000ull, 0x000ad428001f000bull, 0x000a5a3468010078ull,
+ 0x000a410244010000ull, 0xa80782b400ff003cull, 0x000ad428001f000bull,
+ 0x000a5a3468010078ull, 0x000a410244010000ull, 0xa80782b400ff002bull,
+ 0x000ad428001f000bull, 0x000a5a3468010078ull, 0x000a410244010000ull,
+ 0xa80782b400ff002cull, 0x000ad42803ff01abull, 0x000adcb960006000ull,
+ 0x000a58b168020870ull, 0x000a5a3468010078ull, 0x000a010240000008ull,
+ 0x0007debd04000400ull, 0x000481bd04000400ull, 0x0006c639ff002b00ull,
+ 0x0006832803e001a0ull, 0x000cc18300010001ull, 0x000dc18300010000ull,
+ 0x000641a868034840ull, 0x0006403472030001ull, 0x00048228001c0008ull,
+ 0x000915ab74000541ull, 0x000082ab00ff0045ull, 0x000adcb960006000ull,
+ 0x0001c00000000000ull, 0x0001c00000000000ull, 0x00065cb942080000ull,
+ 0x0006552a4e09312dull, 0x00065cb968082868ull, 0x0006410246090000ull,
+ 0x000042a486020000ull, 0x000a15ab74000543ull, 0x000081b940004000ull,
+ 0x000685a8001f0000ull, 0x000782b807000300ull, 0x000a41b168004070ull,
+ 0x000a410040030000ull, 0x000a41ba68004078ull, 0x000a410240030000ull,
+ 0xa801c00000000000ull, 0xa821c00000000000ull, 0x000a4180680100f0ull,
+ 0x000ac639ff003900ull, 0x000a400372010001ull, 0x0001c00000000000ull,
+ 0x0001c00000000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
+ 0x0001c00000000000ull, 0x0001c00000000000ull, 0x000a83891f000000ull,
+ 0x000f542868090a48ull, 0x000f583068020070ull, 0x000a5cb942080000ull,
+ 0x000a552a4e09312dull, 0x000a5cb968082868ull, 0x000a410246090000ull,
+ 0x00008329ff000200ull, 0x000c8728001c0008ull, 0x000c813920000000ull,
+ 0x000481b400ff006cull, 0x0006d42803e001c0ull, 0x000658b168020870ull,
+ 0xa047823400ff0033ull, 0x0008d42803e00180ull, 0xa0685f80680100f0ull,
+ 0xa007823400ff0032ull, 0x0008d42803e00180ull, 0xa0285f80680100f0ull,
+ 0x0007822803e00180ull, 0x0008c639ff002e00ull, 0x0008403f72010000ull,
+ 0x000858b168020870ull, 0x00085abf680040f0ull, 0x00085d80680100f0ull,
+ 0x0001c00000000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
+ 0x00085cb942080000ull, 0x0008552a4e09312dull, 0x00085cb968082868ull,
+ 0x0008410246090000ull, 0x986981b400ff002full, 0x0006d42803e00280ull,
+ 0x00065a80680100f0ull, 0x000658b168020870ull, 0x000481b400ff0084ull,
+ 0x0006d42803e00240ull, 0x0004823400ff0011ull, 0x0008d42803e00220ull,
+ 0x98c481b400ff0006ull, 0x0006d42803e00200ull, 0x00065ebd68010b31ull,
+ 0x000641806801003cull, 0x0006028386000005ull, 0x000a15ab74000661ull,
+ 0x0006418068030230ull, 0x0008c180ffff0008ull, 0x0008863400ff0006ull,
+ 0x0008418240030000ull, 0x000842a486030000ull, 0x000a15ab74000661ull,
+ 0x9008863400ff0084ull, 0x0004c639ff002f00ull, 0x0004400072010001ull,
+ 0x000858b168020870ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
+ 0x0001c00000000000ull, 0x00085cb942080000ull, 0x9028552a4e09312dull,
+ 0x0004c639ff003000ull, 0x0004403472010000ull, 0x0001c00000000000ull,
+ 0x0001c00000000000ull, 0x000858b168020870ull, 0x0001c00000000000ull,
+ 0x000081b940004000ull, 0x000685a803e00000ull, 0x00045cb942080000ull,
+ 0x0004552a4e09312dull, 0x00045cb968082868ull, 0x0004410246090000ull,
+ 0x000483891f000000ull, 0x000f542868090a48ull, 0x000f583068020070ull,
+ 0x000042a486020000ull, 0x000a15ab74000661ull, 0x000782b807000400ull,
+ 0x000a41b168004078ull, 0x000a410040030000ull, 0x000a41ba68004078ull,
+ 0x000a410240030000ull, 0xa801c00000000000ull, 0xa821c00000000000ull,
+ 0x000a4180680100f0ull, 0x000ac639ff003900ull, 0x000a400372010001ull,
+ 0x0001c00000000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
+ 0x0001c00000000000ull, 0x000041bf68034878ull, 0x00005a3468030878ull,
+ 0x000a83891f000000ull, 0x000f542868090a48ull, 0x000f583068020070ull,
+ 0x000a5cb942080000ull, 0x000a552a4e09312dull, 0x000a5cb968082868ull,
+ 0x000a410246090000ull, 0x00005fb968004250ull, 0x0000003f70000000ull,
+ 0x000041b968034070ull, 0x0000512268030070ull, 0x0000813800200020ull,
+ 0x0004413a68024070ull, 0x9001c00000000000ull, 0x000081b800200020ull,
+ 0x9026898180008000ull, 0x0004890110001000ull, 0x000456ad680100a0ull,
+ 0x0006898180008000ull, 0x000652a56801001dull, 0x000456ad68090b5bull,
+ 0x00055680680900f0ull, 0x0005debd00400040ull, 0x00005600680800f0ull,
+ 0x0000833d00200020ull, 0x000c872907c00000ull, 0x000dd62c20000000ull,
+ 0x0000822902800280ull, 0x000841b268034070ull, 0x000982a8000a000aull,
+ 0x000a41b168034070ull, 0x000b822907c00000ull, 0x0000003f70000800ull,
+ 0x000941b268034070ull, 0x0000418048030000ull, 0x0000018340000008ull,
+ 0x0009018348000004ull, 0x000050a168030c20ull, 0x000082aa00800080ull,
+ 0x000850a168080c2bull, 0x0000820800010001ull, 0x000850a168000c20ull,
+ 0x000752a56808001eull, 0x000a822a00400040ull, 0x00088a0900010001ull,
+ 0x000841bc68034078ull, 0x000941bc68034070ull, 0x000a583068030870ull,
+ 0x0000813d00400000ull, 0x0005c180ffff0000ull, 0x00058288001e0000ull,
+ 0x000b8208001e0008ull, 0x00085d2168004030ull, 0x00098308001e0010ull,
+ 0x00088608001e0010ull, 0x000c5d2168004070ull, 0x0008418068080025ull,
+ 0x000841ba6803a0f0ull, 0x000856ad40030000ull, 0x0008c180ffff0000ull,
+ 0x0005820807000500ull, 0x00088a3d00010001ull, 0x000841be68004050ull,
+ 0x0005828807000300ull, 0x000a8abd00040004ull, 0x000a41be68004040ull,
+ 0x0005820807000100ull, 0x00088a2a00800080ull, 0x0008413068004078ull,
+ 0xa021c00000000000ull, 0x0005828807000200ull, 0x000841806801002dull,
+ 0x000a8abd00080008ull, 0x000a41be68004026ull, 0x0005820807000400ull,
+ 0x00088a2907000200ull, 0x000841b46800405aull, 0x000556ad40030000ull,
+ 0x000081bd00100010ull, 0x0006c180ffff0000ull, 0x0006822a00800080ull,
+ 0x00088a0900100010ull, 0x0008413c68024070ull, 0xa021c00000000000ull,
+ 0x0006832907000200ull, 0x0008c181f0008000ull, 0x000841834c00ffffull,
+ 0x0006822a00400040ull, 0x00088a0900200020ull, 0x0008413c68024078ull,
+ 0xa021c00000000000ull, 0x000c8b0900400040ull, 0x0008dc01f0008000ull,
+ 0x000841b84c03ffffull, 0x000c8b2a00010000ull, 0x000c41b44c0300ffull,
+ 0x000682a9f800a800ull, 0x000a86a9f8009800ull, 0x000a8a8904000400ull,
+ 0x000a41b64c03ffffull, 0x000a41b74c0300ffull, 0x0000828901000100ull,
+ 0x000a822803e00180ull, 0x0008413168024078ull, 0x0008833400ff0033ull,
+ 0x000c010240000004ull, 0xa001c00000000000ull, 0xa021c00000000000ull,
+ 0x000841814c03ffffull, 0x000841814c03ffffull, 0x000a822803e00280ull,
+ 0x000841b54c03ffffull, 0x000682287c005800ull, 0x00088a0902000200ull,
+ 0x0008413068024070ull, 0xa001c00000000000ull, 0x0006830900020002ull,
+ 0x00088281e0002000ull, 0xa84a868108000800ull, 0xa861c00000000000ull,
+ 0x000a41814c03ffffull, 0x000a41814c03ffffull, 0x00065380680300f0ull,
+ 0x000c5321680040b0ull, 0x000dd3260fff0fffull, 0x0006810900800080ull,
+ 0x0000003f70000400ull, 0x000082a907000200ull, 0x000a413268024070ull,
+ 0xa50a822902800280ull, 0x0004893d08000800ull, 0x00098301ffffffffull,
+ 0xa4c98381f000e000ull, 0x00095f00680100f0ull, 0xa5295f3e64010000ull,
+ 0x0001c00000000000ull, 0xa4ec8b01ffffffffull, 0x00095d00680100f0ull,
+ 0xa1895d3a64010000ull, 0x000cd5ab80008000ull, 0x00088a01ff00ff00ull,
+ 0x0008d5ab40004000ull, 0x000ed5ab40004000ull, 0x0004893d40000000ull,
+ 0x00005700680800f0ull, 0x00005780680900f0ull, 0x00008229f800a000ull,
+ 0x0008c180ffff0018ull, 0x000857af680320f0ull, 0x0007d72ef1ff0000ull,
+ 0x0007d7aff0000000ull, 0x0004d72e00fc0000ull, 0x0000812c00020002ull,
+ 0x0004892907c00200ull, 0x000441a7680040f0ull, 0x000441be4c03ffffull,
+ 0x000441ba4c03ffffull, 0x000481a803c00200ull, 0x0006413168024078ull,
+ 0x9801c00000000000ull, 0x9821c00000000000ull, 0x00065f80680100f0ull,
+ 0x00065fbf64010000ull, 0x000641bf4c03ffffull, 0x000452a568030250ull,
+ 0x0000000008000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
+ 0x0001c00000000000ull
+};
diff --git a/arch/mips/mach-octeon/include/mach/cvmx-pko.h b/arch/mips/mach-octeon/include/mach/cvmx-pko.h
new file mode 100644
index 000000000000..26e7a9adf4b3
--- /dev/null
+++ b/arch/mips/mach-octeon/include/mach/cvmx-pko.h
@@ -0,0 +1,213 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018-2022 Marvell International Ltd.
+ *
+ * Backward compatibility for packet transmission using legacy PKO command.
+ */
+
+#ifndef __CVMX_PKO_H__
+#define __CVMX_PKO_H__
+
+extern cvmx_pko_return_value_t
+cvmx_pko3_legacy_xmit(unsigned int dq, cvmx_pko_command_word0_t pko_command,
+ cvmx_buf_ptr_t packet, uint64_t addr, bool tag_sw);
+
+/**
+ * Complete packet output. cvmx_pko_send_packet_prepare() must be called exactly
+ * once before this, and the same parameters must be passed to both
+ * cvmx_pko_send_packet_prepare() and cvmx_pko_send_packet_finish().
+ *
+ * WARNING: This function may have to look up the proper PKO port in
+ * the IPD port to PKO port map, and is thus slower than calling
+ * cvmx_pko_send_packet_finish_pkoid() directly if the PKO port
+ * identifier is known.
+ *
+ * @param ipd_port The IPD port corresponding the to pko port the packet is for
+ * @param queue Queue to use
+ * @param pko_command
+ * PKO HW command word
+ * @param packet to send
+ * @param use_locking
+ * CVMX_PKO_LOCK_NONE, CVMX_PKO_LOCK_ATOMIC_TAG,
+ * or CVMX_PKO_LOCK_CMD_QUEUE
+ *
+ * @return returns CVMX_PKO_SUCCESS on success, or error code on failure of output
+ */
+static inline cvmx_pko_return_value_t
+cvmx_pko_send_packet_finish(u64 ipd_port, uint64_t queue,
+			    cvmx_pko_command_word0_t pko_command,
+			    cvmx_buf_ptr_t packet, cvmx_pko_lock_t use_locking)
+{
+	cvmx_cmd_queue_result_t result;
+
+	/* PKO3-capable SoCs (CN78XX-style WQE): route the legacy 2-word
+	 * command through the PKO3 emulation path instead of the legacy
+	 * command queue.
+	 */
+	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
+		return cvmx_pko3_legacy_xmit(queue, pko_command, packet, 0,
+					     use_locking ==
+						     CVMX_PKO_LOCK_ATOMIC_TAG);
+	}
+
+	/* Serialize on the POW atomic tag before touching the queue */
+	if (use_locking == CVMX_PKO_LOCK_ATOMIC_TAG)
+		cvmx_pow_tag_sw_wait();
+
+	/* Two command words: PKO command word + packet buffer pointer */
+	result = cvmx_cmd_queue_write2(CVMX_CMD_QUEUE_PKO(queue),
+				       (use_locking == CVMX_PKO_LOCK_CMD_QUEUE),
+				       pko_command.u64, packet.u64);
+	if (cvmx_likely(result == CVMX_CMD_QUEUE_SUCCESS)) {
+		cvmx_pko_doorbell(ipd_port, queue, 2);
+		return CVMX_PKO_SUCCESS;
+	} else if ((result == CVMX_CMD_QUEUE_NO_MEMORY) ||
+		   (result == CVMX_CMD_QUEUE_FULL)) {
+		return CVMX_PKO_NO_MEMORY;
+	} else {
+		return CVMX_PKO_INVALID_QUEUE;
+	}
+}
+
+/**
+ * Complete packet output. cvmx_pko_send_packet_prepare() must be called exactly
+ * once before this, and the same parameters must be passed to both
+ * cvmx_pko_send_packet_prepare() and cvmx_pko_send_packet_finish().
+ *
+ * WARNING: This function may have to look up the proper PKO port in
+ * the IPD port to PKO port map, and is thus slower than calling
+ * cvmx_pko_send_packet_finish3_pkoid() directly if the PKO port
+ * identifier is known.
+ *
+ * @param ipd_port The IPD port corresponding the to pko port the packet is for
+ * @param queue Queue to use
+ * @param pko_command
+ * PKO HW command word
+ * @param packet to send
+ * @param addr Physical address of a work queue entry or physical address to zero
+ * on complete.
+ * @param use_locking
+ * CVMX_PKO_LOCK_NONE, CVMX_PKO_LOCK_ATOMIC_TAG,
+ * or CVMX_PKO_LOCK_CMD_QUEUE
+ *
+ * @return returns CVMX_PKO_SUCCESS on success, or error code on failure of output
+ */
+static inline cvmx_pko_return_value_t
+cvmx_pko_send_packet_finish3(u64 ipd_port, uint64_t queue,
+			     cvmx_pko_command_word0_t pko_command,
+			     cvmx_buf_ptr_t packet, uint64_t addr,
+			     cvmx_pko_lock_t use_locking)
+{
+	cvmx_cmd_queue_result_t result;
+
+	/* PKO3-capable SoCs: route through the legacy emulation layer,
+	 * forwarding the WQE/zero-on-done address as well.
+	 */
+	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
+		return cvmx_pko3_legacy_xmit(queue, pko_command, packet, addr,
+					     use_locking ==
+						     CVMX_PKO_LOCK_ATOMIC_TAG);
+	}
+
+	if (use_locking == CVMX_PKO_LOCK_ATOMIC_TAG)
+		cvmx_pow_tag_sw_wait();
+
+	/* Three command words: command + packet pointer + completion addr */
+	result = cvmx_cmd_queue_write3(CVMX_CMD_QUEUE_PKO(queue),
+				       (use_locking == CVMX_PKO_LOCK_CMD_QUEUE),
+				       pko_command.u64, packet.u64, addr);
+	if (cvmx_likely(result == CVMX_CMD_QUEUE_SUCCESS)) {
+		cvmx_pko_doorbell(ipd_port, queue, 3);
+		return CVMX_PKO_SUCCESS;
+	} else if ((result == CVMX_CMD_QUEUE_NO_MEMORY) ||
+		   (result == CVMX_CMD_QUEUE_FULL)) {
+		return CVMX_PKO_NO_MEMORY;
+	} else {
+		return CVMX_PKO_INVALID_QUEUE;
+	}
+}
+
+/**
+ * Complete packet output. cvmx_pko_send_packet_prepare() must be called exactly
+ * once before this, and the same parameters must be passed to both
+ * cvmx_pko_send_packet_prepare() and cvmx_pko_send_packet_finish_pkoid().
+ *
+ * @param pko_port Port to send it on
+ * @param queue Queue to use
+ * @param pko_command
+ * PKO HW command word
+ * @param packet to send
+ * @param use_locking
+ * CVMX_PKO_LOCK_NONE, CVMX_PKO_LOCK_ATOMIC_TAG,
+ * or CVMX_PKO_LOCK_CMD_QUEUE
+ *
+ * @return returns CVMX_PKO_SUCCESS on success, or error code on failure of output
+ */
+static inline cvmx_pko_return_value_t
+cvmx_pko_send_packet_finish_pkoid(int pko_port, uint64_t queue,
+				  cvmx_pko_command_word0_t pko_command,
+				  cvmx_buf_ptr_t packet, cvmx_pko_lock_t use_locking)
+{
+	cvmx_cmd_queue_result_t result;
+
+	/* PKO3-capable SoCs: route through the legacy emulation layer */
+	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
+		return cvmx_pko3_legacy_xmit(queue, pko_command, packet, 0,
+					     use_locking ==
+						     CVMX_PKO_LOCK_ATOMIC_TAG);
+	}
+
+	if (use_locking == CVMX_PKO_LOCK_ATOMIC_TAG)
+		cvmx_pow_tag_sw_wait();
+	/* Same as cvmx_pko_send_packet_finish() except the doorbell is
+	 * rung by PKO port id directly, avoiding the IPD->PKO lookup.
+	 */
+	result = cvmx_cmd_queue_write2(CVMX_CMD_QUEUE_PKO(queue),
+				       (use_locking == CVMX_PKO_LOCK_CMD_QUEUE),
+				       pko_command.u64, packet.u64);
+	if (cvmx_likely(result == CVMX_CMD_QUEUE_SUCCESS)) {
+		cvmx_pko_doorbell_pkoid(pko_port, queue, 2);
+		return CVMX_PKO_SUCCESS;
+	} else if ((result == CVMX_CMD_QUEUE_NO_MEMORY) ||
+		   (result == CVMX_CMD_QUEUE_FULL)) {
+		return CVMX_PKO_NO_MEMORY;
+	} else {
+		return CVMX_PKO_INVALID_QUEUE;
+	}
+}
+
+/**
+ * Complete packet output. cvmx_pko_send_packet_prepare() must be called exactly
+ * once before this, and the same parameters must be passed to both
+ * cvmx_pko_send_packet_prepare() and cvmx_pko_send_packet_finish_pkoid().
+ *
+ * @param pko_port The PKO port the packet is for
+ * @param queue Queue to use
+ * @param pko_command
+ * PKO HW command word
+ * @param packet to send
+ * @param addr Physical address of a work queue entry or physical address to zero
+ * on complete.
+ * @param use_locking
+ * CVMX_PKO_LOCK_NONE, CVMX_PKO_LOCK_ATOMIC_TAG,
+ * or CVMX_PKO_LOCK_CMD_QUEUE
+ *
+ * @return returns CVMX_PKO_SUCCESS on success, or error code on failure of output
+ */
+static inline cvmx_pko_return_value_t
+cvmx_pko_send_packet_finish3_pkoid(u64 pko_port, uint64_t queue,
+				   cvmx_pko_command_word0_t pko_command,
+				   cvmx_buf_ptr_t packet, uint64_t addr,
+				   cvmx_pko_lock_t use_locking)
+{
+	cvmx_cmd_queue_result_t result;
+
+	/* PKO3-capable SoCs: route through the legacy emulation layer */
+	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
+		return cvmx_pko3_legacy_xmit(queue, pko_command, packet, addr,
+					     use_locking ==
+						     CVMX_PKO_LOCK_ATOMIC_TAG);
+	}
+
+	if (use_locking == CVMX_PKO_LOCK_ATOMIC_TAG)
+		cvmx_pow_tag_sw_wait();
+	/* 3-word variant of the _pkoid path: command + packet + addr */
+	result = cvmx_cmd_queue_write3(CVMX_CMD_QUEUE_PKO(queue),
+				       (use_locking == CVMX_PKO_LOCK_CMD_QUEUE),
+				       pko_command.u64, packet.u64, addr);
+	if (cvmx_likely(result == CVMX_CMD_QUEUE_SUCCESS)) {
+		cvmx_pko_doorbell_pkoid(pko_port, queue, 3);
+		return CVMX_PKO_SUCCESS;
+	} else if ((result == CVMX_CMD_QUEUE_NO_MEMORY) ||
+		   (result == CVMX_CMD_QUEUE_FULL)) {
+		return CVMX_PKO_NO_MEMORY;
+	} else {
+		return CVMX_PKO_INVALID_QUEUE;
+	}
+}
+
+#endif /* __CVMX_PKO_H__ */
diff --git a/arch/mips/mach-octeon/include/mach/cvmx-pko3-resources.h b/arch/mips/mach-octeon/include/mach/cvmx-pko3-resources.h
new file mode 100644
index 000000000000..cc9f37500b0e
--- /dev/null
+++ b/arch/mips/mach-octeon/include/mach/cvmx-pko3-resources.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018-2022 Marvell International Ltd.
+ */
+
+#ifndef __CVMX_PKO3_RESOURCES_H__
+#define __CVMX_PKO3_RESOURCES_H__
+
+/*
+ * Allocate or reserve contiguous list of PKO queues.
+ *
+ * @param node is the node number for PKO queues.
+ * @param level is the PKO queue level.
+ * @param owner is the owner of PKO queue resources.
+ * @param base_queue is the PKO queue base number(specify -1 to allocate).
+ * @param num_queues is the number of PKO queues that have to be reserved or allocated.
+ * @return returns queue_base if successful or -1 on failure.
+ */
+int cvmx_pko_alloc_queues(int node, int level, int owner, int base_queue,
+ int num_queues);
+
+/**
+ * Free an allocated/reserved PKO queues for a certain level and owner
+ *
+ * @param node on which to allocate/reserve PKO queues
+ * @param level of PKO queue
+ * @param owner of reserved/allocated resources
+ * @return 0 on success, -1 on failure
+ */
+int cvmx_pko_free_queues(int node, int level, int owner);
+
+int __cvmx_pko3_dq_param_setup(unsigned node);
+
+int cvmx_pko3_num_level_queues(enum cvmx_pko3_level_e level);
+
+#endif /* __CVMX_PKO3_RESOURCES_H__ */
diff --git a/arch/mips/mach-octeon/include/mach/cvmx-pko3.h b/arch/mips/mach-octeon/include/mach/cvmx-pko3.h
new file mode 100644
index 000000000000..86f89be855fe
--- /dev/null
+++ b/arch/mips/mach-octeon/include/mach/cvmx-pko3.h
@@ -0,0 +1,1052 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018-2022 Marvell International Ltd.
+ *
+ */
+
+#ifndef __CVMX_PKO3_H__
+#define __CVMX_PKO3_H__
+
+DECLARE_GLOBAL_DATA_PTR;
+
+/* Use full LMTDMA when PARAMETER_CHECKINS is enabled */
+#undef CVMX_ENABLE_PARAMETER_CHECKING
+#define CVMX_ENABLE_PARAMETER_CHECKING 0
+
+/*
+ * CVMSEG, scratch line for LMTDMA/LMTST operations:
+ * 1. It should differ from other CVMSEG uses, e.g. IOBDMA,
+ * 2. It must agree with the setting of CvmCtl[LMTLINE] control register.
+ * Contains 16 words, words 1-15 are cleared when word 0 is written to.
+ */
+#define CVMX_PKO_LMTLINE 2ull
+
+/* PKO3 queue level identifier */
+enum cvmx_pko3_level_e {
+ CVMX_PKO_LEVEL_INVAL = 0,
+ CVMX_PKO_PORT_QUEUES = 0xd1,
+ CVMX_PKO_L2_QUEUES = 0xc2,
+ CVMX_PKO_L3_QUEUES = 0xb3,
+ CVMX_PKO_L4_QUEUES = 0xa4,
+ CVMX_PKO_L5_QUEUES = 0x95,
+ CVMX_PKO_DESCR_QUEUES = 0x86,
+};
+
+enum cvmx_pko_dqop {
+ CVMX_PKO_DQ_SEND = 0ULL,
+ CVMX_PKO_DQ_OPEN = 1ULL,
+ CVMX_PKO_DQ_CLOSE = 2ULL,
+ CVMX_PKO_DQ_QUERY = 3ULL
+};
+
+/**
+ * Returns the PKO DQ..L2 Shaper Time-Wheel clock rate for specified node.
+ */
+static inline u64 cvmx_pko3_dq_tw_clock_rate_node(int node)
+{
+	/* Fixed coprocessor-clock/768 divider; 'node' is accepted for
+	 * API symmetry with the PQ variant but does not affect the result.
+	 */
+	return gd->bus_clk / 768;
+}
+
+/**
+ * Returns the PKO Port Shaper Time-Wheel clock rate for specified node.
+ */
+static inline u64 cvmx_pko3_pq_tw_clock_rate_node(int node)
+{
+	int div;
+
+	/* CN78XX uses a clock/96 port-queue time-wheel, the other PKO3
+	 * models use clock/48.
+	 */
+	if (OCTEON_IS_MODEL(OCTEON_CN78XX))
+		div = 96;
+	else
+		div = 48;
+	return gd->bus_clk / div;
+}
+
+/**
+ * @INTERNAL
+ * Return the number of MACs in the PKO (excluding the NULL MAC)
+ * in a model-dependent manner.
+ */
+static inline unsigned int __cvmx_pko3_num_macs(void)
+{
+	if (OCTEON_IS_MODEL(OCTEON_CNF75XX))
+		return 10;
+	if (OCTEON_IS_MODEL(OCTEON_CN73XX))
+		return 14;
+	if (OCTEON_IS_MODEL(OCTEON_CN78XX))
+		return 28;
+	return 0;	/* unknown/unsupported model */
+}
+
+/**
+ * @INTERNAL
+ * Return the number of queue levels, depending on SoC model
+ */
+static inline int __cvmx_pko3_sq_lvl_max(void)
+{
+	/* CN73XX/CNF75XX implement a 3-level scheduler hierarchy,
+	 * CN78XX implements the full 5-level hierarchy.
+	 */
+	if (OCTEON_IS_MODEL(OCTEON_CN73XX))
+		return CVMX_PKO_L3_QUEUES;
+	if (OCTEON_IS_MODEL(OCTEON_CNF75XX))
+		return CVMX_PKO_L3_QUEUES;
+	if (OCTEON_IS_MODEL(OCTEON_CN78XX))
+		return CVMX_PKO_L5_QUEUES;
+	return -1;	/* unknown/unsupported model */
+}
+
+/**
+ * @INTERNAL
+ * Return the next (lower) queue level for a given level
+ */
+static inline enum cvmx_pko3_level_e
+__cvmx_pko3_sq_lvl_next(enum cvmx_pko3_level_e level)
+{
+	switch (level) {
+	default:
+		return CVMX_PKO_LEVEL_INVAL;
+	case CVMX_PKO_PORT_QUEUES:
+		return CVMX_PKO_L2_QUEUES;
+	case CVMX_PKO_L2_QUEUES:
+		return CVMX_PKO_L3_QUEUES;
+	case CVMX_PKO_L3_QUEUES:
+		/* 3-level models go straight to descriptor queues */
+		if (OCTEON_IS_MODEL(OCTEON_CN73XX) ||
+		    OCTEON_IS_MODEL(OCTEON_CNF75XX))
+			return CVMX_PKO_DESCR_QUEUES;
+		return CVMX_PKO_L4_QUEUES;
+	case CVMX_PKO_L4_QUEUES:
+		/* L4/L5 do not exist on the 3-level models */
+		if (OCTEON_IS_MODEL(OCTEON_CN73XX) ||
+		    OCTEON_IS_MODEL(OCTEON_CNF75XX))
+			return CVMX_PKO_LEVEL_INVAL;
+		return CVMX_PKO_L5_QUEUES;
+	case CVMX_PKO_L5_QUEUES:
+		if (OCTEON_IS_MODEL(OCTEON_CN73XX) ||
+		    OCTEON_IS_MODEL(OCTEON_CNF75XX))
+			return CVMX_PKO_LEVEL_INVAL;
+		return CVMX_PKO_DESCR_QUEUES;
+	}
+}
+
+/**
+ * @INTERNAL
+ * Return an SQ identifier string, for debug messages.
+ */
+static inline char *__cvmx_pko3_sq_str(char *buf, enum cvmx_pko3_level_e level,
+				       unsigned int q)
+{
+	char *p;
+
+	/* Prefix with the queue-level mnemonic */
+	switch (level) {
+	default:
+		strcpy(buf, "ERR-SQ/");
+		break;
+	case CVMX_PKO_PORT_QUEUES:
+		strcpy(buf, "PQ_L1/");
+		break;
+	case CVMX_PKO_L2_QUEUES:
+		strcpy(buf, "SQ_L2/");
+		break;
+	case CVMX_PKO_L3_QUEUES:
+		strcpy(buf, "SQ_L3/");
+		break;
+	case CVMX_PKO_L4_QUEUES:
+		strcpy(buf, "SQ_L4/");
+		break;
+	case CVMX_PKO_L5_QUEUES:
+		strcpy(buf, "SQ_L5/");
+		break;
+	}
+
+	/* Advance to the terminating NUL of the prefix */
+	for (p = buf; *p; p++)
+		;
+	/* Append exactly four decimal digits (assumes q < 10000) and ':'.
+	 * 'buf' must hold at least 13 bytes: 7-char prefix + 4 digits
+	 * + ':' + NUL.
+	 */
+	*p++ = '0' + q / 1000;
+	q -= (q / 1000) * 1000;
+	*p++ = '0' + q / 100;
+	q -= (q / 100) * 100;
+	*p++ = '0' + q / 10;
+	q -= (q / 10) * 10;
+	*p++ = '0' + q;
+	*p++ = ':';
+	*p++ = '\0';
+	return buf;
+}
+
+union cvmx_pko_query_rtn {
+ u64 u64;
+ struct {
+ u64 dqstatus : 4;
+ u64 rsvd_50_59 : 10;
+ u64 dqop : 2;
+ u64 depth : 48;
+ } s;
+};
+
+typedef union cvmx_pko_query_rtn cvmx_pko_query_rtn_t;
+
+/* PKO_QUERY_RTN_S[DQSTATUS] - cvmx_pko_query_rtn_t->s.dqstatus */
+enum pko_query_dqstatus {
+ PKO_DQSTATUS_PASS = 0, /* No error */
+ PKO_DQSTATUS_BADSTATE = 0x8, /* queue was not ready to enqueue */
+ PKO_DQSTATUS_NOFPABUF = 0x9, /* FPA out of buffers */
+ PKO_DQSTATUS_NOPKOBUF = 0xA, /* PKO out of buffers */
+ PKO_DQSTATUS_FAILRTNPTR = 0xB, /* can't return buffer ptr to FPA */
+ PKO_DQSTATUS_ALREADY = 0xC, /* already created */
+ PKO_DQSTATUS_NOTCREATED = 0xD, /* not created */
+ PKO_DQSTATUS_NOTEMPTY = 0xE, /* queue not empty */
+ PKO_DQSTATUS_SENDPKTDROP = 0xF /* packet dropped, illegal construct */
+};
+
+typedef enum pko_query_dqstatus pko_query_dqstatus_t;
+
+/* Sub-command three bit codes (SUBDC3) */
+#define CVMX_PKO_SENDSUBDC_LINK 0x0
+#define CVMX_PKO_SENDSUBDC_GATHER 0x1
+#define CVMX_PKO_SENDSUBDC_JUMP 0x2
+/* Sub-command four bit codes (SUBDC4) */
+#define CVMX_PKO_SENDSUBDC_TSO 0x8
+#define CVMX_PKO_SENDSUBDC_FREE 0x9
+#define CVMX_PKO_SENDSUBDC_WORK 0xA
+#define CVMX_PKO_SENDSUBDC_AURA 0xB
+#define CVMX_PKO_SENDSUBDC_MEM 0xC
+#define CVMX_PKO_SENDSUBDC_EXT 0xD
+#define CVMX_PKO_SENDSUBDC_CRC 0xE
+#define CVMX_PKO_SENDSUBDC_IMM 0xF
+
+/**
+ * pko buf ptr
+ * This is good for LINK_S, GATHER_S and PKI_BUFLINK_S structure use.
+ * It can also be used for JUMP_S with F-bit represented by "i" field,
+ * and the size limited to 8-bit.
+ */
+
+union cvmx_pko_buf_ptr {
+ u64 u64;
+ struct {
+ u64 size : 16;
+ u64 subdc3 : 3;
+ u64 i : 1;
+ u64 rsvd_42_43 : 2;
+ u64 addr : 42;
+ } s;
+};
+
+typedef union cvmx_pko_buf_ptr cvmx_pko_buf_ptr_t;
+
+/**
+ * pko_auraalg_e
+ */
+enum pko_auraalg_e {
+ AURAALG_NOP = 0x0, /* aura_cnt = No change */
+ AURAALG_SUB = 0x3, /* aura_cnt -= pko_send_aura_t.offset */
+ AURAALG_SUBLEN = 0x7, /* aura_cnt -= pko_send_aura_t.offset +
+ * pko_send_hdr_t.total_bytes
+ */
+ AURAALG_SUBMBUF = 0xB /* aura_cnt -= pko_send_aura_t.offset +
+ * mbufs_freed
+ */
+};
+
+/**
+ * PKO_CKL4ALG_E
+ */
+enum pko_clk4alg_e {
+ CKL4ALG_NONE = 0x0, /* No checksum. */
+ CKL4ALG_UDP = 0x1, /* UDP L4 checksum. */
+ CKL4ALG_TCP = 0x2, /* TCP L4 checksum. */
+ CKL4ALG_SCTP = 0x3, /* SCTP L4 checksum. */
+};
+
+/**
+ * pko_send_aura
+ */
+union cvmx_pko_send_aura {
+ u64 u64;
+ struct {
+ u64 rsvd_60_63 : 4;
+ u64 aura : 12; /* NODE+LAURA */
+ u64 subdc4 : 4;
+ u64 alg : 4; /* pko_auraalg_e */
+ u64 rsvd_08_39 : 32;
+ u64 offset : 8;
+ } s;
+};
+
+typedef union cvmx_pko_send_aura cvmx_pko_send_aura_t;
+
+/**
+ * pko_send_tso
+ */
+union cvmx_pko_send_tso {
+ u64 u64;
+ struct {
+ u64 l2len : 8;
+ u64 rsvd_48_55 : 8;
+ u64 subdc4 : 4; /* 0x8 */
+ u64 rsvd_32_43 : 12;
+ u64 sb : 8;
+ u64 mss : 16;
+ u64 eom : 1;
+ u64 fn : 7;
+ } s;
+};
+
+typedef union cvmx_pko_send_tso cvmx_pko_send_tso_t;
+
+/**
+ * pko_send_free
+ */
+union cvmx_pko_send_free {
+ u64 u64;
+ struct {
+ u64 rsvd_48_63 : 16;
+ u64 subdc4 : 4; /* 0x9 */
+ u64 rsvd : 2;
+ u64 addr : 42;
+ } s;
+};
+
+typedef union cvmx_pko_send_free cvmx_pko_send_free_t;
+
+/* PKO_SEND_HDR_S - PKO header subcommand */
+union cvmx_pko_send_hdr {
+ u64 u64;
+ struct {
+ u64 rsvd_60_63 : 4;
+ u64 aura : 12;
+ u64 ckl4 : 2; /* PKO_CKL4ALG_E */
+ u64 ckl3 : 1;
+ u64 ds : 1;
+ u64 le : 1;
+ u64 n2 : 1;
+ u64 ii : 1;
+ u64 df : 1;
+ u64 rsvd_39 : 1;
+ u64 format : 7;
+ u64 l4ptr : 8;
+ u64 l3ptr : 8;
+ u64 total : 16;
+ } s;
+};
+
+typedef union cvmx_pko_send_hdr cvmx_pko_send_hdr_t;
+
+/* PKO_SEND_EXT_S - extended header subcommand */
+union cvmx_pko_send_ext {
+ u64 u64;
+ struct {
+ u64 rsvd_48_63 : 16;
+ u64 subdc4 : 4; /* _SENDSUBDC_EXT */
+ u64 col : 2; /* _COLORALG_E */
+ u64 ra : 2; /* _REDALG_E */
+ u64 tstmp : 1;
+ u64 rsvd_24_38 : 15;
+ u64 markptr : 8;
+ u64 rsvd_9_15 : 7;
+ u64 shapechg : 9;
+ } s;
+};
+
+typedef union cvmx_pko_send_ext cvmx_pko_send_ext_t;
+
+/* PKO_MEMDSZ_E */
+enum cvmx_pko_memdsz_e {
+ MEMDSZ_B64 = 0,
+ MEMDSZ_B32 = 1,
+ MEMDSZ_B16 = 2, /* Not in HRM, assumed unsupported */
+ MEMDSZ_B8 = 3
+};
+
+/* PKO_MEMALG_E */
+enum cvmx_pko_memalg_e {
+ MEMALG_SET = 0, /* Set mem = PKO_SEND_MEM_S[OFFSET] */
+ MEMALG_SETTSTMP = 1, /* Set the memory location to the timestamp
+ * PKO_SEND_MEM_S[DSZ] must be B64 and a
+ * PKO_SEND_EXT_S subdescriptor must be in
+ * the descriptor with PKO_SEND_EXT_S[TSTMP]=1
+ */
+ MEMALG_SETRSLT = 2, /* [DSZ] = B64; mem = PKO_MEM_RESULT_S. */
+ MEMALG_ADD = 8, /* mem = mem + PKO_SEND_MEM_S[OFFSET] */
+ MEMALG_SUB = 9, /* mem = mem – PKO_SEND_MEM_S[OFFSET] */
+ MEMALG_ADDLEN = 0xA, /* mem += [OFFSET] + PKO_SEND_HDR_S[TOTAL] */
+ MEMALG_SUBLEN = 0xB, /* mem -= [OFFSET] + PKO_SEND_HDR_S[TOTAL] */
+ MEMALG_ADDMBUF = 0xC, /* mem += [OFFSET] + mbufs_freed */
+ MEMALG_SUBMBUF = 0xD /* mem -= [OFFSET] + mbufs_freed */
+};
+
+union cvmx_pko_send_mem {
+ u64 u64;
+ struct {
+ u64 rsvd_63 : 1;
+ u64 wmem : 1;
+ u64 dsz : 2;
+ u64 alg : 4;
+ u64 offset : 8;
+ u64 subdc4 : 4;
+ u64 rsvd_42_43 : 2;
+ u64 addr : 42;
+ } s;
+};
+
+typedef union cvmx_pko_send_mem cvmx_pko_send_mem_t;
+
+union cvmx_pko_send_work {
+ u64 u64;
+ struct {
+ u64 rsvd_62_63 : 2;
+ u64 grp : 10;
+ u64 tt : 2;
+ u64 rsvd_48_49 : 2;
+ u64 subdc4 : 4;
+ u64 rsvd_42_43 : 2;
+ u64 addr : 42;
+ } s;
+};
+
+typedef union cvmx_pko_send_work cvmx_pko_send_work_t;
+
+/*** PKO_SEND_DMA_S - format of IOBDMA/LMTDMA data word ***/
+union cvmx_pko_lmtdma_data {
+ u64 u64;
+ struct {
+ u64 scraddr : 8;
+ u64 rtnlen : 8;
+ u64 did : 8; /* 0x51 */
+ u64 node : 4;
+ u64 rsvd_34_35 : 2;
+ u64 dqop : 2; /* PKO_DQOP_E */
+ u64 rsvd_26_31 : 6;
+ u64 dq : 10;
+ u64 rsvd_0_15 : 16;
+ } s;
+};
+
+typedef union cvmx_pko_lmtdma_data cvmx_pko_lmtdma_data_t;
+
+typedef struct cvmx_pko3_dq_params_s {
+ s32 depth;
+ s32 limit;
+ u64 pad[15];
+} cvmx_pko3_dq_params_t;
+
+/* DQ depth cached value */
+extern cvmx_pko3_dq_params_t *__cvmx_pko3_dq_params[CVMX_MAX_NODES];
+
+int cvmx_pko3_internal_buffer_count(unsigned int node);
+
+/**
+ * @INTERNAL
+ * PKO3 DQ parameter location
+ * @param node node
+ * @param dq dq
+ */
+static inline cvmx_pko3_dq_params_t *cvmx_pko3_dq_parameters(unsigned int node,
+							     unsigned int dq)
+{
+	cvmx_pko3_dq_params_t *pparam = NULL;
+	/* Fallback entry when the per-node table is not yet set up.
+	 * NOTE(review): 'dummy' is one function-local static shared by
+	 * every caller - writes through the returned pointer are not
+	 * isolated per caller in that case; confirm this is acceptable.
+	 */
+	static cvmx_pko3_dq_params_t dummy;
+
+	/* Fallback is effectively unlimited (depth 0, limit 64K) */
+	dummy.depth = 0;
+	dummy.limit = (1 << 16);
+
+	if (cvmx_likely(node < CVMX_MAX_NODES))
+		pparam = __cvmx_pko3_dq_params[node];
+
+	/* Index into the per-DQ array, or fall back to the dummy */
+	if (cvmx_likely(pparam))
+		pparam += dq;
+	else
+		pparam = &dummy;
+
+	return pparam;
+}
+
+static inline void cvmx_pko3_dq_set_limit(unsigned int node, unsigned int dq,
+					  unsigned int limit)
+{
+	cvmx_pko3_dq_params_t *pparam;
+
+	/* Set the cached depth limit consulted by the send path */
+	pparam = cvmx_pko3_dq_parameters(node, dq);
+	pparam->limit = limit;
+}
+
+/**
+ * PKO descriptor queue operation error string
+ *
+ * @param dqstatus is the enumeration returned from hardware,
+ * PKO_QUERY_RTN_S[DQSTATUS].
+ *
+ * @return static constant string error description
+ */
+const char *pko_dqstatus_error(pko_query_dqstatus_t dqstatus);
+
+/*
+ * This function gets PKO mac num for an interface/port.
+ *
+ * @param interface is the interface number.
+ * @param index is the port number.
+ * @return returns mac number if successful or -1 on failure.
+ */
+static inline int __cvmx_pko3_get_mac_num(int xiface, int index)
+{
+	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
+	cvmx_helper_interface_mode_t mode;
+	int interface_index;
+	/* MAC numbering: 0 = LOOP, 1 = NPI, then (CN78XX only) ILK,
+	 * then the BGX MACs; base numbers differ per model.
+	 */
+	int ilk_mac_base = -1, bgx_mac_base = -1, bgx_ports = 4;
+
+	if (OCTEON_IS_MODEL(OCTEON_CN73XX))
+		bgx_mac_base = 2;
+
+	if (OCTEON_IS_MODEL(OCTEON_CNF75XX))
+		bgx_mac_base = 2;
+
+	if (OCTEON_IS_MODEL(OCTEON_CN78XX)) {
+		ilk_mac_base = 2;
+		bgx_mac_base = 4;
+	}
+
+	mode = cvmx_helper_interface_get_mode(xiface);
+	switch (mode) {
+	case CVMX_HELPER_INTERFACE_MODE_LOOP:
+		return 0;
+	case CVMX_HELPER_INTERFACE_MODE_NPI:
+		return 1;
+	case CVMX_HELPER_INTERFACE_MODE_ILK:
+		/* ILK only exists on models where ilk_mac_base was set */
+		if (ilk_mac_base < 0)
+			return -1;
+		interface_index = (xi.interface - CVMX_ILK_GBL_BASE());
+		if (interface_index < 0)
+			return -1;
+		return (ilk_mac_base + interface_index);
+	case CVMX_HELPER_INTERFACE_MODE_SRIO:
+		return (4 + 2 * xi.interface + index);
+	default:
+		/* Reject ILK-range interfaces that are not in ILK mode */
+		if (xi.interface >= CVMX_ILK_GBL_BASE() && ilk_mac_base >= 0)
+			return -1;
+		/* All other modes belong to BGX */
+		return (bgx_mac_base + bgx_ports * xi.interface + index);
+	}
+}
+
+/**
+ * @INTERNAL
+ *
+ * Get scratch offset for LMTDMA/LMTST data buffer
+ *
+ */
+static inline unsigned int cvmx_pko3_lmtdma_scr_base(void)
+{
+	/* Byte offset of the dedicated LMTLINE within the scratchpad */
+	return CVMX_PKO_LMTLINE * CVMX_CACHE_LINE_SIZE;
+}
+
+/**
+ * @INTERNAL
+ *
+ * Get address for LMTDMA/LMTST data buffer
+ *
+ */
+static inline u64 *cvmx_pko3_cvmseg_addr(void)
+{
+	const unsigned int scr = cvmx_pko3_lmtdma_scr_base();
+
+	/* Address of the LMTLINE inside the CVMSEG scratch region */
+	return (u64 *)(CVMX_SCRATCH_BASE + scr);
+}
+
+/**
+ * Save scratchpad area
+ * @param buf storage buffer for saving previous scratchpad contents.
+ *
+ * This function should be used whenever the cache line is used
+ * from a context that might preempt another context that too uses
+ * the same cache line designated for LMTST/LMTDMA and Wide-Atomic
+ * operations, such as the hard interrupt context in Linux kernel,
+ * that could preempt a user-space application on the same processor
+ * core also using the same scratchpad.
+ * 'cvmx_lmtline_save()' should be called upon entry into the
+ * potentially interrupting context, and 'cvmx_lmtline_restore()' should
+ * be called prior to exiting that context.
+ */
+static inline void cvmx_lmtline_save(u64 buf[16])
+{
+	unsigned int i, scr_off = cvmx_pko3_lmtdma_scr_base();
+	unsigned int sz = CVMX_CACHE_LINE_SIZE / sizeof(u64);
+
+	/* wait for any pending LMTDMA to finish before reading */
+	CVMX_SYNCIOBDMA;
+
+	/* Copy LMTLINE to user-provided buffer */
+	for (i = 0; i < sz; i++)
+		buf[i] = cvmx_scratch_read64(scr_off + i * sizeof(u64));
+}
+
+/**
+ * Restore scratchpad area
+ * @param buf storage buffer containing the previous content of scratchpad.
+ */
+static inline void cvmx_lmtline_restore(const u64 buf[16])
+{
+	unsigned int i, scr_off = cvmx_pko3_lmtdma_scr_base();
+	unsigned int sz = CVMX_CACHE_LINE_SIZE / sizeof(u64);
+
+	/* wait for any pending LMTDMA to finish before overwriting */
+	CVMX_SYNCIOBDMA;
+
+	/* restore scratchpad area from buf[] */
+	for (i = 0; i < sz; i++)
+		cvmx_scratch_write64(scr_off + i * sizeof(u64), buf[i]);
+}
+
+/*
+ * @INTERNAL
+ * Deliver PKO SEND commands via CVMSEG LM and LMTDMA/LMTST.
+ * The command should be already stored in the CVMSEG address.
+ *
+ * @param node is the destination node
+ * @param dq is the destination descriptor queue.
+ * @param numwords is the number of outgoing words
+ * @param tag_wait Wait to finish tag switch just before issuing LMTDMA
+ * @return the PKO3 native query result structure.
+ *
+ * <numwords> must be between 1 and 15 for CVMX_PKO_DQ_SEND command
+ *
+ * NOTE: Internal use only.
+ */
+static inline cvmx_pko_query_rtn_t
+__cvmx_pko3_lmtdma(u8 node, uint16_t dq, unsigned int numwords, bool tag_wait)
+{
+	const enum cvmx_pko_dqop dqop = CVMX_PKO_DQ_SEND;
+	cvmx_pko_query_rtn_t pko_status;
+	cvmx_pko_lmtdma_data_t pko_send_dma_data;
+	u64 dma_addr;
+	unsigned int scr_base = cvmx_pko3_lmtdma_scr_base();
+	unsigned int scr_off;
+	cvmx_pko3_dq_params_t *pparam;
+
+	/* A SEND command must be 1..15 words to fit the LMTLINE */
+	if (cvmx_unlikely(numwords < 1 || numwords > 15)) {
+		debug("%s: ERROR: Internal error\n", __func__);
+		pko_status.u64 = ~0ull;
+		return pko_status;
+	}
+
+	pparam = cvmx_pko3_dq_parameters(node, dq);
+
+	pko_status.u64 = 0;
+	pko_send_dma_data.u64 = 0;
+
+	/* LMTDMA address offset is (nWords-1) */
+	dma_addr = CVMX_LMTDMA_ORDERED_IO_ADDR;
+	dma_addr += (numwords - 1) << 3;
+
+	/* Return area is the scratch word right after the command words */
+	scr_off = scr_base + numwords * sizeof(u64);
+
+	/* Write all-ones into the return area */
+	cvmx_scratch_write64(scr_off, ~0ull);
+
+	/* Barrier: make sure all prior writes complete before the following */
+	CVMX_SYNCWS;
+
+	/* If cached depth exceeds limit, check the real depth */
+	if (cvmx_unlikely(pparam->depth > pparam->limit)) {
+		cvmx_pko_dqx_wm_cnt_t wm_cnt;
+
+		wm_cnt.u64 = csr_rd_node(node, CVMX_PKO_DQX_WM_CNT(dq));
+		pko_status.s.depth = wm_cnt.s.count;
+		pparam->depth = pko_status.s.depth;
+
+		/* Still over the limit: report out-of-buffers, don't send */
+		if (pparam->depth > pparam->limit) {
+			pko_status.s.dqop = dqop;
+			pko_status.s.dqstatus = PKO_DQSTATUS_NOFPABUF;
+			return pko_status;
+		}
+	} else {
+		cvmx_atomic_add32_nosync(&pparam->depth, 1);
+	}
+
+	if (CVMX_ENABLE_PARAMETER_CHECKING) {
+		/* Request one return word */
+		pko_send_dma_data.s.rtnlen = 1;
+	} else {
+		/* Do not expect a return word */
+		pko_send_dma_data.s.rtnlen = 0;
+	}
+
+	/* build store data for DMA */
+	pko_send_dma_data.s.scraddr = scr_off >> 3;
+	pko_send_dma_data.s.did = 0x51;
+	pko_send_dma_data.s.node = node;
+	pko_send_dma_data.s.dqop = dqop;
+	pko_send_dma_data.s.dq = dq;
+
+	/* Wait to finish tag switch just before issuing LMTDMA */
+	if (tag_wait)
+		cvmx_pow_tag_sw_wait();
+
+	/* issue PKO DMA */
+	cvmx_write64_uint64(dma_addr, pko_send_dma_data.u64);
+
+	if (cvmx_unlikely(pko_send_dma_data.s.rtnlen)) {
+		/* Wait for LMTDMA completion */
+		CVMX_SYNCIOBDMA;
+
+		/* Retrieve real result */
+		pko_status.u64 = cvmx_scratch_read64(scr_off);
+		pparam->depth = pko_status.s.depth;
+	} else {
+		/* Fake positive result */
+		pko_status.s.dqop = dqop;
+		pko_status.s.dqstatus = PKO_DQSTATUS_PASS;
+	}
+
+	return pko_status;
+}
+
+/*
+ * @INTERNAL
+ * Sends PKO descriptor commands via CVMSEG LM and LMTDMA.
+ * @param node is the destination node
+ * @param dq is the destination descriptor queue.
+ * @param cmds[] is an array of 64-bit PKO3 headers/subheaders
+ * @param numwords is the number of outgoing words
+ * @param dqop is the operation code
+ * @return the PKO3 native query result structure.
+ *
+ * <numwords> must be between 1 and 15 for CVMX_PKO_DQ_SEND command
+ * otherwise it must be 0.
+ *
+ * NOTE: Internal use only.
+ */
+static inline cvmx_pko_query_rtn_t __cvmx_pko3_do_dma(u8 node, uint16_t dq,
+						      u64 cmds[],
+						      unsigned int numwords,
+						      enum cvmx_pko_dqop dqop)
+{
+	const unsigned int scr_base = cvmx_pko3_lmtdma_scr_base();
+	cvmx_pko_query_rtn_t pko_status;
+	cvmx_pko_lmtdma_data_t pko_send_dma_data;
+	u64 dma_addr;
+	unsigned int i, scr_off;
+	cvmx_pko3_dq_params_t *pparam;
+
+	pparam = cvmx_pko3_dq_parameters(node, dq);
+	CVMX_PREFETCH0(pparam);
+	/* Push WB */
+	CVMX_SYNCWS;
+
+	pko_status.u64 = 0;
+	pko_send_dma_data.u64 = 0;
+
+	/* A command longer than 15 words does not fit the LMTLINE */
+	if (cvmx_unlikely(numwords > 15)) {
+		debug("%s: ERROR: Internal error\n", __func__);
+		pko_status.u64 = ~0ull;
+		return pko_status;
+	}
+
+	/* Store the command words into CVMSEG LM */
+	for (i = 0, scr_off = scr_base; i < numwords; i++) {
+		cvmx_scratch_write64(scr_off, cmds[i]);
+		scr_off += sizeof(cmds[0]);
+	}
+
+	/* With 0 data to send, this is an IOBDMA, else LMTDMA operation */
+	if (numwords == 0) {
+		dma_addr = CVMX_IOBDMA_ORDERED_IO_ADDR;
+	} else {
+		/* LMTDMA address offset is (nWords-1) */
+		dma_addr = CVMX_LMTDMA_ORDERED_IO_ADDR;
+		dma_addr += (numwords - 1) << 3;
+	}
+
+	if (cvmx_likely(dqop == CVMX_PKO_DQ_SEND)) {
+		/* If cached depth exceeds limit, refresh from hardware */
+		if (cvmx_unlikely(pparam->depth > pparam->limit)) {
+			cvmx_pko_dqx_wm_cnt_t wm_cnt;
+
+			wm_cnt.u64 = csr_rd_node(node, CVMX_PKO_DQX_WM_CNT(dq));
+			pko_status.s.depth = wm_cnt.s.count;
+			pparam->depth = pko_status.s.depth;
+		}
+
+		/* Still over the limit: report out-of-buffers, don't send */
+		if (cvmx_unlikely(pparam->depth > pparam->limit)) {
+			pko_status.s.dqop = dqop;
+			pko_status.s.dqstatus = PKO_DQSTATUS_NOFPABUF;
+			return pko_status;
+		}
+
+		cvmx_atomic_add32_nosync(&pparam->depth, 1);
+	}
+
+	if (cvmx_unlikely(dqop != CVMX_PKO_DQ_SEND) ||
+	    CVMX_ENABLE_PARAMETER_CHECKING) {
+		/* Request one return word */
+		pko_send_dma_data.s.rtnlen = 1;
+		/* Write all-ones into the return area */
+		cvmx_scratch_write64(scr_off, ~0ull);
+	} else {
+		/* Do not expect a return word */
+		pko_send_dma_data.s.rtnlen = 0;
+	}
+
+	/* build store data for DMA */
+	pko_send_dma_data.s.scraddr = scr_off >> 3;
+	pko_send_dma_data.s.did = 0x51;
+	pko_send_dma_data.s.node = node;
+	pko_send_dma_data.s.dqop = dqop;
+	pko_send_dma_data.s.dq = dq;
+
+	/* Barrier: make sure all prior writes complete before the following */
+	CVMX_SYNCWS;
+
+	/* Wait to finish tag switch just before issuing LMTDMA */
+	cvmx_pow_tag_sw_wait();
+
+	/* issue PKO DMA */
+	cvmx_write64_uint64(dma_addr, pko_send_dma_data.u64);
+
+	if (pko_send_dma_data.s.rtnlen) {
+		/* Wait LMTDMA for completion */
+		CVMX_SYNCIOBDMA;
+
+		/* Retrieve real result */
+		pko_status.u64 = cvmx_scratch_read64(scr_off);
+		pparam->depth = pko_status.s.depth;
+	} else {
+		/* Fake positive result */
+		pko_status.s.dqop = dqop;
+		pko_status.s.dqstatus = PKO_DQSTATUS_PASS;
+	}
+
+	return pko_status;
+}
+
+/*
+ * Transmit packets through PKO, simplified API
+ *
+ * @INTERNAL
+ *
+ * @param dq is a global destination queue number
+ * @param pki_ptr specifies packet first linked pointer as returned from
+ * 'cvmx_wqe_get_pki_pkt_ptr()'.
+ * @param len is the total number of bytes in the packet.
+ * @param gaura is the aura to free packet buffers after transmit.
+ * @param pcounter is an address of a 64-bit counter to atomically
+ *        decrement when packet transmission is complete.
+ * @param ptag is a Flow Tag pointer for packet ordering or NULL
+ *
+ * @return returns 0 if successful and -1 on failure.
+ *
+ *
+ * NOTE: This is a provisional API, and is subject to change.
+ */
+static inline int cvmx_pko3_xmit_link_buf(int dq, cvmx_buf_ptr_pki_t pki_ptr,
+					  unsigned int len, int gaura,
+					  u64 *pcounter, u32 *ptag)
+{
+	cvmx_pko_query_rtn_t pko_status;
+	cvmx_pko_send_hdr_t hdr_s;
+	cvmx_pko_buf_ptr_t gtr_s;
+	unsigned int node, nwords;
+	unsigned int scr_base = cvmx_pko3_lmtdma_scr_base();
+
+	/* Separate global DQ# into node and local DQ */
+	node = dq >> 10;
+	dq &= (1 << 10) - 1;
+
+	/* Fill in header: free to 'gaura' unless negative (DF=don't free) */
+	hdr_s.u64 = 0;
+	hdr_s.s.total = len;
+	hdr_s.s.df = (gaura < 0);
+	hdr_s.s.ii = 1;
+	hdr_s.s.aura = (gaura >= 0) ? gaura : 0;
+
+	/* Fill in gather */
+	gtr_s.u64 = 0;
+	gtr_s.s.subdc3 = CVMX_PKO_SENDSUBDC_LINK;
+	gtr_s.s.addr = pki_ptr.addr;
+	gtr_s.s.size = pki_ptr.size;
+
+	/* Setup command word pointers */
+	cvmx_scratch_write64(scr_base + sizeof(u64) * 0, hdr_s.u64);
+	cvmx_scratch_write64(scr_base + sizeof(u64) * 1, gtr_s.u64);
+	nwords = 2;
+
+	/* Conditionally setup an atomic decrement counter */
+	if (pcounter) {
+		cvmx_pko_send_mem_t mem_s;
+
+		/* Zero the whole word first (like hdr_s/gtr_s above) so
+		 * reserved bits of PKO_SEND_MEM_S are not stack garbage.
+		 */
+		mem_s.u64 = 0;
+		mem_s.s.subdc4 = CVMX_PKO_SENDSUBDC_MEM;
+		mem_s.s.dsz = MEMDSZ_B64;
+		mem_s.s.alg = MEMALG_SUB;
+		mem_s.s.offset = 1;
+		mem_s.s.wmem = 0;
+		mem_s.s.addr = cvmx_ptr_to_phys(CASTPTR(void, pcounter));
+		cvmx_scratch_write64(scr_base + sizeof(u64) * nwords++,
+				     mem_s.u64);
+	}
+
+	/* To preserve packet order, go atomic with DQ-specific tag */
+	if (ptag)
+		cvmx_pow_tag_sw(*ptag ^ dq, CVMX_POW_TAG_TYPE_ATOMIC);
+
+	/* Do LMTDMA; wait for the tag switch only if one was started */
+	pko_status = __cvmx_pko3_lmtdma(node, dq, nwords, ptag != NULL);
+
+	if (cvmx_likely(pko_status.s.dqstatus == PKO_DQSTATUS_PASS))
+		return 0;
+	else
+		return -1;
+}
+
+/**
+ * @INTERNAL
+ *
+ * Retrieve PKO internal AURA from register.
+ */
+static inline unsigned int __cvmx_pko3_aura_get(unsigned int node)
+{
+	/* NOTE(review): this cache is shared across nodes - the first
+	 * call fixes the result for every later 'node' argument; confirm
+	 * this is only used on single-node configurations.
+	 */
+	static s16 aura = -1;
+	cvmx_pko_dpfi_fpa_aura_t pko_aura;
+
+	if (aura >= 0)
+		return aura;
+
+	pko_aura.u64 = csr_rd_node(node, CVMX_PKO_DPFI_FPA_AURA);
+
+	/* Combine node and local AURA into the 12-bit global AURA */
+	aura = (pko_aura.s.node << 10) | pko_aura.s.laura;
+	return aura;
+}
+
+/** Open configured descriptor queues before queueing packets into them.
+ *
+ * @param node is to specify the node to which this configuration is applied.
+ * @param dq is the descriptor queue number to be opened.
+ * @return returns 0 on success or -1 on failure.
+ */
+int cvmx_pko_dq_open(int node, int dq);
+
+/** Close a descriptor queue
+ *
+ * @param node is to specify the node to which this configuration is applied.
+ * @param dq is the descriptor queue number to be closed.
+ * @return returns 0 on success or -1 on failure.
+ *
+ * This should be called before changing the DQ parent link, topology,
+ * or when shutting down the PKO.
+ */
+int cvmx_pko3_dq_close(int node, int dq);
+
+/** Query a descriptor queue
+ *
+ * @param node is to specify the node to which this configuration is applied.
+ * @param dq is the descriptor queue number to be queried.
+ * @return returns the descriptor queue depth on success or -1 on failure.
+ *
+ * This should be called before changing the DQ parent link, topology,
+ * or when shutting down the PKO.
+ */
+int cvmx_pko3_dq_query(int node, int dq);
+
+/** Drain a descriptor queue
+ *
+ * Before closing a DQ, this call will drain all pending traffic
+ * on the DQ to the NULL MAC, which will circumvent any traffic
+ * shaping and flow control to quickly reclaim all packet buffers.
+ */
+void cvmx_pko3_dq_drain(int node, int dq);
+
+/**
+ * PKO global initialization for 78XX.
+ *
+ * @param node is the node on which PKO block is initialized.
+ * @param aura is the 12-bit AURA (including node) for PKO internal use.
+ * @return status code (0 on success).
+ */
+int cvmx_pko3_hw_init_global(int node, uint16_t aura);
+
+/**
+ * Shutdown the entire PKO
+ */
+int cvmx_pko3_hw_disable(int node);
+
+/* Define legacy type here to break circular dependency */
+typedef struct cvmx_pko_port_status cvmx_pko_port_status_t;
+
+/**
+ * @INTERNAL
+ * Backward compatibility for collecting statistics from PKO3
+ *
+ */
+void cvmx_pko3_get_legacy_port_stats(u16 ipd_port, unsigned int clear,
+ cvmx_pko_port_status_t *status);
+
+/** Set MAC options
+ *
+ * The options supported are the parameters below:
+ *
+ * @param xiface The physical interface number
+ * @param index The physical sub-interface port
+ * @param fcs_enable Enable FCS generation
+ * @param pad_enable Enable padding to minimum packet size
+ * @param fcs_sop_off Number of bytes at start of packet to exclude from FCS
+ *
+ * The typical use for `fcs_sop_off` is when the interface is configured
+ * to use a header such as HighGig to precede every Ethernet packet,
+ * such a header usually does not partake in the CRC32 computation stream,
+ * and its size must be set with this parameter.
+ *
+ * @return Returns 0 on success, -1 if interface/port is invalid.
+ */
+int cvmx_pko3_interface_options(int xiface, int index, bool fcs_enable,
+ bool pad_enable, unsigned int fcs_sop_off);
+
+/** Set Descriptor Queue options
+ *
+ * The `min_pad` parameter must be in agreement with the interface-level
+ * padding option for all descriptor queues assigned to that particular
+ * interface/port.
+ */
+void cvmx_pko3_dq_options(unsigned int node, unsigned int dq, bool min_pad);
+
+int cvmx_pko3_port_fifo_size(unsigned int xiface, unsigned int index);
+int cvmx_pko3_channel_credit_level(int node, enum cvmx_pko3_level_e level);
+int cvmx_pko3_port_xoff(unsigned int xiface, unsigned int index);
+int cvmx_pko3_port_xon(unsigned int xiface, unsigned int index);
+
+/* Packet descriptor - PKO3 command buffer + internal state
+ *
+ * Holds an in-construction PKO3 command (header word plus up to 15
+ * subcommand words in 'word[]') together with the bookkeeping needed
+ * by the cvmx_pko3_pdesc_* helpers below. Field order and the bitfield
+ * packing are layout-sensitive - do not reorder.
+ */
+typedef struct cvmx_pko3_pdesc_s {
+ u64 *jump_buf; /**< jump buffer vaddr */
+ s16 last_aura; /**< AURA of the latest LINK_S/GATHER_S */
+ unsigned num_words : 5, /**< valid words in word array 2..16 */
+ headroom : 10, /**< free bytes at start of 1st buf */
+ hdr_offsets : 1, pki_word4_present : 1;
+ /* PKO3 command buffer: */
+ cvmx_pko_send_hdr_t *hdr_s;
+ u64 word[16]; /**< header and subcommands buffer */
+ /* Bookkeeping fields: */
+ u64 send_work_s; /**< SEND_WORK_S must be the very last subdc */
+ s16 jb_aura; /**< AURA where the jump buffer belongs */
+ u16 mem_s_ix; /**< index of first MEM_S subcommand */
+ u8 ckl4_alg; /**< L3/L4 alg to use if recalc is needed */
+ /* Fields saved from WQE for later inspection */
+ cvmx_pki_wqe_word4_t pki_word4;
+ cvmx_pki_wqe_word2_t pki_word2;
+} cvmx_pko3_pdesc_t;
+
+void cvmx_pko3_pdesc_init(cvmx_pko3_pdesc_t *pdesc);
+int cvmx_pko3_pdesc_from_wqe(cvmx_pko3_pdesc_t *pdesc, cvmx_wqe_78xx_t *wqe,
+ bool free_bufs);
+int cvmx_pko3_pdesc_transmit(cvmx_pko3_pdesc_t *pdesc, uint16_t dq,
+ u32 *flow_tag);
+int cvmx_pko3_pdesc_notify_decrement(cvmx_pko3_pdesc_t *pdesc,
+ volatile u64 *p_counter);
+int cvmx_pko3_pdesc_notify_wqe(cvmx_pko3_pdesc_t *pdesc, cvmx_wqe_78xx_t *wqe,
+ u8 node, uint8_t group, uint8_t tt, u32 tag);
+int cvmx_pko3_pdesc_buf_append(cvmx_pko3_pdesc_t *pdesc, void *p_data,
+ unsigned int data_bytes, unsigned int gaura);
+int cvmx_pko3_pdesc_append_free(cvmx_pko3_pdesc_t *pdesc, u64 addr,
+ unsigned int gaura);
+int cvmx_pko3_pdesc_hdr_push(cvmx_pko3_pdesc_t *pdesc, const void *p_data,
+ u8 data_bytes, uint8_t layer);
+int cvmx_pko3_pdesc_hdr_pop(cvmx_pko3_pdesc_t *pdesc, void *hdr_buf,
+ unsigned int num_bytes);
+int cvmx_pko3_pdesc_hdr_peek(cvmx_pko3_pdesc_t *pdesc, void *hdr_buf,
+ unsigned int num_bytes, unsigned int offset);
+void cvmx_pko3_pdesc_set_free(cvmx_pko3_pdesc_t *pdesc, bool free_bufs);
+
+#endif /* __CVMX_PKO3_H__ */
diff --git a/arch/mips/mach-octeon/include/mach/cvmx-range.h b/arch/mips/mach-octeon/include/mach/cvmx-range.h
new file mode 100644
index 000000000000..f0c1307e6173
--- /dev/null
+++ b/arch/mips/mach-octeon/include/mach/cvmx-range.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018-2022 Marvell International Ltd.
+ */
+
+#ifndef __CVMX_RANGE_H__
+#define __CVMX_RANGE_H__
+
+int cvmx_range_init(u64 range_addr, int size);
+int cvmx_range_alloc(u64 range_addr, uint64_t owner, uint64_t cnt, int align);
+int cvmx_range_alloc_ordered(u64 range_addr, uint64_t owner, u64 cnt, int align,
+ int reverse);
+int cvmx_range_alloc_non_contiguos(u64 range_addr, uint64_t owner, u64 cnt,
+ int elements[]);
+int cvmx_range_reserve(u64 range_addr, uint64_t owner, u64 base, uint64_t cnt);
+int cvmx_range_free_with_base(u64 range_addr, int base, int cnt);
+int cvmx_range_free_with_owner(u64 range_addr, uint64_t owner);
+u64 cvmx_range_get_owner(u64 range_addr, uint64_t base);
+void cvmx_range_show(uint64_t range_addr);
+int cvmx_range_memory_size(int nelements);
+int cvmx_range_free_mutiple(u64 range_addr, int bases[], int count);
+
+#endif // __CVMX_RANGE_H__
--
2.35.1
^ permalink raw reply related [flat|nested] 52+ messages in thread* [PATCH 02/52] mips: octeon: Add cvmx-ilk-defs.h header file
2022-03-30 10:06 [PATCH 00/52] mips: octeon: Add ethernet support Stefan Roese
2022-03-30 10:06 ` [PATCH 01/52] mips: octeon: Add misc cvmx-* header files Stefan Roese
@ 2022-03-30 10:06 ` Stefan Roese
2022-03-30 10:06 ` [PATCH 03/52] mips: octeon: Add cvmx-iob-defs.h " Stefan Roese
` (47 subsequent siblings)
49 siblings, 0 replies; 52+ messages in thread
From: Stefan Roese @ 2022-03-30 10:06 UTC (permalink / raw)
To: u-boot; +Cc: awilliams, cchavva
From: Aaron Williams <awilliams@marvell.com>
Import cvmx-ilk-defs.h header file from 2013 U-Boot. It will be used
by the later added drivers to support networking on the MIPS Octeon II /
III platforms.
Signed-off-by: Aaron Williams <awilliams@marvell.com>
Signed-off-by: Stefan Roese <sr@denx.de>
---
.../mach-octeon/include/mach/cvmx-ilk-defs.h | 2269 +++++++++++++++++
1 file changed, 2269 insertions(+)
create mode 100644 arch/mips/mach-octeon/include/mach/cvmx-ilk-defs.h
diff --git a/arch/mips/mach-octeon/include/mach/cvmx-ilk-defs.h b/arch/mips/mach-octeon/include/mach/cvmx-ilk-defs.h
new file mode 100644
index 000000000000..d54f9ca792f5
--- /dev/null
+++ b/arch/mips/mach-octeon/include/mach/cvmx-ilk-defs.h
@@ -0,0 +1,2269 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018-2022 Marvell International Ltd.
+ *
+ * Configuration and status register (CSR) type definitions for
+ * Octeon ilk.
+ */
+
+#ifndef __CVMX_ILK_DEFS_H__
+#define __CVMX_ILK_DEFS_H__
+
+#define CVMX_ILK_BIST_SUM (0x0001180014000038ull)
+#define CVMX_ILK_GBL_CFG (0x0001180014000000ull)
+#define CVMX_ILK_GBL_ERR_CFG (0x0001180014000058ull)
+#define CVMX_ILK_GBL_INT (0x0001180014000008ull)
+#define CVMX_ILK_GBL_INT_EN (0x0001180014000010ull)
+#define CVMX_ILK_INT_SUM (0x0001180014000030ull)
+#define CVMX_ILK_LNEX_TRN_CTL(offset) \
+ (0x00011800140380F0ull + ((offset) & 15) * 1024)
+#define CVMX_ILK_LNEX_TRN_LD(offset) \
+ (0x00011800140380E0ull + ((offset) & 15) * 1024)
+#define CVMX_ILK_LNEX_TRN_LP(offset) \
+ (0x00011800140380E8ull + ((offset) & 15) * 1024)
+#define CVMX_ILK_LNE_DBG (0x0001180014030008ull)
+#define CVMX_ILK_LNE_STS_MSG (0x0001180014030000ull)
+#define CVMX_ILK_RID_CFG (0x0001180014000050ull)
+#define CVMX_ILK_RXF_IDX_PMAP (0x0001180014000020ull)
+#define CVMX_ILK_RXF_MEM_PMAP (0x0001180014000028ull)
+#define CVMX_ILK_RXX_BYTE_CNTX(offset, block_id) \
+ (0x0001180014023000ull + \
+ (((offset) & 255) + ((block_id) & 1) * 0x800ull) * 8)
+#define CVMX_ILK_RXX_CAL_ENTRYX(offset, block_id) \
+ (0x0001180014021000ull + \
+ (((offset) & 511) + ((block_id) & 1) * 0x800ull) * 8)
+#define CVMX_ILK_RXX_CFG0(offset) (0x0001180014020000ull + ((offset) & 1) * 16384)
+#define CVMX_ILK_RXX_CFG1(offset) (0x0001180014020008ull + ((offset) & 1) * 16384)
+#define CVMX_ILK_RXX_CHAX(offset, block_id) \
+ (0x0001180014002000ull + \
+ (((offset) & 255) + ((block_id) & 1) * 0x200ull) * 8)
+#define CVMX_ILK_RXX_CHA_XONX(offset, block_id) \
+ (0x0001180014020400ull + (((offset) & 3) + ((block_id) & 1) * 0x800ull) * 8)
+#define CVMX_ILK_RXX_ERR_CFG(offset) \
+ (0x00011800140200E0ull + ((offset) & 1) * 16384)
+#define CVMX_ILK_RXX_FLOW_CTL0(offset) \
+ (0x0001180014020090ull + ((offset) & 1) * 16384)
+#define CVMX_ILK_RXX_FLOW_CTL1(offset) \
+ (0x0001180014020098ull + ((offset) & 1) * 16384)
+#define CVMX_ILK_RXX_IDX_CAL(offset) \
+ (0x00011800140200A0ull + ((offset) & 1) * 16384)
+#define CVMX_ILK_RXX_IDX_STAT0(offset) \
+ (0x0001180014020070ull + ((offset) & 1) * 16384)
+#define CVMX_ILK_RXX_IDX_STAT1(offset) \
+ (0x0001180014020078ull + ((offset) & 1) * 16384)
+#define CVMX_ILK_RXX_INT(offset) (0x0001180014020010ull + ((offset) & 1) * 16384)
+#define CVMX_ILK_RXX_INT_EN(offset) \
+ (0x0001180014020018ull + ((offset) & 1) * 16384)
+#define CVMX_ILK_RXX_JABBER(offset) \
+ (0x00011800140200B8ull + ((offset) & 1) * 16384)
+#define CVMX_ILK_RXX_MEM_CAL0(offset) \
+ (0x00011800140200A8ull + ((offset) & 1) * 16384)
+#define CVMX_ILK_RXX_MEM_CAL1(offset) \
+ (0x00011800140200B0ull + ((offset) & 1) * 16384)
+#define CVMX_ILK_RXX_MEM_STAT0(offset) \
+ (0x0001180014020080ull + ((offset) & 1) * 16384)
+#define CVMX_ILK_RXX_MEM_STAT1(offset) \
+ (0x0001180014020088ull + ((offset) & 1) * 16384)
+#define CVMX_ILK_RXX_PKT_CNTX(offset, block_id) \
+ (0x0001180014022000ull + \
+ (((offset) & 255) + ((block_id) & 1) * 0x800ull) * 8)
+#define CVMX_ILK_RXX_RID(offset) (0x00011800140200C0ull + ((offset) & 1) * 16384)
+#define CVMX_ILK_RXX_STAT0(offset) \
+ (0x0001180014020020ull + ((offset) & 1) * 16384)
+#define CVMX_ILK_RXX_STAT1(offset) \
+ (0x0001180014020028ull + ((offset) & 1) * 16384)
+#define CVMX_ILK_RXX_STAT2(offset) \
+ (0x0001180014020030ull + ((offset) & 1) * 16384)
+#define CVMX_ILK_RXX_STAT3(offset) \
+ (0x0001180014020038ull + ((offset) & 1) * 16384)
+#define CVMX_ILK_RXX_STAT4(offset) \
+ (0x0001180014020040ull + ((offset) & 1) * 16384)
+#define CVMX_ILK_RXX_STAT5(offset) \
+ (0x0001180014020048ull + ((offset) & 1) * 16384)
+#define CVMX_ILK_RXX_STAT6(offset) \
+ (0x0001180014020050ull + ((offset) & 1) * 16384)
+#define CVMX_ILK_RXX_STAT7(offset) \
+ (0x0001180014020058ull + ((offset) & 1) * 16384)
+#define CVMX_ILK_RXX_STAT8(offset) \
+ (0x0001180014020060ull + ((offset) & 1) * 16384)
+#define CVMX_ILK_RXX_STAT9(offset) \
+ (0x0001180014020068ull + ((offset) & 1) * 16384)
+#define CVMX_ILK_RX_LNEX_CFG(offset) \
+ (0x0001180014038000ull + ((offset) & 15) * 1024)
+#define CVMX_ILK_RX_LNEX_INT(offset) \
+ (0x0001180014038008ull + ((offset) & 15) * 1024)
+#define CVMX_ILK_RX_LNEX_INT_EN(offset) \
+ (0x0001180014038010ull + ((offset) & 7) * 1024)
+#define CVMX_ILK_RX_LNEX_STAT0(offset) \
+ (0x0001180014038018ull + ((offset) & 15) * 1024)
+#define CVMX_ILK_RX_LNEX_STAT1(offset) \
+ (0x0001180014038020ull + ((offset) & 15) * 1024)
+#define CVMX_ILK_RX_LNEX_STAT10(offset) \
+ (0x0001180014038068ull + ((offset) & 15) * 1024)
+#define CVMX_ILK_RX_LNEX_STAT2(offset) \
+ (0x0001180014038028ull + ((offset) & 15) * 1024)
+#define CVMX_ILK_RX_LNEX_STAT3(offset) \
+ (0x0001180014038030ull + ((offset) & 15) * 1024)
+#define CVMX_ILK_RX_LNEX_STAT4(offset) \
+ (0x0001180014038038ull + ((offset) & 15) * 1024)
+#define CVMX_ILK_RX_LNEX_STAT5(offset) \
+ (0x0001180014038040ull + ((offset) & 15) * 1024)
+#define CVMX_ILK_RX_LNEX_STAT6(offset) \
+ (0x0001180014038048ull + ((offset) & 15) * 1024)
+#define CVMX_ILK_RX_LNEX_STAT7(offset) \
+ (0x0001180014038050ull + ((offset) & 15) * 1024)
+#define CVMX_ILK_RX_LNEX_STAT8(offset) \
+ (0x0001180014038058ull + ((offset) & 15) * 1024)
+#define CVMX_ILK_RX_LNEX_STAT9(offset) \
+ (0x0001180014038060ull + ((offset) & 15) * 1024)
+#define CVMX_ILK_SER_CFG (0x0001180014000018ull)
+#define CVMX_ILK_TXX_BYTE_CNTX(offset, block_id) \
+ (0x0001180014013000ull + \
+ (((offset) & 255) + ((block_id) & 1) * 0x800ull) * 8)
+#define CVMX_ILK_TXX_CAL_ENTRYX(offset, block_id) \
+ (0x0001180014011000ull + \
+ (((offset) & 511) + ((block_id) & 1) * 0x800ull) * 8)
+#define CVMX_ILK_TXX_CFG0(offset) (0x0001180014010000ull + ((offset) & 1) * 16384)
+#define CVMX_ILK_TXX_CFG1(offset) (0x0001180014010008ull + ((offset) & 1) * 16384)
+#define CVMX_ILK_TXX_CHA_XONX(offset, block_id) \
+ (0x0001180014010400ull + (((offset) & 3) + ((block_id) & 1) * 0x800ull) * 8)
+#define CVMX_ILK_TXX_DBG(offset) (0x0001180014010070ull + ((offset) & 1) * 16384)
+#define CVMX_ILK_TXX_ERR_CFG(offset) \
+ (0x00011800140100B0ull + ((offset) & 1) * 16384)
+#define CVMX_ILK_TXX_FLOW_CTL0(offset) \
+ (0x0001180014010048ull + ((offset) & 1) * 16384)
+#define CVMX_ILK_TXX_FLOW_CTL1(offset) \
+ (0x0001180014010050ull + ((offset) & 1) * 16384)
+#define CVMX_ILK_TXX_IDX_CAL(offset) \
+ (0x0001180014010058ull + ((offset) & 1) * 16384)
+#define CVMX_ILK_TXX_IDX_PMAP(offset) \
+ (0x0001180014010010ull + ((offset) & 1) * 16384)
+#define CVMX_ILK_TXX_IDX_STAT0(offset) \
+ (0x0001180014010020ull + ((offset) & 1) * 16384)
+#define CVMX_ILK_TXX_IDX_STAT1(offset) \
+ (0x0001180014010028ull + ((offset) & 1) * 16384)
+#define CVMX_ILK_TXX_INT(offset) (0x0001180014010078ull + ((offset) & 1) * 16384)
+#define CVMX_ILK_TXX_INT_EN(offset) \
+ (0x0001180014010080ull + ((offset) & 1) * 16384)
+#define CVMX_ILK_TXX_MEM_CAL0(offset) \
+ (0x0001180014010060ull + ((offset) & 1) * 16384)
+#define CVMX_ILK_TXX_MEM_CAL1(offset) \
+ (0x0001180014010068ull + ((offset) & 1) * 16384)
+#define CVMX_ILK_TXX_MEM_PMAP(offset) \
+ (0x0001180014010018ull + ((offset) & 1) * 16384)
+#define CVMX_ILK_TXX_MEM_STAT0(offset) \
+ (0x0001180014010030ull + ((offset) & 1) * 16384)
+#define CVMX_ILK_TXX_MEM_STAT1(offset) \
+ (0x0001180014010038ull + ((offset) & 1) * 16384)
+#define CVMX_ILK_TXX_PIPE(offset) (0x0001180014010088ull + ((offset) & 1) * 16384)
+#define CVMX_ILK_TXX_PKT_CNTX(offset, block_id) \
+ (0x0001180014012000ull + \
+ (((offset) & 255) + ((block_id) & 1) * 0x800ull) * 8)
+#define CVMX_ILK_TXX_RMATCH(offset) \
+ (0x0001180014010040ull + ((offset) & 1) * 16384)
+
+/**
+ * cvmx_ilk_bist_sum
+ *
+ * ILK BIST (built-in self test) summary register layout; "s" is the
+ * field superset, the cn68xx/cn68xxp1/cn78xx members are per-model
+ * layouts. Bit positions are hardware-defined - do not reorder.
+ */
+union cvmx_ilk_bist_sum {
+ u64 u64;
+ struct cvmx_ilk_bist_sum_s {
+ u64 rxf_x2p : 1;
+ u64 rxf_mem19 : 1;
+ u64 rxf_mem18 : 1;
+ u64 rxf_mem17 : 1;
+ u64 rxf_mem16 : 1;
+ u64 rxf_mem15 : 1;
+ u64 reserved_52_57 : 6;
+ u64 rxf_mem8 : 1;
+ u64 rxf_mem7 : 1;
+ u64 rxf_mem6 : 1;
+ u64 rxf_mem5 : 1;
+ u64 rxf_mem4 : 1;
+ u64 rxf_mem3 : 1;
+ u64 reserved_36_45 : 10;
+ u64 rle7_dsk1 : 1;
+ u64 rle7_dsk0 : 1;
+ u64 rle6_dsk1 : 1;
+ u64 rle6_dsk0 : 1;
+ u64 rle5_dsk1 : 1;
+ u64 rle5_dsk0 : 1;
+ u64 rle4_dsk1 : 1;
+ u64 rle4_dsk0 : 1;
+ u64 rle3_dsk1 : 1;
+ u64 rle3_dsk0 : 1;
+ u64 rle2_dsk1 : 1;
+ u64 rle2_dsk0 : 1;
+ u64 rle1_dsk1 : 1;
+ u64 rle1_dsk0 : 1;
+ u64 rle0_dsk1 : 1;
+ u64 rle0_dsk0 : 1;
+ u64 rlk1_pmap : 1;
+ u64 reserved_18_18 : 1;
+ u64 rlk1_fwc : 1;
+ u64 reserved_16_16 : 1;
+ u64 rlk0_pmap : 1;
+ u64 rlk0_stat1 : 1;
+ u64 rlk0_fwc : 1;
+ u64 rlk0_stat : 1;
+ u64 tlk1_stat1 : 1;
+ u64 tlk1_fwc : 1;
+ u64 reserved_9_9 : 1;
+ u64 tlk1_txf2 : 1;
+ u64 tlk1_txf1 : 1;
+ u64 tlk1_txf0 : 1;
+ u64 tlk0_stat1 : 1;
+ u64 tlk0_fwc : 1;
+ u64 reserved_3_3 : 1;
+ u64 tlk0_txf2 : 1;
+ u64 tlk0_txf1 : 1;
+ u64 tlk0_txf0 : 1;
+ } s;
+ struct cvmx_ilk_bist_sum_cn68xx {
+ u64 reserved_58_63 : 6;
+ u64 rxf_x2p1 : 1;
+ u64 rxf_x2p0 : 1;
+ u64 rxf_pmap : 1;
+ u64 rxf_mem2 : 1;
+ u64 rxf_mem1 : 1;
+ u64 rxf_mem0 : 1;
+ u64 reserved_36_51 : 16;
+ u64 rle7_dsk1 : 1;
+ u64 rle7_dsk0 : 1;
+ u64 rle6_dsk1 : 1;
+ u64 rle6_dsk0 : 1;
+ u64 rle5_dsk1 : 1;
+ u64 rle5_dsk0 : 1;
+ u64 rle4_dsk1 : 1;
+ u64 rle4_dsk0 : 1;
+ u64 rle3_dsk1 : 1;
+ u64 rle3_dsk0 : 1;
+ u64 rle2_dsk1 : 1;
+ u64 rle2_dsk0 : 1;
+ u64 rle1_dsk1 : 1;
+ u64 rle1_dsk0 : 1;
+ u64 rle0_dsk1 : 1;
+ u64 rle0_dsk0 : 1;
+ u64 reserved_19_19 : 1;
+ u64 rlk1_stat1 : 1;
+ u64 rlk1_fwc : 1;
+ u64 rlk1_stat : 1;
+ u64 reserved_15_15 : 1;
+ u64 rlk0_stat1 : 1;
+ u64 rlk0_fwc : 1;
+ u64 rlk0_stat : 1;
+ u64 tlk1_stat1 : 1;
+ u64 tlk1_fwc : 1;
+ u64 tlk1_stat0 : 1;
+ u64 tlk1_txf2 : 1;
+ u64 tlk1_txf1 : 1;
+ u64 tlk1_txf0 : 1;
+ u64 tlk0_stat1 : 1;
+ u64 tlk0_fwc : 1;
+ u64 tlk0_stat0 : 1;
+ u64 tlk0_txf2 : 1;
+ u64 tlk0_txf1 : 1;
+ u64 tlk0_txf0 : 1;
+ } cn68xx;
+ struct cvmx_ilk_bist_sum_cn68xxp1 {
+ u64 reserved_58_63 : 6;
+ u64 rxf_x2p1 : 1;
+ u64 rxf_x2p0 : 1;
+ u64 rxf_pmap : 1;
+ u64 rxf_mem2 : 1;
+ u64 rxf_mem1 : 1;
+ u64 rxf_mem0 : 1;
+ u64 reserved_36_51 : 16;
+ u64 rle7_dsk1 : 1;
+ u64 rle7_dsk0 : 1;
+ u64 rle6_dsk1 : 1;
+ u64 rle6_dsk0 : 1;
+ u64 rle5_dsk1 : 1;
+ u64 rle5_dsk0 : 1;
+ u64 rle4_dsk1 : 1;
+ u64 rle4_dsk0 : 1;
+ u64 rle3_dsk1 : 1;
+ u64 rle3_dsk0 : 1;
+ u64 rle2_dsk1 : 1;
+ u64 rle2_dsk0 : 1;
+ u64 rle1_dsk1 : 1;
+ u64 rle1_dsk0 : 1;
+ u64 rle0_dsk1 : 1;
+ u64 rle0_dsk0 : 1;
+ u64 reserved_18_19 : 2;
+ u64 rlk1_fwc : 1;
+ u64 rlk1_stat : 1;
+ u64 reserved_14_15 : 2;
+ u64 rlk0_fwc : 1;
+ u64 rlk0_stat : 1;
+ u64 reserved_11_11 : 1;
+ u64 tlk1_fwc : 1;
+ u64 tlk1_stat : 1;
+ u64 tlk1_txf2 : 1;
+ u64 tlk1_txf1 : 1;
+ u64 tlk1_txf0 : 1;
+ u64 reserved_5_5 : 1;
+ u64 tlk0_fwc : 1;
+ u64 tlk0_stat : 1;
+ u64 tlk0_txf2 : 1;
+ u64 tlk0_txf1 : 1;
+ u64 tlk0_txf0 : 1;
+ } cn68xxp1;
+ struct cvmx_ilk_bist_sum_cn78xx {
+ u64 rxf_x2p : 1;
+ u64 rxf_mem19 : 1;
+ u64 rxf_mem18 : 1;
+ u64 rxf_mem17 : 1;
+ u64 rxf_mem16 : 1;
+ u64 rxf_mem15 : 1;
+ u64 rxf_mem14 : 1;
+ u64 rxf_mem13 : 1;
+ u64 rxf_mem12 : 1;
+ u64 rxf_mem11 : 1;
+ u64 rxf_mem10 : 1;
+ u64 rxf_mem9 : 1;
+ u64 rxf_mem8 : 1;
+ u64 rxf_mem7 : 1;
+ u64 rxf_mem6 : 1;
+ u64 rxf_mem5 : 1;
+ u64 rxf_mem4 : 1;
+ u64 rxf_mem3 : 1;
+ u64 rxf_mem2 : 1;
+ u64 rxf_mem1 : 1;
+ u64 rxf_mem0 : 1;
+ u64 reserved_36_42 : 7;
+ u64 rle7_dsk1 : 1;
+ u64 rle7_dsk0 : 1;
+ u64 rle6_dsk1 : 1;
+ u64 rle6_dsk0 : 1;
+ u64 rle5_dsk1 : 1;
+ u64 rle5_dsk0 : 1;
+ u64 rle4_dsk1 : 1;
+ u64 rle4_dsk0 : 1;
+ u64 rle3_dsk1 : 1;
+ u64 rle3_dsk0 : 1;
+ u64 rle2_dsk1 : 1;
+ u64 rle2_dsk0 : 1;
+ u64 rle1_dsk1 : 1;
+ u64 rle1_dsk0 : 1;
+ u64 rle0_dsk1 : 1;
+ u64 rle0_dsk0 : 1;
+ u64 rlk1_pmap : 1;
+ u64 rlk1_stat : 1;
+ u64 rlk1_fwc : 1;
+ u64 rlk1_stat1 : 1;
+ u64 rlk0_pmap : 1;
+ u64 rlk0_stat1 : 1;
+ u64 rlk0_fwc : 1;
+ u64 rlk0_stat : 1;
+ u64 tlk1_stat1 : 1;
+ u64 tlk1_fwc : 1;
+ u64 tlk1_stat0 : 1;
+ u64 tlk1_txf2 : 1;
+ u64 tlk1_txf1 : 1;
+ u64 tlk1_txf0 : 1;
+ u64 tlk0_stat1 : 1;
+ u64 tlk0_fwc : 1;
+ u64 tlk0_stat0 : 1;
+ u64 tlk0_txf2 : 1;
+ u64 tlk0_txf1 : 1;
+ u64 tlk0_txf0 : 1;
+ } cn78xx;
+ struct cvmx_ilk_bist_sum_cn78xx cn78xxp1;
+};
+
+typedef union cvmx_ilk_bist_sum cvmx_ilk_bist_sum_t;
+
+/**
+ * cvmx_ilk_gbl_cfg
+ *
+ * ILK global configuration CSR layout (see CVMX_ILK_GBL_CFG above);
+ * "s" is the field superset, cnXXxx members are per-model layouts.
+ */
+union cvmx_ilk_gbl_cfg {
+ u64 u64;
+ struct cvmx_ilk_gbl_cfg_s {
+ u64 reserved_4_63 : 60;
+ u64 rid_rstdis : 1;
+ u64 reset : 1;
+ u64 cclk_dis : 1;
+ u64 rxf_xlink : 1;
+ } s;
+ struct cvmx_ilk_gbl_cfg_s cn68xx;
+ struct cvmx_ilk_gbl_cfg_cn68xxp1 {
+ u64 reserved_2_63 : 62;
+ u64 cclk_dis : 1;
+ u64 rxf_xlink : 1;
+ } cn68xxp1;
+ struct cvmx_ilk_gbl_cfg_s cn78xx;
+ struct cvmx_ilk_gbl_cfg_s cn78xxp1;
+};
+
+typedef union cvmx_ilk_gbl_cfg cvmx_ilk_gbl_cfg_t;
+
+/**
+ * cvmx_ilk_gbl_err_cfg
+ *
+ * ILK global error-injection/correction configuration CSR layout;
+ * present only on cn78xx models.
+ */
+union cvmx_ilk_gbl_err_cfg {
+ u64 u64;
+ struct cvmx_ilk_gbl_err_cfg_s {
+ u64 reserved_20_63 : 44;
+ u64 rxf_flip : 2;
+ u64 x2p_flip : 2;
+ u64 reserved_2_15 : 14;
+ u64 rxf_cor_dis : 1;
+ u64 x2p_cor_dis : 1;
+ } s;
+ struct cvmx_ilk_gbl_err_cfg_s cn78xx;
+ struct cvmx_ilk_gbl_err_cfg_s cn78xxp1;
+};
+
+typedef union cvmx_ilk_gbl_err_cfg cvmx_ilk_gbl_err_cfg_t;
+
+/**
+ * cvmx_ilk_gbl_int
+ *
+ * ILK global interrupt status CSR layout; "s" is the field superset,
+ * the cn68xx variant lacks the ECC (sbe/dbe) bits.
+ */
+union cvmx_ilk_gbl_int {
+ u64 u64;
+ struct cvmx_ilk_gbl_int_s {
+ u64 reserved_9_63 : 55;
+ u64 x2p_dbe : 1;
+ u64 x2p_sbe : 1;
+ u64 rxf_dbe : 1;
+ u64 rxf_sbe : 1;
+ u64 rxf_push_full : 1;
+ u64 rxf_pop_empty : 1;
+ u64 rxf_ctl_perr : 1;
+ u64 rxf_lnk1_perr : 1;
+ u64 rxf_lnk0_perr : 1;
+ } s;
+ struct cvmx_ilk_gbl_int_cn68xx {
+ u64 reserved_5_63 : 59;
+ u64 rxf_push_full : 1;
+ u64 rxf_pop_empty : 1;
+ u64 rxf_ctl_perr : 1;
+ u64 rxf_lnk1_perr : 1;
+ u64 rxf_lnk0_perr : 1;
+ } cn68xx;
+ struct cvmx_ilk_gbl_int_cn68xx cn68xxp1;
+ struct cvmx_ilk_gbl_int_s cn78xx;
+ struct cvmx_ilk_gbl_int_s cn78xxp1;
+};
+
+typedef union cvmx_ilk_gbl_int cvmx_ilk_gbl_int_t;
+
+/**
+ * cvmx_ilk_gbl_int_en
+ *
+ * Interrupt-enable mask for ILK_GBL_INT; cn68xx-only (cn78xx uses a
+ * different interrupt scheme - no cn78xx member present).
+ */
+union cvmx_ilk_gbl_int_en {
+ u64 u64;
+ struct cvmx_ilk_gbl_int_en_s {
+ u64 reserved_5_63 : 59;
+ u64 rxf_push_full : 1;
+ u64 rxf_pop_empty : 1;
+ u64 rxf_ctl_perr : 1;
+ u64 rxf_lnk1_perr : 1;
+ u64 rxf_lnk0_perr : 1;
+ } s;
+ struct cvmx_ilk_gbl_int_en_s cn68xx;
+ struct cvmx_ilk_gbl_int_en_s cn68xxp1;
+};
+
+typedef union cvmx_ilk_gbl_int_en cvmx_ilk_gbl_int_en_t;
+
+/**
+ * cvmx_ilk_int_sum
+ *
+ * ILK interrupt summary CSR layout: one bit per lane (rleN), per RX/TX
+ * link (rlkN/tlkN) and the global interrupt; cn68xx-only.
+ */
+union cvmx_ilk_int_sum {
+ u64 u64;
+ struct cvmx_ilk_int_sum_s {
+ u64 reserved_13_63 : 51;
+ u64 rle7_int : 1;
+ u64 rle6_int : 1;
+ u64 rle5_int : 1;
+ u64 rle4_int : 1;
+ u64 rle3_int : 1;
+ u64 rle2_int : 1;
+ u64 rle1_int : 1;
+ u64 rle0_int : 1;
+ u64 rlk1_int : 1;
+ u64 rlk0_int : 1;
+ u64 tlk1_int : 1;
+ u64 tlk0_int : 1;
+ u64 gbl_int : 1;
+ } s;
+ struct cvmx_ilk_int_sum_s cn68xx;
+ struct cvmx_ilk_int_sum_s cn68xxp1;
+};
+
+typedef union cvmx_ilk_int_sum cvmx_ilk_int_sum_t;
+
+/**
+ * cvmx_ilk_lne#_trn_ctl
+ *
+ * Per-lane link-training control/status CSR layout; cn78xx-only.
+ */
+union cvmx_ilk_lnex_trn_ctl {
+ u64 u64;
+ struct cvmx_ilk_lnex_trn_ctl_s {
+ u64 reserved_4_63 : 60;
+ u64 trn_lock : 1;
+ u64 trn_done : 1;
+ u64 trn_ena : 1;
+ u64 eie_det : 1;
+ } s;
+ struct cvmx_ilk_lnex_trn_ctl_s cn78xx;
+ struct cvmx_ilk_lnex_trn_ctl_s cn78xxp1;
+};
+
+typedef union cvmx_ilk_lnex_trn_ctl cvmx_ilk_lnex_trn_ctl_t;
+
+/**
+ * cvmx_ilk_lne#_trn_ld
+ *
+ * Per-lane link-training local-device data CSR layout; cn78xx-only.
+ */
+union cvmx_ilk_lnex_trn_ld {
+ u64 u64;
+ struct cvmx_ilk_lnex_trn_ld_s {
+ u64 lp_manual : 1;
+ u64 reserved_49_62 : 14;
+ u64 ld_cu_val : 1;
+ u64 ld_cu_dat : 16;
+ u64 reserved_17_31 : 15;
+ u64 ld_sr_val : 1;
+ u64 ld_sr_dat : 16;
+ } s;
+ struct cvmx_ilk_lnex_trn_ld_s cn78xx;
+ struct cvmx_ilk_lnex_trn_ld_s cn78xxp1;
+};
+
+typedef union cvmx_ilk_lnex_trn_ld cvmx_ilk_lnex_trn_ld_t;
+
+/**
+ * cvmx_ilk_lne#_trn_lp
+ *
+ * Per-lane link-training link-partner data CSR layout; cn78xx-only.
+ */
+union cvmx_ilk_lnex_trn_lp {
+ u64 u64;
+ struct cvmx_ilk_lnex_trn_lp_s {
+ u64 reserved_49_63 : 15;
+ u64 lp_cu_val : 1;
+ u64 lp_cu_dat : 16;
+ u64 reserved_17_31 : 15;
+ u64 lp_sr_val : 1;
+ u64 lp_sr_dat : 16;
+ } s;
+ struct cvmx_ilk_lnex_trn_lp_s cn78xx;
+ struct cvmx_ilk_lnex_trn_lp_s cn78xxp1;
+};
+
+typedef union cvmx_ilk_lnex_trn_lp cvmx_ilk_lnex_trn_lp_t;
+
+/**
+ * cvmx_ilk_lne_dbg
+ *
+ * ILK lane debug CSR layout; the per-lane masks are 16 bits wide on
+ * cn78xx and 8 bits on cn68xx.
+ */
+union cvmx_ilk_lne_dbg {
+ u64 u64;
+ struct cvmx_ilk_lne_dbg_s {
+ u64 reserved_60_63 : 4;
+ u64 tx_bad_crc32 : 1;
+ u64 tx_bad_6467_cnt : 5;
+ u64 tx_bad_sync_cnt : 3;
+ u64 tx_bad_scram_cnt : 3;
+ u64 tx_bad_lane_sel : 16;
+ u64 tx_dis_dispr : 16;
+ u64 tx_dis_scram : 16;
+ } s;
+ struct cvmx_ilk_lne_dbg_cn68xx {
+ u64 reserved_60_63 : 4;
+ u64 tx_bad_crc32 : 1;
+ u64 tx_bad_6467_cnt : 5;
+ u64 tx_bad_sync_cnt : 3;
+ u64 tx_bad_scram_cnt : 3;
+ u64 reserved_40_47 : 8;
+ u64 tx_bad_lane_sel : 8;
+ u64 reserved_24_31 : 8;
+ u64 tx_dis_dispr : 8;
+ u64 reserved_8_15 : 8;
+ u64 tx_dis_scram : 8;
+ } cn68xx;
+ struct cvmx_ilk_lne_dbg_cn68xx cn68xxp1;
+ struct cvmx_ilk_lne_dbg_s cn78xx;
+ struct cvmx_ilk_lne_dbg_s cn78xxp1;
+};
+
+typedef union cvmx_ilk_lne_dbg cvmx_ilk_lne_dbg_t;
+
+/**
+ * cvmx_ilk_lne_sts_msg
+ *
+ * ILK lane/link status-message CSR layout; per-lane masks are 16 bits
+ * on cn78xx and 8 bits on cn68xx.
+ */
+union cvmx_ilk_lne_sts_msg {
+ u64 u64;
+ struct cvmx_ilk_lne_sts_msg_s {
+ u64 rx_lnk_stat : 16;
+ u64 rx_lne_stat : 16;
+ u64 tx_lnk_stat : 16;
+ u64 tx_lne_stat : 16;
+ } s;
+ struct cvmx_ilk_lne_sts_msg_cn68xx {
+ u64 reserved_56_63 : 8;
+ u64 rx_lnk_stat : 8;
+ u64 reserved_40_47 : 8;
+ u64 rx_lne_stat : 8;
+ u64 reserved_24_31 : 8;
+ u64 tx_lnk_stat : 8;
+ u64 reserved_8_15 : 8;
+ u64 tx_lne_stat : 8;
+ } cn68xx;
+ struct cvmx_ilk_lne_sts_msg_cn68xx cn68xxp1;
+ struct cvmx_ilk_lne_sts_msg_s cn78xx;
+ struct cvmx_ilk_lne_sts_msg_s cn78xxp1;
+};
+
+typedef union cvmx_ilk_lne_sts_msg cvmx_ilk_lne_sts_msg_t;
+
+/**
+ * cvmx_ilk_rid_cfg
+ *
+ * ILK reassembly-ID configuration CSR layout; cn78xx-only.
+ */
+union cvmx_ilk_rid_cfg {
+ u64 u64;
+ struct cvmx_ilk_rid_cfg_s {
+ u64 reserved_39_63 : 25;
+ u64 max_cnt : 7;
+ u64 reserved_7_31 : 25;
+ u64 base : 7;
+ } s;
+ struct cvmx_ilk_rid_cfg_s cn78xx;
+ struct cvmx_ilk_rid_cfg_s cn78xxp1;
+};
+
+typedef union cvmx_ilk_rid_cfg cvmx_ilk_rid_cfg_t;
+
+/**
+ * cvmx_ilk_rx#_byte_cnt#
+ *
+ * Per-channel RX byte counter CSR layout; cn78xx-only.
+ */
+union cvmx_ilk_rxx_byte_cntx {
+ u64 u64;
+ struct cvmx_ilk_rxx_byte_cntx_s {
+ u64 reserved_40_63 : 24;
+ u64 rx_bytes : 40;
+ } s;
+ struct cvmx_ilk_rxx_byte_cntx_s cn78xx;
+ struct cvmx_ilk_rxx_byte_cntx_s cn78xxp1;
+};
+
+typedef union cvmx_ilk_rxx_byte_cntx cvmx_ilk_rxx_byte_cntx_t;
+
+/**
+ * cvmx_ilk_rx#_cal_entry#
+ *
+ * RX calendar-table entry CSR layout (control + channel); cn78xx-only.
+ */
+union cvmx_ilk_rxx_cal_entryx {
+ u64 u64;
+ struct cvmx_ilk_rxx_cal_entryx_s {
+ u64 reserved_34_63 : 30;
+ u64 ctl : 2;
+ u64 reserved_8_31 : 24;
+ u64 channel : 8;
+ } s;
+ struct cvmx_ilk_rxx_cal_entryx_s cn78xx;
+ struct cvmx_ilk_rxx_cal_entryx_s cn78xxp1;
+};
+
+typedef union cvmx_ilk_rxx_cal_entryx cvmx_ilk_rxx_cal_entryx_t;
+
+/**
+ * cvmx_ilk_rx#_cfg0
+ *
+ * Per-RX-link configuration register 0 layout; lane_ena is 16 bits on
+ * cn78xx and 8 bits on cn68xx/cn68xxp1.
+ */
+union cvmx_ilk_rxx_cfg0 {
+ u64 u64;
+ struct cvmx_ilk_rxx_cfg0_s {
+ u64 ext_lpbk_fc : 1;
+ u64 ext_lpbk : 1;
+ u64 reserved_60_61 : 2;
+ u64 lnk_stats_wrap : 1;
+ u64 bcw_push : 1;
+ u64 mproto_ign : 1;
+ u64 ptrn_mode : 1;
+ u64 lnk_stats_rdclr : 1;
+ u64 lnk_stats_ena : 1;
+ u64 mltuse_fc_ena : 1;
+ u64 cal_ena : 1;
+ u64 mfrm_len : 13;
+ u64 brst_shrt : 7;
+ u64 lane_rev : 1;
+ u64 brst_max : 5;
+ u64 reserved_25_25 : 1;
+ u64 cal_depth : 9;
+ u64 lane_ena : 16;
+ } s;
+ struct cvmx_ilk_rxx_cfg0_cn68xx {
+ u64 ext_lpbk_fc : 1;
+ u64 ext_lpbk : 1;
+ u64 reserved_60_61 : 2;
+ u64 lnk_stats_wrap : 1;
+ u64 bcw_push : 1;
+ u64 mproto_ign : 1;
+ u64 ptrn_mode : 1;
+ u64 lnk_stats_rdclr : 1;
+ u64 lnk_stats_ena : 1;
+ u64 mltuse_fc_ena : 1;
+ u64 cal_ena : 1;
+ u64 mfrm_len : 13;
+ u64 brst_shrt : 7;
+ u64 lane_rev : 1;
+ u64 brst_max : 5;
+ u64 reserved_25_25 : 1;
+ u64 cal_depth : 9;
+ u64 reserved_8_15 : 8;
+ u64 lane_ena : 8;
+ } cn68xx;
+ struct cvmx_ilk_rxx_cfg0_cn68xxp1 {
+ u64 ext_lpbk_fc : 1;
+ u64 ext_lpbk : 1;
+ u64 reserved_57_61 : 5;
+ u64 ptrn_mode : 1;
+ u64 lnk_stats_rdclr : 1;
+ u64 lnk_stats_ena : 1;
+ u64 mltuse_fc_ena : 1;
+ u64 cal_ena : 1;
+ u64 mfrm_len : 13;
+ u64 brst_shrt : 7;
+ u64 lane_rev : 1;
+ u64 brst_max : 5;
+ u64 reserved_25_25 : 1;
+ u64 cal_depth : 9;
+ u64 reserved_8_15 : 8;
+ u64 lane_ena : 8;
+ } cn68xxp1;
+ struct cvmx_ilk_rxx_cfg0_s cn78xx;
+ struct cvmx_ilk_rxx_cfg0_s cn78xxp1;
+};
+
+typedef union cvmx_ilk_rxx_cfg0 cvmx_ilk_rxx_cfg0_t;
+
+/**
+ * cvmx_ilk_rx#_cfg1
+ *
+ * Per-RX-link configuration register 1 layout (FIFO levels and
+ * flow-control bits); FIFO fields are 13 bits on cn78xx, 12 on cn68xx.
+ */
+union cvmx_ilk_rxx_cfg1 {
+ u64 u64;
+ struct cvmx_ilk_rxx_cfg1_s {
+ u64 reserved_62_63 : 2;
+ u64 rx_fifo_cnt : 12;
+ u64 reserved_49_49 : 1;
+ u64 rx_fifo_hwm : 13;
+ u64 reserved_35_35 : 1;
+ u64 rx_fifo_max : 13;
+ u64 pkt_flush : 1;
+ u64 pkt_ena : 1;
+ u64 la_mode : 1;
+ u64 tx_link_fc : 1;
+ u64 rx_link_fc : 1;
+ u64 rx_align_ena : 1;
+ u64 rx_bdry_lock_ena : 16;
+ } s;
+ struct cvmx_ilk_rxx_cfg1_cn68xx {
+ u64 reserved_62_63 : 2;
+ u64 rx_fifo_cnt : 12;
+ u64 reserved_48_49 : 2;
+ u64 rx_fifo_hwm : 12;
+ u64 reserved_34_35 : 2;
+ u64 rx_fifo_max : 12;
+ u64 pkt_flush : 1;
+ u64 pkt_ena : 1;
+ u64 la_mode : 1;
+ u64 tx_link_fc : 1;
+ u64 rx_link_fc : 1;
+ u64 rx_align_ena : 1;
+ u64 reserved_8_15 : 8;
+ u64 rx_bdry_lock_ena : 8;
+ } cn68xx;
+ struct cvmx_ilk_rxx_cfg1_cn68xx cn68xxp1;
+ struct cvmx_ilk_rxx_cfg1_s cn78xx;
+ struct cvmx_ilk_rxx_cfg1_s cn78xxp1;
+};
+
+typedef union cvmx_ilk_rxx_cfg1 cvmx_ilk_rxx_cfg1_t;
+
+/**
+ * cvmx_ilk_rx#_cha#
+ *
+ * Per-RX-channel port-kind mapping CSR layout; cn78xx-only.
+ */
+union cvmx_ilk_rxx_chax {
+ u64 u64;
+ struct cvmx_ilk_rxx_chax_s {
+ u64 reserved_6_63 : 58;
+ u64 port_kind : 6;
+ } s;
+ struct cvmx_ilk_rxx_chax_s cn78xx;
+ struct cvmx_ilk_rxx_chax_s cn78xxp1;
+};
+
+typedef union cvmx_ilk_rxx_chax cvmx_ilk_rxx_chax_t;
+
+/**
+ * cvmx_ilk_rx#_cha_xon#
+ *
+ * RX per-channel XON bitmap CSR layout (64 channels per word);
+ * cn78xx-only.
+ */
+union cvmx_ilk_rxx_cha_xonx {
+ u64 u64;
+ struct cvmx_ilk_rxx_cha_xonx_s {
+ u64 xon : 64;
+ } s;
+ struct cvmx_ilk_rxx_cha_xonx_s cn78xx;
+ struct cvmx_ilk_rxx_cha_xonx_s cn78xxp1;
+};
+
+typedef union cvmx_ilk_rxx_cha_xonx cvmx_ilk_rxx_cha_xonx_t;
+
+/**
+ * cvmx_ilk_rx#_err_cfg
+ *
+ * Per-RX-link ECC error-injection/correction configuration CSR layout;
+ * cn78xx-only.
+ */
+union cvmx_ilk_rxx_err_cfg {
+ u64 u64;
+ struct cvmx_ilk_rxx_err_cfg_s {
+ u64 reserved_20_63 : 44;
+ u64 fwc_flip : 2;
+ u64 pmap_flip : 2;
+ u64 reserved_2_15 : 14;
+ u64 fwc_cor_dis : 1;
+ u64 pmap_cor_dis : 1;
+ } s;
+ struct cvmx_ilk_rxx_err_cfg_s cn78xx;
+ struct cvmx_ilk_rxx_err_cfg_s cn78xxp1;
+};
+
+typedef union cvmx_ilk_rxx_err_cfg cvmx_ilk_rxx_err_cfg_t;
+
+/**
+ * cvmx_ilk_rx#_flow_ctl0
+ *
+ * RX flow-control status bits for channels 0-63; cn68xx-only.
+ */
+union cvmx_ilk_rxx_flow_ctl0 {
+ u64 u64;
+ struct cvmx_ilk_rxx_flow_ctl0_s {
+ u64 status : 64;
+ } s;
+ struct cvmx_ilk_rxx_flow_ctl0_s cn68xx;
+ struct cvmx_ilk_rxx_flow_ctl0_s cn68xxp1;
+};
+
+typedef union cvmx_ilk_rxx_flow_ctl0 cvmx_ilk_rxx_flow_ctl0_t;
+
+/**
+ * cvmx_ilk_rx#_flow_ctl1
+ *
+ * RX flow-control status bits for the upper channel range; cn68xx-only.
+ */
+union cvmx_ilk_rxx_flow_ctl1 {
+ u64 u64;
+ struct cvmx_ilk_rxx_flow_ctl1_s {
+ u64 status : 64;
+ } s;
+ struct cvmx_ilk_rxx_flow_ctl1_s cn68xx;
+ struct cvmx_ilk_rxx_flow_ctl1_s cn68xxp1;
+};
+
+typedef union cvmx_ilk_rxx_flow_ctl1 cvmx_ilk_rxx_flow_ctl1_t;
+
+/**
+ * cvmx_ilk_rx#_idx_cal
+ *
+ * RX calendar-table index register layout (auto-increment index used
+ * with ILK_RX#_MEM_CAL0/1); cn68xx-only.
+ */
+union cvmx_ilk_rxx_idx_cal {
+ u64 u64;
+ struct cvmx_ilk_rxx_idx_cal_s {
+ u64 reserved_14_63 : 50;
+ u64 inc : 6;
+ u64 reserved_6_7 : 2;
+ u64 index : 6;
+ } s;
+ struct cvmx_ilk_rxx_idx_cal_s cn68xx;
+ struct cvmx_ilk_rxx_idx_cal_s cn68xxp1;
+};
+
+typedef union cvmx_ilk_rxx_idx_cal cvmx_ilk_rxx_idx_cal_t;
+
+/**
+ * cvmx_ilk_rx#_idx_stat0
+ *
+ * Index register for the RX statistics memory bank 0 (auto-increment,
+ * optional clear-on-read); cn68xx-only.
+ */
+union cvmx_ilk_rxx_idx_stat0 {
+ u64 u64;
+ struct cvmx_ilk_rxx_idx_stat0_s {
+ u64 reserved_32_63 : 32;
+ u64 clr : 1;
+ u64 reserved_24_30 : 7;
+ u64 inc : 8;
+ u64 reserved_8_15 : 8;
+ u64 index : 8;
+ } s;
+ struct cvmx_ilk_rxx_idx_stat0_s cn68xx;
+ struct cvmx_ilk_rxx_idx_stat0_s cn68xxp1;
+};
+
+typedef union cvmx_ilk_rxx_idx_stat0 cvmx_ilk_rxx_idx_stat0_t;
+
+/**
+ * cvmx_ilk_rx#_idx_stat1
+ *
+ * Index register for the RX statistics memory bank 1 (auto-increment,
+ * optional clear-on-read); cn68xx-only.
+ */
+union cvmx_ilk_rxx_idx_stat1 {
+ u64 u64;
+ struct cvmx_ilk_rxx_idx_stat1_s {
+ u64 reserved_32_63 : 32;
+ u64 clr : 1;
+ u64 reserved_24_30 : 7;
+ u64 inc : 8;
+ u64 reserved_8_15 : 8;
+ u64 index : 8;
+ } s;
+ struct cvmx_ilk_rxx_idx_stat1_s cn68xx;
+ struct cvmx_ilk_rxx_idx_stat1_s cn68xxp1;
+};
+
+typedef union cvmx_ilk_rxx_idx_stat1 cvmx_ilk_rxx_idx_stat1_t;
+
+/**
+ * cvmx_ilk_rx#_int
+ *
+ * Per-RX-link interrupt status CSR layout; "s" is the field superset,
+ * the cn68xx variants lack the ECC (sbe/dbe) and, on p1, the
+ * pkt_drop_sop bits.
+ */
+union cvmx_ilk_rxx_int {
+ u64 u64;
+ struct cvmx_ilk_rxx_int_s {
+ u64 reserved_13_63 : 51;
+ u64 pmap_dbe : 1;
+ u64 pmap_sbe : 1;
+ u64 fwc_dbe : 1;
+ u64 fwc_sbe : 1;
+ u64 pkt_drop_sop : 1;
+ u64 pkt_drop_rid : 1;
+ u64 pkt_drop_rxf : 1;
+ u64 lane_bad_word : 1;
+ u64 stat_cnt_ovfl : 1;
+ u64 lane_align_done : 1;
+ u64 word_sync_done : 1;
+ u64 crc24_err : 1;
+ u64 lane_align_fail : 1;
+ } s;
+ struct cvmx_ilk_rxx_int_cn68xx {
+ u64 reserved_9_63 : 55;
+ u64 pkt_drop_sop : 1;
+ u64 pkt_drop_rid : 1;
+ u64 pkt_drop_rxf : 1;
+ u64 lane_bad_word : 1;
+ u64 stat_cnt_ovfl : 1;
+ u64 lane_align_done : 1;
+ u64 word_sync_done : 1;
+ u64 crc24_err : 1;
+ u64 lane_align_fail : 1;
+ } cn68xx;
+ struct cvmx_ilk_rxx_int_cn68xxp1 {
+ u64 reserved_8_63 : 56;
+ u64 pkt_drop_rid : 1;
+ u64 pkt_drop_rxf : 1;
+ u64 lane_bad_word : 1;
+ u64 stat_cnt_ovfl : 1;
+ u64 lane_align_done : 1;
+ u64 word_sync_done : 1;
+ u64 crc24_err : 1;
+ u64 lane_align_fail : 1;
+ } cn68xxp1;
+ struct cvmx_ilk_rxx_int_s cn78xx;
+ struct cvmx_ilk_rxx_int_s cn78xxp1;
+};
+
+typedef union cvmx_ilk_rxx_int cvmx_ilk_rxx_int_t;
+
+/**
+ * cvmx_ilk_rx#_int_en
+ *
+ * RX interrupt enables matching the ILK_RXx_INT flags: bits 8:0 on
+ * CN68XX, bits 7:0 on CN68XXP1 (no PKT_DROP_SOP).
+ */
+union cvmx_ilk_rxx_int_en {
+ u64 u64;
+ struct cvmx_ilk_rxx_int_en_s {
+ u64 reserved_9_63 : 55;
+ u64 pkt_drop_sop : 1;
+ u64 pkt_drop_rid : 1;
+ u64 pkt_drop_rxf : 1;
+ u64 lane_bad_word : 1;
+ u64 stat_cnt_ovfl : 1;
+ u64 lane_align_done : 1;
+ u64 word_sync_done : 1;
+ u64 crc24_err : 1;
+ u64 lane_align_fail : 1;
+ } s;
+ struct cvmx_ilk_rxx_int_en_s cn68xx;
+ struct cvmx_ilk_rxx_int_en_cn68xxp1 {
+ u64 reserved_8_63 : 56;
+ u64 pkt_drop_rid : 1;
+ u64 pkt_drop_rxf : 1;
+ u64 lane_bad_word : 1;
+ u64 stat_cnt_ovfl : 1;
+ u64 lane_align_done : 1;
+ u64 word_sync_done : 1;
+ u64 crc24_err : 1;
+ u64 lane_align_fail : 1;
+ } cn68xxp1;
+};
+
+typedef union cvmx_ilk_rxx_int_en cvmx_ilk_rxx_int_en_t;
+
+/**
+ * cvmx_ilk_rx#_jabber
+ *
+ * Jabber length: CNT in bits 15:0.
+ */
+union cvmx_ilk_rxx_jabber {
+ u64 u64;
+ struct cvmx_ilk_rxx_jabber_s {
+ u64 reserved_16_63 : 48;
+ u64 cnt : 16;
+ } s;
+ struct cvmx_ilk_rxx_jabber_s cn68xx;
+ struct cvmx_ilk_rxx_jabber_s cn68xxp1;
+ struct cvmx_ilk_rxx_jabber_s cn78xx;
+ struct cvmx_ilk_rxx_jabber_s cn78xxp1;
+};
+
+typedef union cvmx_ilk_rxx_jabber cvmx_ilk_rxx_jabber_t;
+
+/**
+ * cvmx_ilk_rx#_mem_cal0
+ *
+ * Notes:
+ * Software must program the calendar table prior to enabling the
+ * link.
+ *
+ * Software must always write ILK_RXx_MEM_CAL0 then ILK_RXx_MEM_CAL1.
+ * Software must never write them in reverse order or write one without
+ * writing the other.
+ *
+ * A given calendar table entry has no effect on PKO pipe
+ * backpressure when either:
+ * - ENTRY_CTLx=Link (1), or
+ * - ENTRY_CTLx=XON (3) and PORT_PIPEx is outside the range of ILK_TXx_PIPE[BASE/NUMP].
+ *
+ * Within the 8 calendar table entries of one IDX value, if more
+ * than one affects the same PKO pipe, XOFF always wins over XON,
+ * regardless of the calendar table order.
+ *
+ * Software must always read ILK_RXx_MEM_CAL0 then ILK_RXx_MEM_CAL1. Software
+ * must never read them in reverse order or read one without reading the
+ * other.
+ *
+ * Each calendar entry is a 2-bit ENTRY_CTL plus a 7-bit PORT_PIPE;
+ * this register holds entries 0-3 (ILK_RXx_MEM_CAL1 holds 4-7).
+ */
+union cvmx_ilk_rxx_mem_cal0 {
+ u64 u64;
+ struct cvmx_ilk_rxx_mem_cal0_s {
+ u64 reserved_36_63 : 28;
+ u64 entry_ctl3 : 2;
+ u64 port_pipe3 : 7;
+ u64 entry_ctl2 : 2;
+ u64 port_pipe2 : 7;
+ u64 entry_ctl1 : 2;
+ u64 port_pipe1 : 7;
+ u64 entry_ctl0 : 2;
+ u64 port_pipe0 : 7;
+ } s;
+ struct cvmx_ilk_rxx_mem_cal0_s cn68xx;
+ struct cvmx_ilk_rxx_mem_cal0_s cn68xxp1;
+};
+
+typedef union cvmx_ilk_rxx_mem_cal0 cvmx_ilk_rxx_mem_cal0_t;
+
+/**
+ * cvmx_ilk_rx#_mem_cal1
+ *
+ * Notes:
+ * Software must program the calendar table prior to enabling the
+ * link.
+ *
+ * Software must always write ILK_RXx_MEM_CAL0 then ILK_RXx_MEM_CAL1.
+ * Software must never write them in reverse order or write one without
+ * writing the other.
+ *
+ * A given calendar table entry has no effect on PKO pipe
+ * backpressure when either:
+ * - ENTRY_CTLx=Link (1), or
+ * - ENTRY_CTLx=XON (3) and PORT_PIPEx is outside the range of ILK_TXx_PIPE[BASE/NUMP].
+ *
+ * Within the 8 calendar table entries of one IDX value, if more
+ * than one affects the same PKO pipe, XOFF always wins over XON,
+ * regardless of the calendar table order.
+ *
+ * Software must always read ILK_RXx_MEM_CAL0 then ILK_RXx_MEM_CAL1. Software
+ * must never read them in reverse order or read one without reading the
+ * other.
+ *
+ * Each calendar entry is a 2-bit ENTRY_CTL plus a 7-bit PORT_PIPE;
+ * this register holds entries 4-7 (ILK_RXx_MEM_CAL0 holds 0-3).
+ */
+union cvmx_ilk_rxx_mem_cal1 {
+ u64 u64;
+ struct cvmx_ilk_rxx_mem_cal1_s {
+ u64 reserved_36_63 : 28;
+ u64 entry_ctl7 : 2;
+ u64 port_pipe7 : 7;
+ u64 entry_ctl6 : 2;
+ u64 port_pipe6 : 7;
+ u64 entry_ctl5 : 2;
+ u64 port_pipe5 : 7;
+ u64 entry_ctl4 : 2;
+ u64 port_pipe4 : 7;
+ } s;
+ struct cvmx_ilk_rxx_mem_cal1_s cn68xx;
+ struct cvmx_ilk_rxx_mem_cal1_s cn68xxp1;
+};
+
+typedef union cvmx_ilk_rxx_mem_cal1 cvmx_ilk_rxx_mem_cal1_t;
+
+/**
+ * cvmx_ilk_rx#_mem_stat0
+ *
+ * RX packet counter memory: RX_PKT in bits 27:0.
+ */
+union cvmx_ilk_rxx_mem_stat0 {
+ u64 u64;
+ struct cvmx_ilk_rxx_mem_stat0_s {
+ u64 reserved_28_63 : 36;
+ u64 rx_pkt : 28;
+ } s;
+ struct cvmx_ilk_rxx_mem_stat0_s cn68xx;
+ struct cvmx_ilk_rxx_mem_stat0_s cn68xxp1;
+};
+
+typedef union cvmx_ilk_rxx_mem_stat0 cvmx_ilk_rxx_mem_stat0_t;
+
+/**
+ * cvmx_ilk_rx#_mem_stat1
+ *
+ * RX byte counter memory: RX_BYTES in bits 35:0.
+ */
+union cvmx_ilk_rxx_mem_stat1 {
+ u64 u64;
+ struct cvmx_ilk_rxx_mem_stat1_s {
+ u64 reserved_36_63 : 28;
+ u64 rx_bytes : 36;
+ } s;
+ struct cvmx_ilk_rxx_mem_stat1_s cn68xx;
+ struct cvmx_ilk_rxx_mem_stat1_s cn68xxp1;
+};
+
+typedef union cvmx_ilk_rxx_mem_stat1 cvmx_ilk_rxx_mem_stat1_t;
+
+/**
+ * cvmx_ilk_rx#_pkt_cnt#
+ *
+ * RX packet counter: RX_PKT in bits 33:0 (CN78XX models only).
+ */
+union cvmx_ilk_rxx_pkt_cntx {
+ u64 u64;
+ struct cvmx_ilk_rxx_pkt_cntx_s {
+ u64 reserved_34_63 : 30;
+ u64 rx_pkt : 34;
+ } s;
+ struct cvmx_ilk_rxx_pkt_cntx_s cn78xx;
+ struct cvmx_ilk_rxx_pkt_cntx_s cn78xxp1;
+};
+
+typedef union cvmx_ilk_rxx_pkt_cntx cvmx_ilk_rxx_pkt_cntx_t;
+
+/**
+ * cvmx_ilk_rx#_rid
+ *
+ * MAX_CNT is 7 bits (6:0) on CN78XX and 6 bits (5:0) on CN68XX.
+ */
+union cvmx_ilk_rxx_rid {
+ u64 u64;
+ struct cvmx_ilk_rxx_rid_s {
+ u64 reserved_7_63 : 57;
+ u64 max_cnt : 7;
+ } s;
+ struct cvmx_ilk_rxx_rid_cn68xx {
+ u64 reserved_6_63 : 58;
+ u64 max_cnt : 6;
+ } cn68xx;
+ struct cvmx_ilk_rxx_rid_s cn78xx;
+ struct cvmx_ilk_rxx_rid_s cn78xxp1;
+};
+
+typedef union cvmx_ilk_rxx_rid cvmx_ilk_rxx_rid_t;
+
+/**
+ * cvmx_ilk_rx#_stat0
+ *
+ * CRC24_MATCH_CNT width varies by model: 35 bits (CN78XX),
+ * 33 bits (CN68XX), 27 bits (CN68XXP1).
+ */
+union cvmx_ilk_rxx_stat0 {
+ u64 u64;
+ struct cvmx_ilk_rxx_stat0_s {
+ u64 reserved_35_63 : 29;
+ u64 crc24_match_cnt : 35;
+ } s;
+ struct cvmx_ilk_rxx_stat0_cn68xx {
+ u64 reserved_33_63 : 31;
+ u64 crc24_match_cnt : 33;
+ } cn68xx;
+ struct cvmx_ilk_rxx_stat0_cn68xxp1 {
+ u64 reserved_27_63 : 37;
+ u64 crc24_match_cnt : 27;
+ } cn68xxp1;
+ struct cvmx_ilk_rxx_stat0_s cn78xx;
+ struct cvmx_ilk_rxx_stat0_s cn78xxp1;
+};
+
+typedef union cvmx_ilk_rxx_stat0 cvmx_ilk_rxx_stat0_t;
+
+/**
+ * cvmx_ilk_rx#_stat1
+ *
+ * CRC24_ERR_CNT: 20 bits on CN78XX, 18 bits on CN68XX/CN68XXP1.
+ */
+union cvmx_ilk_rxx_stat1 {
+ u64 u64;
+ struct cvmx_ilk_rxx_stat1_s {
+ u64 reserved_20_63 : 44;
+ u64 crc24_err_cnt : 20;
+ } s;
+ struct cvmx_ilk_rxx_stat1_cn68xx {
+ u64 reserved_18_63 : 46;
+ u64 crc24_err_cnt : 18;
+ } cn68xx;
+ struct cvmx_ilk_rxx_stat1_cn68xx cn68xxp1;
+ struct cvmx_ilk_rxx_stat1_s cn78xx;
+ struct cvmx_ilk_rxx_stat1_s cn78xxp1;
+};
+
+typedef union cvmx_ilk_rxx_stat1 cvmx_ilk_rxx_stat1_t;
+
+/**
+ * cvmx_ilk_rx#_stat2
+ *
+ * Burst counters: BRST_NOT_FULL_CNT in the upper half and BRST_CNT in
+ * the lower half; field widths vary by model (18/30, 16/28 or 16/16).
+ */
+union cvmx_ilk_rxx_stat2 {
+ u64 u64;
+ struct cvmx_ilk_rxx_stat2_s {
+ u64 reserved_50_63 : 14;
+ u64 brst_not_full_cnt : 18;
+ u64 reserved_30_31 : 2;
+ u64 brst_cnt : 30;
+ } s;
+ struct cvmx_ilk_rxx_stat2_cn68xx {
+ u64 reserved_48_63 : 16;
+ u64 brst_not_full_cnt : 16;
+ u64 reserved_28_31 : 4;
+ u64 brst_cnt : 28;
+ } cn68xx;
+ struct cvmx_ilk_rxx_stat2_cn68xxp1 {
+ u64 reserved_48_63 : 16;
+ u64 brst_not_full_cnt : 16;
+ u64 reserved_16_31 : 16;
+ u64 brst_cnt : 16;
+ } cn68xxp1;
+ struct cvmx_ilk_rxx_stat2_s cn78xx;
+ struct cvmx_ilk_rxx_stat2_s cn78xxp1;
+};
+
+typedef union cvmx_ilk_rxx_stat2 cvmx_ilk_rxx_stat2_t;
+
+/**
+ * cvmx_ilk_rx#_stat3
+ *
+ * BRST_MAX_ERR_CNT: 18 bits on CN78XX, 16 bits on CN68XX.
+ */
+union cvmx_ilk_rxx_stat3 {
+ u64 u64;
+ struct cvmx_ilk_rxx_stat3_s {
+ u64 reserved_18_63 : 46;
+ u64 brst_max_err_cnt : 18;
+ } s;
+ struct cvmx_ilk_rxx_stat3_cn68xx {
+ u64 reserved_16_63 : 48;
+ u64 brst_max_err_cnt : 16;
+ } cn68xx;
+ struct cvmx_ilk_rxx_stat3_cn68xx cn68xxp1;
+ struct cvmx_ilk_rxx_stat3_s cn78xx;
+ struct cvmx_ilk_rxx_stat3_s cn78xxp1;
+};
+
+typedef union cvmx_ilk_rxx_stat3 cvmx_ilk_rxx_stat3_t;
+
+/**
+ * cvmx_ilk_rx#_stat4
+ *
+ * BRST_SHRT_ERR_CNT: 18 bits on CN78XX, 16 bits on CN68XX.
+ */
+union cvmx_ilk_rxx_stat4 {
+ u64 u64;
+ struct cvmx_ilk_rxx_stat4_s {
+ u64 reserved_18_63 : 46;
+ u64 brst_shrt_err_cnt : 18;
+ } s;
+ struct cvmx_ilk_rxx_stat4_cn68xx {
+ u64 reserved_16_63 : 48;
+ u64 brst_shrt_err_cnt : 16;
+ } cn68xx;
+ struct cvmx_ilk_rxx_stat4_cn68xx cn68xxp1;
+ struct cvmx_ilk_rxx_stat4_s cn78xx;
+ struct cvmx_ilk_rxx_stat4_s cn78xxp1;
+};
+
+typedef union cvmx_ilk_rxx_stat4 cvmx_ilk_rxx_stat4_t;
+
+/**
+ * cvmx_ilk_rx#_stat5
+ *
+ * ALIGN_CNT: 25 bits (CN78XX), 23 bits (CN68XX) or 16 bits (CN68XXP1).
+ */
+union cvmx_ilk_rxx_stat5 {
+ u64 u64;
+ struct cvmx_ilk_rxx_stat5_s {
+ u64 reserved_25_63 : 39;
+ u64 align_cnt : 25;
+ } s;
+ struct cvmx_ilk_rxx_stat5_cn68xx {
+ u64 reserved_23_63 : 41;
+ u64 align_cnt : 23;
+ } cn68xx;
+ struct cvmx_ilk_rxx_stat5_cn68xxp1 {
+ u64 reserved_16_63 : 48;
+ u64 align_cnt : 16;
+ } cn68xxp1;
+ struct cvmx_ilk_rxx_stat5_s cn78xx;
+ struct cvmx_ilk_rxx_stat5_s cn78xxp1;
+};
+
+typedef union cvmx_ilk_rxx_stat5 cvmx_ilk_rxx_stat5_t;
+
+/**
+ * cvmx_ilk_rx#_stat6
+ *
+ * ALIGN_ERR_CNT: 18 bits on CN78XX, 16 bits on CN68XX.
+ */
+union cvmx_ilk_rxx_stat6 {
+ u64 u64;
+ struct cvmx_ilk_rxx_stat6_s {
+ u64 reserved_18_63 : 46;
+ u64 align_err_cnt : 18;
+ } s;
+ struct cvmx_ilk_rxx_stat6_cn68xx {
+ u64 reserved_16_63 : 48;
+ u64 align_err_cnt : 16;
+ } cn68xx;
+ struct cvmx_ilk_rxx_stat6_cn68xx cn68xxp1;
+ struct cvmx_ilk_rxx_stat6_s cn78xx;
+ struct cvmx_ilk_rxx_stat6_s cn78xxp1;
+};
+
+typedef union cvmx_ilk_rxx_stat6 cvmx_ilk_rxx_stat6_t;
+
+/**
+ * cvmx_ilk_rx#_stat7
+ *
+ * BAD_64B67B_CNT: 18 bits on CN78XX, 16 bits on CN68XX.
+ */
+union cvmx_ilk_rxx_stat7 {
+ u64 u64;
+ struct cvmx_ilk_rxx_stat7_s {
+ u64 reserved_18_63 : 46;
+ u64 bad_64b67b_cnt : 18;
+ } s;
+ struct cvmx_ilk_rxx_stat7_cn68xx {
+ u64 reserved_16_63 : 48;
+ u64 bad_64b67b_cnt : 16;
+ } cn68xx;
+ struct cvmx_ilk_rxx_stat7_cn68xx cn68xxp1;
+ struct cvmx_ilk_rxx_stat7_s cn78xx;
+ struct cvmx_ilk_rxx_stat7_s cn78xxp1;
+};
+
+typedef union cvmx_ilk_rxx_stat7 cvmx_ilk_rxx_stat7_t;
+
+/**
+ * cvmx_ilk_rx#_stat8
+ *
+ * Drop counters: PKT_DROP_RID_CNT in bits 31:16, PKT_DROP_RXF_CNT in
+ * bits 15:0.
+ */
+union cvmx_ilk_rxx_stat8 {
+ u64 u64;
+ struct cvmx_ilk_rxx_stat8_s {
+ u64 reserved_32_63 : 32;
+ u64 pkt_drop_rid_cnt : 16;
+ u64 pkt_drop_rxf_cnt : 16;
+ } s;
+ struct cvmx_ilk_rxx_stat8_s cn68xx;
+ struct cvmx_ilk_rxx_stat8_s cn68xxp1;
+ struct cvmx_ilk_rxx_stat8_s cn78xx;
+ struct cvmx_ilk_rxx_stat8_s cn78xxp1;
+};
+
+typedef union cvmx_ilk_rxx_stat8 cvmx_ilk_rxx_stat8_t;
+
+/**
+ * cvmx_ilk_rx#_stat9
+ *
+ * This register is reserved.
+ *
+ * All 64 bits are reserved on every model.
+ */
+union cvmx_ilk_rxx_stat9 {
+ u64 u64;
+ struct cvmx_ilk_rxx_stat9_s {
+ u64 reserved_0_63 : 64;
+ } s;
+ struct cvmx_ilk_rxx_stat9_s cn68xx;
+ struct cvmx_ilk_rxx_stat9_s cn68xxp1;
+ struct cvmx_ilk_rxx_stat9_s cn78xx;
+ struct cvmx_ilk_rxx_stat9_s cn78xxp1;
+};
+
+typedef union cvmx_ilk_rxx_stat9 cvmx_ilk_rxx_stat9_t;
+
+/**
+ * cvmx_ilk_rx_lne#_cfg
+ *
+ * Per-RX-lane configuration. The set of available control bits grows
+ * with the model: CN68XXP1 has bits 4:0 only, CN68XX adds bits 8 and
+ * 6:5, CN78XX additionally exposes RX_DIS_DISP_CHK (bit 6).
+ */
+union cvmx_ilk_rx_lnex_cfg {
+ u64 u64;
+ struct cvmx_ilk_rx_lnex_cfg_s {
+ u64 reserved_9_63 : 55;
+ u64 rx_dis_psh_skip : 1;
+ u64 reserved_7_7 : 1;
+ u64 rx_dis_disp_chk : 1;
+ u64 rx_scrm_sync : 1;
+ u64 rx_bdry_sync : 1;
+ u64 rx_dis_ukwn : 1;
+ u64 rx_dis_scram : 1;
+ u64 stat_rdclr : 1;
+ u64 stat_ena : 1;
+ } s;
+ struct cvmx_ilk_rx_lnex_cfg_cn68xx {
+ u64 reserved_9_63 : 55;
+ u64 rx_dis_psh_skip : 1;
+ u64 reserved_6_7 : 2;
+ u64 rx_scrm_sync : 1;
+ u64 rx_bdry_sync : 1;
+ u64 rx_dis_ukwn : 1;
+ u64 rx_dis_scram : 1;
+ u64 stat_rdclr : 1;
+ u64 stat_ena : 1;
+ } cn68xx;
+ struct cvmx_ilk_rx_lnex_cfg_cn68xxp1 {
+ u64 reserved_5_63 : 59;
+ u64 rx_bdry_sync : 1;
+ u64 rx_dis_ukwn : 1;
+ u64 rx_dis_scram : 1;
+ u64 stat_rdclr : 1;
+ u64 stat_ena : 1;
+ } cn68xxp1;
+ struct cvmx_ilk_rx_lnex_cfg_s cn78xx;
+ struct cvmx_ilk_rx_lnex_cfg_s cn78xxp1;
+};
+
+typedef union cvmx_ilk_rx_lnex_cfg cvmx_ilk_rx_lnex_cfg_t;
+
+/**
+ * cvmx_ilk_rx_lne#_int
+ *
+ * Per-lane interrupt flags; CN78XX adds DISP_ERR (bit 9) on top of the
+ * CN68XX layout (bits 8:0).
+ */
+union cvmx_ilk_rx_lnex_int {
+ u64 u64;
+ struct cvmx_ilk_rx_lnex_int_s {
+ u64 reserved_10_63 : 54;
+ u64 disp_err : 1;
+ u64 bad_64b67b : 1;
+ u64 stat_cnt_ovfl : 1;
+ u64 stat_msg : 1;
+ u64 dskew_fifo_ovfl : 1;
+ u64 scrm_sync_loss : 1;
+ u64 ukwn_cntl_word : 1;
+ u64 crc32_err : 1;
+ u64 bdry_sync_loss : 1;
+ u64 serdes_lock_loss : 1;
+ } s;
+ struct cvmx_ilk_rx_lnex_int_cn68xx {
+ u64 reserved_9_63 : 55;
+ u64 bad_64b67b : 1;
+ u64 stat_cnt_ovfl : 1;
+ u64 stat_msg : 1;
+ u64 dskew_fifo_ovfl : 1;
+ u64 scrm_sync_loss : 1;
+ u64 ukwn_cntl_word : 1;
+ u64 crc32_err : 1;
+ u64 bdry_sync_loss : 1;
+ u64 serdes_lock_loss : 1;
+ } cn68xx;
+ struct cvmx_ilk_rx_lnex_int_cn68xx cn68xxp1;
+ struct cvmx_ilk_rx_lnex_int_s cn78xx;
+ struct cvmx_ilk_rx_lnex_int_s cn78xxp1;
+};
+
+typedef union cvmx_ilk_rx_lnex_int cvmx_ilk_rx_lnex_int_t;
+
+/**
+ * cvmx_ilk_rx_lne#_int_en
+ *
+ * Per-lane interrupt enables matching ILK_RX_LNEx_INT, bits 8:0
+ * (CN68XX models only).
+ */
+union cvmx_ilk_rx_lnex_int_en {
+ u64 u64;
+ struct cvmx_ilk_rx_lnex_int_en_s {
+ u64 reserved_9_63 : 55;
+ u64 bad_64b67b : 1;
+ u64 stat_cnt_ovfl : 1;
+ u64 stat_msg : 1;
+ u64 dskew_fifo_ovfl : 1;
+ u64 scrm_sync_loss : 1;
+ u64 ukwn_cntl_word : 1;
+ u64 crc32_err : 1;
+ u64 bdry_sync_loss : 1;
+ u64 serdes_lock_loss : 1;
+ } s;
+ struct cvmx_ilk_rx_lnex_int_en_s cn68xx;
+ struct cvmx_ilk_rx_lnex_int_en_s cn68xxp1;
+};
+
+typedef union cvmx_ilk_rx_lnex_int_en cvmx_ilk_rx_lnex_int_en_t;
+
+/**
+ * cvmx_ilk_rx_lne#_stat0
+ *
+ * Per-lane counter: SER_LOCK_LOSS_CNT in bits 17:0.
+ */
+union cvmx_ilk_rx_lnex_stat0 {
+ u64 u64;
+ struct cvmx_ilk_rx_lnex_stat0_s {
+ u64 reserved_18_63 : 46;
+ u64 ser_lock_loss_cnt : 18;
+ } s;
+ struct cvmx_ilk_rx_lnex_stat0_s cn68xx;
+ struct cvmx_ilk_rx_lnex_stat0_s cn68xxp1;
+ struct cvmx_ilk_rx_lnex_stat0_s cn78xx;
+ struct cvmx_ilk_rx_lnex_stat0_s cn78xxp1;
+};
+
+typedef union cvmx_ilk_rx_lnex_stat0 cvmx_ilk_rx_lnex_stat0_t;
+
+/**
+ * cvmx_ilk_rx_lne#_stat1
+ *
+ * Per-lane counter: BDRY_SYNC_LOSS_CNT in bits 17:0.
+ */
+union cvmx_ilk_rx_lnex_stat1 {
+ u64 u64;
+ struct cvmx_ilk_rx_lnex_stat1_s {
+ u64 reserved_18_63 : 46;
+ u64 bdry_sync_loss_cnt : 18;
+ } s;
+ struct cvmx_ilk_rx_lnex_stat1_s cn68xx;
+ struct cvmx_ilk_rx_lnex_stat1_s cn68xxp1;
+ struct cvmx_ilk_rx_lnex_stat1_s cn78xx;
+ struct cvmx_ilk_rx_lnex_stat1_s cn78xxp1;
+};
+
+typedef union cvmx_ilk_rx_lnex_stat1 cvmx_ilk_rx_lnex_stat1_t;
+
+/**
+ * cvmx_ilk_rx_lne#_stat10
+ *
+ * PRBS counters (CN78XX only): PRBS_BAD in bits 42:32, PRBS_GOOD in
+ * bits 10:0.
+ */
+union cvmx_ilk_rx_lnex_stat10 {
+ u64 u64;
+ struct cvmx_ilk_rx_lnex_stat10_s {
+ u64 reserved_43_63 : 21;
+ u64 prbs_bad : 11;
+ u64 reserved_11_31 : 21;
+ u64 prbs_good : 11;
+ } s;
+ struct cvmx_ilk_rx_lnex_stat10_s cn78xx;
+ struct cvmx_ilk_rx_lnex_stat10_s cn78xxp1;
+};
+
+typedef union cvmx_ilk_rx_lnex_stat10 cvmx_ilk_rx_lnex_stat10_t;
+
+/**
+ * cvmx_ilk_rx_lne#_stat2
+ *
+ * Sync-word counters: SYNCW_GOOD_CNT in bits 49:32, SYNCW_BAD_CNT in
+ * bits 17:0.
+ */
+union cvmx_ilk_rx_lnex_stat2 {
+ u64 u64;
+ struct cvmx_ilk_rx_lnex_stat2_s {
+ u64 reserved_50_63 : 14;
+ u64 syncw_good_cnt : 18;
+ u64 reserved_18_31 : 14;
+ u64 syncw_bad_cnt : 18;
+ } s;
+ struct cvmx_ilk_rx_lnex_stat2_s cn68xx;
+ struct cvmx_ilk_rx_lnex_stat2_s cn68xxp1;
+ struct cvmx_ilk_rx_lnex_stat2_s cn78xx;
+ struct cvmx_ilk_rx_lnex_stat2_s cn78xxp1;
+};
+
+typedef union cvmx_ilk_rx_lnex_stat2 cvmx_ilk_rx_lnex_stat2_t;
+
+/**
+ * cvmx_ilk_rx_lne#_stat3
+ *
+ * Per-lane counter: BAD_64B67B_CNT in bits 17:0.
+ */
+union cvmx_ilk_rx_lnex_stat3 {
+ u64 u64;
+ struct cvmx_ilk_rx_lnex_stat3_s {
+ u64 reserved_18_63 : 46;
+ u64 bad_64b67b_cnt : 18;
+ } s;
+ struct cvmx_ilk_rx_lnex_stat3_s cn68xx;
+ struct cvmx_ilk_rx_lnex_stat3_s cn68xxp1;
+ struct cvmx_ilk_rx_lnex_stat3_s cn78xx;
+ struct cvmx_ilk_rx_lnex_stat3_s cn78xxp1;
+};
+
+typedef union cvmx_ilk_rx_lnex_stat3 cvmx_ilk_rx_lnex_stat3_t;
+
+/**
+ * cvmx_ilk_rx_lne#_stat4
+ *
+ * Word counters: CNTL_WORD_CNT in bits 58:32, DATA_WORD_CNT in
+ * bits 26:0.
+ */
+union cvmx_ilk_rx_lnex_stat4 {
+ u64 u64;
+ struct cvmx_ilk_rx_lnex_stat4_s {
+ u64 reserved_59_63 : 5;
+ u64 cntl_word_cnt : 27;
+ u64 reserved_27_31 : 5;
+ u64 data_word_cnt : 27;
+ } s;
+ struct cvmx_ilk_rx_lnex_stat4_s cn68xx;
+ struct cvmx_ilk_rx_lnex_stat4_s cn68xxp1;
+ struct cvmx_ilk_rx_lnex_stat4_s cn78xx;
+ struct cvmx_ilk_rx_lnex_stat4_s cn78xxp1;
+};
+
+typedef union cvmx_ilk_rx_lnex_stat4 cvmx_ilk_rx_lnex_stat4_t;
+
+/**
+ * cvmx_ilk_rx_lne#_stat5
+ *
+ * Per-lane counter: UNKWN_WORD_CNT in bits 17:0.
+ */
+union cvmx_ilk_rx_lnex_stat5 {
+ u64 u64;
+ struct cvmx_ilk_rx_lnex_stat5_s {
+ u64 reserved_18_63 : 46;
+ u64 unkwn_word_cnt : 18;
+ } s;
+ struct cvmx_ilk_rx_lnex_stat5_s cn68xx;
+ struct cvmx_ilk_rx_lnex_stat5_s cn68xxp1;
+ struct cvmx_ilk_rx_lnex_stat5_s cn78xx;
+ struct cvmx_ilk_rx_lnex_stat5_s cn78xxp1;
+};
+
+typedef union cvmx_ilk_rx_lnex_stat5 cvmx_ilk_rx_lnex_stat5_t;
+
+/**
+ * cvmx_ilk_rx_lne#_stat6
+ *
+ * Per-lane counter: SCRM_SYNC_LOSS_CNT in bits 17:0.
+ */
+union cvmx_ilk_rx_lnex_stat6 {
+ u64 u64;
+ struct cvmx_ilk_rx_lnex_stat6_s {
+ u64 reserved_18_63 : 46;
+ u64 scrm_sync_loss_cnt : 18;
+ } s;
+ struct cvmx_ilk_rx_lnex_stat6_s cn68xx;
+ struct cvmx_ilk_rx_lnex_stat6_s cn68xxp1;
+ struct cvmx_ilk_rx_lnex_stat6_s cn78xx;
+ struct cvmx_ilk_rx_lnex_stat6_s cn78xxp1;
+};
+
+typedef union cvmx_ilk_rx_lnex_stat6 cvmx_ilk_rx_lnex_stat6_t;
+
+/**
+ * cvmx_ilk_rx_lne#_stat7
+ *
+ * Per-lane counter: SCRM_MATCH_CNT in bits 17:0.
+ */
+union cvmx_ilk_rx_lnex_stat7 {
+ u64 u64;
+ struct cvmx_ilk_rx_lnex_stat7_s {
+ u64 reserved_18_63 : 46;
+ u64 scrm_match_cnt : 18;
+ } s;
+ struct cvmx_ilk_rx_lnex_stat7_s cn68xx;
+ struct cvmx_ilk_rx_lnex_stat7_s cn68xxp1;
+ struct cvmx_ilk_rx_lnex_stat7_s cn78xx;
+ struct cvmx_ilk_rx_lnex_stat7_s cn78xxp1;
+};
+
+typedef union cvmx_ilk_rx_lnex_stat7 cvmx_ilk_rx_lnex_stat7_t;
+
+/**
+ * cvmx_ilk_rx_lne#_stat8
+ *
+ * Per-lane counter: SKIPW_GOOD_CNT in bits 17:0.
+ */
+union cvmx_ilk_rx_lnex_stat8 {
+ u64 u64;
+ struct cvmx_ilk_rx_lnex_stat8_s {
+ u64 reserved_18_63 : 46;
+ u64 skipw_good_cnt : 18;
+ } s;
+ struct cvmx_ilk_rx_lnex_stat8_s cn68xx;
+ struct cvmx_ilk_rx_lnex_stat8_s cn68xxp1;
+ struct cvmx_ilk_rx_lnex_stat8_s cn78xx;
+ struct cvmx_ilk_rx_lnex_stat8_s cn78xxp1;
+};
+
+typedef union cvmx_ilk_rx_lnex_stat8 cvmx_ilk_rx_lnex_stat8_t;
+
+/**
+ * cvmx_ilk_rx_lne#_stat9
+ *
+ * CRC32 counters: CRC32_ERR_CNT in bits 49:32, CRC32_MATCH_CNT in
+ * bits 26:0.
+ */
+union cvmx_ilk_rx_lnex_stat9 {
+ u64 u64;
+ struct cvmx_ilk_rx_lnex_stat9_s {
+ u64 reserved_50_63 : 14;
+ u64 crc32_err_cnt : 18;
+ u64 reserved_27_31 : 5;
+ u64 crc32_match_cnt : 27;
+ } s;
+ struct cvmx_ilk_rx_lnex_stat9_s cn68xx;
+ struct cvmx_ilk_rx_lnex_stat9_s cn68xxp1;
+ struct cvmx_ilk_rx_lnex_stat9_s cn78xx;
+ struct cvmx_ilk_rx_lnex_stat9_s cn78xxp1;
+};
+
+typedef union cvmx_ilk_rx_lnex_stat9 cvmx_ilk_rx_lnex_stat9_t;
+
+/**
+ * cvmx_ilk_rxf_idx_pmap
+ *
+ * Port-map memory index register: INC in bits 24:16, INDEX in bits 8:0.
+ */
+union cvmx_ilk_rxf_idx_pmap {
+ u64 u64;
+ struct cvmx_ilk_rxf_idx_pmap_s {
+ u64 reserved_25_63 : 39;
+ u64 inc : 9;
+ u64 reserved_9_15 : 7;
+ u64 index : 9;
+ } s;
+ struct cvmx_ilk_rxf_idx_pmap_s cn68xx;
+ struct cvmx_ilk_rxf_idx_pmap_s cn68xxp1;
+};
+
+typedef union cvmx_ilk_rxf_idx_pmap cvmx_ilk_rxf_idx_pmap_t;
+
+/**
+ * cvmx_ilk_rxf_mem_pmap
+ *
+ * Port-map memory data: PORT_KIND in bits 5:0.
+ */
+union cvmx_ilk_rxf_mem_pmap {
+ u64 u64;
+ struct cvmx_ilk_rxf_mem_pmap_s {
+ u64 reserved_6_63 : 58;
+ u64 port_kind : 6;
+ } s;
+ struct cvmx_ilk_rxf_mem_pmap_s cn68xx;
+ struct cvmx_ilk_rxf_mem_pmap_s cn68xxp1;
+};
+
+typedef union cvmx_ilk_rxf_mem_pmap cvmx_ilk_rxf_mem_pmap_t;
+
+/**
+ * cvmx_ilk_ser_cfg
+ *
+ * SerDes configuration: RX/TX polarity invert (SER_RXPOL/SER_TXPOL),
+ * auto-polarity (SER_RXPOL_AUTO), reset (SER_RESET_N), power-up
+ * (SER_PWRUP) and long-haul (SER_HAUL) controls. CN78XX uses 16-bit
+ * lane masks and 4-bit PWRUP/HAUL; CN68XX uses 8-bit masks and 2-bit
+ * PWRUP/HAUL.
+ */
+union cvmx_ilk_ser_cfg {
+ u64 u64;
+ struct cvmx_ilk_ser_cfg_s {
+ u64 reserved_57_63 : 7;
+ u64 ser_rxpol_auto : 1;
+ u64 ser_rxpol : 16;
+ u64 ser_txpol : 16;
+ u64 ser_reset_n : 16;
+ u64 ser_pwrup : 4;
+ u64 ser_haul : 4;
+ } s;
+ struct cvmx_ilk_ser_cfg_cn68xx {
+ u64 reserved_57_63 : 7;
+ u64 ser_rxpol_auto : 1;
+ u64 reserved_48_55 : 8;
+ u64 ser_rxpol : 8;
+ u64 reserved_32_39 : 8;
+ u64 ser_txpol : 8;
+ u64 reserved_16_23 : 8;
+ u64 ser_reset_n : 8;
+ u64 reserved_6_7 : 2;
+ u64 ser_pwrup : 2;
+ u64 reserved_2_3 : 2;
+ u64 ser_haul : 2;
+ } cn68xx;
+ struct cvmx_ilk_ser_cfg_cn68xx cn68xxp1;
+ struct cvmx_ilk_ser_cfg_s cn78xx;
+ struct cvmx_ilk_ser_cfg_s cn78xxp1;
+};
+
+typedef union cvmx_ilk_ser_cfg cvmx_ilk_ser_cfg_t;
+
+/**
+ * cvmx_ilk_tx#_byte_cnt#
+ *
+ * TX byte counter: TX_BYTES in bits 39:0 (CN78XX models only).
+ */
+union cvmx_ilk_txx_byte_cntx {
+ u64 u64;
+ struct cvmx_ilk_txx_byte_cntx_s {
+ u64 reserved_40_63 : 24;
+ u64 tx_bytes : 40;
+ } s;
+ struct cvmx_ilk_txx_byte_cntx_s cn78xx;
+ struct cvmx_ilk_txx_byte_cntx_s cn78xxp1;
+};
+
+typedef union cvmx_ilk_txx_byte_cntx cvmx_ilk_txx_byte_cntx_t;
+
+/**
+ * cvmx_ilk_tx#_cal_entry#
+ *
+ * TX calendar entry: CTL in bits 33:32, CHANNEL in bits 7:0
+ * (CN78XX models only).
+ */
+union cvmx_ilk_txx_cal_entryx {
+ u64 u64;
+ struct cvmx_ilk_txx_cal_entryx_s {
+ u64 reserved_34_63 : 30;
+ u64 ctl : 2;
+ u64 reserved_8_31 : 24;
+ u64 channel : 8;
+ } s;
+ struct cvmx_ilk_txx_cal_entryx_s cn78xx;
+ struct cvmx_ilk_txx_cal_entryx_s cn78xxp1;
+};
+
+typedef union cvmx_ilk_txx_cal_entryx cvmx_ilk_txx_cal_entryx_t;
+
+/**
+ * cvmx_ilk_tx#_cfg0
+ *
+ * Primary TX link configuration: loopback controls (bits 63:61),
+ * calendar enable, metaframe length (MFRM_LEN), burst sizing
+ * (BRST_SHRT/BRST_MAX), lane reversal, calendar depth and the per-lane
+ * enable mask (16 lanes on CN78XX, 8 on CN68XX).
+ */
+union cvmx_ilk_txx_cfg0 {
+ u64 u64;
+ struct cvmx_ilk_txx_cfg0_s {
+ u64 ext_lpbk_fc : 1;
+ u64 ext_lpbk : 1;
+ u64 int_lpbk : 1;
+ u64 txf_byp_dis : 1;
+ u64 reserved_57_59 : 3;
+ u64 ptrn_mode : 1;
+ u64 lnk_stats_rdclr : 1;
+ u64 lnk_stats_ena : 1;
+ u64 mltuse_fc_ena : 1;
+ u64 cal_ena : 1;
+ u64 mfrm_len : 13;
+ u64 brst_shrt : 7;
+ u64 lane_rev : 1;
+ u64 brst_max : 5;
+ u64 reserved_25_25 : 1;
+ u64 cal_depth : 9;
+ u64 lane_ena : 16;
+ } s;
+ struct cvmx_ilk_txx_cfg0_cn68xx {
+ u64 ext_lpbk_fc : 1;
+ u64 ext_lpbk : 1;
+ u64 int_lpbk : 1;
+ u64 reserved_57_60 : 4;
+ u64 ptrn_mode : 1;
+ u64 reserved_55_55 : 1;
+ u64 lnk_stats_ena : 1;
+ u64 mltuse_fc_ena : 1;
+ u64 cal_ena : 1;
+ u64 mfrm_len : 13;
+ u64 brst_shrt : 7;
+ u64 lane_rev : 1;
+ u64 brst_max : 5;
+ u64 reserved_25_25 : 1;
+ u64 cal_depth : 9;
+ u64 reserved_8_15 : 8;
+ u64 lane_ena : 8;
+ } cn68xx;
+ struct cvmx_ilk_txx_cfg0_cn68xx cn68xxp1;
+ struct cvmx_ilk_txx_cfg0_s cn78xx;
+ struct cvmx_ilk_txx_cfg0_s cn78xxp1;
+};
+
+typedef union cvmx_ilk_txx_cfg0 cvmx_ilk_txx_cfg0_t;
+
+/**
+ * cvmx_ilk_tx#_cfg1
+ *
+ * Secondary TX link configuration: packet enable/flush, link-level
+ * flow-control controls (TX/RX_LINK_FC*), PTP delay, skip count and
+ * rate-match enable (RMATCH). The CN78XX layout adds SER_LOW,
+ * BRST_MIN and SER_LIMIT in the upper bits; CN68XXP1 lacks PKT_BUSY.
+ */
+union cvmx_ilk_txx_cfg1 {
+ u64 u64;
+ struct cvmx_ilk_txx_cfg1_s {
+ u64 ser_low : 4;
+ u64 reserved_53_59 : 7;
+ u64 brst_min : 5;
+ u64 reserved_43_47 : 5;
+ u64 ser_limit : 10;
+ u64 pkt_busy : 1;
+ u64 pipe_crd_dis : 1;
+ u64 ptp_delay : 5;
+ u64 skip_cnt : 4;
+ u64 pkt_flush : 1;
+ u64 pkt_ena : 1;
+ u64 la_mode : 1;
+ u64 tx_link_fc : 1;
+ u64 rx_link_fc : 1;
+ u64 reserved_12_16 : 5;
+ u64 tx_link_fc_jam : 1;
+ u64 rx_link_fc_pkt : 1;
+ u64 rx_link_fc_ign : 1;
+ u64 rmatch : 1;
+ u64 tx_mltuse : 8;
+ } s;
+ struct cvmx_ilk_txx_cfg1_cn68xx {
+ u64 reserved_33_63 : 31;
+ u64 pkt_busy : 1;
+ u64 pipe_crd_dis : 1;
+ u64 ptp_delay : 5;
+ u64 skip_cnt : 4;
+ u64 pkt_flush : 1;
+ u64 pkt_ena : 1;
+ u64 la_mode : 1;
+ u64 tx_link_fc : 1;
+ u64 rx_link_fc : 1;
+ u64 reserved_12_16 : 5;
+ u64 tx_link_fc_jam : 1;
+ u64 rx_link_fc_pkt : 1;
+ u64 rx_link_fc_ign : 1;
+ u64 rmatch : 1;
+ u64 tx_mltuse : 8;
+ } cn68xx;
+ struct cvmx_ilk_txx_cfg1_cn68xxp1 {
+ u64 reserved_32_63 : 32;
+ u64 pipe_crd_dis : 1;
+ u64 ptp_delay : 5;
+ u64 skip_cnt : 4;
+ u64 pkt_flush : 1;
+ u64 pkt_ena : 1;
+ u64 la_mode : 1;
+ u64 tx_link_fc : 1;
+ u64 rx_link_fc : 1;
+ u64 reserved_12_16 : 5;
+ u64 tx_link_fc_jam : 1;
+ u64 rx_link_fc_pkt : 1;
+ u64 rx_link_fc_ign : 1;
+ u64 rmatch : 1;
+ u64 tx_mltuse : 8;
+ } cn68xxp1;
+ struct cvmx_ilk_txx_cfg1_s cn78xx;
+ struct cvmx_ilk_txx_cfg1_s cn78xxp1;
+};
+
+typedef union cvmx_ilk_txx_cfg1 cvmx_ilk_txx_cfg1_t;
+
+/**
+ * cvmx_ilk_tx#_cha_xon#
+ *
+ * Per-channel XON STATUS bits 63:0 (CN78XX models only).
+ */
+union cvmx_ilk_txx_cha_xonx {
+ u64 u64;
+ struct cvmx_ilk_txx_cha_xonx_s {
+ u64 status : 64;
+ } s;
+ struct cvmx_ilk_txx_cha_xonx_s cn78xx;
+ struct cvmx_ilk_txx_cha_xonx_s cn78xxp1;
+};
+
+typedef union cvmx_ilk_txx_cha_xonx cvmx_ilk_txx_cha_xonx_t;
+
+/**
+ * cvmx_ilk_tx#_dbg
+ *
+ * TX debug register, including hooks to force bad CRC24/control words
+ * (TX_BAD_* in bits 2:0); CN78XX adds DATA_RATE and LOW_DELAY fields.
+ */
+union cvmx_ilk_txx_dbg {
+ u64 u64;
+ struct cvmx_ilk_txx_dbg_s {
+ u64 reserved_29_63 : 35;
+ u64 data_rate : 13;
+ u64 low_delay : 6;
+ u64 reserved_3_9 : 7;
+ u64 tx_bad_crc24 : 1;
+ u64 tx_bad_ctlw2 : 1;
+ u64 tx_bad_ctlw1 : 1;
+ } s;
+ struct cvmx_ilk_txx_dbg_cn68xx {
+ u64 reserved_3_63 : 61;
+ u64 tx_bad_crc24 : 1;
+ u64 tx_bad_ctlw2 : 1;
+ u64 tx_bad_ctlw1 : 1;
+ } cn68xx;
+ struct cvmx_ilk_txx_dbg_cn68xx cn68xxp1;
+ struct cvmx_ilk_txx_dbg_s cn78xx;
+ struct cvmx_ilk_txx_dbg_s cn78xxp1;
+};
+
+typedef union cvmx_ilk_txx_dbg cvmx_ilk_txx_dbg_t;
+
+/**
+ * cvmx_ilk_tx#_err_cfg
+ *
+ * ECC test/control (CN78XX only): FWC/TXF bit-flip injection
+ * (FWC_FLIP bits 19:18, TXF_FLIP bits 17:16) and correction disable
+ * (FWC_COR_DIS bit 1, TXF_COR_DIS bit 0).
+ */
+union cvmx_ilk_txx_err_cfg {
+ u64 u64;
+ struct cvmx_ilk_txx_err_cfg_s {
+ u64 reserved_20_63 : 44;
+ u64 fwc_flip : 2;
+ u64 txf_flip : 2;
+ u64 reserved_2_15 : 14;
+ u64 fwc_cor_dis : 1;
+ u64 txf_cor_dis : 1;
+ } s;
+ struct cvmx_ilk_txx_err_cfg_s cn78xx;
+ struct cvmx_ilk_txx_err_cfg_s cn78xxp1;
+};
+
+typedef union cvmx_ilk_txx_err_cfg cvmx_ilk_txx_err_cfg_t;
+
+/**
+ * cvmx_ilk_tx#_flow_ctl0
+ *
+ * TX flow-control STATUS word, bits 63:0.
+ */
+union cvmx_ilk_txx_flow_ctl0 {
+ u64 u64;
+ struct cvmx_ilk_txx_flow_ctl0_s {
+ u64 status : 64;
+ } s;
+ struct cvmx_ilk_txx_flow_ctl0_s cn68xx;
+ struct cvmx_ilk_txx_flow_ctl0_s cn68xxp1;
+};
+
+typedef union cvmx_ilk_txx_flow_ctl0 cvmx_ilk_txx_flow_ctl0_t;
+
+/**
+ * cvmx_ilk_tx#_flow_ctl1
+ *
+ * Notes:
+ * Do not publish.
+ *
+ * All 64 bits are reserved.
+ */
+union cvmx_ilk_txx_flow_ctl1 {
+ u64 u64;
+ struct cvmx_ilk_txx_flow_ctl1_s {
+ u64 reserved_0_63 : 64;
+ } s;
+ struct cvmx_ilk_txx_flow_ctl1_s cn68xx;
+ struct cvmx_ilk_txx_flow_ctl1_s cn68xxp1;
+};
+
+typedef union cvmx_ilk_txx_flow_ctl1 cvmx_ilk_txx_flow_ctl1_t;
+
+/**
+ * cvmx_ilk_tx#_idx_cal
+ *
+ * Calendar-memory index register: INC in bits 13:8, INDEX in bits 5:0.
+ */
+union cvmx_ilk_txx_idx_cal {
+ u64 u64;
+ struct cvmx_ilk_txx_idx_cal_s {
+ u64 reserved_14_63 : 50;
+ u64 inc : 6;
+ u64 reserved_6_7 : 2;
+ u64 index : 6;
+ } s;
+ struct cvmx_ilk_txx_idx_cal_s cn68xx;
+ struct cvmx_ilk_txx_idx_cal_s cn68xxp1;
+};
+
+typedef union cvmx_ilk_txx_idx_cal cvmx_ilk_txx_idx_cal_t;
+
+/**
+ * cvmx_ilk_tx#_idx_pmap
+ *
+ * Port-map memory index register: INC in bits 22:16, INDEX in bits 6:0.
+ */
+union cvmx_ilk_txx_idx_pmap {
+ u64 u64;
+ struct cvmx_ilk_txx_idx_pmap_s {
+ u64 reserved_23_63 : 41;
+ u64 inc : 7;
+ u64 reserved_7_15 : 9;
+ u64 index : 7;
+ } s;
+ struct cvmx_ilk_txx_idx_pmap_s cn68xx;
+ struct cvmx_ilk_txx_idx_pmap_s cn68xxp1;
+};
+
+typedef union cvmx_ilk_txx_idx_pmap cvmx_ilk_txx_idx_pmap_t;
+
+/**
+ * cvmx_ilk_tx#_idx_stat0
+ *
+ * Statistics-memory index register: CLR in bit 31, INC in bits 23:16,
+ * INDEX in bits 7:0.
+ */
+union cvmx_ilk_txx_idx_stat0 {
+ u64 u64;
+ struct cvmx_ilk_txx_idx_stat0_s {
+ u64 reserved_32_63 : 32;
+ u64 clr : 1;
+ u64 reserved_24_30 : 7;
+ u64 inc : 8;
+ u64 reserved_8_15 : 8;
+ u64 index : 8;
+ } s;
+ struct cvmx_ilk_txx_idx_stat0_s cn68xx;
+ struct cvmx_ilk_txx_idx_stat0_s cn68xxp1;
+};
+
+typedef union cvmx_ilk_txx_idx_stat0 cvmx_ilk_txx_idx_stat0_t;
+
+/**
+ * cvmx_ilk_tx#_idx_stat1
+ *
+ * Statistics-memory index register (same layout as ILK_TXx_IDX_STAT0):
+ * CLR in bit 31, INC in bits 23:16, INDEX in bits 7:0.
+ */
+union cvmx_ilk_txx_idx_stat1 {
+ u64 u64;
+ struct cvmx_ilk_txx_idx_stat1_s {
+ u64 reserved_32_63 : 32;
+ u64 clr : 1;
+ u64 reserved_24_30 : 7;
+ u64 inc : 8;
+ u64 reserved_8_15 : 8;
+ u64 index : 8;
+ } s;
+ struct cvmx_ilk_txx_idx_stat1_s cn68xx;
+ struct cvmx_ilk_txx_idx_stat1_s cn68xxp1;
+};
+
+typedef union cvmx_ilk_txx_idx_stat1 cvmx_ilk_txx_idx_stat1_t;
+
+/**
+ * cvmx_ilk_tx#_int
+ *
+ * TX interrupt flags; CN78XX adds ECC bits (FWC/TXF SBE/DBE, bits 7:4)
+ * on top of the CN68XX layout (bits 3:0).
+ */
+union cvmx_ilk_txx_int {
+ u64 u64;
+ struct cvmx_ilk_txx_int_s {
+ u64 reserved_8_63 : 56;
+ u64 fwc_dbe : 1;
+ u64 fwc_sbe : 1;
+ u64 txf_dbe : 1;
+ u64 txf_sbe : 1;
+ u64 stat_cnt_ovfl : 1;
+ u64 bad_pipe : 1;
+ u64 bad_seq : 1;
+ u64 txf_err : 1;
+ } s;
+ struct cvmx_ilk_txx_int_cn68xx {
+ u64 reserved_4_63 : 60;
+ u64 stat_cnt_ovfl : 1;
+ u64 bad_pipe : 1;
+ u64 bad_seq : 1;
+ u64 txf_err : 1;
+ } cn68xx;
+ struct cvmx_ilk_txx_int_cn68xx cn68xxp1;
+ struct cvmx_ilk_txx_int_s cn78xx;
+ struct cvmx_ilk_txx_int_s cn78xxp1;
+};
+
+typedef union cvmx_ilk_txx_int cvmx_ilk_txx_int_t;
+
+/**
+ * cvmx_ilk_tx#_int_en
+ *
+ * TX interrupt enables matching ILK_TXx_INT, bits 3:0
+ * (CN68XX models only).
+ */
+union cvmx_ilk_txx_int_en {
+ u64 u64;
+ struct cvmx_ilk_txx_int_en_s {
+ u64 reserved_4_63 : 60;
+ u64 stat_cnt_ovfl : 1;
+ u64 bad_pipe : 1;
+ u64 bad_seq : 1;
+ u64 txf_err : 1;
+ } s;
+ struct cvmx_ilk_txx_int_en_s cn68xx;
+ struct cvmx_ilk_txx_int_en_s cn68xxp1;
+};
+
+typedef union cvmx_ilk_txx_int_en cvmx_ilk_txx_int_en_t;
+
+/**
+ * cvmx_ilk_tx#_mem_cal0
+ *
+ * Notes:
+ * Software must always read ILK_TXx_MEM_CAL0 then ILK_TXx_MEM_CAL1. Software
+ * must never read them in reverse order or read one without reading the
+ * other.
+ *
+ * Software must always write ILK_TXx_MEM_CAL0 then ILK_TXx_MEM_CAL1.
+ * Software must never write them in reverse order or write one without
+ * writing the other.
+ *
+ * Each calendar entry is a 2-bit ENTRY_CTL plus a 6-bit BPID; this
+ * register holds entries 0-3 (ILK_TXx_MEM_CAL1 holds 4-7).
+ */
+union cvmx_ilk_txx_mem_cal0 {
+ u64 u64;
+ struct cvmx_ilk_txx_mem_cal0_s {
+ u64 reserved_36_63 : 28;
+ u64 entry_ctl3 : 2;
+ u64 reserved_33_33 : 1;
+ u64 bpid3 : 6;
+ u64 entry_ctl2 : 2;
+ u64 reserved_24_24 : 1;
+ u64 bpid2 : 6;
+ u64 entry_ctl1 : 2;
+ u64 reserved_15_15 : 1;
+ u64 bpid1 : 6;
+ u64 entry_ctl0 : 2;
+ u64 reserved_6_6 : 1;
+ u64 bpid0 : 6;
+ } s;
+ struct cvmx_ilk_txx_mem_cal0_s cn68xx;
+ struct cvmx_ilk_txx_mem_cal0_s cn68xxp1;
+};
+
+typedef union cvmx_ilk_txx_mem_cal0 cvmx_ilk_txx_mem_cal0_t;
+
+/**
+ * cvmx_ilk_tx#_mem_cal1
+ *
+ * Notes:
+ * Software must always read ILK_TXx_MEM_CAL0 then ILK_TXx_MEM_CAL1. Software
+ * must never read them in reverse order or read one without reading the
+ * other.
+ *
+ * Software must always write ILK_TXx_MEM_CAL0 then ILK_TXx_MEM_CAL1.
+ * Software must never write them in reverse order or write one without
+ * writing the other.
+ *
+ * Each calendar entry is a 2-bit ENTRY_CTL plus a 6-bit BPID; this
+ * register holds entries 4-7 (ILK_TXx_MEM_CAL0 holds 0-3).
+ */
+union cvmx_ilk_txx_mem_cal1 {
+ u64 u64;
+ struct cvmx_ilk_txx_mem_cal1_s {
+ u64 reserved_36_63 : 28;
+ u64 entry_ctl7 : 2;
+ u64 reserved_33_33 : 1;
+ u64 bpid7 : 6;
+ u64 entry_ctl6 : 2;
+ u64 reserved_24_24 : 1;
+ u64 bpid6 : 6;
+ u64 entry_ctl5 : 2;
+ u64 reserved_15_15 : 1;
+ u64 bpid5 : 6;
+ u64 entry_ctl4 : 2;
+ u64 reserved_6_6 : 1;
+ u64 bpid4 : 6;
+ } s;
+ struct cvmx_ilk_txx_mem_cal1_s cn68xx;
+ struct cvmx_ilk_txx_mem_cal1_s cn68xxp1;
+};
+
+typedef union cvmx_ilk_txx_mem_cal1 cvmx_ilk_txx_mem_cal1_t;
+
+/**
+ * cvmx_ilk_tx#_mem_pmap
+ *
+ * Port-map memory data: REMAP in bit 16 (absent on CN68XXP1),
+ * CHANNEL in bits 7:0.
+ */
+union cvmx_ilk_txx_mem_pmap {
+ u64 u64;
+ struct cvmx_ilk_txx_mem_pmap_s {
+ u64 reserved_17_63 : 47;
+ u64 remap : 1;
+ u64 reserved_8_15 : 8;
+ u64 channel : 8;
+ } s;
+ struct cvmx_ilk_txx_mem_pmap_s cn68xx;
+ struct cvmx_ilk_txx_mem_pmap_cn68xxp1 {
+ u64 reserved_8_63 : 56;
+ u64 channel : 8;
+ } cn68xxp1;
+};
+
+typedef union cvmx_ilk_txx_mem_pmap cvmx_ilk_txx_mem_pmap_t;
+
+/**
+ * cvmx_ilk_tx#_mem_stat0
+ *
+ * TX packet counter memory: TX_PKT in bits 27:0.
+ */
+union cvmx_ilk_txx_mem_stat0 {
+ u64 u64;
+ struct cvmx_ilk_txx_mem_stat0_s {
+ u64 reserved_28_63 : 36;
+ u64 tx_pkt : 28;
+ } s;
+ struct cvmx_ilk_txx_mem_stat0_s cn68xx;
+ struct cvmx_ilk_txx_mem_stat0_s cn68xxp1;
+};
+
+typedef union cvmx_ilk_txx_mem_stat0 cvmx_ilk_txx_mem_stat0_t;
+
+/**
+ * cvmx_ilk_tx#_mem_stat1
+ *
+ * TX byte counter memory: TX_BYTES in bits 35:0.
+ */
+union cvmx_ilk_txx_mem_stat1 {
+ u64 u64;
+ struct cvmx_ilk_txx_mem_stat1_s {
+ u64 reserved_36_63 : 28;
+ u64 tx_bytes : 36;
+ } s;
+ struct cvmx_ilk_txx_mem_stat1_s cn68xx;
+ struct cvmx_ilk_txx_mem_stat1_s cn68xxp1;
+};
+
+typedef union cvmx_ilk_txx_mem_stat1 cvmx_ilk_txx_mem_stat1_t;
+
+/**
+ * cvmx_ilk_tx#_pipe
+ *
+ * PKO pipe range for this link: NUMP in bits 23:16, BASE in bits 6:0.
+ */
+union cvmx_ilk_txx_pipe {
+ u64 u64;
+ struct cvmx_ilk_txx_pipe_s {
+ u64 reserved_24_63 : 40;
+ u64 nump : 8;
+ u64 reserved_7_15 : 9;
+ u64 base : 7;
+ } s;
+ struct cvmx_ilk_txx_pipe_s cn68xx;
+ struct cvmx_ilk_txx_pipe_s cn68xxp1;
+};
+
+typedef union cvmx_ilk_txx_pipe cvmx_ilk_txx_pipe_t;
+
+/**
+ * cvmx_ilk_tx#_rmatch
+ *
+ * Rate-match parameters: GRNLRTY in bits 49:48, BRST_LIMIT in
+ * bits 47:32, TIME_LIMIT in bits 31:16, RATE_LIMIT in bits 15:0.
+ */
+union cvmx_ilk_txx_rmatch {
+ u64 u64;
+ struct cvmx_ilk_txx_rmatch_s {
+ u64 reserved_50_63 : 14;
+ u64 grnlrty : 2;
+ u64 brst_limit : 16;
+ u64 time_limit : 16;
+ u64 rate_limit : 16;
+ } s;
+ struct cvmx_ilk_txx_rmatch_s cn68xx;
+ struct cvmx_ilk_txx_rmatch_s cn68xxp1;
+ struct cvmx_ilk_txx_rmatch_s cn78xx;
+ struct cvmx_ilk_txx_rmatch_s cn78xxp1;
+};
+
+typedef union cvmx_ilk_txx_rmatch cvmx_ilk_txx_rmatch_t;
+
+#endif
--
2.35.1
^ permalink raw reply related [flat|nested] 52+ messages in thread* [PATCH 03/52] mips: octeon: Add cvmx-iob-defs.h header file
2022-03-30 10:06 [PATCH 00/52] mips: octeon: Add ethernet support Stefan Roese
2022-03-30 10:06 ` [PATCH 01/52] mips: octeon: Add misc cvmx-* header files Stefan Roese
2022-03-30 10:06 ` [PATCH 02/52] mips: octeon: Add cvmx-ilk-defs.h header file Stefan Roese
@ 2022-03-30 10:06 ` Stefan Roese
2022-03-30 10:06 ` [PATCH 04/52] mips: octeon: Add cvmx-lbk-defs.h " Stefan Roese
` (46 subsequent siblings)
49 siblings, 0 replies; 52+ messages in thread
From: Stefan Roese @ 2022-03-30 10:06 UTC (permalink / raw)
To: u-boot; +Cc: awilliams, cchavva
From: Aaron Williams <awilliams@marvell.com>
Import cvmx-iob-defs.h header file from 2013 U-Boot. It will be used
by the later added drivers to support networking on the MIPS Octeon II /
III platforms.
Signed-off-by: Aaron Williams <awilliams@marvell.com>
Signed-off-by: Stefan Roese <sr@denx.de>
---
.../mach-octeon/include/mach/cvmx-iob-defs.h | 1328 +++++++++++++++++
1 file changed, 1328 insertions(+)
create mode 100644 arch/mips/mach-octeon/include/mach/cvmx-iob-defs.h
diff --git a/arch/mips/mach-octeon/include/mach/cvmx-iob-defs.h b/arch/mips/mach-octeon/include/mach/cvmx-iob-defs.h
new file mode 100644
index 000000000000..0af444daf418
--- /dev/null
+++ b/arch/mips/mach-octeon/include/mach/cvmx-iob-defs.h
@@ -0,0 +1,1328 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018-2022 Marvell International Ltd.
+ *
+ * Configuration and status register (CSR) type definitions for
+ * Octeon iob.
+ */
+
+#ifndef __CVMX_IOB_DEFS_H__
+#define __CVMX_IOB_DEFS_H__
+
+#define CVMX_IOB_BIST_STATUS (0x00011800F00007F8ull)
+#define CVMX_IOB_CHIP_CUR_PWR (0x00011800F0000828ull)
+#define CVMX_IOB_CHIP_GLB_PWR_THROTTLE (0x00011800F0000808ull)
+#define CVMX_IOB_CHIP_PWR_OUT (0x00011800F0000818ull)
+#define CVMX_IOB_CTL_STATUS (0x00011800F0000050ull)
+#define CVMX_IOB_DWB_PRI_CNT (0x00011800F0000028ull)
+#define CVMX_IOB_FAU_TIMEOUT (0x00011800F0000000ull)
+#define CVMX_IOB_I2C_PRI_CNT (0x00011800F0000010ull)
+#define CVMX_IOB_INB_CONTROL_MATCH (0x00011800F0000078ull)
+#define CVMX_IOB_INB_CONTROL_MATCH_ENB (0x00011800F0000088ull)
+#define CVMX_IOB_INB_DATA_MATCH (0x00011800F0000070ull)
+#define CVMX_IOB_INB_DATA_MATCH_ENB (0x00011800F0000080ull)
+#define CVMX_IOB_INT_ENB (0x00011800F0000060ull)
+#define CVMX_IOB_INT_SUM (0x00011800F0000058ull)
+#define CVMX_IOB_N2C_L2C_PRI_CNT (0x00011800F0000020ull)
+#define CVMX_IOB_N2C_RSP_PRI_CNT (0x00011800F0000008ull)
+#define CVMX_IOB_OUTB_COM_PRI_CNT (0x00011800F0000040ull)
+#define CVMX_IOB_OUTB_CONTROL_MATCH (0x00011800F0000098ull)
+#define CVMX_IOB_OUTB_CONTROL_MATCH_ENB (0x00011800F00000A8ull)
+#define CVMX_IOB_OUTB_DATA_MATCH (0x00011800F0000090ull)
+#define CVMX_IOB_OUTB_DATA_MATCH_ENB (0x00011800F00000A0ull)
+#define CVMX_IOB_OUTB_FPA_PRI_CNT (0x00011800F0000048ull)
+#define CVMX_IOB_OUTB_REQ_PRI_CNT (0x00011800F0000038ull)
+#define CVMX_IOB_P2C_REQ_PRI_CNT (0x00011800F0000018ull)
+#define CVMX_IOB_PKT_ERR (0x00011800F0000068ull)
+#define CVMX_IOB_PP_BIST_STATUS (0x00011800F0000700ull)
+#define CVMX_IOB_TO_CMB_CREDITS (0x00011800F00000B0ull)
+#define CVMX_IOB_TO_NCB_DID_00_CREDITS (0x00011800F0000800ull)
+#define CVMX_IOB_TO_NCB_DID_111_CREDITS (0x00011800F0000B78ull)
+#define CVMX_IOB_TO_NCB_DID_223_CREDITS (0x00011800F0000EF8ull)
+#define CVMX_IOB_TO_NCB_DID_24_CREDITS (0x00011800F00008C0ull)
+#define CVMX_IOB_TO_NCB_DID_32_CREDITS (0x00011800F0000900ull)
+#define CVMX_IOB_TO_NCB_DID_40_CREDITS (0x00011800F0000940ull)
+#define CVMX_IOB_TO_NCB_DID_55_CREDITS (0x00011800F00009B8ull)
+#define CVMX_IOB_TO_NCB_DID_64_CREDITS (0x00011800F0000A00ull)
+#define CVMX_IOB_TO_NCB_DID_79_CREDITS (0x00011800F0000A78ull)
+#define CVMX_IOB_TO_NCB_DID_96_CREDITS (0x00011800F0000B00ull)
+#define CVMX_IOB_TO_NCB_DID_98_CREDITS (0x00011800F0000B10ull)
+
+/**
+ * cvmx_iob_bist_status
+ *
+ * The result of the BIST run on the IOB memories.
+ *
+ */
+union cvmx_iob_bist_status {
+ u64 u64;
+ struct cvmx_iob_bist_status_s {
+ u64 reserved_2_63 : 62;
+ u64 ibd : 1;
+ u64 icd : 1;
+ } s;
+ struct cvmx_iob_bist_status_cn30xx {
+ u64 reserved_18_63 : 46;
+ u64 icnrcb : 1;
+ u64 icr0 : 1;
+ u64 icr1 : 1;
+ u64 icnr1 : 1;
+ u64 icnr0 : 1;
+ u64 ibdr0 : 1;
+ u64 ibdr1 : 1;
+ u64 ibr0 : 1;
+ u64 ibr1 : 1;
+ u64 icnrt : 1;
+ u64 ibrq0 : 1;
+ u64 ibrq1 : 1;
+ u64 icrn0 : 1;
+ u64 icrn1 : 1;
+ u64 icrp0 : 1;
+ u64 icrp1 : 1;
+ u64 ibd : 1;
+ u64 icd : 1;
+ } cn30xx;
+ struct cvmx_iob_bist_status_cn30xx cn31xx;
+ struct cvmx_iob_bist_status_cn30xx cn38xx;
+ struct cvmx_iob_bist_status_cn30xx cn38xxp2;
+ struct cvmx_iob_bist_status_cn30xx cn50xx;
+ struct cvmx_iob_bist_status_cn30xx cn52xx;
+ struct cvmx_iob_bist_status_cn30xx cn52xxp1;
+ struct cvmx_iob_bist_status_cn30xx cn56xx;
+ struct cvmx_iob_bist_status_cn30xx cn56xxp1;
+ struct cvmx_iob_bist_status_cn30xx cn58xx;
+ struct cvmx_iob_bist_status_cn30xx cn58xxp1;
+ struct cvmx_iob_bist_status_cn61xx {
+ u64 reserved_23_63 : 41;
+ u64 xmdfif : 1;
+ u64 xmcfif : 1;
+ u64 iorfif : 1;
+ u64 rsdfif : 1;
+ u64 iocfif : 1;
+ u64 icnrcb : 1;
+ u64 icr0 : 1;
+ u64 icr1 : 1;
+ u64 icnr1 : 1;
+ u64 icnr0 : 1;
+ u64 ibdr0 : 1;
+ u64 ibdr1 : 1;
+ u64 ibr0 : 1;
+ u64 ibr1 : 1;
+ u64 icnrt : 1;
+ u64 ibrq0 : 1;
+ u64 ibrq1 : 1;
+ u64 icrn0 : 1;
+ u64 icrn1 : 1;
+ u64 icrp0 : 1;
+ u64 icrp1 : 1;
+ u64 ibd : 1;
+ u64 icd : 1;
+ } cn61xx;
+ struct cvmx_iob_bist_status_cn61xx cn63xx;
+ struct cvmx_iob_bist_status_cn61xx cn63xxp1;
+ struct cvmx_iob_bist_status_cn61xx cn66xx;
+ struct cvmx_iob_bist_status_cn68xx {
+ u64 reserved_18_63 : 46;
+ u64 xmdfif : 1;
+ u64 xmcfif : 1;
+ u64 iorfif : 1;
+ u64 rsdfif : 1;
+ u64 iocfif : 1;
+ u64 icnrcb : 1;
+ u64 icr0 : 1;
+ u64 icr1 : 1;
+ u64 icnr0 : 1;
+ u64 ibr0 : 1;
+ u64 ibr1 : 1;
+ u64 icnrt : 1;
+ u64 ibrq0 : 1;
+ u64 ibrq1 : 1;
+ u64 icrn0 : 1;
+ u64 icrn1 : 1;
+ u64 ibd : 1;
+ u64 icd : 1;
+ } cn68xx;
+ struct cvmx_iob_bist_status_cn68xx cn68xxp1;
+ struct cvmx_iob_bist_status_cn61xx cn70xx;
+ struct cvmx_iob_bist_status_cn61xx cn70xxp1;
+ struct cvmx_iob_bist_status_cn61xx cnf71xx;
+};
+
+typedef union cvmx_iob_bist_status cvmx_iob_bist_status_t;
+
+/**
+ * cvmx_iob_chip_cur_pwr
+ */
+union cvmx_iob_chip_cur_pwr {
+ u64 u64;
+ struct cvmx_iob_chip_cur_pwr_s {
+ u64 reserved_8_63 : 56;
+ u64 current_power_setting : 8;
+ } s;
+ struct cvmx_iob_chip_cur_pwr_s cn70xx;
+ struct cvmx_iob_chip_cur_pwr_s cn70xxp1;
+};
+
+typedef union cvmx_iob_chip_cur_pwr cvmx_iob_chip_cur_pwr_t;
+
+/**
+ * cvmx_iob_chip_glb_pwr_throttle
+ *
+ * Controls the min/max power settings.
+ *
+ */
+union cvmx_iob_chip_glb_pwr_throttle {
+ u64 u64;
+ struct cvmx_iob_chip_glb_pwr_throttle_s {
+ u64 reserved_34_63 : 30;
+ u64 pwr_bw : 2;
+ u64 pwr_max : 8;
+ u64 pwr_min : 8;
+ u64 pwr_setting : 16;
+ } s;
+ struct cvmx_iob_chip_glb_pwr_throttle_s cn70xx;
+ struct cvmx_iob_chip_glb_pwr_throttle_s cn70xxp1;
+};
+
+typedef union cvmx_iob_chip_glb_pwr_throttle cvmx_iob_chip_glb_pwr_throttle_t;
+
+/**
+ * cvmx_iob_chip_pwr_out
+ *
+ * Power numbers from the various partitions on the chip.
+ *
+ */
+union cvmx_iob_chip_pwr_out {
+ u64 u64;
+ struct cvmx_iob_chip_pwr_out_s {
+ u64 cpu_pwr : 16;
+ u64 chip_power : 16;
+ u64 coproc_power : 16;
+ u64 avg_chip_power : 16;
+ } s;
+ struct cvmx_iob_chip_pwr_out_s cn70xx;
+ struct cvmx_iob_chip_pwr_out_s cn70xxp1;
+};
+
+typedef union cvmx_iob_chip_pwr_out cvmx_iob_chip_pwr_out_t;
+
+/**
+ * cvmx_iob_ctl_status
+ *
+ * IOB Control Status = IOB Control and Status Register
+ * Provides control for IOB functions.
+ */
+union cvmx_iob_ctl_status {
+ u64 u64;
+ struct cvmx_iob_ctl_status_s {
+ u64 reserved_11_63 : 53;
+ u64 fif_dly : 1;
+ u64 xmc_per : 4;
+ u64 reserved_3_5 : 3;
+ u64 pko_enb : 1;
+ u64 dwb_enb : 1;
+ u64 fau_end : 1;
+ } s;
+ struct cvmx_iob_ctl_status_cn30xx {
+ u64 reserved_5_63 : 59;
+ u64 outb_mat : 1;
+ u64 inb_mat : 1;
+ u64 pko_enb : 1;
+ u64 dwb_enb : 1;
+ u64 fau_end : 1;
+ } cn30xx;
+ struct cvmx_iob_ctl_status_cn30xx cn31xx;
+ struct cvmx_iob_ctl_status_cn30xx cn38xx;
+ struct cvmx_iob_ctl_status_cn30xx cn38xxp2;
+ struct cvmx_iob_ctl_status_cn30xx cn50xx;
+ struct cvmx_iob_ctl_status_cn52xx {
+ u64 reserved_6_63 : 58;
+ u64 rr_mode : 1;
+ u64 outb_mat : 1;
+ u64 inb_mat : 1;
+ u64 pko_enb : 1;
+ u64 dwb_enb : 1;
+ u64 fau_end : 1;
+ } cn52xx;
+ struct cvmx_iob_ctl_status_cn30xx cn52xxp1;
+ struct cvmx_iob_ctl_status_cn30xx cn56xx;
+ struct cvmx_iob_ctl_status_cn30xx cn56xxp1;
+ struct cvmx_iob_ctl_status_cn30xx cn58xx;
+ struct cvmx_iob_ctl_status_cn30xx cn58xxp1;
+ struct cvmx_iob_ctl_status_cn61xx {
+ u64 reserved_11_63 : 53;
+ u64 fif_dly : 1;
+ u64 xmc_per : 4;
+ u64 rr_mode : 1;
+ u64 outb_mat : 1;
+ u64 inb_mat : 1;
+ u64 pko_enb : 1;
+ u64 dwb_enb : 1;
+ u64 fau_end : 1;
+ } cn61xx;
+ struct cvmx_iob_ctl_status_cn63xx {
+ u64 reserved_10_63 : 54;
+ u64 xmc_per : 4;
+ u64 rr_mode : 1;
+ u64 outb_mat : 1;
+ u64 inb_mat : 1;
+ u64 pko_enb : 1;
+ u64 dwb_enb : 1;
+ u64 fau_end : 1;
+ } cn63xx;
+ struct cvmx_iob_ctl_status_cn63xx cn63xxp1;
+ struct cvmx_iob_ctl_status_cn61xx cn66xx;
+ struct cvmx_iob_ctl_status_cn68xx {
+ u64 reserved_11_63 : 53;
+ u64 fif_dly : 1;
+ u64 xmc_per : 4;
+ u64 rsvr5 : 1;
+ u64 outb_mat : 1;
+ u64 inb_mat : 1;
+ u64 pko_enb : 1;
+ u64 dwb_enb : 1;
+ u64 fau_end : 1;
+ } cn68xx;
+ struct cvmx_iob_ctl_status_cn68xx cn68xxp1;
+ struct cvmx_iob_ctl_status_cn70xx {
+ u64 reserved_10_63 : 54;
+ u64 xmc_per : 4;
+ u64 rr_mode : 1;
+ u64 rsv4 : 1;
+ u64 rsv3 : 1;
+ u64 pko_enb : 1;
+ u64 dwb_enb : 1;
+ u64 fau_end : 1;
+ } cn70xx;
+ struct cvmx_iob_ctl_status_cn70xx cn70xxp1;
+ struct cvmx_iob_ctl_status_cn61xx cnf71xx;
+};
+
+typedef union cvmx_iob_ctl_status cvmx_iob_ctl_status_t;
+
+/**
+ * cvmx_iob_dwb_pri_cnt
+ *
+ * DWB To CMB Priority Counter = Don't Write Back to CMB Priority Counter Enable and Timer Value
+ * Enables and supplies the timeout count for raising the priority of Don't Write Back request to
+ * the L2C.
+ */
+union cvmx_iob_dwb_pri_cnt {
+ u64 u64;
+ struct cvmx_iob_dwb_pri_cnt_s {
+ u64 reserved_16_63 : 48;
+ u64 cnt_enb : 1;
+ u64 cnt_val : 15;
+ } s;
+ struct cvmx_iob_dwb_pri_cnt_s cn38xx;
+ struct cvmx_iob_dwb_pri_cnt_s cn38xxp2;
+ struct cvmx_iob_dwb_pri_cnt_s cn52xx;
+ struct cvmx_iob_dwb_pri_cnt_s cn52xxp1;
+ struct cvmx_iob_dwb_pri_cnt_s cn56xx;
+ struct cvmx_iob_dwb_pri_cnt_s cn56xxp1;
+ struct cvmx_iob_dwb_pri_cnt_s cn58xx;
+ struct cvmx_iob_dwb_pri_cnt_s cn58xxp1;
+ struct cvmx_iob_dwb_pri_cnt_s cn61xx;
+ struct cvmx_iob_dwb_pri_cnt_s cn63xx;
+ struct cvmx_iob_dwb_pri_cnt_s cn63xxp1;
+ struct cvmx_iob_dwb_pri_cnt_s cn66xx;
+ struct cvmx_iob_dwb_pri_cnt_s cn70xx;
+ struct cvmx_iob_dwb_pri_cnt_s cn70xxp1;
+ struct cvmx_iob_dwb_pri_cnt_s cnf71xx;
+};
+
+typedef union cvmx_iob_dwb_pri_cnt cvmx_iob_dwb_pri_cnt_t;
+
+/**
+ * cvmx_iob_fau_timeout
+ *
+ * FAU Timeout = Fetch and Add Unit Tag-Switch Timeout
+ * How many clock ticks the FAU unit will wait for a tag-switch before timing out.
+ * for Queue 0.
+ */
+union cvmx_iob_fau_timeout {
+ u64 u64;
+ struct cvmx_iob_fau_timeout_s {
+ u64 reserved_13_63 : 51;
+ u64 tout_enb : 1;
+ u64 tout_val : 12;
+ } s;
+ struct cvmx_iob_fau_timeout_s cn30xx;
+ struct cvmx_iob_fau_timeout_s cn31xx;
+ struct cvmx_iob_fau_timeout_s cn38xx;
+ struct cvmx_iob_fau_timeout_s cn38xxp2;
+ struct cvmx_iob_fau_timeout_s cn50xx;
+ struct cvmx_iob_fau_timeout_s cn52xx;
+ struct cvmx_iob_fau_timeout_s cn52xxp1;
+ struct cvmx_iob_fau_timeout_s cn56xx;
+ struct cvmx_iob_fau_timeout_s cn56xxp1;
+ struct cvmx_iob_fau_timeout_s cn58xx;
+ struct cvmx_iob_fau_timeout_s cn58xxp1;
+ struct cvmx_iob_fau_timeout_s cn61xx;
+ struct cvmx_iob_fau_timeout_s cn63xx;
+ struct cvmx_iob_fau_timeout_s cn63xxp1;
+ struct cvmx_iob_fau_timeout_s cn66xx;
+ struct cvmx_iob_fau_timeout_s cn68xx;
+ struct cvmx_iob_fau_timeout_s cn68xxp1;
+ struct cvmx_iob_fau_timeout_s cn70xx;
+ struct cvmx_iob_fau_timeout_s cn70xxp1;
+ struct cvmx_iob_fau_timeout_s cnf71xx;
+};
+
+typedef union cvmx_iob_fau_timeout cvmx_iob_fau_timeout_t;
+
+/**
+ * cvmx_iob_i2c_pri_cnt
+ *
+ * IPD To CMB Store Priority Counter = IPD to CMB Store Priority Counter Enable and Timer Value
+ * Enables and supplies the timeout count for raising the priority of IPD Store access to the
+ * CMB.
+ */
+union cvmx_iob_i2c_pri_cnt {
+ u64 u64;
+ struct cvmx_iob_i2c_pri_cnt_s {
+ u64 reserved_16_63 : 48;
+ u64 cnt_enb : 1;
+ u64 cnt_val : 15;
+ } s;
+ struct cvmx_iob_i2c_pri_cnt_s cn38xx;
+ struct cvmx_iob_i2c_pri_cnt_s cn38xxp2;
+ struct cvmx_iob_i2c_pri_cnt_s cn52xx;
+ struct cvmx_iob_i2c_pri_cnt_s cn52xxp1;
+ struct cvmx_iob_i2c_pri_cnt_s cn56xx;
+ struct cvmx_iob_i2c_pri_cnt_s cn56xxp1;
+ struct cvmx_iob_i2c_pri_cnt_s cn58xx;
+ struct cvmx_iob_i2c_pri_cnt_s cn58xxp1;
+ struct cvmx_iob_i2c_pri_cnt_s cn61xx;
+ struct cvmx_iob_i2c_pri_cnt_s cn63xx;
+ struct cvmx_iob_i2c_pri_cnt_s cn63xxp1;
+ struct cvmx_iob_i2c_pri_cnt_s cn66xx;
+ struct cvmx_iob_i2c_pri_cnt_s cn70xx;
+ struct cvmx_iob_i2c_pri_cnt_s cn70xxp1;
+ struct cvmx_iob_i2c_pri_cnt_s cnf71xx;
+};
+
+typedef union cvmx_iob_i2c_pri_cnt cvmx_iob_i2c_pri_cnt_t;
+
+/**
+ * cvmx_iob_inb_control_match
+ *
+ * Match pattern for the inbound control to set the INB_MATCH_BIT.
+ *
+ */
+union cvmx_iob_inb_control_match {
+ u64 u64;
+ struct cvmx_iob_inb_control_match_s {
+ u64 reserved_29_63 : 35;
+ u64 mask : 8;
+ u64 opc : 4;
+ u64 dst : 9;
+ u64 src : 8;
+ } s;
+ struct cvmx_iob_inb_control_match_s cn30xx;
+ struct cvmx_iob_inb_control_match_s cn31xx;
+ struct cvmx_iob_inb_control_match_s cn38xx;
+ struct cvmx_iob_inb_control_match_s cn38xxp2;
+ struct cvmx_iob_inb_control_match_s cn50xx;
+ struct cvmx_iob_inb_control_match_s cn52xx;
+ struct cvmx_iob_inb_control_match_s cn52xxp1;
+ struct cvmx_iob_inb_control_match_s cn56xx;
+ struct cvmx_iob_inb_control_match_s cn56xxp1;
+ struct cvmx_iob_inb_control_match_s cn58xx;
+ struct cvmx_iob_inb_control_match_s cn58xxp1;
+ struct cvmx_iob_inb_control_match_s cn61xx;
+ struct cvmx_iob_inb_control_match_s cn63xx;
+ struct cvmx_iob_inb_control_match_s cn63xxp1;
+ struct cvmx_iob_inb_control_match_s cn66xx;
+ struct cvmx_iob_inb_control_match_s cn68xx;
+ struct cvmx_iob_inb_control_match_s cn68xxp1;
+ struct cvmx_iob_inb_control_match_s cn70xx;
+ struct cvmx_iob_inb_control_match_s cn70xxp1;
+ struct cvmx_iob_inb_control_match_s cnf71xx;
+};
+
+typedef union cvmx_iob_inb_control_match cvmx_iob_inb_control_match_t;
+
+/**
+ * cvmx_iob_inb_control_match_enb
+ *
+ * Enables the match of the corresponding bit in the IOB_INB_CONTROL_MATCH register.
+ *
+ */
+union cvmx_iob_inb_control_match_enb {
+ u64 u64;
+ struct cvmx_iob_inb_control_match_enb_s {
+ u64 reserved_29_63 : 35;
+ u64 mask : 8;
+ u64 opc : 4;
+ u64 dst : 9;
+ u64 src : 8;
+ } s;
+ struct cvmx_iob_inb_control_match_enb_s cn30xx;
+ struct cvmx_iob_inb_control_match_enb_s cn31xx;
+ struct cvmx_iob_inb_control_match_enb_s cn38xx;
+ struct cvmx_iob_inb_control_match_enb_s cn38xxp2;
+ struct cvmx_iob_inb_control_match_enb_s cn50xx;
+ struct cvmx_iob_inb_control_match_enb_s cn52xx;
+ struct cvmx_iob_inb_control_match_enb_s cn52xxp1;
+ struct cvmx_iob_inb_control_match_enb_s cn56xx;
+ struct cvmx_iob_inb_control_match_enb_s cn56xxp1;
+ struct cvmx_iob_inb_control_match_enb_s cn58xx;
+ struct cvmx_iob_inb_control_match_enb_s cn58xxp1;
+ struct cvmx_iob_inb_control_match_enb_s cn61xx;
+ struct cvmx_iob_inb_control_match_enb_s cn63xx;
+ struct cvmx_iob_inb_control_match_enb_s cn63xxp1;
+ struct cvmx_iob_inb_control_match_enb_s cn66xx;
+ struct cvmx_iob_inb_control_match_enb_s cn68xx;
+ struct cvmx_iob_inb_control_match_enb_s cn68xxp1;
+ struct cvmx_iob_inb_control_match_enb_s cn70xx;
+ struct cvmx_iob_inb_control_match_enb_s cn70xxp1;
+ struct cvmx_iob_inb_control_match_enb_s cnf71xx;
+};
+
+typedef union cvmx_iob_inb_control_match_enb cvmx_iob_inb_control_match_enb_t;
+
+/**
+ * cvmx_iob_inb_data_match
+ *
+ * Match pattern for the inbound data to set the INB_MATCH_BIT.
+ *
+ */
+union cvmx_iob_inb_data_match {
+ u64 u64;
+ struct cvmx_iob_inb_data_match_s {
+ u64 data : 64;
+ } s;
+ struct cvmx_iob_inb_data_match_s cn30xx;
+ struct cvmx_iob_inb_data_match_s cn31xx;
+ struct cvmx_iob_inb_data_match_s cn38xx;
+ struct cvmx_iob_inb_data_match_s cn38xxp2;
+ struct cvmx_iob_inb_data_match_s cn50xx;
+ struct cvmx_iob_inb_data_match_s cn52xx;
+ struct cvmx_iob_inb_data_match_s cn52xxp1;
+ struct cvmx_iob_inb_data_match_s cn56xx;
+ struct cvmx_iob_inb_data_match_s cn56xxp1;
+ struct cvmx_iob_inb_data_match_s cn58xx;
+ struct cvmx_iob_inb_data_match_s cn58xxp1;
+ struct cvmx_iob_inb_data_match_s cn61xx;
+ struct cvmx_iob_inb_data_match_s cn63xx;
+ struct cvmx_iob_inb_data_match_s cn63xxp1;
+ struct cvmx_iob_inb_data_match_s cn66xx;
+ struct cvmx_iob_inb_data_match_s cn68xx;
+ struct cvmx_iob_inb_data_match_s cn68xxp1;
+ struct cvmx_iob_inb_data_match_s cn70xx;
+ struct cvmx_iob_inb_data_match_s cn70xxp1;
+ struct cvmx_iob_inb_data_match_s cnf71xx;
+};
+
+typedef union cvmx_iob_inb_data_match cvmx_iob_inb_data_match_t;
+
+/**
+ * cvmx_iob_inb_data_match_enb
+ *
+ * Enables the match of the corresponding bit in the IOB_INB_DATA_MATCH register.
+ *
+ */
+union cvmx_iob_inb_data_match_enb {
+ u64 u64;
+ struct cvmx_iob_inb_data_match_enb_s {
+ u64 data : 64;
+ } s;
+ struct cvmx_iob_inb_data_match_enb_s cn30xx;
+ struct cvmx_iob_inb_data_match_enb_s cn31xx;
+ struct cvmx_iob_inb_data_match_enb_s cn38xx;
+ struct cvmx_iob_inb_data_match_enb_s cn38xxp2;
+ struct cvmx_iob_inb_data_match_enb_s cn50xx;
+ struct cvmx_iob_inb_data_match_enb_s cn52xx;
+ struct cvmx_iob_inb_data_match_enb_s cn52xxp1;
+ struct cvmx_iob_inb_data_match_enb_s cn56xx;
+ struct cvmx_iob_inb_data_match_enb_s cn56xxp1;
+ struct cvmx_iob_inb_data_match_enb_s cn58xx;
+ struct cvmx_iob_inb_data_match_enb_s cn58xxp1;
+ struct cvmx_iob_inb_data_match_enb_s cn61xx;
+ struct cvmx_iob_inb_data_match_enb_s cn63xx;
+ struct cvmx_iob_inb_data_match_enb_s cn63xxp1;
+ struct cvmx_iob_inb_data_match_enb_s cn66xx;
+ struct cvmx_iob_inb_data_match_enb_s cn68xx;
+ struct cvmx_iob_inb_data_match_enb_s cn68xxp1;
+ struct cvmx_iob_inb_data_match_enb_s cn70xx;
+ struct cvmx_iob_inb_data_match_enb_s cn70xxp1;
+ struct cvmx_iob_inb_data_match_enb_s cnf71xx;
+};
+
+typedef union cvmx_iob_inb_data_match_enb cvmx_iob_inb_data_match_enb_t;
+
+/**
+ * cvmx_iob_int_enb
+ *
+ * The IOB's interrupt enable register.
+ *
+ */
+union cvmx_iob_int_enb {
+ u64 u64;
+ struct cvmx_iob_int_enb_s {
+ u64 reserved_8_63 : 56;
+ u64 outb_mat : 1;
+ u64 inb_mat : 1;
+ u64 p_dat : 1;
+ u64 np_dat : 1;
+ u64 p_eop : 1;
+ u64 p_sop : 1;
+ u64 np_eop : 1;
+ u64 np_sop : 1;
+ } s;
+ struct cvmx_iob_int_enb_cn30xx {
+ u64 reserved_4_63 : 60;
+ u64 p_eop : 1;
+ u64 p_sop : 1;
+ u64 np_eop : 1;
+ u64 np_sop : 1;
+ } cn30xx;
+ struct cvmx_iob_int_enb_cn30xx cn31xx;
+ struct cvmx_iob_int_enb_cn30xx cn38xx;
+ struct cvmx_iob_int_enb_cn30xx cn38xxp2;
+ struct cvmx_iob_int_enb_cn50xx {
+ u64 reserved_6_63 : 58;
+ u64 p_dat : 1;
+ u64 np_dat : 1;
+ u64 p_eop : 1;
+ u64 p_sop : 1;
+ u64 np_eop : 1;
+ u64 np_sop : 1;
+ } cn50xx;
+ struct cvmx_iob_int_enb_cn50xx cn52xx;
+ struct cvmx_iob_int_enb_cn50xx cn52xxp1;
+ struct cvmx_iob_int_enb_cn50xx cn56xx;
+ struct cvmx_iob_int_enb_cn50xx cn56xxp1;
+ struct cvmx_iob_int_enb_cn50xx cn58xx;
+ struct cvmx_iob_int_enb_cn50xx cn58xxp1;
+ struct cvmx_iob_int_enb_cn50xx cn61xx;
+ struct cvmx_iob_int_enb_cn50xx cn63xx;
+ struct cvmx_iob_int_enb_cn50xx cn63xxp1;
+ struct cvmx_iob_int_enb_cn50xx cn66xx;
+ struct cvmx_iob_int_enb_cn68xx {
+ u64 reserved_0_63 : 64;
+ } cn68xx;
+ struct cvmx_iob_int_enb_cn68xx cn68xxp1;
+ struct cvmx_iob_int_enb_s cn70xx;
+ struct cvmx_iob_int_enb_s cn70xxp1;
+ struct cvmx_iob_int_enb_cn50xx cnf71xx;
+};
+
+typedef union cvmx_iob_int_enb cvmx_iob_int_enb_t;
+
+/**
+ * cvmx_iob_int_sum
+ *
+ * Contains the different interrupt summary bits of the IOB.
+ *
+ */
+union cvmx_iob_int_sum {
+ u64 u64;
+ struct cvmx_iob_int_sum_s {
+ u64 reserved_8_63 : 56;
+ u64 outb_mat : 1;
+ u64 inb_mat : 1;
+ u64 p_dat : 1;
+ u64 np_dat : 1;
+ u64 p_eop : 1;
+ u64 p_sop : 1;
+ u64 np_eop : 1;
+ u64 np_sop : 1;
+ } s;
+ struct cvmx_iob_int_sum_cn30xx {
+ u64 reserved_4_63 : 60;
+ u64 p_eop : 1;
+ u64 p_sop : 1;
+ u64 np_eop : 1;
+ u64 np_sop : 1;
+ } cn30xx;
+ struct cvmx_iob_int_sum_cn30xx cn31xx;
+ struct cvmx_iob_int_sum_cn30xx cn38xx;
+ struct cvmx_iob_int_sum_cn30xx cn38xxp2;
+ struct cvmx_iob_int_sum_cn50xx {
+ u64 reserved_6_63 : 58;
+ u64 p_dat : 1;
+ u64 np_dat : 1;
+ u64 p_eop : 1;
+ u64 p_sop : 1;
+ u64 np_eop : 1;
+ u64 np_sop : 1;
+ } cn50xx;
+ struct cvmx_iob_int_sum_cn50xx cn52xx;
+ struct cvmx_iob_int_sum_cn50xx cn52xxp1;
+ struct cvmx_iob_int_sum_cn50xx cn56xx;
+ struct cvmx_iob_int_sum_cn50xx cn56xxp1;
+ struct cvmx_iob_int_sum_cn50xx cn58xx;
+ struct cvmx_iob_int_sum_cn50xx cn58xxp1;
+ struct cvmx_iob_int_sum_cn50xx cn61xx;
+ struct cvmx_iob_int_sum_cn50xx cn63xx;
+ struct cvmx_iob_int_sum_cn50xx cn63xxp1;
+ struct cvmx_iob_int_sum_cn50xx cn66xx;
+ struct cvmx_iob_int_sum_cn68xx {
+ u64 reserved_0_63 : 64;
+ } cn68xx;
+ struct cvmx_iob_int_sum_cn68xx cn68xxp1;
+ struct cvmx_iob_int_sum_s cn70xx;
+ struct cvmx_iob_int_sum_s cn70xxp1;
+ struct cvmx_iob_int_sum_cn50xx cnf71xx;
+};
+
+typedef union cvmx_iob_int_sum cvmx_iob_int_sum_t;
+
+/**
+ * cvmx_iob_n2c_l2c_pri_cnt
+ *
+ * NCB To CMB L2C Priority Counter = NCB to CMB L2C Priority Counter Enable and Timer Value
+ * Enables and supplies the timeout count for raising the priority of NCB Store/Load access to
+ * the CMB.
+ */
+union cvmx_iob_n2c_l2c_pri_cnt {
+ u64 u64;
+ struct cvmx_iob_n2c_l2c_pri_cnt_s {
+ u64 reserved_16_63 : 48;
+ u64 cnt_enb : 1;
+ u64 cnt_val : 15;
+ } s;
+ struct cvmx_iob_n2c_l2c_pri_cnt_s cn38xx;
+ struct cvmx_iob_n2c_l2c_pri_cnt_s cn38xxp2;
+ struct cvmx_iob_n2c_l2c_pri_cnt_s cn52xx;
+ struct cvmx_iob_n2c_l2c_pri_cnt_s cn52xxp1;
+ struct cvmx_iob_n2c_l2c_pri_cnt_s cn56xx;
+ struct cvmx_iob_n2c_l2c_pri_cnt_s cn56xxp1;
+ struct cvmx_iob_n2c_l2c_pri_cnt_s cn58xx;
+ struct cvmx_iob_n2c_l2c_pri_cnt_s cn58xxp1;
+ struct cvmx_iob_n2c_l2c_pri_cnt_s cn61xx;
+ struct cvmx_iob_n2c_l2c_pri_cnt_s cn63xx;
+ struct cvmx_iob_n2c_l2c_pri_cnt_s cn63xxp1;
+ struct cvmx_iob_n2c_l2c_pri_cnt_s cn66xx;
+ struct cvmx_iob_n2c_l2c_pri_cnt_s cn70xx;
+ struct cvmx_iob_n2c_l2c_pri_cnt_s cn70xxp1;
+ struct cvmx_iob_n2c_l2c_pri_cnt_s cnf71xx;
+};
+
+typedef union cvmx_iob_n2c_l2c_pri_cnt cvmx_iob_n2c_l2c_pri_cnt_t;
+
+/**
+ * cvmx_iob_n2c_rsp_pri_cnt
+ *
+ * NCB To CMB Response Priority Counter = NCB to CMB Response Priority Counter Enable and Timer
+ * Value
+ * Enables and supplies the timeout count for raising the priority of NCB Responses access to the
+ * CMB.
+ */
+union cvmx_iob_n2c_rsp_pri_cnt {
+ u64 u64;
+ struct cvmx_iob_n2c_rsp_pri_cnt_s {
+ u64 reserved_16_63 : 48;
+ u64 cnt_enb : 1;
+ u64 cnt_val : 15;
+ } s;
+ struct cvmx_iob_n2c_rsp_pri_cnt_s cn38xx;
+ struct cvmx_iob_n2c_rsp_pri_cnt_s cn38xxp2;
+ struct cvmx_iob_n2c_rsp_pri_cnt_s cn52xx;
+ struct cvmx_iob_n2c_rsp_pri_cnt_s cn52xxp1;
+ struct cvmx_iob_n2c_rsp_pri_cnt_s cn56xx;
+ struct cvmx_iob_n2c_rsp_pri_cnt_s cn56xxp1;
+ struct cvmx_iob_n2c_rsp_pri_cnt_s cn58xx;
+ struct cvmx_iob_n2c_rsp_pri_cnt_s cn58xxp1;
+ struct cvmx_iob_n2c_rsp_pri_cnt_s cn61xx;
+ struct cvmx_iob_n2c_rsp_pri_cnt_s cn63xx;
+ struct cvmx_iob_n2c_rsp_pri_cnt_s cn63xxp1;
+ struct cvmx_iob_n2c_rsp_pri_cnt_s cn66xx;
+ struct cvmx_iob_n2c_rsp_pri_cnt_s cn70xx;
+ struct cvmx_iob_n2c_rsp_pri_cnt_s cn70xxp1;
+ struct cvmx_iob_n2c_rsp_pri_cnt_s cnf71xx;
+};
+
+typedef union cvmx_iob_n2c_rsp_pri_cnt cvmx_iob_n2c_rsp_pri_cnt_t;
+
+/**
+ * cvmx_iob_outb_com_pri_cnt
+ *
+ * Commit To NCB Priority Counter = Commit to NCB Priority Counter Enable and Timer Value
+ * Enables and supplies the timeout count for raising the priority of Commit request to the
+ * Outbound NCB.
+ */
+union cvmx_iob_outb_com_pri_cnt {
+ u64 u64;
+ struct cvmx_iob_outb_com_pri_cnt_s {
+ u64 reserved_16_63 : 48;
+ u64 cnt_enb : 1;
+ u64 cnt_val : 15;
+ } s;
+ struct cvmx_iob_outb_com_pri_cnt_s cn38xx;
+ struct cvmx_iob_outb_com_pri_cnt_s cn38xxp2;
+ struct cvmx_iob_outb_com_pri_cnt_s cn52xx;
+ struct cvmx_iob_outb_com_pri_cnt_s cn52xxp1;
+ struct cvmx_iob_outb_com_pri_cnt_s cn56xx;
+ struct cvmx_iob_outb_com_pri_cnt_s cn56xxp1;
+ struct cvmx_iob_outb_com_pri_cnt_s cn58xx;
+ struct cvmx_iob_outb_com_pri_cnt_s cn58xxp1;
+ struct cvmx_iob_outb_com_pri_cnt_s cn61xx;
+ struct cvmx_iob_outb_com_pri_cnt_s cn63xx;
+ struct cvmx_iob_outb_com_pri_cnt_s cn63xxp1;
+ struct cvmx_iob_outb_com_pri_cnt_s cn66xx;
+ struct cvmx_iob_outb_com_pri_cnt_s cn68xx;
+ struct cvmx_iob_outb_com_pri_cnt_s cn68xxp1;
+ struct cvmx_iob_outb_com_pri_cnt_s cn70xx;
+ struct cvmx_iob_outb_com_pri_cnt_s cn70xxp1;
+ struct cvmx_iob_outb_com_pri_cnt_s cnf71xx;
+};
+
+typedef union cvmx_iob_outb_com_pri_cnt cvmx_iob_outb_com_pri_cnt_t;
+
+/**
+ * cvmx_iob_outb_control_match
+ *
+ * Match pattern for the outbound control to set the OUTB_MATCH_BIT.
+ *
+ */
+union cvmx_iob_outb_control_match {
+ u64 u64;
+ struct cvmx_iob_outb_control_match_s {
+ u64 reserved_26_63 : 38;
+ u64 mask : 8;
+ u64 eot : 1;
+ u64 dst : 8;
+ u64 src : 9;
+ } s;
+ struct cvmx_iob_outb_control_match_s cn30xx;
+ struct cvmx_iob_outb_control_match_s cn31xx;
+ struct cvmx_iob_outb_control_match_s cn38xx;
+ struct cvmx_iob_outb_control_match_s cn38xxp2;
+ struct cvmx_iob_outb_control_match_s cn50xx;
+ struct cvmx_iob_outb_control_match_s cn52xx;
+ struct cvmx_iob_outb_control_match_s cn52xxp1;
+ struct cvmx_iob_outb_control_match_s cn56xx;
+ struct cvmx_iob_outb_control_match_s cn56xxp1;
+ struct cvmx_iob_outb_control_match_s cn58xx;
+ struct cvmx_iob_outb_control_match_s cn58xxp1;
+ struct cvmx_iob_outb_control_match_s cn61xx;
+ struct cvmx_iob_outb_control_match_s cn63xx;
+ struct cvmx_iob_outb_control_match_s cn63xxp1;
+ struct cvmx_iob_outb_control_match_s cn66xx;
+ struct cvmx_iob_outb_control_match_s cn68xx;
+ struct cvmx_iob_outb_control_match_s cn68xxp1;
+ struct cvmx_iob_outb_control_match_s cn70xx;
+ struct cvmx_iob_outb_control_match_s cn70xxp1;
+ struct cvmx_iob_outb_control_match_s cnf71xx;
+};
+
+typedef union cvmx_iob_outb_control_match cvmx_iob_outb_control_match_t;
+
+/**
+ * cvmx_iob_outb_control_match_enb
+ *
+ * Enables the match of the corresponding bit in the IOB_OUTB_CONTROL_MATCH register.
+ *
+ */
+union cvmx_iob_outb_control_match_enb {
+ u64 u64;
+ struct cvmx_iob_outb_control_match_enb_s {
+ u64 reserved_26_63 : 38;
+ u64 mask : 8;
+ u64 eot : 1;
+ u64 dst : 8;
+ u64 src : 9;
+ } s;
+ struct cvmx_iob_outb_control_match_enb_s cn30xx;
+ struct cvmx_iob_outb_control_match_enb_s cn31xx;
+ struct cvmx_iob_outb_control_match_enb_s cn38xx;
+ struct cvmx_iob_outb_control_match_enb_s cn38xxp2;
+ struct cvmx_iob_outb_control_match_enb_s cn50xx;
+ struct cvmx_iob_outb_control_match_enb_s cn52xx;
+ struct cvmx_iob_outb_control_match_enb_s cn52xxp1;
+ struct cvmx_iob_outb_control_match_enb_s cn56xx;
+ struct cvmx_iob_outb_control_match_enb_s cn56xxp1;
+ struct cvmx_iob_outb_control_match_enb_s cn58xx;
+ struct cvmx_iob_outb_control_match_enb_s cn58xxp1;
+ struct cvmx_iob_outb_control_match_enb_s cn61xx;
+ struct cvmx_iob_outb_control_match_enb_s cn63xx;
+ struct cvmx_iob_outb_control_match_enb_s cn63xxp1;
+ struct cvmx_iob_outb_control_match_enb_s cn66xx;
+ struct cvmx_iob_outb_control_match_enb_s cn68xx;
+ struct cvmx_iob_outb_control_match_enb_s cn68xxp1;
+ struct cvmx_iob_outb_control_match_enb_s cn70xx;
+ struct cvmx_iob_outb_control_match_enb_s cn70xxp1;
+ struct cvmx_iob_outb_control_match_enb_s cnf71xx;
+};
+
+typedef union cvmx_iob_outb_control_match_enb cvmx_iob_outb_control_match_enb_t;
+
+/**
+ * cvmx_iob_outb_data_match
+ *
+ * Match pattern for the outbound data to set the OUTB_MATCH_BIT.
+ *
+ */
+union cvmx_iob_outb_data_match {
+ u64 u64;
+ struct cvmx_iob_outb_data_match_s {
+ u64 data : 64;
+ } s;
+ struct cvmx_iob_outb_data_match_s cn30xx;
+ struct cvmx_iob_outb_data_match_s cn31xx;
+ struct cvmx_iob_outb_data_match_s cn38xx;
+ struct cvmx_iob_outb_data_match_s cn38xxp2;
+ struct cvmx_iob_outb_data_match_s cn50xx;
+ struct cvmx_iob_outb_data_match_s cn52xx;
+ struct cvmx_iob_outb_data_match_s cn52xxp1;
+ struct cvmx_iob_outb_data_match_s cn56xx;
+ struct cvmx_iob_outb_data_match_s cn56xxp1;
+ struct cvmx_iob_outb_data_match_s cn58xx;
+ struct cvmx_iob_outb_data_match_s cn58xxp1;
+ struct cvmx_iob_outb_data_match_s cn61xx;
+ struct cvmx_iob_outb_data_match_s cn63xx;
+ struct cvmx_iob_outb_data_match_s cn63xxp1;
+ struct cvmx_iob_outb_data_match_s cn66xx;
+ struct cvmx_iob_outb_data_match_s cn68xx;
+ struct cvmx_iob_outb_data_match_s cn68xxp1;
+ struct cvmx_iob_outb_data_match_s cn70xx;
+ struct cvmx_iob_outb_data_match_s cn70xxp1;
+ struct cvmx_iob_outb_data_match_s cnf71xx;
+};
+
+typedef union cvmx_iob_outb_data_match cvmx_iob_outb_data_match_t;
+
+/**
+ * cvmx_iob_outb_data_match_enb
+ *
+ * Enables the match of the corresponding bit in the IOB_OUTB_DATA_MATCH register.
+ *
+ */
+union cvmx_iob_outb_data_match_enb {
+ u64 u64;
+ struct cvmx_iob_outb_data_match_enb_s {
+ u64 data : 64;
+ } s;
+ struct cvmx_iob_outb_data_match_enb_s cn30xx;
+ struct cvmx_iob_outb_data_match_enb_s cn31xx;
+ struct cvmx_iob_outb_data_match_enb_s cn38xx;
+ struct cvmx_iob_outb_data_match_enb_s cn38xxp2;
+ struct cvmx_iob_outb_data_match_enb_s cn50xx;
+ struct cvmx_iob_outb_data_match_enb_s cn52xx;
+ struct cvmx_iob_outb_data_match_enb_s cn52xxp1;
+ struct cvmx_iob_outb_data_match_enb_s cn56xx;
+ struct cvmx_iob_outb_data_match_enb_s cn56xxp1;
+ struct cvmx_iob_outb_data_match_enb_s cn58xx;
+ struct cvmx_iob_outb_data_match_enb_s cn58xxp1;
+ struct cvmx_iob_outb_data_match_enb_s cn61xx;
+ struct cvmx_iob_outb_data_match_enb_s cn63xx;
+ struct cvmx_iob_outb_data_match_enb_s cn63xxp1;
+ struct cvmx_iob_outb_data_match_enb_s cn66xx;
+ struct cvmx_iob_outb_data_match_enb_s cn68xx;
+ struct cvmx_iob_outb_data_match_enb_s cn68xxp1;
+ struct cvmx_iob_outb_data_match_enb_s cn70xx;
+ struct cvmx_iob_outb_data_match_enb_s cn70xxp1;
+ struct cvmx_iob_outb_data_match_enb_s cnf71xx;
+};
+
+typedef union cvmx_iob_outb_data_match_enb cvmx_iob_outb_data_match_enb_t;
+
+/**
+ * cvmx_iob_outb_fpa_pri_cnt
+ *
+ * FPA To NCB Priority Counter = FPA Returns to NCB Priority Counter Enable and Timer Value
+ * Enables and supplies the timeout count for raising the priority of FPA Return Page request to
+ * the Outbound NCB.
+ */
+union cvmx_iob_outb_fpa_pri_cnt {
+ u64 u64;
+ struct cvmx_iob_outb_fpa_pri_cnt_s {
+ u64 reserved_16_63 : 48;
+ u64 cnt_enb : 1;
+ u64 cnt_val : 15;
+ } s;
+ struct cvmx_iob_outb_fpa_pri_cnt_s cn38xx;
+ struct cvmx_iob_outb_fpa_pri_cnt_s cn38xxp2;
+ struct cvmx_iob_outb_fpa_pri_cnt_s cn52xx;
+ struct cvmx_iob_outb_fpa_pri_cnt_s cn52xxp1;
+ struct cvmx_iob_outb_fpa_pri_cnt_s cn56xx;
+ struct cvmx_iob_outb_fpa_pri_cnt_s cn56xxp1;
+ struct cvmx_iob_outb_fpa_pri_cnt_s cn58xx;
+ struct cvmx_iob_outb_fpa_pri_cnt_s cn58xxp1;
+ struct cvmx_iob_outb_fpa_pri_cnt_s cn61xx;
+ struct cvmx_iob_outb_fpa_pri_cnt_s cn63xx;
+ struct cvmx_iob_outb_fpa_pri_cnt_s cn63xxp1;
+ struct cvmx_iob_outb_fpa_pri_cnt_s cn66xx;
+ struct cvmx_iob_outb_fpa_pri_cnt_s cn68xx;
+ struct cvmx_iob_outb_fpa_pri_cnt_s cn68xxp1;
+ struct cvmx_iob_outb_fpa_pri_cnt_s cn70xx;
+ struct cvmx_iob_outb_fpa_pri_cnt_s cn70xxp1;
+ struct cvmx_iob_outb_fpa_pri_cnt_s cnf71xx;
+};
+
+typedef union cvmx_iob_outb_fpa_pri_cnt cvmx_iob_outb_fpa_pri_cnt_t;
+
+/**
+ * cvmx_iob_outb_req_pri_cnt
+ *
+ * Request To NCB Priority Counter = Request to NCB Priority Counter Enable and Timer Value
+ * Enables and supplies the timeout count for raising the priority of Request transfers to the
+ * Outbound NCB.
+ */
+union cvmx_iob_outb_req_pri_cnt {
+ u64 u64;
+ struct cvmx_iob_outb_req_pri_cnt_s {
+ u64 reserved_16_63 : 48;
+ u64 cnt_enb : 1;
+ u64 cnt_val : 15;
+ } s;
+ struct cvmx_iob_outb_req_pri_cnt_s cn38xx;
+ struct cvmx_iob_outb_req_pri_cnt_s cn38xxp2;
+ struct cvmx_iob_outb_req_pri_cnt_s cn52xx;
+ struct cvmx_iob_outb_req_pri_cnt_s cn52xxp1;
+ struct cvmx_iob_outb_req_pri_cnt_s cn56xx;
+ struct cvmx_iob_outb_req_pri_cnt_s cn56xxp1;
+ struct cvmx_iob_outb_req_pri_cnt_s cn58xx;
+ struct cvmx_iob_outb_req_pri_cnt_s cn58xxp1;
+ struct cvmx_iob_outb_req_pri_cnt_s cn61xx;
+ struct cvmx_iob_outb_req_pri_cnt_s cn63xx;
+ struct cvmx_iob_outb_req_pri_cnt_s cn63xxp1;
+ struct cvmx_iob_outb_req_pri_cnt_s cn66xx;
+ struct cvmx_iob_outb_req_pri_cnt_s cn68xx;
+ struct cvmx_iob_outb_req_pri_cnt_s cn68xxp1;
+ struct cvmx_iob_outb_req_pri_cnt_s cn70xx;
+ struct cvmx_iob_outb_req_pri_cnt_s cn70xxp1;
+ struct cvmx_iob_outb_req_pri_cnt_s cnf71xx;
+};
+
+typedef union cvmx_iob_outb_req_pri_cnt cvmx_iob_outb_req_pri_cnt_t;
+
+/**
+ * cvmx_iob_p2c_req_pri_cnt
+ *
+ * PKO To CMB Response Priority Counter = PKO to CMB Response Priority Counter Enable and Timer
+ * Value
+ * Enables and supplies the timeout count for raising the priority of PKO Load access to the CMB.
+ */
+union cvmx_iob_p2c_req_pri_cnt {
+ u64 u64;
+ struct cvmx_iob_p2c_req_pri_cnt_s {
+ u64 reserved_16_63 : 48;
+ u64 cnt_enb : 1; /* Enable the priority-raise timeout counter */
+ u64 cnt_val : 15; /* Timeout count (timer value) */
+ } s;
+ struct cvmx_iob_p2c_req_pri_cnt_s cn38xx;
+ struct cvmx_iob_p2c_req_pri_cnt_s cn38xxp2;
+ struct cvmx_iob_p2c_req_pri_cnt_s cn52xx;
+ struct cvmx_iob_p2c_req_pri_cnt_s cn52xxp1;
+ struct cvmx_iob_p2c_req_pri_cnt_s cn56xx;
+ struct cvmx_iob_p2c_req_pri_cnt_s cn56xxp1;
+ struct cvmx_iob_p2c_req_pri_cnt_s cn58xx;
+ struct cvmx_iob_p2c_req_pri_cnt_s cn58xxp1;
+ struct cvmx_iob_p2c_req_pri_cnt_s cn61xx;
+ struct cvmx_iob_p2c_req_pri_cnt_s cn63xx;
+ struct cvmx_iob_p2c_req_pri_cnt_s cn63xxp1;
+ struct cvmx_iob_p2c_req_pri_cnt_s cn66xx;
+ struct cvmx_iob_p2c_req_pri_cnt_s cn70xx;
+ struct cvmx_iob_p2c_req_pri_cnt_s cn70xxp1;
+ struct cvmx_iob_p2c_req_pri_cnt_s cnf71xx;
+};
+
+typedef union cvmx_iob_p2c_req_pri_cnt cvmx_iob_p2c_req_pri_cnt_t;
+
+/**
+ * cvmx_iob_pkt_err
+ *
+ * Provides status about the failing packet receive error.
+ *
+ */
+union cvmx_iob_pkt_err {
+ u64 u64;
+ struct cvmx_iob_pkt_err_s {
+ u64 reserved_12_63 : 52;
+ u64 vport : 6; /* NOTE(review): presumably the virtual port of the failing packet - confirm with HRM */
+ u64 port : 6; /* Port of the failing packet receive */
+ } s;
+ struct cvmx_iob_pkt_err_cn30xx {
+ u64 reserved_6_63 : 58;
+ u64 port : 6;
+ } cn30xx;
+ struct cvmx_iob_pkt_err_cn30xx cn31xx;
+ struct cvmx_iob_pkt_err_cn30xx cn38xx;
+ struct cvmx_iob_pkt_err_cn30xx cn38xxp2;
+ struct cvmx_iob_pkt_err_cn30xx cn50xx;
+ struct cvmx_iob_pkt_err_cn30xx cn52xx;
+ struct cvmx_iob_pkt_err_cn30xx cn52xxp1;
+ struct cvmx_iob_pkt_err_cn30xx cn56xx;
+ struct cvmx_iob_pkt_err_cn30xx cn56xxp1;
+ struct cvmx_iob_pkt_err_cn30xx cn58xx;
+ struct cvmx_iob_pkt_err_cn30xx cn58xxp1;
+ struct cvmx_iob_pkt_err_s cn61xx;
+ struct cvmx_iob_pkt_err_s cn63xx;
+ struct cvmx_iob_pkt_err_s cn63xxp1;
+ struct cvmx_iob_pkt_err_s cn66xx;
+ struct cvmx_iob_pkt_err_s cn70xx;
+ struct cvmx_iob_pkt_err_s cn70xxp1;
+ struct cvmx_iob_pkt_err_s cnf71xx;
+};
+
+typedef union cvmx_iob_pkt_err cvmx_iob_pkt_err_t;
+
+/**
+ * cvmx_iob_pp_bist_status
+ *
+ * The result of the BIST run on the PPs.
+ *
+ */
+union cvmx_iob_pp_bist_status {
+ u64 u64;
+ struct cvmx_iob_pp_bist_status_s {
+ u64 reserved_4_63 : 60;
+ u64 pp_bstat : 4; /* One BIST status bit per PP (cn70xx/cn70xxp1 only) */
+ } s;
+ struct cvmx_iob_pp_bist_status_s cn70xx;
+ struct cvmx_iob_pp_bist_status_s cn70xxp1;
+};
+
+typedef union cvmx_iob_pp_bist_status cvmx_iob_pp_bist_status_t;
+
+/**
+ * cvmx_iob_to_cmb_credits
+ *
+ * Controls the number of reads and writes that may be outstanding to the L2C (via the CMB).
+ *
+ */
+union cvmx_iob_to_cmb_credits {
+ u64 u64;
+ struct cvmx_iob_to_cmb_credits_s {
+ u64 reserved_6_63 : 58;
+ u64 ncb_rd : 3; /* Outstanding NCB read credits */
+ u64 ncb_wr : 3; /* Outstanding NCB write credits */
+ } s;
+ struct cvmx_iob_to_cmb_credits_cn52xx {
+ u64 reserved_9_63 : 55;
+ u64 pko_rd : 3; /* Outstanding PKO read credits */
+ u64 ncb_rd : 3;
+ u64 ncb_wr : 3;
+ } cn52xx;
+ struct cvmx_iob_to_cmb_credits_cn52xx cn61xx;
+ struct cvmx_iob_to_cmb_credits_cn52xx cn63xx;
+ struct cvmx_iob_to_cmb_credits_cn52xx cn63xxp1;
+ struct cvmx_iob_to_cmb_credits_cn52xx cn66xx;
+ struct cvmx_iob_to_cmb_credits_cn68xx {
+ u64 reserved_9_63 : 55;
+ u64 dwb : 3; /* NOTE(review): presumably don't-write-back credits - confirm with HRM */
+ u64 ncb_rd : 3;
+ u64 ncb_wr : 3;
+ } cn68xx;
+ struct cvmx_iob_to_cmb_credits_cn68xx cn68xxp1;
+ struct cvmx_iob_to_cmb_credits_cn52xx cn70xx;
+ struct cvmx_iob_to_cmb_credits_cn52xx cn70xxp1;
+ struct cvmx_iob_to_cmb_credits_cn52xx cnf71xx;
+};
+
+typedef union cvmx_iob_to_cmb_credits cvmx_iob_to_cmb_credits_t;
+
+/**
+ * cvmx_iob_to_ncb_did_00_credits
+ *
+ * IOB_TO_NCB_DID_00_CREDITS = IOB NCB DID 00 Credits
+ *
+ * Number of credits for NCB DID 00.
+ */
+union cvmx_iob_to_ncb_did_00_credits {
+ u64 u64;
+ struct cvmx_iob_to_ncb_did_00_credits_s {
+ u64 reserved_7_63 : 57;
+ u64 crd : 7; /* Credit count for this NCB DID */
+ } s;
+ struct cvmx_iob_to_ncb_did_00_credits_s cn68xx;
+ struct cvmx_iob_to_ncb_did_00_credits_s cn68xxp1;
+};
+
+typedef union cvmx_iob_to_ncb_did_00_credits cvmx_iob_to_ncb_did_00_credits_t;
+
+/**
+ * cvmx_iob_to_ncb_did_111_credits
+ *
+ * IOB_TO_NCB_DID_111_CREDITS = IOB NCB DID 111 Credits
+ *
+ * Number of credits for NCB DID 111.
+ */
+union cvmx_iob_to_ncb_did_111_credits {
+ u64 u64;
+ struct cvmx_iob_to_ncb_did_111_credits_s {
+ u64 reserved_7_63 : 57;
+ u64 crd : 7; /* Credit count for this NCB DID */
+ } s;
+ struct cvmx_iob_to_ncb_did_111_credits_s cn68xx;
+ struct cvmx_iob_to_ncb_did_111_credits_s cn68xxp1;
+};
+
+typedef union cvmx_iob_to_ncb_did_111_credits cvmx_iob_to_ncb_did_111_credits_t;
+
+/**
+ * cvmx_iob_to_ncb_did_223_credits
+ *
+ * IOB_TO_NCB_DID_223_CREDITS = IOB NCB DID 223 Credits
+ *
+ * Number of credits for NCB DID 223.
+ */
+union cvmx_iob_to_ncb_did_223_credits {
+ u64 u64;
+ struct cvmx_iob_to_ncb_did_223_credits_s {
+ u64 reserved_7_63 : 57;
+ u64 crd : 7; /* Credit count for this NCB DID */
+ } s;
+ struct cvmx_iob_to_ncb_did_223_credits_s cn68xx;
+ struct cvmx_iob_to_ncb_did_223_credits_s cn68xxp1;
+};
+
+typedef union cvmx_iob_to_ncb_did_223_credits cvmx_iob_to_ncb_did_223_credits_t;
+
+/**
+ * cvmx_iob_to_ncb_did_24_credits
+ *
+ * IOB_TO_NCB_DID_24_CREDITS = IOB NCB DID 24 Credits
+ *
+ * Number of credits for NCB DID 24.
+ */
+union cvmx_iob_to_ncb_did_24_credits {
+ u64 u64;
+ struct cvmx_iob_to_ncb_did_24_credits_s {
+ u64 reserved_7_63 : 57;
+ u64 crd : 7; /* Credit count for this NCB DID */
+ } s;
+ struct cvmx_iob_to_ncb_did_24_credits_s cn68xx;
+ struct cvmx_iob_to_ncb_did_24_credits_s cn68xxp1;
+};
+
+typedef union cvmx_iob_to_ncb_did_24_credits cvmx_iob_to_ncb_did_24_credits_t;
+
+/**
+ * cvmx_iob_to_ncb_did_32_credits
+ *
+ * IOB_TO_NCB_DID_32_CREDITS = IOB NCB DID 32 Credits
+ *
+ * Number of credits for NCB DID 32.
+ */
+union cvmx_iob_to_ncb_did_32_credits {
+ u64 u64;
+ struct cvmx_iob_to_ncb_did_32_credits_s {
+ u64 reserved_7_63 : 57;
+ u64 crd : 7; /* Credit count for this NCB DID */
+ } s;
+ struct cvmx_iob_to_ncb_did_32_credits_s cn68xx;
+ struct cvmx_iob_to_ncb_did_32_credits_s cn68xxp1;
+};
+
+typedef union cvmx_iob_to_ncb_did_32_credits cvmx_iob_to_ncb_did_32_credits_t;
+
+/**
+ * cvmx_iob_to_ncb_did_40_credits
+ *
+ * IOB_TO_NCB_DID_40_CREDITS = IOB NCB DID 40 Credits
+ *
+ * Number of credits for NCB DID 40.
+ */
+union cvmx_iob_to_ncb_did_40_credits {
+ u64 u64;
+ struct cvmx_iob_to_ncb_did_40_credits_s {
+ u64 reserved_7_63 : 57;
+ u64 crd : 7; /* Credit count for this NCB DID */
+ } s;
+ struct cvmx_iob_to_ncb_did_40_credits_s cn68xx;
+ struct cvmx_iob_to_ncb_did_40_credits_s cn68xxp1;
+};
+
+typedef union cvmx_iob_to_ncb_did_40_credits cvmx_iob_to_ncb_did_40_credits_t;
+
+/**
+ * cvmx_iob_to_ncb_did_55_credits
+ *
+ * IOB_TO_NCB_DID_55_CREDITS = IOB NCB DID 55 Credits
+ *
+ * Number of credits for NCB DID 55.
+ */
+union cvmx_iob_to_ncb_did_55_credits {
+ u64 u64;
+ struct cvmx_iob_to_ncb_did_55_credits_s {
+ u64 reserved_7_63 : 57;
+ u64 crd : 7; /* Credit count for this NCB DID */
+ } s;
+ struct cvmx_iob_to_ncb_did_55_credits_s cn68xx;
+ struct cvmx_iob_to_ncb_did_55_credits_s cn68xxp1;
+};
+
+typedef union cvmx_iob_to_ncb_did_55_credits cvmx_iob_to_ncb_did_55_credits_t;
+
+/**
+ * cvmx_iob_to_ncb_did_64_credits
+ *
+ * IOB_TO_NCB_DID_64_CREDITS = IOB NCB DID 64 Credits
+ *
+ * Number of credits for NCB DID 64.
+ */
+union cvmx_iob_to_ncb_did_64_credits {
+ u64 u64;
+ struct cvmx_iob_to_ncb_did_64_credits_s {
+ u64 reserved_7_63 : 57;
+ u64 crd : 7; /* Credit count for this NCB DID */
+ } s;
+ struct cvmx_iob_to_ncb_did_64_credits_s cn68xx;
+ struct cvmx_iob_to_ncb_did_64_credits_s cn68xxp1;
+};
+
+typedef union cvmx_iob_to_ncb_did_64_credits cvmx_iob_to_ncb_did_64_credits_t;
+
+/**
+ * cvmx_iob_to_ncb_did_79_credits
+ *
+ * IOB_TO_NCB_DID_79_CREDITS = IOB NCB DID 79 Credits
+ *
+ * Number of credits for NCB DID 79.
+ */
+union cvmx_iob_to_ncb_did_79_credits {
+ u64 u64;
+ struct cvmx_iob_to_ncb_did_79_credits_s {
+ u64 reserved_7_63 : 57;
+ u64 crd : 7; /* Credit count for this NCB DID */
+ } s;
+ struct cvmx_iob_to_ncb_did_79_credits_s cn68xx;
+ struct cvmx_iob_to_ncb_did_79_credits_s cn68xxp1;
+};
+
+typedef union cvmx_iob_to_ncb_did_79_credits cvmx_iob_to_ncb_did_79_credits_t;
+
+/**
+ * cvmx_iob_to_ncb_did_96_credits
+ *
+ * IOB_TO_NCB_DID_96_CREDITS = IOB NCB DID 96 Credits
+ *
+ * Number of credits for NCB DID 96.
+ */
+union cvmx_iob_to_ncb_did_96_credits {
+ u64 u64;
+ struct cvmx_iob_to_ncb_did_96_credits_s {
+ u64 reserved_7_63 : 57;
+ u64 crd : 7; /* Credit count for this NCB DID */
+ } s;
+ struct cvmx_iob_to_ncb_did_96_credits_s cn68xx;
+ struct cvmx_iob_to_ncb_did_96_credits_s cn68xxp1;
+};
+
+typedef union cvmx_iob_to_ncb_did_96_credits cvmx_iob_to_ncb_did_96_credits_t;
+
+/**
+ * cvmx_iob_to_ncb_did_98_credits
+ *
+ * IOB_TO_NCB_DID_98_CREDITS = IOB NCB DID 98 Credits
+ *
+ * Number of credits for NCB DID 98.
+ */
+union cvmx_iob_to_ncb_did_98_credits {
+ u64 u64;
+ struct cvmx_iob_to_ncb_did_98_credits_s {
+ u64 reserved_7_63 : 57;
+ u64 crd : 7; /* Credit count for this NCB DID */
+ } s;
+ struct cvmx_iob_to_ncb_did_98_credits_s cn68xx;
+ struct cvmx_iob_to_ncb_did_98_credits_s cn68xxp1;
+};
+
+typedef union cvmx_iob_to_ncb_did_98_credits cvmx_iob_to_ncb_did_98_credits_t;
+
+#endif
--
2.35.1
^ permalink raw reply related [flat|nested] 52+ messages in thread* [PATCH 04/52] mips: octeon: Add cvmx-lbk-defs.h header file
2022-03-30 10:06 [PATCH 00/52] mips: octeon: Add ethernet support Stefan Roese
` (2 preceding siblings ...)
2022-03-30 10:06 ` [PATCH 03/52] mips: octeon: Add cvmx-iob-defs.h " Stefan Roese
@ 2022-03-30 10:06 ` Stefan Roese
2022-03-30 10:06 ` [PATCH 05/52] mips: octeon: Add cvmx-npei-defs.h " Stefan Roese
` (45 subsequent siblings)
49 siblings, 0 replies; 52+ messages in thread
From: Stefan Roese @ 2022-03-30 10:06 UTC (permalink / raw)
To: u-boot; +Cc: awilliams, cchavva
From: Aaron Williams <awilliams@marvell.com>
Import cvmx-lbk-defs.h header file from 2013 U-Boot. It will be used
by the later added drivers to support networking on the MIPS Octeon II /
III platforms.
Signed-off-by: Aaron Williams <awilliams@marvell.com>
Signed-off-by: Stefan Roese <sr@denx.de>
---
.../mach-octeon/include/mach/cvmx-lbk-defs.h | 157 ++++++++++++++++++
1 file changed, 157 insertions(+)
create mode 100644 arch/mips/mach-octeon/include/mach/cvmx-lbk-defs.h
diff --git a/arch/mips/mach-octeon/include/mach/cvmx-lbk-defs.h b/arch/mips/mach-octeon/include/mach/cvmx-lbk-defs.h
new file mode 100644
index 000000000000..1068a19ad80b
--- /dev/null
+++ b/arch/mips/mach-octeon/include/mach/cvmx-lbk-defs.h
@@ -0,0 +1,157 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018-2022 Marvell International Ltd.
+ *
+ * Configuration and status register (CSR) type definitions for
+ * Octeon lbk.
+ */
+
+#ifndef __CVMX_LBK_DEFS_H__
+#define __CVMX_LBK_DEFS_H__
+
+/* LBK CSR absolute addresses; CHX_PKIND indexes one of 64 per-channel entries */
+#define CVMX_LBK_BIST_RESULT (0x0001180012000020ull)
+#define CVMX_LBK_CHX_PKIND(offset) (0x0001180012000200ull + ((offset) & 63) * 8)
+#define CVMX_LBK_CLK_GATE_CTL (0x0001180012000008ull)
+#define CVMX_LBK_DAT_ERR_INFO (0x0001180012000050ull)
+#define CVMX_LBK_ECC_CFG (0x0001180012000060ull)
+#define CVMX_LBK_INT (0x0001180012000040ull)
+#define CVMX_LBK_SFT_RST (0x0001180012000000ull)
+
+/**
+ * cvmx_lbk_bist_result
+ *
+ * This register provides access to the internal BIST results. Each bit is the
+ * BIST result of an individual memory (per bit, 0 = pass and 1 = fail).
+ */
+union cvmx_lbk_bist_result {
+ u64 u64;
+ struct cvmx_lbk_bist_result_s {
+ u64 reserved_1_63 : 63;
+ u64 dat : 1; /* BIST result of the DAT memory (0 = pass, 1 = fail) */
+ } s;
+ struct cvmx_lbk_bist_result_s cn73xx;
+ struct cvmx_lbk_bist_result_s cn78xx;
+ struct cvmx_lbk_bist_result_s cn78xxp1;
+ struct cvmx_lbk_bist_result_s cnf75xx;
+};
+
+typedef union cvmx_lbk_bist_result cvmx_lbk_bist_result_t;
+
+/**
+ * cvmx_lbk_ch#_pkind
+ *
+ * Per-channel register (LBK_CH(0..63)_PKIND, see CVMX_LBK_CHX_PKIND()).
+ */
+union cvmx_lbk_chx_pkind {
+ u64 u64;
+ struct cvmx_lbk_chx_pkind_s {
+ u64 reserved_6_63 : 58;
+ u64 pkind : 6; /* NOTE(review): presumably the PKI port-kind assigned to this channel - confirm with HRM */
+ } s;
+ struct cvmx_lbk_chx_pkind_s cn73xx;
+ struct cvmx_lbk_chx_pkind_s cn78xx;
+ struct cvmx_lbk_chx_pkind_s cn78xxp1;
+ struct cvmx_lbk_chx_pkind_s cnf75xx;
+};
+
+typedef union cvmx_lbk_chx_pkind cvmx_lbk_chx_pkind_t;
+
+/**
+ * cvmx_lbk_clk_gate_ctl
+ *
+ * This register is for diagnostic use only.
+ *
+ */
+union cvmx_lbk_clk_gate_ctl {
+ u64 u64;
+ struct cvmx_lbk_clk_gate_ctl_s {
+ u64 reserved_1_63 : 63;
+ u64 dis : 1; /* Disable clock gating (diagnostic only) */
+ } s;
+ struct cvmx_lbk_clk_gate_ctl_s cn73xx;
+ struct cvmx_lbk_clk_gate_ctl_s cn78xx;
+ struct cvmx_lbk_clk_gate_ctl_s cn78xxp1;
+ struct cvmx_lbk_clk_gate_ctl_s cnf75xx;
+};
+
+typedef union cvmx_lbk_clk_gate_ctl cvmx_lbk_clk_gate_ctl_t;
+
+/**
+ * cvmx_lbk_dat_err_info
+ *
+ * NOTE(review): sbe_*/dbe_* fields presumably capture single-/double-bit ECC
+ * error details (ECC output, syndrome, address) for the DAT memory, matching
+ * LBK_ECC_CFG and the DAT_SBE/DAT_DBE bits in LBK_INT - confirm with HRM.
+ */
+union cvmx_lbk_dat_err_info {
+ u64 u64;
+ struct cvmx_lbk_dat_err_info_s {
+ u64 reserved_58_63 : 6;
+ u64 dbe_ecc_out : 9;
+ u64 dbe_synd : 9;
+ u64 dbe_addr : 8;
+ u64 reserved_26_31 : 6;
+ u64 sbe_ecc_out : 9;
+ u64 sbe_synd : 9;
+ u64 sbe_addr : 8;
+ } s;
+ struct cvmx_lbk_dat_err_info_s cn73xx;
+ struct cvmx_lbk_dat_err_info_s cn78xx;
+ struct cvmx_lbk_dat_err_info_s cn78xxp1;
+ struct cvmx_lbk_dat_err_info_s cnf75xx;
+};
+
+typedef union cvmx_lbk_dat_err_info cvmx_lbk_dat_err_info_t;
+
+/**
+ * cvmx_lbk_ecc_cfg
+ *
+ * ECC configuration for the LBK DAT memory.
+ */
+union cvmx_lbk_ecc_cfg {
+ u64 u64;
+ struct cvmx_lbk_ecc_cfg_s {
+ u64 reserved_3_63 : 61;
+ u64 dat_flip : 2; /* NOTE(review): presumably test bits to flip ECC check bits - confirm with HRM */
+ u64 dat_cdis : 1; /* NOTE(review): presumably ECC correction disable - confirm with HRM */
+ } s;
+ struct cvmx_lbk_ecc_cfg_s cn73xx;
+ struct cvmx_lbk_ecc_cfg_s cn78xx;
+ struct cvmx_lbk_ecc_cfg_s cn78xxp1;
+ struct cvmx_lbk_ecc_cfg_s cnf75xx;
+};
+
+typedef union cvmx_lbk_ecc_cfg cvmx_lbk_ecc_cfg_t;
+
+/**
+ * cvmx_lbk_int
+ *
+ * Interrupt summary: channel/data FIFO overflow and underflow, plus
+ * double-/single-bit errors on the DAT memory.
+ */
+union cvmx_lbk_int {
+ u64 u64;
+ struct cvmx_lbk_int_s {
+ u64 reserved_6_63 : 58;
+ u64 chan_oflow : 1; /* Channel overflow */
+ u64 chan_uflow : 1; /* Channel underflow */
+ u64 dat_oflow : 1; /* Data overflow */
+ u64 dat_uflow : 1; /* Data underflow */
+ u64 dat_dbe : 1; /* DAT memory double-bit error */
+ u64 dat_sbe : 1; /* DAT memory single-bit error */
+ } s;
+ struct cvmx_lbk_int_s cn73xx;
+ struct cvmx_lbk_int_s cn78xx;
+ struct cvmx_lbk_int_s cn78xxp1;
+ struct cvmx_lbk_int_s cnf75xx;
+};
+
+typedef union cvmx_lbk_int cvmx_lbk_int_t;
+
+/**
+ * cvmx_lbk_sft_rst
+ *
+ * LBK soft reset.
+ */
+union cvmx_lbk_sft_rst {
+ u64 u64;
+ struct cvmx_lbk_sft_rst_s {
+ u64 reserved_1_63 : 63;
+ u64 reset : 1; /* Soft-reset the LBK block */
+ } s;
+ struct cvmx_lbk_sft_rst_s cn73xx;
+ struct cvmx_lbk_sft_rst_s cn78xx;
+ struct cvmx_lbk_sft_rst_s cn78xxp1;
+ struct cvmx_lbk_sft_rst_s cnf75xx;
+};
+
+typedef union cvmx_lbk_sft_rst cvmx_lbk_sft_rst_t;
+
+#endif
--
2.35.1
^ permalink raw reply related [flat|nested] 52+ messages in thread* [PATCH 05/52] mips: octeon: Add cvmx-npei-defs.h header file
2022-03-30 10:06 [PATCH 00/52] mips: octeon: Add ethernet support Stefan Roese
` (3 preceding siblings ...)
2022-03-30 10:06 ` [PATCH 04/52] mips: octeon: Add cvmx-lbk-defs.h " Stefan Roese
@ 2022-03-30 10:06 ` Stefan Roese
2022-03-30 10:06 ` [PATCH 06/52] mips: octeon: Add cvmx-pcsxx-defs.h " Stefan Roese
` (44 subsequent siblings)
49 siblings, 0 replies; 52+ messages in thread
From: Stefan Roese @ 2022-03-30 10:06 UTC (permalink / raw)
To: u-boot; +Cc: awilliams, cchavva
From: Aaron Williams <awilliams@marvell.com>
Import cvmx-npei-defs.h header file from 2013 U-Boot. It will be used
by the later added drivers to support networking on the MIPS Octeon II /
III platforms.
Signed-off-by: Aaron Williams <awilliams@marvell.com>
Signed-off-by: Stefan Roese <sr@denx.de>
---
.../mach-octeon/include/mach/cvmx-npei-defs.h | 3550 +++++++++++++++++
1 file changed, 3550 insertions(+)
create mode 100644 arch/mips/mach-octeon/include/mach/cvmx-npei-defs.h
diff --git a/arch/mips/mach-octeon/include/mach/cvmx-npei-defs.h b/arch/mips/mach-octeon/include/mach/cvmx-npei-defs.h
new file mode 100644
index 000000000000..2e2c24819445
--- /dev/null
+++ b/arch/mips/mach-octeon/include/mach/cvmx-npei-defs.h
@@ -0,0 +1,3550 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018-2022 Marvell International Ltd.
+ *
+ * Configuration and status register (CSR) type definitions for
+ * Octeon npei.
+ */
+
+#ifndef __CVMX_NPEI_DEFS_H__
+#define __CVMX_NPEI_DEFS_H__
+
+/*
+ * NPEI CSR offsets. Array registers take an index that is masked to the
+ * valid range ((offset) & 31 or & 7) before scaling by the 16-byte stride.
+ */
+#define CVMX_NPEI_BAR1_INDEXX(offset) \
+ (0x0000000000000000ull + ((offset) & 31) * 16)
+#define CVMX_NPEI_BIST_STATUS (0x0000000000000580ull)
+#define CVMX_NPEI_BIST_STATUS2 (0x0000000000000680ull)
+#define CVMX_NPEI_CTL_PORT0 (0x0000000000000250ull)
+#define CVMX_NPEI_CTL_PORT1 (0x0000000000000260ull)
+#define CVMX_NPEI_CTL_STATUS (0x0000000000000570ull)
+#define CVMX_NPEI_CTL_STATUS2 (0x0000000000003C00ull)
+#define CVMX_NPEI_DATA_OUT_CNT (0x00000000000005F0ull)
+#define CVMX_NPEI_DBG_DATA (0x0000000000000510ull)
+#define CVMX_NPEI_DBG_SELECT (0x0000000000000500ull)
+#define CVMX_NPEI_DMA0_INT_LEVEL (0x00000000000005C0ull)
+#define CVMX_NPEI_DMA1_INT_LEVEL (0x00000000000005D0ull)
+#define CVMX_NPEI_DMAX_COUNTS(offset) \
+ (0x0000000000000450ull + ((offset) & 7) * 16)
+#define CVMX_NPEI_DMAX_DBELL(offset) (0x00000000000003B0ull + ((offset) & 7) * 16)
+#define CVMX_NPEI_DMAX_IBUFF_SADDR(offset) \
+ (0x0000000000000400ull + ((offset) & 7) * 16)
+#define CVMX_NPEI_DMAX_NADDR(offset) (0x00000000000004A0ull + ((offset) & 7) * 16)
+#define CVMX_NPEI_DMA_CNTS (0x00000000000005E0ull)
+#define CVMX_NPEI_DMA_CONTROL (0x00000000000003A0ull)
+#define CVMX_NPEI_DMA_PCIE_REQ_NUM (0x00000000000005B0ull)
+#define CVMX_NPEI_DMA_STATE1 (0x00000000000006C0ull)
+#define CVMX_NPEI_DMA_STATE1_P1 (0x0000000000000680ull)
+#define CVMX_NPEI_DMA_STATE2 (0x00000000000006D0ull)
+#define CVMX_NPEI_DMA_STATE2_P1 (0x0000000000000690ull)
+#define CVMX_NPEI_DMA_STATE3_P1 (0x00000000000006A0ull)
+#define CVMX_NPEI_DMA_STATE4_P1 (0x00000000000006B0ull)
+#define CVMX_NPEI_DMA_STATE5_P1 (0x00000000000006C0ull)
+#define CVMX_NPEI_INT_A_ENB (0x0000000000000560ull)
+#define CVMX_NPEI_INT_A_ENB2 (0x0000000000003CE0ull)
+#define CVMX_NPEI_INT_A_SUM (0x0000000000000550ull)
+#define CVMX_NPEI_INT_ENB (0x0000000000000540ull)
+#define CVMX_NPEI_INT_ENB2 (0x0000000000003CD0ull)
+#define CVMX_NPEI_INT_INFO (0x0000000000000590ull)
+#define CVMX_NPEI_INT_SUM (0x0000000000000530ull)
+#define CVMX_NPEI_INT_SUM2 (0x0000000000003CC0ull)
+#define CVMX_NPEI_LAST_WIN_RDATA0 (0x0000000000000600ull)
+#define CVMX_NPEI_LAST_WIN_RDATA1 (0x0000000000000610ull)
+#define CVMX_NPEI_MEM_ACCESS_CTL (0x00000000000004F0ull)
+#define CVMX_NPEI_MEM_ACCESS_SUBIDX(offset) \
+ (0x0000000000000280ull + ((offset) & 31) * 16 - 16 * 12)
+#define CVMX_NPEI_MSI_ENB0 (0x0000000000003C50ull)
+#define CVMX_NPEI_MSI_ENB1 (0x0000000000003C60ull)
+#define CVMX_NPEI_MSI_ENB2 (0x0000000000003C70ull)
+#define CVMX_NPEI_MSI_ENB3 (0x0000000000003C80ull)
+#define CVMX_NPEI_MSI_RCV0 (0x0000000000003C10ull)
+#define CVMX_NPEI_MSI_RCV1 (0x0000000000003C20ull)
+#define CVMX_NPEI_MSI_RCV2 (0x0000000000003C30ull)
+#define CVMX_NPEI_MSI_RCV3 (0x0000000000003C40ull)
+#define CVMX_NPEI_MSI_RD_MAP (0x0000000000003CA0ull)
+#define CVMX_NPEI_MSI_W1C_ENB0 (0x0000000000003CF0ull)
+#define CVMX_NPEI_MSI_W1C_ENB1 (0x0000000000003D00ull)
+#define CVMX_NPEI_MSI_W1C_ENB2 (0x0000000000003D10ull)
+#define CVMX_NPEI_MSI_W1C_ENB3 (0x0000000000003D20ull)
+#define CVMX_NPEI_MSI_W1S_ENB0 (0x0000000000003D30ull)
+#define CVMX_NPEI_MSI_W1S_ENB1 (0x0000000000003D40ull)
+#define CVMX_NPEI_MSI_W1S_ENB2 (0x0000000000003D50ull)
+#define CVMX_NPEI_MSI_W1S_ENB3 (0x0000000000003D60ull)
+#define CVMX_NPEI_MSI_WR_MAP (0x0000000000003C90ull)
+#define CVMX_NPEI_PCIE_CREDIT_CNT (0x0000000000003D70ull)
+#define CVMX_NPEI_PCIE_MSI_RCV (0x0000000000003CB0ull)
+#define CVMX_NPEI_PCIE_MSI_RCV_B1 (0x0000000000000650ull)
+#define CVMX_NPEI_PCIE_MSI_RCV_B2 (0x0000000000000660ull)
+#define CVMX_NPEI_PCIE_MSI_RCV_B3 (0x0000000000000670ull)
+#define CVMX_NPEI_PKTX_CNTS(offset) (0x0000000000002400ull + ((offset) & 31) * 16)
+#define CVMX_NPEI_PKTX_INSTR_BADDR(offset) \
+ (0x0000000000002800ull + ((offset) & 31) * 16)
+#define CVMX_NPEI_PKTX_INSTR_BAOFF_DBELL(offset) \
+ (0x0000000000002C00ull + ((offset) & 31) * 16)
+#define CVMX_NPEI_PKTX_INSTR_FIFO_RSIZE(offset) \
+ (0x0000000000003000ull + ((offset) & 31) * 16)
+#define CVMX_NPEI_PKTX_INSTR_HEADER(offset) \
+ (0x0000000000003400ull + ((offset) & 31) * 16)
+#define CVMX_NPEI_PKTX_IN_BP(offset) \
+ (0x0000000000003800ull + ((offset) & 31) * 16)
+#define CVMX_NPEI_PKTX_SLIST_BADDR(offset) \
+ (0x0000000000001400ull + ((offset) & 31) * 16)
+#define CVMX_NPEI_PKTX_SLIST_BAOFF_DBELL(offset) \
+ (0x0000000000001800ull + ((offset) & 31) * 16)
+#define CVMX_NPEI_PKTX_SLIST_FIFO_RSIZE(offset) \
+ (0x0000000000001C00ull + ((offset) & 31) * 16)
+#define CVMX_NPEI_PKT_CNT_INT (0x0000000000001110ull)
+#define CVMX_NPEI_PKT_CNT_INT_ENB (0x0000000000001130ull)
+#define CVMX_NPEI_PKT_DATA_OUT_ES (0x00000000000010B0ull)
+#define CVMX_NPEI_PKT_DATA_OUT_NS (0x00000000000010A0ull)
+#define CVMX_NPEI_PKT_DATA_OUT_ROR (0x0000000000001090ull)
+#define CVMX_NPEI_PKT_DPADDR (0x0000000000001080ull)
+#define CVMX_NPEI_PKT_INPUT_CONTROL (0x0000000000001150ull)
+#define CVMX_NPEI_PKT_INSTR_ENB (0x0000000000001000ull)
+#define CVMX_NPEI_PKT_INSTR_RD_SIZE (0x0000000000001190ull)
+#define CVMX_NPEI_PKT_INSTR_SIZE (0x0000000000001020ull)
+#define CVMX_NPEI_PKT_INT_LEVELS (0x0000000000001100ull)
+#define CVMX_NPEI_PKT_IN_BP (0x00000000000006B0ull)
+#define CVMX_NPEI_PKT_IN_DONEX_CNTS(offset) \
+ (0x0000000000002000ull + ((offset) & 31) * 16)
+#define CVMX_NPEI_PKT_IN_INSTR_COUNTS (0x00000000000006A0ull)
+#define CVMX_NPEI_PKT_IN_PCIE_PORT (0x00000000000011A0ull)
+#define CVMX_NPEI_PKT_IPTR (0x0000000000001070ull)
+#define CVMX_NPEI_PKT_OUTPUT_WMARK (0x0000000000001160ull)
+#define CVMX_NPEI_PKT_OUT_BMODE (0x00000000000010D0ull)
+#define CVMX_NPEI_PKT_OUT_ENB (0x0000000000001010ull)
+#define CVMX_NPEI_PKT_PCIE_PORT (0x00000000000010E0ull)
+#define CVMX_NPEI_PKT_PORT_IN_RST (0x0000000000000690ull)
+#define CVMX_NPEI_PKT_SLIST_ES (0x0000000000001050ull)
+#define CVMX_NPEI_PKT_SLIST_ID_SIZE (0x0000000000001180ull)
+#define CVMX_NPEI_PKT_SLIST_NS (0x0000000000001040ull)
+#define CVMX_NPEI_PKT_SLIST_ROR (0x0000000000001030ull)
+#define CVMX_NPEI_PKT_TIME_INT (0x0000000000001120ull)
+#define CVMX_NPEI_PKT_TIME_INT_ENB (0x0000000000001140ull)
+#define CVMX_NPEI_RSL_INT_BLOCKS (0x0000000000000520ull)
+#define CVMX_NPEI_SCRATCH_1 (0x0000000000000270ull)
+#define CVMX_NPEI_STATE1 (0x0000000000000620ull)
+#define CVMX_NPEI_STATE2 (0x0000000000000630ull)
+#define CVMX_NPEI_STATE3 (0x0000000000000640ull)
+#define CVMX_NPEI_WINDOW_CTL (0x0000000000000380ull)
+#define CVMX_NPEI_WIN_RD_ADDR (0x0000000000000210ull)
+#define CVMX_NPEI_WIN_RD_DATA (0x0000000000000240ull)
+#define CVMX_NPEI_WIN_WR_ADDR (0x0000000000000200ull)
+#define CVMX_NPEI_WIN_WR_DATA (0x0000000000000220ull)
+#define CVMX_NPEI_WIN_WR_MASK (0x0000000000000230ull)
+
+/**
+ * cvmx_npei_bar1_index#
+ *
+ * Total Address is 16Kb; 0x0000 - 0x3fff, 0x000 - 0x7fe(Reg, every other 8B)
+ *
+ * General 5kb; 0x0000 - 0x13ff, 0x000 - 0x27e(Reg-General)
+ * PktMem 10Kb; 0x1400 - 0x3bff, 0x280 - 0x77e(Reg-General-Packet)
+ * Rsvd 1Kb; 0x3c00 - 0x3fff, 0x780 - 0x7fe(Reg-NCB Only Mode)
+ * == NPEI_PKT_CNT_INT_ENB[PORT]
+ * == NPEI_PKT_TIME_INT_ENB[PORT]
+ * == NPEI_PKT_CNT_INT[PORT]
+ * == NPEI_PKT_TIME_INT[PORT]
+ * == NPEI_PKT_PCIE_PORT[PP]
+ * == NPEI_PKT_SLIST_ROR[ROR]
+ * == NPEI_PKT_SLIST_ROR[NSR] ?
+ * == NPEI_PKT_SLIST_ES[ES]
+ * == NPEI_PKTn_SLIST_BAOFF_DBELL[AOFF]
+ * == NPEI_PKTn_SLIST_BAOFF_DBELL[DBELL]
+ * == NPEI_PKTn_CNTS[CNT]
+ * NPEI_CTL_STATUS[OUTn_ENB] == NPEI_PKT_OUT_ENB[ENB]
+ * NPEI_BASE_ADDRESS_OUTPUTn[BADDR] == NPEI_PKTn_SLIST_BADDR[ADDR]
+ * NPEI_DESC_OUTPUTn[SIZE] == NPEI_PKTn_SLIST_FIFO_RSIZE[RSIZE]
+ * NPEI_Pn_DBPAIR_ADDR[NADDR] == NPEI_PKTn_SLIST_BADDR[ADDR] +
+ * NPEI_PKTn_SLIST_BAOFF_DBELL[AOFF]
+ * NPEI_PKT_CREDITSn[PTR_CNT] == NPEI_PKTn_SLIST_BAOFF_DBELL[DBELL]
+ * NPEI_P0_PAIR_CNTS[AVAIL] == NPEI_PKTn_SLIST_BAOFF_DBELL[DBELL]
+ * NPEI_P0_PAIR_CNTS[FCNT] ==
+ * NPEI_PKTS_SENTn[PKT_CNT] == NPEI_PKTn_CNTS[CNT]
+ * NPEI_OUTPUT_CONTROL[Pn_BMODE] == NPEI_PKT_OUT_BMODE[BMODE]
+ * NPEI_PKT_CREDITSn[PKT_CNT] == NPEI_PKTn_CNTS[CNT]
+ * NPEI_BUFF_SIZE_OUTPUTn[BSIZE] == NPEI_PKT_SLIST_ID_SIZE[BSIZE]
+ * NPEI_BUFF_SIZE_OUTPUTn[ISIZE] == NPEI_PKT_SLIST_ID_SIZE[ISIZE]
+ * NPEI_OUTPUT_CONTROL[On_CSRM] == NPEI_PKT_DPADDR[DPTR] &
+ * NPEI_PKT_OUT_USE_IPTR[PORT]
+ * NPEI_OUTPUT_CONTROL[On_ES] == NPEI_PKT_DATA_OUT_ES[ES]
+ * NPEI_OUTPUT_CONTROL[On_NS] == NPEI_PKT_DATA_OUT_NS[NSR] ?
+ * NPEI_OUTPUT_CONTROL[On_RO] == NPEI_PKT_DATA_OUT_ROR[ROR]
+ * NPEI_PKTS_SENT_INT_LEVn[PKT_CNT] == NPEI_PKT_INT_LEVELS[CNT]
+ * NPEI_PKTS_SENT_TIMEn[PKT_TIME] == NPEI_PKT_INT_LEVELS[TIME]
+ * NPEI_OUTPUT_CONTROL[IPTR_On] == NPEI_PKT_IPTR[IPTR]
+ * NPEI_PCIE_PORT_OUTPUT[] == NPEI_PKT_PCIE_PORT[PP]
+ *
+ * NPEI_BAR1_INDEXX = NPEI BAR1 IndexX Register
+ *
+ * Contains address index and control bits for access to memory ranges of
+ * BAR-1. Index is built from supplied address [25:22].
+ * NPEI_BAR1_INDEX0 through NPEI_BAR1_INDEX15 is used for transactions
+ * originating with PCIE-PORT0 and NPEI_BAR1_INDEX16
+ * through NPEI_BAR1_INDEX31 is used for transactions originating with
+ * PCIE-PORT1.
+ */
+union cvmx_npei_bar1_indexx {
+ u32 u32;
+ struct cvmx_npei_bar1_indexx_s {
+ u32 reserved_18_31 : 14;
+ u32 addr_idx : 14; /* Address index for the BAR-1 window */
+ u32 ca : 1; /* NOTE(review): presumably cached/endian-control bit - confirm with HRM */
+ u32 end_swp : 2; /* Endian-swap mode */
+ u32 addr_v : 1; /* Index entry valid */
+ } s;
+ struct cvmx_npei_bar1_indexx_s cn52xx;
+ struct cvmx_npei_bar1_indexx_s cn52xxp1;
+ struct cvmx_npei_bar1_indexx_s cn56xx;
+ struct cvmx_npei_bar1_indexx_s cn56xxp1;
+};
+
+typedef union cvmx_npei_bar1_indexx cvmx_npei_bar1_indexx_t;
+
+/**
+ * cvmx_npei_bist_status
+ *
+ * NPEI_BIST_STATUS = NPI's BIST Status Register
+ *
+ * Results from BIST runs of NPEI's memories.
+ * NOTE(review): per-bit encoding presumed 0 = pass, 1 = fail as in other
+ * Octeon BIST registers - confirm with HRM. Field layout is model-specific;
+ * use the struct matching the running chip.
+ */
+union cvmx_npei_bist_status {
+ u64 u64;
+ struct cvmx_npei_bist_status_s {
+ u64 pkt_rdf : 1;
+ u64 reserved_60_62 : 3;
+ u64 pcr_gim : 1;
+ u64 pkt_pif : 1;
+ u64 pcsr_int : 1;
+ u64 pcsr_im : 1;
+ u64 pcsr_cnt : 1;
+ u64 pcsr_id : 1;
+ u64 pcsr_sl : 1;
+ u64 reserved_50_52 : 3;
+ u64 pkt_ind : 1;
+ u64 pkt_slm : 1;
+ u64 reserved_36_47 : 12;
+ u64 d0_pst : 1;
+ u64 d1_pst : 1;
+ u64 d2_pst : 1;
+ u64 d3_pst : 1;
+ u64 reserved_31_31 : 1;
+ u64 n2p0_c : 1;
+ u64 n2p0_o : 1;
+ u64 n2p1_c : 1;
+ u64 n2p1_o : 1;
+ u64 cpl_p0 : 1;
+ u64 cpl_p1 : 1;
+ u64 p2n1_po : 1;
+ u64 p2n1_no : 1;
+ u64 p2n1_co : 1;
+ u64 p2n0_po : 1;
+ u64 p2n0_no : 1;
+ u64 p2n0_co : 1;
+ u64 p2n0_c0 : 1;
+ u64 p2n0_c1 : 1;
+ u64 p2n0_n : 1;
+ u64 p2n0_p0 : 1;
+ u64 p2n0_p1 : 1;
+ u64 p2n1_c0 : 1;
+ u64 p2n1_c1 : 1;
+ u64 p2n1_n : 1;
+ u64 p2n1_p0 : 1;
+ u64 p2n1_p1 : 1;
+ u64 csm0 : 1;
+ u64 csm1 : 1;
+ u64 dif0 : 1;
+ u64 dif1 : 1;
+ u64 dif2 : 1;
+ u64 dif3 : 1;
+ u64 reserved_2_2 : 1;
+ u64 msi : 1;
+ u64 ncb_cmd : 1;
+ } s;
+ struct cvmx_npei_bist_status_cn52xx {
+ u64 pkt_rdf : 1;
+ u64 reserved_60_62 : 3;
+ u64 pcr_gim : 1;
+ u64 pkt_pif : 1;
+ u64 pcsr_int : 1;
+ u64 pcsr_im : 1;
+ u64 pcsr_cnt : 1;
+ u64 pcsr_id : 1;
+ u64 pcsr_sl : 1;
+ u64 pkt_imem : 1;
+ u64 pkt_pfm : 1;
+ u64 pkt_pof : 1;
+ u64 reserved_48_49 : 2;
+ u64 pkt_pop0 : 1;
+ u64 pkt_pop1 : 1;
+ u64 d0_mem : 1;
+ u64 d1_mem : 1;
+ u64 d2_mem : 1;
+ u64 d3_mem : 1;
+ u64 d4_mem : 1;
+ u64 ds_mem : 1;
+ u64 reserved_36_39 : 4;
+ u64 d0_pst : 1;
+ u64 d1_pst : 1;
+ u64 d2_pst : 1;
+ u64 d3_pst : 1;
+ u64 d4_pst : 1;
+ u64 n2p0_c : 1;
+ u64 n2p0_o : 1;
+ u64 n2p1_c : 1;
+ u64 n2p1_o : 1;
+ u64 cpl_p0 : 1;
+ u64 cpl_p1 : 1;
+ u64 p2n1_po : 1;
+ u64 p2n1_no : 1;
+ u64 p2n1_co : 1;
+ u64 p2n0_po : 1;
+ u64 p2n0_no : 1;
+ u64 p2n0_co : 1;
+ u64 p2n0_c0 : 1;
+ u64 p2n0_c1 : 1;
+ u64 p2n0_n : 1;
+ u64 p2n0_p0 : 1;
+ u64 p2n0_p1 : 1;
+ u64 p2n1_c0 : 1;
+ u64 p2n1_c1 : 1;
+ u64 p2n1_n : 1;
+ u64 p2n1_p0 : 1;
+ u64 p2n1_p1 : 1;
+ u64 csm0 : 1;
+ u64 csm1 : 1;
+ u64 dif0 : 1;
+ u64 dif1 : 1;
+ u64 dif2 : 1;
+ u64 dif3 : 1;
+ u64 dif4 : 1;
+ u64 msi : 1;
+ u64 ncb_cmd : 1;
+ } cn52xx;
+ struct cvmx_npei_bist_status_cn52xxp1 {
+ u64 reserved_46_63 : 18;
+ u64 d0_mem0 : 1;
+ u64 d1_mem1 : 1;
+ u64 d2_mem2 : 1;
+ u64 d3_mem3 : 1;
+ u64 dr0_mem : 1;
+ u64 d0_mem : 1;
+ u64 d1_mem : 1;
+ u64 d2_mem : 1;
+ u64 d3_mem : 1;
+ u64 dr1_mem : 1;
+ u64 d0_pst : 1;
+ u64 d1_pst : 1;
+ u64 d2_pst : 1;
+ u64 d3_pst : 1;
+ u64 dr2_mem : 1;
+ u64 n2p0_c : 1;
+ u64 n2p0_o : 1;
+ u64 n2p1_c : 1;
+ u64 n2p1_o : 1;
+ u64 cpl_p0 : 1;
+ u64 cpl_p1 : 1;
+ u64 p2n1_po : 1;
+ u64 p2n1_no : 1;
+ u64 p2n1_co : 1;
+ u64 p2n0_po : 1;
+ u64 p2n0_no : 1;
+ u64 p2n0_co : 1;
+ u64 p2n0_c0 : 1;
+ u64 p2n0_c1 : 1;
+ u64 p2n0_n : 1;
+ u64 p2n0_p0 : 1;
+ u64 p2n0_p1 : 1;
+ u64 p2n1_c0 : 1;
+ u64 p2n1_c1 : 1;
+ u64 p2n1_n : 1;
+ u64 p2n1_p0 : 1;
+ u64 p2n1_p1 : 1;
+ u64 csm0 : 1;
+ u64 csm1 : 1;
+ u64 dif0 : 1;
+ u64 dif1 : 1;
+ u64 dif2 : 1;
+ u64 dif3 : 1;
+ u64 dr3_mem : 1;
+ u64 msi : 1;
+ u64 ncb_cmd : 1;
+ } cn52xxp1;
+ struct cvmx_npei_bist_status_cn52xx cn56xx;
+ struct cvmx_npei_bist_status_cn56xxp1 {
+ u64 reserved_58_63 : 6;
+ u64 pcsr_int : 1;
+ u64 pcsr_im : 1;
+ u64 pcsr_cnt : 1;
+ u64 pcsr_id : 1;
+ u64 pcsr_sl : 1;
+ u64 pkt_pout : 1;
+ u64 pkt_imem : 1;
+ u64 pkt_cntm : 1;
+ u64 pkt_ind : 1;
+ u64 pkt_slm : 1;
+ u64 pkt_odf : 1;
+ u64 pkt_oif : 1;
+ u64 pkt_out : 1;
+ u64 pkt_i0 : 1;
+ u64 pkt_i1 : 1;
+ u64 pkt_s0 : 1;
+ u64 pkt_s1 : 1;
+ u64 d0_mem : 1;
+ u64 d1_mem : 1;
+ u64 d2_mem : 1;
+ u64 d3_mem : 1;
+ u64 d4_mem : 1;
+ u64 d0_pst : 1;
+ u64 d1_pst : 1;
+ u64 d2_pst : 1;
+ u64 d3_pst : 1;
+ u64 d4_pst : 1;
+ u64 n2p0_c : 1;
+ u64 n2p0_o : 1;
+ u64 n2p1_c : 1;
+ u64 n2p1_o : 1;
+ u64 cpl_p0 : 1;
+ u64 cpl_p1 : 1;
+ u64 p2n1_po : 1;
+ u64 p2n1_no : 1;
+ u64 p2n1_co : 1;
+ u64 p2n0_po : 1;
+ u64 p2n0_no : 1;
+ u64 p2n0_co : 1;
+ u64 p2n0_c0 : 1;
+ u64 p2n0_c1 : 1;
+ u64 p2n0_n : 1;
+ u64 p2n0_p0 : 1;
+ u64 p2n0_p1 : 1;
+ u64 p2n1_c0 : 1;
+ u64 p2n1_c1 : 1;
+ u64 p2n1_n : 1;
+ u64 p2n1_p0 : 1;
+ u64 p2n1_p1 : 1;
+ u64 csm0 : 1;
+ u64 csm1 : 1;
+ u64 dif0 : 1;
+ u64 dif1 : 1;
+ u64 dif2 : 1;
+ u64 dif3 : 1;
+ u64 dif4 : 1;
+ u64 msi : 1;
+ u64 ncb_cmd : 1;
+ } cn56xxp1;
+};
+
+typedef union cvmx_npei_bist_status cvmx_npei_bist_status_t;
+
+/**
+ * cvmx_npei_bist_status2
+ *
+ * NPEI_BIST_STATUS2 = NPI's BIST Status Register2
+ *
+ * Results from BIST runs of NPEI's memories.
+ * NOTE(review): per-bit encoding presumed 0 = pass, 1 = fail as in other
+ * Octeon BIST registers - confirm with HRM.
+ */
+union cvmx_npei_bist_status2 {
+ u64 u64;
+ struct cvmx_npei_bist_status2_s {
+ u64 reserved_14_63 : 50;
+ u64 prd_tag : 1;
+ u64 prd_st0 : 1;
+ u64 prd_st1 : 1;
+ u64 prd_err : 1;
+ u64 nrd_st : 1;
+ u64 nwe_st : 1;
+ u64 nwe_wr0 : 1;
+ u64 nwe_wr1 : 1;
+ u64 pkt_rd : 1;
+ u64 psc_p0 : 1;
+ u64 psc_p1 : 1;
+ u64 pkt_gd : 1;
+ u64 pkt_gl : 1;
+ u64 pkt_blk : 1;
+ } s;
+ struct cvmx_npei_bist_status2_s cn52xx;
+ struct cvmx_npei_bist_status2_s cn56xx;
+};
+
+typedef union cvmx_npei_bist_status2 cvmx_npei_bist_status2_t;
+
+/**
+ * cvmx_npei_ctl_port0
+ *
+ * NPEI_CTL_PORT0 = NPEI's Control Port 0
+ *
+ * Contains control for access for Port0. Same layout as NPEI_CTL_PORT1.
+ */
+union cvmx_npei_ctl_port0 {
+ u64 u64;
+ struct cvmx_npei_ctl_port0_s {
+ u64 reserved_21_63 : 43;
+ u64 waitl_com : 1;
+ u64 intd : 1; /* INTA-INTD status bits */
+ u64 intc : 1;
+ u64 intb : 1;
+ u64 inta : 1;
+ u64 intd_map : 2; /* 2-bit INTA-INTD mapping selects */
+ u64 intc_map : 2;
+ u64 intb_map : 2;
+ u64 inta_map : 2;
+ u64 ctlp_ro : 1;
+ u64 reserved_6_6 : 1;
+ u64 ptlp_ro : 1;
+ u64 bar2_enb : 1; /* BAR2 enable and access attributes */
+ u64 bar2_esx : 2;
+ u64 bar2_cax : 1;
+ u64 wait_com : 1;
+ } s;
+ struct cvmx_npei_ctl_port0_s cn52xx;
+ struct cvmx_npei_ctl_port0_s cn52xxp1;
+ struct cvmx_npei_ctl_port0_s cn56xx;
+ struct cvmx_npei_ctl_port0_s cn56xxp1;
+};
+
+typedef union cvmx_npei_ctl_port0 cvmx_npei_ctl_port0_t;
+
+/**
+ * cvmx_npei_ctl_port1
+ *
+ * NPEI_CTL_PORT1 = NPEI's Control Port1
+ *
+ * Contains control for access for Port1. Same layout as NPEI_CTL_PORT0.
+ */
+union cvmx_npei_ctl_port1 {
+ u64 u64;
+ struct cvmx_npei_ctl_port1_s {
+ u64 reserved_21_63 : 43;
+ u64 waitl_com : 1;
+ u64 intd : 1; /* INTA-INTD status bits */
+ u64 intc : 1;
+ u64 intb : 1;
+ u64 inta : 1;
+ u64 intd_map : 2; /* 2-bit INTA-INTD mapping selects */
+ u64 intc_map : 2;
+ u64 intb_map : 2;
+ u64 inta_map : 2;
+ u64 ctlp_ro : 1;
+ u64 reserved_6_6 : 1;
+ u64 ptlp_ro : 1;
+ u64 bar2_enb : 1; /* BAR2 enable and access attributes */
+ u64 bar2_esx : 2;
+ u64 bar2_cax : 1;
+ u64 wait_com : 1;
+ } s;
+ struct cvmx_npei_ctl_port1_s cn52xx;
+ struct cvmx_npei_ctl_port1_s cn52xxp1;
+ struct cvmx_npei_ctl_port1_s cn56xx;
+ struct cvmx_npei_ctl_port1_s cn56xxp1;
+};
+
+typedef union cvmx_npei_ctl_port1 cvmx_npei_ctl_port1_t;
+
+/**
+ * cvmx_npei_ctl_status
+ *
+ * NPEI_CTL_STATUS = NPEI Control Status Register
+ *
+ * Contains control and status for NPEI. Writes to this register are not
+ * ordered with writes/reads to the PCIe Memory space.
+ * To ensure that a write has completed the user must read the register
+ * before making an access(i.e. PCIe memory space)
+ * that requires the value of this register to be updated.
+ */
+union cvmx_npei_ctl_status {
+ u64 u64;
+ struct cvmx_npei_ctl_status_s {
+ u64 reserved_44_63 : 20;
+ u64 p1_ntags : 6;
+ u64 p0_ntags : 6;
+ u64 cfg_rtry : 16;
+ u64 ring_en : 1;
+ u64 lnk_rst : 1;
+ u64 arb : 1;
+ u64 pkt_bp : 4;
+ u64 host_mode : 1; /* Read-only: device is PCIe host or endpoint */
+ u64 chip_rev : 8; /* Read-only chip revision */
+ } s;
+ struct cvmx_npei_ctl_status_s cn52xx;
+ struct cvmx_npei_ctl_status_cn52xxp1 {
+ u64 reserved_44_63 : 20;
+ u64 p1_ntags : 6;
+ u64 p0_ntags : 6;
+ u64 cfg_rtry : 16;
+ u64 reserved_15_15 : 1;
+ u64 lnk_rst : 1;
+ u64 arb : 1;
+ u64 reserved_9_12 : 4;
+ u64 host_mode : 1;
+ u64 chip_rev : 8;
+ } cn52xxp1;
+ struct cvmx_npei_ctl_status_s cn56xx;
+ struct cvmx_npei_ctl_status_cn56xxp1 {
+ u64 reserved_15_63 : 49;
+ u64 lnk_rst : 1;
+ u64 arb : 1;
+ u64 pkt_bp : 4;
+ u64 host_mode : 1;
+ u64 chip_rev : 8;
+ } cn56xxp1;
+};
+
+typedef union cvmx_npei_ctl_status cvmx_npei_ctl_status_t;
+
+/**
+ * cvmx_npei_ctl_status2
+ *
+ * NPEI_CTL_STATUS2 = NPEI's Control Status2 Register
+ *
+ * Contains control and status for NPEI.
+ * Writes to this register are not ordered with writes/reads to the PCI
+ * Memory space.
+ * To ensure that a write has completed the user must read the register before
+ * making an access(i.e. PCI memory space) that requires the value of this
+ * register to be updated.
+ */
+union cvmx_npei_ctl_status2 {
+ u64 u64;
+ struct cvmx_npei_ctl_status2_s {
+ u64 reserved_16_63 : 48;
+ u64 mps : 1;
+ u64 mrrs : 3;
+ u64 c1_w_flt : 1;
+ u64 c0_w_flt : 1;
+ u64 c1_b1_s : 3;
+ u64 c0_b1_s : 3;
+ u64 c1_wi_d : 1;
+ u64 c1_b0_d : 1;
+ u64 c0_wi_d : 1;
+ u64 c0_b0_d : 1;
+ } s;
+ struct cvmx_npei_ctl_status2_s cn52xx;
+ struct cvmx_npei_ctl_status2_s cn52xxp1;
+ struct cvmx_npei_ctl_status2_s cn56xx;
+ struct cvmx_npei_ctl_status2_s cn56xxp1;
+};
+
+typedef union cvmx_npei_ctl_status2 cvmx_npei_ctl_status2_t;
+
+/**
+ * cvmx_npei_data_out_cnt
+ *
+ * NPEI_DATA_OUT_CNT = NPEI DATA OUT COUNT
+ *
+ * The EXEC data out fifo-count and the data unload counter.
+ */
+union cvmx_npei_data_out_cnt {
+ u64 u64;
+ struct cvmx_npei_data_out_cnt_s {
+ u64 reserved_44_63 : 20;
+ u64 p1_ucnt : 16;
+ u64 p1_fcnt : 6;
+ u64 p0_ucnt : 16;
+ u64 p0_fcnt : 6;
+ } s;
+ struct cvmx_npei_data_out_cnt_s cn52xx;
+ struct cvmx_npei_data_out_cnt_s cn52xxp1;
+ struct cvmx_npei_data_out_cnt_s cn56xx;
+ struct cvmx_npei_data_out_cnt_s cn56xxp1;
+};
+
+typedef union cvmx_npei_data_out_cnt cvmx_npei_data_out_cnt_t;
+
+/**
+ * cvmx_npei_dbg_data
+ *
+ * NPEI_DBG_DATA = NPEI Debug Data Register
+ *
+ * Value returned on the debug-data lines from the RSLs
+ */
+union cvmx_npei_dbg_data {
+ u64 u64;
+ struct cvmx_npei_dbg_data_s {
+ u64 reserved_28_63 : 36;
+ u64 qlm0_rev_lanes : 1;
+ u64 reserved_25_26 : 2;
+ u64 qlm1_spd : 2;
+ u64 c_mul : 5;
+ u64 dsel_ext : 1;
+ u64 data : 17;
+ } s;
+ struct cvmx_npei_dbg_data_cn52xx {
+ u64 reserved_29_63 : 35;
+ u64 qlm0_link_width : 1;
+ u64 qlm0_rev_lanes : 1;
+ u64 qlm1_mode : 2;
+ u64 qlm1_spd : 2;
+ u64 c_mul : 5;
+ u64 dsel_ext : 1;
+ u64 data : 17;
+ } cn52xx;
+ struct cvmx_npei_dbg_data_cn52xx cn52xxp1;
+ struct cvmx_npei_dbg_data_cn56xx {
+ u64 reserved_29_63 : 35;
+ u64 qlm2_rev_lanes : 1;
+ u64 qlm0_rev_lanes : 1;
+ u64 qlm3_spd : 2;
+ u64 qlm1_spd : 2;
+ u64 c_mul : 5;
+ u64 dsel_ext : 1;
+ u64 data : 17;
+ } cn56xx;
+ struct cvmx_npei_dbg_data_cn56xx cn56xxp1;
+};
+
+typedef union cvmx_npei_dbg_data cvmx_npei_dbg_data_t;
+
+/**
+ * cvmx_npei_dbg_select
+ *
+ * NPEI_DBG_SELECT = Debug Select Register
+ *
+ * Contains the debug select value last written to the RSLs.
+ */
+union cvmx_npei_dbg_select {
+ u64 u64;
+ struct cvmx_npei_dbg_select_s {
+ u64 reserved_16_63 : 48;
+ u64 dbg_sel : 16;
+ } s;
+ struct cvmx_npei_dbg_select_s cn52xx;
+ struct cvmx_npei_dbg_select_s cn52xxp1;
+ struct cvmx_npei_dbg_select_s cn56xx;
+ struct cvmx_npei_dbg_select_s cn56xxp1;
+};
+
+typedef union cvmx_npei_dbg_select cvmx_npei_dbg_select_t;
+
+/**
+ * cvmx_npei_dma#_counts
+ *
+ * NPEI_DMA[0..4]_COUNTS = DMA Instruction Counts
+ *
+ * Values for determining the number of instructions for DMA[0..4] in the NPEI.
+ */
+union cvmx_npei_dmax_counts {
+ u64 u64;
+ struct cvmx_npei_dmax_counts_s {
+ u64 reserved_39_63 : 25;
+ u64 fcnt : 7;
+ u64 dbell : 32;
+ } s;
+ struct cvmx_npei_dmax_counts_s cn52xx;
+ struct cvmx_npei_dmax_counts_s cn52xxp1;
+ struct cvmx_npei_dmax_counts_s cn56xx;
+ struct cvmx_npei_dmax_counts_s cn56xxp1;
+};
+
+typedef union cvmx_npei_dmax_counts cvmx_npei_dmax_counts_t;
+
+/**
+ * cvmx_npei_dma#_dbell
+ *
+ * NPEI_DMA_DBELL[0..4] = DMA Door Bell
+ *
+ * The door bell register for DMA[0..4] queue.
+ */
+union cvmx_npei_dmax_dbell {
+ u32 u32;
+ struct cvmx_npei_dmax_dbell_s {
+ u32 reserved_16_31 : 16;
+ u32 dbell : 16;
+ } s;
+ struct cvmx_npei_dmax_dbell_s cn52xx;
+ struct cvmx_npei_dmax_dbell_s cn52xxp1;
+ struct cvmx_npei_dmax_dbell_s cn56xx;
+ struct cvmx_npei_dmax_dbell_s cn56xxp1;
+};
+
+typedef union cvmx_npei_dmax_dbell cvmx_npei_dmax_dbell_t;
+
+/**
+ * cvmx_npei_dma#_ibuff_saddr
+ *
+ * NPEI_DMA[0..4]_IBUFF_SADDR = DMA Instruction Buffer Starting Address
+ *
+ * The address to start reading Instructions from for DMA[0..4].
+ */
+union cvmx_npei_dmax_ibuff_saddr {
+ u64 u64;
+ struct cvmx_npei_dmax_ibuff_saddr_s {
+ u64 reserved_37_63 : 27;
+ u64 idle : 1;
+ u64 saddr : 29;
+ u64 reserved_0_6 : 7;
+ } s;
+ struct cvmx_npei_dmax_ibuff_saddr_s cn52xx;
+ struct cvmx_npei_dmax_ibuff_saddr_cn52xxp1 {
+ u64 reserved_36_63 : 28;
+ u64 saddr : 29;
+ u64 reserved_0_6 : 7;
+ } cn52xxp1;
+ struct cvmx_npei_dmax_ibuff_saddr_s cn56xx;
+ struct cvmx_npei_dmax_ibuff_saddr_cn52xxp1 cn56xxp1;
+};
+
+typedef union cvmx_npei_dmax_ibuff_saddr cvmx_npei_dmax_ibuff_saddr_t;
+
+/**
+ * cvmx_npei_dma#_naddr
+ *
+ * NPEI_DMA[0..4]_NADDR = DMA Next Ichunk Address
+ *
+ * Place NPEI will read the next Ichunk data from. This is valid when state is 0
+ */
+union cvmx_npei_dmax_naddr {
+ u64 u64;
+ struct cvmx_npei_dmax_naddr_s {
+ u64 reserved_36_63 : 28;
+ u64 addr : 36;
+ } s;
+ struct cvmx_npei_dmax_naddr_s cn52xx;
+ struct cvmx_npei_dmax_naddr_s cn52xxp1;
+ struct cvmx_npei_dmax_naddr_s cn56xx;
+ struct cvmx_npei_dmax_naddr_s cn56xxp1;
+};
+
+typedef union cvmx_npei_dmax_naddr cvmx_npei_dmax_naddr_t;
+
+/**
+ * cvmx_npei_dma0_int_level
+ *
+ * NPEI_DMA0_INT_LEVEL = NPEI DMA0 Interrupt Level
+ *
+ * Thresholds for DMA count and timer interrupts for DMA0.
+ */
+union cvmx_npei_dma0_int_level {
+ u64 u64;
+ struct cvmx_npei_dma0_int_level_s {
+ u64 time : 32;
+ u64 cnt : 32;
+ } s;
+ struct cvmx_npei_dma0_int_level_s cn52xx;
+ struct cvmx_npei_dma0_int_level_s cn52xxp1;
+ struct cvmx_npei_dma0_int_level_s cn56xx;
+ struct cvmx_npei_dma0_int_level_s cn56xxp1;
+};
+
+typedef union cvmx_npei_dma0_int_level cvmx_npei_dma0_int_level_t;
+
+/**
+ * cvmx_npei_dma1_int_level
+ *
+ * NPEI_DMA1_INT_LEVEL = NPEI DMA1 Interrupt Level
+ *
+ * Thresholds for DMA count and timer interrupts for DMA1.
+ */
+union cvmx_npei_dma1_int_level {
+ u64 u64;
+ struct cvmx_npei_dma1_int_level_s {
+ u64 time : 32;
+ u64 cnt : 32;
+ } s;
+ struct cvmx_npei_dma1_int_level_s cn52xx;
+ struct cvmx_npei_dma1_int_level_s cn52xxp1;
+ struct cvmx_npei_dma1_int_level_s cn56xx;
+ struct cvmx_npei_dma1_int_level_s cn56xxp1;
+};
+
+typedef union cvmx_npei_dma1_int_level cvmx_npei_dma1_int_level_t;
+
+/**
+ * cvmx_npei_dma_cnts
+ *
+ * NPEI_DMA_CNTS = NPEI DMA Count
+ *
+ * The DMA Count values for DMA0 and DMA1.
+ */
+union cvmx_npei_dma_cnts {
+ u64 u64;
+ struct cvmx_npei_dma_cnts_s {
+ u64 dma1 : 32;
+ u64 dma0 : 32;
+ } s;
+ struct cvmx_npei_dma_cnts_s cn52xx;
+ struct cvmx_npei_dma_cnts_s cn52xxp1;
+ struct cvmx_npei_dma_cnts_s cn56xx;
+ struct cvmx_npei_dma_cnts_s cn56xxp1;
+};
+
+typedef union cvmx_npei_dma_cnts cvmx_npei_dma_cnts_t;
+
+/**
+ * cvmx_npei_dma_control
+ *
+ * NPEI_DMA_CONTROL = DMA Control Register
+ *
+ * Controls operation of the DMA IN/OUT.
+ */
+union cvmx_npei_dma_control {
+ u64 u64;
+ struct cvmx_npei_dma_control_s {
+ u64 reserved_40_63 : 24;
+ u64 p_32b_m : 1;
+ u64 dma4_enb : 1;
+ u64 dma3_enb : 1;
+ u64 dma2_enb : 1;
+ u64 dma1_enb : 1;
+ u64 dma0_enb : 1;
+ u64 b0_lend : 1;
+ u64 dwb_denb : 1;
+ u64 dwb_ichk : 9;
+ u64 fpa_que : 3;
+ u64 o_add1 : 1;
+ u64 o_ro : 1;
+ u64 o_ns : 1;
+ u64 o_es : 2;
+ u64 o_mode : 1;
+ u64 csize : 14;
+ } s;
+ struct cvmx_npei_dma_control_s cn52xx;
+ struct cvmx_npei_dma_control_cn52xxp1 {
+ u64 reserved_38_63 : 26;
+ u64 dma3_enb : 1;
+ u64 dma2_enb : 1;
+ u64 dma1_enb : 1;
+ u64 dma0_enb : 1;
+ u64 b0_lend : 1;
+ u64 dwb_denb : 1;
+ u64 dwb_ichk : 9;
+ u64 fpa_que : 3;
+ u64 o_add1 : 1;
+ u64 o_ro : 1;
+ u64 o_ns : 1;
+ u64 o_es : 2;
+ u64 o_mode : 1;
+ u64 csize : 14;
+ } cn52xxp1;
+ struct cvmx_npei_dma_control_s cn56xx;
+ struct cvmx_npei_dma_control_cn56xxp1 {
+ u64 reserved_39_63 : 25;
+ u64 dma4_enb : 1;
+ u64 dma3_enb : 1;
+ u64 dma2_enb : 1;
+ u64 dma1_enb : 1;
+ u64 dma0_enb : 1;
+ u64 b0_lend : 1;
+ u64 dwb_denb : 1;
+ u64 dwb_ichk : 9;
+ u64 fpa_que : 3;
+ u64 o_add1 : 1;
+ u64 o_ro : 1;
+ u64 o_ns : 1;
+ u64 o_es : 2;
+ u64 o_mode : 1;
+ u64 csize : 14;
+ } cn56xxp1;
+};
+
+typedef union cvmx_npei_dma_control cvmx_npei_dma_control_t;
+
+/**
+ * cvmx_npei_dma_pcie_req_num
+ *
+ * NPEI_DMA_PCIE_REQ_NUM = NPEI DMA PCIE Outstanding Read Request Number
+ *
+ * Outstanding PCIE read request number for DMAs and Packet, maximum number
+ * is 16
+ */
+union cvmx_npei_dma_pcie_req_num {
+ u64 u64;
+ struct cvmx_npei_dma_pcie_req_num_s {
+ u64 dma_arb : 1;
+ u64 reserved_53_62 : 10;
+ u64 pkt_cnt : 5;
+ u64 reserved_45_47 : 3;
+ u64 dma4_cnt : 5;
+ u64 reserved_37_39 : 3;
+ u64 dma3_cnt : 5;
+ u64 reserved_29_31 : 3;
+ u64 dma2_cnt : 5;
+ u64 reserved_21_23 : 3;
+ u64 dma1_cnt : 5;
+ u64 reserved_13_15 : 3;
+ u64 dma0_cnt : 5;
+ u64 reserved_5_7 : 3;
+ u64 dma_cnt : 5;
+ } s;
+ struct cvmx_npei_dma_pcie_req_num_s cn52xx;
+ struct cvmx_npei_dma_pcie_req_num_s cn56xx;
+};
+
+typedef union cvmx_npei_dma_pcie_req_num cvmx_npei_dma_pcie_req_num_t;
+
+/**
+ * cvmx_npei_dma_state1
+ *
+ * NPEI_DMA_STATE1 = NPI's DMA State 1
+ *
+ * Results from DMA state register 1
+ */
+union cvmx_npei_dma_state1 {
+ u64 u64;
+ struct cvmx_npei_dma_state1_s {
+ u64 reserved_40_63 : 24;
+ u64 d4_dwe : 8;
+ u64 d3_dwe : 8;
+ u64 d2_dwe : 8;
+ u64 d1_dwe : 8;
+ u64 d0_dwe : 8;
+ } s;
+ struct cvmx_npei_dma_state1_s cn52xx;
+};
+
+typedef union cvmx_npei_dma_state1 cvmx_npei_dma_state1_t;
+
+/**
+ * cvmx_npei_dma_state1_p1
+ *
+ * NPEI_DMA_STATE1_P1 = NPEI DMA Request and Instruction State
+ *
+ * DMA engine Debug information.
+ */
+union cvmx_npei_dma_state1_p1 {
+ u64 u64;
+ struct cvmx_npei_dma_state1_p1_s {
+ u64 reserved_60_63 : 4;
+ u64 d0_difst : 7;
+ u64 d1_difst : 7;
+ u64 d2_difst : 7;
+ u64 d3_difst : 7;
+ u64 d4_difst : 7;
+ u64 d0_reqst : 5;
+ u64 d1_reqst : 5;
+ u64 d2_reqst : 5;
+ u64 d3_reqst : 5;
+ u64 d4_reqst : 5;
+ } s;
+ struct cvmx_npei_dma_state1_p1_cn52xxp1 {
+ u64 reserved_60_63 : 4;
+ u64 d0_difst : 7;
+ u64 d1_difst : 7;
+ u64 d2_difst : 7;
+ u64 d3_difst : 7;
+ u64 reserved_25_31 : 7;
+ u64 d0_reqst : 5;
+ u64 d1_reqst : 5;
+ u64 d2_reqst : 5;
+ u64 d3_reqst : 5;
+ u64 reserved_0_4 : 5;
+ } cn52xxp1;
+ struct cvmx_npei_dma_state1_p1_s cn56xxp1;
+};
+
+typedef union cvmx_npei_dma_state1_p1 cvmx_npei_dma_state1_p1_t;
+
+/**
+ * cvmx_npei_dma_state2
+ *
+ * NPEI_DMA_STATE2 = NPI's DMA State 2
+ *
+ * Results from DMA state register 2
+ */
+union cvmx_npei_dma_state2 {
+ u64 u64;
+ struct cvmx_npei_dma_state2_s {
+ u64 reserved_28_63 : 36;
+ u64 ndwe : 4;
+ u64 reserved_21_23 : 3;
+ u64 ndre : 5;
+ u64 reserved_10_15 : 6;
+ u64 prd : 10;
+ } s;
+ struct cvmx_npei_dma_state2_s cn52xx;
+};
+
+typedef union cvmx_npei_dma_state2 cvmx_npei_dma_state2_t;
+
+/**
+ * cvmx_npei_dma_state2_p1
+ *
+ * NPEI_DMA_STATE2_P1 = NPEI DMA Instruction Fetch State
+ *
+ * DMA engine Debug information.
+ */
+union cvmx_npei_dma_state2_p1 {
+ u64 u64;
+ struct cvmx_npei_dma_state2_p1_s {
+ u64 reserved_45_63 : 19;
+ u64 d0_dffst : 9;
+ u64 d1_dffst : 9;
+ u64 d2_dffst : 9;
+ u64 d3_dffst : 9;
+ u64 d4_dffst : 9;
+ } s;
+ struct cvmx_npei_dma_state2_p1_cn52xxp1 {
+ u64 reserved_45_63 : 19;
+ u64 d0_dffst : 9;
+ u64 d1_dffst : 9;
+ u64 d2_dffst : 9;
+ u64 d3_dffst : 9;
+ u64 reserved_0_8 : 9;
+ } cn52xxp1;
+ struct cvmx_npei_dma_state2_p1_s cn56xxp1;
+};
+
+typedef union cvmx_npei_dma_state2_p1 cvmx_npei_dma_state2_p1_t;
+
+/**
+ * cvmx_npei_dma_state3_p1
+ *
+ * NPEI_DMA_STATE3_P1 = NPEI DMA DRE State
+ *
+ * DMA engine Debug information.
+ */
+union cvmx_npei_dma_state3_p1 {
+ u64 u64;
+ struct cvmx_npei_dma_state3_p1_s {
+ u64 reserved_60_63 : 4;
+ u64 d0_drest : 15;
+ u64 d1_drest : 15;
+ u64 d2_drest : 15;
+ u64 d3_drest : 15;
+ } s;
+ struct cvmx_npei_dma_state3_p1_s cn52xxp1;
+ struct cvmx_npei_dma_state3_p1_s cn56xxp1;
+};
+
+typedef union cvmx_npei_dma_state3_p1 cvmx_npei_dma_state3_p1_t;
+
+/**
+ * cvmx_npei_dma_state4_p1
+ *
+ * NPEI_DMA_STATE4_P1 = NPEI DMA DWE State
+ *
+ * DMA engine Debug information.
+ */
+union cvmx_npei_dma_state4_p1 {
+ u64 u64;
+ struct cvmx_npei_dma_state4_p1_s {
+ u64 reserved_52_63 : 12;
+ u64 d0_dwest : 13;
+ u64 d1_dwest : 13;
+ u64 d2_dwest : 13;
+ u64 d3_dwest : 13;
+ } s;
+ struct cvmx_npei_dma_state4_p1_s cn52xxp1;
+ struct cvmx_npei_dma_state4_p1_s cn56xxp1;
+};
+
+typedef union cvmx_npei_dma_state4_p1 cvmx_npei_dma_state4_p1_t;
+
+/**
+ * cvmx_npei_dma_state5_p1
+ *
+ * NPEI_DMA_STATE5_P1 = NPEI DMA DWE and DRE State
+ *
+ * DMA engine Debug information.
+ */
+union cvmx_npei_dma_state5_p1 {
+ u64 u64;
+ struct cvmx_npei_dma_state5_p1_s {
+ u64 reserved_28_63 : 36;
+ u64 d4_drest : 15;
+ u64 d4_dwest : 13;
+ } s;
+ struct cvmx_npei_dma_state5_p1_s cn56xxp1;
+};
+
+typedef union cvmx_npei_dma_state5_p1 cvmx_npei_dma_state5_p1_t;
+
+/**
+ * cvmx_npei_int_a_enb
+ *
+ * NPEI_INTERRUPT_A_ENB = NPI's Interrupt A Enable Register
+ *
+ * Used to allow the generation of interrupts (MSI/INTA) to the PCIe
+ * Cores. Used to enable the various interrupting conditions of NPEI
+ */
+union cvmx_npei_int_a_enb {
+ u64 u64;
+ struct cvmx_npei_int_a_enb_s {
+ u64 reserved_10_63 : 54;
+ u64 pout_err : 1;
+ u64 pin_bp : 1;
+ u64 p1_rdlk : 1;
+ u64 p0_rdlk : 1;
+ u64 pgl_err : 1;
+ u64 pdi_err : 1;
+ u64 pop_err : 1;
+ u64 pins_err : 1;
+ u64 dma1_cpl : 1;
+ u64 dma0_cpl : 1;
+ } s;
+ struct cvmx_npei_int_a_enb_s cn52xx;
+ struct cvmx_npei_int_a_enb_cn52xxp1 {
+ u64 reserved_2_63 : 62;
+ u64 dma1_cpl : 1;
+ u64 dma0_cpl : 1;
+ } cn52xxp1;
+ struct cvmx_npei_int_a_enb_s cn56xx;
+};
+
+typedef union cvmx_npei_int_a_enb cvmx_npei_int_a_enb_t;
+
+/**
+ * cvmx_npei_int_a_enb2
+ *
+ * NPEI_INTERRUPT_A_ENB2 = NPEI's Interrupt A Enable2 Register
+ *
+ * Used to enable the various interrupting conditions of NPEI
+ */
+union cvmx_npei_int_a_enb2 {
+ u64 u64;
+ struct cvmx_npei_int_a_enb2_s {
+ u64 reserved_10_63 : 54;
+ u64 pout_err : 1;
+ u64 pin_bp : 1;
+ u64 p1_rdlk : 1;
+ u64 p0_rdlk : 1;
+ u64 pgl_err : 1;
+ u64 pdi_err : 1;
+ u64 pop_err : 1;
+ u64 pins_err : 1;
+ u64 dma1_cpl : 1;
+ u64 dma0_cpl : 1;
+ } s;
+ struct cvmx_npei_int_a_enb2_s cn52xx;
+ struct cvmx_npei_int_a_enb2_cn52xxp1 {
+ u64 reserved_2_63 : 62;
+ u64 dma1_cpl : 1;
+ u64 dma0_cpl : 1;
+ } cn52xxp1;
+ struct cvmx_npei_int_a_enb2_s cn56xx;
+};
+
+typedef union cvmx_npei_int_a_enb2 cvmx_npei_int_a_enb2_t;
+
+/**
+ * cvmx_npei_int_a_sum
+ *
+ * NPEI_INTERRUPT_A_SUM = NPI Interrupt A Summary Register
+ *
+ * Set when an interrupt condition occurs, write '1' to clear. When an
+ * interrupt bit in this register is set and
+ * the corresponding bit in the NPEI_INT_A_ENB register is set, then
+ * NPEI_INT_SUM[61] will be set.
+ */
+union cvmx_npei_int_a_sum {
+ u64 u64;
+ struct cvmx_npei_int_a_sum_s {
+ u64 reserved_10_63 : 54;
+ u64 pout_err : 1;
+ u64 pin_bp : 1;
+ u64 p1_rdlk : 1;
+ u64 p0_rdlk : 1;
+ u64 pgl_err : 1;
+ u64 pdi_err : 1;
+ u64 pop_err : 1;
+ u64 pins_err : 1;
+ u64 dma1_cpl : 1;
+ u64 dma0_cpl : 1;
+ } s;
+ struct cvmx_npei_int_a_sum_s cn52xx;
+ struct cvmx_npei_int_a_sum_cn52xxp1 {
+ u64 reserved_2_63 : 62;
+ u64 dma1_cpl : 1;
+ u64 dma0_cpl : 1;
+ } cn52xxp1;
+ struct cvmx_npei_int_a_sum_s cn56xx;
+};
+
+typedef union cvmx_npei_int_a_sum cvmx_npei_int_a_sum_t;
+
+/**
+ * cvmx_npei_int_enb
+ *
+ * NPEI_INTERRUPT_ENB = NPI's Interrupt Enable Register
+ *
+ * Used to allow the generation of interrupts (MSI/INTA) to the PCIe
+ * Cores. Used to enable the various interrupting conditions of NPI
+ */
+union cvmx_npei_int_enb {
+ u64 u64;
+ struct cvmx_npei_int_enb_s {
+ u64 mio_inta : 1;
+ u64 reserved_62_62 : 1;
+ u64 int_a : 1;
+ u64 c1_ldwn : 1;
+ u64 c0_ldwn : 1;
+ u64 c1_exc : 1;
+ u64 c0_exc : 1;
+ u64 c1_up_wf : 1;
+ u64 c0_up_wf : 1;
+ u64 c1_un_wf : 1;
+ u64 c0_un_wf : 1;
+ u64 c1_un_bx : 1;
+ u64 c1_un_wi : 1;
+ u64 c1_un_b2 : 1;
+ u64 c1_un_b1 : 1;
+ u64 c1_un_b0 : 1;
+ u64 c1_up_bx : 1;
+ u64 c1_up_wi : 1;
+ u64 c1_up_b2 : 1;
+ u64 c1_up_b1 : 1;
+ u64 c1_up_b0 : 1;
+ u64 c0_un_bx : 1;
+ u64 c0_un_wi : 1;
+ u64 c0_un_b2 : 1;
+ u64 c0_un_b1 : 1;
+ u64 c0_un_b0 : 1;
+ u64 c0_up_bx : 1;
+ u64 c0_up_wi : 1;
+ u64 c0_up_b2 : 1;
+ u64 c0_up_b1 : 1;
+ u64 c0_up_b0 : 1;
+ u64 c1_hpint : 1;
+ u64 c1_pmei : 1;
+ u64 c1_wake : 1;
+ u64 crs1_dr : 1;
+ u64 c1_se : 1;
+ u64 crs1_er : 1;
+ u64 c1_aeri : 1;
+ u64 c0_hpint : 1;
+ u64 c0_pmei : 1;
+ u64 c0_wake : 1;
+ u64 crs0_dr : 1;
+ u64 c0_se : 1;
+ u64 crs0_er : 1;
+ u64 c0_aeri : 1;
+ u64 ptime : 1;
+ u64 pcnt : 1;
+ u64 pidbof : 1;
+ u64 psldbof : 1;
+ u64 dtime1 : 1;
+ u64 dtime0 : 1;
+ u64 dcnt1 : 1;
+ u64 dcnt0 : 1;
+ u64 dma1fi : 1;
+ u64 dma0fi : 1;
+ u64 dma4dbo : 1;
+ u64 dma3dbo : 1;
+ u64 dma2dbo : 1;
+ u64 dma1dbo : 1;
+ u64 dma0dbo : 1;
+ u64 iob2big : 1;
+ u64 bar0_to : 1;
+ u64 rml_wto : 1;
+ u64 rml_rto : 1;
+ } s;
+ struct cvmx_npei_int_enb_s cn52xx;
+ struct cvmx_npei_int_enb_cn52xxp1 {
+ u64 mio_inta : 1;
+ u64 reserved_62_62 : 1;
+ u64 int_a : 1;
+ u64 c1_ldwn : 1;
+ u64 c0_ldwn : 1;
+ u64 c1_exc : 1;
+ u64 c0_exc : 1;
+ u64 c1_up_wf : 1;
+ u64 c0_up_wf : 1;
+ u64 c1_un_wf : 1;
+ u64 c0_un_wf : 1;
+ u64 c1_un_bx : 1;
+ u64 c1_un_wi : 1;
+ u64 c1_un_b2 : 1;
+ u64 c1_un_b1 : 1;
+ u64 c1_un_b0 : 1;
+ u64 c1_up_bx : 1;
+ u64 c1_up_wi : 1;
+ u64 c1_up_b2 : 1;
+ u64 c1_up_b1 : 1;
+ u64 c1_up_b0 : 1;
+ u64 c0_un_bx : 1;
+ u64 c0_un_wi : 1;
+ u64 c0_un_b2 : 1;
+ u64 c0_un_b1 : 1;
+ u64 c0_un_b0 : 1;
+ u64 c0_up_bx : 1;
+ u64 c0_up_wi : 1;
+ u64 c0_up_b2 : 1;
+ u64 c0_up_b1 : 1;
+ u64 c0_up_b0 : 1;
+ u64 c1_hpint : 1;
+ u64 c1_pmei : 1;
+ u64 c1_wake : 1;
+ u64 crs1_dr : 1;
+ u64 c1_se : 1;
+ u64 crs1_er : 1;
+ u64 c1_aeri : 1;
+ u64 c0_hpint : 1;
+ u64 c0_pmei : 1;
+ u64 c0_wake : 1;
+ u64 crs0_dr : 1;
+ u64 c0_se : 1;
+ u64 crs0_er : 1;
+ u64 c0_aeri : 1;
+ u64 ptime : 1;
+ u64 pcnt : 1;
+ u64 pidbof : 1;
+ u64 psldbof : 1;
+ u64 dtime1 : 1;
+ u64 dtime0 : 1;
+ u64 dcnt1 : 1;
+ u64 dcnt0 : 1;
+ u64 dma1fi : 1;
+ u64 dma0fi : 1;
+ u64 reserved_8_8 : 1;
+ u64 dma3dbo : 1;
+ u64 dma2dbo : 1;
+ u64 dma1dbo : 1;
+ u64 dma0dbo : 1;
+ u64 iob2big : 1;
+ u64 bar0_to : 1;
+ u64 rml_wto : 1;
+ u64 rml_rto : 1;
+ } cn52xxp1;
+ struct cvmx_npei_int_enb_s cn56xx;
+ struct cvmx_npei_int_enb_cn56xxp1 {
+ u64 mio_inta : 1;
+ u64 reserved_61_62 : 2;
+ u64 c1_ldwn : 1;
+ u64 c0_ldwn : 1;
+ u64 c1_exc : 1;
+ u64 c0_exc : 1;
+ u64 c1_up_wf : 1;
+ u64 c0_up_wf : 1;
+ u64 c1_un_wf : 1;
+ u64 c0_un_wf : 1;
+ u64 c1_un_bx : 1;
+ u64 c1_un_wi : 1;
+ u64 c1_un_b2 : 1;
+ u64 c1_un_b1 : 1;
+ u64 c1_un_b0 : 1;
+ u64 c1_up_bx : 1;
+ u64 c1_up_wi : 1;
+ u64 c1_up_b2 : 1;
+ u64 c1_up_b1 : 1;
+ u64 c1_up_b0 : 1;
+ u64 c0_un_bx : 1;
+ u64 c0_un_wi : 1;
+ u64 c0_un_b2 : 1;
+ u64 c0_un_b1 : 1;
+ u64 c0_un_b0 : 1;
+ u64 c0_up_bx : 1;
+ u64 c0_up_wi : 1;
+ u64 c0_up_b2 : 1;
+ u64 c0_up_b1 : 1;
+ u64 c0_up_b0 : 1;
+ u64 c1_hpint : 1;
+ u64 c1_pmei : 1;
+ u64 c1_wake : 1;
+ u64 reserved_29_29 : 1;
+ u64 c1_se : 1;
+ u64 reserved_27_27 : 1;
+ u64 c1_aeri : 1;
+ u64 c0_hpint : 1;
+ u64 c0_pmei : 1;
+ u64 c0_wake : 1;
+ u64 reserved_22_22 : 1;
+ u64 c0_se : 1;
+ u64 reserved_20_20 : 1;
+ u64 c0_aeri : 1;
+ u64 ptime : 1;
+ u64 pcnt : 1;
+ u64 pidbof : 1;
+ u64 psldbof : 1;
+ u64 dtime1 : 1;
+ u64 dtime0 : 1;
+ u64 dcnt1 : 1;
+ u64 dcnt0 : 1;
+ u64 dma1fi : 1;
+ u64 dma0fi : 1;
+ u64 dma4dbo : 1;
+ u64 dma3dbo : 1;
+ u64 dma2dbo : 1;
+ u64 dma1dbo : 1;
+ u64 dma0dbo : 1;
+ u64 iob2big : 1;
+ u64 bar0_to : 1;
+ u64 rml_wto : 1;
+ u64 rml_rto : 1;
+ } cn56xxp1;
+};
+
+typedef union cvmx_npei_int_enb cvmx_npei_int_enb_t;
+
+/**
+ * cvmx_npei_int_enb2
+ *
+ * NPEI_INTERRUPT_ENB2 = NPI's Interrupt Enable2 Register
+ *
+ * Used to enable the various interrupting conditions of NPI
+ */
+union cvmx_npei_int_enb2 {
+ u64 u64;
+ struct cvmx_npei_int_enb2_s {
+ u64 reserved_62_63 : 2;
+ u64 int_a : 1;
+ u64 c1_ldwn : 1;
+ u64 c0_ldwn : 1;
+ u64 c1_exc : 1;
+ u64 c0_exc : 1;
+ u64 c1_up_wf : 1;
+ u64 c0_up_wf : 1;
+ u64 c1_un_wf : 1;
+ u64 c0_un_wf : 1;
+ u64 c1_un_bx : 1;
+ u64 c1_un_wi : 1;
+ u64 c1_un_b2 : 1;
+ u64 c1_un_b1 : 1;
+ u64 c1_un_b0 : 1;
+ u64 c1_up_bx : 1;
+ u64 c1_up_wi : 1;
+ u64 c1_up_b2 : 1;
+ u64 c1_up_b1 : 1;
+ u64 c1_up_b0 : 1;
+ u64 c0_un_bx : 1;
+ u64 c0_un_wi : 1;
+ u64 c0_un_b2 : 1;
+ u64 c0_un_b1 : 1;
+ u64 c0_un_b0 : 1;
+ u64 c0_up_bx : 1;
+ u64 c0_up_wi : 1;
+ u64 c0_up_b2 : 1;
+ u64 c0_up_b1 : 1;
+ u64 c0_up_b0 : 1;
+ u64 c1_hpint : 1;
+ u64 c1_pmei : 1;
+ u64 c1_wake : 1;
+ u64 crs1_dr : 1;
+ u64 c1_se : 1;
+ u64 crs1_er : 1;
+ u64 c1_aeri : 1;
+ u64 c0_hpint : 1;
+ u64 c0_pmei : 1;
+ u64 c0_wake : 1;
+ u64 crs0_dr : 1;
+ u64 c0_se : 1;
+ u64 crs0_er : 1;
+ u64 c0_aeri : 1;
+ u64 ptime : 1;
+ u64 pcnt : 1;
+ u64 pidbof : 1;
+ u64 psldbof : 1;
+ u64 dtime1 : 1;
+ u64 dtime0 : 1;
+ u64 dcnt1 : 1;
+ u64 dcnt0 : 1;
+ u64 dma1fi : 1;
+ u64 dma0fi : 1;
+ u64 dma4dbo : 1;
+ u64 dma3dbo : 1;
+ u64 dma2dbo : 1;
+ u64 dma1dbo : 1;
+ u64 dma0dbo : 1;
+ u64 iob2big : 1;
+ u64 bar0_to : 1;
+ u64 rml_wto : 1;
+ u64 rml_rto : 1;
+ } s;
+ struct cvmx_npei_int_enb2_s cn52xx;
+ struct cvmx_npei_int_enb2_cn52xxp1 {
+ u64 reserved_62_63 : 2;
+ u64 int_a : 1;
+ u64 c1_ldwn : 1;
+ u64 c0_ldwn : 1;
+ u64 c1_exc : 1;
+ u64 c0_exc : 1;
+ u64 c1_up_wf : 1;
+ u64 c0_up_wf : 1;
+ u64 c1_un_wf : 1;
+ u64 c0_un_wf : 1;
+ u64 c1_un_bx : 1;
+ u64 c1_un_wi : 1;
+ u64 c1_un_b2 : 1;
+ u64 c1_un_b1 : 1;
+ u64 c1_un_b0 : 1;
+ u64 c1_up_bx : 1;
+ u64 c1_up_wi : 1;
+ u64 c1_up_b2 : 1;
+ u64 c1_up_b1 : 1;
+ u64 c1_up_b0 : 1;
+ u64 c0_un_bx : 1;
+ u64 c0_un_wi : 1;
+ u64 c0_un_b2 : 1;
+ u64 c0_un_b1 : 1;
+ u64 c0_un_b0 : 1;
+ u64 c0_up_bx : 1;
+ u64 c0_up_wi : 1;
+ u64 c0_up_b2 : 1;
+ u64 c0_up_b1 : 1;
+ u64 c0_up_b0 : 1;
+ u64 c1_hpint : 1;
+ u64 c1_pmei : 1;
+ u64 c1_wake : 1;
+ u64 crs1_dr : 1;
+ u64 c1_se : 1;
+ u64 crs1_er : 1;
+ u64 c1_aeri : 1;
+ u64 c0_hpint : 1;
+ u64 c0_pmei : 1;
+ u64 c0_wake : 1;
+ u64 crs0_dr : 1;
+ u64 c0_se : 1;
+ u64 crs0_er : 1;
+ u64 c0_aeri : 1;
+ u64 ptime : 1;
+ u64 pcnt : 1;
+ u64 pidbof : 1;
+ u64 psldbof : 1;
+ u64 dtime1 : 1;
+ u64 dtime0 : 1;
+ u64 dcnt1 : 1;
+ u64 dcnt0 : 1;
+ u64 dma1fi : 1;
+ u64 dma0fi : 1;
+ u64 reserved_8_8 : 1;
+ u64 dma3dbo : 1;
+ u64 dma2dbo : 1;
+ u64 dma1dbo : 1;
+ u64 dma0dbo : 1;
+ u64 iob2big : 1;
+ u64 bar0_to : 1;
+ u64 rml_wto : 1;
+ u64 rml_rto : 1;
+ } cn52xxp1;
+ struct cvmx_npei_int_enb2_s cn56xx;
+ struct cvmx_npei_int_enb2_cn56xxp1 {
+ u64 reserved_61_63 : 3;
+ u64 c1_ldwn : 1;
+ u64 c0_ldwn : 1;
+ u64 c1_exc : 1;
+ u64 c0_exc : 1;
+ u64 c1_up_wf : 1;
+ u64 c0_up_wf : 1;
+ u64 c1_un_wf : 1;
+ u64 c0_un_wf : 1;
+ u64 c1_un_bx : 1;
+ u64 c1_un_wi : 1;
+ u64 c1_un_b2 : 1;
+ u64 c1_un_b1 : 1;
+ u64 c1_un_b0 : 1;
+ u64 c1_up_bx : 1;
+ u64 c1_up_wi : 1;
+ u64 c1_up_b2 : 1;
+ u64 c1_up_b1 : 1;
+ u64 c1_up_b0 : 1;
+ u64 c0_un_bx : 1;
+ u64 c0_un_wi : 1;
+ u64 c0_un_b2 : 1;
+ u64 c0_un_b1 : 1;
+ u64 c0_un_b0 : 1;
+ u64 c0_up_bx : 1;
+ u64 c0_up_wi : 1;
+ u64 c0_up_b2 : 1;
+ u64 c0_up_b1 : 1;
+ u64 c0_up_b0 : 1;
+ u64 c1_hpint : 1;
+ u64 c1_pmei : 1;
+ u64 c1_wake : 1;
+ u64 reserved_29_29 : 1;
+ u64 c1_se : 1;
+ u64 reserved_27_27 : 1;
+ u64 c1_aeri : 1;
+ u64 c0_hpint : 1;
+ u64 c0_pmei : 1;
+ u64 c0_wake : 1;
+ u64 reserved_22_22 : 1;
+ u64 c0_se : 1;
+ u64 reserved_20_20 : 1;
+ u64 c0_aeri : 1;
+ u64 ptime : 1;
+ u64 pcnt : 1;
+ u64 pidbof : 1;
+ u64 psldbof : 1;
+ u64 dtime1 : 1;
+ u64 dtime0 : 1;
+ u64 dcnt1 : 1;
+ u64 dcnt0 : 1;
+ u64 dma1fi : 1;
+ u64 dma0fi : 1;
+ u64 dma4dbo : 1;
+ u64 dma3dbo : 1;
+ u64 dma2dbo : 1;
+ u64 dma1dbo : 1;
+ u64 dma0dbo : 1;
+ u64 iob2big : 1;
+ u64 bar0_to : 1;
+ u64 rml_wto : 1;
+ u64 rml_rto : 1;
+ } cn56xxp1;
+};
+
+typedef union cvmx_npei_int_enb2 cvmx_npei_int_enb2_t;
+
+/**
+ * cvmx_npei_int_info
+ *
+ * NPEI_INT_INFO = NPI Interrupt Information
+ *
+ * Contains information about some of the interrupt condition that can occur
+ * in the NPEI_INTERRUPT_SUM register.
+ */
+union cvmx_npei_int_info {
+ u64 u64;
+ struct cvmx_npei_int_info_s {
+ u64 reserved_12_63 : 52;
+ u64 pidbof : 6;
+ u64 psldbof : 6;
+ } s;
+ struct cvmx_npei_int_info_s cn52xx;
+ struct cvmx_npei_int_info_s cn56xx;
+ struct cvmx_npei_int_info_s cn56xxp1;
+};
+
+typedef union cvmx_npei_int_info cvmx_npei_int_info_t;
+
+/**
+ * cvmx_npei_int_sum
+ *
+ * NPEI_INTERRUPT_SUM = NPI Interrupt Summary Register
+ *
+ * Set when an interrupt condition occurs, write '1' to clear.
+ */
+union cvmx_npei_int_sum {
+ u64 u64;
+ struct cvmx_npei_int_sum_s {
+ u64 mio_inta : 1;
+ u64 reserved_62_62 : 1;
+ u64 int_a : 1;
+ u64 c1_ldwn : 1;
+ u64 c0_ldwn : 1;
+ u64 c1_exc : 1;
+ u64 c0_exc : 1;
+ u64 c1_up_wf : 1;
+ u64 c0_up_wf : 1;
+ u64 c1_un_wf : 1;
+ u64 c0_un_wf : 1;
+ u64 c1_un_bx : 1;
+ u64 c1_un_wi : 1;
+ u64 c1_un_b2 : 1;
+ u64 c1_un_b1 : 1;
+ u64 c1_un_b0 : 1;
+ u64 c1_up_bx : 1;
+ u64 c1_up_wi : 1;
+ u64 c1_up_b2 : 1;
+ u64 c1_up_b1 : 1;
+ u64 c1_up_b0 : 1;
+ u64 c0_un_bx : 1;
+ u64 c0_un_wi : 1;
+ u64 c0_un_b2 : 1;
+ u64 c0_un_b1 : 1;
+ u64 c0_un_b0 : 1;
+ u64 c0_up_bx : 1;
+ u64 c0_up_wi : 1;
+ u64 c0_up_b2 : 1;
+ u64 c0_up_b1 : 1;
+ u64 c0_up_b0 : 1;
+ u64 c1_hpint : 1;
+ u64 c1_pmei : 1;
+ u64 c1_wake : 1;
+ u64 crs1_dr : 1;
+ u64 c1_se : 1;
+ u64 crs1_er : 1;
+ u64 c1_aeri : 1;
+ u64 c0_hpint : 1;
+ u64 c0_pmei : 1;
+ u64 c0_wake : 1;
+ u64 crs0_dr : 1;
+ u64 c0_se : 1;
+ u64 crs0_er : 1;
+ u64 c0_aeri : 1;
+ u64 ptime : 1;
+ u64 pcnt : 1;
+ u64 pidbof : 1;
+ u64 psldbof : 1;
+ u64 dtime1 : 1;
+ u64 dtime0 : 1;
+ u64 dcnt1 : 1;
+ u64 dcnt0 : 1;
+ u64 dma1fi : 1;
+ u64 dma0fi : 1;
+ u64 dma4dbo : 1;
+ u64 dma3dbo : 1;
+ u64 dma2dbo : 1;
+ u64 dma1dbo : 1;
+ u64 dma0dbo : 1;
+ u64 iob2big : 1;
+ u64 bar0_to : 1;
+ u64 rml_wto : 1;
+ u64 rml_rto : 1;
+ } s;
+ struct cvmx_npei_int_sum_s cn52xx;
+ struct cvmx_npei_int_sum_cn52xxp1 {
+ u64 mio_inta : 1;
+ u64 reserved_62_62 : 1;
+ u64 int_a : 1;
+ u64 c1_ldwn : 1;
+ u64 c0_ldwn : 1;
+ u64 c1_exc : 1;
+ u64 c0_exc : 1;
+ u64 c1_up_wf : 1;
+ u64 c0_up_wf : 1;
+ u64 c1_un_wf : 1;
+ u64 c0_un_wf : 1;
+ u64 c1_un_bx : 1;
+ u64 c1_un_wi : 1;
+ u64 c1_un_b2 : 1;
+ u64 c1_un_b1 : 1;
+ u64 c1_un_b0 : 1;
+ u64 c1_up_bx : 1;
+ u64 c1_up_wi : 1;
+ u64 c1_up_b2 : 1;
+ u64 c1_up_b1 : 1;
+ u64 c1_up_b0 : 1;
+ u64 c0_un_bx : 1;
+ u64 c0_un_wi : 1;
+ u64 c0_un_b2 : 1;
+ u64 c0_un_b1 : 1;
+ u64 c0_un_b0 : 1;
+ u64 c0_up_bx : 1;
+ u64 c0_up_wi : 1;
+ u64 c0_up_b2 : 1;
+ u64 c0_up_b1 : 1;
+ u64 c0_up_b0 : 1;
+ u64 c1_hpint : 1;
+ u64 c1_pmei : 1;
+ u64 c1_wake : 1;
+ u64 crs1_dr : 1;
+ u64 c1_se : 1;
+ u64 crs1_er : 1;
+ u64 c1_aeri : 1;
+ u64 c0_hpint : 1;
+ u64 c0_pmei : 1;
+ u64 c0_wake : 1;
+ u64 crs0_dr : 1;
+ u64 c0_se : 1;
+ u64 crs0_er : 1;
+ u64 c0_aeri : 1;
+ u64 reserved_15_18 : 4;
+ u64 dtime1 : 1;
+ u64 dtime0 : 1;
+ u64 dcnt1 : 1;
+ u64 dcnt0 : 1;
+ u64 dma1fi : 1;
+ u64 dma0fi : 1;
+ u64 reserved_8_8 : 1;
+ u64 dma3dbo : 1;
+ u64 dma2dbo : 1;
+ u64 dma1dbo : 1;
+ u64 dma0dbo : 1;
+ u64 iob2big : 1;
+ u64 bar0_to : 1;
+ u64 rml_wto : 1;
+ u64 rml_rto : 1;
+ } cn52xxp1;
+ struct cvmx_npei_int_sum_s cn56xx;
+ struct cvmx_npei_int_sum_cn56xxp1 {
+ u64 mio_inta : 1;
+ u64 reserved_61_62 : 2;
+ u64 c1_ldwn : 1;
+ u64 c0_ldwn : 1;
+ u64 c1_exc : 1;
+ u64 c0_exc : 1;
+ u64 c1_up_wf : 1;
+ u64 c0_up_wf : 1;
+ u64 c1_un_wf : 1;
+ u64 c0_un_wf : 1;
+ u64 c1_un_bx : 1;
+ u64 c1_un_wi : 1;
+ u64 c1_un_b2 : 1;
+ u64 c1_un_b1 : 1;
+ u64 c1_un_b0 : 1;
+ u64 c1_up_bx : 1;
+ u64 c1_up_wi : 1;
+ u64 c1_up_b2 : 1;
+ u64 c1_up_b1 : 1;
+ u64 c1_up_b0 : 1;
+ u64 c0_un_bx : 1;
+ u64 c0_un_wi : 1;
+ u64 c0_un_b2 : 1;
+ u64 c0_un_b1 : 1;
+ u64 c0_un_b0 : 1;
+ u64 c0_up_bx : 1;
+ u64 c0_up_wi : 1;
+ u64 c0_up_b2 : 1;
+ u64 c0_up_b1 : 1;
+ u64 c0_up_b0 : 1;
+ u64 c1_hpint : 1;
+ u64 c1_pmei : 1;
+ u64 c1_wake : 1;
+ u64 reserved_29_29 : 1;
+ u64 c1_se : 1;
+ u64 reserved_27_27 : 1;
+ u64 c1_aeri : 1;
+ u64 c0_hpint : 1;
+ u64 c0_pmei : 1;
+ u64 c0_wake : 1;
+ u64 reserved_22_22 : 1;
+ u64 c0_se : 1;
+ u64 reserved_20_20 : 1;
+ u64 c0_aeri : 1;
+ u64 reserved_15_18 : 4;
+ u64 dtime1 : 1;
+ u64 dtime0 : 1;
+ u64 dcnt1 : 1;
+ u64 dcnt0 : 1;
+ u64 dma1fi : 1;
+ u64 dma0fi : 1;
+ u64 dma4dbo : 1;
+ u64 dma3dbo : 1;
+ u64 dma2dbo : 1;
+ u64 dma1dbo : 1;
+ u64 dma0dbo : 1;
+ u64 iob2big : 1;
+ u64 bar0_to : 1;
+ u64 rml_wto : 1;
+ u64 rml_rto : 1;
+ } cn56xxp1;
+};
+
+typedef union cvmx_npei_int_sum cvmx_npei_int_sum_t;
+
+/**
+ * cvmx_npei_int_sum2
+ *
+ * NPEI_INTERRUPT_SUM2 = NPI Interrupt Summary2 Register
+ *
+ * This is a read only copy of the NPEI_INTERRUPT_SUM register with bit
+ * variances.
+ */
+union cvmx_npei_int_sum2 {
+ u64 u64;
+ struct cvmx_npei_int_sum2_s {
+ u64 mio_inta : 1;
+ u64 reserved_62_62 : 1;
+ u64 int_a : 1;
+ u64 c1_ldwn : 1;
+ u64 c0_ldwn : 1;
+ u64 c1_exc : 1;
+ u64 c0_exc : 1;
+ u64 c1_up_wf : 1;
+ u64 c0_up_wf : 1;
+ u64 c1_un_wf : 1;
+ u64 c0_un_wf : 1;
+ u64 c1_un_bx : 1;
+ u64 c1_un_wi : 1;
+ u64 c1_un_b2 : 1;
+ u64 c1_un_b1 : 1;
+ u64 c1_un_b0 : 1;
+ u64 c1_up_bx : 1;
+ u64 c1_up_wi : 1;
+ u64 c1_up_b2 : 1;
+ u64 c1_up_b1 : 1;
+ u64 c1_up_b0 : 1;
+ u64 c0_un_bx : 1;
+ u64 c0_un_wi : 1;
+ u64 c0_un_b2 : 1;
+ u64 c0_un_b1 : 1;
+ u64 c0_un_b0 : 1;
+ u64 c0_up_bx : 1;
+ u64 c0_up_wi : 1;
+ u64 c0_up_b2 : 1;
+ u64 c0_up_b1 : 1;
+ u64 c0_up_b0 : 1;
+ u64 c1_hpint : 1;
+ u64 c1_pmei : 1;
+ u64 c1_wake : 1;
+ u64 crs1_dr : 1;
+ u64 c1_se : 1;
+ u64 crs1_er : 1;
+ u64 c1_aeri : 1;
+ u64 c0_hpint : 1;
+ u64 c0_pmei : 1;
+ u64 c0_wake : 1;
+ u64 crs0_dr : 1;
+ u64 c0_se : 1;
+ u64 crs0_er : 1;
+ u64 c0_aeri : 1;
+ u64 reserved_15_18 : 4;
+ u64 dtime1 : 1;
+ u64 dtime0 : 1;
+ u64 dcnt1 : 1;
+ u64 dcnt0 : 1;
+ u64 dma1fi : 1;
+ u64 dma0fi : 1;
+ u64 reserved_8_8 : 1;
+ u64 dma3dbo : 1;
+ u64 dma2dbo : 1;
+ u64 dma1dbo : 1;
+ u64 dma0dbo : 1;
+ u64 iob2big : 1;
+ u64 bar0_to : 1;
+ u64 rml_wto : 1;
+ u64 rml_rto : 1;
+ } s;
+ struct cvmx_npei_int_sum2_s cn52xx;
+ struct cvmx_npei_int_sum2_s cn52xxp1;
+ struct cvmx_npei_int_sum2_s cn56xx;
+};
+
+typedef union cvmx_npei_int_sum2 cvmx_npei_int_sum2_t;
+
+/**
+ * cvmx_npei_last_win_rdata0
+ *
+ * NPEI_LAST_WIN_RDATA0 = NPEI Last Window Read Data Port0
+ *
+ * The data from the last initiated window read.
+ */
+union cvmx_npei_last_win_rdata0 {
+ u64 u64;
+ struct cvmx_npei_last_win_rdata0_s {
+ u64 data : 64;
+ } s;
+ struct cvmx_npei_last_win_rdata0_s cn52xx;
+ struct cvmx_npei_last_win_rdata0_s cn52xxp1;
+ struct cvmx_npei_last_win_rdata0_s cn56xx;
+ struct cvmx_npei_last_win_rdata0_s cn56xxp1;
+};
+
+typedef union cvmx_npei_last_win_rdata0 cvmx_npei_last_win_rdata0_t;
+
+/**
+ * cvmx_npei_last_win_rdata1
+ *
+ * NPEI_LAST_WIN_RDATA1 = NPEI Last Window Read Data Port1
+ *
+ * The data from the last initiated window read.
+ */
+union cvmx_npei_last_win_rdata1 {
+ u64 u64;
+ struct cvmx_npei_last_win_rdata1_s {
+ u64 data : 64;
+ } s;
+ struct cvmx_npei_last_win_rdata1_s cn52xx;
+ struct cvmx_npei_last_win_rdata1_s cn52xxp1;
+ struct cvmx_npei_last_win_rdata1_s cn56xx;
+ struct cvmx_npei_last_win_rdata1_s cn56xxp1;
+};
+
+typedef union cvmx_npei_last_win_rdata1 cvmx_npei_last_win_rdata1_t;
+
+/**
+ * cvmx_npei_mem_access_ctl
+ *
+ * NPEI_MEM_ACCESS_CTL = NPEI's Memory Access Control
+ *
+ * Contains control for access to the PCIe address space.
+ */
+union cvmx_npei_mem_access_ctl {
+ u64 u64;
+ struct cvmx_npei_mem_access_ctl_s {
+ u64 reserved_14_63 : 50;
+ u64 max_word : 4;
+ u64 timer : 10;
+ } s;
+ struct cvmx_npei_mem_access_ctl_s cn52xx;
+ struct cvmx_npei_mem_access_ctl_s cn52xxp1;
+ struct cvmx_npei_mem_access_ctl_s cn56xx;
+ struct cvmx_npei_mem_access_ctl_s cn56xxp1;
+};
+
+typedef union cvmx_npei_mem_access_ctl cvmx_npei_mem_access_ctl_t;
+
+/**
+ * cvmx_npei_mem_access_subid#
+ *
+ * NPEI_MEM_ACCESS_SUBIDX = NPEI Memory Access SubidX Register
+ *
+ * Contains address index and control bits for access to memory from Core PPs.
+ */
+union cvmx_npei_mem_access_subidx {
+ u64 u64;
+ struct cvmx_npei_mem_access_subidx_s {
+ u64 reserved_42_63 : 22;
+ u64 zero : 1;
+ u64 port : 2;
+ u64 nmerge : 1;
+ u64 esr : 2;
+ u64 esw : 2;
+ u64 nsr : 1;
+ u64 nsw : 1;
+ u64 ror : 1;
+ u64 row : 1;
+ u64 ba : 30;
+ } s;
+ struct cvmx_npei_mem_access_subidx_s cn52xx;
+ struct cvmx_npei_mem_access_subidx_s cn52xxp1;
+ struct cvmx_npei_mem_access_subidx_s cn56xx;
+ struct cvmx_npei_mem_access_subidx_s cn56xxp1;
+};
+
+typedef union cvmx_npei_mem_access_subidx cvmx_npei_mem_access_subidx_t;
+
+/**
+ * cvmx_npei_msi_enb0
+ *
+ * NPEI_MSI_ENB0 = NPEI MSI Enable0
+ *
+ * Used to enable the interrupt generation for the bits in the NPEI_MSI_RCV0.
+ */
+union cvmx_npei_msi_enb0 {
+ u64 u64;
+ struct cvmx_npei_msi_enb0_s {
+ u64 enb : 64;
+ } s;
+ struct cvmx_npei_msi_enb0_s cn52xx;
+ struct cvmx_npei_msi_enb0_s cn52xxp1;
+ struct cvmx_npei_msi_enb0_s cn56xx;
+ struct cvmx_npei_msi_enb0_s cn56xxp1;
+};
+
+typedef union cvmx_npei_msi_enb0 cvmx_npei_msi_enb0_t;
+
+/**
+ * cvmx_npei_msi_enb1
+ *
+ * NPEI_MSI_ENB1 = NPEI MSI Enable1
+ *
+ * Used to enable the interrupt generation for the bits in the NPEI_MSI_RCV1.
+ */
+union cvmx_npei_msi_enb1 {
+ u64 u64;
+ struct cvmx_npei_msi_enb1_s {
+ u64 enb : 64;
+ } s;
+ struct cvmx_npei_msi_enb1_s cn52xx;
+ struct cvmx_npei_msi_enb1_s cn52xxp1;
+ struct cvmx_npei_msi_enb1_s cn56xx;
+ struct cvmx_npei_msi_enb1_s cn56xxp1;
+};
+
+typedef union cvmx_npei_msi_enb1 cvmx_npei_msi_enb1_t;
+
+/**
+ * cvmx_npei_msi_enb2
+ *
+ * NPEI_MSI_ENB2 = NPEI MSI Enable2
+ *
+ * Used to enable the interrupt generation for the bits in the NPEI_MSI_RCV2.
+ */
+union cvmx_npei_msi_enb2 {
+ u64 u64;
+ struct cvmx_npei_msi_enb2_s {
+ u64 enb : 64;
+ } s;
+ struct cvmx_npei_msi_enb2_s cn52xx;
+ struct cvmx_npei_msi_enb2_s cn52xxp1;
+ struct cvmx_npei_msi_enb2_s cn56xx;
+ struct cvmx_npei_msi_enb2_s cn56xxp1;
+};
+
+typedef union cvmx_npei_msi_enb2 cvmx_npei_msi_enb2_t;
+
+/**
+ * cvmx_npei_msi_enb3
+ *
+ * NPEI_MSI_ENB3 = NPEI MSI Enable3
+ *
+ * Used to enable the interrupt generation for the bits in the NPEI_MSI_RCV3.
+ */
+union cvmx_npei_msi_enb3 {
+ u64 u64;
+ struct cvmx_npei_msi_enb3_s {
+ u64 enb : 64;
+ } s;
+ struct cvmx_npei_msi_enb3_s cn52xx;
+ struct cvmx_npei_msi_enb3_s cn52xxp1;
+ struct cvmx_npei_msi_enb3_s cn56xx;
+ struct cvmx_npei_msi_enb3_s cn56xxp1;
+};
+
+typedef union cvmx_npei_msi_enb3 cvmx_npei_msi_enb3_t;
+
+/**
+ * cvmx_npei_msi_rcv0
+ *
+ * NPEI_MSI_RCV0 = NPEI MSI Receive0
+ *
+ * Contains bits [63:0] of the 256 bits of MSI interrupts.
+ */
+union cvmx_npei_msi_rcv0 {
+ u64 u64;
+ struct cvmx_npei_msi_rcv0_s {
+ u64 intr : 64;
+ } s;
+ struct cvmx_npei_msi_rcv0_s cn52xx;
+ struct cvmx_npei_msi_rcv0_s cn52xxp1;
+ struct cvmx_npei_msi_rcv0_s cn56xx;
+ struct cvmx_npei_msi_rcv0_s cn56xxp1;
+};
+
+typedef union cvmx_npei_msi_rcv0 cvmx_npei_msi_rcv0_t;
+
+/**
+ * cvmx_npei_msi_rcv1
+ *
+ * NPEI_MSI_RCV1 = NPEI MSI Receive1
+ *
+ * Contains bits [127:64] of the 256 bits of MSI interrupts.
+ */
+union cvmx_npei_msi_rcv1 {
+ u64 u64;
+ struct cvmx_npei_msi_rcv1_s {
+ u64 intr : 64;
+ } s;
+ struct cvmx_npei_msi_rcv1_s cn52xx;
+ struct cvmx_npei_msi_rcv1_s cn52xxp1;
+ struct cvmx_npei_msi_rcv1_s cn56xx;
+ struct cvmx_npei_msi_rcv1_s cn56xxp1;
+};
+
+typedef union cvmx_npei_msi_rcv1 cvmx_npei_msi_rcv1_t;
+
+/**
+ * cvmx_npei_msi_rcv2
+ *
+ * NPEI_MSI_RCV2 = NPEI MSI Receive2
+ *
+ * Contains bits [191:128] of the 256 bits of MSI interrupts.
+ */
+union cvmx_npei_msi_rcv2 {
+ u64 u64;
+ struct cvmx_npei_msi_rcv2_s {
+ u64 intr : 64;
+ } s;
+ struct cvmx_npei_msi_rcv2_s cn52xx;
+ struct cvmx_npei_msi_rcv2_s cn52xxp1;
+ struct cvmx_npei_msi_rcv2_s cn56xx;
+ struct cvmx_npei_msi_rcv2_s cn56xxp1;
+};
+
+typedef union cvmx_npei_msi_rcv2 cvmx_npei_msi_rcv2_t;
+
+/**
+ * cvmx_npei_msi_rcv3
+ *
+ * NPEI_MSI_RCV3 = NPEI MSI Receive3
+ *
+ * Contains bits [255:192] of the 256 bits of MSI interrupts.
+ */
+union cvmx_npei_msi_rcv3 {
+ u64 u64;
+ struct cvmx_npei_msi_rcv3_s {
+ u64 intr : 64;
+ } s;
+ struct cvmx_npei_msi_rcv3_s cn52xx;
+ struct cvmx_npei_msi_rcv3_s cn52xxp1;
+ struct cvmx_npei_msi_rcv3_s cn56xx;
+ struct cvmx_npei_msi_rcv3_s cn56xxp1;
+};
+
+typedef union cvmx_npei_msi_rcv3 cvmx_npei_msi_rcv3_t;
+
+/**
+ * cvmx_npei_msi_rd_map
+ *
+ * NPEI_MSI_RD_MAP = NPEI MSI Read MAP
+ *
+ * Used to read the mapping function of the NPEI_PCIE_MSI_RCV to NPEI_MSI_RCV
+ * registers.
+ */
+union cvmx_npei_msi_rd_map {
+ u64 u64;
+ struct cvmx_npei_msi_rd_map_s {
+ u64 reserved_16_63 : 48;
+ u64 rd_int : 8;
+ u64 msi_int : 8;
+ } s;
+ struct cvmx_npei_msi_rd_map_s cn52xx;
+ struct cvmx_npei_msi_rd_map_s cn52xxp1;
+ struct cvmx_npei_msi_rd_map_s cn56xx;
+ struct cvmx_npei_msi_rd_map_s cn56xxp1;
+};
+
+typedef union cvmx_npei_msi_rd_map cvmx_npei_msi_rd_map_t;
+
+/**
+ * cvmx_npei_msi_w1c_enb0
+ *
+ * NPEI_MSI_W1C_ENB0 = NPEI MSI Write 1 To Clear Enable0
+ *
+ * Used to clear bits in NPEI_MSI_ENB0. This is a PASS2 register.
+ */
+union cvmx_npei_msi_w1c_enb0 {
+ u64 u64;
+ struct cvmx_npei_msi_w1c_enb0_s {
+ u64 clr : 64;
+ } s;
+ struct cvmx_npei_msi_w1c_enb0_s cn52xx;
+ struct cvmx_npei_msi_w1c_enb0_s cn56xx;
+};
+
+typedef union cvmx_npei_msi_w1c_enb0 cvmx_npei_msi_w1c_enb0_t;
+
+/**
+ * cvmx_npei_msi_w1c_enb1
+ *
+ * NPEI_MSI_W1C_ENB1 = NPEI MSI Write 1 To Clear Enable1
+ *
+ * Used to clear bits in NPEI_MSI_ENB1. This is a PASS2 register.
+ */
+union cvmx_npei_msi_w1c_enb1 {
+ u64 u64;
+ struct cvmx_npei_msi_w1c_enb1_s {
+ u64 clr : 64;
+ } s;
+ struct cvmx_npei_msi_w1c_enb1_s cn52xx;
+ struct cvmx_npei_msi_w1c_enb1_s cn56xx;
+};
+
+typedef union cvmx_npei_msi_w1c_enb1 cvmx_npei_msi_w1c_enb1_t;
+
+/**
+ * cvmx_npei_msi_w1c_enb2
+ *
+ * NPEI_MSI_W1C_ENB2 = NPEI MSI Write 1 To Clear Enable2
+ *
+ * Used to clear bits in NPEI_MSI_ENB2. This is a PASS2 register.
+ */
+union cvmx_npei_msi_w1c_enb2 {
+ u64 u64;
+ struct cvmx_npei_msi_w1c_enb2_s {
+ u64 clr : 64;
+ } s;
+ struct cvmx_npei_msi_w1c_enb2_s cn52xx;
+ struct cvmx_npei_msi_w1c_enb2_s cn56xx;
+};
+
+typedef union cvmx_npei_msi_w1c_enb2 cvmx_npei_msi_w1c_enb2_t;
+
+/**
+ * cvmx_npei_msi_w1c_enb3
+ *
+ * NPEI_MSI_W1C_ENB3 = NPEI MSI Write 1 To Clear Enable3
+ *
+ * Used to clear bits in NPEI_MSI_ENB3. This is a PASS2 register.
+ */
+union cvmx_npei_msi_w1c_enb3 {
+ u64 u64;
+ struct cvmx_npei_msi_w1c_enb3_s {
+ u64 clr : 64;
+ } s;
+ struct cvmx_npei_msi_w1c_enb3_s cn52xx;
+ struct cvmx_npei_msi_w1c_enb3_s cn56xx;
+};
+
+typedef union cvmx_npei_msi_w1c_enb3 cvmx_npei_msi_w1c_enb3_t;
+
+/**
+ * cvmx_npei_msi_w1s_enb0
+ *
+ * NPEI_MSI_W1S_ENB0 = NPEI MSI Write 1 To Set Enable0
+ *
+ * Used to set bits in NPEI_MSI_ENB0. This is a PASS2 register.
+ */
+union cvmx_npei_msi_w1s_enb0 {
+ u64 u64;
+ struct cvmx_npei_msi_w1s_enb0_s {
+ u64 set : 64;
+ } s;
+ struct cvmx_npei_msi_w1s_enb0_s cn52xx;
+ struct cvmx_npei_msi_w1s_enb0_s cn56xx;
+};
+
+typedef union cvmx_npei_msi_w1s_enb0 cvmx_npei_msi_w1s_enb0_t;
+
+/**
+ * cvmx_npei_msi_w1s_enb1
+ *
+ * NPEI_MSI_W1S_ENB1 = NPEI MSI Write 1 To Set Enable1
+ *
+ * Used to set bits in NPEI_MSI_ENB1. This is a PASS2 register.
+ */
+union cvmx_npei_msi_w1s_enb1 {
+ u64 u64;
+ struct cvmx_npei_msi_w1s_enb1_s {
+ u64 set : 64;
+ } s;
+ struct cvmx_npei_msi_w1s_enb1_s cn52xx;
+ struct cvmx_npei_msi_w1s_enb1_s cn56xx;
+};
+
+typedef union cvmx_npei_msi_w1s_enb1 cvmx_npei_msi_w1s_enb1_t;
+
+/**
+ * cvmx_npei_msi_w1s_enb2
+ *
+ * NPEI_MSI_W1S_ENB2 = NPEI MSI Write 1 To Set Enable2
+ *
+ * Used to set bits in NPEI_MSI_ENB2. This is a PASS2 register.
+ */
+union cvmx_npei_msi_w1s_enb2 {
+ u64 u64;
+ struct cvmx_npei_msi_w1s_enb2_s {
+ u64 set : 64;
+ } s;
+ struct cvmx_npei_msi_w1s_enb2_s cn52xx;
+ struct cvmx_npei_msi_w1s_enb2_s cn56xx;
+};
+
+typedef union cvmx_npei_msi_w1s_enb2 cvmx_npei_msi_w1s_enb2_t;
+
+/**
+ * cvmx_npei_msi_w1s_enb3
+ *
+ * NPEI_MSI_W1S_ENB3 = NPEI MSI Write 1 To Set Enable3
+ *
+ * Used to set bits in NPEI_MSI_ENB3. This is a PASS2 register.
+ */
+union cvmx_npei_msi_w1s_enb3 {
+ u64 u64;
+ struct cvmx_npei_msi_w1s_enb3_s {
+ u64 set : 64;
+ } s;
+ struct cvmx_npei_msi_w1s_enb3_s cn52xx;
+ struct cvmx_npei_msi_w1s_enb3_s cn56xx;
+};
+
+typedef union cvmx_npei_msi_w1s_enb3 cvmx_npei_msi_w1s_enb3_t;
+
+/**
+ * cvmx_npei_msi_wr_map
+ *
+ * NPEI_MSI_WR_MAP = NPEI MSI Write MAP
+ *
+ * Used to write the mapping function of the NPEI_PCIE_MSI_RCV to NPEI_MSI_RCV
+ * registers.
+ */
+union cvmx_npei_msi_wr_map {
+ u64 u64;
+ struct cvmx_npei_msi_wr_map_s {
+ u64 reserved_16_63 : 48;
+ u64 ciu_int : 8;
+ u64 msi_int : 8;
+ } s;
+ struct cvmx_npei_msi_wr_map_s cn52xx;
+ struct cvmx_npei_msi_wr_map_s cn52xxp1;
+ struct cvmx_npei_msi_wr_map_s cn56xx;
+ struct cvmx_npei_msi_wr_map_s cn56xxp1;
+};
+
+typedef union cvmx_npei_msi_wr_map cvmx_npei_msi_wr_map_t;
+
+/**
+ * cvmx_npei_pcie_credit_cnt
+ *
+ * NPEI_PCIE_CREDIT_CNT = NPEI PCIE Credit Count
+ *
+ * Contains the number of credits for the pcie port FIFOs used by the NPEI.
+ * This value needs to be set BEFORE PCIe traffic
+ * flow from NPEI to PCIE Ports starts. A write to this register will cause
+ * the credit counts in the NPEI for the two
+ * PCIE ports to be reset to the value in this register.
+ */
+union cvmx_npei_pcie_credit_cnt {
+ u64 u64;
+ struct cvmx_npei_pcie_credit_cnt_s {
+ u64 reserved_48_63 : 16;
+ u64 p1_ccnt : 8;
+ u64 p1_ncnt : 8;
+ u64 p1_pcnt : 8;
+ u64 p0_ccnt : 8;
+ u64 p0_ncnt : 8;
+ u64 p0_pcnt : 8;
+ } s;
+ struct cvmx_npei_pcie_credit_cnt_s cn52xx;
+ struct cvmx_npei_pcie_credit_cnt_s cn56xx;
+};
+
+typedef union cvmx_npei_pcie_credit_cnt cvmx_npei_pcie_credit_cnt_t;
+
+/**
+ * cvmx_npei_pcie_msi_rcv
+ *
+ * NPEI_PCIE_MSI_RCV = NPEI PCIe MSI Receive
+ *
+ * Register where MSI writes are directed from the PCIe.
+ */
+union cvmx_npei_pcie_msi_rcv {
+ u64 u64;
+ struct cvmx_npei_pcie_msi_rcv_s {
+ u64 reserved_8_63 : 56;
+ u64 intr : 8;
+ } s;
+ struct cvmx_npei_pcie_msi_rcv_s cn52xx;
+ struct cvmx_npei_pcie_msi_rcv_s cn52xxp1;
+ struct cvmx_npei_pcie_msi_rcv_s cn56xx;
+ struct cvmx_npei_pcie_msi_rcv_s cn56xxp1;
+};
+
+typedef union cvmx_npei_pcie_msi_rcv cvmx_npei_pcie_msi_rcv_t;
+
+/**
+ * cvmx_npei_pcie_msi_rcv_b1
+ *
+ * NPEI_PCIE_MSI_RCV_B1 = NPEI PCIe MSI Receive Byte 1
+ *
+ * Register where MSI writes are directed from the PCIe.
+ */
+union cvmx_npei_pcie_msi_rcv_b1 {
+ u64 u64;
+ struct cvmx_npei_pcie_msi_rcv_b1_s {
+ u64 reserved_16_63 : 48;
+ u64 intr : 8;
+ u64 reserved_0_7 : 8;
+ } s;
+ struct cvmx_npei_pcie_msi_rcv_b1_s cn52xx;
+ struct cvmx_npei_pcie_msi_rcv_b1_s cn52xxp1;
+ struct cvmx_npei_pcie_msi_rcv_b1_s cn56xx;
+ struct cvmx_npei_pcie_msi_rcv_b1_s cn56xxp1;
+};
+
+typedef union cvmx_npei_pcie_msi_rcv_b1 cvmx_npei_pcie_msi_rcv_b1_t;
+
+/**
+ * cvmx_npei_pcie_msi_rcv_b2
+ *
+ * NPEI_PCIE_MSI_RCV_B2 = NPEI PCIe MSI Receive Byte 2
+ *
+ * Register where MSI writes are directed from the PCIe.
+ */
+union cvmx_npei_pcie_msi_rcv_b2 {
+ u64 u64;
+ struct cvmx_npei_pcie_msi_rcv_b2_s {
+ u64 reserved_24_63 : 40;
+ u64 intr : 8;
+ u64 reserved_0_15 : 16;
+ } s;
+ struct cvmx_npei_pcie_msi_rcv_b2_s cn52xx;
+ struct cvmx_npei_pcie_msi_rcv_b2_s cn52xxp1;
+ struct cvmx_npei_pcie_msi_rcv_b2_s cn56xx;
+ struct cvmx_npei_pcie_msi_rcv_b2_s cn56xxp1;
+};
+
+typedef union cvmx_npei_pcie_msi_rcv_b2 cvmx_npei_pcie_msi_rcv_b2_t;
+
+/**
+ * cvmx_npei_pcie_msi_rcv_b3
+ *
+ * NPEI_PCIE_MSI_RCV_B3 = NPEI PCIe MSI Receive Byte 3
+ *
+ * Register where MSI writes are directed from the PCIe.
+ */
+union cvmx_npei_pcie_msi_rcv_b3 {
+ u64 u64;
+ struct cvmx_npei_pcie_msi_rcv_b3_s {
+ u64 reserved_32_63 : 32;
+ u64 intr : 8;
+ u64 reserved_0_23 : 24;
+ } s;
+ struct cvmx_npei_pcie_msi_rcv_b3_s cn52xx;
+ struct cvmx_npei_pcie_msi_rcv_b3_s cn52xxp1;
+ struct cvmx_npei_pcie_msi_rcv_b3_s cn56xx;
+ struct cvmx_npei_pcie_msi_rcv_b3_s cn56xxp1;
+};
+
+typedef union cvmx_npei_pcie_msi_rcv_b3 cvmx_npei_pcie_msi_rcv_b3_t;
+
+/**
+ * cvmx_npei_pkt#_cnts
+ *
+ * NPEI_PKT[0..31]_CNTS = NPEI Packet ring# Counts
+ *
+ * The counters for output rings.
+ */
+union cvmx_npei_pktx_cnts {
+ u64 u64;
+ struct cvmx_npei_pktx_cnts_s {
+ u64 reserved_54_63 : 10;
+ u64 timer : 22;
+ u64 cnt : 32;
+ } s;
+ struct cvmx_npei_pktx_cnts_s cn52xx;
+ struct cvmx_npei_pktx_cnts_s cn56xx;
+};
+
+typedef union cvmx_npei_pktx_cnts cvmx_npei_pktx_cnts_t;
+
+/**
+ * cvmx_npei_pkt#_in_bp
+ *
+ * NPEI_PKT[0..31]_IN_BP = NPEI Packet ring# Input Backpressure
+ *
+ * The counters and thresholds for input packets to apply backpressure to
+ * processing of the packets.
+ */
+union cvmx_npei_pktx_in_bp {
+ u64 u64;
+ struct cvmx_npei_pktx_in_bp_s {
+ u64 wmark : 32;
+ u64 cnt : 32;
+ } s;
+ struct cvmx_npei_pktx_in_bp_s cn52xx;
+ struct cvmx_npei_pktx_in_bp_s cn56xx;
+};
+
+typedef union cvmx_npei_pktx_in_bp cvmx_npei_pktx_in_bp_t;
+
+/**
+ * cvmx_npei_pkt#_instr_baddr
+ *
+ * NPEI_PKT[0..31]_INSTR_BADDR = NPEI Packet ring# Instruction Base Address
+ *
+ * Start of Instruction for input packets.
+ */
+union cvmx_npei_pktx_instr_baddr {
+ u64 u64;
+ struct cvmx_npei_pktx_instr_baddr_s {
+ u64 addr : 61;
+ u64 reserved_0_2 : 3;
+ } s;
+ struct cvmx_npei_pktx_instr_baddr_s cn52xx;
+ struct cvmx_npei_pktx_instr_baddr_s cn56xx;
+};
+
+typedef union cvmx_npei_pktx_instr_baddr cvmx_npei_pktx_instr_baddr_t;
+
+/**
+ * cvmx_npei_pkt#_instr_baoff_dbell
+ *
+ * NPEI_PKT[0..31]_INSTR_BAOFF_DBELL = NPEI Packet ring# Instruction Base
+ * Address Offset and Doorbell
+ *
+ * The doorbell and base address offset for next read.
+ */
+union cvmx_npei_pktx_instr_baoff_dbell {
+ u64 u64;
+ struct cvmx_npei_pktx_instr_baoff_dbell_s {
+ u64 aoff : 32;
+ u64 dbell : 32;
+ } s;
+ struct cvmx_npei_pktx_instr_baoff_dbell_s cn52xx;
+ struct cvmx_npei_pktx_instr_baoff_dbell_s cn56xx;
+};
+
+typedef union cvmx_npei_pktx_instr_baoff_dbell
+ cvmx_npei_pktx_instr_baoff_dbell_t;
+
+/**
+ * cvmx_npei_pkt#_instr_fifo_rsize
+ *
+ * NPEI_PKT[0..31]_INSTR_FIFO_RSIZE = NPEI Packet ring# Instruction FIFO and
+ * Ring Size.
+ *
+ * Fifo field and ring size for Instructions.
+ */
+union cvmx_npei_pktx_instr_fifo_rsize {
+ u64 u64;
+ struct cvmx_npei_pktx_instr_fifo_rsize_s {
+ u64 max : 9;
+ u64 rrp : 9;
+ u64 wrp : 9;
+ u64 fcnt : 5;
+ u64 rsize : 32;
+ } s;
+ struct cvmx_npei_pktx_instr_fifo_rsize_s cn52xx;
+ struct cvmx_npei_pktx_instr_fifo_rsize_s cn56xx;
+};
+
+typedef union cvmx_npei_pktx_instr_fifo_rsize cvmx_npei_pktx_instr_fifo_rsize_t;
+
+/**
+ * cvmx_npei_pkt#_instr_header
+ *
+ * NPEI_PKT[0..31]_INSTR_HEADER = NPEI Packet ring# Instruction Header.
+ *
+ * Values used to build input packet header.
+ */
+union cvmx_npei_pktx_instr_header {
+ u64 u64;
+ struct cvmx_npei_pktx_instr_header_s {
+ u64 reserved_44_63 : 20;
+ u64 pbp : 1;
+ u64 reserved_38_42 : 5;
+ u64 rparmode : 2;
+ u64 reserved_35_35 : 1;
+ u64 rskp_len : 7;
+ u64 reserved_22_27 : 6;
+ u64 use_ihdr : 1;
+ u64 reserved_16_20 : 5;
+ u64 par_mode : 2;
+ u64 reserved_13_13 : 1;
+ u64 skp_len : 7;
+ u64 reserved_0_5 : 6;
+ } s;
+ struct cvmx_npei_pktx_instr_header_s cn52xx;
+ struct cvmx_npei_pktx_instr_header_s cn56xx;
+};
+
+typedef union cvmx_npei_pktx_instr_header cvmx_npei_pktx_instr_header_t;
+
+/**
+ * cvmx_npei_pkt#_slist_baddr
+ *
+ * NPEI_PKT[0..31]_SLIST_BADDR = NPEI Packet ring# Scatter List Base Address
+ *
+ * Start of Scatter List for output packet pointers - MUST be 16 byte aligned
+ */
+union cvmx_npei_pktx_slist_baddr {
+ u64 u64;
+ struct cvmx_npei_pktx_slist_baddr_s {
+ u64 addr : 60;
+ u64 reserved_0_3 : 4;
+ } s;
+ struct cvmx_npei_pktx_slist_baddr_s cn52xx;
+ struct cvmx_npei_pktx_slist_baddr_s cn56xx;
+};
+
+typedef union cvmx_npei_pktx_slist_baddr cvmx_npei_pktx_slist_baddr_t;
+
+/**
+ * cvmx_npei_pkt#_slist_baoff_dbell
+ *
+ * NPEI_PKT[0..31]_SLIST_BAOFF_DBELL = NPEI Packet ring# Scatter List Base
+ * Address Offset and Doorbell
+ *
+ * The doorbell and base address offset for next read.
+ */
+union cvmx_npei_pktx_slist_baoff_dbell {
+ u64 u64;
+ struct cvmx_npei_pktx_slist_baoff_dbell_s {
+ u64 aoff : 32;
+ u64 dbell : 32;
+ } s;
+ struct cvmx_npei_pktx_slist_baoff_dbell_s cn52xx;
+ struct cvmx_npei_pktx_slist_baoff_dbell_s cn56xx;
+};
+
+typedef union cvmx_npei_pktx_slist_baoff_dbell
+ cvmx_npei_pktx_slist_baoff_dbell_t;
+
+/**
+ * cvmx_npei_pkt#_slist_fifo_rsize
+ *
+ * NPEI_PKT[0..31]_SLIST_FIFO_RSIZE = NPEI Packet ring# Scatter List FIFO and
+ * Ring Size.
+ *
+ * The number of scatter pointer pairs in the scatter list.
+ */
+union cvmx_npei_pktx_slist_fifo_rsize {
+ u64 u64;
+ struct cvmx_npei_pktx_slist_fifo_rsize_s {
+ u64 reserved_32_63 : 32;
+ u64 rsize : 32;
+ } s;
+ struct cvmx_npei_pktx_slist_fifo_rsize_s cn52xx;
+ struct cvmx_npei_pktx_slist_fifo_rsize_s cn56xx;
+};
+
+typedef union cvmx_npei_pktx_slist_fifo_rsize cvmx_npei_pktx_slist_fifo_rsize_t;
+
+/**
+ * cvmx_npei_pkt_cnt_int
+ *
+ * NPEI_PKT_CNT_INT = NPEI Packet Counter Interrupt
+ *
+ * The packets rings that are interrupting because of Packet Counters.
+ */
+union cvmx_npei_pkt_cnt_int {
+ u64 u64;
+ struct cvmx_npei_pkt_cnt_int_s {
+ u64 reserved_32_63 : 32;
+ u64 port : 32;
+ } s;
+ struct cvmx_npei_pkt_cnt_int_s cn52xx;
+ struct cvmx_npei_pkt_cnt_int_s cn56xx;
+};
+
+typedef union cvmx_npei_pkt_cnt_int cvmx_npei_pkt_cnt_int_t;
+
+/**
+ * cvmx_npei_pkt_cnt_int_enb
+ *
+ * NPEI_PKT_CNT_INT_ENB = NPEI Packet Counter Interrupt Enable
+ *
+ * Enable for the packets rings that are interrupting because of Packet Counters.
+ */
+union cvmx_npei_pkt_cnt_int_enb {
+ u64 u64;
+ struct cvmx_npei_pkt_cnt_int_enb_s {
+ u64 reserved_32_63 : 32;
+ u64 port : 32;
+ } s;
+ struct cvmx_npei_pkt_cnt_int_enb_s cn52xx;
+ struct cvmx_npei_pkt_cnt_int_enb_s cn56xx;
+};
+
+typedef union cvmx_npei_pkt_cnt_int_enb cvmx_npei_pkt_cnt_int_enb_t;
+
+/**
+ * cvmx_npei_pkt_data_out_es
+ *
+ * NPEI_PKT_DATA_OUT_ES = NPEI's Packet Data Out Endian Swap
+ *
+ * The Endian Swap for writing Data Out.
+ */
+union cvmx_npei_pkt_data_out_es {
+ u64 u64;
+ struct cvmx_npei_pkt_data_out_es_s {
+ u64 es : 64;
+ } s;
+ struct cvmx_npei_pkt_data_out_es_s cn52xx;
+ struct cvmx_npei_pkt_data_out_es_s cn56xx;
+};
+
+typedef union cvmx_npei_pkt_data_out_es cvmx_npei_pkt_data_out_es_t;
+
+/**
+ * cvmx_npei_pkt_data_out_ns
+ *
+ * NPEI_PKT_DATA_OUT_NS = NPEI's Packet Data Out No Snoop
+ *
+ * The NS field for the TLP when writing packet data.
+ */
+union cvmx_npei_pkt_data_out_ns {
+ u64 u64;
+ struct cvmx_npei_pkt_data_out_ns_s {
+ u64 reserved_32_63 : 32;
+ u64 nsr : 32;
+ } s;
+ struct cvmx_npei_pkt_data_out_ns_s cn52xx;
+ struct cvmx_npei_pkt_data_out_ns_s cn56xx;
+};
+
+typedef union cvmx_npei_pkt_data_out_ns cvmx_npei_pkt_data_out_ns_t;
+
+/**
+ * cvmx_npei_pkt_data_out_ror
+ *
+ * NPEI_PKT_DATA_OUT_ROR = NPEI's Packet Data Out Relaxed Ordering
+ *
+ * The ROR field for the TLP when writing Packet Data.
+ */
+union cvmx_npei_pkt_data_out_ror {
+ u64 u64;
+ struct cvmx_npei_pkt_data_out_ror_s {
+ u64 reserved_32_63 : 32;
+ u64 ror : 32;
+ } s;
+ struct cvmx_npei_pkt_data_out_ror_s cn52xx;
+ struct cvmx_npei_pkt_data_out_ror_s cn56xx;
+};
+
+typedef union cvmx_npei_pkt_data_out_ror cvmx_npei_pkt_data_out_ror_t;
+
+/**
+ * cvmx_npei_pkt_dpaddr
+ *
+ * NPEI_PKT_DPADDR = NPEI's Packet Data Pointer Addr
+ *
+ * Used to determine address and attributes for packet data writes.
+ */
+union cvmx_npei_pkt_dpaddr {
+ u64 u64;
+ struct cvmx_npei_pkt_dpaddr_s {
+ u64 reserved_32_63 : 32;
+ u64 dptr : 32;
+ } s;
+ struct cvmx_npei_pkt_dpaddr_s cn52xx;
+ struct cvmx_npei_pkt_dpaddr_s cn56xx;
+};
+
+typedef union cvmx_npei_pkt_dpaddr cvmx_npei_pkt_dpaddr_t;
+
+/**
+ * cvmx_npei_pkt_in_bp
+ *
+ * NPEI_PKT_IN_BP = NPEI Packet Input Backpressure
+ *
+ * Which input rings have backpressure applied.
+ */
+union cvmx_npei_pkt_in_bp {
+ u64 u64;
+ struct cvmx_npei_pkt_in_bp_s {
+ u64 reserved_32_63 : 32;
+ u64 bp : 32;
+ } s;
+ struct cvmx_npei_pkt_in_bp_s cn52xx;
+ struct cvmx_npei_pkt_in_bp_s cn56xx;
+};
+
+typedef union cvmx_npei_pkt_in_bp cvmx_npei_pkt_in_bp_t;
+
+/**
+ * cvmx_npei_pkt_in_done#_cnts
+ *
+ * NPEI_PKT_IN_DONE[0..31]_CNTS = NPEI Instruction Done ring# Counts
+ *
+ * Counters for instructions completed on Input rings.
+ */
+union cvmx_npei_pkt_in_donex_cnts {
+ u64 u64;
+ struct cvmx_npei_pkt_in_donex_cnts_s {
+ u64 reserved_32_63 : 32;
+ u64 cnt : 32;
+ } s;
+ struct cvmx_npei_pkt_in_donex_cnts_s cn52xx;
+ struct cvmx_npei_pkt_in_donex_cnts_s cn56xx;
+};
+
+typedef union cvmx_npei_pkt_in_donex_cnts cvmx_npei_pkt_in_donex_cnts_t;
+
+/**
+ * cvmx_npei_pkt_in_instr_counts
+ *
+ * NPEI_PKT_IN_INSTR_COUNTS = NPEI Packet Input Instruction Counts
+ *
+ * Keeps track of the number of instructions read into the FIFO and Packets
+ * sent to IPD.
+ */
+union cvmx_npei_pkt_in_instr_counts {
+ u64 u64;
+ struct cvmx_npei_pkt_in_instr_counts_s {
+ u64 wr_cnt : 32;
+ u64 rd_cnt : 32;
+ } s;
+ struct cvmx_npei_pkt_in_instr_counts_s cn52xx;
+ struct cvmx_npei_pkt_in_instr_counts_s cn56xx;
+};
+
+typedef union cvmx_npei_pkt_in_instr_counts cvmx_npei_pkt_in_instr_counts_t;
+
+/**
+ * cvmx_npei_pkt_in_pcie_port
+ *
+ * NPEI_PKT_IN_PCIE_PORT = NPEI's Packet In To PCIe Port Assignment
+ *
+ * Assigns Packet Input rings to PCIe ports.
+ */
+union cvmx_npei_pkt_in_pcie_port {
+ u64 u64;
+ struct cvmx_npei_pkt_in_pcie_port_s {
+ u64 pp : 64;
+ } s;
+ struct cvmx_npei_pkt_in_pcie_port_s cn52xx;
+ struct cvmx_npei_pkt_in_pcie_port_s cn56xx;
+};
+
+typedef union cvmx_npei_pkt_in_pcie_port cvmx_npei_pkt_in_pcie_port_t;
+
+/**
+ * cvmx_npei_pkt_input_control
+ *
+ * NPEI_PKT_INPUT_CONTROL = NPEI's Packet Input Control
+ *
+ * Control for reads for gather list and instructions.
+ */
+union cvmx_npei_pkt_input_control {
+ u64 u64;
+ struct cvmx_npei_pkt_input_control_s {
+ u64 reserved_23_63 : 41;
+ u64 pkt_rr : 1;
+ u64 pbp_dhi : 13;
+ u64 d_nsr : 1;
+ u64 d_esr : 2;
+ u64 d_ror : 1;
+ u64 use_csr : 1;
+ u64 nsr : 1;
+ u64 esr : 2;
+ u64 ror : 1;
+ } s;
+ struct cvmx_npei_pkt_input_control_s cn52xx;
+ struct cvmx_npei_pkt_input_control_s cn56xx;
+};
+
+typedef union cvmx_npei_pkt_input_control cvmx_npei_pkt_input_control_t;
+
+/**
+ * cvmx_npei_pkt_instr_enb
+ *
+ * NPEI_PKT_INSTR_ENB = NPEI's Packet Instruction Enable
+ *
+ * Enables the instruction fetch for a Packet-ring.
+ */
+union cvmx_npei_pkt_instr_enb {
+ u64 u64;
+ struct cvmx_npei_pkt_instr_enb_s {
+ u64 reserved_32_63 : 32;
+ u64 enb : 32;
+ } s;
+ struct cvmx_npei_pkt_instr_enb_s cn52xx;
+ struct cvmx_npei_pkt_instr_enb_s cn56xx;
+};
+
+typedef union cvmx_npei_pkt_instr_enb cvmx_npei_pkt_instr_enb_t;
+
+/**
+ * cvmx_npei_pkt_instr_rd_size
+ *
+ * NPEI_PKT_INSTR_RD_SIZE = NPEI Instruction Read Size
+ *
+ * The number of instruction allowed to be read at one time.
+ */
+union cvmx_npei_pkt_instr_rd_size {
+ u64 u64;
+ struct cvmx_npei_pkt_instr_rd_size_s {
+ u64 rdsize : 64;
+ } s;
+ struct cvmx_npei_pkt_instr_rd_size_s cn52xx;
+ struct cvmx_npei_pkt_instr_rd_size_s cn56xx;
+};
+
+typedef union cvmx_npei_pkt_instr_rd_size cvmx_npei_pkt_instr_rd_size_t;
+
+/**
+ * cvmx_npei_pkt_instr_size
+ *
+ * NPEI_PKT_INSTR_SIZE = NPEI's Packet Instruction Size
+ *
+ * Determines if instructions are 64 or 32 byte in size for a Packet-ring.
+ */
+union cvmx_npei_pkt_instr_size {
+ u64 u64;
+ struct cvmx_npei_pkt_instr_size_s {
+ u64 reserved_32_63 : 32;
+ u64 is_64b : 32;
+ } s;
+ struct cvmx_npei_pkt_instr_size_s cn52xx;
+ struct cvmx_npei_pkt_instr_size_s cn56xx;
+};
+
+typedef union cvmx_npei_pkt_instr_size cvmx_npei_pkt_instr_size_t;
+
+/**
+ * cvmx_npei_pkt_int_levels
+ *
+ * 0x90F0 reserved NPEI_PKT_PCIE_PORT2
+ *
+ *
+ * NPEI_PKT_INT_LEVELS = NPEI's Packet Interrupt Levels
+ *
+ * Output packet interrupt levels.
+ */
+union cvmx_npei_pkt_int_levels {
+ u64 u64;
+ struct cvmx_npei_pkt_int_levels_s {
+ u64 reserved_54_63 : 10;
+ u64 time : 22;
+ u64 cnt : 32;
+ } s;
+ struct cvmx_npei_pkt_int_levels_s cn52xx;
+ struct cvmx_npei_pkt_int_levels_s cn56xx;
+};
+
+typedef union cvmx_npei_pkt_int_levels cvmx_npei_pkt_int_levels_t;
+
+/**
+ * cvmx_npei_pkt_iptr
+ *
+ * NPEI_PKT_IPTR = NPEI's Packet Info Pointer
+ *
+ * Controls using the Info-Pointer to store length and data.
+ */
+union cvmx_npei_pkt_iptr {
+ u64 u64;
+ struct cvmx_npei_pkt_iptr_s {
+ u64 reserved_32_63 : 32;
+ u64 iptr : 32;
+ } s;
+ struct cvmx_npei_pkt_iptr_s cn52xx;
+ struct cvmx_npei_pkt_iptr_s cn56xx;
+};
+
+typedef union cvmx_npei_pkt_iptr cvmx_npei_pkt_iptr_t;
+
+/**
+ * cvmx_npei_pkt_out_bmode
+ *
+ * NPEI_PKT_OUT_BMODE = NPEI's Packet Out Byte Mode
+ *
+ * Control the updating of the NPEI_PKT#_CNT register.
+ */
+union cvmx_npei_pkt_out_bmode {
+ u64 u64;
+ struct cvmx_npei_pkt_out_bmode_s {
+ u64 reserved_32_63 : 32;
+ u64 bmode : 32;
+ } s;
+ struct cvmx_npei_pkt_out_bmode_s cn52xx;
+ struct cvmx_npei_pkt_out_bmode_s cn56xx;
+};
+
+typedef union cvmx_npei_pkt_out_bmode cvmx_npei_pkt_out_bmode_t;
+
+/**
+ * cvmx_npei_pkt_out_enb
+ *
+ * NPEI_PKT_OUT_ENB = NPEI's Packet Output Enable
+ *
+ * Enables the output packet engines.
+ */
+union cvmx_npei_pkt_out_enb {
+ u64 u64;
+ struct cvmx_npei_pkt_out_enb_s {
+ u64 reserved_32_63 : 32;
+ u64 enb : 32;
+ } s;
+ struct cvmx_npei_pkt_out_enb_s cn52xx;
+ struct cvmx_npei_pkt_out_enb_s cn56xx;
+};
+
+typedef union cvmx_npei_pkt_out_enb cvmx_npei_pkt_out_enb_t;
+
+/**
+ * cvmx_npei_pkt_output_wmark
+ *
+ * NPEI_PKT_OUTPUT_WMARK = NPEI's Packet Output Water Mark
+ *
+ * When the NPEI_PKT#_SLIST_BAOFF_DBELL[DBELL] value is less than this
+ * value, backpressure for the rings will be applied.
+ */
+union cvmx_npei_pkt_output_wmark {
+ u64 u64;
+ struct cvmx_npei_pkt_output_wmark_s {
+ u64 reserved_32_63 : 32;
+ u64 wmark : 32;
+ } s;
+ struct cvmx_npei_pkt_output_wmark_s cn52xx;
+ struct cvmx_npei_pkt_output_wmark_s cn56xx;
+};
+
+typedef union cvmx_npei_pkt_output_wmark cvmx_npei_pkt_output_wmark_t;
+
+/**
+ * cvmx_npei_pkt_pcie_port
+ *
+ * NPEI_PKT_PCIE_PORT = NPEI's Packet To PCIe Port Assignment
+ *
+ * Assigns Packet Ports to PCIe ports.
+ */
+union cvmx_npei_pkt_pcie_port {
+ u64 u64;
+ struct cvmx_npei_pkt_pcie_port_s {
+ u64 pp : 64;
+ } s;
+ struct cvmx_npei_pkt_pcie_port_s cn52xx;
+ struct cvmx_npei_pkt_pcie_port_s cn56xx;
+};
+
+typedef union cvmx_npei_pkt_pcie_port cvmx_npei_pkt_pcie_port_t;
+
+/**
+ * cvmx_npei_pkt_port_in_rst
+ *
+ * NPEI_PKT_PORT_IN_RST = NPEI Packet Port In Reset
+ *
+ * Vector bits related to ring-port for ones that are reset.
+ */
+union cvmx_npei_pkt_port_in_rst {
+ u64 u64;
+ struct cvmx_npei_pkt_port_in_rst_s {
+ u64 in_rst : 32;
+ u64 out_rst : 32;
+ } s;
+ struct cvmx_npei_pkt_port_in_rst_s cn52xx;
+ struct cvmx_npei_pkt_port_in_rst_s cn56xx;
+};
+
+typedef union cvmx_npei_pkt_port_in_rst cvmx_npei_pkt_port_in_rst_t;
+
+/**
+ * cvmx_npei_pkt_slist_es
+ *
+ * NPEI_PKT_SLIST_ES = NPEI's Packet Scatter List Endian Swap
+ *
+ * The Endian Swap for Scatter List Read.
+ */
+union cvmx_npei_pkt_slist_es {
+ u64 u64;
+ struct cvmx_npei_pkt_slist_es_s {
+ u64 es : 64;
+ } s;
+ struct cvmx_npei_pkt_slist_es_s cn52xx;
+ struct cvmx_npei_pkt_slist_es_s cn56xx;
+};
+
+typedef union cvmx_npei_pkt_slist_es cvmx_npei_pkt_slist_es_t;
+
+/**
+ * cvmx_npei_pkt_slist_id_size
+ *
+ * NPEI_PKT_SLIST_ID_SIZE = NPEI Packet Scatter List Info and Data Size
+ *
+ * The Size of the information and data fields pointed to by Scatter List
+ * pointers.
+ */
+union cvmx_npei_pkt_slist_id_size {
+ u64 u64;
+ struct cvmx_npei_pkt_slist_id_size_s {
+ u64 reserved_23_63 : 41;
+ u64 isize : 7;
+ u64 bsize : 16;
+ } s;
+ struct cvmx_npei_pkt_slist_id_size_s cn52xx;
+ struct cvmx_npei_pkt_slist_id_size_s cn56xx;
+};
+
+typedef union cvmx_npei_pkt_slist_id_size cvmx_npei_pkt_slist_id_size_t;
+
+/**
+ * cvmx_npei_pkt_slist_ns
+ *
+ * NPEI_PKT_SLIST_NS = NPEI's Packet Scatter List No Snoop
+ *
+ * The NS field for the TLP when fetching Scatter List.
+ */
+union cvmx_npei_pkt_slist_ns {
+ u64 u64;
+ struct cvmx_npei_pkt_slist_ns_s {
+ u64 reserved_32_63 : 32;
+ u64 nsr : 32;
+ } s;
+ struct cvmx_npei_pkt_slist_ns_s cn52xx;
+ struct cvmx_npei_pkt_slist_ns_s cn56xx;
+};
+
+typedef union cvmx_npei_pkt_slist_ns cvmx_npei_pkt_slist_ns_t;
+
+/**
+ * cvmx_npei_pkt_slist_ror
+ *
+ * NPEI_PKT_SLIST_ROR = NPEI's Packet Scatter List Relaxed Ordering
+ *
+ * The ROR field for the TLP when fetching Scatter List.
+ */
+union cvmx_npei_pkt_slist_ror {
+ u64 u64;
+ struct cvmx_npei_pkt_slist_ror_s {
+ u64 reserved_32_63 : 32;
+ u64 ror : 32;
+ } s;
+ struct cvmx_npei_pkt_slist_ror_s cn52xx;
+ struct cvmx_npei_pkt_slist_ror_s cn56xx;
+};
+
+typedef union cvmx_npei_pkt_slist_ror cvmx_npei_pkt_slist_ror_t;
+
+/**
+ * cvmx_npei_pkt_time_int
+ *
+ * NPEI_PKT_TIME_INT = NPEI Packet Timer Interrupt
+ *
+ * The packets rings that are interrupting because of Packet Timers.
+ */
+union cvmx_npei_pkt_time_int {
+ u64 u64;
+ struct cvmx_npei_pkt_time_int_s {
+ u64 reserved_32_63 : 32;
+ u64 port : 32;
+ } s;
+ struct cvmx_npei_pkt_time_int_s cn52xx;
+ struct cvmx_npei_pkt_time_int_s cn56xx;
+};
+
+typedef union cvmx_npei_pkt_time_int cvmx_npei_pkt_time_int_t;
+
+/**
+ * cvmx_npei_pkt_time_int_enb
+ *
+ * NPEI_PKT_TIME_INT_ENB = NPEI Packet Timer Interrupt Enable
+ *
+ * The packets rings that are interrupting because of Packet Timers.
+ */
+union cvmx_npei_pkt_time_int_enb {
+ u64 u64;
+ struct cvmx_npei_pkt_time_int_enb_s {
+ u64 reserved_32_63 : 32;
+ u64 port : 32;
+ } s;
+ struct cvmx_npei_pkt_time_int_enb_s cn52xx;
+ struct cvmx_npei_pkt_time_int_enb_s cn56xx;
+};
+
+typedef union cvmx_npei_pkt_time_int_enb cvmx_npei_pkt_time_int_enb_t;
+
+/**
+ * cvmx_npei_rsl_int_blocks
+ *
+ * NPEI_RSL_INT_BLOCKS = NPEI RSL Interrupt Blocks Register
+ *
+ * Reading this register will return a vector with a bit set '1' for a
+ * corresponding RSL block
+ * that presently has an interrupt pending. The Field Description below
+ * supplies the name of the
+ * register that software should read to find out why that interrupt bit is set.
+ */
+union cvmx_npei_rsl_int_blocks {
+ u64 u64;
+ struct cvmx_npei_rsl_int_blocks_s {
+ u64 reserved_31_63 : 33;
+ u64 iob : 1;
+ u64 lmc1 : 1;
+ u64 agl : 1;
+ u64 reserved_24_27 : 4;
+ u64 asxpcs1 : 1;
+ u64 asxpcs0 : 1;
+ u64 reserved_21_21 : 1;
+ u64 pip : 1;
+ u64 spx1 : 1;
+ u64 spx0 : 1;
+ u64 lmc0 : 1;
+ u64 l2c : 1;
+ u64 usb1 : 1;
+ u64 rad : 1;
+ u64 usb : 1;
+ u64 pow : 1;
+ u64 tim : 1;
+ u64 pko : 1;
+ u64 ipd : 1;
+ u64 reserved_8_8 : 1;
+ u64 zip : 1;
+ u64 dfa : 1;
+ u64 fpa : 1;
+ u64 key : 1;
+ u64 npei : 1;
+ u64 gmx1 : 1;
+ u64 gmx0 : 1;
+ u64 mio : 1;
+ } s;
+ struct cvmx_npei_rsl_int_blocks_s cn52xx;
+ struct cvmx_npei_rsl_int_blocks_s cn52xxp1;
+ struct cvmx_npei_rsl_int_blocks_s cn56xx;
+ struct cvmx_npei_rsl_int_blocks_s cn56xxp1;
+};
+
+typedef union cvmx_npei_rsl_int_blocks cvmx_npei_rsl_int_blocks_t;
+
+/**
+ * cvmx_npei_scratch_1
+ *
+ * NPEI_SCRATCH_1 = NPEI's Scratch 1
+ *
+ * A general purpose 64 bit register for SW use.
+ */
+union cvmx_npei_scratch_1 {
+ u64 u64;
+ struct cvmx_npei_scratch_1_s {
+ u64 data : 64;
+ } s;
+ struct cvmx_npei_scratch_1_s cn52xx;
+ struct cvmx_npei_scratch_1_s cn52xxp1;
+ struct cvmx_npei_scratch_1_s cn56xx;
+ struct cvmx_npei_scratch_1_s cn56xxp1;
+};
+
+typedef union cvmx_npei_scratch_1 cvmx_npei_scratch_1_t;
+
+/**
+ * cvmx_npei_state1
+ *
+ * NPEI_STATE1 = NPEI State 1
+ *
+ * State machines in NPEI. For debug.
+ */
+union cvmx_npei_state1 {
+ u64 u64;
+ struct cvmx_npei_state1_s {
+ u64 cpl1 : 12;
+ u64 cpl0 : 12;
+ u64 arb : 1;
+ u64 csr : 39;
+ } s;
+ struct cvmx_npei_state1_s cn52xx;
+ struct cvmx_npei_state1_s cn52xxp1;
+ struct cvmx_npei_state1_s cn56xx;
+ struct cvmx_npei_state1_s cn56xxp1;
+};
+
+typedef union cvmx_npei_state1 cvmx_npei_state1_t;
+
+/**
+ * cvmx_npei_state2
+ *
+ * NPEI_STATE2 = NPEI State 2
+ *
+ * State machines in NPEI. For debug.
+ */
+union cvmx_npei_state2 {
+ u64 u64;
+ struct cvmx_npei_state2_s {
+ u64 reserved_48_63 : 16;
+ u64 npei : 1;
+ u64 rac : 1;
+ u64 csm1 : 15;
+ u64 csm0 : 15;
+ u64 nnp0 : 8;
+ u64 nnd : 8;
+ } s;
+ struct cvmx_npei_state2_s cn52xx;
+ struct cvmx_npei_state2_s cn52xxp1;
+ struct cvmx_npei_state2_s cn56xx;
+ struct cvmx_npei_state2_s cn56xxp1;
+};
+
+typedef union cvmx_npei_state2 cvmx_npei_state2_t;
+
+/**
+ * cvmx_npei_state3
+ *
+ * NPEI_STATE3 = NPEI State 3
+ *
+ * State machines in NPEI. For debug.
+ */
+union cvmx_npei_state3 {
+ u64 u64;
+ struct cvmx_npei_state3_s {
+ u64 reserved_56_63 : 8;
+ u64 psm1 : 15;
+ u64 psm0 : 15;
+ u64 nsm1 : 13;
+ u64 nsm0 : 13;
+ } s;
+ struct cvmx_npei_state3_s cn52xx;
+ struct cvmx_npei_state3_s cn52xxp1;
+ struct cvmx_npei_state3_s cn56xx;
+ struct cvmx_npei_state3_s cn56xxp1;
+};
+
+typedef union cvmx_npei_state3 cvmx_npei_state3_t;
+
+/**
+ * cvmx_npei_win_rd_addr
+ *
+ * NPEI_WIN_RD_ADDR = NPEI Window Read Address Register
+ *
+ * The address to be read when the NPEI_WIN_RD_DATA register is read.
+ */
+union cvmx_npei_win_rd_addr {
+ u64 u64;
+ struct cvmx_npei_win_rd_addr_s {
+ u64 reserved_51_63 : 13;
+ u64 ld_cmd : 2;
+ u64 iobit : 1;
+ u64 rd_addr : 48;
+ } s;
+ struct cvmx_npei_win_rd_addr_s cn52xx;
+ struct cvmx_npei_win_rd_addr_s cn52xxp1;
+ struct cvmx_npei_win_rd_addr_s cn56xx;
+ struct cvmx_npei_win_rd_addr_s cn56xxp1;
+};
+
+typedef union cvmx_npei_win_rd_addr cvmx_npei_win_rd_addr_t;
+
+/**
+ * cvmx_npei_win_rd_data
+ *
+ * NPEI_WIN_RD_DATA = NPEI Window Read Data Register
+ *
+ * Reading this register causes a window read operation to take place.
+ * Address read is that contained in the NPEI_WIN_RD_ADDR
+ * register.
+ */
+union cvmx_npei_win_rd_data {
+ u64 u64;
+ struct cvmx_npei_win_rd_data_s {
+ u64 rd_data : 64;
+ } s;
+ struct cvmx_npei_win_rd_data_s cn52xx;
+ struct cvmx_npei_win_rd_data_s cn52xxp1;
+ struct cvmx_npei_win_rd_data_s cn56xx;
+ struct cvmx_npei_win_rd_data_s cn56xxp1;
+};
+
+typedef union cvmx_npei_win_rd_data cvmx_npei_win_rd_data_t;
+
+/**
+ * cvmx_npei_win_wr_addr
+ *
+ * NPEI_WIN_WR_ADDR = NPEI Window Write Address Register
+ *
+ * Contains the address to be written to when a write operation is started by
+ * writing the
+ * NPEI_WIN_WR_DATA register (see below).
+ *
+ * Notes:
+ * Even though address bit [2] can be set, it should always be kept to '0'.
+ *
+ */
+union cvmx_npei_win_wr_addr {
+ u64 u64;
+ struct cvmx_npei_win_wr_addr_s {
+ u64 reserved_49_63 : 15;
+ u64 iobit : 1;
+ u64 wr_addr : 46;
+ u64 reserved_0_1 : 2;
+ } s;
+ struct cvmx_npei_win_wr_addr_s cn52xx;
+ struct cvmx_npei_win_wr_addr_s cn52xxp1;
+ struct cvmx_npei_win_wr_addr_s cn56xx;
+ struct cvmx_npei_win_wr_addr_s cn56xxp1;
+};
+
+typedef union cvmx_npei_win_wr_addr cvmx_npei_win_wr_addr_t;
+
+/**
+ * cvmx_npei_win_wr_data
+ *
+ * NPEI_WIN_WR_DATA = NPEI Window Write Data Register
+ *
+ * Contains the data to write to the address located in the NPEI_WIN_WR_ADDR
+ * Register.
+ * Writing the least-significant-byte of this register will cause a write
+ * operation to take place.
+ */
+union cvmx_npei_win_wr_data {
+ u64 u64;
+ struct cvmx_npei_win_wr_data_s {
+ u64 wr_data : 64;
+ } s;
+ struct cvmx_npei_win_wr_data_s cn52xx;
+ struct cvmx_npei_win_wr_data_s cn52xxp1;
+ struct cvmx_npei_win_wr_data_s cn56xx;
+ struct cvmx_npei_win_wr_data_s cn56xxp1;
+};
+
+typedef union cvmx_npei_win_wr_data cvmx_npei_win_wr_data_t;
+
+/**
+ * cvmx_npei_win_wr_mask
+ *
+ * NPEI_WIN_WR_MASK = NPEI Window Write Mask Register
+ *
+ * Contains the mask for the data in the NPEI_WIN_WR_DATA Register.
+ */
+union cvmx_npei_win_wr_mask {
+ u64 u64;
+ struct cvmx_npei_win_wr_mask_s {
+ u64 reserved_8_63 : 56;
+ u64 wr_mask : 8;
+ } s;
+ struct cvmx_npei_win_wr_mask_s cn52xx;
+ struct cvmx_npei_win_wr_mask_s cn52xxp1;
+ struct cvmx_npei_win_wr_mask_s cn56xx;
+ struct cvmx_npei_win_wr_mask_s cn56xxp1;
+};
+
+typedef union cvmx_npei_win_wr_mask cvmx_npei_win_wr_mask_t;
+
+/**
+ * cvmx_npei_window_ctl
+ *
+ * NPEI_WINDOW_CTL = NPEI's Window Control
+ *
+ * The name of this register is misleading. The timeout value is used for BAR0
+ * access from PCIE0 and PCIE1.
+ * Any access to the registers on the RML will time out after 0xFFFF clock cycles.
+ * At time of timeout the next
+ * RML access will start, and interrupt will be set, and in the case of reads
+ * no data will be returned.
+ *
+ * The value of this register should be set to a minimum of 0x200000 to ensure
+ * that a timeout to an RML register
+ * occurs on the RML 0xFFFF timer before the timeout for a BAR0 access from
+ * the PCIE#.
+ */
+union cvmx_npei_window_ctl {
+ u64 u64;
+ struct cvmx_npei_window_ctl_s {
+ u64 reserved_32_63 : 32;
+ u64 time : 32;
+ } s;
+ struct cvmx_npei_window_ctl_s cn52xx;
+ struct cvmx_npei_window_ctl_s cn52xxp1;
+ struct cvmx_npei_window_ctl_s cn56xx;
+ struct cvmx_npei_window_ctl_s cn56xxp1;
+};
+
+typedef union cvmx_npei_window_ctl cvmx_npei_window_ctl_t;
+
+#endif
--
2.35.1
^ permalink raw reply related [flat|nested] 52+ messages in thread* [PATCH 06/52] mips: octeon: Add cvmx-pcsxx-defs.h header file
2022-03-30 10:06 [PATCH 00/52] mips: octeon: Add ethernet support Stefan Roese
` (4 preceding siblings ...)
2022-03-30 10:06 ` [PATCH 05/52] mips: octeon: Add cvmx-npei-defs.h " Stefan Roese
@ 2022-03-30 10:06 ` Stefan Roese
2022-03-30 10:06 ` [PATCH 07/52] mips: octeon: Add cvmx-xcv-defs.h " Stefan Roese
` (43 subsequent siblings)
49 siblings, 0 replies; 52+ messages in thread
From: Stefan Roese @ 2022-03-30 10:06 UTC (permalink / raw)
To: u-boot; +Cc: awilliams, cchavva
From: Aaron Williams <awilliams@marvell.com>
Import cvmx-pcsxx-defs.h header file from 2013 U-Boot. It will be used
by the later added drivers to support networking on the MIPS Octeon II /
III platforms.
Signed-off-by: Aaron Williams <awilliams@marvell.com>
Signed-off-by: Stefan Roese <sr@denx.de>
---
.../include/mach/cvmx-pcsxx-defs.h | 787 ++++++++++++++++++
1 file changed, 787 insertions(+)
create mode 100644 arch/mips/mach-octeon/include/mach/cvmx-pcsxx-defs.h
diff --git a/arch/mips/mach-octeon/include/mach/cvmx-pcsxx-defs.h b/arch/mips/mach-octeon/include/mach/cvmx-pcsxx-defs.h
new file mode 100644
index 000000000000..e16a4c404f63
--- /dev/null
+++ b/arch/mips/mach-octeon/include/mach/cvmx-pcsxx-defs.h
@@ -0,0 +1,787 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018-2022 Marvell International Ltd.
+ *
+ * Configuration and status register (CSR) type definitions for
+ * Octeon pcsxx.
+ */
+
+#ifndef __CVMX_PCSXX_DEFS_H__
+#define __CVMX_PCSXX_DEFS_H__
+
+static inline u64 CVMX_PCSXX_10GBX_STATUS_REG(unsigned long offset)
+{
+ switch (cvmx_get_octeon_family()) {
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ return 0x00011800B0000828ull + (offset) * 0x8000000ull;
+ case OCTEON_CN70XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ return 0x00011800B0000828ull + (offset) * 0x8000000ull;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ return 0x00011800B0000828ull + (offset) * 0x1000000ull;
+ }
+ return 0x00011800B0000828ull + (offset) * 0x8000000ull;
+}
+
+static inline u64 CVMX_PCSXX_BIST_STATUS_REG(unsigned long offset)
+{
+ switch (cvmx_get_octeon_family()) {
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ return 0x00011800B0000870ull + (offset) * 0x8000000ull;
+ case OCTEON_CN70XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ return 0x00011800B0000870ull + (offset) * 0x8000000ull;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ return 0x00011800B0000870ull + (offset) * 0x1000000ull;
+ }
+ return 0x00011800B0000870ull + (offset) * 0x8000000ull;
+}
+
+static inline u64 CVMX_PCSXX_BIT_LOCK_STATUS_REG(unsigned long offset)
+{
+ switch (cvmx_get_octeon_family()) {
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ return 0x00011800B0000850ull + (offset) * 0x8000000ull;
+ case OCTEON_CN70XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ return 0x00011800B0000850ull + (offset) * 0x8000000ull;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ return 0x00011800B0000850ull + (offset) * 0x1000000ull;
+ }
+ return 0x00011800B0000850ull + (offset) * 0x8000000ull;
+}
+
+static inline u64 CVMX_PCSXX_CONTROL1_REG(unsigned long offset)
+{
+ switch (cvmx_get_octeon_family()) {
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ return 0x00011800B0000800ull + (offset) * 0x8000000ull;
+ case OCTEON_CN70XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ return 0x00011800B0000800ull + (offset) * 0x8000000ull;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ return 0x00011800B0000800ull + (offset) * 0x1000000ull;
+ }
+ return 0x00011800B0000800ull + (offset) * 0x8000000ull;
+}
+
+static inline u64 CVMX_PCSXX_CONTROL2_REG(unsigned long offset)
+{
+ switch (cvmx_get_octeon_family()) {
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ return 0x00011800B0000818ull + (offset) * 0x8000000ull;
+ case OCTEON_CN70XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ return 0x00011800B0000818ull + (offset) * 0x8000000ull;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ return 0x00011800B0000818ull + (offset) * 0x1000000ull;
+ }
+ return 0x00011800B0000818ull + (offset) * 0x8000000ull;
+}
+
+static inline u64 CVMX_PCSXX_INT_EN_REG(unsigned long offset)
+{
+ switch (cvmx_get_octeon_family()) {
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ return 0x00011800B0000860ull + (offset) * 0x8000000ull;
+ case OCTEON_CN70XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ return 0x00011800B0000860ull + (offset) * 0x8000000ull;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ return 0x00011800B0000860ull + (offset) * 0x1000000ull;
+ }
+ return 0x00011800B0000860ull + (offset) * 0x8000000ull;
+}
+
+static inline u64 CVMX_PCSXX_INT_REG(unsigned long offset)
+{
+ switch (cvmx_get_octeon_family()) {
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ return 0x00011800B0000858ull + (offset) * 0x8000000ull;
+ case OCTEON_CN70XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ return 0x00011800B0000858ull + (offset) * 0x8000000ull;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ return 0x00011800B0000858ull + (offset) * 0x1000000ull;
+ }
+ return 0x00011800B0000858ull + (offset) * 0x8000000ull;
+}
+
+static inline u64 CVMX_PCSXX_LOG_ANL_REG(unsigned long offset)
+{
+ switch (cvmx_get_octeon_family()) {
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ return 0x00011800B0000868ull + (offset) * 0x8000000ull;
+ case OCTEON_CN70XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ return 0x00011800B0000868ull + (offset) * 0x8000000ull;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ return 0x00011800B0000868ull + (offset) * 0x1000000ull;
+ }
+ return 0x00011800B0000868ull + (offset) * 0x8000000ull;
+}
+
+static inline u64 CVMX_PCSXX_MISC_CTL_REG(unsigned long offset)
+{
+ switch (cvmx_get_octeon_family()) {
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ return 0x00011800B0000848ull + (offset) * 0x8000000ull;
+ case OCTEON_CN70XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ return 0x00011800B0000848ull + (offset) * 0x8000000ull;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ return 0x00011800B0000848ull + (offset) * 0x1000000ull;
+ }
+ return 0x00011800B0000848ull + (offset) * 0x8000000ull;
+}
+
+static inline u64 CVMX_PCSXX_RX_SYNC_STATES_REG(unsigned long offset)
+{
+ switch (cvmx_get_octeon_family()) {
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ return 0x00011800B0000838ull + (offset) * 0x8000000ull;
+ case OCTEON_CN70XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ return 0x00011800B0000838ull + (offset) * 0x8000000ull;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ return 0x00011800B0000838ull + (offset) * 0x1000000ull;
+ }
+ return 0x00011800B0000838ull + (offset) * 0x8000000ull;
+}
+
+#define CVMX_PCSXX_SERDES_CRDT_CNT_REG(offset) (0x00011800B0000880ull)
+static inline u64 CVMX_PCSXX_SPD_ABIL_REG(unsigned long offset)
+{
+ switch (cvmx_get_octeon_family()) {
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ return 0x00011800B0000810ull + (offset) * 0x8000000ull;
+ case OCTEON_CN70XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ return 0x00011800B0000810ull + (offset) * 0x8000000ull;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ return 0x00011800B0000810ull + (offset) * 0x1000000ull;
+ }
+ return 0x00011800B0000810ull + (offset) * 0x8000000ull;
+}
+
+static inline u64 CVMX_PCSXX_STATUS1_REG(unsigned long offset)
+{
+ switch (cvmx_get_octeon_family()) {
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ return 0x00011800B0000808ull + (offset) * 0x8000000ull;
+ case OCTEON_CN70XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ return 0x00011800B0000808ull + (offset) * 0x8000000ull;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ return 0x00011800B0000808ull + (offset) * 0x1000000ull;
+ }
+ return 0x00011800B0000808ull + (offset) * 0x8000000ull;
+}
+
+static inline u64 CVMX_PCSXX_STATUS2_REG(unsigned long offset)
+{
+ switch (cvmx_get_octeon_family()) {
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ return 0x00011800B0000820ull + (offset) * 0x8000000ull;
+ case OCTEON_CN70XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ return 0x00011800B0000820ull + (offset) * 0x8000000ull;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ return 0x00011800B0000820ull + (offset) * 0x1000000ull;
+ }
+ return 0x00011800B0000820ull + (offset) * 0x8000000ull;
+}
+
+static inline u64 CVMX_PCSXX_TX_RX_POLARITY_REG(unsigned long offset)
+{
+ switch (cvmx_get_octeon_family()) {
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ return 0x00011800B0000840ull + (offset) * 0x8000000ull;
+ case OCTEON_CN70XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ return 0x00011800B0000840ull + (offset) * 0x8000000ull;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ return 0x00011800B0000840ull + (offset) * 0x1000000ull;
+ }
+ return 0x00011800B0000840ull + (offset) * 0x8000000ull;
+}
+
+static inline u64 CVMX_PCSXX_TX_RX_STATES_REG(unsigned long offset)
+{
+ switch (cvmx_get_octeon_family()) {
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ return 0x00011800B0000830ull + (offset) * 0x8000000ull;
+ case OCTEON_CN70XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ return 0x00011800B0000830ull + (offset) * 0x8000000ull;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ return 0x00011800B0000830ull + (offset) * 0x1000000ull;
+ }
+ return 0x00011800B0000830ull + (offset) * 0x8000000ull;
+}
+
+/**
+ * cvmx_pcsx#_10gbx_status_reg
+ *
+ * PCSX_10GBX_STATUS_REG = 10gbx_status_reg
+ *
+ */
+union cvmx_pcsxx_10gbx_status_reg {
+ u64 u64;
+ struct cvmx_pcsxx_10gbx_status_reg_s {
+ u64 reserved_13_63 : 51;
+ u64 alignd : 1;
+ u64 pattst : 1;
+ u64 reserved_4_10 : 7;
+ u64 l3sync : 1;
+ u64 l2sync : 1;
+ u64 l1sync : 1;
+ u64 l0sync : 1;
+ } s;
+ struct cvmx_pcsxx_10gbx_status_reg_s cn52xx;
+ struct cvmx_pcsxx_10gbx_status_reg_s cn52xxp1;
+ struct cvmx_pcsxx_10gbx_status_reg_s cn56xx;
+ struct cvmx_pcsxx_10gbx_status_reg_s cn56xxp1;
+ struct cvmx_pcsxx_10gbx_status_reg_s cn61xx;
+ struct cvmx_pcsxx_10gbx_status_reg_s cn63xx;
+ struct cvmx_pcsxx_10gbx_status_reg_s cn63xxp1;
+ struct cvmx_pcsxx_10gbx_status_reg_s cn66xx;
+ struct cvmx_pcsxx_10gbx_status_reg_s cn68xx;
+ struct cvmx_pcsxx_10gbx_status_reg_s cn68xxp1;
+ struct cvmx_pcsxx_10gbx_status_reg_s cn70xx;
+ struct cvmx_pcsxx_10gbx_status_reg_s cn70xxp1;
+};
+
+typedef union cvmx_pcsxx_10gbx_status_reg cvmx_pcsxx_10gbx_status_reg_t;
+
+/**
+ * cvmx_pcsx#_bist_status_reg
+ *
+ * PCSX Bist Status Register
+ *
+ */
+union cvmx_pcsxx_bist_status_reg {
+ u64 u64;
+ struct cvmx_pcsxx_bist_status_reg_s {
+ u64 reserved_1_63 : 63;
+ u64 bist_status : 1;
+ } s;
+ struct cvmx_pcsxx_bist_status_reg_s cn52xx;
+ struct cvmx_pcsxx_bist_status_reg_s cn52xxp1;
+ struct cvmx_pcsxx_bist_status_reg_s cn56xx;
+ struct cvmx_pcsxx_bist_status_reg_s cn56xxp1;
+ struct cvmx_pcsxx_bist_status_reg_s cn61xx;
+ struct cvmx_pcsxx_bist_status_reg_s cn63xx;
+ struct cvmx_pcsxx_bist_status_reg_s cn63xxp1;
+ struct cvmx_pcsxx_bist_status_reg_s cn66xx;
+ struct cvmx_pcsxx_bist_status_reg_s cn68xx;
+ struct cvmx_pcsxx_bist_status_reg_s cn68xxp1;
+ struct cvmx_pcsxx_bist_status_reg_s cn70xx;
+ struct cvmx_pcsxx_bist_status_reg_s cn70xxp1;
+};
+
+typedef union cvmx_pcsxx_bist_status_reg cvmx_pcsxx_bist_status_reg_t;
+
+/**
+ * cvmx_pcsx#_bit_lock_status_reg
+ *
+ * PCSX Bit Lock Status Register
+ *
+ */
+union cvmx_pcsxx_bit_lock_status_reg {
+ u64 u64;
+ struct cvmx_pcsxx_bit_lock_status_reg_s {
+ u64 reserved_4_63 : 60;
+ u64 bitlck3 : 1;
+ u64 bitlck2 : 1;
+ u64 bitlck1 : 1;
+ u64 bitlck0 : 1;
+ } s;
+ struct cvmx_pcsxx_bit_lock_status_reg_s cn52xx;
+ struct cvmx_pcsxx_bit_lock_status_reg_s cn52xxp1;
+ struct cvmx_pcsxx_bit_lock_status_reg_s cn56xx;
+ struct cvmx_pcsxx_bit_lock_status_reg_s cn56xxp1;
+ struct cvmx_pcsxx_bit_lock_status_reg_s cn61xx;
+ struct cvmx_pcsxx_bit_lock_status_reg_s cn63xx;
+ struct cvmx_pcsxx_bit_lock_status_reg_s cn63xxp1;
+ struct cvmx_pcsxx_bit_lock_status_reg_s cn66xx;
+ struct cvmx_pcsxx_bit_lock_status_reg_s cn68xx;
+ struct cvmx_pcsxx_bit_lock_status_reg_s cn68xxp1;
+ struct cvmx_pcsxx_bit_lock_status_reg_s cn70xx;
+ struct cvmx_pcsxx_bit_lock_status_reg_s cn70xxp1;
+};
+
+typedef union cvmx_pcsxx_bit_lock_status_reg cvmx_pcsxx_bit_lock_status_reg_t;
+
+/**
+ * cvmx_pcsx#_control1_reg
+ *
+ * NOTE: Logic Analyzer is enabled with LA_EN for the specified PCS lane only.
+ * PKT_SZ is effective only when LA_EN=1
+ * For normal operation(sgmii or 1000Base-X), this bit must be 0.
+ * See pcsx.csr for xaui logic analyzer mode.
+ * For full description see document at .../rtl/pcs/readme_logic_analyzer.txt
+ *
+ *
+ * PCSX regs follow IEEE Std 802.3-2005, Section: 45.2.3
+ *
+ *
+ * PCSX_CONTROL1_REG = Control Register1
+ */
+union cvmx_pcsxx_control1_reg {
+ u64 u64;
+ struct cvmx_pcsxx_control1_reg_s {
+ u64 reserved_16_63 : 48;
+ u64 reset : 1;
+ u64 loopbck1 : 1;
+ u64 spdsel1 : 1;
+ u64 reserved_12_12 : 1;
+ u64 lo_pwr : 1;
+ u64 reserved_7_10 : 4;
+ u64 spdsel0 : 1;
+ u64 spd : 4;
+ u64 reserved_0_1 : 2;
+ } s;
+ struct cvmx_pcsxx_control1_reg_s cn52xx;
+ struct cvmx_pcsxx_control1_reg_s cn52xxp1;
+ struct cvmx_pcsxx_control1_reg_s cn56xx;
+ struct cvmx_pcsxx_control1_reg_s cn56xxp1;
+ struct cvmx_pcsxx_control1_reg_s cn61xx;
+ struct cvmx_pcsxx_control1_reg_s cn63xx;
+ struct cvmx_pcsxx_control1_reg_s cn63xxp1;
+ struct cvmx_pcsxx_control1_reg_s cn66xx;
+ struct cvmx_pcsxx_control1_reg_s cn68xx;
+ struct cvmx_pcsxx_control1_reg_s cn68xxp1;
+ struct cvmx_pcsxx_control1_reg_s cn70xx;
+ struct cvmx_pcsxx_control1_reg_s cn70xxp1;
+};
+
+typedef union cvmx_pcsxx_control1_reg cvmx_pcsxx_control1_reg_t;
+
+/**
+ * cvmx_pcsx#_control2_reg
+ *
+ * PCSX_CONTROL2_REG = Control Register2
+ *
+ */
+union cvmx_pcsxx_control2_reg {
+ u64 u64;
+ struct cvmx_pcsxx_control2_reg_s {
+ u64 reserved_2_63 : 62;
+ u64 type : 2;
+ } s;
+ struct cvmx_pcsxx_control2_reg_s cn52xx;
+ struct cvmx_pcsxx_control2_reg_s cn52xxp1;
+ struct cvmx_pcsxx_control2_reg_s cn56xx;
+ struct cvmx_pcsxx_control2_reg_s cn56xxp1;
+ struct cvmx_pcsxx_control2_reg_s cn61xx;
+ struct cvmx_pcsxx_control2_reg_s cn63xx;
+ struct cvmx_pcsxx_control2_reg_s cn63xxp1;
+ struct cvmx_pcsxx_control2_reg_s cn66xx;
+ struct cvmx_pcsxx_control2_reg_s cn68xx;
+ struct cvmx_pcsxx_control2_reg_s cn68xxp1;
+ struct cvmx_pcsxx_control2_reg_s cn70xx;
+ struct cvmx_pcsxx_control2_reg_s cn70xxp1;
+};
+
+typedef union cvmx_pcsxx_control2_reg cvmx_pcsxx_control2_reg_t;
+
+/**
+ * cvmx_pcsx#_int_en_reg
+ *
+ * PCSX Interrupt Enable Register
+ *
+ */
+union cvmx_pcsxx_int_en_reg {
+ u64 u64;
+ struct cvmx_pcsxx_int_en_reg_s {
+ u64 reserved_7_63 : 57;
+ u64 dbg_sync_en : 1;
+ u64 algnlos_en : 1;
+ u64 synlos_en : 1;
+ u64 bitlckls_en : 1;
+ u64 rxsynbad_en : 1;
+ u64 rxbad_en : 1;
+ u64 txflt_en : 1;
+ } s;
+ struct cvmx_pcsxx_int_en_reg_cn52xx {
+ u64 reserved_6_63 : 58;
+ u64 algnlos_en : 1;
+ u64 synlos_en : 1;
+ u64 bitlckls_en : 1;
+ u64 rxsynbad_en : 1;
+ u64 rxbad_en : 1;
+ u64 txflt_en : 1;
+ } cn52xx;
+ struct cvmx_pcsxx_int_en_reg_cn52xx cn52xxp1;
+ struct cvmx_pcsxx_int_en_reg_cn52xx cn56xx;
+ struct cvmx_pcsxx_int_en_reg_cn52xx cn56xxp1;
+ struct cvmx_pcsxx_int_en_reg_s cn61xx;
+ struct cvmx_pcsxx_int_en_reg_s cn63xx;
+ struct cvmx_pcsxx_int_en_reg_s cn63xxp1;
+ struct cvmx_pcsxx_int_en_reg_s cn66xx;
+ struct cvmx_pcsxx_int_en_reg_s cn68xx;
+ struct cvmx_pcsxx_int_en_reg_s cn68xxp1;
+ struct cvmx_pcsxx_int_en_reg_s cn70xx;
+ struct cvmx_pcsxx_int_en_reg_s cn70xxp1;
+};
+
+typedef union cvmx_pcsxx_int_en_reg cvmx_pcsxx_int_en_reg_t;
+
+/**
+ * cvmx_pcsx#_int_reg
+ *
+ * PCSX Interrupt Register
+ * Note: DBG_SYNC is an edge-triggered interrupt. When set it indicates PCS Synchronization state
+ * machine in
+ * Figure 48-7 state diagram in IEEE Std 802.3-2005 changes state SYNC_ACQUIRED_1 to
+ * SYNC_ACQUIRED_2
+ * indicating an invalid code group was received on one of the 4 receive lanes.
+ * This interrupt should be always disabled and used only for link problem debugging help.
+ */
+union cvmx_pcsxx_int_reg {
+ u64 u64;
+ struct cvmx_pcsxx_int_reg_s {
+ u64 reserved_7_63 : 57;
+ u64 dbg_sync : 1;
+ u64 algnlos : 1;
+ u64 synlos : 1;
+ u64 bitlckls : 1;
+ u64 rxsynbad : 1;
+ u64 rxbad : 1;
+ u64 txflt : 1;
+ } s;
+ struct cvmx_pcsxx_int_reg_cn52xx {
+ u64 reserved_6_63 : 58;
+ u64 algnlos : 1;
+ u64 synlos : 1;
+ u64 bitlckls : 1;
+ u64 rxsynbad : 1;
+ u64 rxbad : 1;
+ u64 txflt : 1;
+ } cn52xx;
+ struct cvmx_pcsxx_int_reg_cn52xx cn52xxp1;
+ struct cvmx_pcsxx_int_reg_cn52xx cn56xx;
+ struct cvmx_pcsxx_int_reg_cn52xx cn56xxp1;
+ struct cvmx_pcsxx_int_reg_s cn61xx;
+ struct cvmx_pcsxx_int_reg_s cn63xx;
+ struct cvmx_pcsxx_int_reg_s cn63xxp1;
+ struct cvmx_pcsxx_int_reg_s cn66xx;
+ struct cvmx_pcsxx_int_reg_s cn68xx;
+ struct cvmx_pcsxx_int_reg_s cn68xxp1;
+ struct cvmx_pcsxx_int_reg_s cn70xx;
+ struct cvmx_pcsxx_int_reg_s cn70xxp1;
+};
+
+typedef union cvmx_pcsxx_int_reg cvmx_pcsxx_int_reg_t;
+
+/**
+ * cvmx_pcsx#_log_anl_reg
+ *
+ * PCSX Logic Analyzer Register
+ * NOTE: Logic Analyzer is enabled with LA_EN for xaui only. PKT_SZ is effective only when
+ * LA_EN=1
+ * For normal operation(xaui), this bit must be 0. The dropped lane is used to send rxc[3:0].
+ * See pcs.csr for sgmii/1000Base-X logic analyzer mode.
+ * For full description see document at .../rtl/pcs/readme_logic_analyzer.txt
+ */
+union cvmx_pcsxx_log_anl_reg {
+ u64 u64;
+ struct cvmx_pcsxx_log_anl_reg_s {
+ u64 reserved_7_63 : 57;
+ u64 enc_mode : 1;
+ u64 drop_ln : 2;
+ u64 lafifovfl : 1;
+ u64 la_en : 1;
+ u64 pkt_sz : 2;
+ } s;
+ struct cvmx_pcsxx_log_anl_reg_s cn52xx;
+ struct cvmx_pcsxx_log_anl_reg_s cn52xxp1;
+ struct cvmx_pcsxx_log_anl_reg_s cn56xx;
+ struct cvmx_pcsxx_log_anl_reg_s cn56xxp1;
+ struct cvmx_pcsxx_log_anl_reg_s cn61xx;
+ struct cvmx_pcsxx_log_anl_reg_s cn63xx;
+ struct cvmx_pcsxx_log_anl_reg_s cn63xxp1;
+ struct cvmx_pcsxx_log_anl_reg_s cn66xx;
+ struct cvmx_pcsxx_log_anl_reg_s cn68xx;
+ struct cvmx_pcsxx_log_anl_reg_s cn68xxp1;
+ struct cvmx_pcsxx_log_anl_reg_s cn70xx;
+ struct cvmx_pcsxx_log_anl_reg_s cn70xxp1;
+};
+
+typedef union cvmx_pcsxx_log_anl_reg cvmx_pcsxx_log_anl_reg_t;
+
+/**
+ * cvmx_pcsx#_misc_ctl_reg
+ *
+ * PCSX Misc Control Register
+ * LN_SWAP for XAUI is to simplify interconnection layout between devices
+ */
+union cvmx_pcsxx_misc_ctl_reg {
+ u64 u64;
+ struct cvmx_pcsxx_misc_ctl_reg_s {
+ u64 reserved_4_63 : 60;
+ u64 tx_swap : 1;
+ u64 rx_swap : 1;
+ u64 xaui : 1;
+ u64 gmxeno : 1;
+ } s;
+ struct cvmx_pcsxx_misc_ctl_reg_s cn52xx;
+ struct cvmx_pcsxx_misc_ctl_reg_s cn52xxp1;
+ struct cvmx_pcsxx_misc_ctl_reg_s cn56xx;
+ struct cvmx_pcsxx_misc_ctl_reg_s cn56xxp1;
+ struct cvmx_pcsxx_misc_ctl_reg_s cn61xx;
+ struct cvmx_pcsxx_misc_ctl_reg_s cn63xx;
+ struct cvmx_pcsxx_misc_ctl_reg_s cn63xxp1;
+ struct cvmx_pcsxx_misc_ctl_reg_s cn66xx;
+ struct cvmx_pcsxx_misc_ctl_reg_s cn68xx;
+ struct cvmx_pcsxx_misc_ctl_reg_s cn68xxp1;
+ struct cvmx_pcsxx_misc_ctl_reg_s cn70xx;
+ struct cvmx_pcsxx_misc_ctl_reg_s cn70xxp1;
+};
+
+typedef union cvmx_pcsxx_misc_ctl_reg cvmx_pcsxx_misc_ctl_reg_t;
+
+/**
+ * cvmx_pcsx#_rx_sync_states_reg
+ *
+ * PCSX_RX_SYNC_STATES_REG = Receive Sync States Register
+ *
+ */
+union cvmx_pcsxx_rx_sync_states_reg {
+ u64 u64;
+ struct cvmx_pcsxx_rx_sync_states_reg_s {
+ u64 reserved_16_63 : 48;
+ u64 sync3st : 4;
+ u64 sync2st : 4;
+ u64 sync1st : 4;
+ u64 sync0st : 4;
+ } s;
+ struct cvmx_pcsxx_rx_sync_states_reg_s cn52xx;
+ struct cvmx_pcsxx_rx_sync_states_reg_s cn52xxp1;
+ struct cvmx_pcsxx_rx_sync_states_reg_s cn56xx;
+ struct cvmx_pcsxx_rx_sync_states_reg_s cn56xxp1;
+ struct cvmx_pcsxx_rx_sync_states_reg_s cn61xx;
+ struct cvmx_pcsxx_rx_sync_states_reg_s cn63xx;
+ struct cvmx_pcsxx_rx_sync_states_reg_s cn63xxp1;
+ struct cvmx_pcsxx_rx_sync_states_reg_s cn66xx;
+ struct cvmx_pcsxx_rx_sync_states_reg_s cn68xx;
+ struct cvmx_pcsxx_rx_sync_states_reg_s cn68xxp1;
+ struct cvmx_pcsxx_rx_sync_states_reg_s cn70xx;
+ struct cvmx_pcsxx_rx_sync_states_reg_s cn70xxp1;
+};
+
+typedef union cvmx_pcsxx_rx_sync_states_reg cvmx_pcsxx_rx_sync_states_reg_t;
+
+/**
+ * cvmx_pcsx#_serdes_crdt_cnt_reg
+ *
+ * PCSX SERDES Credit Count
+ *
+ */
+union cvmx_pcsxx_serdes_crdt_cnt_reg {
+ u64 u64;
+ struct cvmx_pcsxx_serdes_crdt_cnt_reg_s {
+ u64 reserved_5_63 : 59;
+ u64 cnt : 5;
+ } s;
+ struct cvmx_pcsxx_serdes_crdt_cnt_reg_s cn70xx;
+ struct cvmx_pcsxx_serdes_crdt_cnt_reg_s cn70xxp1;
+};
+
+typedef union cvmx_pcsxx_serdes_crdt_cnt_reg cvmx_pcsxx_serdes_crdt_cnt_reg_t;
+
+/**
+ * cvmx_pcsx#_spd_abil_reg
+ *
+ * PCSX_SPD_ABIL_REG = Speed ability register
+ *
+ */
+union cvmx_pcsxx_spd_abil_reg {
+ u64 u64;
+ struct cvmx_pcsxx_spd_abil_reg_s {
+ u64 reserved_2_63 : 62;
+ u64 tenpasst : 1;
+ u64 tengb : 1;
+ } s;
+ struct cvmx_pcsxx_spd_abil_reg_s cn52xx;
+ struct cvmx_pcsxx_spd_abil_reg_s cn52xxp1;
+ struct cvmx_pcsxx_spd_abil_reg_s cn56xx;
+ struct cvmx_pcsxx_spd_abil_reg_s cn56xxp1;
+ struct cvmx_pcsxx_spd_abil_reg_s cn61xx;
+ struct cvmx_pcsxx_spd_abil_reg_s cn63xx;
+ struct cvmx_pcsxx_spd_abil_reg_s cn63xxp1;
+ struct cvmx_pcsxx_spd_abil_reg_s cn66xx;
+ struct cvmx_pcsxx_spd_abil_reg_s cn68xx;
+ struct cvmx_pcsxx_spd_abil_reg_s cn68xxp1;
+ struct cvmx_pcsxx_spd_abil_reg_s cn70xx;
+ struct cvmx_pcsxx_spd_abil_reg_s cn70xxp1;
+};
+
+typedef union cvmx_pcsxx_spd_abil_reg cvmx_pcsxx_spd_abil_reg_t;
+
+/**
+ * cvmx_pcsx#_status1_reg
+ *
+ * PCSX_STATUS1_REG = Status Register1
+ *
+ */
+union cvmx_pcsxx_status1_reg {
+ u64 u64;
+ struct cvmx_pcsxx_status1_reg_s {
+ u64 reserved_8_63 : 56;
+ u64 flt : 1;
+ u64 reserved_3_6 : 4;
+ u64 rcv_lnk : 1;
+ u64 lpable : 1;
+ u64 reserved_0_0 : 1;
+ } s;
+ struct cvmx_pcsxx_status1_reg_s cn52xx;
+ struct cvmx_pcsxx_status1_reg_s cn52xxp1;
+ struct cvmx_pcsxx_status1_reg_s cn56xx;
+ struct cvmx_pcsxx_status1_reg_s cn56xxp1;
+ struct cvmx_pcsxx_status1_reg_s cn61xx;
+ struct cvmx_pcsxx_status1_reg_s cn63xx;
+ struct cvmx_pcsxx_status1_reg_s cn63xxp1;
+ struct cvmx_pcsxx_status1_reg_s cn66xx;
+ struct cvmx_pcsxx_status1_reg_s cn68xx;
+ struct cvmx_pcsxx_status1_reg_s cn68xxp1;
+ struct cvmx_pcsxx_status1_reg_s cn70xx;
+ struct cvmx_pcsxx_status1_reg_s cn70xxp1;
+};
+
+typedef union cvmx_pcsxx_status1_reg cvmx_pcsxx_status1_reg_t;
+
+/**
+ * cvmx_pcsx#_status2_reg
+ *
+ * PCSX_STATUS2_REG = Status Register2
+ *
+ */
+union cvmx_pcsxx_status2_reg {
+ u64 u64;
+ struct cvmx_pcsxx_status2_reg_s {
+ u64 reserved_16_63 : 48;
+ u64 dev : 2;
+ u64 reserved_12_13 : 2;
+ u64 xmtflt : 1;
+ u64 rcvflt : 1;
+ u64 reserved_3_9 : 7;
+ u64 tengb_w : 1;
+ u64 tengb_x : 1;
+ u64 tengb_r : 1;
+ } s;
+ struct cvmx_pcsxx_status2_reg_s cn52xx;
+ struct cvmx_pcsxx_status2_reg_s cn52xxp1;
+ struct cvmx_pcsxx_status2_reg_s cn56xx;
+ struct cvmx_pcsxx_status2_reg_s cn56xxp1;
+ struct cvmx_pcsxx_status2_reg_s cn61xx;
+ struct cvmx_pcsxx_status2_reg_s cn63xx;
+ struct cvmx_pcsxx_status2_reg_s cn63xxp1;
+ struct cvmx_pcsxx_status2_reg_s cn66xx;
+ struct cvmx_pcsxx_status2_reg_s cn68xx;
+ struct cvmx_pcsxx_status2_reg_s cn68xxp1;
+ struct cvmx_pcsxx_status2_reg_s cn70xx;
+ struct cvmx_pcsxx_status2_reg_s cn70xxp1;
+};
+
+typedef union cvmx_pcsxx_status2_reg cvmx_pcsxx_status2_reg_t;
+
+/**
+ * cvmx_pcsx#_tx_rx_polarity_reg
+ *
+ * RX lane polarity vector [3:0] = XOR_RXPLRT<9:6> ^ [4[RXPLRT<1>]];
+ * TX lane polarity vector [3:0] = XOR_TXPLRT<5:2> ^ [4[TXPLRT<0>]];
+ * In short keep <1:0> to 2'b00, and use <5:2> and <9:6> fields to define per lane polarities
+ */
+union cvmx_pcsxx_tx_rx_polarity_reg {
+ u64 u64;
+ struct cvmx_pcsxx_tx_rx_polarity_reg_s {
+ u64 reserved_10_63 : 54;
+ u64 xor_rxplrt : 4;
+ u64 xor_txplrt : 4;
+ u64 rxplrt : 1;
+ u64 txplrt : 1;
+ } s;
+ struct cvmx_pcsxx_tx_rx_polarity_reg_s cn52xx;
+ struct cvmx_pcsxx_tx_rx_polarity_reg_cn52xxp1 {
+ u64 reserved_2_63 : 62;
+ u64 rxplrt : 1;
+ u64 txplrt : 1;
+ } cn52xxp1;
+ struct cvmx_pcsxx_tx_rx_polarity_reg_s cn56xx;
+ struct cvmx_pcsxx_tx_rx_polarity_reg_cn52xxp1 cn56xxp1;
+ struct cvmx_pcsxx_tx_rx_polarity_reg_s cn61xx;
+ struct cvmx_pcsxx_tx_rx_polarity_reg_s cn63xx;
+ struct cvmx_pcsxx_tx_rx_polarity_reg_s cn63xxp1;
+ struct cvmx_pcsxx_tx_rx_polarity_reg_s cn66xx;
+ struct cvmx_pcsxx_tx_rx_polarity_reg_s cn68xx;
+ struct cvmx_pcsxx_tx_rx_polarity_reg_s cn68xxp1;
+ struct cvmx_pcsxx_tx_rx_polarity_reg_s cn70xx;
+ struct cvmx_pcsxx_tx_rx_polarity_reg_s cn70xxp1;
+};
+
+typedef union cvmx_pcsxx_tx_rx_polarity_reg cvmx_pcsxx_tx_rx_polarity_reg_t;
+
+/**
+ * cvmx_pcsx#_tx_rx_states_reg
+ *
+ * PCSX_TX_RX_STATES_REG = Transmit Receive States Register
+ *
+ */
+union cvmx_pcsxx_tx_rx_states_reg {
+ u64 u64;
+ struct cvmx_pcsxx_tx_rx_states_reg_s {
+ u64 reserved_14_63 : 50;
+ u64 term_err : 1;
+ u64 syn3bad : 1;
+ u64 syn2bad : 1;
+ u64 syn1bad : 1;
+ u64 syn0bad : 1;
+ u64 rxbad : 1;
+ u64 algn_st : 3;
+ u64 rx_st : 2;
+ u64 tx_st : 3;
+ } s;
+ struct cvmx_pcsxx_tx_rx_states_reg_s cn52xx;
+ struct cvmx_pcsxx_tx_rx_states_reg_cn52xxp1 {
+ u64 reserved_13_63 : 51;
+ u64 syn3bad : 1;
+ u64 syn2bad : 1;
+ u64 syn1bad : 1;
+ u64 syn0bad : 1;
+ u64 rxbad : 1;
+ u64 algn_st : 3;
+ u64 rx_st : 2;
+ u64 tx_st : 3;
+ } cn52xxp1;
+ struct cvmx_pcsxx_tx_rx_states_reg_s cn56xx;
+ struct cvmx_pcsxx_tx_rx_states_reg_cn52xxp1 cn56xxp1;
+ struct cvmx_pcsxx_tx_rx_states_reg_s cn61xx;
+ struct cvmx_pcsxx_tx_rx_states_reg_s cn63xx;
+ struct cvmx_pcsxx_tx_rx_states_reg_s cn63xxp1;
+ struct cvmx_pcsxx_tx_rx_states_reg_s cn66xx;
+ struct cvmx_pcsxx_tx_rx_states_reg_s cn68xx;
+ struct cvmx_pcsxx_tx_rx_states_reg_s cn68xxp1;
+ struct cvmx_pcsxx_tx_rx_states_reg_s cn70xx;
+ struct cvmx_pcsxx_tx_rx_states_reg_s cn70xxp1;
+};
+
+typedef union cvmx_pcsxx_tx_rx_states_reg cvmx_pcsxx_tx_rx_states_reg_t;
+
+#endif
--
2.35.1
^ permalink raw reply related [flat|nested] 52+ messages in thread* [PATCH 07/52] mips: octeon: Add cvmx-xcv-defs.h header file
2022-03-30 10:06 [PATCH 00/52] mips: octeon: Add ethernet support Stefan Roese
` (5 preceding siblings ...)
2022-03-30 10:06 ` [PATCH 06/52] mips: octeon: Add cvmx-pcsxx-defs.h " Stefan Roese
@ 2022-03-30 10:06 ` Stefan Roese
2022-03-30 10:06 ` [PATCH 08/52] mips: octeon: Misc changes to existing headers for upcoming eth support Stefan Roese
` (42 subsequent siblings)
49 siblings, 0 replies; 52+ messages in thread
From: Stefan Roese @ 2022-03-30 10:06 UTC (permalink / raw)
To: u-boot; +Cc: awilliams, cchavva
From: Aaron Williams <awilliams@marvell.com>
Import cvmx-xcv-defs.h header file from 2013 U-Boot. It will be used
by the later added drivers to support networking on the MIPS Octeon II /
III platforms.
Signed-off-by: Aaron Williams <awilliams@marvell.com>
Signed-off-by: Stefan Roese <sr@denx.de>
---
.../mach-octeon/include/mach/cvmx-xcv-defs.h | 226 ++++++++++++++++++
1 file changed, 226 insertions(+)
create mode 100644 arch/mips/mach-octeon/include/mach/cvmx-xcv-defs.h
diff --git a/arch/mips/mach-octeon/include/mach/cvmx-xcv-defs.h b/arch/mips/mach-octeon/include/mach/cvmx-xcv-defs.h
new file mode 100644
index 000000000000..4fd4d163eae1
--- /dev/null
+++ b/arch/mips/mach-octeon/include/mach/cvmx-xcv-defs.h
@@ -0,0 +1,226 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018-2022 Marvell International Ltd.
+ *
+ * Configuration and status register (CSR) type definitions for
+ * Octeon xcv.
+ */
+
+#ifndef __CVMX_XCV_DEFS_H__
+#define __CVMX_XCV_DEFS_H__
+
+#define CVMX_XCV_BATCH_CRD_RET (0x00011800DB000100ull)
+#define CVMX_XCV_COMP_CTL (0x00011800DB000020ull)
+#define CVMX_XCV_CTL (0x00011800DB000030ull)
+#define CVMX_XCV_DLL_CTL (0x00011800DB000010ull)
+#define CVMX_XCV_ECO (0x00011800DB000200ull)
+#define CVMX_XCV_INBND_STATUS (0x00011800DB000080ull)
+#define CVMX_XCV_INT (0x00011800DB000040ull)
+#define CVMX_XCV_RESET (0x00011800DB000000ull)
+
+/**
+ * cvmx_xcv_batch_crd_ret
+ */
+union cvmx_xcv_batch_crd_ret {
+ u64 u64;
+ struct cvmx_xcv_batch_crd_ret_s {
+ u64 reserved_1_63 : 63;
+ u64 crd_ret : 1;
+ } s;
+ struct cvmx_xcv_batch_crd_ret_s cn73xx;
+};
+
+typedef union cvmx_xcv_batch_crd_ret cvmx_xcv_batch_crd_ret_t;
+
+/**
+ * cvmx_xcv_comp_ctl
+ *
+ * This register controls programmable compensation.
+ *
+ */
+union cvmx_xcv_comp_ctl {
+ u64 u64;
+ struct cvmx_xcv_comp_ctl_s {
+ u64 drv_byp : 1;
+ u64 reserved_61_62 : 2;
+ u64 cmp_pctl : 5;
+ u64 reserved_53_55 : 3;
+ u64 cmp_nctl : 5;
+ u64 reserved_45_47 : 3;
+ u64 drv_pctl : 5;
+ u64 reserved_37_39 : 3;
+ u64 drv_nctl : 5;
+ u64 reserved_31_31 : 1;
+ u64 pctl_lock : 1;
+ u64 pctl_sat : 1;
+ u64 reserved_28_28 : 1;
+ u64 nctl_lock : 1;
+ u64 reserved_1_26 : 26;
+ u64 nctl_sat : 1;
+ } s;
+ struct cvmx_xcv_comp_ctl_s cn73xx;
+};
+
+typedef union cvmx_xcv_comp_ctl cvmx_xcv_comp_ctl_t;
+
+/**
+ * cvmx_xcv_ctl
+ *
+ * This register contains the status control bits.
+ *
+ */
+union cvmx_xcv_ctl {
+ u64 u64;
+ struct cvmx_xcv_ctl_s {
+ u64 reserved_4_63 : 60;
+ u64 lpbk_ext : 1;
+ u64 lpbk_int : 1;
+ u64 speed : 2;
+ } s;
+ struct cvmx_xcv_ctl_s cn73xx;
+};
+
+typedef union cvmx_xcv_ctl cvmx_xcv_ctl_t;
+
+/**
+ * cvmx_xcv_dll_ctl
+ *
+ * The RGMII timing specification requires that devices transmit clock and
+ * data synchronously. The specification requires external sources (namely
+ * the PC board trace routes) to introduce the appropriate 1.5 to 2.0 ns of
+ * delay.
+ *
+ * To eliminate the need for the PC board delays, the RGMII interface has optional
+ * on-board DLLs for both transmit and receive. For correct operation, at most one
+ * of the transmitter, board, or receiver involved in an RGMII link should
+ * introduce delay. By default/reset, the RGMII receivers delay the received clock,
+ * and the RGMII transmitters do not delay the transmitted clock. Whether this
+ * default works as-is with a given link partner depends on the behavior of the
+ * link partner and the PC board.
+ *
+ * These are the possible modes of RGMII receive operation:
+ *
+ * * XCV_DLL_CTL[CLKRX_BYP] = 0 (reset value) - The RGMII
+ * receive interface introduces clock delay using its internal DLL.
+ * This mode is appropriate if neither the remote
+ * transmitter nor the PC board delays the clock.
+ *
+ * * XCV_DLL_CTL[CLKRX_BYP] = 1, [CLKRX_SET] = 0x0 - The
+ * RGMII receive interface introduces no clock delay. This mode
+ * is appropriate if either the remote transmitter or the PC board
+ * delays the clock.
+ *
+ * These are the possible modes of RGMII transmit operation:
+ *
+ * * XCV_DLL_CTL[CLKTX_BYP] = 1, [CLKTX_SET] = 0x0 (reset value) -
+ * The RGMII transmit interface introduces no clock
+ * delay. This mode is appropriate if either the remote receiver
+ * or the PC board delays the clock.
+ *
+ * * XCV_DLL_CTL[CLKTX_BYP] = 0 - The RGMII transmit
+ * interface introduces clock delay using its internal DLL.
+ * This mode is appropriate if neither the remote receiver
+ * nor the PC board delays the clock.
+ */
+union cvmx_xcv_dll_ctl {
+ u64 u64;
+ struct cvmx_xcv_dll_ctl_s {
+ u64 reserved_32_63 : 32;
+ u64 lock : 1;
+ u64 clk_set : 7;
+ u64 clkrx_byp : 1;
+ u64 clkrx_set : 7;
+ u64 clktx_byp : 1;
+ u64 clktx_set : 7;
+ u64 reserved_2_7 : 6;
+ u64 refclk_sel : 2;
+ } s;
+ struct cvmx_xcv_dll_ctl_s cn73xx;
+};
+
+typedef union cvmx_xcv_dll_ctl cvmx_xcv_dll_ctl_t;
+
+/**
+ * cvmx_xcv_eco
+ */
+union cvmx_xcv_eco {
+ u64 u64;
+ struct cvmx_xcv_eco_s {
+ u64 reserved_16_63 : 48;
+ u64 eco_rw : 16;
+ } s;
+ struct cvmx_xcv_eco_s cn73xx;
+};
+
+typedef union cvmx_xcv_eco cvmx_xcv_eco_t;
+
+/**
+ * cvmx_xcv_inbnd_status
+ *
+ * This register contains RGMII in-band status.
+ *
+ */
+union cvmx_xcv_inbnd_status {
+ u64 u64;
+ struct cvmx_xcv_inbnd_status_s {
+ u64 reserved_4_63 : 60;
+ u64 duplex : 1;
+ u64 speed : 2;
+ u64 link : 1;
+ } s;
+ struct cvmx_xcv_inbnd_status_s cn73xx;
+};
+
+typedef union cvmx_xcv_inbnd_status cvmx_xcv_inbnd_status_t;
+
+/**
+ * cvmx_xcv_int
+ *
+ * This register controls interrupts.
+ *
+ */
+union cvmx_xcv_int {
+ u64 u64;
+ struct cvmx_xcv_int_s {
+ u64 reserved_7_63 : 57;
+ u64 tx_ovrflw : 1;
+ u64 tx_undflw : 1;
+ u64 incomp_byte : 1;
+ u64 duplex : 1;
+ u64 reserved_2_2 : 1;
+ u64 speed : 1;
+ u64 link : 1;
+ } s;
+ struct cvmx_xcv_int_s cn73xx;
+};
+
+typedef union cvmx_xcv_int cvmx_xcv_int_t;
+
+/**
+ * cvmx_xcv_reset
+ *
+ * This register controls reset.
+ *
+ */
+union cvmx_xcv_reset {
+ u64 u64;
+ struct cvmx_xcv_reset_s {
+ u64 enable : 1;
+ u64 reserved_16_62 : 47;
+ u64 clkrst : 1;
+ u64 reserved_12_14 : 3;
+ u64 dllrst : 1;
+ u64 reserved_8_10 : 3;
+ u64 comp : 1;
+ u64 reserved_4_6 : 3;
+ u64 tx_pkt_rst_n : 1;
+ u64 tx_dat_rst_n : 1;
+ u64 rx_pkt_rst_n : 1;
+ u64 rx_dat_rst_n : 1;
+ } s;
+ struct cvmx_xcv_reset_s cn73xx;
+};
+
+typedef union cvmx_xcv_reset cvmx_xcv_reset_t;
+
+#endif
--
2.35.1
^ permalink raw reply related [flat|nested] 52+ messages in thread* [PATCH 08/52] mips: octeon: Misc changes to existing headers for upcoming eth support
2022-03-30 10:06 [PATCH 00/52] mips: octeon: Add ethernet support Stefan Roese
` (6 preceding siblings ...)
2022-03-30 10:06 ` [PATCH 07/52] mips: octeon: Add cvmx-xcv-defs.h " Stefan Roese
@ 2022-03-30 10:06 ` Stefan Roese
2022-03-30 10:06 ` [PATCH 09/52] mips: octeon: Add cvmx-helper-agl.c Stefan Roese
` (41 subsequent siblings)
49 siblings, 0 replies; 52+ messages in thread
From: Stefan Roese @ 2022-03-30 10:06 UTC (permalink / raw)
To: u-boot; +Cc: awilliams, cchavva
This patch includes misc changes to already present Octeon MIPS header
files, which are necessary for the upcoming ethernet support.
The changes are mostly:
- DM GPIO & I2C infrastructure
- Coding style cleanup while reworking the headers
Signed-off-by: Stefan Roese <sr@denx.de>
---
.../mach-octeon/include/mach/cvmx-bootmem.h | 3 +-
arch/mips/mach-octeon/include/mach/cvmx-fpa.h | 3 +-
.../mips/mach-octeon/include/mach/cvmx-fpa3.h | 37 -------
.../include/mach/cvmx-helper-board.h | 6 +-
.../include/mach/cvmx-helper-fdt.h | 40 +++----
.../include/mach/cvmx-helper-pko.h | 2 +-
.../mach-octeon/include/mach/cvmx-helper.h | 20 ++++
.../mips/mach-octeon/include/mach/cvmx-regs.h | 100 ++++++++++++++++--
.../mach-octeon/include/mach/octeon_eth.h | 54 +++++-----
9 files changed, 166 insertions(+), 99 deletions(-)
diff --git a/arch/mips/mach-octeon/include/mach/cvmx-bootmem.h b/arch/mips/mach-octeon/include/mach/cvmx-bootmem.h
index 283ac5c6bb56..d5c004d017ef 100644
--- a/arch/mips/mach-octeon/include/mach/cvmx-bootmem.h
+++ b/arch/mips/mach-octeon/include/mach/cvmx-bootmem.h
@@ -26,7 +26,8 @@
/* Real physical addresses of memory regions */
#define OCTEON_DDR0_BASE (0x0ULL)
-#define OCTEON_DDR0_SIZE (0x010000000ULL)
+/* Use 16MiB here, as 256 leads to overwriting U-Boot reloc space */
+#define OCTEON_DDR0_SIZE (0x001000000ULL)
#define OCTEON_DDR1_BASE ((OCTEON_IS_OCTEON2() || OCTEON_IS_OCTEON3()) \
? 0x20000000ULL : 0x410000000ULL)
#define OCTEON_DDR1_SIZE (0x010000000ULL)
diff --git a/arch/mips/mach-octeon/include/mach/cvmx-fpa.h b/arch/mips/mach-octeon/include/mach/cvmx-fpa.h
index aa238a885072..0660c31b4f59 100644
--- a/arch/mips/mach-octeon/include/mach/cvmx-fpa.h
+++ b/arch/mips/mach-octeon/include/mach/cvmx-fpa.h
@@ -104,8 +104,9 @@ static inline void *cvmx_fpa_alloc(u64 pool)
/* FPA3 is handled differently */
if ((octeon_has_feature(OCTEON_FEATURE_FPA3))) {
return cvmx_fpa3_alloc(cvmx_fpa1_pool_to_fpa3_aura(pool));
- } else
+ } else {
return cvmx_fpa1_alloc(pool);
+ }
}
/**
diff --git a/arch/mips/mach-octeon/include/mach/cvmx-fpa3.h b/arch/mips/mach-octeon/include/mach/cvmx-fpa3.h
index b3e04d7f02a1..9bab03f59a09 100644
--- a/arch/mips/mach-octeon/include/mach/cvmx-fpa3.h
+++ b/arch/mips/mach-octeon/include/mach/cvmx-fpa3.h
@@ -526,41 +526,4 @@ const char *cvmx_fpa3_get_pool_name(cvmx_fpa3_pool_t pool);
int cvmx_fpa3_get_pool_buf_size(cvmx_fpa3_pool_t pool);
const char *cvmx_fpa3_get_aura_name(cvmx_fpa3_gaura_t aura);
-/* FIXME: Need a different macro for stage2 of u-boot */
-
-static inline void cvmx_fpa3_stage2_init(int aura, int pool, u64 stack_paddr, int stacklen,
- int buffer_sz, int buf_cnt)
-{
- cvmx_fpa_poolx_cfg_t pool_cfg;
-
- /* Configure pool stack */
- cvmx_write_csr_node(0, CVMX_FPA_POOLX_STACK_BASE(pool), stack_paddr);
- cvmx_write_csr_node(0, CVMX_FPA_POOLX_STACK_ADDR(pool), stack_paddr);
- cvmx_write_csr_node(0, CVMX_FPA_POOLX_STACK_END(pool), stack_paddr + stacklen);
-
- /* Configure pool with buffer size */
- pool_cfg.u64 = 0;
- pool_cfg.cn78xx.nat_align = 1;
- pool_cfg.cn78xx.buf_size = buffer_sz >> 7;
- pool_cfg.cn78xx.l_type = 0x2;
- pool_cfg.cn78xx.ena = 0;
- cvmx_write_csr_node(0, CVMX_FPA_POOLX_CFG(pool), pool_cfg.u64);
- /* Reset pool before starting */
- pool_cfg.cn78xx.ena = 1;
- cvmx_write_csr_node(0, CVMX_FPA_POOLX_CFG(pool), pool_cfg.u64);
-
- cvmx_write_csr_node(0, CVMX_FPA_AURAX_CFG(aura), 0);
- cvmx_write_csr_node(0, CVMX_FPA_AURAX_CNT_ADD(aura), buf_cnt);
- cvmx_write_csr_node(0, CVMX_FPA_AURAX_POOL(aura), (u64)pool);
-}
-
-static inline void cvmx_fpa3_stage2_disable(int aura, int pool)
-{
- cvmx_write_csr_node(0, CVMX_FPA_AURAX_POOL(aura), 0);
- cvmx_write_csr_node(0, CVMX_FPA_POOLX_CFG(pool), 0);
- cvmx_write_csr_node(0, CVMX_FPA_POOLX_STACK_BASE(pool), 0);
- cvmx_write_csr_node(0, CVMX_FPA_POOLX_STACK_ADDR(pool), 0);
- cvmx_write_csr_node(0, CVMX_FPA_POOLX_STACK_END(pool), 0);
-}
-
#endif /* __CVMX_FPA3_H__ */
diff --git a/arch/mips/mach-octeon/include/mach/cvmx-helper-board.h b/arch/mips/mach-octeon/include/mach/cvmx-helper-board.h
index 5837592d21aa..9cc61b1a350e 100644
--- a/arch/mips/mach-octeon/include/mach/cvmx-helper-board.h
+++ b/arch/mips/mach-octeon/include/mach/cvmx-helper-board.h
@@ -9,6 +9,8 @@
#ifndef __CVMX_HELPER_BOARD_H__
#define __CVMX_HELPER_BOARD_H__
+#include <asm-generic/gpio.h>
+
#define CVMX_VSC7224_NAME_LEN 16
typedef enum {
@@ -185,8 +187,8 @@ struct cvmx_vsc7224 {
struct cvmx_fdt_i2c_bus_info *i2c_bus;
/** Address of VSC7224 on i2c bus */
int i2c_addr;
- struct cvmx_fdt_gpio_info *los_gpio; /** LoS GPIO pin */
- struct cvmx_fdt_gpio_info *reset_gpio; /** Reset GPIO pin */
+ struct gpio_desc los_gpio; /** LoS GPIO pin */
+ struct gpio_desc reset_gpio; /** Reset GPIO pin */
int of_offset; /** Offset in device tree */
};
diff --git a/arch/mips/mach-octeon/include/mach/cvmx-helper-fdt.h b/arch/mips/mach-octeon/include/mach/cvmx-helper-fdt.h
index 332884557031..c3ce35958372 100644
--- a/arch/mips/mach-octeon/include/mach/cvmx-helper-fdt.h
+++ b/arch/mips/mach-octeon/include/mach/cvmx-helper-fdt.h
@@ -14,10 +14,13 @@
#include <fdtdec.h>
#include <time.h>
#include <asm/global_data.h>
+#include <asm-generic/gpio.h>
+#include <dm/device.h>
#include <linux/libfdt.h>
#include <mach/cvmx-helper-sfp.h>
+/* todo: this is deprecated and some of it can be removed at some time */
enum cvmx_i2c_bus_type {
CVMX_I2C_BUS_OCTEON,
CVMX_I2C_MUX_PCA9540,
@@ -55,6 +58,8 @@ struct cvmx_fdt_i2c_bus_info {
u8 enable_bit;
/** True if mux, false if switch */
bool is_mux;
+
+ struct udevice *i2c_bus;
};
/**
@@ -85,22 +90,24 @@ struct cvmx_fdt_sfp_info {
bool is_qsfp;
/** True if EEPROM data is valid */
bool valid;
+
/** SFP tx_disable GPIO descriptor */
- struct cvmx_fdt_gpio_info *tx_disable;
+ struct gpio_desc tx_disable;
/** SFP mod_abs/QSFP mod_prs GPIO descriptor */
- struct cvmx_fdt_gpio_info *mod_abs;
+ struct gpio_desc mod_abs;
/** SFP tx_error GPIO descriptor */
- struct cvmx_fdt_gpio_info *tx_error;
+ struct gpio_desc tx_error;
/** SFP rx_los GPIO discriptor */
- struct cvmx_fdt_gpio_info *rx_los;
+ struct gpio_desc rx_los;
/** QSFP select GPIO descriptor */
- struct cvmx_fdt_gpio_info *select;
+ struct gpio_desc select;
/** QSFP reset GPIO descriptor */
- struct cvmx_fdt_gpio_info *reset;
+ struct gpio_desc reset;
/** QSFP interrupt GPIO descriptor */
- struct cvmx_fdt_gpio_info *interrupt;
+ struct gpio_desc interrupt;
/** QSFP lp_mode GPIO descriptor */
- struct cvmx_fdt_gpio_info *lp_mode;
+ struct gpio_desc lp_mode;
+
/** Last mod_abs value */
int last_mod_abs;
/** Last rx_los value */
@@ -146,6 +153,9 @@ struct cvmx_fdt_sfp_info {
int cvmx_fdt_lookup_phandles(const void *fdt_addr, int node, const char *prop_name, int *lenp,
int *nodes);
+int cvmx_ofnode_lookup_phandles(ofnode node, const char *prop_name,
+ int *lenp, ofnode *nodes);
+
/**
* Helper to return the address property
*
@@ -341,8 +351,7 @@ int cvmx_fdt_node_offset_by_compatible_list(const void *fdt_addr, int startoffse
* Given the parent offset of an i2c device build up a list describing the bus
* which can contain i2c muxes and switches.
*
- * @param[in] fdt_addr address of device tree
- * @param of_offset Offset of the parent node of a GPIO device in
+ * @param[in] node ofnode of the parent node of a GPIO device in
* the device tree.
*
* Return: pointer to list of i2c devices starting from the root which
@@ -351,7 +360,7 @@ int cvmx_fdt_node_offset_by_compatible_list(const void *fdt_addr, int startoffse
*
* @see cvmx_fdt_free_i2c_bus()
*/
-struct cvmx_fdt_i2c_bus_info *cvmx_fdt_get_i2c_bus(const void *fdt_addr, int of_offset);
+struct cvmx_fdt_i2c_bus_info *cvmx_ofnode_get_i2c_bus(ofnode node);
/**
* Return the Octeon bus number for a bus descriptor
@@ -496,15 +505,6 @@ int __cvmx_fdt_parse_vsc7224(const void *fdt_addr);
*/
int __cvmx_fdt_parse_avsp5410(const void *fdt_addr);
-/**
- * Parse SFP information from device tree
- *
- * @param[in] fdt_addr Address of flat device tree
- *
- * Return: pointer to sfp info or NULL if error
- */
-struct cvmx_fdt_sfp_info *cvmx_helper_fdt_parse_sfp_info(const void *fdt_addr, int of_offset);
-
/**
* @INTERNAL
* Parses either a CS4343 phy or a slice of the phy from the device tree
diff --git a/arch/mips/mach-octeon/include/mach/cvmx-helper-pko.h b/arch/mips/mach-octeon/include/mach/cvmx-helper-pko.h
index 806102df2243..e1eb824c9346 100644
--- a/arch/mips/mach-octeon/include/mach/cvmx-helper-pko.h
+++ b/arch/mips/mach-octeon/include/mach/cvmx-helper-pko.h
@@ -17,7 +17,7 @@
* number. Users should set this pointer to a function before
* calling any cvmx-helper operations.
*/
-void (*cvmx_override_pko_queue_priority)(int ipd_port, u8 *priorities);
+extern void (*cvmx_override_pko_queue_priority)(int ipd_port, u8 *priorities);
/**
* Gets the fpa pool number of pko pool
diff --git a/arch/mips/mach-octeon/include/mach/cvmx-helper.h b/arch/mips/mach-octeon/include/mach/cvmx-helper.h
index caa0c69fc05e..2a7b13322db2 100644
--- a/arch/mips/mach-octeon/include/mach/cvmx-helper.h
+++ b/arch/mips/mach-octeon/include/mach/cvmx-helper.h
@@ -127,6 +127,26 @@ enum cvmx_pko_padding {
CVMX_PKO_PADDING_60 = 1,
};
+/**
+ * cvmx_override_iface_phy_mode(int interface, int index) is a function pointer.
+ * It is meant to allow customization of interfaces which do not have a PHY.
+ *
+ * @returns 0 if MAC decides TX_CONFIG_REG or 1 if PHY decides TX_CONFIG_REG.
+ *
+ * If this function pointer is NULL then it defaults to the MAC.
+ */
+extern int (*cvmx_override_iface_phy_mode) (int interface, int index);
+
+/**
+ * cvmx_override_ipd_port_setup(int ipd_port) is a function
+ * pointer. It is meant to allow customization of the IPD port/port kind
+ * setup before packet input/output comes online. It is called
+ * after cvmx-helper does the default IPD configuration, but
+ * before IPD is enabled. Users should set this pointer to a
+ * function before calling any cvmx-helper operations.
+ */
+extern void (*cvmx_override_ipd_port_setup) (int ipd_port);
+
/**
* This function enables the IPD and also enables the packet interfaces.
* The packet interfaces (RGMII and SPI) must be enabled after the
diff --git a/arch/mips/mach-octeon/include/mach/cvmx-regs.h b/arch/mips/mach-octeon/include/mach/cvmx-regs.h
index dbb77232e262..f97c1e907f0b 100644
--- a/arch/mips/mach-octeon/include/mach/cvmx-regs.h
+++ b/arch/mips/mach-octeon/include/mach/cvmx-regs.h
@@ -6,6 +6,7 @@
#ifndef __CVMX_REGS_H__
#define __CVMX_REGS_H__
+#include <log.h>
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/io.h>
@@ -32,6 +33,7 @@
/* Regs */
#define CVMX_CIU3_NMI 0x0001010000000160ULL
+#define CVMX_CIU3_ISCX_W1C(x) (0x0001010090000000ull + ((x) & 1048575) * 8)
#define CVMX_MIO_BOOT_LOC_CFGX(x) (0x0001180000000080ULL + ((x) & 1) * 8)
#define MIO_BOOT_LOC_CFG_BASE GENMASK_ULL(27, 3)
@@ -55,11 +57,19 @@
#define CVMX_RNM_CTL_STATUS 0x0001180040000000ULL
#define RNM_CTL_STATUS_EER_VAL BIT_ULL(9)
+/* IOBDMA/LMTDMA IO addresses */
+#define CVMX_LMTDMA_ORDERED_IO_ADDR 0xffffffffffffa400ull
#define CVMX_IOBDMA_ORDERED_IO_ADDR 0xffffffffffffa200ull
/* turn the variable name into a string */
#define CVMX_TMP_STR(x) CVMX_TMP_STR2(x)
#define CVMX_TMP_STR2(x) #x
+#define VASTR(...) #__VA_ARGS__
+
+#define CVMX_PKO_LMTLINE 2ull
+#define CVMX_SCRATCH_BASE (-32768l) /* 0xffffffffffff8000 */
+
+#define COP0_CVMMEMCTL $11,7 /* Cavium memory control */
#define CVMX_RDHWR(result, regstr) \
asm volatile("rdhwr %[rt],$" CVMX_TMP_STR(regstr) : [rt] "=d"(result))
@@ -67,6 +77,13 @@
asm("rdhwr %[rt],$" CVMX_TMP_STR(regstr) : [rt] "=d"(result))
#define CVMX_POP(result, input) \
asm("pop %[rd],%[rs]" : [rd] "=d"(result) : [rs] "d"(input))
+#define CVMX_MF_COP0(val, cop0) \
+ asm("dmfc0 %[rt]," VASTR(cop0) : [rt] "=d" (val))
+#define CVMX_MT_COP0(val, cop0) \
+ asm("dmtc0 %[rt]," VASTR(cop0) : : [rt] "d" (val))
+
+#define CVMX_MF_CVM_MEM_CTL(val) CVMX_MF_COP0(val, COP0_CVMMEMCTL)
+#define CVMX_MT_CVM_MEM_CTL(val) CVMX_MT_COP0(val, COP0_CVMMEMCTL)
#define CVMX_SYNC asm volatile("sync\n" : : : "memory")
#define CVMX_SYNCW asm volatile("syncw\nsyncw\n" : : : "memory")
@@ -81,6 +98,18 @@
#define CVMX_MF_CHORD(dest) CVMX_RDHWR(dest, 30)
+#define CVMX_PREFETCH0(address) CVMX_PREFETCH(address, 0)
+#define CVMX_PREFETCH128(address) CVMX_PREFETCH(address, 128)
+
+/** a normal prefetch */
+#define CVMX_PREFETCH(address, offset) CVMX_PREFETCH_PREF0(address, offset)
+
+/** normal prefetches that use the pref instruction */
+#define CVMX_PREFETCH_PREFX(X, address, offset) \
+ asm volatile ("pref %[type], %[off](%[rbase])" : : [rbase] "d" (address), [off] "I" (offset), [type] "n" (X))
+#define CVMX_PREFETCH_PREF0(address, offset) \
+ CVMX_PREFETCH_PREFX(0, address, offset)
+
/*
* The macros cvmx_likely and cvmx_unlikely use the
* __builtin_expect GCC operation to control branch
@@ -405,6 +434,30 @@ static inline unsigned int cvmx_get_local_core_num(void)
return core_num & core_mask;
}
+/**
+ * Given a CSR address return the node number of that address
+ *
+ * @param addr Address to extract node number from
+ *
+ * @return node number
+ */
+static inline u8 cvmx_csr_addr_to_node(u64 addr)
+{
+ return (addr >> CVMX_NODE_IO_SHIFT) & CVMX_NODE_MASK;
+}
+
+/**
+ * Strip the node address bits from a CSR address
+ *
+ * @param addr CSR address to strip the node bits from
+ *
+ * @return CSR address with the node bits set to zero
+ */
+static inline u64 cvmx_csr_addr_strip_node(u64 addr)
+{
+ return addr & ~((u64)CVMX_NODE_MASK << CVMX_NODE_IO_SHIFT);
+}
+
/**
* Returns the number of bits set in the provided value.
* Simple wrapper for POP instruction.
@@ -428,14 +481,45 @@ static inline u32 cvmx_pop(u32 val)
#define cvmx_printf printf
#define cvmx_vprintf vprintf
-#if defined(DEBUG)
-void cvmx_warn(const char *format, ...) __printf(1, 2);
-#else
-void cvmx_warn(const char *format, ...);
-#endif
+/* Use common debug macros */
+#define cvmx_warn debug
+#define cvmx_warn_if debug_cond
+
+/**
+ * Atomically adds a signed value to a 32 bit (aligned) memory location,
+ * and returns previous value.
+ *
+ * Memory access ordering is enforced before/after the atomic operation,
+ * so no additional 'sync' instructions are required.
+ *
+ * @param ptr address in memory to add incr to
+ * @param incr amount to increment memory location by (signed)
+ *
+ * @return Value of memory location before increment
+ */
+static inline int32_t cvmx_atomic_fetch_and_add32(int32_t * ptr, int32_t incr)
+{
+ int32_t val;
-#define cvmx_warn_if(expression, format, ...) \
- if (expression) \
- cvmx_warn(format, ##__VA_ARGS__)
+ val = *ptr;
+ *ptr += incr;
+ return val;
+}
+
+/**
+ * Atomically adds a signed value to a 32 bit (aligned) memory location.
+ *
+ * This version does not perform 'sync' operations to enforce memory
+ * operations. This should only be used when there are no memory operation
+ * ordering constraints. (This should NOT be used for reference counting -
+ * use the standard version instead.)
+ *
+ * @param ptr address in memory to add incr to
+ * @param incr amount to increment memory location by (signed)
+ */
+static inline void cvmx_atomic_add32_nosync(int32_t * ptr, int32_t incr)
+{
+ *ptr += incr;
+}
#endif /* __CVMX_REGS_H__ */
diff --git a/arch/mips/mach-octeon/include/mach/octeon_eth.h b/arch/mips/mach-octeon/include/mach/octeon_eth.h
index 096fcfbfcfaa..83e62075ed9f 100644
--- a/arch/mips/mach-octeon/include/mach/octeon_eth.h
+++ b/arch/mips/mach-octeon/include/mach/octeon_eth.h
@@ -1,17 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Copyright (C) 2020 Marvell International Ltd.
+ * Copyright (C) 2020-2022 Marvell International Ltd.
*/
#ifndef __OCTEON_ETH_H__
#define __OCTEON_ETH_H__
-#include <phy.h>
-#include <miiphy.h>
-
#include <mach/cvmx-helper.h>
#include <mach/cvmx-helper-board.h>
-#include <mach/octeon_fdt.h>
struct eth_device;
@@ -27,33 +23,25 @@ struct octeon_eth_info {
struct phy_device *phydev; /** PHY device */
struct eth_device *ethdev; /** Eth device this priv is part of */
int mii_addr;
- int phy_fdt_offset; /** Offset of PHY info in device tree */
- int fdt_offset; /** Offset of Eth interface in DT */
- int phy_offset; /** Offset of PHY dev in device tree */
+ int phy_fdt_offset; /** Offset of PHY info in device tree */
+ int fdt_offset; /** Offset of Eth interface in DT */
+ int phy_offset; /** Offset of PHY device in device tree */
enum cvmx_phy_type phy_device_type; /** Type of PHY */
/* current link status, use to reconfigure on status changes */
u64 packets_sent;
u64 packets_received;
- u32 link_speed : 2;
- u32 link_duplex : 1;
- u32 link_status : 1;
- u32 loopback : 1;
- u32 enabled : 1;
- u32 is_c45 : 1; /** Set if we need to use clause 45 */
- u32 vitesse_sfp_config : 1; /** Need Vitesse SFP config */
- u32 ti_gpio_config : 1; /** Need TI GPIO config */
- u32 bgx_mac_set : 1; /** Has the BGX MAC been set already */
- u64 last_bgx_mac; /** Last BGX MAC address set */
- u64 gmx_base; /** Base address to access GMX CSRs */
- bool mod_abs; /** True if module is absent */
-
- /**
- * User defined function to check if a SFP+ module is absent or not.
- *
- * @param dev Ethernet device
- * @param data User supplied data
- */
- int (*check_mod_abs)(struct eth_device *dev, void *data);
+ uint32_t link_speed : 2;
+ uint32_t link_duplex : 1;
+ uint32_t link_status : 1;
+ uint32_t loopback : 1;
+ uint32_t enabled : 1;
+ uint32_t is_c45 : 1; /** Set if we need to use clause 45 */
+ uint32_t vitesse_sfp_config : 1; /** Need Vitesse SFP config */
+ uint32_t ti_gpio_config : 1; /** Need TI GPIO configuration */
+ uint32_t bgx_mac_set : 1; /** Has the BGX MAC been set already */
+ u64 last_bgx_mac; /** Last BGX MAC address set */
+ u64 gmx_base; /** Base address to access GMX CSRs */
+ bool mod_abs; /** True if module is absent */
/** User supplied data for check_mod_abs */
void *mod_abs_data;
@@ -71,12 +59,20 @@ struct octeon_eth_info {
* @return 0 for success, otherwise error
*/
int (*mod_abs_changed)(struct eth_device *dev, bool mod_abs);
+
/** SDK phy information data structure */
cvmx_phy_info_t phy_info;
+
+ struct udevice *mdio_dev;
+ struct mii_dev *bus;
+ struct phy_device *phy_dev;
+
#ifdef CONFIG_OCTEON_SFP
/** Information about connected SFP/SFP+/SFP28/QSFP+/QSFP28 module */
struct octeon_sfp_info sfp;
#endif
+
+ cvmx_wqe_t *work;
};
/**
@@ -136,6 +132,6 @@ void octeon_eth_register_mod_abs_changed(struct eth_device *dev,
*
* NOTE: If the module state is changed then the module callback is called.
*/
-void octeon_phy_port_check(struct eth_device *dev);
+void octeon_phy_port_check(struct udevice *dev);
#endif /* __OCTEON_ETH_H__ */
--
2.35.1
^ permalink raw reply related [flat|nested] 52+ messages in thread* [PATCH 09/52] mips: octeon: Add cvmx-helper-agl.c
2022-03-30 10:06 [PATCH 00/52] mips: octeon: Add ethernet support Stefan Roese
` (7 preceding siblings ...)
2022-03-30 10:06 ` [PATCH 08/52] mips: octeon: Misc changes to existing headers for upcoming eth support Stefan Roese
@ 2022-03-30 10:06 ` Stefan Roese
2022-03-30 10:06 ` [PATCH 10/52] mips: octeon: Add cvmx-helper-bgx.c Stefan Roese
` (40 subsequent siblings)
49 siblings, 0 replies; 52+ messages in thread
From: Stefan Roese @ 2022-03-30 10:06 UTC (permalink / raw)
To: u-boot; +Cc: awilliams, cchavva
From: Aaron Williams <awilliams@marvell.com>
Import cvmx-helper-agl.c from 2013 U-Boot. It will be used by the later
added drivers to support networking on the MIPS Octeon II / III
platforms.
Signed-off-by: Aaron Williams <awilliams@marvell.com>
Signed-off-by: Stefan Roese <sr@denx.de>
---
arch/mips/mach-octeon/cvmx-helper-agl.c | 231 ++++++++++++++++++++++++
1 file changed, 231 insertions(+)
create mode 100644 arch/mips/mach-octeon/cvmx-helper-agl.c
diff --git a/arch/mips/mach-octeon/cvmx-helper-agl.c b/arch/mips/mach-octeon/cvmx-helper-agl.c
new file mode 100644
index 000000000000..7eb99ac7c22b
--- /dev/null
+++ b/arch/mips/mach-octeon/cvmx-helper-agl.c
@@ -0,0 +1,231 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018-2022 Marvell International Ltd.
+ *
+ * Functions for AGL (RGMII) initialization, configuration,
+ * and monitoring.
+ */
+
+#include <log.h>
+#include <time.h>
+#include <linux/delay.h>
+
+#include <mach/cvmx-regs.h>
+#include <mach/cvmx-csr.h>
+#include <mach/cvmx-bootmem.h>
+#include <mach/octeon-model.h>
+#include <mach/cvmx-fuse.h>
+#include <mach/octeon-feature.h>
+#include <mach/cvmx-qlm.h>
+#include <mach/octeon_qlm.h>
+#include <mach/cvmx-pcie.h>
+#include <mach/cvmx-coremask.h>
+
+#include <mach/cvmx-helper.h>
+#include <mach/cvmx-helper-cfg.h>
+
+#include <mach/cvmx-agl.h>
+#include <mach/cvmx-pki.h>
+
+#include <mach/cvmx-agl-defs.h>
+#include <mach/cvmx-pko-defs.h>
+
+int __cvmx_helper_agl_enumerate(int xiface)
+{
+ if (OCTEON_IS_MODEL(OCTEON_CN70XX)) {
+ union cvmx_agl_prtx_ctl agl_prtx_ctl;
+
+ agl_prtx_ctl.u64 = csr_rd(CVMX_AGL_PRTX_CTL(0));
+ if (agl_prtx_ctl.s.mode == 0) /* RGMII */
+ return 1;
+ }
+ return 0;
+}
+
+/**
+ * @INTERNAL
+ * Convert interface to port to assess CSRs.
+ *
+ * @param xiface Interface to probe
+ * @return The port corresponding to the interface
+ */
+int cvmx_helper_agl_get_port(int xiface)
+{
+ struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
+
+ if (OCTEON_IS_MODEL(OCTEON_CN70XX))
+ return xi.interface - 4;
+ return -1;
+}
+
+/**
+ * @INTERNAL
+ * Probe a RGMII interface and determine the number of ports
+ * connected to it. The RGMII interface should still be down
+ * after this call.
+ *
+ * @param interface to probe
+ *
+ * @return Number of ports on the interface. Zero to disable.
+ */
+int __cvmx_helper_agl_probe(int interface)
+{
+ int port = cvmx_helper_agl_get_port(interface);
+ union cvmx_agl_gmx_bist gmx_bist;
+ union cvmx_agl_gmx_prtx_cfg gmx_prtx_cfg;
+ union cvmx_agl_prtx_ctl agl_prtx_ctl;
+ int result;
+
+ result = __cvmx_helper_agl_enumerate(interface);
+ if (result == 0)
+ return 0;
+
+ /* Check BIST status */
+ gmx_bist.u64 = csr_rd(CVMX_AGL_GMX_BIST);
+ if (gmx_bist.u64)
+ printf("Management port AGL failed BIST (0x%016llx) on AGL%d\n",
+ CAST64(gmx_bist.u64), port);
+
+ /* Disable the external input/output */
+ gmx_prtx_cfg.u64 = csr_rd(CVMX_AGL_GMX_PRTX_CFG(port));
+ gmx_prtx_cfg.s.en = 0;
+ csr_wr(CVMX_AGL_GMX_PRTX_CFG(port), gmx_prtx_cfg.u64);
+
+ /* Set the rgx_ref_clk MUX with AGL_PRTx_CTL[REFCLK_SEL]. Default value
+ * is 0 (RGMII REFCLK). Recommended to use RGMII RXC(1) or sclk/4 (2)
+ * to save cost.
+ */
+
+ agl_prtx_ctl.u64 = csr_rd(CVMX_AGL_PRTX_CTL(port));
+ agl_prtx_ctl.s.clkrst = 0;
+ agl_prtx_ctl.s.dllrst = 0;
+ agl_prtx_ctl.s.clktx_byp = 0;
+
+ if (OCTEON_IS_MODEL(OCTEON_CN70XX)) {
+ bool tx_enable_bypass;
+ int tx_delay;
+
+ agl_prtx_ctl.s.refclk_sel =
+ cvmx_helper_get_agl_refclk_sel(interface, port);
+ agl_prtx_ctl.s.clkrx_set =
+ cvmx_helper_get_agl_rx_clock_skew(interface, port);
+ agl_prtx_ctl.s.clkrx_byp =
+ cvmx_helper_get_agl_rx_clock_delay_bypass(interface,
+ port);
+ cvmx_helper_cfg_get_rgmii_tx_clk_delay(
+ interface, port, &tx_enable_bypass, &tx_delay);
+ agl_prtx_ctl.s.clktx_byp = tx_enable_bypass;
+ agl_prtx_ctl.s.clktx_set = tx_delay;
+ }
+ csr_wr(CVMX_AGL_PRTX_CTL(port), agl_prtx_ctl.u64);
+ /* Force write out before wait */
+ csr_rd(CVMX_AGL_PRTX_CTL(port));
+ udelay(500);
+
+ /* Enable the compensation controller */
+ agl_prtx_ctl.u64 = csr_rd(CVMX_AGL_PRTX_CTL(port));
+ agl_prtx_ctl.s.drv_byp = 0;
+ csr_wr(CVMX_AGL_PRTX_CTL(port), agl_prtx_ctl.u64);
+ /* Force write out before wait */
+ csr_rd(CVMX_AGL_PRTX_CTL(port));
+
+ if (!OCTEON_IS_OCTEON3()) {
+ /* Enable the interface */
+ agl_prtx_ctl.u64 = csr_rd(CVMX_AGL_PRTX_CTL(port));
+ agl_prtx_ctl.s.enable = 1;
+ csr_wr(CVMX_AGL_PRTX_CTL(port), agl_prtx_ctl.u64);
+ /* Read the value back to force the previous write */
+ agl_prtx_ctl.u64 = csr_rd(CVMX_AGL_PRTX_CTL(port));
+ }
+
+ /* Enable the compensation controller */
+ agl_prtx_ctl.u64 = csr_rd(CVMX_AGL_PRTX_CTL(port));
+ agl_prtx_ctl.s.comp = 1;
+ csr_wr(CVMX_AGL_PRTX_CTL(port), agl_prtx_ctl.u64);
+ /* Force write out before wait */
+ csr_rd(CVMX_AGL_PRTX_CTL(port));
+
+ /* Wait for compensation state to lock. */
+ udelay(500);
+
+ return result;
+}
+
+/**
+ * @INTERNAL
+ * Bringup and enable a RGMII interface. After this call packet
+ * I/O should be fully functional. This is called with IPD
+ * enabled but PKO disabled.
+ *
+ * @param interface to bring up
+ *
+ * @return Zero on success, negative on failure
+ */
+int __cvmx_helper_agl_enable(int interface)
+{
+ int port = cvmx_helper_agl_get_port(interface);
+ int ipd_port = cvmx_helper_get_ipd_port(interface, port);
+ union cvmx_pko_mem_port_ptrs pko_mem_port_ptrs;
+ union cvmx_pko_reg_read_idx read_idx;
+ int do_link_set = 1;
+ int i;
+
+ /* Setup PKO for AGL interface. Back pressure is not supported. */
+ pko_mem_port_ptrs.u64 = 0;
+ read_idx.u64 = 0;
+ read_idx.s.inc = 1;
+ csr_wr(CVMX_PKO_REG_READ_IDX, read_idx.u64);
+
+ for (i = 0; i < 40; i++) {
+ pko_mem_port_ptrs.u64 = csr_rd(CVMX_PKO_MEM_PORT_PTRS);
+ if (pko_mem_port_ptrs.s.pid == 24) {
+ pko_mem_port_ptrs.s.eid = 10;
+ pko_mem_port_ptrs.s.bp_port = 40;
+ csr_wr(CVMX_PKO_MEM_PORT_PTRS, pko_mem_port_ptrs.u64);
+ break;
+ }
+ }
+
+ cvmx_agl_enable(port);
+ if (do_link_set)
+ cvmx_agl_link_set(port, cvmx_agl_link_get(ipd_port));
+
+ return 0;
+}
+
+/**
+ * @INTERNAL
+ * Return the link state of an IPD/PKO port as returned by
+ * auto negotiation. The result of this function may not match
+ * Octeon's link config if auto negotiation has changed since
+ * the last call to cvmx_helper_link_set().
+ *
+ * @param ipd_port IPD/PKO port to query
+ *
+ * @return Link state
+ */
+cvmx_helper_link_info_t __cvmx_helper_agl_link_get(int ipd_port)
+{
+ return cvmx_agl_link_get(ipd_port);
+}
+
+/**
+ * @INTERNAL
+ * Configure an IPD/PKO port for the specified link state. This
+ * function does not influence auto negotiation at the PHY level.
+ * The passed link state must always match the link state returned
+ * by cvmx_helper_link_get(). It is normally best to use
+ * cvmx_helper_link_autoconf() instead.
+ *
+ * @param ipd_port IPD/PKO port to configure
+ * @param link_info The new link state
+ *
+ * @return Zero on success, negative on failure
+ */
+int __cvmx_helper_agl_link_set(int ipd_port, cvmx_helper_link_info_t link_info)
+{
+ int interface = cvmx_helper_get_interface_num(ipd_port);
+ int port = cvmx_helper_agl_get_port(interface);
+
+ return cvmx_agl_link_set(port, link_info);
+}
--
2.35.1
^ permalink raw reply related [flat|nested] 52+ messages in thread* [PATCH 10/52] mips: octeon: Add cvmx-helper-bgx.c
2022-03-30 10:06 [PATCH 00/52] mips: octeon: Add ethernet support Stefan Roese
` (8 preceding siblings ...)
2022-03-30 10:06 ` [PATCH 09/52] mips: octeon: Add cvmx-helper-agl.c Stefan Roese
@ 2022-03-30 10:06 ` Stefan Roese
2022-03-30 10:06 ` [PATCH 11/52] mips: octeon: Add cvmx-helper-board.c Stefan Roese
` (39 subsequent siblings)
49 siblings, 0 replies; 52+ messages in thread
From: Stefan Roese @ 2022-03-30 10:06 UTC (permalink / raw)
To: u-boot; +Cc: awilliams, cchavva
From: Aaron Williams <awilliams@marvell.com>
Import cvmx-helper-bgx.c from 2013 U-Boot. It will be used by the later
added drivers to support networking on the MIPS Octeon II / III
platforms.
Signed-off-by: Aaron Williams <awilliams@marvell.com>
Signed-off-by: Stefan Roese <sr@denx.de>
---
arch/mips/mach-octeon/cvmx-helper-bgx.c | 3215 +++++++++++++++++++++++
1 file changed, 3215 insertions(+)
create mode 100644 arch/mips/mach-octeon/cvmx-helper-bgx.c
diff --git a/arch/mips/mach-octeon/cvmx-helper-bgx.c b/arch/mips/mach-octeon/cvmx-helper-bgx.c
new file mode 100644
index 000000000000..eb828e3608fe
--- /dev/null
+++ b/arch/mips/mach-octeon/cvmx-helper-bgx.c
@@ -0,0 +1,3215 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018-2022 Marvell International Ltd.
+ *
+ * Functions to configure the BGX MAC.
+ */
+
+#include <log.h>
+#include <linux/delay.h>
+
+#include <mach/cvmx-regs.h>
+#include <mach/cvmx-csr.h>
+#include <mach/cvmx-bootmem.h>
+#include <mach/octeon-model.h>
+#include <mach/cvmx-fuse.h>
+#include <mach/octeon-feature.h>
+#include <mach/cvmx-qlm.h>
+#include <mach/octeon_qlm.h>
+#include <mach/cvmx-pcie.h>
+#include <mach/cvmx-coremask.h>
+
+#include <mach/cvmx-agl-defs.h>
+#include <mach/cvmx-bgxx-defs.h>
+#include <mach/cvmx-gmxx-defs.h>
+#include <mach/cvmx-ipd-defs.h>
+#include <mach/cvmx-pki-defs.h>
+#include <mach/cvmx-xcv-defs.h>
+
+#include <mach/cvmx-helper.h>
+#include <mach/cvmx-helper-board.h>
+#include <mach/cvmx-helper-fdt.h>
+#include <mach/cvmx-helper-bgx.h>
+#include <mach/cvmx-helper-cfg.h>
+#include <mach/cvmx-helper-util.h>
+#include <mach/cvmx-helper-pki.h>
+
+#include <mach/cvmx-global-resources.h>
+#include <mach/cvmx-pko-internal-ports-range.h>
+#include <mach/cvmx-ilk.h>
+#include <mach/cvmx-pip.h>
+
+/* Enable this define to see BGX error messages */
+/*#define DEBUG_BGX */
+
+/* Set this to a non-zero value to trace the functions called while
+ * initializing BGX (read by every helper below via "if (debug)").
+ */
+static const int debug;
+
+/**
+ * cvmx_helper_bgx_override_autoneg(int xiface, int index) is a function pointer
+ * to override enabling/disabling of autonegotiation for SGMII, 10G-KR or 40G-KR4
+ * interfaces. This function is called when interface is initialized.
+ */
+int (*cvmx_helper_bgx_override_autoneg)(int xiface, int index) = NULL;
+
+/*
+ * cvmx_helper_bgx_override_fec(int xiface) is a function pointer
+ * to override enabling/disabling of FEC for 10G interfaces. This function
+ * is called when interface is initialized.
+ */
+int (*cvmx_helper_bgx_override_fec)(int xiface, int index) = NULL;
+
+/**
+ * Delay after enabling an interface based on the mode. Different modes take
+ * different amounts of time.
+ *
+ * @param mode BGX LMAC mode that was just enabled
+ */
+static void
+__cvmx_helper_bgx_interface_enable_delay(cvmx_helper_interface_mode_t mode)
+{
+ switch (mode) {
+ case CVMX_HELPER_INTERFACE_MODE_10G_KR:
+ case CVMX_HELPER_INTERFACE_MODE_40G_KR4:
+ case CVMX_HELPER_INTERFACE_MODE_XLAUI:
+ case CVMX_HELPER_INTERFACE_MODE_XFI:
+ mdelay(250);
+ break;
+ case CVMX_HELPER_INTERFACE_MODE_RXAUI:
+ case CVMX_HELPER_INTERFACE_MODE_XAUI:
+ mdelay(100);
+ break;
+ case CVMX_HELPER_INTERFACE_MODE_SGMII:
+ /* Same as the default delay; kept explicit for clarity */
+ mdelay(50);
+ break;
+ default:
+ mdelay(50);
+ break;
+ }
+}
+
+/**
+ * @INTERNAL
+ *
+ * Report how many LMACs (ports) are configured on a BGX interface.
+ *
+ * @param xiface Global interface identifier (node + interface)
+ * @return Number of active LMACs on this BGX
+ */
+int __cvmx_helper_bgx_enumerate(int xiface)
+{
+ struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
+ cvmx_bgxx_cmr_tx_lmacs_t tx_lmacs;
+
+ tx_lmacs.u64 = csr_rd_node(xi.node,
+ CVMX_BGXX_CMR_TX_LMACS(xi.interface));
+
+ return tx_lmacs.s.lmacs;
+}
+
+/**
+ * @INTERNAL
+ *
+ * Returns mode of each BGX LMAC (port).
+ * This is different than 'cvmx_helper_interface_get_mode()' which
+ * provides mode of an entire interface, but when BGX is in "mixed"
+ * mode this function should be called instead to get the protocol
+ * for each port (BGX LMAC) individually.
+ * Both function return the same enumerated mode.
+ *
+ * @param xiface is the global interface identifier
+ * @param index is the interface port index
+ * @returns mode of the individual port
+ */
+cvmx_helper_interface_mode_t cvmx_helper_bgx_get_mode(int xiface, int index)
+{
+ struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
+ cvmx_bgxx_cmrx_config_t cmr_config;
+ cvmx_bgxx_spux_br_pmd_control_t pmd_control;
+
+ cmr_config.u64 = csr_rd_node(
+ xi.node, CVMX_BGXX_CMRX_CONFIG(index, xi.interface));
+
+ /* LMAC_TYPE register encoding: 0=SGMII, 1=XAUI, 2=RXAUI,
+ * 3=10G (XFI or 10G-KR), 4=40G (XLAUI or 40G-KR4), 5=RGMII.
+ * For types 3 and 4 the KR variants are distinguished from the
+ * plain serial modes by whether link training is enabled.
+ */
+ switch (cmr_config.s.lmac_type) {
+ case 0:
+ return CVMX_HELPER_INTERFACE_MODE_SGMII;
+ case 1:
+ return CVMX_HELPER_INTERFACE_MODE_XAUI;
+ case 2:
+ return CVMX_HELPER_INTERFACE_MODE_RXAUI;
+ case 3:
+ /* CN78XX pass 1.x cannot read PMD control; fall back to
+ * the interface-wide mode.
+ */
+ if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X))
+ return cvmx_helper_interface_get_mode(xiface);
+ pmd_control.u64 = csr_rd_node(
+ xi.node,
+ CVMX_BGXX_SPUX_BR_PMD_CONTROL(index, xi.interface));
+ if (pmd_control.s.train_en)
+ return CVMX_HELPER_INTERFACE_MODE_10G_KR;
+ else
+ return CVMX_HELPER_INTERFACE_MODE_XFI;
+ break; /* unreachable: both branches above return */
+ case 4:
+ if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X))
+ return cvmx_helper_interface_get_mode(xiface);
+ pmd_control.u64 = csr_rd_node(
+ xi.node,
+ CVMX_BGXX_SPUX_BR_PMD_CONTROL(index, xi.interface));
+ if (pmd_control.s.train_en)
+ return CVMX_HELPER_INTERFACE_MODE_40G_KR4;
+ else
+ return CVMX_HELPER_INTERFACE_MODE_XLAUI;
+ break; /* unreachable: both branches above return */
+ case 5:
+ return CVMX_HELPER_INTERFACE_MODE_RGMII;
+ default:
+ return CVMX_HELPER_INTERFACE_MODE_DISABLED;
+ }
+}
+
+/**
+ * @INTERNAL
+ * Disable the BGX port
+ *
+ * @param xipd_port IPD port of the BGX interface to disable
+ */
+void cvmx_helper_bgx_disable(int xipd_port)
+{
+ int xiface = cvmx_helper_get_interface_num(xipd_port);
+ struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
+ struct cvmx_xport xp = cvmx_helper_ipd_port_to_xport(xipd_port);
+ int node = xi.node;
+ int index = cvmx_helper_get_interface_index_num(xp.port);
+ cvmx_bgxx_cmrx_config_t cmr_config;
+
+ /* Read-modify-write CMR config: clear only the packet enable
+ * bits so the rest of the LMAC configuration is preserved.
+ */
+ cmr_config.u64 =
+ csr_rd_node(node, CVMX_BGXX_CMRX_CONFIG(index, xi.interface));
+ if (debug)
+ debug("%s: Disabling tx and rx packets on xipd port 0x%x\n",
+ __func__, xipd_port);
+ cmr_config.s.data_pkt_tx_en = 0;
+ cmr_config.s.data_pkt_rx_en = 0;
+ csr_wr_node(node, CVMX_BGXX_CMRX_CONFIG(index, xi.interface),
+ cmr_config.u64);
+}
+
+/**
+ * Configure the XCV block for the given RGMII link state (speed 10/100/1000
+ * and up/down) and grant PKO TX credits when the link first comes up.
+ * The exact register sequence below is order-dependent.
+ *
+ * @param link_info link state to program into the XCV block
+ * @return Zero (no failure paths)
+ */
+static int __cvmx_helper_bgx_rgmii_speed(cvmx_helper_link_info_t link_info)
+{
+ cvmx_xcv_reset_t xcv_reset;
+ cvmx_xcv_ctl_t xcv_ctl;
+ cvmx_xcv_batch_crd_ret_t crd_ret;
+ cvmx_xcv_dll_ctl_t dll_ctl;
+ cvmx_xcv_comp_ctl_t comp_ctl;
+ int speed;
+ int up = link_info.s.link_up;
+ int do_credits;
+
+ /* XCV_CTL[SPEED] encoding: 0=10M, 1=100M, 2=1000M */
+ if (link_info.s.speed == 100)
+ speed = 1;
+ else if (link_info.s.speed == 10)
+ speed = 0;
+ else
+ speed = 2;
+
+ xcv_reset.u64 = csr_rd(CVMX_XCV_RESET);
+ xcv_ctl.u64 = csr_rd(CVMX_XCV_CTL);
+ /* Credits are only returned on the first up-transition, i.e. when
+ * XCV was not yet enabled.
+ */
+ do_credits = up && !xcv_reset.s.enable;
+
+ if (xcv_ctl.s.lpbk_int) {
+ xcv_reset.s.clkrst = 0;
+ csr_wr(CVMX_XCV_RESET, xcv_reset.u64);
+ }
+
+ if (up && (!xcv_reset.s.enable || xcv_ctl.s.speed != speed)) {
+ if (debug)
+ debug("%s: *** Enabling XCV block\n", __func__);
+ /* Enable the XCV block */
+ xcv_reset.u64 = csr_rd(CVMX_XCV_RESET);
+ xcv_reset.s.enable = 1;
+ csr_wr(CVMX_XCV_RESET, xcv_reset.u64);
+
+ /* Set operating mode */
+ xcv_ctl.u64 = csr_rd(CVMX_XCV_CTL);
+ xcv_ctl.s.speed = speed;
+ csr_wr(CVMX_XCV_CTL, xcv_ctl.u64);
+
+ /* Configure DLL - enable or bypass */
+ /* TX no bypass, RX bypass */
+ dll_ctl.u64 = csr_rd(CVMX_XCV_DLL_CTL);
+ dll_ctl.s.clkrx_set = 0;
+ dll_ctl.s.clkrx_byp = 1;
+ dll_ctl.s.clktx_byp = 0;
+ csr_wr(CVMX_XCV_DLL_CTL, dll_ctl.u64);
+
+ /* Enable */
+ dll_ctl.u64 = csr_rd(CVMX_XCV_DLL_CTL);
+ dll_ctl.s.refclk_sel = 0;
+ csr_wr(CVMX_XCV_DLL_CTL, dll_ctl.u64);
+ xcv_reset.u64 = csr_rd(CVMX_XCV_RESET);
+ xcv_reset.s.dllrst = 0;
+ csr_wr(CVMX_XCV_RESET, xcv_reset.u64);
+
+ /* Delay seems to be needed so XCV_DLL_CTL[CLK_SET] works */
+ udelay(10);
+
+ comp_ctl.u64 = csr_rd(CVMX_XCV_COMP_CTL);
+ //comp_ctl.s.drv_pctl = 0;
+ //comp_ctl.s.drv_nctl = 0;
+ comp_ctl.s.drv_byp = 0;
+ csr_wr(CVMX_XCV_COMP_CTL, comp_ctl.u64);
+
+ /* enable */
+ xcv_reset.u64 = csr_rd(CVMX_XCV_RESET);
+ xcv_reset.s.comp = 1;
+ csr_wr(CVMX_XCV_RESET, xcv_reset.u64);
+
+ /* setup the RXC */
+ xcv_reset.u64 = csr_rd(CVMX_XCV_RESET);
+ xcv_reset.s.clkrst = !xcv_ctl.s.lpbk_int;
+ csr_wr(CVMX_XCV_RESET, xcv_reset.u64);
+
+ /* datapaths come out of the reset
+ * - the datapath resets will disengage BGX from the RGMII
+ * interface
+ * - XCV will continue to return TX credits for each tick that
+ * is sent on the TX data path
+ */
+ xcv_reset.u64 = csr_rd(CVMX_XCV_RESET);
+ xcv_reset.s.tx_dat_rst_n = 1;
+ xcv_reset.s.rx_dat_rst_n = 1;
+ csr_wr(CVMX_XCV_RESET, xcv_reset.u64);
+ } else if (debug) {
+ debug("%s: *** Not enabling XCV\n", __func__);
+ debug(" up: %s, xcv_reset.s.enable: %d, xcv_ctl.s.speed: %d, speed: %d\n",
+ up ? "true" : "false", (unsigned int)xcv_reset.s.enable,
+ (unsigned int)xcv_ctl.s.speed, speed);
+ }
+
+ /* enable the packet flow
+ * - The packet resets will be only disengage on packet boundaries
+ * - XCV will continue to return TX credits for each tick that is
+ * sent on the TX datapath
+ */
+ xcv_reset.u64 = csr_rd(CVMX_XCV_RESET);
+ xcv_reset.s.tx_pkt_rst_n = up;
+ xcv_reset.s.rx_pkt_rst_n = up;
+ csr_wr(CVMX_XCV_RESET, xcv_reset.u64);
+
+ /* Full reset when link is down */
+ if (!up) {
+ if (debug)
+ debug("%s: *** Disabling XCV reset\n", __func__);
+ /* wait 2*MTU in time */
+ mdelay(10);
+ /* reset the world */
+ csr_wr(CVMX_XCV_RESET, 0);
+ }
+
+ /* grant PKO TX credits */
+ if (do_credits) {
+ crd_ret.u64 = csr_rd(CVMX_XCV_BATCH_CRD_RET);
+ crd_ret.s.crd_ret = 1;
+ csr_wr(CVMX_XCV_BATCH_CRD_RET, crd_ret.u64);
+ }
+
+ return 0;
+}
+
+/**
+ * Common per-LMAC RX setup: program the backpressure-on watermark, the
+ * port kind (pkind), the reassembly ID (CN73XX only) and the interface's
+ * backpressure channel masks.
+ *
+ * @param xiface Global interface identifier
+ * @param index LMAC index within the interface
+ */
+static void __cvmx_bgx_common_init_pknd(int xiface, int index)
+{
+ int num_ports;
+ int num_chl = 16;
+ struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
+ int node = xi.node;
+ int pknd;
+ cvmx_bgxx_cmrx_rx_bp_on_t bgx_rx_bp_on;
+ cvmx_bgxx_cmrx_rx_id_map_t cmr_rx_id_map;
+ cvmx_bgxx_cmr_chan_msk_and_t chan_msk_and;
+ cvmx_bgxx_cmr_chan_msk_or_t chan_msk_or;
+
+ if (debug)
+ debug("%s: interface %u:%d/%d\n", __func__, xi.node,
+ xi.interface, index);
+
+ /* NOTE(review): assumes at least one port on the interface,
+ * otherwise the mark computation below divides by zero.
+ */
+ num_ports = cvmx_helper_ports_on_interface(xiface);
+ /* Modify bp_on mark, depending on number of LMACS on that interface
+ * and write it for every port
+ */
+ bgx_rx_bp_on.u64 = 0;
+ bgx_rx_bp_on.s.mark = (CVMX_BGX_RX_FIFO_SIZE / (num_ports * 4 * 16));
+
+ /* Setup pkind */
+ pknd = cvmx_helper_get_pknd(xiface, index);
+ cmr_rx_id_map.u64 = csr_rd_node(
+ node, CVMX_BGXX_CMRX_RX_ID_MAP(index, xi.interface));
+ cmr_rx_id_map.s.pknd = pknd;
+ /* Change the default reassembly id (RID), as max 14 RIDs allowed */
+ if (OCTEON_IS_MODEL(OCTEON_CN73XX))
+ cmr_rx_id_map.s.rid = ((4 * xi.interface) + 2 + index);
+ csr_wr_node(node, CVMX_BGXX_CMRX_RX_ID_MAP(index, xi.interface),
+ cmr_rx_id_map.u64);
+ /* Set backpressure channel mask AND/OR registers.
+ * Use a 64-bit constant: for index >= 2 the shift count reaches
+ * 32/48 bits, which is undefined behavior on a 32-bit int.
+ */
+ chan_msk_and.u64 =
+ csr_rd_node(node, CVMX_BGXX_CMR_CHAN_MSK_AND(xi.interface));
+ chan_msk_or.u64 =
+ csr_rd_node(node, CVMX_BGXX_CMR_CHAN_MSK_OR(xi.interface));
+ chan_msk_and.s.msk_and |= ((1ULL << num_chl) - 1) << (16 * index);
+ chan_msk_or.s.msk_or |= ((1ULL << num_chl) - 1) << (16 * index);
+ csr_wr_node(node, CVMX_BGXX_CMR_CHAN_MSK_AND(xi.interface),
+ chan_msk_and.u64);
+ csr_wr_node(node, CVMX_BGXX_CMR_CHAN_MSK_OR(xi.interface),
+ chan_msk_or.u64);
+ /* set rx back pressure (bp_on) on value */
+ csr_wr_node(node, CVMX_BGXX_CMRX_RX_BP_ON(index, xi.interface),
+ bgx_rx_bp_on.u64);
+}
+
+/**
+ * @INTERNAL
+ * Probe a SGMII interface and determine the number of ports
+ * connected to it. The SGMII interface should still be down after
+ * this call. This is used by interfaces using the bgx mac.
+ *
+ * @param xiface Interface to probe
+ *
+ * @return Number of ports on the interface. Zero to disable.
+ */
+int __cvmx_helper_bgx_probe(int xiface)
+{
+ /* Probing a BGX simply means counting its configured LMACs */
+ return __cvmx_helper_bgx_enumerate(xiface);
+}
+
+/**
+ * @INTERNAL
+ * Return the size of the BGX TX_FIFO for a given LMAC,
+ * or 0 if the requested LMAC is inactive.
+ *
+ * The hardware FIFO is shared: one LMAC owns it all, two LMACs get
+ * half each, and three or four LMACs get a quarter each.
+ *
+ * TBD: Need also to add a "__cvmx_helper_bgx_speed()" function to
+ * return the speed of each LMAC.
+ */
+int __cvmx_helper_bgx_fifo_size(int xiface, unsigned int lmac)
+{
+ cvmx_bgxx_cmr_tx_lmacs_t lmacs;
+ struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
+ unsigned int tx_fifo_size = CVMX_BGX_TX_FIFO_SIZE;
+ unsigned int active;
+
+ /* FIXME: Add validation for interface# < BGX_count */
+ lmacs.u64 = csr_rd_node(xi.node, CVMX_BGXX_CMR_TX_LMACS(xi.interface));
+ active = lmacs.s.lmacs;
+
+ /* Unknown LMAC count, or LMAC index beyond the active range */
+ if (active < 1 || active > 4 || lmac >= active)
+ return 0;
+
+ if (active == 1)
+ return tx_fifo_size;
+ if (active == 2)
+ return tx_fifo_size >> 1;
+ /* 3 or 4 active LMACs: each gets a quarter of the FIFO */
+ return tx_fifo_size >> 2;
+}
+
+/**
+ * @INTERNAL
+ * Perform initialization required only once for an SGMII port.
+ *
+ * @param xiface Interface to init
+ * @param index Index of port on the interface
+ *
+ * @return Zero on success, negative on failure
+ */
+static int __cvmx_helper_bgx_sgmii_hardware_init_one_time(int xiface, int index)
+{
+ struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
+ int node = xi.node;
+ const u64 clock_mhz = 1200; /* todo: fixme */
+ cvmx_bgxx_gmp_pcs_miscx_ctl_t gmp_misc_ctl;
+ cvmx_bgxx_gmp_pcs_linkx_timer_t gmp_timer;
+
+ /* NOTE(review): sibling helpers pass 'xiface' here, not
+ * 'xi.interface' - confirm which form cvmx_helper_is_port_valid()
+ * expects, the two differ on multi-node systems.
+ */
+ if (!cvmx_helper_is_port_valid(xi.interface, index))
+ return 0;
+
+ if (debug)
+ debug("%s: interface %u:%d/%d\n", __func__, xi.node,
+ xi.interface, index);
+
+ /*
+ * Write PCS*_LINK*_TIMER_COUNT_REG[COUNT] with the
+ * appropriate value. 1000BASE-X specifies a 10ms
+ * interval. SGMII specifies a 1.6ms interval.
+ */
+ gmp_misc_ctl.u64 = csr_rd_node(
+ node, CVMX_BGXX_GMP_PCS_MISCX_CTL(index, xi.interface));
+ /* Adjust the MAC mode if requested by device tree */
+ gmp_misc_ctl.s.mac_phy = cvmx_helper_get_mac_phy_mode(xiface, index);
+ gmp_misc_ctl.s.mode = cvmx_helper_get_1000x_mode(xiface, index);
+ csr_wr_node(node, CVMX_BGXX_GMP_PCS_MISCX_CTL(index, xi.interface),
+ gmp_misc_ctl.u64);
+
+ gmp_timer.u64 = csr_rd_node(
+ node, CVMX_BGXX_GMP_PCS_LINKX_TIMER(index, xi.interface));
+ if (gmp_misc_ctl.s.mode)
+ /* 1000BASE-X */
+ gmp_timer.s.count = (10000ull * clock_mhz) >> 10;
+ else
+ /* SGMII */
+ gmp_timer.s.count = (1600ull * clock_mhz) >> 10;
+
+ csr_wr_node(node, CVMX_BGXX_GMP_PCS_LINKX_TIMER(index, xi.interface),
+ gmp_timer.u64);
+
+ /*
+ * Write the advertisement register to be used as the
+ * tx_Config_Reg<D15:D0> of the autonegotiation. In
+ * 1000BASE-X mode, tx_Config_Reg<D15:D0> is PCS*_AN*_ADV_REG.
+ * In SGMII PHY mode, tx_Config_Reg<D15:D0> is
+ * PCS*_SGM*_AN_ADV_REG. In SGMII MAC mode,
+ * tx_Config_Reg<D15:D0> is the fixed value 0x4001, so this
+ * step can be skipped.
+ */
+ if (gmp_misc_ctl.s.mode) {
+ /* 1000BASE-X */
+ cvmx_bgxx_gmp_pcs_anx_adv_t gmp_an_adv;
+
+ gmp_an_adv.u64 = csr_rd_node(
+ node, CVMX_BGXX_GMP_PCS_ANX_ADV(index, xi.interface));
+ gmp_an_adv.s.rem_flt = 0;
+ gmp_an_adv.s.pause = 3;
+ gmp_an_adv.s.hfd = 1;
+ gmp_an_adv.s.fd = 1;
+ csr_wr_node(node,
+ CVMX_BGXX_GMP_PCS_ANX_ADV(index, xi.interface),
+ gmp_an_adv.u64);
+ } else {
+ if (gmp_misc_ctl.s.mac_phy) {
+ /* PHY Mode */
+ cvmx_bgxx_gmp_pcs_sgmx_an_adv_t gmp_sgmx_an_adv;
+
+ gmp_sgmx_an_adv.u64 =
+ csr_rd_node(node, CVMX_BGXX_GMP_PCS_SGMX_AN_ADV(
+ index, xi.interface));
+ gmp_sgmx_an_adv.s.dup = 1;
+ gmp_sgmx_an_adv.s.speed = 2;
+ csr_wr_node(node,
+ CVMX_BGXX_GMP_PCS_SGMX_AN_ADV(index,
+ xi.interface),
+ gmp_sgmx_an_adv.u64);
+ } else {
+ /* MAC Mode - Nothing to do */
+ }
+ }
+ return 0;
+}
+
+/**
+ * @INTERNAL
+ * Bring up the SGMII interface to be ready for packet I/O but
+ * leave I/O disabled using the GMX override. This function
+ * follows the bringup documented in 10.6.3 of the manual.
+ *
+ * @param xiface Interface to bringup
+ * @param num_ports Number of ports on the interface
+ *
+ * @return Zero on success, negative on failure
+ */
+static int __cvmx_helper_bgx_sgmii_hardware_init(int xiface, int num_ports)
+{
+ int index;
+
+ for (index = 0; index < num_ports; index++) {
+ int xipd_port = cvmx_helper_get_ipd_port(xiface, index);
+ cvmx_helper_interface_mode_t mode;
+
+ if (!cvmx_helper_is_port_valid(xiface, index))
+ continue;
+
+ __cvmx_helper_bgx_port_init(xipd_port, 0);
+
+ /* RGMII ports are brought up through the XCV block, so skip
+ * the SGMII link programming for them.
+ */
+ mode = cvmx_helper_bgx_get_mode(xiface, index);
+ if (mode == CVMX_HELPER_INTERFACE_MODE_RGMII)
+ continue;
+
+ /* Program the current autonegotiated link state. The old
+ * "do_link_set" flag was always 1, so the conditional was
+ * dead code and has been removed.
+ */
+ __cvmx_helper_bgx_sgmii_link_set(
+ xipd_port,
+ __cvmx_helper_bgx_sgmii_link_get(xipd_port));
+ }
+
+ return 0;
+}
+
+/**
+ * @INTERNAL
+ * Bringup and enable a SGMII interface. After this call packet
+ * I/O should be fully functional. This is called with IPD
+ * enabled but PKO disabled. This is used by interfaces using
+ * the bgx mac.
+ *
+ * @param xiface Interface to bring up
+ *
+ * @return Zero on success, negative on failure
+ */
+int __cvmx_helper_bgx_sgmii_enable(int xiface)
+{
+ int port_count = cvmx_helper_ports_on_interface(xiface);
+
+ /* The helper's return value is deliberately ignored; this
+ * function has always reported success unconditionally.
+ */
+ (void)__cvmx_helper_bgx_sgmii_hardware_init(xiface, port_count);
+
+ return 0;
+}
+
+/**
+ * @INTERNAL
+ * Initialize the SERDES link for the first time or after a loss
+ * of link.
+ *
+ * @param xiface Interface to init
+ * @param index Index of port on the interface
+ *
+ * @return Zero on success, negative on failure
+ */
+static int __cvmx_helper_bgx_sgmii_hardware_init_link(int xiface, int index)
+{
+ cvmx_bgxx_gmp_pcs_mrx_control_t gmp_control;
+ cvmx_bgxx_gmp_pcs_miscx_ctl_t gmp_misc_ctl;
+ cvmx_bgxx_cmrx_config_t cmr_config;
+ int phy_mode, mode_1000x;
+ struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
+ int interface = xi.interface;
+ int node = xi.node;
+ int autoneg = 0;
+
+ if (!cvmx_helper_is_port_valid(xiface, index))
+ return 0;
+
+ if (debug)
+ debug("%s: interface %u:%d/%d\n", __func__, xi.node,
+ xi.interface, index);
+
+ gmp_control.u64 = csr_rd_node(
+ node, CVMX_BGXX_GMP_PCS_MRX_CONTROL(index, xi.interface));
+ /* Take PCS through a reset sequence */
+ gmp_control.s.reset = 1;
+ csr_wr_node(node, CVMX_BGXX_GMP_PCS_MRX_CONTROL(index, xi.interface),
+ gmp_control.u64);
+
+ /* Wait until GMP_PCS_MRX_CONTROL[reset] comes out of reset */
+ if (CVMX_WAIT_FOR_FIELD64_NODE(
+ node, CVMX_BGXX_GMP_PCS_MRX_CONTROL(index, xi.interface),
+ cvmx_bgxx_gmp_pcs_mrx_control_t, reset, ==, 0, 10000)) {
+ debug("SGMII%d: Timeout waiting for port %d to finish reset\n",
+ interface, index);
+ return -1;
+ }
+
+ cmr_config.u64 =
+ csr_rd_node(node, CVMX_BGXX_CMRX_CONFIG(index, xi.interface));
+
+ gmp_control.u64 = csr_rd_node(
+ node, CVMX_BGXX_GMP_PCS_MRX_CONTROL(index, xi.interface));
+ if (cvmx_helper_get_port_phy_present(xiface, index)) {
+ gmp_control.s.pwr_dn = 0;
+ } else {
+ /* No PHY present: force 1000 Mbps (spdmsb:spdlsb = 10b) */
+ gmp_control.s.spdmsb = 1;
+ gmp_control.s.spdlsb = 0;
+ gmp_control.s.pwr_dn = 0;
+ }
+ /* Write GMP_PCS_MR*_CONTROL[RST_AN]=1 to ensure a fresh SGMII
+ * negotiation starts.
+ */
+ autoneg = cvmx_helper_get_port_autonegotiation(xiface, index);
+ /* lmac_type 5 is RGMII, where PCS autonegotiation is not used */
+ gmp_control.s.rst_an = 1;
+ gmp_control.s.an_en = (cmr_config.s.lmac_type != 5) && autoneg;
+ csr_wr_node(node, CVMX_BGXX_GMP_PCS_MRX_CONTROL(index, xi.interface),
+ gmp_control.u64);
+
+ phy_mode = cvmx_helper_get_mac_phy_mode(xiface, index);
+ mode_1000x = cvmx_helper_get_1000x_mode(xiface, index);
+
+ gmp_misc_ctl.u64 = csr_rd_node(
+ node, CVMX_BGXX_GMP_PCS_MISCX_CTL(index, xi.interface));
+ gmp_misc_ctl.s.mac_phy = phy_mode;
+ gmp_misc_ctl.s.mode = mode_1000x;
+ csr_wr_node(node, CVMX_BGXX_GMP_PCS_MISCX_CTL(index, xi.interface),
+ gmp_misc_ctl.u64);
+
+ if (phy_mode || !autoneg)
+ /* In PHY mode we can't query the link status so we just
+ * assume that the link is up
+ */
+ return 0;
+
+ /* Wait for GMP_PCS_MRX_CONTROL[an_cpt] to be set, indicating that
+ * SGMII autonegotiation is complete. In MAC mode this isn't an
+ * ethernet link, but a link between OCTEON and PHY.
+ */
+ if (cmr_config.s.lmac_type != 5 &&
+ CVMX_WAIT_FOR_FIELD64_NODE(
+ node, CVMX_BGXX_GMP_PCS_MRX_STATUS(index, xi.interface),
+ cvmx_bgxx_gmp_pcs_mrx_status_t, an_cpt, ==, 1, 10000)) {
+ debug("SGMII%d: Port %d link timeout\n", interface, index);
+ return -1;
+ }
+
+ return 0;
+}
+
+/**
+ * @INTERNAL
+ * Configure an SGMII link to the specified speed after the SERDES
+ * link is up.
+ *
+ * @param xiface Interface to init
+ * @param index Index of port on the interface
+ * @param link_info Link state to configure
+ *
+ * @return Zero on success, negative on failure
+ */
+static int __cvmx_helper_bgx_sgmii_hardware_init_link_speed(
+	int xiface, int index, cvmx_helper_link_info_t link_info)
+{
+ cvmx_bgxx_cmrx_config_t cmr_config;
+ cvmx_bgxx_gmp_pcs_miscx_ctl_t gmp_miscx_ctl;
+ cvmx_bgxx_gmp_gmi_prtx_cfg_t gmp_prtx_cfg;
+ struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
+ int node = xi.node;
+
+ if (!cvmx_helper_is_port_valid(xiface, index))
+ return 0;
+
+ if (debug)
+ debug("%s: interface %u:%d/%d\n", __func__, xi.node,
+ xi.interface, index);
+
+ /* Disable GMX before we make any changes. */
+ cmr_config.u64 =
+ csr_rd_node(node, CVMX_BGXX_CMRX_CONFIG(index, xi.interface));
+ cmr_config.s.data_pkt_tx_en = 0;
+ cmr_config.s.data_pkt_rx_en = 0;
+ csr_wr_node(node, CVMX_BGXX_CMRX_CONFIG(index, xi.interface),
+ cmr_config.u64);
+
+ /* Wait for GMX to be idle */
+ if (CVMX_WAIT_FOR_FIELD64_NODE(
+ node, CVMX_BGXX_GMP_GMI_PRTX_CFG(index, xi.interface),
+ cvmx_bgxx_gmp_gmi_prtx_cfg_t, rx_idle, ==, 1, 10000) ||
+ CVMX_WAIT_FOR_FIELD64_NODE(
+ node, CVMX_BGXX_GMP_GMI_PRTX_CFG(index, xi.interface),
+ cvmx_bgxx_gmp_gmi_prtx_cfg_t, tx_idle, ==, 1, 10000)) {
+ debug("SGMII%d:%d: Timeout waiting for port %d to be idle\n",
+ node, xi.interface, index);
+ return -1;
+ }
+
+ /* Read GMX CFG again to make sure the disable completed */
+ gmp_prtx_cfg.u64 = csr_rd_node(
+ node, CVMX_BGXX_GMP_GMI_PRTX_CFG(index, xi.interface));
+
+ /*
+ * Get the misc control for PCS. We will need to set the
+ * duplication amount.
+ */
+ gmp_miscx_ctl.u64 = csr_rd_node(
+ node, CVMX_BGXX_GMP_PCS_MISCX_CTL(index, xi.interface));
+
+ /*
+ * Use GMXENO to force the link down if the status we get says
+ * it should be down.
+ */
+ gmp_miscx_ctl.s.gmxeno = !link_info.s.link_up;
+
+ /* Only change the duplex setting if the link is up */
+ if (link_info.s.link_up)
+ gmp_prtx_cfg.s.duplex = link_info.s.full_duplex;
+
+ /* Do speed based setting for GMX: speed_msb:speed select
+ * 10/100/1000 Mbps, slottime and samp_pt follow the speed,
+ * and the TX slot/burst registers are set per mode.
+ */
+ switch (link_info.s.speed) {
+ case 10:
+ gmp_prtx_cfg.s.speed = 0;
+ gmp_prtx_cfg.s.speed_msb = 1;
+ gmp_prtx_cfg.s.slottime = 0;
+ /* Setting from GMX-603 */
+ gmp_miscx_ctl.s.samp_pt = 25;
+ csr_wr_node(node,
+ CVMX_BGXX_GMP_GMI_TXX_SLOT(index, xi.interface),
+ 64);
+ csr_wr_node(node,
+ CVMX_BGXX_GMP_GMI_TXX_BURST(index, xi.interface),
+ 0);
+ break;
+ case 100:
+ gmp_prtx_cfg.s.speed = 0;
+ gmp_prtx_cfg.s.speed_msb = 0;
+ gmp_prtx_cfg.s.slottime = 0;
+ gmp_miscx_ctl.s.samp_pt = 0x5;
+ csr_wr_node(node,
+ CVMX_BGXX_GMP_GMI_TXX_SLOT(index, xi.interface),
+ 64);
+ csr_wr_node(node,
+ CVMX_BGXX_GMP_GMI_TXX_BURST(index, xi.interface),
+ 0);
+ break;
+ case 1000:
+ gmp_prtx_cfg.s.speed = 1;
+ gmp_prtx_cfg.s.speed_msb = 0;
+ gmp_prtx_cfg.s.slottime = 1;
+ gmp_miscx_ctl.s.samp_pt = 1;
+ csr_wr_node(node,
+ CVMX_BGXX_GMP_GMI_TXX_SLOT(index, xi.interface),
+ 512);
+ if (gmp_prtx_cfg.s.duplex)
+ /* full duplex */
+ csr_wr_node(node,
+ CVMX_BGXX_GMP_GMI_TXX_BURST(index,
+ xi.interface),
+ 0);
+ else
+ /* half duplex */
+ csr_wr_node(node,
+ CVMX_BGXX_GMP_GMI_TXX_BURST(index,
+ xi.interface),
+ 8192);
+ break;
+ default:
+ /* Unknown speed: leave speed-related fields unchanged */
+ break;
+ }
+
+ /* Write the new misc control for PCS */
+ csr_wr_node(node, CVMX_BGXX_GMP_PCS_MISCX_CTL(index, xi.interface),
+ gmp_miscx_ctl.u64);
+
+ /* Write the new GMX settings with the port still disabled */
+ csr_wr_node(node, CVMX_BGXX_GMP_GMI_PRTX_CFG(index, xi.interface),
+ gmp_prtx_cfg.u64);
+
+ /* Read GMX CFG again to make sure the config completed */
+ csr_rd_node(node, CVMX_BGXX_GMP_GMI_PRTX_CFG(index, xi.interface));
+
+ /* Enable back BGX. */
+ cmr_config.u64 =
+ csr_rd_node(node, CVMX_BGXX_CMRX_CONFIG(index, xi.interface));
+ if (debug)
+ debug("%s: Enabling tx and rx packets on %d:%d\n", __func__,
+ xi.interface, index);
+ cmr_config.s.data_pkt_tx_en = 1;
+ cmr_config.s.data_pkt_rx_en = 1;
+ csr_wr_node(node, CVMX_BGXX_CMRX_CONFIG(index, xi.interface),
+ cmr_config.u64);
+
+ return 0;
+}
+
+/**
+ * Enables or disables forward error correction
+ *
+ * @param xiface interface
+ * @param index port index
+ * @param enable set to true to enable FEC, false to disable
+ *
+ * @return 0 for success, -1 on error
+ *
+ * @NOTE: If autonegotiation is enabled then autonegotiation will be
+ * restarted for negotiating FEC.
+ */
+int cvmx_helper_set_fec(int xiface, int index, bool enable)
+{
+ cvmx_bgxx_spux_fec_control_t spu_fec_control;
+ struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
+ int interface = xi.interface;
+ int node = xi.node;
+ cvmx_helper_interface_mode_t mode;
+
+ /* Print node before interface to match the node:interface/index
+ * convention used by every other debug message in this file
+ * (arguments were previously swapped).
+ */
+ if (debug)
+ debug("%s: interface: %u:%d/%d: %sable\n", __func__, node,
+ interface, index, enable ? "en" : "dis");
+
+ mode = cvmx_helper_bgx_get_mode(xiface, index);
+
+ /* FEC is only supported for 10G/40G KR, XLAUI and XFI modes */
+ if (mode != CVMX_HELPER_INTERFACE_MODE_10G_KR &&
+ mode != CVMX_HELPER_INTERFACE_MODE_40G_KR4 &&
+ mode != CVMX_HELPER_INTERFACE_MODE_XLAUI &&
+ mode != CVMX_HELPER_INTERFACE_MODE_XFI)
+ return 0;
+
+ spu_fec_control.u64 =
+ csr_rd_node(node, CVMX_BGXX_SPUX_FEC_CONTROL(index, interface));
+
+ spu_fec_control.s.fec_en = enable;
+ csr_wr_node(node, CVMX_BGXX_SPUX_FEC_CONTROL(index, interface),
+ spu_fec_control.u64);
+
+ /* Record the new FEC state in the software port config */
+ cvmx_helper_set_port_fec(xiface, index, enable);
+
+ /* Restart autonegotiation so the new FEC ability is advertised */
+ if (cvmx_helper_get_port_autonegotiation(xiface, index))
+ return cvmx_helper_set_autonegotiation(xiface, index, true);
+ else
+ return 0;
+}
+
+/**
+ * Enables or disables autonegotiation for an interface.
+ *
+ * @param xiface interface to set autonegotiation
+ * @param index port index
+ * @param enable true to enable autonegotiation, false to disable it
+ *
+ * @return 0 for success, -1 on error.
+ */
+int cvmx_helper_set_autonegotiation(int xiface, int index, bool enable)
+{
+ union cvmx_bgxx_gmp_pcs_mrx_control gmp_control;
+ union cvmx_bgxx_spux_an_control spu_an_control;
+ union cvmx_bgxx_spux_an_adv spu_an_adv;
+ union cvmx_bgxx_spux_fec_control spu_fec_control;
+ struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
+ int interface = xi.interface;
+ int node = xi.node;
+ cvmx_helper_interface_mode_t mode;
+
+ /* Print node before interface to match the node:interface/index
+ * convention used by every other debug message in this file
+ * (arguments were previously swapped).
+ */
+ if (debug)
+ debug("%s: interface: %u:%d/%d: %sable\n", __func__, node,
+ interface, index, enable ? "en" : "dis");
+
+ mode = cvmx_helper_bgx_get_mode(xiface, index);
+
+ switch (mode) {
+ case CVMX_HELPER_INTERFACE_MODE_RGMII:
+ /* RGMII never autonegotiates at the PCS level */
+ enable = false;
+ /* fallthrough - RGMII shares the SGMII PCS programming */
+ case CVMX_HELPER_INTERFACE_MODE_SGMII:
+ gmp_control.u64 = csr_rd_node(
+ node, CVMX_BGXX_GMP_PCS_MRX_CONTROL(index, interface));
+ gmp_control.s.an_en = enable;
+ gmp_control.s.rst_an = enable;
+ csr_wr_node(node,
+ CVMX_BGXX_GMP_PCS_MRX_CONTROL(index, interface),
+ gmp_control.u64);
+ if (enable &&
+ CVMX_WAIT_FOR_FIELD64_NODE(
+ node,
+ CVMX_BGXX_GMP_PCS_MRX_STATUS(index, interface),
+ cvmx_bgxx_gmp_pcs_mrx_status_t, an_cpt, ==, 1,
+ 10000)) {
+ debug("SGMII%d: Port %d link timeout\n", interface,
+ index);
+ return -1;
+ }
+ break;
+ case CVMX_HELPER_INTERFACE_MODE_XAUI:
+ case CVMX_HELPER_INTERFACE_MODE_XFI:
+ case CVMX_HELPER_INTERFACE_MODE_10G_KR:
+ case CVMX_HELPER_INTERFACE_MODE_XLAUI:
+ case CVMX_HELPER_INTERFACE_MODE_40G_KR4:
+ /* Advertise only the ability matching the current mode,
+ * plus the current FEC setting.
+ */
+ spu_an_adv.u64 = csr_rd_node(
+ node, CVMX_BGXX_SPUX_AN_ADV(index, interface));
+ spu_fec_control.u64 = csr_rd_node(
+ node, CVMX_BGXX_SPUX_FEC_CONTROL(index, interface));
+ spu_an_adv.s.fec_req = spu_fec_control.s.fec_en;
+ spu_an_adv.s.fec_able = 1;
+ spu_an_adv.s.a100g_cr10 = 0;
+ spu_an_adv.s.a40g_cr4 = 0;
+ spu_an_adv.s.a40g_kr4 =
+ (mode == CVMX_HELPER_INTERFACE_MODE_40G_KR4);
+ spu_an_adv.s.a10g_kr =
+ (mode == CVMX_HELPER_INTERFACE_MODE_10G_KR);
+ spu_an_adv.s.a10g_kx4 = 0;
+ spu_an_adv.s.a1g_kx = 0;
+ spu_an_adv.s.xnp_able = 0;
+ spu_an_adv.s.rf = 0;
+ csr_wr_node(node, CVMX_BGXX_SPUX_AN_ADV(index, interface),
+ spu_an_adv.u64);
+ spu_fec_control.u64 = csr_rd_node(
+ node, CVMX_BGXX_SPUX_FEC_CONTROL(index, interface));
+ spu_an_control.u64 = csr_rd_node(
+ node, CVMX_BGXX_SPUX_AN_CONTROL(index, interface));
+ spu_an_control.s.an_en = enable;
+ spu_an_control.s.xnp_en = 0;
+ spu_an_control.s.an_restart = enable;
+ csr_wr_node(node, CVMX_BGXX_SPUX_AN_CONTROL(index, interface),
+ spu_an_control.u64);
+
+ if (enable &&
+ CVMX_WAIT_FOR_FIELD64_NODE(
+ node, CVMX_BGXX_SPUX_AN_STATUS(index, interface),
+ cvmx_bgxx_spux_an_status_t, an_complete, ==, 1,
+ 10000)) {
+ debug("XAUI/XLAUI/XFI%d: Port %d link timeout\n",
+ interface, index);
+ return -1;
+ }
+ break;
+ default:
+ break;
+ }
+
+ /* Record the new autonegotiation state in the port config */
+ cvmx_helper_set_port_autonegotiation(xiface, index, enable);
+
+ return 0;
+}
+
+/**
+ * @INTERNAL
+ * Return the link state of an IPD/PKO port as returned by
+ * auto negotiation. The result of this function may not match
+ * Octeon's link config if auto negotiation has changed since
+ * the last call to cvmx_helper_link_set(). This is used by
+ * interfaces using the bgx mac.
+ *
+ * @param xipd_port IPD/PKO port to query
+ *
+ * @return Link state
+ */
+cvmx_helper_link_info_t __cvmx_helper_bgx_sgmii_link_get(int xipd_port)
+{
+ cvmx_helper_link_info_t result;
+ cvmx_bgxx_gmp_pcs_mrx_control_t gmp_control;
+ cvmx_bgxx_gmp_pcs_miscx_ctl_t gmp_misc_ctl;
+ int xiface = cvmx_helper_get_interface_num(xipd_port);
+ struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
+ struct cvmx_xport xp = cvmx_helper_ipd_port_to_xport(xipd_port);
+ int node = xi.node;
+ int index = cvmx_helper_get_interface_index_num(xp.port);
+
+ /* Default: link down, speed/duplex zeroed */
+ result.u64 = 0;
+
+ if (!cvmx_helper_is_port_valid(xiface, index))
+ return result;
+
+ if (debug)
+ debug("%s: interface %u:%d/%d\n", __func__, xi.node,
+ xi.interface, index);
+
+ gmp_control.u64 = csr_rd_node(
+ node, CVMX_BGXX_GMP_PCS_MRX_CONTROL(index, xi.interface));
+ if (gmp_control.s.loopbck1) {
+ /* Internal loopback: derive a synthetic link state from
+ * the QLM baud rate (speed * 8/10 strips the 8b10b
+ * encoding overhead).
+ */
+ int qlm = cvmx_qlm_lmac(xiface, index);
+ int speed;
+
+ if (OCTEON_IS_MODEL(OCTEON_CN78XX))
+ speed = cvmx_qlm_get_gbaud_mhz_node(node, qlm);
+ else
+ speed = cvmx_qlm_get_gbaud_mhz(qlm);
+ /* Force 1Gbps full duplex link for internal loopback */
+ result.s.link_up = 1;
+ result.s.full_duplex = 1;
+ result.s.speed = speed * 8 / 10;
+ return result;
+ }
+
+ gmp_misc_ctl.u64 = csr_rd_node(
+ node, CVMX_BGXX_GMP_PCS_MISCX_CTL(index, xi.interface));
+ if (gmp_misc_ctl.s.mac_phy ||
+ cvmx_helper_get_port_force_link_up(xiface, index)) {
+ /* Same QLM-derived speed computation as the loopback
+ * branch above.
+ */
+ int qlm = cvmx_qlm_lmac(xiface, index);
+ int speed;
+
+ if (OCTEON_IS_MODEL(OCTEON_CN78XX))
+ speed = cvmx_qlm_get_gbaud_mhz_node(node, qlm);
+ else
+ speed = cvmx_qlm_get_gbaud_mhz(qlm);
+ /* PHY Mode */
+ /* Note that this also works for 1000base-X mode */
+
+ result.s.speed = speed * 8 / 10;
+ result.s.full_duplex = 1;
+ result.s.link_up = 1;
+ return result;
+ }
+
+ /* MAC Mode */
+ return __cvmx_helper_board_link_get(xipd_port);
+}
+
+/**
+ * This sequence brings down the link for the XCV RGMII interface
+ *
+ * @param interface Interface (BGX) number. Port index is always 0
+ */
+static void __cvmx_helper_bgx_rgmii_link_set_down(int interface)
+{
+ union cvmx_xcv_reset xcv_reset;
+ union cvmx_bgxx_cmrx_config cmr_config;
+ union cvmx_bgxx_gmp_pcs_mrx_control mr_control;
+ union cvmx_bgxx_cmrx_rx_fifo_len rx_fifo_len;
+ union cvmx_bgxx_cmrx_tx_fifo_len tx_fifo_len;
+
+ /* Stop the RX packet datapath first, then let in-flight data drain */
+ xcv_reset.u64 = csr_rd(CVMX_XCV_RESET);
+ xcv_reset.s.rx_pkt_rst_n = 0;
+ csr_wr(CVMX_XCV_RESET, xcv_reset.u64);
+ csr_rd(CVMX_XCV_RESET);
+ mdelay(10); /* Wait for 1 MTU */
+
+ cmr_config.u64 = csr_rd(CVMX_BGXX_CMRX_CONFIG(0, interface));
+ cmr_config.s.data_pkt_rx_en = 0;
+ csr_wr(CVMX_BGXX_CMRX_CONFIG(0, interface), cmr_config.u64);
+
+ /* Wait for RX and TX to be idle */
+ /* NOTE(review): the '&&' makes this loop exit as soon as EITHER
+ * the RX FIFO drains OR TX goes idle, although the comment says
+ * both are required - confirm whether '||' was intended. The
+ * loop also has no timeout, so it can spin forever if the
+ * hardware never drains.
+ */
+ do {
+ rx_fifo_len.u64 =
+ csr_rd(CVMX_BGXX_CMRX_RX_FIFO_LEN(0, interface));
+ tx_fifo_len.u64 =
+ csr_rd(CVMX_BGXX_CMRX_TX_FIFO_LEN(0, interface));
+ } while (rx_fifo_len.s.fifo_len > 0 && tx_fifo_len.s.lmac_idle != 1);
+
+ cmr_config.u64 = csr_rd(CVMX_BGXX_CMRX_CONFIG(0, interface));
+ cmr_config.s.data_pkt_tx_en = 0;
+ csr_wr(CVMX_BGXX_CMRX_CONFIG(0, interface), cmr_config.u64);
+
+ /* Stop the TX datapath and power down the PCS */
+ xcv_reset.u64 = csr_rd(CVMX_XCV_RESET);
+ xcv_reset.s.tx_pkt_rst_n = 0;
+ csr_wr(CVMX_XCV_RESET, xcv_reset.u64);
+ mr_control.u64 = csr_rd(CVMX_BGXX_GMP_PCS_MRX_CONTROL(0, interface));
+ mr_control.s.pwr_dn = 1;
+ csr_wr(CVMX_BGXX_GMP_PCS_MRX_CONTROL(0, interface), mr_control.u64);
+}
+
+/**
+ * Sets a BGX SGMII link down.
+ *
+ * Disables CMR packet TX/RX, turns off autonegotiation when operating
+ * in MAC mode, and asserts GMXENO to force the link down. The link is
+ * re-enabled later by the link bring-up path.
+ *
+ * @param node Octeon node number
+ * @param iface BGX interface number
+ * @param index BGX port index
+ */
+static void __cvmx_helper_bgx_sgmii_link_set_down(int node, int iface,
+ int index)
+{
+ union cvmx_bgxx_gmp_pcs_miscx_ctl gmp_misc_ctl;
+ union cvmx_bgxx_gmp_pcs_mrx_control gmp_control;
+ union cvmx_bgxx_cmrx_config cmr_config;
+
+ /* Stop both packet directions in the CMR first */
+ cmr_config.u64 = csr_rd_node(node, CVMX_BGXX_CMRX_CONFIG(index, iface));
+ cmr_config.s.data_pkt_tx_en = 0;
+ cmr_config.s.data_pkt_rx_en = 0;
+ csr_wr_node(node, CVMX_BGXX_CMRX_CONFIG(index, iface), cmr_config.u64);
+
+ gmp_misc_ctl.u64 =
+ csr_rd_node(node, CVMX_BGXX_GMP_PCS_MISCX_CTL(index, iface));
+
+ /* Disable autonegotiation only when in MAC mode. */
+ if (gmp_misc_ctl.s.mac_phy == 0) {
+ gmp_control.u64 = csr_rd_node(
+ node, CVMX_BGXX_GMP_PCS_MRX_CONTROL(index, iface));
+ gmp_control.s.an_en = 0;
+ csr_wr_node(node, CVMX_BGXX_GMP_PCS_MRX_CONTROL(index, iface),
+ gmp_control.u64);
+ }
+
+ /* Use GMXENO to force the link down. It will get reenabled later... */
+ gmp_misc_ctl.s.gmxeno = 1;
+ csr_wr_node(node, CVMX_BGXX_GMP_PCS_MISCX_CTL(index, iface),
+ gmp_misc_ctl.u64);
+ /* Read back to ensure the write has posted before returning */
+ csr_rd_node(node, CVMX_BGXX_GMP_PCS_MISCX_CTL(index, iface));
+}
+
+/**
+ * @INTERNAL
+ * Configure an IPD/PKO port for the specified link state. This
+ * function does not influence auto negotiation at the PHY level.
+ * The passed link state must always match the link state returned
+ * by cvmx_helper_link_get(). It is normally best to use
+ * cvmx_helper_link_autoconf() instead. This is used by interfaces
+ * using the bgx mac.
+ *
+ * Flow: link-up re-enables the CMR (with the BGX-22429 workaround on
+ * CN78XX pass 1.x) and re-runs hardware link init; link-down on an
+ * XCV RGMII port runs the RGMII bring-down sequence; any other
+ * link-down case runs the SGMII bring-down and returns early. Note
+ * the up/RGMII-down branches fall through to the speed programming
+ * below, but the plain SGMII-down branch does not.
+ *
+ * @param xipd_port IPD/PKO port to configure
+ * @param link_info The new link state
+ *
+ * @return Zero on success, negative on failure
+ */
+int __cvmx_helper_bgx_sgmii_link_set(int xipd_port,
+ cvmx_helper_link_info_t link_info)
+{
+ cvmx_bgxx_cmrx_config_t cmr_config;
+ int xiface = cvmx_helper_get_interface_num(xipd_port);
+ struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
+ struct cvmx_xport xp = cvmx_helper_ipd_port_to_xport(xipd_port);
+ int node = xi.node;
+ int index = cvmx_helper_get_interface_index_num(xp.port);
+ const int iface = xi.interface;
+ int rc = 0;
+
+ /* Nothing to do for an invalid/unpopulated port */
+ if (!cvmx_helper_is_port_valid(xiface, index))
+ return 0;
+
+ if (debug)
+ debug("%s: interface %u:%d/%d\n", __func__, xi.node,
+ xi.interface, index);
+
+ cmr_config.u64 = csr_rd_node(node, CVMX_BGXX_CMRX_CONFIG(index, iface));
+ if (link_info.s.link_up) {
+ cmr_config.s.enable = 1;
+ csr_wr_node(node, CVMX_BGXX_CMRX_CONFIG(index, iface),
+ cmr_config.u64);
+ /* Apply workaround for errata BGX-22429 */
+ /* On CN78XX pass 1.x, LMAC 0 must be enabled whenever any
+  * other LMAC on the same BGX is enabled
+  */
+ if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X) && index) {
+ cvmx_bgxx_cmrx_config_t cmr0;
+
+ cmr0.u64 = csr_rd_node(node,
+ CVMX_BGXX_CMRX_CONFIG(0, iface));
+ cmr0.s.enable = 1;
+ csr_wr_node(node, CVMX_BGXX_CMRX_CONFIG(0, iface),
+ cmr0.u64);
+ }
+ __cvmx_helper_bgx_sgmii_hardware_init_link(xiface, index);
+ /* NOTE(review): this call passes xi.interface while the call
+  * after this if/else chain passes xiface - equivalent only on
+  * node 0; confirm which form cvmx_helper_bgx_is_rgmii expects
+  */
+ } else if (cvmx_helper_bgx_is_rgmii(xi.interface, index)) {
+ if (debug)
+ debug("%s: Bringing down XCV RGMII interface %d\n",
+ __func__, xi.interface);
+ __cvmx_helper_bgx_rgmii_link_set_down(xi.interface);
+ } else { /* Link is down, not RGMII */
+ __cvmx_helper_bgx_sgmii_link_set_down(node, iface, index);
+ return 0;
+ }
+ /* Program the link speed for the up/RGMII cases */
+ rc = __cvmx_helper_bgx_sgmii_hardware_init_link_speed(xiface, index,
+ link_info);
+ if (cvmx_helper_bgx_is_rgmii(xiface, index))
+ rc = __cvmx_helper_bgx_rgmii_speed(link_info);
+
+ return rc;
+}
+
+/**
+ * @INTERNAL
+ * Bringup XAUI interface. After this call packet I/O should be
+ * fully functional.
+ *
+ * Despite the name this also covers the other SPU/SMU based modes:
+ * RXAUI, XLAUI, XFI, 10GBASE-KR and 40GBASE-KR4. The numbered steps
+ * in the comments follow the BGX bring-up sequence from the HRM,
+ * partially reordered (see note below on the SPU reset).
+ *
+ * @param index port on interface to bring up
+ * @param xiface Interface to bring up
+ *
+ * @return Zero on success, negative on failure
+ */
+static int __cvmx_helper_bgx_xaui_init(int index, int xiface)
+{
+ cvmx_bgxx_cmrx_config_t cmr_config;
+ cvmx_bgxx_spux_br_pmd_control_t pmd_control;
+ cvmx_bgxx_spux_misc_control_t spu_misc_control;
+ cvmx_bgxx_spux_control1_t spu_control1;
+ cvmx_bgxx_spux_an_control_t spu_an_control;
+ cvmx_bgxx_spux_an_adv_t spu_an_adv;
+ cvmx_bgxx_spux_fec_control_t spu_fec_control;
+ cvmx_bgxx_spu_dbg_control_t spu_dbg_control;
+ cvmx_bgxx_smux_tx_append_t smu_tx_append;
+ cvmx_bgxx_smux_tx_ctl_t smu_tx_ctl;
+ cvmx_helper_interface_mode_t mode;
+ struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
+ int interface = xi.interface;
+ int node = xi.node;
+ int use_auto_neg = 0;
+ int kr_mode = 0;
+
+ if (debug)
+ debug("%s: interface %u:%d/%d\n", __func__, xi.node,
+ xi.interface, index);
+
+ mode = cvmx_helper_bgx_get_mode(xiface, index);
+
+ /* KR modes optionally use clause-73 autonegotiation; an override
+  * hook takes precedence over the per-port configuration
+  */
+ if (mode == CVMX_HELPER_INTERFACE_MODE_10G_KR ||
+ mode == CVMX_HELPER_INTERFACE_MODE_40G_KR4) {
+ kr_mode = 1;
+ if (cvmx_helper_bgx_override_autoneg)
+ use_auto_neg =
+ cvmx_helper_bgx_override_autoneg(xiface, index);
+ else
+ use_auto_neg = cvmx_helper_get_port_autonegotiation(
+ xiface, index);
+ }
+
+ /* NOTE: This code was moved first, out of order compared to the HRM
+  * because the RESET causes all SPU registers to loose their value
+  */
+ /* 4. Next, bring up the SMU/SPU and the BGX reconciliation layer
+  * logic:
+  */
+ /* 4a. Take SMU/SPU through a reset sequence. Write
+  * BGX(0..5)_SPU(0..3)_CONTROL1[RESET] = 1. Read
+  * BGX(0..5)_SPU(0..3)_CONTROL1[RESET] until it changes value to 0. Keep
+  * BGX(0..5)_SPU(0..3)_MISC_CONTROL[RX_PACKET_DIS] = 1 to disable
+  * reception.
+  */
+ spu_control1.u64 =
+ csr_rd_node(node, CVMX_BGXX_SPUX_CONTROL1(index, xi.interface));
+ spu_control1.s.reset = 1;
+ csr_wr_node(node, CVMX_BGXX_SPUX_CONTROL1(index, xi.interface),
+ spu_control1.u64);
+
+ /* 1. Wait for PCS to come out of reset */
+ if (CVMX_WAIT_FOR_FIELD64_NODE(
+ node, CVMX_BGXX_SPUX_CONTROL1(index, xi.interface),
+ cvmx_bgxx_spux_control1_t, reset, ==, 0, 10000)) {
+ debug("BGX%d:%d: SPU stuck in reset\n", node, interface);
+ return -1;
+ }
+
+ /* 2. Write BGX(0..5)_CMR(0..3)_CONFIG[ENABLE] to 0,
+  * BGX(0..5)_SPU(0..3)_CONTROL1[LO_PWR] = 1 and
+  * BGX(0..5)_SPU(0..3)_MISC_CONTROL[RX_PACKET_DIS] = 1.
+  */
+ spu_control1.u64 =
+ csr_rd_node(node, CVMX_BGXX_SPUX_CONTROL1(index, xi.interface));
+ spu_control1.s.lo_pwr = 1;
+ csr_wr_node(node, CVMX_BGXX_SPUX_CONTROL1(index, xi.interface),
+ spu_control1.u64);
+
+ spu_misc_control.u64 = csr_rd_node(
+ node, CVMX_BGXX_SPUX_MISC_CONTROL(index, xi.interface));
+ spu_misc_control.s.rx_packet_dis = 1;
+ csr_wr_node(node, CVMX_BGXX_SPUX_MISC_CONTROL(index, xi.interface),
+ spu_misc_control.u64);
+
+ /* 3. At this point, it may be appropriate to disable all BGX and
+  * SMU/SPU interrupts, as a number of them will occur during bring-up
+  * of the Link.
+  * - zero BGX(0..5)_SMU(0..3)_RX_INT
+  * - zero BGX(0..5)_SMU(0..3)_TX_INT
+  * - zero BGX(0..5)_SPU(0..3)_INT
+  */
+ /* Interrupt bits are W1C, so writing back the read value acks all
+  * currently pending interrupts
+  */
+ csr_wr_node(node, CVMX_BGXX_SMUX_RX_INT(index, xi.interface),
+ csr_rd_node(node,
+ CVMX_BGXX_SMUX_RX_INT(index, xi.interface)));
+ csr_wr_node(node, CVMX_BGXX_SMUX_TX_INT(index, xi.interface),
+ csr_rd_node(node,
+ CVMX_BGXX_SMUX_TX_INT(index, xi.interface)));
+ csr_wr_node(node, CVMX_BGXX_SPUX_INT(index, xi.interface),
+ csr_rd_node(node, CVMX_BGXX_SPUX_INT(index, xi.interface)));
+
+ /* 4. Configure the BGX LMAC. */
+ /* 4a. Configure the LMAC type (40GBASE-R/10GBASE-R/RXAUI/XAUI) and
+  * SerDes selection in the BGX(0..5)_CMR(0..3)_CONFIG register, but keep
+  * the ENABLE, DATA_PKT_TX_EN and DATA_PKT_RX_EN bits clear.
+  */
+ /* Already done in bgx_setup_one_time */
+
+ /* 4b. Write BGX(0..5)_SPU(0..3)_CONTROL1[LO_PWR] = 1 and
+  * BGX(0..5)_SPU(0..3)_MISC_CONTROL[RX_PACKET_DIS] = 1.
+  */
+ /* 4b. Initialize the selected SerDes lane(s) in the QLM. See Section
+  * 28.1.2.2 in the GSER chapter.
+  */
+ /* Already done in QLM setup */
+
+ /* 4c. For 10GBASE-KR or 40GBASE-KR, enable link training by writing
+  * BGX(0..5)_SPU(0..3)_BR_PMD_CONTROL[TRAIN_EN] = 1.
+  */
+
+ if (kr_mode && !OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X)) {
+ /* Clear the PMD coefficient/report registers before enabling
+  * training (they do not self-clear, see errata BGX-20968)
+  */
+ csr_wr_node(node,
+ CVMX_BGXX_SPUX_BR_PMD_LP_CUP(index, interface), 0);
+ csr_wr_node(node,
+ CVMX_BGXX_SPUX_BR_PMD_LD_CUP(index, interface), 0);
+ csr_wr_node(node,
+ CVMX_BGXX_SPUX_BR_PMD_LD_REP(index, interface), 0);
+ pmd_control.u64 = csr_rd_node(
+ node, CVMX_BGXX_SPUX_BR_PMD_CONTROL(index, interface));
+ pmd_control.s.train_en = 1;
+ csr_wr_node(node,
+ CVMX_BGXX_SPUX_BR_PMD_CONTROL(index, interface),
+ pmd_control.u64);
+ }
+
+ /* 4d. Program all other relevant BGX configuration while
+  * BGX(0..5)_CMR(0..3)_CONFIG[ENABLE] = 0. This includes all things
+  * described in this chapter.
+  */
+ /* Always add FCS to PAUSE frames */
+ smu_tx_append.u64 = csr_rd_node(
+ node, CVMX_BGXX_SMUX_TX_APPEND(index, xi.interface));
+ smu_tx_append.s.fcs_c = 1;
+ csr_wr_node(node, CVMX_BGXX_SMUX_TX_APPEND(index, xi.interface),
+ smu_tx_append.u64);
+
+ /* 4e. If Forward Error Correction is desired for 10GBASE-R or
+  * 40GBASE-R, enable it by writing
+  * BGX(0..5)_SPU(0..3)_FEC_CONTROL[FEC_EN] = 1.
+  */
+ /* FEC is optional for 10GBASE-KR, 40GBASE-KR4, and XLAUI. We're going
+  * to disable it by default
+  */
+ /* As with autonegotiation, an override hook beats the per-port
+  * configuration
+  */
+ spu_fec_control.u64 = csr_rd_node(
+ node, CVMX_BGXX_SPUX_FEC_CONTROL(index, xi.interface));
+ if (cvmx_helper_bgx_override_fec)
+ spu_fec_control.s.fec_en =
+ cvmx_helper_bgx_override_fec(xiface, index);
+ else
+ spu_fec_control.s.fec_en =
+ cvmx_helper_get_port_fec(xiface, index);
+ csr_wr_node(node, CVMX_BGXX_SPUX_FEC_CONTROL(index, xi.interface),
+ spu_fec_control.u64);
+
+ /* 4f. If Auto-Negotiation is desired, configure and enable
+  * Auto-Negotiation as described in Section 33.6.2.
+  */
+ spu_an_control.u64 = csr_rd_node(
+ node, CVMX_BGXX_SPUX_AN_CONTROL(index, xi.interface));
+ /* Disable extended next pages */
+ spu_an_control.s.xnp_en = 0;
+ spu_an_control.s.an_en = use_auto_neg;
+ csr_wr_node(node, CVMX_BGXX_SPUX_AN_CONTROL(index, xi.interface),
+ spu_an_control.u64);
+
+ /* Advertise only the mode actually configured (KR/KR4), with the
+  * FEC ability/request bits mirroring the FEC_CONTROL setting above
+  */
+ spu_fec_control.u64 = csr_rd_node(
+ node, CVMX_BGXX_SPUX_FEC_CONTROL(index, xi.interface));
+ spu_an_adv.u64 =
+ csr_rd_node(node, CVMX_BGXX_SPUX_AN_ADV(index, xi.interface));
+ spu_an_adv.s.fec_req = spu_fec_control.s.fec_en;
+ spu_an_adv.s.fec_able = 1;
+ spu_an_adv.s.a100g_cr10 = 0;
+ spu_an_adv.s.a40g_cr4 = 0;
+ spu_an_adv.s.a40g_kr4 = (mode == CVMX_HELPER_INTERFACE_MODE_40G_KR4);
+ spu_an_adv.s.a10g_kr = (mode == CVMX_HELPER_INTERFACE_MODE_10G_KR);
+ spu_an_adv.s.a10g_kx4 = 0;
+ spu_an_adv.s.a1g_kx = 0;
+ spu_an_adv.s.xnp_able = 0;
+ spu_an_adv.s.rf = 0;
+ csr_wr_node(node, CVMX_BGXX_SPUX_AN_ADV(index, xi.interface),
+ spu_an_adv.u64);
+
+ /* 3. Set BGX(0..5)_SPU_DBG_CONTROL[AN_ARB_LINK_CHK_EN] = 1. */
+ spu_dbg_control.u64 =
+ csr_rd_node(node, CVMX_BGXX_SPU_DBG_CONTROL(xi.interface));
+ spu_dbg_control.s.an_nonce_match_dis = 1; /* Needed for loopback */
+ spu_dbg_control.s.an_arb_link_chk_en |= kr_mode;
+ csr_wr_node(node, CVMX_BGXX_SPU_DBG_CONTROL(xi.interface),
+ spu_dbg_control.u64);
+
+ /* 4. Execute the link bring-up sequence in Section 33.6.3. */
+
+ /* 5. If the auto-negotiation protocol is successful,
+  * BGX(0..5)_SPU(0..3)_AN_ADV[AN_COMPLETE] is set along with
+  * BGX(0..5)_SPU(0..3)_INT[AN_COMPLETE] when the link is up.
+  */
+
+ /* 3h. Set BGX(0..5)_CMR(0..3)_CONFIG[ENABLE] = 1 and
+  * BGX(0..5)_SPU(0..3)_CONTROL1[LO_PWR] = 0 to enable the LMAC.
+  */
+ cmr_config.u64 =
+ csr_rd_node(node, CVMX_BGXX_CMRX_CONFIG(index, xi.interface));
+ cmr_config.s.enable = 1;
+ csr_wr_node(node, CVMX_BGXX_CMRX_CONFIG(index, xi.interface),
+ cmr_config.u64);
+ /* Apply workaround for errata BGX-22429 */
+ /* On CN78XX pass 1.x, LMAC 0 must be enabled whenever any other
+  * LMAC on the same BGX is enabled
+  */
+ if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X) && index) {
+ cvmx_bgxx_cmrx_config_t cmr0;
+
+ cmr0.u64 = csr_rd_node(node,
+ CVMX_BGXX_CMRX_CONFIG(0, xi.interface));
+ cmr0.s.enable = 1;
+ csr_wr_node(node, CVMX_BGXX_CMRX_CONFIG(0, xi.interface),
+ cmr0.u64);
+ }
+
+ spu_control1.u64 =
+ csr_rd_node(node, CVMX_BGXX_SPUX_CONTROL1(index, xi.interface));
+ spu_control1.s.lo_pwr = 0;
+ csr_wr_node(node, CVMX_BGXX_SPUX_CONTROL1(index, xi.interface),
+ spu_control1.u64);
+
+ /* 4g. Set the polarity and lane swapping of the QLM SerDes. Refer to
+  * Section 33.4.1, BGX(0..5)_SPU(0..3)_MISC_CONTROL[XOR_TXPLRT,XOR_RXPLRT]
+  * and BGX(0..5)_SPU(0..3)_MISC_CONTROL[TXPLRT,RXPLRT].
+  */
+
+ /* 4c. Write BGX(0..5)_SPU(0..3)_CONTROL1[LO_PWR] = 0. */
+ /* NOTE(review): this read-modify-write repeats the LO_PWR clear
+  * performed just above (step 3h); appears redundant but harmless -
+  * kept to match the vendor sequence
+  */
+ spu_control1.u64 =
+ csr_rd_node(node, CVMX_BGXX_SPUX_CONTROL1(index, xi.interface));
+ spu_control1.s.lo_pwr = 0;
+ csr_wr_node(node, CVMX_BGXX_SPUX_CONTROL1(index, xi.interface),
+ spu_control1.u64);
+
+ /* 4d. Select Deficit Idle Count mode and unidirectional enable/disable
+  * via BGX(0..5)_SMU(0..3)_TX_CTL[DIC_EN,UNI_EN].
+  */
+ smu_tx_ctl.u64 =
+ csr_rd_node(node, CVMX_BGXX_SMUX_TX_CTL(index, xi.interface));
+ smu_tx_ctl.s.dic_en = 1;
+ smu_tx_ctl.s.uni_en = 0;
+ csr_wr_node(node, CVMX_BGXX_SMUX_TX_CTL(index, xi.interface),
+ smu_tx_ctl.u64);
+
+ {
+ /* Calculate the number of s-clk cycles per usec. */
+ /* NOTE(review): SCLK frequency is hard-coded to 1200 MHz here
+  * (existing todo); should be derived from the actual SCLK rate
+  */
+ const u64 clock_mhz = 1200; /* todo: fixme */
+ cvmx_bgxx_spu_dbg_control_t dbg_control;
+
+ dbg_control.u64 = csr_rd_node(
+ node, CVMX_BGXX_SPU_DBG_CONTROL(xi.interface));
+ dbg_control.s.us_clk_period = clock_mhz - 1;
+ csr_wr_node(node, CVMX_BGXX_SPU_DBG_CONTROL(xi.interface),
+ dbg_control.u64);
+ }
+ /* The PHY often takes at least 100ms to stabilize */
+ __cvmx_helper_bgx_interface_enable_delay(mode);
+ return 0;
+}
+
+/**
+ * Enable and kick off BASE-R link training on one BGX LMAC.
+ *
+ * @param node Octeon node number
+ * @param unit BGX interface (unit) number
+ * @param index LMAC index within the BGX
+ */
+static void __cvmx_bgx_start_training(int node, int unit, int index)
+{
+ cvmx_bgxx_spux_int_t intr;
+ cvmx_bgxx_spux_br_pmd_control_t pmd;
+ cvmx_bgxx_spux_an_control_t an;
+
+ /* Ack any stale training interrupts; the bits are write-1-to-clear */
+ intr.u64 = 0;
+ intr.s.training_failure = 1;
+ intr.s.training_done = 1;
+ csr_wr_node(node, CVMX_BGXX_SPUX_INT(index, unit), intr.u64);
+
+ /* Errata BGX-20968: the PMD coefficient/report registers are not
+  * cleared by a training restart, so zero them by hand.
+  */
+ csr_wr_node(node, CVMX_BGXX_SPUX_BR_PMD_LP_CUP(index, unit), 0);
+ csr_wr_node(node, CVMX_BGXX_SPUX_BR_PMD_LD_CUP(index, unit), 0);
+ csr_wr_node(node, CVMX_BGXX_SPUX_BR_PMD_LD_REP(index, unit), 0);
+
+ /* Turn autonegotiation off before training is started */
+ an.u64 = csr_rd_node(node, CVMX_BGXX_SPUX_AN_CONTROL(index, unit));
+ an.s.an_en = 0;
+ csr_wr_node(node, CVMX_BGXX_SPUX_AN_CONTROL(index, unit), an.u64);
+ udelay(1);
+
+ /* Enable link training... */
+ pmd.u64 = csr_rd_node(node, CVMX_BGXX_SPUX_BR_PMD_CONTROL(index, unit));
+ pmd.s.train_en = 1;
+ csr_wr_node(node, CVMX_BGXX_SPUX_BR_PMD_CONTROL(index, unit), pmd.u64);
+
+ /* ...then request a (re)start of the training state machine */
+ udelay(1);
+ pmd.u64 = csr_rd_node(node, CVMX_BGXX_SPUX_BR_PMD_CONTROL(index, unit));
+ pmd.s.train_restart = 1;
+ csr_wr_node(node, CVMX_BGXX_SPUX_BR_PMD_CONTROL(index, unit), pmd.u64);
+}
+
+/**
+ * Restart an already-enabled BASE-R link training session on a BGX LMAC.
+ *
+ * Unlike __cvmx_bgx_start_training() this leaves autonegotiation and
+ * TRAIN_EN untouched and only pulses TRAIN_RESTART.
+ *
+ * @param node Octeon node number
+ * @param unit BGX interface (unit) number
+ * @param index LMAC index within the BGX
+ */
+static void __cvmx_bgx_restart_training(int node, int unit, int index)
+{
+ cvmx_bgxx_spux_int_t intr;
+ cvmx_bgxx_spux_br_pmd_control_t pmd;
+
+ /* Ack any stale training interrupts; the bits are write-1-to-clear */
+ intr.u64 = 0;
+ intr.s.training_failure = 1;
+ intr.s.training_done = 1;
+ csr_wr_node(node, CVMX_BGXX_SPUX_INT(index, unit), intr.u64);
+
+ udelay(1700); /* Wait 1.7 msec */
+
+ /* Errata BGX-20968: the PMD coefficient/report registers are not
+  * cleared by a training restart, so zero them by hand.
+  */
+ csr_wr_node(node, CVMX_BGXX_SPUX_BR_PMD_LP_CUP(index, unit), 0);
+ csr_wr_node(node, CVMX_BGXX_SPUX_BR_PMD_LD_CUP(index, unit), 0);
+ csr_wr_node(node, CVMX_BGXX_SPUX_BR_PMD_LD_REP(index, unit), 0);
+
+ /* Pulse TRAIN_RESTART to rerun training */
+ pmd.u64 = csr_rd_node(node, CVMX_BGXX_SPUX_BR_PMD_CONTROL(index, unit));
+ pmd.s.train_restart = 1;
+ csr_wr_node(node, CVMX_BGXX_SPUX_BR_PMD_CONTROL(index, unit), pmd.u64);
+}
+
+/**
+ * @INTERNAL
+ * Wrapper function to configure the BGX, does not enable.
+ *
+ * SGMII/RGMII ports get the GMP TX threshold and one-time SGMII init;
+ * all other modes go through the full XAUI/SPU bring-up via
+ * __cvmx_helper_bgx_xaui_init(), followed by the SMU TX threshold and
+ * the optional RXAUI disparity setting.
+ *
+ * @param xipd_port IPD/PKO port to configure.
+ * @param phy_pres If set, enable disparity, only applies to RXAUI interface
+ *
+ * @return Zero on success, negative on failure.
+ */
+int __cvmx_helper_bgx_port_init(int xipd_port, int phy_pres)
+{
+ int xiface = cvmx_helper_get_interface_num(xipd_port);
+ struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
+ struct cvmx_xport xp = cvmx_helper_ipd_port_to_xport(xipd_port);
+ int index = cvmx_helper_get_interface_index_num(xp.port);
+ cvmx_helper_interface_mode_t mode;
+
+ if (debug)
+ debug("%s: interface %u:%d/%d\n", __func__, xi.node,
+ xi.interface, index);
+
+ mode = cvmx_helper_bgx_get_mode(xiface, index);
+
+ /* Assign the port kind (PKND) used by PKI/PKO for this port */
+ __cvmx_bgx_common_init_pknd(xiface, index);
+
+ if (mode == CVMX_HELPER_INTERFACE_MODE_SGMII ||
+ mode == CVMX_HELPER_INTERFACE_MODE_RGMII) {
+ cvmx_bgxx_gmp_gmi_txx_thresh_t gmi_tx_thresh;
+ cvmx_bgxx_gmp_gmi_txx_append_t gmp_txx_append;
+ cvmx_bgxx_gmp_gmi_txx_sgmii_ctl_t gmp_sgmii_ctl;
+
+ /* Set TX Threshold */
+ gmi_tx_thresh.u64 = 0;
+ gmi_tx_thresh.s.cnt = 0x20;
+ csr_wr_node(xi.node,
+ CVMX_BGXX_GMP_GMI_TXX_THRESH(index, xi.interface),
+ gmi_tx_thresh.u64);
+ __cvmx_helper_bgx_sgmii_hardware_init_one_time(xiface, index);
+ /* ALIGN must only be set when the preamble is disabled */
+ gmp_txx_append.u64 = csr_rd_node(
+ xi.node,
+ CVMX_BGXX_GMP_GMI_TXX_APPEND(index, xi.interface));
+ gmp_sgmii_ctl.u64 = csr_rd_node(
+ xi.node,
+ CVMX_BGXX_GMP_GMI_TXX_SGMII_CTL(index, xi.interface));
+ gmp_sgmii_ctl.s.align = gmp_txx_append.s.preamble ? 0 : 1;
+ csr_wr_node(xi.node,
+ CVMX_BGXX_GMP_GMI_TXX_SGMII_CTL(index,
+ xi.interface),
+ gmp_sgmii_ctl.u64);
+ if (mode == CVMX_HELPER_INTERFACE_MODE_RGMII) {
+ /* Disable XCV interface when initialized */
+ union cvmx_xcv_reset xcv_reset;
+
+ if (debug)
+ debug("%s: Disabling RGMII XCV interface\n",
+ __func__);
+ xcv_reset.u64 = csr_rd(CVMX_XCV_RESET);
+ xcv_reset.s.enable = 0;
+ xcv_reset.s.tx_pkt_rst_n = 0;
+ xcv_reset.s.rx_pkt_rst_n = 0;
+ csr_wr(CVMX_XCV_RESET, xcv_reset.u64);
+ }
+ } else {
+ int res, cred;
+ cvmx_bgxx_smux_tx_thresh_t smu_tx_thresh;
+
+ res = __cvmx_helper_bgx_xaui_init(index, xiface);
+ if (res == -1) {
+#ifdef DEBUG_BGX
+ debug("Failed to enable XAUI for %d:BGX(%d,%d)\n",
+ xi.node, xi.interface, index);
+#endif
+ return res;
+ }
+ /* See BGX_SMU_TX_THRESH register description */
+ /* NOTE(review): no guard against cred < 10 here, which would
+  * underflow CNT - confirm the FIFO size can never be that
+  * small for these modes
+  */
+ cred = __cvmx_helper_bgx_fifo_size(xiface, index) >> 4;
+ smu_tx_thresh.u64 = 0;
+ smu_tx_thresh.s.cnt = cred - 10;
+ csr_wr_node(xi.node,
+ CVMX_BGXX_SMUX_TX_THRESH(index, xi.interface),
+ smu_tx_thresh.u64);
+ if (debug)
+ debug("%s: BGX%d:%d TX-thresh=%d\n", __func__,
+ xi.interface, index,
+ (unsigned int)smu_tx_thresh.s.cnt);
+
+ /* Set disparity for RXAUI interface as described in the
+  * Marvell RXAUI Interface specification.
+  */
+ if (mode == CVMX_HELPER_INTERFACE_MODE_RXAUI && phy_pres) {
+ cvmx_bgxx_spux_misc_control_t misc_control;
+
+ misc_control.u64 = csr_rd_node(
+ xi.node, CVMX_BGXX_SPUX_MISC_CONTROL(
+ index, xi.interface));
+ misc_control.s.intlv_rdisp = 1;
+ csr_wr_node(xi.node,
+ CVMX_BGXX_SPUX_MISC_CONTROL(index,
+ xi.interface),
+ misc_control.u64);
+ }
+ }
+ return 0;
+}
+
+/**
+ * @INTERNAL
+ * Configure a port for internal and/or external loopback. Internal loopback
+ * causes packets sent by the port to be received by Octeon. External loopback
+ * causes packets received from the wire to sent out again. This is used by
+ * interfaces using the bgx mac.
+ *
+ * RGMII (XCV) ports set the loopback bits in XCV_CTL; all other SGMII
+ * ports use the PCS LOOPBCK1 (internal) and LOOPBCK2 (external) bits.
+ * Both paths re-run the hardware link init afterwards.
+ *
+ * @param xipd_port IPD/PKO port to loopback.
+ * @param enable_internal
+ * Non zero if you want internal loopback
+ * @param enable_external
+ * Non zero if you want external loopback
+ *
+ * @return Zero on success, negative on failure.
+ */
+int __cvmx_helper_bgx_sgmii_configure_loopback(int xipd_port,
+ int enable_internal,
+ int enable_external)
+{
+ int xiface = cvmx_helper_get_interface_num(xipd_port);
+ struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
+ struct cvmx_xport xp = cvmx_helper_ipd_port_to_xport(xipd_port);
+ int node = xi.node;
+ int index = cvmx_helper_get_interface_index_num(xp.port);
+ cvmx_bgxx_gmp_pcs_mrx_control_t gmp_mrx_control;
+ cvmx_bgxx_gmp_pcs_miscx_ctl_t gmp_misc_ctl;
+
+ /* Nothing to do for an invalid/unpopulated port */
+ if (!cvmx_helper_is_port_valid(xiface, index))
+ return 0;
+
+ if (debug)
+ debug("%s: interface %u:%d/%d\n", __func__, xi.node,
+ xi.interface, index);
+
+ if (cvmx_helper_bgx_is_rgmii(xi.interface, index)) {
+ cvmx_xcv_ctl_t xcv_ctl;
+ cvmx_helper_link_info_t link_info;
+
+ /* RGMII loopback lives in the XCV block, not the PCS */
+ xcv_ctl.u64 = csr_rd(CVMX_XCV_CTL);
+ xcv_ctl.s.lpbk_int = enable_internal;
+ xcv_ctl.s.lpbk_ext = enable_external;
+ csr_wr(CVMX_XCV_CTL, xcv_ctl.u64);
+
+ /* Initialize link and speed */
+ __cvmx_helper_bgx_sgmii_hardware_init_link(xiface, index);
+ link_info = __cvmx_helper_bgx_sgmii_link_get(xipd_port);
+ __cvmx_helper_bgx_sgmii_hardware_init_link_speed(xiface, index,
+ link_info);
+ __cvmx_helper_bgx_rgmii_speed(link_info);
+ } else {
+ /* LOOPBCK1 = internal (TX wrapped back to RX) */
+ gmp_mrx_control.u64 = csr_rd_node(
+ node,
+ CVMX_BGXX_GMP_PCS_MRX_CONTROL(index, xi.interface));
+ gmp_mrx_control.s.loopbck1 = enable_internal;
+ csr_wr_node(node,
+ CVMX_BGXX_GMP_PCS_MRX_CONTROL(index, xi.interface),
+ gmp_mrx_control.u64);
+
+ /* LOOPBCK2 = external (RX wrapped back to TX) */
+ gmp_misc_ctl.u64 = csr_rd_node(
+ node, CVMX_BGXX_GMP_PCS_MISCX_CTL(index, xi.interface));
+ gmp_misc_ctl.s.loopbck2 = enable_external;
+ csr_wr_node(node,
+ CVMX_BGXX_GMP_PCS_MISCX_CTL(index, xi.interface),
+ gmp_misc_ctl.u64);
+ __cvmx_helper_bgx_sgmii_hardware_init_link(xiface, index);
+ }
+
+ return 0;
+}
+
+static int __cvmx_helper_bgx_xaui_link_init(int index, int xiface)
+{
+ struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
+ int node = xi.node;
+ cvmx_bgxx_spux_status1_t spu_status1;
+ cvmx_bgxx_spux_status2_t spu_status2;
+ cvmx_bgxx_spux_br_status2_t br_status2;
+ cvmx_bgxx_spux_int_t spu_int;
+ cvmx_bgxx_spux_misc_control_t spu_misc_control;
+ cvmx_bgxx_spux_an_control_t spu_an_control;
+ cvmx_bgxx_spux_an_status_t spu_an_status;
+ cvmx_bgxx_spux_br_pmd_control_t pmd_control;
+ cvmx_bgxx_cmrx_config_t cmr_config;
+ cvmx_helper_interface_mode_t mode;
+ int use_training = 0;
+ int rgmii_first = 0;
+ int qlm = cvmx_qlm_lmac(xiface, index);
+ int use_ber = 0;
+ u64 err_blks;
+ u64 ber_cnt;
+ u64 error_debounce;
+
+ if (debug)
+ debug("%s: interface %u:%d/%d\n", __func__, xi.node,
+ xi.interface, index);
+
+ rgmii_first = cvmx_helper_bgx_is_rgmii(xi.interface, index);
+
+ mode = cvmx_helper_bgx_get_mode(xiface, index);
+ if (mode == CVMX_HELPER_INTERFACE_MODE_10G_KR ||
+ mode == CVMX_HELPER_INTERFACE_MODE_40G_KR4)
+ use_training = 1;
+
+ if ((mode == CVMX_HELPER_INTERFACE_MODE_XFI ||
+ mode == CVMX_HELPER_INTERFACE_MODE_XLAUI ||
+ mode == CVMX_HELPER_INTERFACE_MODE_10G_KR ||
+ mode == CVMX_HELPER_INTERFACE_MODE_40G_KR4))
+ use_ber = 1;
+
+ /* Disable packet reception, CMR as well as SPU block */
+ cmr_config.u64 =
+ csr_rd_node(node, CVMX_BGXX_CMRX_CONFIG(index, xi.interface));
+ cmr_config.s.data_pkt_tx_en = 0;
+ cmr_config.s.data_pkt_rx_en = 0;
+ csr_wr_node(node, CVMX_BGXX_CMRX_CONFIG(index, xi.interface),
+ cmr_config.u64);
+ spu_misc_control.u64 = csr_rd_node(
+ node, CVMX_BGXX_SPUX_MISC_CONTROL(index, xi.interface));
+ spu_misc_control.s.rx_packet_dis = 1;
+ csr_wr_node(node, CVMX_BGXX_SPUX_MISC_CONTROL(index, xi.interface),
+ spu_misc_control.u64);
+
+ spu_an_control.u64 = csr_rd_node(
+ node, CVMX_BGXX_SPUX_AN_CONTROL(index, xi.interface));
+ if (spu_an_control.s.an_en) {
+ if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X)) {
+ cvmx_bgxx_spux_int_t spu_int;
+
+ spu_int.u64 = csr_rd_node(
+ node, CVMX_BGXX_SPUX_INT(index, xi.interface));
+ if (!spu_int.s.an_link_good) {
+ static u64 restart_auto_neg[2][6][4] = {
+ [0 ... 1][0 ... 5] = { [0 ... 3] = 0 }
+ };
+ u64 now = get_timer(0);
+ u64 next_restart =
+ restart_auto_neg[node][xi.interface]
+ [index] +
+ 2000;
+
+ if (now >= next_restart)
+ return -1;
+
+ restart_auto_neg[node][xi.interface][index] =
+ now;
+
+ /* Clear the auto negotiation (W1C) */
+ spu_int.u64 = 0;
+ spu_int.s.an_complete = 1;
+ spu_int.s.an_link_good = 1;
+ spu_int.s.an_page_rx = 1;
+ csr_wr_node(node,
+ CVMX_BGXX_SPUX_INT(index,
+ xi.interface),
+ spu_int.u64);
+ /* Restart auto negotiation */
+ spu_an_control.u64 = csr_rd_node(
+ node, CVMX_BGXX_SPUX_AN_CONTROL(
+ index, xi.interface));
+ spu_an_control.s.an_restart = 1;
+ csr_wr_node(node,
+ CVMX_BGXX_SPUX_AN_CONTROL(
+ index, xi.interface),
+ spu_an_control.u64);
+ return -1;
+ }
+ } else {
+ spu_an_status.u64 = csr_rd_node(
+ node,
+ CVMX_BGXX_SPUX_AN_STATUS(index, xi.interface));
+ if (!spu_an_status.s.an_complete) {
+ static u64 restart_auto_neg[2][6][4] = {
+ [0 ... 1][0 ... 5] = { [0 ... 3] = 0 }
+ };
+ u64 now = get_timer(0);
+ u64 next_restart =
+ restart_auto_neg[node][xi.interface]
+ [index] +
+ 2000;
+ if (now >= next_restart) {
+#ifdef DEBUG_BGX
+ debug("WARNING: BGX%d:%d: Waiting for autoneg to complete\n",
+ xi.interface, index);
+#endif
+ return -1;
+ }
+
+ restart_auto_neg[node][xi.interface][index] =
+ now;
+ /* Restart auto negotiation */
+ spu_an_control.u64 = csr_rd_node(
+ node, CVMX_BGXX_SPUX_AN_CONTROL(
+ index, xi.interface));
+ spu_an_control.s.an_restart = 1;
+ csr_wr_node(node,
+ CVMX_BGXX_SPUX_AN_CONTROL(
+ index, xi.interface),
+ spu_an_control.u64);
+ return -1;
+ }
+ }
+ }
+
+ if (use_training) {
+ spu_int.u64 = csr_rd_node(
+ node, CVMX_BGXX_SPUX_INT(index, xi.interface));
+ pmd_control.u64 = csr_rd_node(
+ node,
+ CVMX_BGXX_SPUX_BR_PMD_CONTROL(index, xi.interface));
+ if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X) &&
+ pmd_control.s.train_en == 0) {
+ __cvmx_bgx_start_training(node, xi.interface, index);
+ return -1;
+ }
+ cvmx_qlm_gser_errata_27882(node, qlm, index);
+ spu_int.u64 = csr_rd_node(
+ node, CVMX_BGXX_SPUX_INT(index, xi.interface));
+
+ if (spu_int.s.training_failure &&
+ !OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X)) {
+ __cvmx_bgx_restart_training(node, xi.interface, index);
+ return -1;
+ }
+ if (!spu_int.s.training_done) {
+ debug("Waiting for link training\n");
+ return -1;
+ }
+ }
+
+ /* (GSER-21957) GSER RX Equalization may make >= 5gbaud non-KR
+ * channel with DXAUI, RXAUI, XFI and XLAUI, we need to perform
+ * RX equalization when the link is receiving data the first time
+ */
+ if (use_training == 0) {
+ int lane = index;
+ cvmx_bgxx_spux_control1_t control1;
+
+ cmr_config.u64 = csr_rd_node(
+ node, CVMX_BGXX_CMRX_CONFIG(index, xi.interface));
+ control1.u64 = csr_rd_node(
+ node, CVMX_BGXX_SPUX_CONTROL1(index, xi.interface));
+ if (control1.s.loopbck) {
+ /* Skip RX equalization when in loopback */
+ } else if (mode == CVMX_HELPER_INTERFACE_MODE_XLAUI ||
+ mode == CVMX_HELPER_INTERFACE_MODE_XAUI) {
+ lane = -1;
+ if (__cvmx_qlm_rx_equalization(node, qlm, lane)) {
+#ifdef DEBUG_BGX
+ debug("%d:%d:%d: Waiting for RX Equalization on QLM%d\n",
+ node, xi.interface, index, qlm);
+#endif
+ return -1;
+ }
+ /* If BGX2 uses both dlms, then configure other dlm also. */
+ if (OCTEON_IS_MODEL(OCTEON_CN73XX) &&
+ xi.interface == 2) {
+ if (__cvmx_qlm_rx_equalization(node, 6, lane)) {
+#ifdef DEBUG_BGX
+ debug("%d:%d:%d: Waiting for RX Equalization on QLM%d\n",
+ node, xi.interface, index, qlm);
+#endif
+ return -1;
+ }
+ }
+ /* RXAUI */
+ } else if (mode == CVMX_HELPER_INTERFACE_MODE_RXAUI) {
+ lane = index * 2;
+ if (OCTEON_IS_MODEL(OCTEON_CN73XX) && index >= 2 &&
+ xi.interface == 2) {
+ lane = 0;
+ }
+ if (rgmii_first)
+ lane--;
+ if (__cvmx_qlm_rx_equalization(node, qlm, lane) ||
+ __cvmx_qlm_rx_equalization(node, qlm, lane + 1)) {
+#ifdef DEBUG_BGX
+ debug("%d:%d:%d: Waiting for RX Equalization on QLM%d\n",
+ node, xi.interface, index, qlm);
+#endif
+ return -1;
+ }
+ /* XFI */
+ } else if (cmr_config.s.lmac_type != 5) {
+ if (rgmii_first)
+ lane--;
+ if (OCTEON_IS_MODEL(OCTEON_CN73XX) && index >= 2 &&
+ xi.interface == 2) {
+ lane = index - 2;
+ } else if (OCTEON_IS_MODEL(OCTEON_CNF75XX) &&
+ index >= 2) {
+ lane = index - 2;
+ }
+ if (__cvmx_qlm_rx_equalization(node, qlm, lane)) {
+#ifdef DEBUG_BGX
+ debug("%d:%d:%d: Waiting for RX Equalization on QLM%d\n",
+ node, xi.interface, index, qlm);
+#endif
+ return -1;
+ }
+ }
+ }
+
+ if (CVMX_WAIT_FOR_FIELD64_NODE(
+ node, CVMX_BGXX_SPUX_CONTROL1(index, xi.interface),
+ cvmx_bgxx_spux_control1_t, reset, ==, 0, 10000)) {
+#ifdef DEBUG_BGX
+ debug("ERROR: %d:BGX%d:%d: PCS in reset", node, xi.interface,
+ index);
+#endif
+ return -1;
+ }
+
+ if (use_ber) {
+ if (CVMX_WAIT_FOR_FIELD64_NODE(
+ node,
+ CVMX_BGXX_SPUX_BR_STATUS1(index, xi.interface),
+ cvmx_bgxx_spux_br_status1_t, blk_lock, ==, 1,
+ 10000)) {
+#ifdef DEBUG_BGX
+ debug("ERROR: %d:BGX%d:%d: BASE-R PCS block not locked\n",
+ node, xi.interface, index);
+
+ if (mode == CVMX_HELPER_INTERFACE_MODE_XLAUI ||
+ mode == CVMX_HELPER_INTERFACE_MODE_40G_KR4) {
+ cvmx_bgxx_spux_br_algn_status_t bstatus;
+
+ bstatus.u64 = csr_rd_node(
+ node, CVMX_BGXX_SPUX_BR_ALGN_STATUS(
+ index, xi.interface));
+ debug("ERROR: %d:BGX%d:%d: LANE BLOCK_LOCK:%x LANE MARKER_LOCK:%x\n",
+ node, xi.interface, index,
+ bstatus.s.block_lock,
+ bstatus.s.marker_lock);
+ }
+#endif
+ return -1;
+ }
+ } else {
+ /* (5) Check to make sure that the link appears up and stable.
+ */
+ /* Wait for PCS to be aligned */
+ if (CVMX_WAIT_FOR_FIELD64_NODE(
+ node, CVMX_BGXX_SPUX_BX_STATUS(index, xi.interface),
+ cvmx_bgxx_spux_bx_status_t, alignd, ==, 1, 10000)) {
+#ifdef DEBUG_BGX
+ debug("ERROR: %d:BGX%d:%d: PCS not aligned\n", node,
+ xi.interface, index);
+#endif
+ return -1;
+ }
+ }
+
+ if (use_ber) {
+ /* Set the BGXX_SPUX_BR_STATUS2.latched_lock bit (latching low).
+ * This will be checked prior to enabling packet tx and rx,
+ * ensuring block lock is sustained throughout the BGX link-up
+ * procedure
+ */
+ br_status2.u64 = csr_rd_node(
+ node, CVMX_BGXX_SPUX_BR_STATUS2(index, xi.interface));
+ br_status2.s.latched_lock = 1;
+ csr_wr_node(node,
+ CVMX_BGXX_SPUX_BR_STATUS2(index, xi.interface),
+ br_status2.u64);
+ }
+
+ /* Clear rcvflt bit (latching high) and read it back */
+ spu_status2.u64 =
+ csr_rd_node(node, CVMX_BGXX_SPUX_STATUS2(index, xi.interface));
+ spu_status2.s.rcvflt = 1;
+ csr_wr_node(node, CVMX_BGXX_SPUX_STATUS2(index, xi.interface),
+ spu_status2.u64);
+
+ spu_status2.u64 =
+ csr_rd_node(node, CVMX_BGXX_SPUX_STATUS2(index, xi.interface));
+ if (spu_status2.s.rcvflt) {
+#ifdef DEBUG_BGX
+ debug("ERROR: %d:BGX%d:%d: Receive fault, need to retry\n",
+ node, xi.interface, index);
+#endif
+ if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X) && use_training)
+ __cvmx_bgx_restart_training(node, xi.interface, index);
+ /* debug("training restarting\n"); */
+ return -1;
+ }
+
+ /* Wait for MAC RX to be ready */
+ if (CVMX_WAIT_FOR_FIELD64_NODE(
+ node, CVMX_BGXX_SMUX_RX_CTL(index, xi.interface),
+ cvmx_bgxx_smux_rx_ctl_t, status, ==, 0, 10000)) {
+#ifdef DEBUG_BGX
+ debug("ERROR: %d:BGX%d:%d: RX not ready\n", node, xi.interface,
+ index);
+#endif
+ return -1;
+ }
+
+ /* Wait for BGX RX to be idle */
+ if (CVMX_WAIT_FOR_FIELD64_NODE(
+ node, CVMX_BGXX_SMUX_CTRL(index, xi.interface),
+ cvmx_bgxx_smux_ctrl_t, rx_idle, ==, 1, 10000)) {
+#ifdef DEBUG_BGX
+ debug("ERROR: %d:BGX%d:%d: RX not idle\n", node, xi.interface,
+ index);
+#endif
+ return -1;
+ }
+
+ /* Wait for GMX TX to be idle */
+ if (CVMX_WAIT_FOR_FIELD64_NODE(
+ node, CVMX_BGXX_SMUX_CTRL(index, xi.interface),
+ cvmx_bgxx_smux_ctrl_t, tx_idle, ==, 1, 10000)) {
+#ifdef DEBUG_BGX
+ debug("ERROR: %d:BGX%d:%d: TX not idle\n", node, xi.interface,
+ index);
+#endif
+ return -1;
+ }
+
+ /* rcvflt should still be 0 */
+ spu_status2.u64 =
+ csr_rd_node(node, CVMX_BGXX_SPUX_STATUS2(index, xi.interface));
+ if (spu_status2.s.rcvflt) {
+#ifdef DEBUG_BGX
+ debug("ERROR: %d:BGX%d:%d: Receive fault, need to retry\n",
+ node, xi.interface, index);
+#endif
+ return -1;
+ }
+
+ /* Receive link is latching low. Force it high and verify it */
+ spu_status1.u64 =
+ csr_rd_node(node, CVMX_BGXX_SPUX_STATUS1(index, xi.interface));
+ spu_status1.s.rcv_lnk = 1;
+ csr_wr_node(node, CVMX_BGXX_SPUX_STATUS1(index, xi.interface),
+ spu_status1.u64);
+
+ if (CVMX_WAIT_FOR_FIELD64_NODE(
+ node, CVMX_BGXX_SPUX_STATUS1(index, xi.interface),
+ cvmx_bgxx_spux_status1_t, rcv_lnk, ==, 1, 10000)) {
+#ifdef DEBUG_BGX
+ debug("ERROR: %d:BGX%d:%d: Receive link down\n", node,
+ xi.interface, index);
+#endif
+ return -1;
+ }
+
+ if (use_ber) {
+ /* Clearing BER_CNT and ERR_BLKs */
+ br_status2.u64 = csr_rd_node(
+ node, CVMX_BGXX_SPUX_BR_STATUS2(index, xi.interface));
+
+ /* If set, clear the LATCHED_BER by writing it to a one. */
+ if (br_status2.s.latched_ber)
+ csr_wr_node(node,
+ CVMX_BGXX_SPUX_BR_STATUS2(index,
+ xi.interface),
+ br_status2.u64);
+
+ error_debounce = get_timer(0);
+
+ /* Clear error counts */
+ err_blks = 0;
+ ber_cnt = 0;
+
+ /* Verify that the link is up and error free for 100ms */
+ while (get_timer(error_debounce) < 100) {
+ spu_status1.u64 = csr_rd_node(
+ node,
+ CVMX_BGXX_SPUX_STATUS1(index, xi.interface));
+ /* Checking that Receive link is still up (rcv_lnk = 1 (up)) */
+ if (!spu_status1.s.rcv_lnk) {
+#ifdef DEBUG_BGX
+ debug("ERROR: %d:BGX%d:%d: Receive link down\n",
+ node, xi.interface, index);
+#endif
+ return -1;
+ }
+
+ /* Checking if latched_ber = 1 (BER >= 10e^4) */
+ br_status2.u64 = csr_rd_node(
+ node,
+ CVMX_BGXX_SPUX_BR_STATUS2(index, xi.interface));
+ err_blks += br_status2.s.err_blks;
+ ber_cnt += br_status2.s.ber_cnt;
+
+ if (br_status2.s.latched_ber) {
+#ifdef DEBUG_BGX
+ debug("ERROR: %d:BGX%d:%d: BER test failed, BER >= 10e^4, need to retry\n",
+ node, xi.interface, index);
+#endif
+ return -1;
+ }
+ /* Checking that latched BLOCK_LOCK is still set (Block Lock never lost) */
+ if (!br_status2.s.latched_lock) {
+#ifdef DEBUG_BGX
+ debug("ERROR: %d:BGX%d:%d: BASE-R PCS block lock lost, need to retry\n",
+ node, xi.interface, index);
+#endif
+ return -1;
+ }
+
+ /* Check error counters. Must be 0 (this error rate#
+ * is much higher than 1E-12)
+ */
+ if (err_blks > 0) {
+#ifdef DEBUG_BGX
+ debug("ERROR: %d:BGX%d:%d: BASE-R errored-blocks (%llu) detected, need to retry\n",
+ node, xi.interface, index,
+ (unsigned long long)err_blks);
+#endif
+ return -1;
+ }
+
+ if (ber_cnt > 0) {
+#ifdef DEBUG_BGX
+ debug("ERROR: %d:BGX%d:%d: BASE-R bit-errors (%llu) detected, need to retry\n",
+ node, xi.interface, index,
+ (unsigned long long)ber_cnt);
+#endif
+ return -1;
+ }
+
+ udelay(1000);
+ }
+
+ /* Clear out the BGX error counters/bits. These errors are
+ * expected as part of the BGX link up procedure
+ */
+ /* BIP_ERR counters clear as part of this read */
+ csr_rd_node(node,
+ CVMX_BGXX_SPUX_BR_BIP_ERR_CNT(index, xi.interface));
+ /* BER_CNT and ERR_BLKs clear as part of this read */
+ br_status2.u64 = csr_rd_node(
+ node, CVMX_BGXX_SPUX_BR_STATUS2(index, xi.interface));
+ }
+
+ /* (7) Enable packet transmit and receive */
+ spu_misc_control.u64 = csr_rd_node(
+ node, CVMX_BGXX_SPUX_MISC_CONTROL(index, xi.interface));
+ spu_misc_control.s.rx_packet_dis = 0;
+ csr_wr_node(node, CVMX_BGXX_SPUX_MISC_CONTROL(index, xi.interface),
+ spu_misc_control.u64);
+
+ if (debug)
+ debug("%s: Enabling tx and rx data packets\n", __func__);
+ cmr_config.u64 =
+ csr_rd_node(node, CVMX_BGXX_CMRX_CONFIG(index, xi.interface));
+ cmr_config.s.data_pkt_tx_en = 1;
+ cmr_config.s.data_pkt_rx_en = 1;
+ csr_wr_node(node, CVMX_BGXX_CMRX_CONFIG(index, xi.interface),
+ cmr_config.u64);
+ return 0;
+}
+
+/**
+ * @INTERNAL
+ * Bring up all ports of a XAUI-family BGX interface (XAUI/RXAUI/XFI/XLAUI/KR).
+ *
+ * Each port is initialized and its link brought up with up to 5 retries;
+ * a port that still fails is skipped (logged only under DEBUG_BGX) so the
+ * remaining ports on the interface are not blocked.
+ *
+ * @param xiface  global interface number (encodes node + interface)
+ * @return 0 always (per-port failures do not fail the interface)
+ */
+int __cvmx_helper_bgx_xaui_enable(int xiface)
+{
+ int index;
+ cvmx_helper_interface_mode_t mode;
+ int num_ports = cvmx_helper_ports_on_interface(xiface);
+
+ for (index = 0; index < num_ports; index++) {
+ int res;
+ int xipd_port = cvmx_helper_get_ipd_port(xiface, index);
+ int phy_pres;
+ struct cvmx_xiface xi =
+ cvmx_helper_xiface_to_node_interface(xiface);
+ /* Per-node/interface/port retry counters; static so retries
+ * are bounded across repeated calls as well.
+ */
+ static int count
+ [CVMX_MAX_NODES][CVMX_HELPER_MAX_IFACE]
+ [CVMX_HELPER_CFG_MAX_PORT_PER_IFACE] = {
+ [0 ... CVMX_MAX_NODES -
+ 1][0 ... CVMX_HELPER_MAX_IFACE -
+ 1] = { [0 ... CVMX_HELPER_CFG_MAX_PORT_PER_IFACE -
+ 1] = 0 }
+ };
+
+ mode = cvmx_helper_bgx_get_mode(xiface, index);
+
+ /* Set disparity for RXAUI interface as described in the
+ * Marvell RXAUI Interface specification.
+ */
+ if (mode == CVMX_HELPER_INTERFACE_MODE_RXAUI &&
+ (cvmx_helper_get_port_phy_present(xiface, index)))
+ phy_pres = 1;
+ else
+ phy_pres = 0;
+ __cvmx_helper_bgx_port_init(xipd_port, phy_pres);
+
+retry_link:
+ res = __cvmx_helper_bgx_xaui_link_init(index, xiface);
+ /* RX equalization or autonegotiation can take a little longer;
+ * retry the link up to 5 times for now.
+ */
+ if (res == -1 && count[xi.node][xi.interface][index] < 5) {
+ count[xi.node][xi.interface][index]++;
+#ifdef DEBUG_BGX
+ debug("%d:BGX(%d,%d): Failed to get link, retrying\n",
+ xi.node, xi.interface, index);
+#endif
+ goto retry_link;
+ }
+
+ if (res == -1) {
+#ifdef DEBUG_BGX
+ debug("%d:BGX(%d,%d): Failed to get link\n", xi.node,
+ xi.interface, index);
+#endif
+ continue;
+ }
+ }
+ return 0;
+}
+
+/**
+ * @INTERNAL
+ * Read the current link state of a XAUI-family BGX port.
+ *
+ * The link is considered up only when TX/RX status is clean, packet
+ * transmit/receive is enabled and the SPU reports receive link up; in
+ * that case the speed is derived from the QLM baud rate, the lane count
+ * and the PCS encoding overhead. Otherwise the link is re-initialized
+ * (unless only remote faults are being received).
+ *
+ * @param xipd_port  global IPD port number
+ * @return link state (speed in Mbps, duplex, up/down, init_success)
+ */
+cvmx_helper_link_info_t __cvmx_helper_bgx_xaui_link_get(int xipd_port)
+{
+ int xiface = cvmx_helper_get_interface_num(xipd_port);
+ struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
+ struct cvmx_xport xp = cvmx_helper_ipd_port_to_xport(xipd_port);
+ int index = cvmx_helper_get_interface_index_num(xp.port);
+ cvmx_bgxx_spux_status1_t spu_status1;
+ cvmx_bgxx_smux_tx_ctl_t smu_tx_ctl;
+ cvmx_bgxx_smux_rx_ctl_t smu_rx_ctl;
+ cvmx_bgxx_cmrx_config_t cmr_config;
+ cvmx_helper_link_info_t result;
+ cvmx_helper_interface_mode_t mode;
+ cvmx_bgxx_spux_misc_control_t spu_misc_control;
+ cvmx_bgxx_spux_br_status2_t br_status2;
+
+ result.u64 = 0;
+
+ if (debug)
+ debug("%s: interface %u:%d/%d\n", __func__, xi.node,
+ xi.interface, index);
+
+ mode = cvmx_helper_bgx_get_mode(xiface, index);
+ /* RGMII ports on BGX are handled by the SGMII helper */
+ if (mode == CVMX_HELPER_INTERFACE_MODE_RGMII)
+ return __cvmx_helper_bgx_sgmii_link_get(xipd_port);
+
+ /* Reading current rx/tx link status */
+ spu_status1.u64 = csr_rd_node(
+ xi.node, CVMX_BGXX_SPUX_STATUS1(index, xi.interface));
+ smu_tx_ctl.u64 = csr_rd_node(
+ xi.node, CVMX_BGXX_SMUX_TX_CTL(index, xi.interface));
+ smu_rx_ctl.u64 = csr_rd_node(
+ xi.node, CVMX_BGXX_SMUX_RX_CTL(index, xi.interface));
+ /* Reading tx/rx packet enables */
+ cmr_config.u64 = csr_rd_node(
+ xi.node, CVMX_BGXX_CMRX_CONFIG(index, xi.interface));
+ spu_misc_control.u64 = csr_rd_node(
+ xi.node, CVMX_BGXX_SPUX_MISC_CONTROL(index, xi.interface));
+
+ /* Link is fully up: TX/RX clean, packets enabled, receive link up */
+ if (smu_tx_ctl.s.ls == 0 && smu_rx_ctl.s.status == 0 &&
+ cmr_config.s.data_pkt_tx_en == 1 &&
+ cmr_config.s.data_pkt_rx_en == 1 &&
+ spu_misc_control.s.rx_packet_dis == 0 &&
+ spu_status1.s.rcv_lnk) {
+ int lanes;
+ int qlm = cvmx_qlm_lmac(xiface, index);
+ u64 speed;
+
+ result.s.link_up = 1;
+ result.s.full_duplex = 1;
+ if (OCTEON_IS_MODEL(OCTEON_CN78XX))
+ speed = cvmx_qlm_get_gbaud_mhz_node(xi.node, qlm);
+ else
+ speed = cvmx_qlm_get_gbaud_mhz(qlm);
+
+ cmr_config.u64 = csr_rd_node(
+ xi.node, CVMX_BGXX_CMRX_CONFIG(index, xi.interface));
+ /* Scale the per-lane baud rate by the PCS encoding overhead
+ * (8/10 for 8b/10b modes, 64/66 for 64b/66b modes), rounded
+ * to nearest, then multiply by the number of lanes below.
+ */
+ switch (cmr_config.s.lmac_type) {
+ default:
+ case 1: // XAUI
+ speed = (speed * 8 + 5) / 10;
+ lanes = 4;
+ break;
+ case 2: // RXAUI
+ speed = (speed * 8 + 5) / 10;
+ lanes = 2;
+ break;
+ case 3: // XFI
+ speed = (speed * 64 + 33) / 66;
+ lanes = 1;
+ break;
+ case 4: // XLAUI
+ /* Adjust the speed when XLAUI is configured at 6.250Gbps */
+ if (speed == 6250)
+ speed = 6445;
+ speed = (speed * 64 + 33) / 66;
+ lanes = 4;
+ break;
+ }
+
+ if (debug)
+ debug("%s: baud: %llu, lanes: %d\n", __func__,
+ (unsigned long long)speed, lanes);
+ speed *= lanes;
+ result.s.speed = speed;
+ } else {
+ int res;
+ u64 err_blks = 0;
+ u64 ber_cnt = 0;
+
+ /* Check for err_blk and ber errors if 10G or 40G */
+ if ((mode == CVMX_HELPER_INTERFACE_MODE_XFI ||
+ mode == CVMX_HELPER_INTERFACE_MODE_XLAUI ||
+ mode == CVMX_HELPER_INTERFACE_MODE_10G_KR ||
+ mode == CVMX_HELPER_INTERFACE_MODE_40G_KR4)) {
+ br_status2.u64 = csr_rd_node(
+ xi.node,
+ CVMX_BGXX_SPUX_BR_STATUS2(index, xi.interface));
+ err_blks = br_status2.s.err_blks;
+ ber_cnt = br_status2.s.ber_cnt;
+ }
+
+ /* Checking if the link is up and error-free but we are receiving remote-faults */
+ if (smu_tx_ctl.s.ls != 1 && smu_rx_ctl.s.status != 1 &&
+ cmr_config.s.data_pkt_tx_en == 1 &&
+ cmr_config.s.data_pkt_rx_en == 1 &&
+ spu_misc_control.s.rx_packet_dis == 0 &&
+ err_blks == 0 && ber_cnt == 0 &&
+ spu_status1.s.rcv_lnk) {
+ /* Local side is healthy; don't re-init, just report
+ * that initialization succeeded.
+ */
+ result.s.init_success = 1;
+#ifdef DEBUG_BGX
+ debug("Receiving remote-fault ordered sets %d:BGX(%d,%d)\n",
+ xi.node, xi.interface, index);
+#endif
+
+ } else {
+ res = __cvmx_helper_bgx_xaui_link_init(index, xiface);
+ if (res == -1) {
+#ifdef DEBUG_BGX
+ debug("Failed to get %d:BGX(%d,%d) link\n",
+ xi.node, xi.interface, index);
+#endif
+ } else {
+#ifdef DEBUG_BGX
+ debug("Link initialization successful %d:BGX(%d,%d)\n",
+ xi.node, xi.interface, index);
+#endif
+ result.s.init_success = 1;
+ }
+ }
+ }
+
+ return result;
+}
+
+/**
+ * @INTERNAL
+ * Apply a desired link state to a XAUI-family BGX port.
+ *
+ * If the requested state is "down", nothing is done. If the port is
+ * already fully up (clean TX/RX status, packet enables set, receive
+ * link up) this is a no-op; otherwise the link bring-up sequence is
+ * (re)run.
+ *
+ * @param xipd_port  global IPD port number
+ * @param link_info  desired link state (only link_up is consulted here)
+ * @return 0 on success/no-op, result of link init otherwise
+ */
+int __cvmx_helper_bgx_xaui_link_set(int xipd_port,
+ cvmx_helper_link_info_t link_info)
+{
+ int xiface = cvmx_helper_get_interface_num(xipd_port);
+ struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
+ struct cvmx_xport xp = cvmx_helper_ipd_port_to_xport(xipd_port);
+ int node = xi.node;
+ int index = cvmx_helper_get_interface_index_num(xp.port);
+ cvmx_bgxx_smux_tx_ctl_t smu_tx_ctl;
+ cvmx_bgxx_smux_rx_ctl_t smu_rx_ctl;
+ cvmx_bgxx_spux_status1_t spu_status1;
+ cvmx_helper_interface_mode_t mode;
+ cvmx_bgxx_cmrx_config_t cmr_config;
+ cvmx_bgxx_spux_misc_control_t spu_misc_control;
+
+ if (debug)
+ debug("%s: interface %u:%d/%d\n", __func__, xi.node,
+ xi.interface, index);
+
+ mode = cvmx_helper_bgx_get_mode(xiface, index);
+ /* RGMII ports on BGX are handled by the SGMII helper */
+ if (mode == CVMX_HELPER_INTERFACE_MODE_RGMII)
+ return __cvmx_helper_bgx_sgmii_link_set(xipd_port, link_info);
+
+ /* Reading current rx/tx link status */
+ smu_tx_ctl.u64 =
+ csr_rd_node(node, CVMX_BGXX_SMUX_TX_CTL(index, xi.interface));
+ smu_rx_ctl.u64 =
+ csr_rd_node(node, CVMX_BGXX_SMUX_RX_CTL(index, xi.interface));
+ spu_status1.u64 =
+ csr_rd_node(node, CVMX_BGXX_SPUX_STATUS1(index, xi.interface));
+ /* Reading tx/rx packet enables */
+ cmr_config.u64 = csr_rd_node(
+ xi.node, CVMX_BGXX_CMRX_CONFIG(index, xi.interface));
+ spu_misc_control.u64 = csr_rd_node(
+ xi.node, CVMX_BGXX_SPUX_MISC_CONTROL(index, xi.interface));
+
+ /* If the link shouldn't be up, then just return */
+ if (!link_info.s.link_up)
+ return 0;
+
+ /* Do nothing if both RX and TX are happy and packet
+ * transmission/reception is enabled
+ */
+ if (smu_tx_ctl.s.ls == 0 && smu_rx_ctl.s.status == 0 &&
+ cmr_config.s.data_pkt_tx_en == 1 &&
+ cmr_config.s.data_pkt_rx_en == 1 &&
+ spu_misc_control.s.rx_packet_dis == 0 && spu_status1.s.rcv_lnk)
+ return 0;
+
+ /* Bring the link up */
+ return __cvmx_helper_bgx_xaui_link_init(index, xiface);
+}
+
+/**
+ * @INTERNAL
+ * Configure internal (SPU) and/or external (SMU) loopback on a
+ * XAUI-family BGX port, then re-run link initialization.
+ *
+ * @param xipd_port        global IPD port number
+ * @param enable_internal  non-zero to enable SPU internal loopback
+ * @param enable_external  non-zero to enable SMU external loopback
+ * @return result of __cvmx_helper_bgx_xaui_link_init()
+ */
+int __cvmx_helper_bgx_xaui_configure_loopback(int xipd_port,
+ int enable_internal,
+ int enable_external)
+{
+ int xiface = cvmx_helper_get_interface_num(xipd_port);
+ struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
+ struct cvmx_xport xp = cvmx_helper_ipd_port_to_xport(xipd_port);
+ int node = xi.node;
+ int index = cvmx_helper_get_interface_index_num(xp.port);
+ cvmx_bgxx_spux_control1_t spu_control1;
+ cvmx_bgxx_smux_ext_loopback_t smu_ext_loopback;
+
+ if (debug)
+ debug("%s: interface %u:%d/%d\n", __func__, xi.node,
+ xi.interface, index);
+
+ /* INT_BEAT_GEN must be set for loopback if the QLMs are not clocked.
+ * Set it whenever we use internal loopback
+ */
+ if (enable_internal) {
+ cvmx_bgxx_cmrx_config_t cmr_config;
+
+ cmr_config.u64 = csr_rd_node(
+ node, CVMX_BGXX_CMRX_CONFIG(index, xi.interface));
+ cmr_config.s.int_beat_gen = 1;
+ csr_wr_node(node, CVMX_BGXX_CMRX_CONFIG(index, xi.interface),
+ cmr_config.u64);
+ }
+ /* Set the internal loop */
+ spu_control1.u64 =
+ csr_rd_node(node, CVMX_BGXX_SPUX_CONTROL1(index, xi.interface));
+ spu_control1.s.loopbck = enable_internal;
+ csr_wr_node(node, CVMX_BGXX_SPUX_CONTROL1(index, xi.interface),
+ spu_control1.u64);
+ /* Set the external loop */
+ smu_ext_loopback.u64 = csr_rd_node(
+ node, CVMX_BGXX_SMUX_EXT_LOOPBACK(index, xi.interface));
+ smu_ext_loopback.s.en = enable_external;
+ csr_wr_node(node, CVMX_BGXX_SMUX_EXT_LOOPBACK(index, xi.interface),
+ smu_ext_loopback.u64);
+
+ /* Re-run link bring-up with the new loopback configuration */
+ return __cvmx_helper_bgx_xaui_link_init(index, xiface);
+}
+
+/**
+ * @INTERNAL
+ * Bring up every valid port on a BGX interface whose LMACs run in mixed
+ * modes (SGMII and XAUI-family on the same interface).
+ *
+ * RGMII ports are deferred (initialized after PKO setup), SGMII ports go
+ * through the SGMII link helpers, and all other LMAC types use the XAUI
+ * link-init path with up to 5 retries per port.
+ *
+ * @param xiface  global interface number (encodes node + interface)
+ * @return 0 always (per-port failures are skipped)
+ */
+int __cvmx_helper_bgx_mixed_enable(int xiface)
+{
+ int index;
+ int num_ports = cvmx_helper_ports_on_interface(xiface);
+ cvmx_helper_interface_mode_t mode;
+
+ for (index = 0; index < num_ports; index++) {
+ int xipd_port, phy_pres = 0;
+
+ if (!cvmx_helper_is_port_valid(xiface, index))
+ continue;
+
+ mode = cvmx_helper_bgx_get_mode(xiface, index);
+
+ xipd_port = cvmx_helper_get_ipd_port(xiface, index);
+
+ /* RXAUI with a PHY present needs disparity handling in
+ * port init (see Marvell RXAUI Interface specification).
+ */
+ if (mode == CVMX_HELPER_INTERFACE_MODE_RXAUI &&
+ (cvmx_helper_get_port_phy_present(xiface, index)))
+ phy_pres = 1;
+
+ if (__cvmx_helper_bgx_port_init(xipd_port, phy_pres))
+ continue;
+
+ /* For RGMII interface, initialize the link after PKO is setup */
+ if (mode == CVMX_HELPER_INTERFACE_MODE_RGMII)
+ continue;
+ /* Call SGMII init code for lmac_type = 0|5 */
+ else if (mode == CVMX_HELPER_INTERFACE_MODE_SGMII) {
+ int do_link_set = 1;
+
+ if (do_link_set)
+ __cvmx_helper_bgx_sgmii_link_set(
+ xipd_port,
+ __cvmx_helper_bgx_sgmii_link_get(
+ xipd_port));
+ /* All other lmac type call XAUI init code */
+ } else {
+ int res;
+ struct cvmx_xiface xi =
+ cvmx_helper_xiface_to_node_interface(xiface);
+ /* Per-node/interface/port retry counters; static so
+ * retries are bounded across repeated calls as well.
+ */
+ static int count
+ [CVMX_MAX_NODES][CVMX_HELPER_MAX_IFACE]
+ [CVMX_HELPER_CFG_MAX_PORT_PER_IFACE] = {
+ [0 ... CVMX_MAX_NODES -
+ 1][0 ... CVMX_HELPER_MAX_IFACE -
+ 1] = { [0 ... CVMX_HELPER_CFG_MAX_PORT_PER_IFACE -
+ 1] = 0 }
+ };
+
+retry_link:
+ res = __cvmx_helper_bgx_xaui_link_init(index, xiface);
+ /* RX equalization or autonegotiation can take a little
+ * longer; retry the link up to 5 times for now.
+ */
+ if (res == -1 &&
+ count[xi.node][xi.interface][index] < 5) {
+ count[xi.node][xi.interface][index]++;
+ goto retry_link;
+ }
+
+ if (res == -1) {
+#ifdef DEBUG_BGX
+ debug("Failed to get %d:BGX(%d,%d) link\n",
+ xi.node, xi.interface, index);
+#endif
+ continue;
+ }
+ }
+ }
+ return 0;
+}
+
+/**
+ * @INTERNAL
+ * Get the link state of a port on a mixed-mode BGX interface by
+ * dispatching to the SGMII or XAUI helper based on the port's LMAC mode.
+ *
+ * @param xipd_port  global IPD port number
+ * @return link state reported by the mode-specific helper
+ */
+cvmx_helper_link_info_t __cvmx_helper_bgx_mixed_link_get(int xipd_port)
+{
+	int iface_num = cvmx_helper_get_interface_num(xipd_port);
+	int port_idx = cvmx_helper_get_interface_index_num(xipd_port);
+	cvmx_helper_interface_mode_t lmac_mode =
+		cvmx_helper_bgx_get_mode(iface_num, port_idx);
+
+	/* SGMII and RGMII share the GMII-style helper path */
+	if (lmac_mode == CVMX_HELPER_INTERFACE_MODE_SGMII ||
+	    lmac_mode == CVMX_HELPER_INTERFACE_MODE_RGMII)
+		return __cvmx_helper_bgx_sgmii_link_get(xipd_port);
+
+	return __cvmx_helper_bgx_xaui_link_get(xipd_port);
+}
+
+/**
+ * @INTERNAL
+ * Set the link state of a port on a mixed-mode BGX interface by
+ * dispatching to the SGMII or XAUI helper based on the port's LMAC mode.
+ *
+ * @param xipd_port  global IPD port number
+ * @param link_info  desired link state
+ * @return result of the mode-specific link-set helper
+ */
+int __cvmx_helper_bgx_mixed_link_set(int xipd_port,
+				     cvmx_helper_link_info_t link_info)
+{
+	int iface_num = cvmx_helper_get_interface_num(xipd_port);
+	int port_idx = cvmx_helper_get_interface_index_num(xipd_port);
+	cvmx_helper_interface_mode_t lmac_mode =
+		cvmx_helper_bgx_get_mode(iface_num, port_idx);
+
+	/* SGMII and RGMII share the GMII-style helper path */
+	if (lmac_mode == CVMX_HELPER_INTERFACE_MODE_SGMII ||
+	    lmac_mode == CVMX_HELPER_INTERFACE_MODE_RGMII)
+		return __cvmx_helper_bgx_sgmii_link_set(xipd_port, link_info);
+
+	return __cvmx_helper_bgx_xaui_link_set(xipd_port, link_info);
+}
+
+/**
+ * @INTERNAL
+ * Configure loopback on a port of a mixed-mode BGX interface by
+ * dispatching to the SGMII or XAUI helper based on the port's LMAC mode.
+ *
+ * @param xipd_port        global IPD port number
+ * @param enable_internal  non-zero to enable internal loopback
+ * @param enable_external  non-zero to enable external loopback
+ * @return result of the mode-specific loopback helper
+ */
+int __cvmx_helper_bgx_mixed_configure_loopback(int xipd_port,
+					       int enable_internal,
+					       int enable_external)
+{
+	int iface_num = cvmx_helper_get_interface_num(xipd_port);
+	int port_idx = cvmx_helper_get_interface_index_num(xipd_port);
+	cvmx_helper_interface_mode_t lmac_mode =
+		cvmx_helper_bgx_get_mode(iface_num, port_idx);
+
+	/* SGMII and RGMII share the GMII-style helper path */
+	if (lmac_mode == CVMX_HELPER_INTERFACE_MODE_SGMII ||
+	    lmac_mode == CVMX_HELPER_INTERFACE_MODE_RGMII)
+		return __cvmx_helper_bgx_sgmii_configure_loopback(
+			xipd_port, enable_internal, enable_external);
+
+	return __cvmx_helper_bgx_xaui_configure_loopback(
+		xipd_port, enable_internal, enable_external);
+}
+
+/**
+ * @INTERNAL
+ * Configure Priority-Based Flow Control (a.k.a. PFC/CBFC)
+ * on a specific BGX interface/port.
+ *
+ * @param node       node number
+ * @param interface  interface number
+ * @param index      port number
+ * @param pfc_enable true to enable both RX and TX PFC, false to disable
+ */
+void __cvmx_helper_bgx_xaui_config_pfc(unsigned int node,
+ unsigned int interface,
+ unsigned int index, bool pfc_enable)
+{
+ int xiface = cvmx_helper_node_interface_to_xiface(node, interface);
+ struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
+ cvmx_bgxx_smux_cbfc_ctl_t cbfc_ctl;
+
+ if (debug)
+ debug("%s: interface %u:%d/%d\n", __func__, xi.node,
+ xi.interface, index);
+
+ cbfc_ctl.u64 =
+ csr_rd_node(node, CVMX_BGXX_SMUX_CBFC_CTL(index, xi.interface));
+
+ /* Enable all PFC controls if requested */
+ cbfc_ctl.s.rx_en = pfc_enable;
+ cbfc_ctl.s.tx_en = pfc_enable;
+ if (debug)
+ debug("%s: CVMX_BGXX_SMUX_CBFC_CTL(%d,%d)=%#llx\n", __func__,
+ index, xi.interface, (unsigned long long)cbfc_ctl.u64);
+ csr_wr_node(node, CVMX_BGXX_SMUX_CBFC_CTL(index, xi.interface),
+ cbfc_ctl.u64);
+}
+
+/**
+ * Control how the hardware handles incoming PAUSE packets.
+ *
+ * The most common modes of operation:
+ *   ctl_bck = 1, ctl_drp = 1: hardware handles everything
+ *   ctl_bck = 0, ctl_drp = 0: software sees all PAUSE frames
+ *   ctl_bck = 0, ctl_drp = 1: all PAUSE frames are completely ignored
+ *
+ * @param node      node number
+ * @param interface interface number
+ * @param index     port number
+ * @param ctl_bck   1: Forward PAUSE information to TX block
+ * @param ctl_drp   1: Drop control PAUSE frames
+ */
+void cvmx_helper_bgx_rx_pause_ctl(unsigned int node, unsigned int interface,
+				  unsigned int index, unsigned int ctl_bck,
+				  unsigned int ctl_drp)
+{
+	int xiface = cvmx_helper_node_interface_to_xiface(node, interface);
+	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
+	cvmx_bgxx_smux_rx_frm_ctl_t rx_frm_ctl;
+
+	if (debug)
+		debug("%s: interface %u:%d/%d\n", __func__, xi.node,
+		      xi.interface, index);
+
+	/* Read-modify-write the SMU RX frame control CSR */
+	rx_frm_ctl.u64 = csr_rd_node(
+		node, CVMX_BGXX_SMUX_RX_FRM_CTL(index, xi.interface));
+	rx_frm_ctl.s.ctl_bck = ctl_bck;
+	rx_frm_ctl.s.ctl_drp = ctl_drp;
+	csr_wr_node(node, CVMX_BGXX_SMUX_RX_FRM_CTL(index, xi.interface),
+		    rx_frm_ctl.u64);
+}
+
+/**
+ * Configure the receive action taken for multicast, broadcast and
+ * DMAC-filter-match packets on a BGX port.
+ *
+ * @param node         node number
+ * @param interface    interface number
+ * @param index        port number
+ * @param cam_accept   0: reject packets on dmac filter match
+ *                     1: accept packet on dmac filter match
+ * @param mcast_mode   0x0 = Force reject all multicast packets
+ *                     0x1 = Force accept all multicast packets
+ *                     0x2 = Use the address filter CAM
+ * @param bcast_accept 0 = Reject all broadcast packets
+ *                     1 = Accept all broadcast packets
+ */
+void cvmx_helper_bgx_rx_adr_ctl(unsigned int node, unsigned int interface,
+				unsigned int index, unsigned int cam_accept,
+				unsigned int mcast_mode,
+				unsigned int bcast_accept)
+{
+	int xiface = cvmx_helper_node_interface_to_xiface(node, interface);
+	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
+	cvmx_bgxx_cmrx_rx_adr_ctl_t rx_adr_ctl;
+
+	if (debug)
+		debug("%s: interface %u:%d/%d\n", __func__, xi.node,
+		      xi.interface, index);
+
+	/* Read-modify-write the CMR RX address control CSR */
+	rx_adr_ctl.u64 = csr_rd_node(
+		node, CVMX_BGXX_CMRX_RX_ADR_CTL(index, xi.interface));
+	rx_adr_ctl.s.cam_accept = cam_accept;
+	rx_adr_ctl.s.mcst_mode = mcast_mode;
+	rx_adr_ctl.s.bcst_accept = bcast_accept;
+	csr_wr_node(node, CVMX_BGXX_CMRX_RX_ADR_CTL(index, xi.interface),
+		    rx_adr_ctl.u64);
+}
+
+/**
+ * Control the generation of FCS and padding by the BGX on transmit.
+ *
+ * Both the GMP (SGMII/RGMII) and SMU (XAUI-family) sub-blocks are
+ * configured regardless of the current LMAC type, so the settings hold
+ * if the port mode is changed later.
+ *
+ * @param node       node number
+ * @param interface  interface number
+ * @param index      port number
+ * @param fcs_enable true to append FCS to transmitted packets
+ * @param pad_enable true to pad short packets to the minimum size
+ */
+void cvmx_helper_bgx_tx_options(unsigned int node, unsigned int interface,
+ unsigned int index, bool fcs_enable,
+ bool pad_enable)
+{
+ cvmx_bgxx_cmrx_config_t cmr_config;
+ cvmx_bgxx_gmp_gmi_txx_append_t gmp_txx_append;
+ cvmx_bgxx_gmp_gmi_txx_min_pkt_t gmp_min_pkt;
+ cvmx_bgxx_smux_tx_min_pkt_t smu_min_pkt;
+ cvmx_bgxx_smux_tx_append_t smu_tx_append;
+ int xiface = cvmx_helper_node_interface_to_xiface(node, interface);
+ struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
+
+ if (!cvmx_helper_is_port_valid(xiface, index))
+ return;
+
+ if (debug)
+ debug("%s: interface %u:%d/%d, fcs: %s, pad: %s\n", __func__,
+ xi.node, xi.interface, index,
+ fcs_enable ? "true" : "false",
+ pad_enable ? "true" : "false");
+
+ cmr_config.u64 =
+ csr_rd_node(node, CVMX_BGXX_CMRX_CONFIG(index, xi.interface));
+
+ (void)cmr_config; /* In case we need LMAC_TYPE later */
+
+ /* Setting options for both BGX subsystems, regardless of LMAC type */
+
+ /* Set GMP (SGMII) Tx options */
+ gmp_min_pkt.u64 = 0;
+ /* per HRM Sec 34.3.4.4 */
+ gmp_min_pkt.s.min_size = 59;
+ csr_wr_node(node, CVMX_BGXX_GMP_GMI_TXX_MIN_PKT(index, xi.interface),
+ gmp_min_pkt.u64);
+ gmp_txx_append.u64 = csr_rd_node(
+ node, CVMX_BGXX_GMP_GMI_TXX_APPEND(index, xi.interface));
+ gmp_txx_append.s.fcs = fcs_enable;
+ gmp_txx_append.s.pad = pad_enable;
+ csr_wr_node(node, CVMX_BGXX_GMP_GMI_TXX_APPEND(index, xi.interface),
+ gmp_txx_append.u64);
+
+ /* Set SMUX (XAUI/XFI) Tx options */
+ /* HRM Sec 33.3.4.3 should read 64 */
+ smu_min_pkt.u64 = 0;
+ smu_min_pkt.s.min_size = 0x40;
+ csr_wr_node(node, CVMX_BGXX_SMUX_TX_MIN_PKT(index, xi.interface),
+ smu_min_pkt.u64);
+ smu_tx_append.u64 = csr_rd_node(
+ node, CVMX_BGXX_SMUX_TX_APPEND(index, xi.interface));
+ smu_tx_append.s.fcs_d = fcs_enable; /* Set data-packet FCS */
+ smu_tx_append.s.pad = pad_enable;
+ csr_wr_node(node, CVMX_BGXX_SMUX_TX_APPEND(index, xi.interface),
+ smu_tx_append.u64);
+}
+
+/**
+ * Set mac for the ipd_port
+ *
+ * Packet transmit/receive is temporarily disabled around the CAM and
+ * address-control updates and restored afterwards.
+ *
+ * @param xipd_port ipd_port to set the mac
+ * @param bcst If set, accept all broadcast packets
+ * @param mcst Multicast mode
+ * 0 = Force reject all multicast packets
+ * 1 = Force accept all multicast packets
+ * 2 = use the address filter CAM.
+ * @param mac mac address for the ipd_port, or 0 to disable MAC filtering
+ */
+void cvmx_helper_bgx_set_mac(int xipd_port, int bcst, int mcst, u64 mac)
+{
+ int xiface = cvmx_helper_get_interface_num(xipd_port);
+ struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
+ int node = xi.node;
+ int index;
+ cvmx_bgxx_cmr_rx_adrx_cam_t adr_cam;
+ cvmx_bgxx_cmrx_rx_adr_ctl_t adr_ctl;
+ cvmx_bgxx_cmrx_config_t cmr_config;
+ int saved_state_tx, saved_state_rx;
+
+ index = cvmx_helper_get_interface_index_num(xipd_port);
+
+ if (!cvmx_helper_is_port_valid(xiface, index))
+ return;
+
+ if (debug)
+ debug("%s: interface %u:%d/%d\n", __func__, xi.node,
+ xi.interface, index);
+
+ /* Quiesce the port: save and clear packet tx/rx enables */
+ cmr_config.u64 =
+ csr_rd_node(node, CVMX_BGXX_CMRX_CONFIG(index, xi.interface));
+ saved_state_tx = cmr_config.s.data_pkt_tx_en;
+ saved_state_rx = cmr_config.s.data_pkt_rx_en;
+ cmr_config.s.data_pkt_tx_en = 0;
+ cmr_config.s.data_pkt_rx_en = 0;
+ csr_wr_node(node, CVMX_BGXX_CMRX_CONFIG(index, xi.interface),
+ cmr_config.u64);
+
+ /* Set the mac */
+ adr_cam.u64 = 0;
+ adr_cam.s.id = index;
+
+ /* CAM entry is enabled only when filtering on a real address */
+ if (mac != 0ull)
+ adr_cam.s.en = 1;
+ adr_cam.s.adr = mac;
+
+ /* NOTE(review): CAM register index is index * 8 — presumably 8 CAM
+ * entries per LMAC and this writes the LMAC's first entry; confirm
+ * against the HRM.
+ */
+ csr_wr_node(node, CVMX_BGXX_CMR_RX_ADRX_CAM(index * 8, xi.interface),
+ adr_cam.u64);
+
+ adr_ctl.u64 = csr_rd_node(
+ node, CVMX_BGXX_CMRX_RX_ADR_CTL(index, xi.interface));
+ if (mac != 0ull)
+ adr_ctl.s.cam_accept =
+ 1; /* Accept the packet on DMAC CAM address */
+ else
+ adr_ctl.s.cam_accept = 0; /* No filtering, promiscuous */
+
+ adr_ctl.s.mcst_mode = mcst; /* Use the address filter CAM */
+ adr_ctl.s.bcst_accept = bcst; /* Accept all broadcast packets */
+ csr_wr_node(node, CVMX_BGXX_CMRX_RX_ADR_CTL(index, xi.interface),
+ adr_ctl.u64);
+ /* Set SMAC for PAUSE frames */
+ csr_wr_node(node, CVMX_BGXX_GMP_GMI_SMACX(index, xi.interface), mac);
+
+ /* Restore back the interface state */
+ cmr_config.s.data_pkt_tx_en = saved_state_tx;
+ cmr_config.s.data_pkt_rx_en = saved_state_rx;
+ csr_wr_node(node, CVMX_BGXX_CMRX_CONFIG(index, xi.interface),
+ cmr_config.u64);
+
+ /* Wait 100ms after bringing up the link to give the PHY some time */
+ if (cmr_config.s.enable) {
+ cvmx_helper_interface_mode_t mode;
+
+ mode = cvmx_helper_bgx_get_mode(xiface, index);
+ __cvmx_helper_bgx_interface_enable_delay(mode);
+ }
+}
+
+/**
+ * Disables the sending of flow control (pause) frames on the specified
+ * BGX port(s).
+ *
+ * @param xiface Which xiface
+ * @param port_mask Mask (4bits) of which ports on the interface to disable
+ *                  backpressure on.
+ *                  1 => disable backpressure
+ *                  0 => enable backpressure
+ *
+ * @return 0 on success
+ *         -1 on error
+ *
+ * FIXME: Should change the API to handle a single port in every
+ * invocation, for consistency with other API calls.
+ */
+int cvmx_bgx_set_backpressure_override(int xiface, unsigned int port_mask)
+{
+	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
+	cvmx_bgxx_cmr_rx_ovr_bp_t ovr_bp;
+
+	if (xi.interface >= CVMX_HELPER_MAX_GMX)
+		return 0;
+
+	if (debug)
+		debug("%s: interface %u:%d port_mask=%#x\n", __func__, xi.node,
+		      xi.interface, port_mask);
+
+	ovr_bp.u64 = 0;
+	/* Per-port backpressure override enable */
+	ovr_bp.s.en = port_mask;
+	/* Ignore the RX FIFO full when computing backpressure */
+	ovr_bp.s.ign_fifo_bp = port_mask;
+
+	csr_wr_node(xi.node, CVMX_BGXX_CMR_RX_OVR_BP(xi.interface), ovr_bp.u64);
+
+	return 0;
+}
+
+/**
+ * Set the maximum (jabber) packet size for a BGX port.
+ *
+ * The GMP (SGMII/RGMII) or SMU (XAUI-family) jabber CSR is selected
+ * based on the port's current LMAC mode.
+ *
+ * @param xiface global interface number
+ * @param index  port index on the interface
+ * @param size   maximum packet size to accept
+ */
+void cvmx_helper_bgx_set_jabber(int xiface, unsigned int index,
+				unsigned int size)
+{
+	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
+	cvmx_helper_interface_mode_t lmac_mode;
+	int gmii_like;
+
+	if (!octeon_has_feature(OCTEON_FEATURE_BGX))
+		return;
+	if (!cvmx_helper_is_port_valid(xiface, index))
+		return;
+
+	lmac_mode = cvmx_helper_bgx_get_mode(xiface, index);
+	gmii_like = (lmac_mode == CVMX_HELPER_INTERFACE_MODE_SGMII ||
+		     lmac_mode == CVMX_HELPER_INTERFACE_MODE_RGMII);
+
+	/* Set GMI or SMUX register based on lmac_type */
+	if (gmii_like)
+		csr_wr_node(xi.node,
+			    CVMX_BGXX_GMP_GMI_RXX_JABBER(index, xi.interface),
+			    size);
+	else
+		csr_wr_node(xi.node,
+			    CVMX_BGXX_SMUX_RX_JABBER(index, xi.interface),
+			    size);
+}
+
+/**
+ * Shutdown a BGX port
+ *
+ * Disables packet transmit/receive, clears pending interrupts and waits
+ * for the GMP or SMU data path (depending on LMAC type) to go idle.
+ *
+ * @param xiface global interface number
+ * @param index  port index on the interface
+ * @return 0 on success, -1 if the port does not stop within the timeout
+ */
+int cvmx_helper_bgx_shutdown_port(int xiface, int index)
+{
+ cvmx_bgxx_cmrx_config_t cmr_config;
+ int node;
+ struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
+
+ node = xi.node;
+
+ if (xi.interface >= CVMX_HELPER_MAX_GMX)
+ return 0;
+
+ if (debug)
+ debug("%s: interface %u:%d/%d\n", __func__, node, xi.interface,
+ index);
+
+ if (!cvmx_helper_is_port_valid(xiface, index))
+ return 0;
+
+ /* Disable BGX CMR before we make any changes. */
+ cmr_config.u64 =
+ csr_rd_node(node, CVMX_BGXX_CMRX_CONFIG(index, xi.interface));
+
+ cmr_config.s.data_pkt_tx_en = 0;
+ cmr_config.s.data_pkt_rx_en = 0;
+ csr_wr_node(node, CVMX_BGXX_CMRX_CONFIG(index, xi.interface),
+ cmr_config.u64);
+
+ /* Clear pending common interrupts (write-1-to-clear mask) */
+ csr_wr_node(node, CVMX_BGXX_CMRX_INT(index, xi.interface), 0x7);
+
+ if (cmr_config.s.lmac_type == 0 ||
+ cmr_config.s.lmac_type == 5) { /* SGMII */
+ /* Clear GMP interrupts (write-1-to-clear masks) */
+ csr_wr_node(node,
+ CVMX_BGXX_GMP_GMI_RXX_INT(index, xi.interface),
+ 0xfff);
+ csr_wr_node(node,
+ CVMX_BGXX_GMP_GMI_TXX_INT(index, xi.interface),
+ 0x1f);
+ /* Wait for GMX to be idle */
+ if (CVMX_WAIT_FOR_FIELD64_NODE(
+ node,
+ CVMX_BGXX_GMP_GMI_PRTX_CFG(index, xi.interface),
+ cvmx_bgxx_gmp_gmi_prtx_cfg_t, rx_idle, ==, 1,
+ 10000) ||
+ CVMX_WAIT_FOR_FIELD64_NODE(
+ node,
+ CVMX_BGXX_GMP_GMI_PRTX_CFG(index, xi.interface),
+ cvmx_bgxx_gmp_gmi_prtx_cfg_t, tx_idle, ==, 1,
+ 10000)) {
+ printf("ERROR: %s: SGMII: Timeout waiting for port %u:%d/%d to stop\n",
+ __func__, node, xi.interface, index);
+ return -1;
+ }
+ /* Read GMX CFG again to make sure the disable completed */
+ csr_rd_node(node,
+ CVMX_BGXX_GMP_GMI_PRTX_CFG(index, xi.interface));
+ /* FIXME Disable RGMII interface */
+ } else { /* XAUI/XFI/10-KR */
+ /* Clear all pending SMUX interrupts (write-1-to-clear masks) */
+ csr_wr_node(node, CVMX_BGXX_SMUX_RX_INT(index, xi.interface),
+ 0xfff);
+ csr_wr_node(node, CVMX_BGXX_SMUX_TX_INT(index, xi.interface),
+ 0x1f);
+ csr_wr_node(node, CVMX_BGXX_SPUX_INT(index, xi.interface),
+ 0x7fff);
+
+ /* Wait for GMX RX to be idle */
+ if (CVMX_WAIT_FOR_FIELD64_NODE(
+ node, CVMX_BGXX_SMUX_CTRL(index, xi.interface),
+ cvmx_bgxx_smux_ctrl_t, rx_idle, ==, 1, 10000) ||
+ CVMX_WAIT_FOR_FIELD64_NODE(
+ node, CVMX_BGXX_SMUX_CTRL(index, xi.interface),
+ cvmx_bgxx_smux_ctrl_t, tx_idle, ==, 1, 10000)) {
+ printf("ERROR: %s: XAUI: Timeout waiting for port %u:%d/%d to stop\n",
+ __func__, node, xi.interface, index);
+ return -1;
+ }
+ }
+ return 0;
+}
+
+/**
+ * Program the PAUSE-packet parameters (addresses, ethertype, timing)
+ * for a BGX port, writing either the SMU (XAUI-family) or GMP
+ * (SGMII/RGMII) register set depending on the port's LMAC mode.
+ *
+ * @param xipd_port global IPD port number
+ * @param smac     source MAC address placed in transmitted PAUSE frames
+ * @param dmac     destination MAC address for transmitted PAUSE frames
+ * @param type     ethertype field for transmitted PAUSE frames
+ * @param time     pause time value sent in PAUSE frames
+ * @param interval interval between PAUSE frame transmissions
+ * @return 0 always
+ */
+int cvmx_bgx_set_pause_pkt_param(int xipd_port, u64 smac, u64 dmac,
+ unsigned int type, unsigned int time,
+ unsigned int interval)
+{
+ int node, xiface, iface, index, mode;
+ struct cvmx_xiface xi;
+
+ xiface = cvmx_helper_get_interface_num(xipd_port);
+ xi = cvmx_helper_xiface_to_node_interface(xiface);
+
+ if (xi.interface >= CVMX_HELPER_MAX_GMX)
+ return 0;
+
+ node = xi.node;
+ iface = xi.interface;
+ index = cvmx_helper_get_interface_index_num(xipd_port);
+
+ mode = cvmx_helper_bgx_get_mode(xiface, index);
+ switch (mode) {
+ case CVMX_HELPER_INTERFACE_MODE_10G_KR:
+ case CVMX_HELPER_INTERFACE_MODE_40G_KR4:
+ case CVMX_HELPER_INTERFACE_MODE_XLAUI:
+ case CVMX_HELPER_INTERFACE_MODE_XFI:
+ case CVMX_HELPER_INTERFACE_MODE_RXAUI:
+ case CVMX_HELPER_INTERFACE_MODE_XAUI: {
+ /* XAUI-family ports use the per-port SMU registers */
+ cvmx_bgxx_smux_smac_t psmac;
+ cvmx_bgxx_smux_tx_pause_pkt_dmac_t pdmac;
+ cvmx_bgxx_smux_tx_pause_pkt_type_t ptype;
+ cvmx_bgxx_smux_tx_pause_pkt_time_t ptime;
+ cvmx_bgxx_smux_tx_pause_pkt_interval_t interv;
+
+ psmac.u64 =
+ csr_rd_node(node, CVMX_BGXX_SMUX_SMAC(index, iface));
+ psmac.s.smac = smac;
+ csr_wr_node(node, CVMX_BGXX_SMUX_SMAC(index, iface), psmac.u64);
+
+ pdmac.u64 = csr_rd_node(
+ node, CVMX_BGXX_SMUX_TX_PAUSE_PKT_DMAC(index, iface));
+ pdmac.s.dmac = dmac;
+ csr_wr_node(node,
+ CVMX_BGXX_SMUX_TX_PAUSE_PKT_DMAC(index, iface),
+ pdmac.u64);
+
+ ptype.u64 = csr_rd_node(
+ node, CVMX_BGXX_SMUX_TX_PAUSE_PKT_TYPE(index, iface));
+ ptype.s.p_type = type;
+ csr_wr_node(node,
+ CVMX_BGXX_SMUX_TX_PAUSE_PKT_TYPE(index, iface),
+ ptype.u64);
+
+ ptime.u64 = csr_rd_node(
+ node, CVMX_BGXX_SMUX_TX_PAUSE_PKT_TIME(index, iface));
+ ptime.s.p_time = time;
+ csr_wr_node(node,
+ CVMX_BGXX_SMUX_TX_PAUSE_PKT_TIME(index, iface),
+ ptime.u64);
+
+ interv.u64 = csr_rd_node(
+ node,
+ CVMX_BGXX_SMUX_TX_PAUSE_PKT_INTERVAL(index, iface));
+ interv.s.interval = interval;
+ csr_wr_node(node,
+ CVMX_BGXX_SMUX_TX_PAUSE_PKT_INTERVAL(index, iface),
+ interv.u64);
+ break;
+ }
+ case CVMX_HELPER_INTERFACE_MODE_SGMII:
+ case CVMX_HELPER_INTERFACE_MODE_RGMII: {
+ /* SGMII/RGMII use the GMP registers; note DMAC and TYPE are
+ * per-interface CSRs while SMAC, TIME and INTERVAL are
+ * per-port.
+ */
+ cvmx_bgxx_gmp_gmi_smacx_t psmac;
+ cvmx_bgxx_gmp_gmi_tx_pause_pkt_dmac_t pdmac;
+ cvmx_bgxx_gmp_gmi_tx_pause_pkt_type_t ptype;
+ cvmx_bgxx_gmp_gmi_txx_pause_pkt_time_t ptime;
+ cvmx_bgxx_gmp_gmi_txx_pause_pkt_interval_t interv;
+
+ psmac.u64 = csr_rd_node(node,
+ CVMX_BGXX_GMP_GMI_SMACX(index, iface));
+ psmac.s.smac = smac;
+ csr_wr_node(node, CVMX_BGXX_GMP_GMI_SMACX(index, iface),
+ psmac.u64);
+
+ pdmac.u64 = csr_rd_node(
+ node, CVMX_BGXX_GMP_GMI_TX_PAUSE_PKT_DMAC(iface));
+ pdmac.s.dmac = dmac;
+ csr_wr_node(node, CVMX_BGXX_GMP_GMI_TX_PAUSE_PKT_DMAC(iface),
+ pdmac.u64);
+
+ ptype.u64 = csr_rd_node(
+ node, CVMX_BGXX_GMP_GMI_TX_PAUSE_PKT_TYPE(iface));
+ ptype.s.ptype = type;
+ csr_wr_node(node, CVMX_BGXX_GMP_GMI_TX_PAUSE_PKT_TYPE(iface),
+ ptype.u64);
+
+ ptime.u64 = csr_rd_node(
+ node,
+ CVMX_BGXX_GMP_GMI_TXX_PAUSE_PKT_TIME(index, iface));
+ ptime.s.ptime = time;
+ csr_wr_node(node,
+ CVMX_BGXX_GMP_GMI_TXX_PAUSE_PKT_TIME(index, iface),
+ ptime.u64);
+
+ interv.u64 = csr_rd_node(
+ node,
+ CVMX_BGXX_GMP_GMI_TXX_PAUSE_PKT_INTERVAL(index, iface));
+ interv.s.interval = interval;
+ csr_wr_node(node,
+ CVMX_BGXX_GMP_GMI_TXX_PAUSE_PKT_INTERVAL(index,
+ iface),
+ interv.u64);
+ break;
+ }
+ default:
+ /* Other modes: nothing to program */
+ break;
+ } /* switch*/
+ return 0;
+}
+
+/**
+ * Configure the flow-control protocol (802.3 PAUSE, PFC, or none) and
+ * the packet-handling mode for a BGX port.
+ *
+ * @param xipd_port global IPD port number
+ * @param qos       flow-control protocol to use (PAUSE, PFC, or NONE)
+ * @param fc_mode   how PAUSE/PFC frames are handled (HW only, SW only,
+ *                  both, or dropped); used to index the fcmode table
+ * @return 0 always
+ *
+ * NOTE(review): fc_mode is used as an array index without a range
+ * check, and the outer switch has no default case for other interface
+ * modes (they fall through as no-ops) — confirm callers only pass the
+ * four defined cvmx_qos_pkt_mode_t values.
+ */
+int cvmx_bgx_set_flowctl_mode(int xipd_port, cvmx_qos_proto_t qos,
+ cvmx_qos_pkt_mode_t fc_mode)
+{
+ int node, xiface, iface, index, mode;
+ struct cvmx_xiface xi;
+ /* Map each packet-handling mode to the ctl_bck/ctl_drp pair */
+ const struct {
+ int bck;
+ int drp;
+ } fcmode[4] = { [CVMX_QOS_PKT_MODE_HWONLY] = { 1, 1 },
+ [CVMX_QOS_PKT_MODE_SWONLY] = { 0, 0 },
+ [CVMX_QOS_PKT_MODE_HWSW] = { 1, 0 },
+ [CVMX_QOS_PKT_MODE_DROP] = { 0, 1 } };
+
+ xiface = cvmx_helper_get_interface_num(xipd_port);
+ xi = cvmx_helper_xiface_to_node_interface(xiface);
+ node = xi.node;
+ iface = xi.interface;
+
+ if (xi.interface >= CVMX_HELPER_MAX_GMX)
+ return 0;
+
+ index = cvmx_helper_get_interface_index_num(xipd_port);
+ mode = cvmx_helper_bgx_get_mode(xiface, index);
+ switch (mode) {
+ case CVMX_HELPER_INTERFACE_MODE_10G_KR:
+ case CVMX_HELPER_INTERFACE_MODE_40G_KR4:
+ case CVMX_HELPER_INTERFACE_MODE_XLAUI:
+ case CVMX_HELPER_INTERFACE_MODE_XFI:
+ case CVMX_HELPER_INTERFACE_MODE_RXAUI:
+ case CVMX_HELPER_INTERFACE_MODE_XAUI: {
+ /* XAUI-family: program the SMU flow-control CSRs */
+ cvmx_bgxx_smux_tx_ctl_t txctl;
+ cvmx_bgxx_smux_cbfc_ctl_t cbfc;
+ cvmx_bgxx_smux_rx_frm_ctl_t frmctl;
+ cvmx_bgxx_smux_hg2_control_t hg2ctl;
+
+ txctl.u64 =
+ csr_rd_node(node, CVMX_BGXX_SMUX_TX_CTL(index, iface));
+ cbfc.u64 = csr_rd_node(node,
+ CVMX_BGXX_SMUX_CBFC_CTL(index, iface));
+ frmctl.u64 = csr_rd_node(
+ node, CVMX_BGXX_SMUX_RX_FRM_CTL(index, iface));
+ hg2ctl.u64 = csr_rd_node(
+ node, CVMX_BGXX_SMUX_HG2_CONTROL(index, iface));
+ switch (qos) {
+ case CVMX_QOS_PROTO_PAUSE:
+ /* Classic 802.3 PAUSE: disable CBFC and HiGig2 */
+ cbfc.u64 = 0;
+ hg2ctl.u64 = 0;
+ frmctl.s.ctl_bck = fcmode[fc_mode].bck;
+ frmctl.s.ctl_drp = fcmode[fc_mode].drp;
+ frmctl.s.ctl_mcst = 1;
+ txctl.s.l2p_bp_conv = 1;
+ break;
+ case CVMX_QOS_PROTO_PFC:
+ /* Priority-based flow control on all 8 classes */
+ hg2ctl.u64 = 0;
+ hg2ctl.s.logl_en = 0xff;
+ frmctl.s.ctl_bck = fcmode[fc_mode].bck;
+ frmctl.s.ctl_drp = fcmode[fc_mode].drp;
+ frmctl.s.ctl_mcst = 1;
+ cbfc.s.bck_en = fcmode[fc_mode].bck;
+ cbfc.s.drp_en = fcmode[fc_mode].drp;
+ cbfc.s.phys_en = 0;
+ cbfc.s.logl_en = 0xff;
+ cbfc.s.tx_en = 1;
+ cbfc.s.rx_en = 1;
+ break;
+ case CVMX_QOS_PROTO_NONE:
+ /* No flow control: drop all control frames */
+ cbfc.u64 = 0;
+ hg2ctl.u64 = 0;
+ frmctl.s.ctl_bck = fcmode[CVMX_QOS_PKT_MODE_DROP].bck;
+ frmctl.s.ctl_drp = fcmode[CVMX_QOS_PKT_MODE_DROP].drp;
+ txctl.s.l2p_bp_conv = 0;
+ break;
+ default:
+ break;
+ }
+ csr_wr_node(node, CVMX_BGXX_SMUX_CBFC_CTL(index, iface),
+ cbfc.u64);
+ csr_wr_node(node, CVMX_BGXX_SMUX_RX_FRM_CTL(index, iface),
+ frmctl.u64);
+ csr_wr_node(node, CVMX_BGXX_SMUX_HG2_CONTROL(index, iface),
+ hg2ctl.u64);
+ csr_wr_node(node, CVMX_BGXX_SMUX_TX_CTL(index, iface),
+ txctl.u64);
+ break;
+ }
+ case CVMX_HELPER_INTERFACE_MODE_SGMII:
+ case CVMX_HELPER_INTERFACE_MODE_RGMII: {
+ /* SGMII/RGMII: only the GMI RX frame control is programmed */
+ cvmx_bgxx_gmp_gmi_rxx_frm_ctl_t gmi_frmctl;
+
+ gmi_frmctl.u64 = csr_rd_node(
+ node, CVMX_BGXX_GMP_GMI_RXX_FRM_CTL(index, iface));
+ switch (qos) {
+ case CVMX_QOS_PROTO_PAUSE:
+ gmi_frmctl.s.ctl_bck = fcmode[fc_mode].bck;
+ gmi_frmctl.s.ctl_drp = fcmode[fc_mode].drp;
+ gmi_frmctl.s.ctl_mcst = 1;
+ break;
+ case CVMX_QOS_PROTO_NONE:
+ gmi_frmctl.s.ctl_bck =
+ fcmode[CVMX_QOS_PKT_MODE_DROP].bck;
+ gmi_frmctl.s.ctl_drp =
+ fcmode[CVMX_QOS_PKT_MODE_DROP].drp;
+ break;
+ default:
+ break;
+ }
+ csr_wr_node(node, CVMX_BGXX_GMP_GMI_RXX_FRM_CTL(index, iface),
+ gmi_frmctl.u64);
+ }
+ } /*switch*/
+
+ return 0;
+}
--
2.35.1
^ permalink raw reply related [flat|nested] 52+ messages in thread* [PATCH 11/52] mips: octeon: Add cvmx-helper-board.c
2022-03-30 10:06 [PATCH 00/52] mips: octeon: Add ethernet support Stefan Roese
` (9 preceding siblings ...)
2022-03-30 10:06 ` [PATCH 10/52] mips: octeon: Add cvmx-helper-bgx.c Stefan Roese
@ 2022-03-30 10:06 ` Stefan Roese
2022-03-30 10:06 ` [PATCH 12/52] mips: octeon: Add cvmx-helper-fpa.c Stefan Roese
` (38 subsequent siblings)
49 siblings, 0 replies; 52+ messages in thread
From: Stefan Roese @ 2022-03-30 10:06 UTC (permalink / raw)
To: u-boot; +Cc: awilliams, cchavva
From: Aaron Williams <awilliams@marvell.com>
Import cvmx-helper-board.c from 2013 U-Boot. It will be used by the later
added drivers to support networking on the MIPS Octeon II / III
platforms.
Signed-off-by: Aaron Williams <awilliams@marvell.com>
Signed-off-by: Stefan Roese <sr@denx.de>
---
arch/mips/mach-octeon/cvmx-helper-board.c | 2030 +++++++++++++++++++++
1 file changed, 2030 insertions(+)
create mode 100644 arch/mips/mach-octeon/cvmx-helper-board.c
diff --git a/arch/mips/mach-octeon/cvmx-helper-board.c b/arch/mips/mach-octeon/cvmx-helper-board.c
new file mode 100644
index 000000000000..783beb4b178e
--- /dev/null
+++ b/arch/mips/mach-octeon/cvmx-helper-board.c
@@ -0,0 +1,2030 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018-2022 Marvell International Ltd.
+ *
+ * Helper functions to abstract board specific data about
+ * network ports from the rest of the cvmx-helper files.
+ */
+
+#include <i2c.h>
+#include <log.h>
+#include <malloc.h>
+#include <net.h>
+#include <linux/delay.h>
+
+#include <mach/cvmx-regs.h>
+#include <mach/cvmx-csr.h>
+#include <mach/cvmx-bootmem.h>
+#include <mach/octeon-model.h>
+#include <mach/octeon_fdt.h>
+#include <mach/cvmx-helper.h>
+#include <mach/cvmx-helper-board.h>
+#include <mach/cvmx-helper-cfg.h>
+#include <mach/cvmx-helper-fdt.h>
+#include <mach/cvmx-helper-gpio.h>
+
+#include <mach/cvmx-smix-defs.h>
+#include <mach/cvmx-mdio.h>
+#include <mach/cvmx-qlm.h>
+
+DECLARE_GLOBAL_DATA_PTR;
+
+static bool sfp_parsed;
+
+static int __cvmx_helper_78xx_parse_phy(struct cvmx_phy_info *phy_info,
+ int ipd_port);
+static int __get_phy_info_from_dt(cvmx_phy_info_t *phy_info, int ipd_port);
+
+/**
+ * Writes to a Microsemi VSC7224 16-bit register
+ *
+ * @param[in] i2c_bus i2c bus data structure (must be enabled)
+ * @param addr Address of VSC7224 on the i2c bus
+ * @param reg 8-bit register number to write to
+ * @param val 16-bit value to write
+ *
+ * @return 0 for success, -1 on error
+ */
+static int cvmx_write_vsc7224_reg(const struct cvmx_fdt_i2c_bus_info *i2c_bus,
+				  u8 addr, u8 reg, u16 val)
+{
+	struct udevice *dev;
+	u8 buffer[2];
+	int ret;
+
+	ret = i2c_get_chip(i2c_bus->i2c_bus, addr, 1, &dev);
+	if (ret) {
+		debug("Cannot find I2C device: %d\n", ret);
+		return -1;
+	}
+
+	/* The register value goes out big-endian; the original code sent
+	 * an uninitialized buffer here instead of "val".
+	 */
+	buffer[0] = val >> 8;
+	buffer[1] = val & 0xff;
+
+	ret = dm_i2c_write(dev, reg, buffer, 2);
+	if (ret) {
+		debug("Cannot write I2C device: %d\n", ret);
+		return -1;
+	}
+
+	return 0;
+}
+
+/**
+ * Reads a Microsemi VSC7224 16-bit register
+ *
+ * @param[in] i2c_bus i2c bus data structure (must be enabled)
+ * @param addr Address of VSC7224 on the i2c bus
+ * @param reg 8-bit register number to read from
+ *
+ * @return 16-bit value or error if < 0
+ */
+static int cvmx_read_vsc7224_reg(const struct cvmx_fdt_i2c_bus_info *i2c_bus,
+				 u8 addr, u8 reg)
+{
+	struct udevice *dev;
+	u8 data[2];
+	int err;
+
+	err = i2c_get_chip(i2c_bus->i2c_bus, addr, 1, &dev);
+	if (err) {
+		debug("Cannot find I2C device: %d\n", err);
+		return -1;
+	}
+
+	err = dm_i2c_read(dev, reg, data, 2);
+	if (err) {
+		debug("Cannot read I2C device: %d\n", err);
+		return -1;
+	}
+
+	/* Register contents arrive big-endian */
+	return (data[0] << 8) | data[1];
+}
+
+/**
+ * @INTERNAL
+ * Return loss of signal
+ *
+ * @param xiface xinterface number
+ * @param index port index on interface
+ *
+ * @return 0 if signal present, 1 if loss of signal.
+ *
+ * @NOTE: A result of 0 is possible in some cases where the signal is
+ * not present.
+ *
+ * This is for use with __cvmx_qlm_rx_equilization
+ */
+int __cvmx_helper_get_los(int xiface, int index)
+{
+	struct cvmx_fdt_sfp_info *sfp;
+	struct cvmx_vsc7224_chan *vsc7224_chan;
+	struct cvmx_vsc7224 *vsc7224;
+	int los = 0;
+	int val;
+	int mode = cvmx_helper_interface_get_mode(xiface);
+
+	sfp = cvmx_helper_cfg_get_sfp_info(xiface, index);
+
+	/* Check all SFP slots in the group
+	 * NOTE: Usually there is only one SFP or QSFP slot except in the case
+	 * where multiple SFP+ slots are grouped together for XLAUI mode.
+	 */
+	while (sfp && sfp->check_mod_abs) {
+		los = sfp->check_mod_abs(sfp, sfp->mod_abs_data);
+		if (los >= 0)
+			cvmx_sfp_validate_module(sfp, mode);
+		/* A module reported absent (los != 0) or a failed presence
+		 * check (los < 0) both count as loss of signal.
+		 */
+		if (los || los < 0) {
+			debug("%s(0x%x, %d): los detected (mod_abs) los: %d\n",
+			      __func__, xiface, index, los);
+			return 1;
+		}
+		vsc7224_chan = sfp->vsc7224_chan;
+		while (vsc7224_chan) {
+			u64 done;
+			int channel_num = vsc7224_chan->lane;
+			int los_bit = 1 << channel_num;
+			int lol_bit = 0x10 << channel_num;
+
+			/* We only care about receive channels so skip the
+			 * transmit ones.  Also skip channels belonging to a
+			 * different port index or interface (XFI mode).
+			 */
+			if (vsc7224_chan->is_tx ||
+			    vsc7224_chan->index != index ||
+			    vsc7224_chan->xiface != xiface) {
+				vsc7224_chan = vsc7224_chan->next;
+				continue;
+			}
+
+			vsc7224 = vsc7224_chan->vsc7224;
+			/* Poll for LoS/LoL for 2ms (20 x 100us below).
+			 * Register 0x7f selects the register page.
+			 */
+			cvmx_write_vsc7224_reg(vsc7224->i2c_bus,
+					       vsc7224->i2c_addr, 0x7f, 0x40);
+			done = 20;
+			do {
+				val = cvmx_read_vsc7224_reg(vsc7224->i2c_bus,
+							    vsc7224->i2c_addr,
+							    0xc0);
+				val &= (los_bit | lol_bit);
+				if (val) {
+					debug("%s(0x%x, %d): LOS/LOL detected from VSC7224: 0x%x\n",
+					      __func__, xiface, index, val);
+					return 1;
+				}
+				udelay(100);
+			} while (done--);
+
+			/* Move to the next channel */
+			vsc7224_chan = vsc7224_chan->next;
+		}
+		/* Move to the next SFP+ slot */
+		sfp = sfp->next;
+	}
+	debug("%s(0x%x, %d): los: 0\n", __func__, xiface, index);
+	return 0;
+}
+
+/**
+ * Function called whenever mod_abs/mod_prs has changed for Microsemi VSC7224
+ *
+ * @param sfp pointer to SFP data structure
+ * @param val 1 if absent, 0 if present, otherwise not set
+ * @param data user-defined data
+ *
+ * @return 0 for success, -1 on error
+ */
+int cvmx_sfp_vsc7224_mod_abs_changed(struct cvmx_fdt_sfp_info *sfp, int val,
+				     void *data)
+{
+	int err;
+	struct cvmx_sfp_mod_info *mod_info;
+	int length;
+	struct cvmx_vsc7224 *vsc7224;
+	struct cvmx_vsc7224_chan *vsc7224_chan;
+	struct cvmx_vsc7224_tap *taps, *match = NULL;
+	int i;
+
+	debug("%s(%s, %d, %p): Module %s\n", __func__, sfp->name, val, data,
+	      val ? "absent" : "present");
+	/* Nothing to (re)program when the module was removed */
+	if (val)
+		return 0;
+
+	/* We're here if we detect that the module is now present */
+	err = cvmx_sfp_read_i2c_eeprom(sfp);
+	if (err) {
+		debug("%s: Error reading the SFP module eeprom for %s\n",
+		      __func__, sfp->name);
+		return err;
+	}
+	mod_info = &sfp->sfp_info;
+
+	if (!mod_info->valid || !sfp->valid) {
+		debug("%s: Module data is invalid\n", __func__);
+		return -1;
+	}
+
+	/* Program the equalization taps on every VSC7224 transmit channel
+	 * routed to this SFP slot.
+	 */
+	vsc7224_chan = sfp->vsc7224_chan;
+	while (vsc7224_chan) {
+		/* We don't do any rx tuning */
+		if (!vsc7224_chan->is_tx) {
+			vsc7224_chan = vsc7224_chan->next;
+			continue;
+		}
+
+		/* Walk through all the channels */
+		taps = vsc7224_chan->taps;
+		/* Limiting (non-copper) modules use the zero-length entry;
+		 * copper modules pick taps by maximum cable length.
+		 */
+		if (mod_info->limiting)
+			length = 0;
+		else
+			length = mod_info->max_copper_cable_len;
+		debug("%s: limiting: %d, length: %d\n", __func__,
+		      mod_info->limiting, length);
+
+		/* Find a matching length in the taps table: the last entry
+		 * whose length does not exceed "length" wins.
+		 */
+		for (i = 0; i < vsc7224_chan->num_taps; i++) {
+			if (length >= taps->len)
+				match = taps;
+			taps++;
+		}
+		if (!match) {
+			debug("%s(%s, %d, %p): Error: no matching tap for length %d\n",
+			      __func__, sfp->name, val, data, length);
+			return -1;
+		}
+		debug("%s(%s): Applying %cx taps to vsc7224 %s:%d for cable length %d+\n",
+		      __func__, sfp->name, vsc7224_chan->is_tx ? 't' : 'r',
+		      vsc7224_chan->vsc7224->name, vsc7224_chan->lane,
+		      match->len);
+		/* Program the taps; register 0x7f selects the lane's page */
+		vsc7224 = vsc7224_chan->vsc7224;
+		cvmx_write_vsc7224_reg(vsc7224->i2c_bus, vsc7224->i2c_addr,
+				       0x7f, vsc7224_chan->lane);
+		if (!vsc7224_chan->maintap_disable)
+			cvmx_write_vsc7224_reg(vsc7224->i2c_bus,
+					       vsc7224->i2c_addr, 0x99,
+					       match->main_tap);
+		if (!vsc7224_chan->pretap_disable)
+			cvmx_write_vsc7224_reg(vsc7224->i2c_bus,
+					       vsc7224->i2c_addr, 0x9a,
+					       match->pre_tap);
+		if (!vsc7224_chan->posttap_disable)
+			cvmx_write_vsc7224_reg(vsc7224->i2c_bus,
+					       vsc7224->i2c_addr, 0x9b,
+					       match->post_tap);
+
+		/* Re-use val and disable taps if needed */
+		if (vsc7224_chan->maintap_disable ||
+		    vsc7224_chan->pretap_disable ||
+		    vsc7224_chan->posttap_disable) {
+			val = cvmx_read_vsc7224_reg(vsc7224->i2c_bus,
+						    vsc7224->i2c_addr, 0x97);
+			if (vsc7224_chan->maintap_disable)
+				val |= 0x800;
+			if (vsc7224_chan->pretap_disable)
+				val |= 0x1000;
+			if (vsc7224_chan->posttap_disable)
+				val |= 0x400;
+			cvmx_write_vsc7224_reg(vsc7224->i2c_bus,
+					       vsc7224->i2c_addr, 0x97, val);
+		}
+		vsc7224_chan = vsc7224_chan->next;
+	}
+
+	return err;
+}
+
+/**
+ * Update the mod_abs and error LED
+ *
+ * @param ipd_port ipd port number
+ * @param sfp_info SFP slot descriptor to re-check
+ * @param link link information (currently unused here)
+ */
+static void __cvmx_helper_update_sfp(int ipd_port,
+				     struct cvmx_fdt_sfp_info *sfp_info,
+				     cvmx_helper_link_info_t link)
+{
+	debug("%s(%d): checking mod_abs\n", __func__, ipd_port);
+
+	/* Re-evaluate module-absent state; any LED work happens in the
+	 * registered mod_abs callbacks.
+	 */
+	cvmx_sfp_check_mod_abs(sfp_info, sfp_info->mod_abs_data);
+}
+
+/* Re-check module-absent state on every SFP slot of an interface when
+ * the link is reported up but the module was last seen absent.
+ */
+static void cvmx_sfp_update_link(struct cvmx_fdt_sfp_info *sfp,
+				 cvmx_helper_link_info_t link)
+{
+	struct cvmx_fdt_sfp_info *slot;
+
+	for (slot = sfp; slot; slot = slot->next_iface_sfp) {
+		debug("%s(%s): checking mod_abs\n", __func__, slot->name);
+		if (link.s.link_up && slot->last_mod_abs)
+			cvmx_sfp_check_mod_abs(slot, slot->mod_abs_data);
+	}
+}
+
+/**
+ * @INTERNAL
+ * This function is used to determine the link state of ethernet ports.
+ * This function uses the device tree information to determine the phy
+ * address and type of PHY. The only supported PHYs are Marvell and
+ * Broadcom.
+ *
+ * @param ipd_port IPD input port associated with the port we want to get link
+ * status for.
+ *
+ * @return The ports link status. If the link isn't fully resolved, this must
+ * return zero.
+ */
+cvmx_helper_link_info_t __cvmx_helper_board_link_get_from_dt(int ipd_port)
+{
+	cvmx_helper_link_info_t result;
+	cvmx_phy_info_t *phy_info = NULL;
+	cvmx_phy_info_t local_phy_info;
+	int xiface = 0, index = 0;
+	bool use_inband = false;
+	struct cvmx_fdt_sfp_info *sfp_info;
+	const void *fdt_addr = CASTPTR(const void *, gd->fdt_blob);
+
+	result.u64 = 0;
+
+	if (ipd_port >= 0) {
+		int mode;
+
+		xiface = cvmx_helper_get_interface_num(ipd_port);
+		index = cvmx_helper_get_interface_index_num(ipd_port);
+		mode = cvmx_helper_interface_get_mode(xiface);
+		/* Ports with autonegotiation disabled are reported as always
+		 * up at the interface mode's fixed speed.
+		 */
+		if (!cvmx_helper_get_port_autonegotiation(xiface, index)) {
+			result.s.link_up = 1;
+			result.s.full_duplex = 1;
+			switch (mode) {
+			case CVMX_HELPER_INTERFACE_MODE_RGMII:
+			case CVMX_HELPER_INTERFACE_MODE_GMII:
+			case CVMX_HELPER_INTERFACE_MODE_SGMII:
+			case CVMX_HELPER_INTERFACE_MODE_QSGMII:
+			case CVMX_HELPER_INTERFACE_MODE_AGL:
+			case CVMX_HELPER_INTERFACE_MODE_SPI:
+				if (OCTEON_IS_MODEL(OCTEON_CN70XX)) {
+					struct cvmx_xiface xi =
+						cvmx_helper_xiface_to_node_interface(
+							xiface);
+					u64 gbaud = cvmx_qlm_get_gbaud_mhz(0);
+
+					/* Data rate derived from the DLM baud
+					 * rate (x8/10), then divided by the
+					 * lane sharing of the DLM mode.
+					 */
+					result.s.speed = gbaud * 8 / 10;
+					if (cvmx_qlm_get_dlm_mode(
+						    0, xi.interface) ==
+					    CVMX_QLM_MODE_SGMII)
+						result.s.speed >>= 1;
+					else
+						result.s.speed >>= 2;
+				} else {
+					result.s.speed = 1000;
+				}
+				break;
+			case CVMX_HELPER_INTERFACE_MODE_RXAUI:
+			case CVMX_HELPER_INTERFACE_MODE_XAUI:
+			case CVMX_HELPER_INTERFACE_MODE_10G_KR:
+			case CVMX_HELPER_INTERFACE_MODE_XFI:
+				result.s.speed = 10000;
+				break;
+			case CVMX_HELPER_INTERFACE_MODE_XLAUI:
+			case CVMX_HELPER_INTERFACE_MODE_40G_KR4:
+				result.s.speed = 40000;
+				break;
+			default:
+				break;
+			}
+
+			sfp_info = cvmx_helper_cfg_get_sfp_info(xiface, index);
+			/* Initialize the SFP info if it hasn't already been
+			 * done.
+			 */
+			if (!sfp_info && !sfp_parsed) {
+				cvmx_sfp_parse_device_tree(fdt_addr);
+				sfp_parsed = true;
+				cvmx_sfp_read_all_modules();
+				sfp_info = cvmx_helper_cfg_get_sfp_info(xiface,
+									index);
+			}
+			/* If the link is down or the link is up but we still
+			 * register the module as being absent, re-check
+			 * mod_abs.
+			 */
+			cvmx_sfp_update_link(sfp_info, result);
+
+			cvmx_helper_update_link_led(xiface, index, result);
+
+			return result;
+		}
+		phy_info = cvmx_helper_get_port_phy_info(xiface, index);
+		if (!phy_info) {
+			debug("%s: phy info not saved in config, allocating for 0x%x:%d\n",
+			      __func__, xiface, index);
+
+			phy_info = (cvmx_phy_info_t *)cvmx_bootmem_alloc(
+				sizeof(*phy_info), 0);
+			if (!phy_info) {
+				debug("%s: Out of memory\n", __func__);
+				return result;
+			}
+			memset(phy_info, 0, sizeof(*phy_info));
+			phy_info->phy_addr = -1;
+			debug("%s: Setting phy info for 0x%x:%d to %p\n",
+			      __func__, xiface, index, phy_info);
+			cvmx_helper_set_port_phy_info(xiface, index, phy_info);
+		}
+	} else {
+		/* For management ports we don't store the PHY information
+		 * so we use a local copy instead.
+		 */
+		phy_info = &local_phy_info;
+		memset(phy_info, 0, sizeof(*phy_info));
+		phy_info->phy_addr = -1;
+	}
+
+	/* A PHY address of -1 means it has not been parsed from the DT yet */
+	if (phy_info->phy_addr == -1) {
+		if (octeon_has_feature(OCTEON_FEATURE_BGX)) {
+			if (__cvmx_helper_78xx_parse_phy(phy_info, ipd_port)) {
+				phy_info->phy_addr = -1;
+				use_inband = true;
+			}
+		} else if (__get_phy_info_from_dt(phy_info, ipd_port) < 0) {
+			phy_info->phy_addr = -1;
+			use_inband = true;
+		}
+	}
+
+	/* If we can't get the PHY info from the device tree then try
+	 * the inband state.
+	 */
+	if (use_inband) {
+		result.s.full_duplex = 1;
+		result.s.link_up = 1;
+		result.s.speed = 1000;
+		return result;
+	}
+
+	if (phy_info->phy_addr < 0)
+		return result;
+
+	/* Use the PHY-specific link routine when one was assigned during
+	 * parsing, otherwise fall back to the generic helper.
+	 */
+	if (phy_info->link_function)
+		result = phy_info->link_function(phy_info);
+	else
+		result = cvmx_helper_link_get(ipd_port);
+
+	sfp_info = cvmx_helper_cfg_get_sfp_info(xiface, index);
+	while (sfp_info) {
+		/* If the link is down or the link is up but we still register
+		 * the module as being absent, re-check mod_abs.
+		 */
+		if (!result.s.link_up ||
+		    (result.s.link_up && sfp_info->last_mod_abs))
+			__cvmx_helper_update_sfp(ipd_port, sfp_info, result);
+		sfp_info = sfp_info->next_iface_sfp;
+	}
+
+	return result;
+}
+
+/**
+ * Get the link state of an IPD port.
+ *
+ * @param ipd_port IPD port to query
+ *
+ * @return link state information, resolved from the device tree / PHY.
+ */
+cvmx_helper_link_info_t __cvmx_helper_board_link_get(int ipd_port)
+{
+	/* The device-tree path fully determines the result; the local
+	 * "result" variable the original set to 0 here was dead code.
+	 */
+	return __cvmx_helper_board_link_get_from_dt(ipd_port);
+}
+
+/* Stub: board-specific link LED handling is not implemented in U-Boot */
+void cvmx_helper_update_link_led(int xiface, int index,
+				 cvmx_helper_link_info_t result)
+{
+}
+
+/* Stub: board-specific error LED handling is not implemented in U-Boot */
+void cvmx_helper_leds_show_error(struct cvmx_phy_gpio_leds *leds, bool error)
+{
+}
+
+/**
+ * Board-specific interface probe hook.  This default implementation
+ * accepts the port count detected by the generic code unchanged.
+ *
+ * @param interface interface being probed
+ * @param supported_ports number of ports the generic code detected
+ *
+ * @return number of ports actually present
+ */
+int __cvmx_helper_board_interface_probe(int interface, int supported_ports)
+{
+	return supported_ports;
+}
+
+/**
+ * Returns the Ethernet node offset in the device tree
+ *
+ * @param fdt_addr - pointer to flat device tree in memory
+ * @param aliases - offset of alias in device tree
+ * @param ipd_port - ipd port number to look up
+ *
+ * @returns offset of Ethernet node if >= 0, error if -1
+ */
+int __pip_eth_node(const void *fdt_addr, int aliases, int ipd_port)
+{
+	char name_buffer[20];
+	const char *pip_path;
+	int pip, iface, eth;
+	int interface_num = cvmx_helper_get_interface_num(ipd_port);
+	int interface_index = cvmx_helper_get_interface_index_num(ipd_port);
+	cvmx_helper_interface_mode_t interface_mode =
+		cvmx_helper_interface_get_mode(interface_num);
+
+	/* The following are not found in the device tree */
+	switch (interface_mode) {
+	case CVMX_HELPER_INTERFACE_MODE_ILK:
+	case CVMX_HELPER_INTERFACE_MODE_LOOP:
+	case CVMX_HELPER_INTERFACE_MODE_SRIO:
+		debug("ERROR: No node expected for interface: %d, port: %d, mode: %s\n",
+		      interface_index, ipd_port,
+		      cvmx_helper_interface_mode_to_string(interface_mode));
+		return -1;
+	default:
+		break;
+	}
+	/* Resolve the "pip" alias to the PIP controller node */
+	pip_path = (const char *)fdt_getprop(fdt_addr, aliases, "pip", NULL);
+	if (!pip_path) {
+		debug("ERROR: pip path not found in device tree\n");
+		return -1;
+	}
+	pip = fdt_path_offset(fdt_addr, pip_path);
+	debug("ipdd_port=%d pip_path=%s pip=%d ", ipd_port, pip_path, pip);
+	if (pip < 0) {
+		debug("ERROR: pip not found in device tree\n");
+		return -1;
+	}
+	/* Descend into interface@<num>/ethernet@<index>.  Note the interface
+	 * unit address is decimal while the ethernet one is hexadecimal.
+	 */
+	snprintf(name_buffer, sizeof(name_buffer), "interface@%d",
+		 interface_num);
+	iface = fdt_subnode_offset(fdt_addr, pip, name_buffer);
+	debug("iface=%d ", iface);
+	if (iface < 0) {
+		debug("ERROR : pip intf %d not found in device tree\n",
+		      interface_num);
+		return -1;
+	}
+	snprintf(name_buffer, sizeof(name_buffer), "ethernet@%x",
+		 interface_index);
+	eth = fdt_subnode_offset(fdt_addr, iface, name_buffer);
+	debug("eth=%d\n", eth);
+	if (eth < 0) {
+		debug("ERROR : pip interface@%d ethernet@%d not found in device tree\n",
+		      interface_num, interface_index);
+		return -1;
+	}
+	return eth;
+}
+
+/**
+ * Returns the MIX node offset in the device tree
+ *
+ * @param fdt_addr - pointer to flat device tree in memory
+ * @param aliases - offset of alias in device tree
+ * @param interface_index - MIX interface number
+ *
+ * @return offset of the MIX node if >= 0, error if -1
+ */
+int __mix_eth_node(const void *fdt_addr, int aliases, int interface_index)
+{
+	char name_buffer[20];
+	const char *mix_path;
+	int mix;
+
+	snprintf(name_buffer, sizeof(name_buffer), "mix%d", interface_index);
+	mix_path =
+		(const char *)fdt_getprop(fdt_addr, aliases, name_buffer, NULL);
+	if (!mix_path) {
+		debug("ERROR: mix%d path not found in device tree\n",
+		      interface_index);
+		/* Bail out here: the original code fell through and passed
+		 * a NULL path to fdt_path_offset().
+		 */
+		return -1;
+	}
+	mix = fdt_path_offset(fdt_addr, mix_path);
+	if (mix < 0) {
+		debug("ERROR: %s not found in device tree\n", mix_path);
+		return -1;
+	}
+	return mix;
+}
+
+/* Map an MDIO PHY address to its SMI/MDIO bus unit.  Bits [8:7] of the
+ * address select the bus; only CN68XX and CN78XX have four buses, other
+ * models use just bit 8.
+ */
+static int __mdiobus_addr_to_unit(u32 addr)
+{
+	int bus_sel = (addr >> 7) & 3;
+
+	if (OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CN78XX))
+		return bus_sel;
+
+	return bus_sel >> 1;
+}
+
+/**
+ * Parse the muxed MDIO interface information from the device tree
+ *
+ * @param phy_info - pointer to phy info data structure to update
+ * @param mdio_offset - offset of MDIO bus
+ * @param mux_offset - offset of MUX, parent of mdio_offset
+ *
+ * @return 0 for success or -1
+ */
+static int __get_muxed_mdio_info_from_dt(cvmx_phy_info_t *phy_info,
+					 int mdio_offset, int mux_offset)
+{
+	const void *fdt_addr = CASTPTR(const void *, gd->fdt_blob);
+	int phandle;
+	int smi_offset;
+	int gpio_offset;
+	u64 smi_addr = 0;
+	int len;
+	u32 *pgpio_handle;
+	int gpio_count = 0;
+	u32 *prop_val;
+	int offset;
+	const char *prop_name;
+
+	debug("%s(%p, 0x%x, 0x%x)\n", __func__, phy_info, mdio_offset,
+	      mux_offset);
+
+	/* Get register value to put onto the GPIO lines to select */
+	phy_info->gpio_value =
+		cvmx_fdt_get_int(fdt_addr, mdio_offset, "reg", -1);
+	if (phy_info->gpio_value < 0) {
+		debug("Could not get register value for muxed MDIO bus from DT\n");
+		return -1;
+	}
+
+	smi_offset = cvmx_fdt_lookup_phandle(fdt_addr, mux_offset,
+					     "mdio-parent-bus");
+	if (smi_offset < 0) {
+		debug("Invalid SMI offset for muxed MDIO interface in device tree\n");
+		return -1;
+	}
+	smi_addr = cvmx_fdt_get_uint64(fdt_addr, smi_offset, "reg", 0);
+
+	/* Convert SMI address to a MDIO interface */
+	switch (smi_addr) {
+	case 0x1180000001800:
+	case 0x1180000003800: /* 68XX address */
+		phy_info->mdio_unit = 0;
+		break;
+	case 0x1180000001900:
+	case 0x1180000003880:
+		phy_info->mdio_unit = 1;
+		break;
+	case 0x1180000003900:
+		phy_info->mdio_unit = 2;
+		break;
+	case 0x1180000003980:
+		phy_info->mdio_unit = 3;
+		break;
+	default:
+		/* NOTE(review): unrecognized SMI addresses silently default
+		 * to unit 1 -- confirm this is intended.
+		 */
+		phy_info->mdio_unit = 1;
+		break;
+	}
+	/* Find the GPIO MUX controller.  Each "gpios" entry is a 3-cell
+	 * (12-byte) phandle/pin/flags tuple.
+	 */
+	pgpio_handle =
+		(u32 *)fdt_getprop(fdt_addr, mux_offset, "gpios", &len);
+	if (!pgpio_handle || len < 12 || (len % 12) != 0 ||
+	    len > CVMX_PHY_MUX_MAX_GPIO * 12) {
+		debug("Invalid GPIO for muxed MDIO controller in DT\n");
+		return -1;
+	}
+
+	for (gpio_count = 0; gpio_count < len / 12; gpio_count++) {
+		phandle = fdt32_to_cpu(pgpio_handle[gpio_count * 3]);
+		phy_info->gpio[gpio_count] =
+			fdt32_to_cpu(pgpio_handle[gpio_count * 3 + 1]);
+		gpio_offset = fdt_node_offset_by_phandle(fdt_addr, phandle);
+		if (gpio_offset < 0) {
+			debug("Cannot access parent GPIO node in DT\n");
+			return -1;
+		}
+		if (!fdt_node_check_compatible(fdt_addr, gpio_offset,
+					       "cavium,octeon-3860-gpio")) {
+			phy_info->gpio_type[gpio_count] = GPIO_OCTEON;
+		} else if (!fdt_node_check_compatible(fdt_addr, gpio_offset,
+						      "nxp,pca8574")) {
+			/* GPIO is a TWSI GPIO unit which might sit behind
+			 * another mux.
+			 */
+			phy_info->gpio_type[gpio_count] = GPIO_PCA8574;
+			prop_val = (u32 *)fdt_getprop(
+				fdt_addr, gpio_offset, "reg", NULL);
+			if (!prop_val) {
+				debug("Could not find TWSI address of npx pca8574 GPIO from DT\n");
+				return -1;
+			}
+			/* Get the TWSI address of the GPIO unit */
+			phy_info->cvmx_gpio_twsi[gpio_count] =
+				fdt32_to_cpu(*prop_val);
+			/* Get the selector on the GPIO mux if present */
+			offset = fdt_parent_offset(fdt_addr, gpio_offset);
+			prop_val = (u32 *)fdt_getprop(fdt_addr, offset,
+						      "reg", NULL);
+			if (prop_val) {
+				phy_info->gpio_parent_mux_select =
+					fdt32_to_cpu(*prop_val);
+				/* Go up another level */
+				offset = fdt_parent_offset(fdt_addr, offset);
+				if (!fdt_node_check_compatible(fdt_addr, offset,
+							       "nxp,pca9548")) {
+					prop_val = (u32 *)fdt_getprop(
+						fdt_addr, offset, "reg", NULL);
+					if (!prop_val) {
+						debug("Could not read MUX TWSI address from DT\n");
+						return -1;
+					}
+					phy_info->gpio_parent_mux_twsi =
+						fdt32_to_cpu(*prop_val);
+				}
+			}
+		} else {
+			prop_name = (char *)fdt_getprop(fdt_addr, gpio_offset,
+							"compatible", NULL);
+			debug("Unknown GPIO type %s\n", prop_name);
+			return -1;
+		}
+	}
+	return 0;
+}
+
+/**
+ * @INTERNAL
+ * Converts a BGX address to the node, interface and port number
+ *
+ * @param bgx_addr Address of CSR register
+ *
+ * @return node, interface and port number, will be -1 for invalid address.
+ */
+static struct cvmx_xiface __cvmx_bgx_reg_addr_to_xiface(u64 bgx_addr)
+{
+	struct cvmx_xiface xi = { -1, -1 };
+	u64 local_addr;
+
+	xi.node = cvmx_csr_addr_to_node(bgx_addr);
+	local_addr = cvmx_csr_addr_strip_node(bgx_addr);
+
+	/* All BGX CSRs live inside the 0x00011800E0000000 window */
+	if ((local_addr & 0xFFFFFFFFF0000000) != 0x00011800E0000000) {
+		debug("%s: Invalid BGX address 0x%llx\n", __func__,
+		      (unsigned long long)local_addr);
+		xi.node = -1;
+		return xi;
+	}
+
+	/* BGX unit number is encoded in address bits [27:24] */
+	xi.interface = (local_addr >> 24) & 0x0F;
+	return xi;
+}
+
+/**
+ * @INTERNAL
+ * Get link state of a Marvell PHY from its page-0 status register 17.
+ *
+ * @param phy_info PHY information
+ */
+static cvmx_helper_link_info_t
+__get_marvell_phy_link_state(cvmx_phy_info_t *phy_info)
+{
+	cvmx_helper_link_info_t result;
+	int phy_status;
+	/* Upper byte is the MDIO bus unit, lower byte the PHY id */
+	u32 phy_addr = phy_info->phy_addr;
+
+	result.u64 = 0;
+	/* Set to page 0 */
+	cvmx_mdio_write(phy_addr >> 8, phy_addr & 0xff, 22, 0);
+	/* All the speed information can be read from register 17 in one go. */
+	phy_status = cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff, 17);
+
+	/* If the resolve bit 11 isn't set, see if autoneg is turned off
+	 * (bit 12, reg 0). The resolve bit doesn't get set properly when
+	 * autoneg is off, so force it
+	 */
+	if ((phy_status & (1 << 11)) == 0) {
+		int auto_status =
+			cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff, 0);
+		if ((auto_status & (1 << 12)) == 0)
+			phy_status |= 1 << 11;
+	}
+
+	/* Link is up = Speed/Duplex Resolved + RT-Link Up + G-Link Up. */
+	if ((phy_status & 0x0c08) == 0x0c08) {
+		result.s.link_up = 1;
+		result.s.full_duplex = ((phy_status >> 13) & 1);
+		/* Bits [15:14] hold the resolved speed */
+		switch ((phy_status >> 14) & 3) {
+		case 0: /* 10 Mbps */
+			result.s.speed = 10;
+			break;
+		case 1: /* 100 Mbps */
+			result.s.speed = 100;
+			break;
+		case 2: /* 1 Gbps */
+			result.s.speed = 1000;
+			break;
+		case 3: /* Illegal */
+			result.u64 = 0;
+			break;
+		}
+	}
+	return result;
+}
+
+/**
+ * @INTERNAL
+ * Get link state of broadcom PHY
+ *
+ * @param phy_info PHY information
+ */
+static cvmx_helper_link_info_t
+__get_broadcom_phy_link_state(cvmx_phy_info_t *phy_info)
+{
+	/* Decode table for the 3-bit link status field of SMI/MDIO
+	 * register 0x19: index 0 means no link, the other values encode
+	 * the resolved speed/duplex combination.
+	 */
+	static const struct {
+		unsigned int speed;
+		unsigned int duplex;
+	} decode[8] = {
+		{ 0, 0 },	/* 0: link down */
+		{ 10, 0 },	/* 1: 10M half */
+		{ 10, 1 },	/* 2: 10M full */
+		{ 100, 0 },	/* 3: 100M half */
+		{ 100, 1 },	/* 4: 100M full */
+		{ 100, 1 },	/* 5: 100M full */
+		{ 1000, 0 },	/* 6: 1G half */
+		{ 1000, 1 },	/* 7: 1G full */
+	};
+	cvmx_helper_link_info_t result;
+	u32 phy_addr = phy_info->phy_addr;
+	int phy_status;
+	unsigned int sel;
+
+	result.u64 = 0;
+	/* Below we are going to read SMI/MDIO register 0x19 which works
+	 * on Broadcom parts
+	 */
+	phy_status = cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff, 0x19);
+	sel = (phy_status >> 8) & 0x7;
+	if (sel) {
+		result.s.link_up = 1;
+		result.s.full_duplex = decode[sel].duplex;
+		result.s.speed = decode[sel].speed;
+	}
+	return result;
+}
+
+/**
+ * @INTERNAL
+ * Get link state of generic gigabit PHY
+ *
+ * @param phy_info - information about the PHY
+ *
+ * @returns link status of the PHY
+ */
+static cvmx_helper_link_info_t
+__cvmx_get_generic_8023_c22_phy_link_state(cvmx_phy_info_t *phy_info)
+{
+	cvmx_helper_link_info_t result;
+	u32 phy_addr = phy_info->phy_addr;
+	int phy_basic_control; /* Register 0x0 */
+	int phy_basic_status; /* Register 0x1 */
+	int phy_anog_adv; /* Register 0x4 */
+	int phy_link_part_avail; /* Register 0x5 */
+	int phy_control; /* Register 0x9 */
+	int phy_status; /* Register 0xA */
+
+	result.u64 = 0;
+
+	phy_basic_status = cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff, 1);
+	if (!(phy_basic_status & 0x4)) /* Check if link is up */
+		return result; /* Link is down, return link down */
+
+	result.s.link_up = 1;
+	phy_basic_control = cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff, 0);
+	/* Check if autonegotiation is enabled and completed */
+	if ((phy_basic_control & (1 << 12)) && (phy_basic_status & (1 << 5))) {
+		phy_status =
+			cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff, 0xA);
+		phy_control =
+			cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff, 0x9);
+
+		/* Mask the partner's 1000BASE-T abilities (reg 10) with our
+		 * own advertisement (reg 9, bits aligned after shifting by 2).
+		 */
+		phy_status &= phy_control << 2;
+		phy_link_part_avail =
+			cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff, 0x5);
+		phy_anog_adv =
+			cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff, 0x4);
+		/* Keep only modes both link partners advertise */
+		phy_link_part_avail &= phy_anog_adv;
+
+		if (phy_status & 0xC00) { /* Gigabit full or half */
+			result.s.speed = 1000;
+			result.s.full_duplex = !!(phy_status & 0x800);
+		} else if (phy_link_part_avail &
+			   0x0180) { /* 100 full or half */
+			result.s.speed = 100;
+			result.s.full_duplex = !!(phy_link_part_avail & 0x100);
+		} else if (phy_link_part_avail & 0x0060) {
+			/* 10 full or half */
+			result.s.speed = 10;
+			result.s.full_duplex = !!(phy_link_part_avail & 0x0040);
+		}
+	} else {
+		/* Not autonegotiated: read forced speed/duplex from the
+		 * basic control register.
+		 */
+		result.s.full_duplex = !!(phy_basic_control & (1 << 8));
+
+		if (phy_basic_control & (1 << 6))
+			result.s.speed = 1000;
+		else if (phy_basic_control & (1 << 13))
+			result.s.speed = 100;
+		else
+			result.s.speed = 10;
+	}
+	return result;
+}
+
+/**
+ * @INTERNAL
+ * Get link state of a Qualcomm S17 PHY from its status register 17.
+ *
+ * @param phy_info PHY information
+ */
+static cvmx_helper_link_info_t
+__cvmx_get_qualcomm_s17_phy_link_state(cvmx_phy_info_t *phy_info)
+{
+	cvmx_helper_link_info_t result;
+	u32 phy_addr = phy_info->phy_addr;
+	int phy_status;
+	int auto_status;
+
+	result.u64 = 0;
+
+	phy_status = cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff, 17);
+
+	/* If bit 11 isn't set see if autonegotiation is turned off
+	 * (bit 12, reg 0). The resolved bit doesn't get set properly when
+	 * autonegotiation is off, so force it.
+	 */
+	if ((phy_status & (1 << 11)) == 0) {
+		auto_status = cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff, 0);
+		if ((auto_status & (1 << 12)) == 0)
+			phy_status |= 1 << 11;
+	}
+	/* Only return a link if the PHY has finished autonegotiation and set
+	 * the resolved bit (bit 11).
+	 */
+	if (phy_status & (1 << 11)) {
+		result.s.link_up = 1;
+		result.s.full_duplex = !!(phy_status & (1 << 13));
+		/* Bits [15:14] hold the resolved speed */
+		switch ((phy_status >> 14) & 3) {
+		case 0: /* 10Mbps */
+			result.s.speed = 10;
+			break;
+		case 1: /* 100Mbps */
+			result.s.speed = 100;
+			break;
+		case 2: /* 1Gbps */
+			result.s.speed = 1000;
+			break;
+		default: /* Illegal */
+			result.u64 = 0;
+			break;
+		}
+	}
+	debug(" link: %s, duplex: %s, speed: %lu\n",
+	      result.s.link_up ? "up" : "down",
+	      result.s.full_duplex ? "full" : "half",
+	      (unsigned long)result.s.speed);
+	return result;
+}
+
+/**
+ * @INTERNAL
+ * Get link state of a generic IEEE 802.3 clause 45 PHY.
+ *
+ * @param phy_info PHY information
+ */
+static cvmx_helper_link_info_t
+__get_generic_8023_c45_phy_link_state(cvmx_phy_info_t *phy_info)
+{
+	cvmx_helper_link_info_t result;
+	int phy_status;
+	int pma_ctrl1;
+	u32 phy_addr = phy_info->phy_addr;
+
+	result.u64 = 0;
+	/* PMA/PMD control 1 (device 1, reg 0): speed selection bits */
+	pma_ctrl1 = cvmx_mdio_45_read(phy_addr >> 8, phy_addr & 0xff, 1, 0);
+	if ((pma_ctrl1 & 0x207c) == 0x2040)
+		result.s.speed = 10000;
+	/* PMA Status 1 (1x0001) */
+	phy_status = cvmx_mdio_45_read(phy_addr >> 8, phy_addr & 0xff, 1, 0xa);
+	if (phy_status < 0)
+		return result;
+
+	result.s.full_duplex = 1;
+	if ((phy_status & 1) == 0)
+		return result;
+	/* Device 4, register 0x18: bit 12 reports the link */
+	phy_status = cvmx_mdio_45_read(phy_addr >> 8, phy_addr & 0xff, 4, 0x18);
+	if (phy_status < 0)
+		return result;
+	result.s.link_up = (phy_status & 0x1000) ? 1 : 0;
+
+	return result;
+}
+
+/**
+ * @INTERNAL
+ * Get link state of a Cortina PHY.  The hardware is not queried; the
+ * link is reported as a fixed 1Gbps full-duplex up-link.
+ *
+ * @param phy_info PHY information (unused)
+ */
+static cvmx_helper_link_info_t
+__cvmx_get_cortina_phy_link_state(cvmx_phy_info_t *phy_info)
+{
+	cvmx_helper_link_info_t result;
+
+	/* Zero the whole union first: only three bitfields are assigned
+	 * below and the remaining bits were previously left uninitialized.
+	 */
+	result.u64 = 0;
+	result.s.link_up = 1;
+	result.s.full_duplex = 1;
+	result.s.speed = 1000;
+	return result;
+}
+
+/**
+ * @INTERNAL
+ * Get link state of a Vitesse VSC8490 PHY.  The hardware is not queried;
+ * the link is reported as a fixed 1Gbps full-duplex up-link.
+ *
+ * @param phy_info PHY information (unused)
+ */
+static cvmx_helper_link_info_t
+__get_vitesse_vsc8490_phy_link_state(cvmx_phy_info_t *phy_info)
+{
+	cvmx_helper_link_info_t result;
+
+	/* Zero the whole union first: only three bitfields are assigned
+	 * below and the remaining bits were previously left uninitialized.
+	 */
+	result.u64 = 0;
+	result.s.link_up = 1;
+	result.s.full_duplex = 1;
+	result.s.speed = 1000;
+	return result;
+}
+
+/**
+ * @INTERNAL
+ * Get link state of an Aquantia PHY.  The hardware is not queried; the
+ * link is reported as a fixed 1Gbps full-duplex up-link.
+ *
+ * @param phy_info PHY information (unused)
+ */
+static cvmx_helper_link_info_t
+__get_aquantia_phy_link_state(cvmx_phy_info_t *phy_info)
+{
+	cvmx_helper_link_info_t result;
+
+	/* Zero the whole union first: only three bitfields are assigned
+	 * below and the remaining bits were previously left uninitialized.
+	 */
+	result.u64 = 0;
+	result.s.link_up = 1;
+	result.s.full_duplex = 1;
+	result.s.speed = 1000;
+	return result;
+}
+
+/**
+ * @INTERNAL
+ * Parse the device tree to fill in the PHY information (address, type
+ * and link-state routine) for a BGX port on 78XX-style (BGX-capable)
+ * chips.  On first call this also triggers parsing of the BGX, VSC7224
+ * and BGX XCV device tree nodes.
+ *
+ * @param phy_info PHY information structure to fill in
+ * @param ipd_port IPD port number of the port
+ *
+ * @return 0 for success, -1 on error
+ */
+static int __cvmx_helper_78xx_parse_phy(struct cvmx_phy_info *phy_info,
+					int ipd_port)
+{
+	const void *fdt_addr = CASTPTR(const void *, gd->fdt_blob);
+	const char *compat;
+	int phy;
+	int parent;
+	u64 mdio_base;
+	int node, bus;
+	int phy_addr;
+	int index = cvmx_helper_get_interface_index_num(ipd_port);
+	int xiface = cvmx_helper_get_interface_num(ipd_port);
+	int compat_len = 0;
+
+	debug("%s(0x%p, %d) ENTER\n", __func__, phy_info, ipd_port);
+
+	phy = cvmx_helper_get_phy_fdt_node_offset(xiface, index);
+	debug("%s: xiface: 0x%x, index: %d, ipd_port: %d, phy fdt offset: %d\n",
+	      __func__, xiface, index, ipd_port, phy);
+	if (phy < 0) {
+		/* If this is the first time through we need to first parse the
+		 * device tree to get the node offsets.
+		 */
+		debug("No config present, calling __cvmx_helper_parse_bgx_dt\n");
+		if (__cvmx_helper_parse_bgx_dt(fdt_addr)) {
+			printf("Error: could not parse BGX device tree\n");
+			return -1;
+		}
+		if (__cvmx_fdt_parse_vsc7224(fdt_addr)) {
+			debug("Error: could not parse Microsemi VSC7224 in DT\n");
+			return -1;
+		}
+		if (octeon_has_feature(OCTEON_FEATURE_BGX_XCV) &&
+		    __cvmx_helper_parse_bgx_rgmii_dt(fdt_addr)) {
+			printf("Error: could not parse BGX XCV device tree\n");
+			return -1;
+		}
+		phy = cvmx_helper_get_phy_fdt_node_offset(xiface, index);
+		if (phy < 0) {
+			debug("%s: Could not get PHY node offset for IPD port 0x%x, xiface: 0x%x, index: %d\n",
+			      __func__, ipd_port, xiface, index);
+			return -1;
+		}
+		debug("%s: phy: %d (%s)\n", __func__, phy,
+		      fdt_get_name(fdt_addr, phy, NULL));
+	}
+
+	compat = (const char *)fdt_getprop(fdt_addr, phy, "compatible",
+					   &compat_len);
+	if (!compat) {
+		printf("ERROR: %d:%d:no compatible prop in phy\n", xiface,
+		       index);
+		return -1;
+	}
+
+	debug(" compatible: %s\n", compat);
+
+	phy_info->fdt_offset = phy;
+	phy_addr = cvmx_fdt_get_int(fdt_addr, phy, "reg", -1);
+	if (phy_addr == -1) {
+		printf("Error: %d:%d:could not get PHY address\n", xiface,
+		       index);
+		return -1;
+	}
+	debug(" PHY address: %d, compat: %s\n", phy_addr, compat);
+
+	/* Select the PHY type and link-state routine from the compatible
+	 * string; vendor matches take precedence over the generic IEEE
+	 * 802.3 bindings.
+	 */
+	if (!memcmp("marvell", compat, strlen("marvell"))) {
+		phy_info->phy_type = MARVELL_GENERIC_PHY;
+		phy_info->link_function = __get_marvell_phy_link_state;
+	} else if (!memcmp("broadcom", compat, strlen("broadcom"))) {
+		phy_info->phy_type = BROADCOM_GENERIC_PHY;
+		phy_info->link_function = __get_broadcom_phy_link_state;
+	} else if (!memcmp("cortina", compat, strlen("cortina"))) {
+		phy_info->phy_type = CORTINA_PHY;
+		phy_info->link_function = __cvmx_get_cortina_phy_link_state;
+	} else if (!strcmp("vitesse,vsc8490", compat)) {
+		phy_info->phy_type = VITESSE_VSC8490_PHY;
+		phy_info->link_function = __get_vitesse_vsc8490_phy_link_state;
+	} else if (fdt_stringlist_contains(compat, compat_len,
+					   "ethernet-phy-ieee802.3-c22")) {
+		phy_info->phy_type = GENERIC_8023_C22_PHY;
+		phy_info->link_function =
+			__cvmx_get_generic_8023_c22_phy_link_state;
+	} else if (fdt_stringlist_contains(compat, compat_len,
+					   "ethernet-phy-ieee802.3-c45")) {
+		/* NOTE(review): phy_type is the C22 constant here even
+		 * though the link function is the clause-45 one -- confirm
+		 * whether a C45 phy type was intended.
+		 */
+		phy_info->phy_type = GENERIC_8023_C22_PHY;
+		phy_info->link_function = __get_generic_8023_c45_phy_link_state;
+	}
+
+	phy_info->ipd_port = ipd_port;
+	phy_info->phy_sub_addr = 0;
+	phy_info->direct_connect = 1;
+
+	parent = fdt_parent_offset(fdt_addr, phy);
+	if (!fdt_node_check_compatible(fdt_addr, parent,
+				       "ethernet-phy-nexus")) {
+		debug(" nexus PHY found\n");
+		if (phy_info->phy_type == CORTINA_PHY) {
+			/* The Cortina CS422X uses the same PHY device for
+			 * multiple ports for XFI. In this case we use a
+			 * nexus and each PHY address is the slice or
+			 * sub-address and the actual PHY address is the
+			 * nexus address.
+			 */
+			phy_info->phy_sub_addr = phy_addr;
+			phy_addr =
+				cvmx_fdt_get_int(fdt_addr, parent, "reg", -1);
+			debug(" Cortina PHY real address: 0x%x\n", phy_addr);
+		}
+		parent = fdt_parent_offset(fdt_addr, parent);
+	}
+
+	debug(" Parent: %s\n", fdt_get_name(fdt_addr, parent, NULL));
+	if (!fdt_node_check_compatible(fdt_addr, parent,
+				       "cavium,octeon-3860-mdio")) {
+		debug(" Found Octeon MDIO\n");
+		mdio_base = cvmx_fdt_get_uint64(fdt_addr, parent, "reg",
+						FDT_ADDR_T_NONE);
+		debug(" MDIO address: 0x%llx\n",
+		      (unsigned long long)mdio_base);
+
+		mdio_base = cvmx_fdt_translate_address(fdt_addr, parent,
+						       (u32 *)&mdio_base);
+		debug(" Translated: 0x%llx\n", (unsigned long long)mdio_base);
+		if (mdio_base == FDT_ADDR_T_NONE) {
+			printf("Could not get MDIO base address from reg field\n");
+			return -1;
+		}
+		__cvmx_mdio_addr_to_node_bus(mdio_base, &node, &bus);
+		if (bus < 0) {
+			printf("Invalid MDIO address 0x%llx, could not detect bus and node\n",
+			       (unsigned long long)mdio_base);
+			return -1;
+		}
+		debug(" MDIO node: %d, bus: %d\n", node, bus);
+		/* Pack node/bus into the mdio unit and store the unit in
+		 * the upper byte of the PHY address.
+		 */
+		phy_info->mdio_unit = (node << 2) | (bus & 3);
+		phy_info->phy_addr = phy_addr | (phy_info->mdio_unit << 8);
+	} else {
+		printf("%s: Error: incompatible MDIO bus %s for IPD port %d\n",
+		       __func__,
+		       (const char *)fdt_get_name(fdt_addr, parent, NULL),
+		       ipd_port);
+		return -1;
+	}
+
+	debug("%s: EXIT 0\n", __func__);
+
+	return 0;
+}
+
+/**
+ * Return the MII PHY address associated with the given IPD
+ * port. The phy address is obtained from the device tree.
+ *
+ * @param[out] phy_info - PHY information data structure updated
+ * @param ipd_port Octeon IPD port to get the MII address for.
+ *
+ * @return MII PHY address and bus number, -1 on error, -2 if PHY info missing (OK).
+ */
+static int __get_phy_info_from_dt(cvmx_phy_info_t *phy_info, int ipd_port)
+{
+ const void *fdt_addr = CASTPTR(const void *, gd->fdt_blob);
+ int aliases, eth, phy, phy_parent, ret, i;
+ int mdio_parent;
+ const char *phy_compatible_str;
+ const char *host_mode_str = NULL;
+ int interface;
+ int phy_addr_offset = 0;
+
+ debug("%s(%p, %d)\n", __func__, phy_info, ipd_port);
+
+ if (octeon_has_feature(OCTEON_FEATURE_BGX))
+ return __cvmx_helper_78xx_parse_phy(phy_info, ipd_port);
+
+ phy_info->phy_addr = -1;
+ phy_info->phy_sub_addr = 0;
+ phy_info->ipd_port = ipd_port;
+ phy_info->direct_connect = -1;
+ phy_info->phy_type = (cvmx_phy_type_t)-1;
+ for (i = 0; i < CVMX_PHY_MUX_MAX_GPIO; i++)
+ phy_info->gpio[i] = -1;
+ phy_info->mdio_unit = -1;
+ phy_info->gpio_value = -1;
+ phy_info->gpio_parent_mux_twsi = -1;
+ phy_info->gpio_parent_mux_select = -1;
+ phy_info->link_function = NULL;
+ phy_info->fdt_offset = -1;
+ if (!fdt_addr) {
+ debug("No device tree found.\n");
+ return -1;
+ }
+
+ aliases = fdt_path_offset(fdt_addr, "/aliases");
+ if (aliases < 0) {
+ debug("Error: No /aliases node in device tree.\n");
+ return -1;
+ }
+ if (ipd_port < 0) {
+ int interface_index =
+ ipd_port - CVMX_HELPER_BOARD_MGMT_IPD_PORT;
+ eth = __mix_eth_node(fdt_addr, aliases, interface_index);
+ } else {
+ eth = __pip_eth_node(fdt_addr, aliases, ipd_port);
+ }
+ if (eth < 0) {
+ debug("ERROR : cannot find interface for ipd_port=%d\n",
+ ipd_port);
+ return -1;
+ }
+
+ interface = cvmx_helper_get_interface_num(ipd_port);
+ /* Get handle to phy */
+ phy = cvmx_fdt_lookup_phandle(fdt_addr, eth, "phy-handle");
+ if (phy < 0) {
+ cvmx_helper_interface_mode_t if_mode;
+
+ /* Note that it's OK for RXAUI and ILK to not have a PHY
+ * connected (i.e. EBB boards in loopback).
+ */
+ debug("Cannot get phy-handle for ipd_port: %d\n", ipd_port);
+ if_mode = cvmx_helper_interface_get_mode(interface);
+ if (if_mode != CVMX_HELPER_INTERFACE_MODE_RXAUI &&
+ if_mode != CVMX_HELPER_INTERFACE_MODE_ILK) {
+ debug("ERROR : phy handle not found in device tree ipd_port=%d\n",
+ ipd_port);
+ return -1;
+ } else {
+ return -2;
+ }
+ }
+
+ phy_compatible_str =
+ (const char *)fdt_getprop(fdt_addr, phy, "compatible", NULL);
+ if (!phy_compatible_str) {
+ debug("ERROR: no compatible prop in phy\n");
+ return -1;
+ }
+ debug("Checking compatible string \"%s\" for ipd port %d\n",
+ phy_compatible_str, ipd_port);
+ phy_info->fdt_offset = phy;
+ if (!memcmp("marvell", phy_compatible_str, strlen("marvell"))) {
+ debug("Marvell PHY detected for ipd_port %d\n", ipd_port);
+ phy_info->phy_type = MARVELL_GENERIC_PHY;
+ phy_info->link_function = __get_marvell_phy_link_state;
+ } else if (!memcmp("broadcom", phy_compatible_str,
+ strlen("broadcom"))) {
+ phy_info->phy_type = BROADCOM_GENERIC_PHY;
+ phy_info->link_function = __get_broadcom_phy_link_state;
+ debug("Broadcom PHY detected for ipd_port %d\n", ipd_port);
+ } else if (!memcmp("vitesse", phy_compatible_str, strlen("vitesse"))) {
+ debug("Vitesse PHY detected for ipd_port %d\n", ipd_port);
+ if (!fdt_node_check_compatible(fdt_addr, phy,
+ "vitesse,vsc8490")) {
+ phy_info->phy_type = VITESSE_VSC8490_PHY;
+ debug("Vitesse VSC8490 detected\n");
+ phy_info->link_function =
+ __get_vitesse_vsc8490_phy_link_state;
+ } else if (!fdt_node_check_compatible(
+ fdt_addr, phy,
+ "ethernet-phy-ieee802.3-c22")) {
+ phy_info->phy_type = GENERIC_8023_C22_PHY;
+ phy_info->link_function =
+ __cvmx_get_generic_8023_c22_phy_link_state;
+ debug("Vitesse 802.3 c22 detected\n");
+ } else {
+ phy_info->phy_type = GENERIC_8023_C45_PHY;
+ phy_info->link_function =
+ __get_generic_8023_c45_phy_link_state;
+ debug("Vitesse 802.3 c45 detected\n");
+ }
+ } else if (!memcmp("aquantia", phy_compatible_str,
+ strlen("aquantia"))) {
+ phy_info->phy_type = AQUANTIA_PHY;
+ phy_info->link_function = __get_aquantia_phy_link_state;
+ debug("Aquantia c45 PHY detected\n");
+ } else if (!memcmp("cortina", phy_compatible_str, strlen("cortina"))) {
+ phy_info->phy_type = CORTINA_PHY;
+ phy_info->link_function = __cvmx_get_cortina_phy_link_state;
+ host_mode_str = (const char *)fdt_getprop(
+ fdt_addr, phy, "cortina,host-mode", NULL);
+ debug("Cortina PHY detected for ipd_port %d\n", ipd_port);
+ } else if (!memcmp("ti", phy_compatible_str, strlen("ti"))) {
+ phy_info->phy_type = GENERIC_8023_C45_PHY;
+ phy_info->link_function = __get_generic_8023_c45_phy_link_state;
+ debug("TI PHY detected for ipd_port %d\n", ipd_port);
+ } else if (!fdt_node_check_compatible(fdt_addr, phy,
+ "atheros,ar8334") ||
+ !fdt_node_check_compatible(fdt_addr, phy,
+ "qualcomm,qca8334") ||
+ !fdt_node_check_compatible(fdt_addr, phy,
+ "atheros,ar8337") ||
+ !fdt_node_check_compatible(fdt_addr, phy,
+ "qualcomm,qca8337")) {
+ phy_info->phy_type = QUALCOMM_S17;
+ phy_info->link_function =
+ __cvmx_get_qualcomm_s17_phy_link_state;
+ debug("Qualcomm QCA833X switch detected\n");
+ } else if (!fdt_node_check_compatible(fdt_addr, phy,
+ "ethernet-phy-ieee802.3-c22")) {
+ phy_info->phy_type = GENERIC_8023_C22_PHY;
+ phy_info->link_function =
+ __cvmx_get_generic_8023_c22_phy_link_state;
+ debug("Generic 802.3 c22 PHY detected\n");
+ } else if (!fdt_node_check_compatible(fdt_addr, phy,
+ "ethernet-phy-ieee802.3-c45")) {
+ phy_info->phy_type = GENERIC_8023_C45_PHY;
+ phy_info->link_function = __get_generic_8023_c45_phy_link_state;
+ debug("Generic 802.3 c45 PHY detected\n");
+ } else {
+ debug("Unknown PHY compatibility\n");
+ phy_info->phy_type = (cvmx_phy_type_t)-1;
+ phy_info->link_function = NULL;
+ }
+
+ phy_info->host_mode = CVMX_PHY_HOST_MODE_UNKNOWN;
+ if (host_mode_str) {
+ if (strcmp(host_mode_str, "rxaui") == 0)
+ phy_info->host_mode = CVMX_PHY_HOST_MODE_RXAUI;
+ else if (strcmp(host_mode_str, "xaui") == 0)
+ phy_info->host_mode = CVMX_PHY_HOST_MODE_XAUI;
+ else if (strcmp(host_mode_str, "sgmii") == 0)
+ phy_info->host_mode = CVMX_PHY_HOST_MODE_SGMII;
+ else if (strcmp(host_mode_str, "qsgmii") == 0)
+ phy_info->host_mode = CVMX_PHY_HOST_MODE_QSGMII;
+ else
+ debug("Unknown PHY host mode\n");
+ }
+
+ /* Check if PHY parent is the octeon MDIO bus. Some boards are connected
+ * though a MUX and for them direct_connect_to_phy will be 0
+ */
+ phy_parent = fdt_parent_offset(fdt_addr, phy);
+ if (phy_parent < 0) {
+ debug("ERROR : cannot find phy parent for ipd_port=%d ret=%d\n",
+ ipd_port, phy_parent);
+ return -1;
+ }
+ /* For multi-phy devices and devices on a MUX, go to the parent */
+ ret = fdt_node_check_compatible(fdt_addr, phy_parent,
+ "ethernet-phy-nexus");
+ if (ret == 0) {
+ /* It's a nexus so check the grandparent. */
+ phy_addr_offset =
+ cvmx_fdt_get_int(fdt_addr, phy_parent, "reg", 0);
+ phy_parent = fdt_parent_offset(fdt_addr, phy_parent);
+ }
+
+ /* Check for a muxed MDIO interface */
+ mdio_parent = fdt_parent_offset(fdt_addr, phy_parent);
+ ret = fdt_node_check_compatible(fdt_addr, mdio_parent,
+ "cavium,mdio-mux");
+ if (ret == 0) {
+ ret = __get_muxed_mdio_info_from_dt(phy_info, phy_parent,
+ mdio_parent);
+ if (ret) {
+ printf("Error reading mdio mux information for ipd port %d\n",
+ ipd_port);
+ return -1;
+ }
+ }
+ ret = fdt_node_check_compatible(fdt_addr, phy_parent,
+ "cavium,octeon-3860-mdio");
+ if (ret == 0) {
+ u32 *mdio_reg_base =
+ (u32 *)fdt_getprop(fdt_addr, phy_parent, "reg", 0);
+ phy_info->direct_connect = 1;
+ if (mdio_reg_base == 0) {
+ debug("ERROR : unable to get reg property in phy mdio\n");
+ return -1;
+ }
+ phy_info->mdio_unit =
+ __mdiobus_addr_to_unit(fdt32_to_cpu(mdio_reg_base[1]));
+ debug("phy parent=%s reg_base=%08x mdio_unit=%d\n",
+ fdt_get_name(fdt_addr, phy_parent, NULL),
+ (int)mdio_reg_base[1], phy_info->mdio_unit);
+ } else {
+ phy_info->direct_connect = 0;
+ /* The PHY is not directly connected to the Octeon MDIO bus.
+ * SE doesn't have abstractions for MDIO MUX or MDIO MUX
+ * drivers and hence for the non direct cases code will be
+ * needed which is board specific.
+ * For now the MDIO Unit is defaulted to 1.
+ */
+ debug("%s PHY at address: %d is not directly connected\n",
+ __func__, phy_info->phy_addr);
+ }
+
+ phy_info->phy_addr = cvmx_fdt_get_int(fdt_addr, phy, "reg", -1);
+ if (phy_info->phy_addr < 0) {
+ debug("ERROR: Could not read phy address from reg in DT\n");
+ return -1;
+ }
+ phy_info->phy_addr += phy_addr_offset;
+ phy_info->phy_addr |= phy_info->mdio_unit << 8;
+ debug("%s(%p, %d) => 0x%x\n", __func__, phy_info, ipd_port,
+ phy_info->phy_addr);
+ return phy_info->phy_addr;
+}
+
+/**
+ * @INTERNAL
+ * This function outputs the cvmx_phy_info_t data structure for the specified
+ * port.
+ *
+ * @param phy_info - phy info data structure
+ * @param ipd_port - port to get phy info for
+ *
+ * @return 0 for success, -1 if info not available
+ *
+ * NOTE: The phy_info data structure is subject to change.
+ */
+int cvmx_helper_board_get_phy_info(cvmx_phy_info_t *phy_info, int ipd_port)
+{
+ int retcode;
+
+ retcode = __get_phy_info_from_dt(phy_info, ipd_port);
+
+ return (retcode >= 0) ? 0 : -1;
+}
+
+/**
+ * Return the MII PHY address associated with the given IPD
+ * port. The phy address is obtained from the device tree.
+ *
+ * @param ipd_port Octeon IPD port to get the MII address for.
+ *
+ * @return MII PHY address and bus number or -1 on error, -2 if phy info missing (OK).
+ */
+int cvmx_helper_board_get_mii_address_from_dt(int ipd_port)
+{
+ cvmx_phy_info_t phy_info;
+ int retcode = __get_phy_info_from_dt(&phy_info, ipd_port);
+
+ if (retcode >= 0)
+ return phy_info.phy_addr;
+ else
+ return retcode;
+}
+
+/**
+ * Return the MII PHY address associated with the given IPD
+ * port. A result of -1 means there isn't a MII capable PHY
+ * connected to this port. On chips supporting multiple MII
+ * busses the bus number is encoded in bits <15:8>.
+ *
+ * This function must be modified for every new Octeon board.
+ * Internally it uses switch statements based on the cvmx_sysinfo
+ * data to determine board types and revisions. It replies on the
+ * fact that every Octeon board receives a unique board type
+ * enumeration from the bootloader.
+ *
+ * @param ipd_port Octeon IPD port to get the MII address for.
+ *
+ * @return MII PHY address and bus number or -1.
+ */
+int cvmx_helper_board_get_mii_address(int ipd_port)
+{
+ cvmx_phy_info_t phy_info;
+ int retcode;
+
+ debug("%s(0x%x) getting phy info from device tree\n", __func__,
+ ipd_port);
+ memset(&phy_info, 0, sizeof(phy_info));
+ retcode = __get_phy_info_from_dt(&phy_info, ipd_port);
+
+ if (retcode == -2) {
+ debug("%s(0x%x): phy info missing in device tree\n", __func__,
+ ipd_port);
+ return retcode;
+ } else if (retcode < 0) {
+ debug("%s: could not get phy info for port %d\n", __func__,
+ ipd_port);
+ return retcode;
+ }
+ debug("%s: phy address: 0x%x\n", __func__, phy_info.phy_addr);
+
+ /* Some unknown board. Somebody forgot to update this function... */
+ debug("%s: Unknown board type\n", __func__);
+ return -1;
+}
+
+/**
+ * @INTERNAL
+ * Parse the device tree and set whether a port is valid or not.
+ *
+ * @param fdt_addr Pointer to device tree
+ *
+ * @return 0 for success, -1 on error.
+ */
+int __cvmx_helper_parse_bgx_dt(const void *fdt_addr)
+{
+ int port_index;
+ struct cvmx_xiface xi;
+ int fdt_port_node = -1;
+ int fdt_interface_node;
+ int fdt_phy_node;
+ u64 reg_addr;
+ int xiface;
+ struct cvmx_phy_info *phy_info;
+ static bool parsed;
+ int err;
+ int ipd_port;
+
+ if (parsed) {
+ debug("%s: Already parsed\n", __func__);
+ return 0;
+ }
+ while ((fdt_port_node = fdt_node_offset_by_compatible(
+ fdt_addr, fdt_port_node,
+ "cavium,octeon-7890-bgx-port")) >= 0) {
+ /* Get the port number */
+ port_index =
+ cvmx_fdt_get_int(fdt_addr, fdt_port_node, "reg", -1);
+ if (port_index < 0) {
+ debug("Error: missing reg field for bgx port in device tree\n");
+ return -1;
+ }
+ debug("%s: Parsing BGX port %d\n", __func__, port_index);
+ /* Get the interface number */
+ fdt_interface_node = fdt_parent_offset(fdt_addr, fdt_port_node);
+ if (fdt_interface_node < 0) {
+ debug("Error: device tree corrupt!\n");
+ return -1;
+ }
+ if (fdt_node_check_compatible(fdt_addr, fdt_interface_node,
+ "cavium,octeon-7890-bgx")) {
+ debug("Error: incompatible Ethernet MAC Nexus in device tree!\n");
+ return -1;
+ }
+ reg_addr =
+ cvmx_fdt_get_addr(fdt_addr, fdt_interface_node, "reg");
+ debug("%s: BGX interface address: 0x%llx\n", __func__,
+ (unsigned long long)reg_addr);
+ if (reg_addr == FDT_ADDR_T_NONE) {
+ debug("Device tree BGX node has invalid address 0x%llx\n",
+ (unsigned long long)reg_addr);
+ return -1;
+ }
+ reg_addr = cvmx_fdt_translate_address(fdt_addr,
+ fdt_interface_node,
+ (u32 *)®_addr);
+ xi = __cvmx_bgx_reg_addr_to_xiface(reg_addr);
+ if (xi.node < 0) {
+ debug("Device tree BGX node has invalid address 0x%llx\n",
+ (unsigned long long)reg_addr);
+ return -1;
+ }
+ debug("%s: Found BGX node %d, interface %d\n", __func__,
+ xi.node, xi.interface);
+ xiface = cvmx_helper_node_interface_to_xiface(xi.node,
+ xi.interface);
+ cvmx_helper_set_port_fdt_node_offset(xiface, port_index,
+ fdt_port_node);
+ cvmx_helper_set_port_valid(xiface, port_index, true);
+
+ cvmx_helper_set_port_fdt_node_offset(xiface, port_index,
+ fdt_port_node);
+ if (fdt_getprop(fdt_addr, fdt_port_node,
+ "cavium,sgmii-mac-phy-mode", NULL))
+ cvmx_helper_set_mac_phy_mode(xiface, port_index, true);
+ else
+ cvmx_helper_set_mac_phy_mode(xiface, port_index, false);
+
+ if (fdt_getprop(fdt_addr, fdt_port_node, "cavium,force-link-up",
+ NULL))
+ cvmx_helper_set_port_force_link_up(xiface, port_index,
+ true);
+ else
+ cvmx_helper_set_port_force_link_up(xiface, port_index,
+ false);
+
+ if (fdt_getprop(fdt_addr, fdt_port_node,
+ "cavium,sgmii-mac-1000x-mode", NULL))
+ cvmx_helper_set_1000x_mode(xiface, port_index, true);
+ else
+ cvmx_helper_set_1000x_mode(xiface, port_index, false);
+
+ if (fdt_getprop(fdt_addr, fdt_port_node,
+ "cavium,disable-autonegotiation", NULL))
+ cvmx_helper_set_port_autonegotiation(xiface, port_index,
+ false);
+ else
+ cvmx_helper_set_port_autonegotiation(xiface, port_index,
+ true);
+
+ fdt_phy_node = cvmx_fdt_lookup_phandle(fdt_addr, fdt_port_node,
+ "phy-handle");
+ if (fdt_phy_node >= 0) {
+ cvmx_helper_set_phy_fdt_node_offset(xiface, port_index,
+ fdt_phy_node);
+ debug("%s: Setting PHY fdt node offset for interface 0x%x, port %d to %d\n",
+ __func__, xiface, port_index, fdt_phy_node);
+ debug("%s: PHY node name: %s\n", __func__,
+ fdt_get_name(fdt_addr, fdt_phy_node, NULL));
+ cvmx_helper_set_port_phy_present(xiface, port_index,
+ true);
+ ipd_port = cvmx_helper_get_ipd_port(xiface, port_index);
+ if (ipd_port >= 0) {
+ debug("%s: Allocating phy info for 0x%x:%d\n",
+ __func__, xiface, port_index);
+ phy_info =
+ (cvmx_phy_info_t *)cvmx_bootmem_alloc(
+ sizeof(*phy_info), 0);
+ if (!phy_info) {
+ debug("%s: Out of memory\n", __func__);
+ return -1;
+ }
+ memset(phy_info, 0, sizeof(*phy_info));
+ phy_info->phy_addr = -1;
+ err = __get_phy_info_from_dt(phy_info,
+ ipd_port);
+ if (err) {
+ debug("%s: Error parsing phy info for ipd port %d\n",
+ __func__, ipd_port);
+ return -1;
+ }
+ cvmx_helper_set_port_phy_info(
+ xiface, port_index, phy_info);
+ debug("%s: Saved phy info\n", __func__);
+ }
+ } else {
+ cvmx_helper_set_phy_fdt_node_offset(xiface, port_index,
+ -1);
+ debug("%s: No PHY fdt node offset for interface 0x%x, port %d to %d\n",
+ __func__, xiface, port_index, fdt_phy_node);
+ cvmx_helper_set_port_phy_present(xiface, port_index,
+ false);
+ }
+ }
+ if (!sfp_parsed)
+ if (cvmx_sfp_parse_device_tree(fdt_addr))
+ debug("%s: Error parsing SFP device tree\n", __func__);
+ parsed = true;
+ return 0;
+}
+
/**
 * @INTERNAL
 * Parse the device tree for the single XCV (RGMII) interface and record
 * its port/PHY FDT node offsets and PHY-present state.
 *
 * @param fdt_addr Pointer to flat device tree
 *
 * @return 0 for success (including "no XCV interface present"),
 *         -1 on error.
 */
int __cvmx_helper_parse_bgx_rgmii_dt(const void *fdt_addr)
{
	u64 reg_addr;
	struct cvmx_xiface xi;
	int fdt_port_node = -1;
	int fdt_interface_node;
	int fdt_phy_node;
	int port_index;
	int xiface;

	/* There's only one xcv (RGMII) interface, so just search for the one
	 * that's part of a BGX entry.
	 */
	while ((fdt_port_node = fdt_node_offset_by_compatible(
			fdt_addr, fdt_port_node, "cavium,octeon-7360-xcv")) >=
	       0) {
		fdt_interface_node = fdt_parent_offset(fdt_addr, fdt_port_node);
		if (fdt_interface_node < 0) {
			printf("Error: device tree corrupt!\n");
			return -1;
		}
		debug("%s: XCV parent node compatible: %s\n", __func__,
		      (char *)fdt_getprop(fdt_addr, fdt_interface_node,
					  "compatible", NULL));
		/* Stop at the first XCV node whose parent is a BGX block */
		if (!fdt_node_check_compatible(fdt_addr, fdt_interface_node,
					       "cavium,octeon-7890-bgx"))
			break;
	}
	if (fdt_port_node == -FDT_ERR_NOTFOUND) {
		/* Not an error: the board simply has no RGMII interface */
		debug("No XCV/RGMII interface found in device tree\n");
		return 0;
	} else if (fdt_port_node < 0) {
		debug("%s: Error %d parsing device tree\n", __func__,
		      fdt_port_node);
		return -1;
	}
	/* The single XCV port must be port 0 of its interface */
	port_index = cvmx_fdt_get_int(fdt_addr, fdt_port_node, "reg", -1);
	if (port_index != 0) {
		printf("%s: Error: port index (reg) must be 0, not %d.\n",
		       __func__, port_index);
		return -1;
	}
	reg_addr = cvmx_fdt_get_addr(fdt_addr, fdt_interface_node, "reg");
	if (reg_addr == FDT_ADDR_T_NONE) {
		printf("%s: Error: could not get BGX interface address\n",
		       __func__);
		return -1;
	}
	/* We don't have to bother translating since only 78xx supports OCX and
	 * doesn't support RGMII.
	 */
	xi = __cvmx_bgx_reg_addr_to_xiface(reg_addr);
	debug("%s: xi.node: %d, xi.interface: 0x%x, addr: 0x%llx\n", __func__,
	      xi.node, xi.interface, (unsigned long long)reg_addr);
	if (xi.node < 0) {
		printf("%s: Device tree BGX node has invalid address 0x%llx\n",
		       __func__, (unsigned long long)reg_addr);
		return -1;
	}
	debug("%s: Found XCV (RGMII) interface on interface %d\n", __func__,
	      xi.interface);
	debug("  phy handle: 0x%x\n",
	      cvmx_fdt_get_int(fdt_addr, fdt_port_node, "phy-handle", -1));
	fdt_phy_node =
		cvmx_fdt_lookup_phandle(fdt_addr, fdt_port_node, "phy-handle");
	debug("%s: phy-handle node: 0x%x\n", __func__, fdt_phy_node);
	xiface = cvmx_helper_node_interface_to_xiface(xi.node, xi.interface);

	/* Record the port node and, if present, the PHY node offsets */
	cvmx_helper_set_port_fdt_node_offset(xiface, port_index, fdt_port_node);
	if (fdt_phy_node >= 0) {
		debug("%s: Setting PHY fdt node offset for interface 0x%x, port %d to %d\n",
		      __func__, xiface, port_index, fdt_phy_node);
		debug("%s: PHY node name: %s\n", __func__,
		      fdt_get_name(fdt_addr, fdt_phy_node, NULL));
		cvmx_helper_set_phy_fdt_node_offset(xiface, port_index,
						    fdt_phy_node);
		cvmx_helper_set_port_phy_present(xiface, port_index, true);
	} else {
		cvmx_helper_set_phy_fdt_node_offset(xiface, port_index, -1);
		debug("%s: No PHY fdt node offset for interface 0x%x, port %d to %d\n",
		      __func__, xiface, port_index, fdt_phy_node);
		cvmx_helper_set_port_phy_present(xiface, port_index, false);
	}

	return 0;
}
+
+/**
+ * Returns if a port is present on an interface
+ *
+ * @param fdt_addr - address fo flat device tree
+ * @param ipd_port - IPD port number
+ *
+ * @return 1 if port is present, 0 if not present, -1 if error
+ */
+int __cvmx_helper_board_get_port_from_dt(void *fdt_addr, int ipd_port)
+{
+ int port_index;
+ int aliases;
+ const char *pip_path;
+ char name_buffer[24];
+ int pip, iface, eth;
+ cvmx_helper_interface_mode_t mode;
+ int xiface = cvmx_helper_get_interface_num(ipd_port);
+ struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
+ u32 val;
+ int phy_node_offset;
+ int parse_bgx_dt_err;
+ int parse_vsc7224_err;
+
+ debug("%s(%p, %d)\n", __func__, fdt_addr, ipd_port);
+ if (octeon_has_feature(OCTEON_FEATURE_BGX)) {
+ static int fdt_ports_initialized;
+
+ port_index = cvmx_helper_get_interface_index_num(ipd_port);
+
+ if (!fdt_ports_initialized) {
+ if (octeon_has_feature(OCTEON_FEATURE_BGX_XCV)) {
+ if (!__cvmx_helper_parse_bgx_rgmii_dt(fdt_addr))
+ fdt_ports_initialized = 1;
+ parse_bgx_dt_err =
+ __cvmx_helper_parse_bgx_dt(fdt_addr);
+ parse_vsc7224_err =
+ __cvmx_fdt_parse_vsc7224(fdt_addr);
+ if (!parse_bgx_dt_err && !parse_vsc7224_err)
+ fdt_ports_initialized = 1;
+ } else {
+ debug("%s: Error parsing FDT\n", __func__);
+ return -1;
+ }
+ }
+
+ return cvmx_helper_is_port_valid(xiface, port_index);
+ }
+
+ mode = cvmx_helper_interface_get_mode(xiface);
+
+ switch (mode) {
+ /* Device tree has information about the following mode types. */
+ case CVMX_HELPER_INTERFACE_MODE_RGMII:
+ case CVMX_HELPER_INTERFACE_MODE_GMII:
+ case CVMX_HELPER_INTERFACE_MODE_SPI:
+ case CVMX_HELPER_INTERFACE_MODE_XAUI:
+ case CVMX_HELPER_INTERFACE_MODE_SGMII:
+ case CVMX_HELPER_INTERFACE_MODE_QSGMII:
+ case CVMX_HELPER_INTERFACE_MODE_RXAUI:
+ case CVMX_HELPER_INTERFACE_MODE_AGL:
+ case CVMX_HELPER_INTERFACE_MODE_XLAUI:
+ case CVMX_HELPER_INTERFACE_MODE_XFI:
+ aliases = 1;
+ break;
+ default:
+ aliases = 0;
+ break;
+ }
+
+ /* The device tree information is present on interfaces that have phy */
+ if (!aliases)
+ return 1;
+
+ port_index = cvmx_helper_get_interface_index_num(ipd_port);
+
+ aliases = fdt_path_offset(fdt_addr, "/aliases");
+ if (aliases < 0) {
+ debug("%s: ERROR: /aliases not found in device tree fdt_addr=%p\n",
+ __func__, fdt_addr);
+ return -1;
+ }
+
+ pip_path = (const char *)fdt_getprop(fdt_addr, aliases, "pip", NULL);
+ if (!pip_path) {
+ debug("%s: ERROR: interface %x pip path not found in device tree\n",
+ __func__, xiface);
+ return -1;
+ }
+ pip = fdt_path_offset(fdt_addr, pip_path);
+ if (pip < 0) {
+ debug("%s: ERROR: interface %x pip not found in device tree\n",
+ __func__, xiface);
+ return -1;
+ }
+ snprintf(name_buffer, sizeof(name_buffer), "interface@%d",
+ xi.interface);
+ iface = fdt_subnode_offset(fdt_addr, pip, name_buffer);
+ if (iface < 0)
+ return 0;
+ snprintf(name_buffer, sizeof(name_buffer), "ethernet@%x", port_index);
+ eth = fdt_subnode_offset(fdt_addr, iface, name_buffer);
+ debug("%s: eth subnode offset %d from %s\n", __func__, eth,
+ name_buffer);
+
+ if (eth < 0)
+ return -1;
+
+ cvmx_helper_set_port_fdt_node_offset(xiface, port_index, eth);
+
+ phy_node_offset = cvmx_fdt_get_int(fdt_addr, eth, "phy", -1);
+ cvmx_helper_set_phy_fdt_node_offset(xiface, port_index,
+ phy_node_offset);
+
+ if (fdt_getprop(fdt_addr, eth, "cavium,sgmii-mac-phy-mode", NULL))
+ cvmx_helper_set_mac_phy_mode(xiface, port_index, true);
+ else
+ cvmx_helper_set_mac_phy_mode(xiface, port_index, false);
+
+ if (fdt_getprop(fdt_addr, eth, "cavium,force-link-up", NULL))
+ cvmx_helper_set_port_force_link_up(xiface, port_index, true);
+ else
+ cvmx_helper_set_port_force_link_up(xiface, port_index, false);
+
+ if (fdt_getprop(fdt_addr, eth, "cavium,sgmii-mac-1000x-mode", NULL))
+ cvmx_helper_set_1000x_mode(xiface, port_index, true);
+ else
+ cvmx_helper_set_1000x_mode(xiface, port_index, false);
+
+ if (fdt_getprop(fdt_addr, eth, "cavium,disable-autonegotiation", NULL))
+ cvmx_helper_set_port_autonegotiation(xiface, port_index, false);
+ else
+ cvmx_helper_set_port_autonegotiation(xiface, port_index, true);
+
+ if (mode == CVMX_HELPER_INTERFACE_MODE_AGL) {
+ bool tx_bypass = false;
+
+ if (fdt_getprop(fdt_addr, eth, "cavium,rx-clk-delay-bypass",
+ NULL))
+ cvmx_helper_set_agl_rx_clock_delay_bypass(
+ xiface, port_index, true);
+ else
+ cvmx_helper_set_agl_rx_clock_delay_bypass(
+ xiface, port_index, false);
+
+ val = cvmx_fdt_get_int(fdt_addr, eth, "cavium,rx-clk-skew", 0);
+ cvmx_helper_set_agl_rx_clock_skew(xiface, port_index, val);
+
+ if (fdt_getprop(fdt_addr, eth, "cavium,tx-clk-delay-bypass",
+ NULL))
+ tx_bypass = true;
+
+ val = cvmx_fdt_get_int(fdt_addr, eth, "tx-clk-delay", 0);
+ cvmx_helper_cfg_set_rgmii_tx_clk_delay(xiface, port_index,
+ tx_bypass, val);
+
+ val = cvmx_fdt_get_int(fdt_addr, eth, "cavium,refclk-sel", 0);
+ cvmx_helper_set_agl_refclk_sel(xiface, port_index, val);
+ }
+
+ return (eth >= 0);
+}
+
+/**
+ * Given the address of the MDIO registers, output the CPU node and MDIO bus
+ *
+ * @param addr 64-bit address of MDIO registers (from device tree)
+ * @param[out] node CPU node number (78xx)
+ * @param[out] bus MDIO bus number
+ */
+void __cvmx_mdio_addr_to_node_bus(u64 addr, int *node, int *bus)
+{
+ if (OCTEON_IS_MODEL(OCTEON_CN78XX)) {
+ if (node)
+ *node = cvmx_csr_addr_to_node(addr);
+ addr = cvmx_csr_addr_strip_node(addr);
+ } else {
+ if (node)
+ *node = 0;
+ }
+ if (OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CN78XX)) {
+ switch (addr) {
+ case 0x0001180000003800:
+ *bus = 0;
+ break;
+ case 0x0001180000003880:
+ *bus = 1;
+ break;
+ case 0x0001180000003900:
+ *bus = 2;
+ break;
+ case 0x0001180000003980:
+ *bus = 3;
+ break;
+ default:
+ *bus = -1;
+ printf("%s: Invalid SMI bus address 0x%llx\n", __func__,
+ (unsigned long long)addr);
+ break;
+ }
+ } else if (OCTEON_IS_MODEL(OCTEON_CN73XX) ||
+ OCTEON_IS_MODEL(OCTEON_CNF75XX)) {
+ switch (addr) {
+ case 0x0001180000003800:
+ *bus = 0;
+ break;
+ case 0x0001180000003880:
+ *bus = 1;
+ break;
+ default:
+ *bus = -1;
+ printf("%s: Invalid SMI bus address 0x%llx\n", __func__,
+ (unsigned long long)addr);
+ break;
+ }
+ } else {
+ switch (addr) {
+ case 0x0001180000001800:
+ *bus = 0;
+ break;
+ case 0x0001180000001900:
+ *bus = 1;
+ break;
+ default:
+ *bus = -1;
+ printf("%s: Invalid SMI bus address 0x%llx\n", __func__,
+ (unsigned long long)addr);
+ break;
+ }
+ }
+}
+
+/**
+ * @INTERNAL
+ * Figure out which mod_abs changed function to use based on the phy type
+ *
+ * @param xiface xinterface number
+ * @param index port index on interface
+ *
+ * @return 0 for success, -1 on error
+ *
+ * This function figures out the proper mod_abs_changed function to use and
+ * registers the appropriate function. This should be called after the device
+ * tree has been fully parsed for the given port as well as after all SFP
+ * slots and any Microsemi VSC7224 devices have been parsed in the device tree.
+ */
+int cvmx_helper_phy_register_mod_abs_changed(int xiface, int index)
+{
+ struct cvmx_vsc7224_chan *vsc7224_chan;
+ struct cvmx_fdt_sfp_info *sfp_info;
+
+ debug("%s(0x%x, %d)\n", __func__, xiface, index);
+ sfp_info = cvmx_helper_cfg_get_sfp_info(xiface, index);
+ /* Don't return an error if no SFP module is registered */
+ if (!sfp_info) {
+ debug("%s: No SFP associated with 0x%x:%d\n", __func__, xiface,
+ index);
+ return 0;
+ }
+
+ /* See if the Microsemi VSC7224 reclocking chip has been used */
+ vsc7224_chan = cvmx_helper_cfg_get_vsc7224_chan_info(xiface, index);
+ if (vsc7224_chan) {
+ debug("%s: Registering VSC7224 handler\n", __func__);
+ cvmx_sfp_register_mod_abs_changed(sfp_info,
+ &cvmx_sfp_vsc7224_mod_abs_changed, NULL);
+ return 0;
+ }
+
+ return 0;
+}
--
2.35.1
^ permalink raw reply related [flat|nested] 52+ messages in thread* [PATCH 12/52] mips: octeon: Add cvmx-helper-fpa.c
2022-03-30 10:06 [PATCH 00/52] mips: octeon: Add ethernet support Stefan Roese
` (10 preceding siblings ...)
2022-03-30 10:06 ` [PATCH 11/52] mips: octeon: Add cvmx-helper-board.c Stefan Roese
@ 2022-03-30 10:06 ` Stefan Roese
2022-03-30 10:06 ` [PATCH 13/52] mips: octeon: Add cvmx-helper-igl.c Stefan Roese
` (37 subsequent siblings)
49 siblings, 0 replies; 52+ messages in thread
From: Stefan Roese @ 2022-03-30 10:06 UTC (permalink / raw)
To: u-boot; +Cc: awilliams, cchavva
From: Aaron Williams <awilliams@marvell.com>
Import cvmx-helper-fpa.c from 2013 U-Boot. It will be used by the later
added drivers to support networking on the MIPS Octeon II / III
platforms.
Signed-off-by: Aaron Williams <awilliams@marvell.com>
Signed-off-by: Stefan Roese <sr@denx.de>
---
arch/mips/mach-octeon/cvmx-helper-fpa.c | 329 ++++++++++++++++++++++++
1 file changed, 329 insertions(+)
create mode 100644 arch/mips/mach-octeon/cvmx-helper-fpa.c
diff --git a/arch/mips/mach-octeon/cvmx-helper-fpa.c b/arch/mips/mach-octeon/cvmx-helper-fpa.c
new file mode 100644
index 000000000000..ba31e66579f1
--- /dev/null
+++ b/arch/mips/mach-octeon/cvmx-helper-fpa.c
@@ -0,0 +1,329 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018-2022 Marvell International Ltd.
+ *
+ * Helper functions for FPA setup.
+ */
+
+#include <errno.h>
+#include <log.h>
+#include <time.h>
+#include <linux/delay.h>
+
+#include <mach/cvmx-regs.h>
+#include <mach/cvmx-csr.h>
+#include <mach/cvmx-bootmem.h>
+#include <mach/octeon-model.h>
+#include <mach/cvmx-fuse.h>
+#include <mach/octeon-feature.h>
+#include <mach/cvmx-qlm.h>
+#include <mach/octeon_qlm.h>
+#include <mach/cvmx-pcie.h>
+#include <mach/cvmx-coremask.h>
+#include <mach/cvmx-range.h>
+#include <mach/cvmx-global-resources.h>
+
+#include <mach/cvmx-agl-defs.h>
+#include <mach/cvmx-bgxx-defs.h>
+#include <mach/cvmx-ciu-defs.h>
+#include <mach/cvmx-gmxx-defs.h>
+#include <mach/cvmx-gserx-defs.h>
+#include <mach/cvmx-ilk-defs.h>
+#include <mach/cvmx-ipd-defs.h>
+#include <mach/cvmx-pcsx-defs.h>
+#include <mach/cvmx-pcsxx-defs.h>
+#include <mach/cvmx-pki-defs.h>
+#include <mach/cvmx-pko-defs.h>
+#include <mach/cvmx-xcv-defs.h>
+
+#include <mach/cvmx-hwpko.h>
+#include <mach/cvmx-ilk.h>
+#include <mach/cvmx-ipd.h>
+#include <mach/cvmx-pki.h>
+#include <mach/cvmx-pko3.h>
+#include <mach/cvmx-pko3-queue.h>
+#include <mach/cvmx-pko3-resources.h>
+#include <mach/cvmx-pip.h>
+
+#include <mach/cvmx-helper.h>
+#include <mach/cvmx-helper-board.h>
+#include <mach/cvmx-helper-cfg.h>
+
+#include <mach/cvmx-helper-bgx.h>
+#include <mach/cvmx-helper-cfg.h>
+#include <mach/cvmx-helper-util.h>
+#include <mach/cvmx-helper-pki.h>
+#include <mach/cvmx-helper-pko.h>
+
+/**
+ * Dump the state of all FPA1 hardware pools.
+ *
+ * @param node	Node to dump; -1 selects the local node.
+ *
+ * Pools whose block size is zero (unconfigured) are skipped. Pools
+ * that are not owned by the current application are flagged with '*'.
+ */
+void cvmx_helper_fpa1_dump(int node)
+{
+	int pools_max = cvmx_fpa_get_max_pools();
+	int pool;
+
+	if (node == -1)
+		node = cvmx_get_node_num();
+
+	printf("FPA pool status: pools count: %u\n", pools_max);
+	printf("----------------------------------------------------\n");
+	printf("%5s %5s %12s %16s\n", "POOL", "Size", "Free", "Name");
+
+	for (pool = 0; pool < pools_max; pool++) {
+		unsigned int sz = cvmx_fpa_get_block_size(pool);
+		/* String literals must only be referenced via const pointers */
+		const char *s = "";
+
+		if (sz == 0)
+			continue;
+		if (cvmx_fpa_get_pool_owner(pool) != cvmx_get_app_id())
+			s = "*";
+
+		printf("%5u %5u %12u %16s %1s\n", pool, sz,
+		       cvmx_fpa_get_current_count(pool),
+		       cvmx_fpa_get_name(pool), s);
+	}
+}
+
+/**
+ * Dump the state of all FPA3 pools and auras on the given node.
+ *
+ * @param node	Node whose FPA3 state is printed.
+ *
+ * Pools with a zero configured buffer size and auras attached to
+ * pool 0 are treated as unconfigured and skipped. For auras, the
+ * remaining count is printed unless the limit register holds the
+ * "unlimited" sentinel value.
+ */
+void cvmx_helper_fpa3_dump(unsigned int node)
+{
+	int lpool, laura;
+	unsigned int intr;
+	char line[128];	/* separator line; only the first 81 bytes used */
+
+	intr = csr_rd_node(node, CVMX_FPA_ERR_INT);
+	memset(line, '*', 80);
+	line[80] = '\0';
+	printf("\n%s\n", line);
+	printf(" FPA3 on node %u: intr=%#x\n", node, intr);
+	printf("%s\n", line);
+	printf("%6s %5s %14s %14s %16s %s\n", "POOL", "Size", "Free", "",
+	       "Name", "Intr");
+
+	for (lpool = 0; lpool < cvmx_fpa3_num_pools(); lpool++) {
+		cvmx_fpa3_pool_t pool;
+		cvmx_fpa_poolx_cfg_t pool_cfg;
+		cvmx_fpa_poolx_available_t avail_reg;
+		unsigned int bsz;
+		unsigned long long bcnt;
+
+		pool = __cvmx_fpa3_pool(node, lpool);
+		pool_cfg.u64 =
+			csr_rd_node(pool.node, CVMX_FPA_POOLX_CFG(pool.lpool));
+		/* buf_size is stored in units of 128 bytes (hence << 7) */
+		bsz = pool_cfg.cn78xx.buf_size << 7;
+		if (bsz == 0)
+			continue;	/* pool not configured */
+		avail_reg.u64 = csr_rd_node(
+			pool.node, CVMX_FPA_POOLX_AVAILABLE(pool.lpool));
+		bcnt = avail_reg.cn78xx.count;
+		intr = csr_rd_node(pool.node, CVMX_FPA_POOLX_INT(pool.lpool));
+
+		printf("%6u %5u %14llu %14s %16s %#4x\n", lpool, bsz, bcnt, "",
+		       cvmx_fpa3_get_pool_name(pool), intr);
+	}
+	printf("\n");
+
+	printf("%6s %5s %14s %14s %16s %s\n", "AURA", "POOL", "Allocated",
+	       "Remaining", "Name", "Intr");
+
+	for (laura = 0; laura < cvmx_fpa3_num_auras(); laura++) {
+		cvmx_fpa3_gaura_t aura;
+		cvmx_fpa_aurax_cnt_t cnt_reg;
+		cvmx_fpa_aurax_cnt_limit_t limit_reg;
+		cvmx_fpa_aurax_pool_t aurax_pool;
+		unsigned long long cnt, limit, rem;
+		unsigned int pool;
+		const char *name;
+
+		aura = __cvmx_fpa3_gaura(node, laura);
+		aurax_pool.u64 =
+			csr_rd_node(aura.node, CVMX_FPA_AURAX_POOL(aura.laura));
+		pool = aurax_pool.cn78xx.pool;
+		if (pool == 0)
+			continue;	/* aura not attached to a pool */
+		cnt_reg.u64 =
+			csr_rd_node(aura.node, CVMX_FPA_AURAX_CNT(aura.laura));
+		limit_reg.u64 = csr_rd_node(
+			aura.node, CVMX_FPA_AURAX_CNT_LIMIT(aura.laura));
+		cnt = cnt_reg.cn78xx.cnt;
+		limit = limit_reg.cn78xx.limit;
+		rem = limit - cnt;
+		name = cvmx_fpa3_get_aura_name(aura);
+		intr = csr_rd_node(aura.node, CVMX_FPA_AURAX_INT(aura.laura));
+
+		if (limit == CVMX_FPA3_AURAX_LIMIT_MAX)
+			printf("%6u %5u %14llu %14s %16s %#4x\n", laura, pool,
+			       cnt, "unlimited", name, intr);
+		else
+			printf("%6u %5u %14llu %14llu %16s %#4x\n", laura, pool,
+			       cnt, rem, name, intr);
+	}
+	printf("\n");
+}
+
+/**
+ * Dump FPA state, dispatching to the FPA3 or FPA1 dumper depending on
+ * which generation of the free-pool allocator the chip implements.
+ *
+ * @param node	Node to dump; -1 selects the local node.
+ */
+void cvmx_helper_fpa_dump(int node)
+{
+	int has_fpa3 = octeon_has_feature(OCTEON_FEATURE_FPA3);
+
+	if (node == -1)
+		node = cvmx_get_node_num();
+
+	if (has_fpa3)
+		cvmx_helper_fpa3_dump(node);
+	else
+		cvmx_helper_fpa1_dump(node);
+}
+
+/**
+ * Shut down every FPA pool (and, on FPA3 parts, every aura) on a node.
+ *
+ * @param node	Node whose pools are torn down.
+ * @return Always 0.
+ */
+int cvmx_helper_shutdown_fpa_pools(int node)
+{
+	int i;
+
+	if (!octeon_has_feature(OCTEON_FEATURE_FPA3)) {
+		/* FPA1: shut down configured pools, then disable the unit */
+		for (i = 0; i < CVMX_FPA1_NUM_POOLS; i++) {
+			if (cvmx_fpa_get_block_size(i) > 0)
+				cvmx_fpa_shutdown_pool(i);
+		}
+		if (!OCTEON_IS_MODEL(OCTEON_CN68XX))
+			cvmx_fpa_disable();
+		return 0;
+	}
+
+	/* FPA3: auras are torn down before their backing pools */
+	for (i = 0; i < cvmx_fpa3_num_auras(); i++)
+		(void)cvmx_fpa3_shutdown_aura(__cvmx_fpa3_gaura(node, i));
+
+	for (i = 0; i < cvmx_fpa3_num_pools(); i++)
+		(void)cvmx_fpa3_shutdown_pool(__cvmx_fpa3_pool(node, i));
+
+	return 0;
+}
+
+/**
+ * @INTERNAL
+ * OBSOLETE
+ *
+ * Allocate memory for and initialize a single FPA pool.
+ *
+ * Thin wrapper around cvmx_fpa_setup_pool(), kept only for the
+ * transition away from the legacy API; it will be removed.
+ *
+ * @param pool Pool to initialize
+ * @param buffer_size Size of buffers to allocate in bytes
+ * @param buffers Number of buffers to put in the pool. Zero is allowed
+ * @param name String name of the pool for debugging purposes
+ * @return Zero on success, non-zero on failure
+ */
+int __cvmx_helper_initialize_fpa_pool(int pool, u64 buffer_size, u64 buffers,
+				      const char *name)
+{
+	return cvmx_fpa_setup_pool(pool, name, NULL, buffer_size, buffers);
+}
+
+/**
+ * @INTERNAL
+ * Carve out the requested FPA pools using memory from cvmx-bootmem.
+ * A pool whose buffer count is zero is left untouched, which saves
+ * memory when the corresponding hardware block is unused. Use
+ * cvmx_helper_initialize_fpa() instead of calling this directly.
+ *
+ * @param pip_pool	Should always be CVMX_FPA_PACKET_POOL
+ * @param pip_size	Should always be CVMX_FPA_PACKET_POOL_SIZE
+ * @param pip_buffers	Number of packet buffers
+ * @param wqe_pool	Should always be CVMX_FPA_WQE_POOL
+ * @param wqe_size	Should always be CVMX_FPA_WQE_POOL_SIZE
+ * @param wqe_entries	Number of work queue entries
+ * @param pko_pool	Should always be CVMX_FPA_OUTPUT_BUFFER_POOL
+ * @param pko_size	Should always be CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE
+ * @param pko_buffers	PKO command buffers; at least two per PKO queue
+ * @param tim_pool	Should always be CVMX_FPA_TIMER_POOL
+ * @param tim_size	Should always be CVMX_FPA_TIMER_POOL_SIZE
+ * @param tim_buffers	TIM ring buffer command queues; at least two per
+ *			timer bucket is recommended
+ * @param dfa_pool	Should always be CVMX_FPA_DFA_POOL
+ * @param dfa_size	Should always be CVMX_FPA_DFA_POOL_SIZE
+ * @param dfa_buffers	DFA command buffers
+ *			NOTE(review): the dfa_* arguments are accepted
+ *			but no DFA pool is ever set up here — confirm
+ *			whether that is intentional
+ * @return Zero on success, non-zero if out of memory
+ */
+static int
+__cvmx_helper_initialize_fpa(int pip_pool, int pip_size, int pip_buffers,
+			     int wqe_pool, int wqe_size, int wqe_entries,
+			     int pko_pool, int pko_size, int pko_buffers,
+			     int tim_pool, int tim_size, int tim_buffers,
+			     int dfa_pool, int dfa_size, int dfa_buffers)
+{
+	cvmx_fpa_enable();
+
+	if (pip_buffers > 0)
+		cvmx_fpa_setup_pool(pip_pool, "PKI_POOL", NULL, pip_size,
+				    pip_buffers);
+
+	/* A WQE pool is only carved out when distinct from the packet pool */
+	if (wqe_entries > 0 && wqe_pool != pip_pool)
+		cvmx_fpa_setup_pool(wqe_pool, "WQE_POOL", NULL, wqe_size,
+				    wqe_entries);
+
+	/*
+	 * On chips with PKI (cn78xx) the PKO allocates its own FPA pool
+	 * per hardware constraints, so only legacy parts get one here.
+	 */
+	if (pko_buffers > 0 && !octeon_has_feature(OCTEON_FEATURE_PKI))
+		cvmx_fpa_setup_pool(pko_pool, "PKO_POOL", NULL, pko_size,
+				    pko_buffers);
+
+	if (tim_buffers > 0)
+		cvmx_fpa_setup_pool(tim_pool, "TIM_POOL", NULL, tim_size,
+				    tim_buffers);
+
+	return 0;
+}
+
+/**
+ * Allocate and initialize the FPA pools with memory from cvmx-bootmem.
+ * Element sizes come from the cvmx-config.h header. Passing zero for a
+ * buffer count skips that pool entirely, which saves memory when the
+ * matching hardware block is unused.
+ *
+ * @param packet_buffers	Number of packet buffers to allocate
+ * @param work_queue_entries	Number of work queue entries
+ * @param pko_buffers	PKO command buffers; at least two per PKO queue
+ * @param tim_buffers	TIM ring buffer command queues; at least two per
+ *			timer bucket is recommended
+ * @param dfa_buffers	DFA command buffers; a small number (e.g. 32)
+ *			should work
+ * @return Zero on success, non-zero if out of memory
+ */
+int cvmx_helper_initialize_fpa(int packet_buffers, int work_queue_entries,
+			       int pko_buffers, int tim_buffers,
+			       int dfa_buffers)
+{
+	int packet_pool = (int)cvmx_fpa_get_packet_pool();
+	int wqe_pool = (int)cvmx_fpa_get_wqe_pool();
+	int pko_cmd_pool = (int)cvmx_fpa_get_pko_pool();
+	/* TIM and DFA pools are not assigned here; their sizes pass as 0 */
+	int timer_pool = 0;
+	int dfa_pool = 0;
+
+	return __cvmx_helper_initialize_fpa(
+		packet_pool, cvmx_fpa_get_packet_pool_block_size(),
+		packet_buffers, wqe_pool, cvmx_fpa_get_wqe_pool_block_size(),
+		work_queue_entries, pko_cmd_pool,
+		cvmx_fpa_get_pko_pool_block_size(), pko_buffers, timer_pool, 0,
+		tim_buffers, dfa_pool, 0, dfa_buffers);
+}
--
2.35.1
^ permalink raw reply related [flat|nested] 52+ messages in thread* [PATCH 13/52] mips: octeon: Add cvmx-helper-igl.c
2022-03-30 10:06 [PATCH 00/52] mips: octeon: Add ethernet support Stefan Roese
` (11 preceding siblings ...)
2022-03-30 10:06 ` [PATCH 12/52] mips: octeon: Add cvmx-helper-fpa.c Stefan Roese
@ 2022-03-30 10:06 ` Stefan Roese
2022-03-30 10:06 ` [PATCH 14/52] mips: octeon: Add cvmx-helper-ipd.c Stefan Roese
` (36 subsequent siblings)
49 siblings, 0 replies; 52+ messages in thread
From: Stefan Roese @ 2022-03-30 10:06 UTC (permalink / raw)
To: u-boot; +Cc: awilliams, cchavva
From: Aaron Williams <awilliams@marvell.com>
Import cvmx-helper-ilk.c from 2013 U-Boot. It will be used by the later
added drivers to support networking on the MIPS Octeon II / III
platforms.
Signed-off-by: Aaron Williams <awilliams@marvell.com>
Signed-off-by: Stefan Roese <sr@denx.de>
---
arch/mips/mach-octeon/cvmx-helper-ilk.c | 926 ++++++++++++++++++++++++
1 file changed, 926 insertions(+)
create mode 100644 arch/mips/mach-octeon/cvmx-helper-ilk.c
diff --git a/arch/mips/mach-octeon/cvmx-helper-ilk.c b/arch/mips/mach-octeon/cvmx-helper-ilk.c
new file mode 100644
index 000000000000..7b55101c505f
--- /dev/null
+++ b/arch/mips/mach-octeon/cvmx-helper-ilk.c
@@ -0,0 +1,926 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018-2022 Marvell International Ltd.
+ *
+ * Functions for ILK initialization, configuration,
+ * and monitoring.
+ */
+
+#include <time.h>
+#include <log.h>
+#include <linux/delay.h>
+
+#include <mach/cvmx-regs.h>
+#include <mach/cvmx-csr.h>
+#include <mach/cvmx-bootmem.h>
+#include <mach/octeon-model.h>
+#include <mach/cvmx-fuse.h>
+#include <mach/octeon-feature.h>
+#include <mach/cvmx-qlm.h>
+#include <mach/octeon_qlm.h>
+#include <mach/cvmx-pcie.h>
+#include <mach/cvmx-coremask.h>
+
+#include <mach/cvmx-agl-defs.h>
+#include <mach/cvmx-bgxx-defs.h>
+#include <mach/cvmx-ciu-defs.h>
+#include <mach/cvmx-gmxx-defs.h>
+#include <mach/cvmx-ilk-defs.h>
+#include <mach/cvmx-ipd-defs.h>
+#include <mach/cvmx-pcsx-defs.h>
+#include <mach/cvmx-pcsxx-defs.h>
+#include <mach/cvmx-pki-defs.h>
+#include <mach/cvmx-pko-defs.h>
+#include <mach/cvmx-xcv-defs.h>
+
+#include <mach/cvmx-hwpko.h>
+#include <mach/cvmx-ilk.h>
+#include <mach/cvmx-pki.h>
+
+#include <mach/cvmx-helper.h>
+#include <mach/cvmx-helper-board.h>
+#include <mach/cvmx-helper-cfg.h>
+
+int __cvmx_helper_ilk_enumerate(int xiface)
+{
+	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
+	int ilk_intf = xi.interface - CVMX_ILK_GBL_BASE();
+
+	/* Number of channels configured for this ILK interface */
+	return cvmx_ilk_chans[xi.node][ilk_intf];
+}
+
+/**
+ * @INTERNAL
+ * Initialize all tx calendar entries to the xoff state.
+ * Initialize all rx calendar entries to the xon state. The rx calendar entries
+ * must be in the xon state to allow new pko pipe assignments. If a calendar
+ * entry is assigned a different pko pipe while in the xoff state, the old pko
+ * pipe will stay in the xoff state even when no longer used by ilk.
+ *
+ * @param intf Interface whose calendar are to be initialized.
+ */
+static void __cvmx_ilk_clear_cal_cn78xx(int intf)
+{
+	cvmx_ilk_txx_cal_entryx_t tx_entry;
+	cvmx_ilk_rxx_cal_entryx_t rx_entry;
+	int i;
+	int node = (intf >> 4) & 0xf;
+	int interface = (intf & 0xf);
+
+	/* Initialize all tx calendar entries to off */
+	tx_entry.u64 = 0;
+	tx_entry.s.ctl = XOFF;
+	for (i = 0; i < CVMX_ILK_MAX_CAL; i++) {
+		csr_wr_node(node, CVMX_ILK_TXX_CAL_ENTRYX(i, interface),
+			    tx_entry.u64);
+	}
+
+	/*
+	 * Initialize all rx calendar entries to on. XON is required here
+	 * (the previous code wrote XOFF): per the contract above — and
+	 * matching the cn68xx variant — rx entries must be xon so that
+	 * new pko pipe assignments do not get stuck in the xoff state.
+	 */
+	rx_entry.u64 = 0;
+	rx_entry.s.ctl = XON;
+	for (i = 0; i < CVMX_ILK_MAX_CAL; i++) {
+		csr_wr_node(node, CVMX_ILK_RXX_CAL_ENTRYX(i, interface),
+			    rx_entry.u64);
+	}
+}
+
+/**
+ * @INTERNAL
+ * Initialize all tx calendar entries to the xoff state.
+ * Initialize all rx calendar entries to the xon state. The rx calendar entries
+ * must be in the xon state to allow new pko pipe assignments. If a calendar
+ * entry is assigned a different pko pipe while in the xoff state, the old pko
+ * pipe will stay in the xoff state even when no longer used by ilk.
+ *
+ * @param interface whose calendar are to be initialized.
+ */
+static void __cvmx_ilk_clear_cal_cn68xx(int interface)
+{
+	cvmx_ilk_txx_idx_cal_t tx_idx;
+	cvmx_ilk_txx_mem_cal0_t tx_cal0;
+	cvmx_ilk_txx_mem_cal1_t tx_cal1;
+	cvmx_ilk_rxx_idx_cal_t rx_idx;
+	cvmx_ilk_rxx_mem_cal0_t rx_cal0;
+	cvmx_ilk_rxx_mem_cal1_t rx_cal1;
+	int i;
+
+	/*
+	 * First we initialize the tx calendar starting from entry 0,
+	 * incrementing the entry with every write.
+	 */
+	tx_idx.u64 = 0;
+	tx_idx.s.inc = 1;
+	csr_wr(CVMX_ILK_TXX_IDX_CAL(interface), tx_idx.u64);
+
+	/* Set state to xoff for all entries */
+	tx_cal0.u64 = 0;
+	tx_cal0.s.entry_ctl0 = XOFF;
+	tx_cal0.s.entry_ctl1 = XOFF;
+	tx_cal0.s.entry_ctl2 = XOFF;
+	tx_cal0.s.entry_ctl3 = XOFF;
+
+	tx_cal1.u64 = 0;
+	tx_cal1.s.entry_ctl4 = XOFF;
+	tx_cal1.s.entry_ctl5 = XOFF;
+	tx_cal1.s.entry_ctl6 = XOFF;
+	tx_cal1.s.entry_ctl7 = XOFF;
+
+	/*
+	 * Write all 288 entries. Each CAL0/CAL1 write pair covers 8
+	 * entries and the index register auto-increments after each
+	 * access (inc=1 above).
+	 */
+	for (i = 0; i < CVMX_ILK_MAX_CAL_IDX; i++) {
+		csr_wr(CVMX_ILK_TXX_MEM_CAL0(interface), tx_cal0.u64);
+		csr_wr(CVMX_ILK_TXX_MEM_CAL1(interface), tx_cal1.u64);
+	}
+
+	/*
+	 * Next we initialize the rx calendar starting from entry 0,
+	 * incrementing the entry with every write.
+	 */
+	rx_idx.u64 = 0;
+	rx_idx.s.inc = 1;
+	csr_wr(CVMX_ILK_RXX_IDX_CAL(interface), rx_idx.u64);
+
+	/* Set state to xon for all entries */
+	rx_cal0.u64 = 0;
+	rx_cal0.s.entry_ctl0 = XON;
+	rx_cal0.s.entry_ctl1 = XON;
+	rx_cal0.s.entry_ctl2 = XON;
+	rx_cal0.s.entry_ctl3 = XON;
+
+	rx_cal1.u64 = 0;
+	rx_cal1.s.entry_ctl4 = XON;
+	rx_cal1.s.entry_ctl5 = XON;
+	rx_cal1.s.entry_ctl6 = XON;
+	rx_cal1.s.entry_ctl7 = XON;
+
+	/* Write all 288 entries (8 entries per CAL0/CAL1 pair, as above) */
+	for (i = 0; i < CVMX_ILK_MAX_CAL_IDX; i++) {
+		csr_wr(CVMX_ILK_RXX_MEM_CAL0(interface), rx_cal0.u64);
+		csr_wr(CVMX_ILK_RXX_MEM_CAL1(interface), rx_cal1.u64);
+	}
+}
+
+/**
+ * @INTERNAL
+ * Initialize all calendar entries.
+ *
+ * @param interface whose calendar is to be initialized.
+ */
+void __cvmx_ilk_clear_cal(int interface)
+{
+	/* Dispatch on chip generation; other models have no ILK block */
+	if (OCTEON_IS_MODEL(OCTEON_CN78XX))
+		__cvmx_ilk_clear_cal_cn78xx(interface);
+	else if (OCTEON_IS_MODEL(OCTEON_CN68XX))
+		__cvmx_ilk_clear_cal_cn68xx(interface);
+}
+
+/**
+ * @INTERNAL
+ * Program a cn68xx tx calendar entry for a channel, mapping the
+ * channel to the given backpressure ID (bpid).
+ *
+ * @param interface	ILK interface the channel belongs to
+ * @param channel	Channel whose calendar entry is written
+ * @param bpid		Backpressure ID assigned to the channel
+ */
+void __cvmx_ilk_write_tx_cal_entry_cn68xx(int interface, int channel,
+					  unsigned char bpid)
+{
+	cvmx_ilk_txx_idx_cal_t tx_idx;
+	cvmx_ilk_txx_mem_cal0_t tx_cal0;
+	cvmx_ilk_txx_mem_cal1_t tx_cal1;
+	int entry;
+	int window;
+	int window_entry;
+
+	/*
+	 * The calendar has 288 entries. Each calendar entry represents a
+	 * channel's flow control state or the link flow control state.
+	 * Starting with the first entry, every sixteenth entry is used for the
+	 * link flow control state. The other 15 entries are used for the
+	 * channels flow control state:
+	 * entry 0 ----> link flow control state
+	 * entry 1 ----> channel 0 flow control state
+	 * entry 2 ----> channel 1 flow control state
+	 * ...
+	 * entry 15 ----> channel 14 flow control state
+	 * entry 16 ----> link flow control state
+	 * entry 17 ----> channel 15 flow control state
+	 *
+	 * Also, the calendar is accessed via windows into it. Each window maps
+	 * to 8 entries.
+	 */
+	entry = 1 + channel + (channel / 15);
+	window = entry / 8;
+	window_entry = entry % 8;
+
+	/* Indicate the window we need to access */
+	tx_idx.u64 = 0;
+	tx_idx.s.index = window;
+	csr_wr(CVMX_ILK_TXX_IDX_CAL(interface), tx_idx.u64);
+
+	/* Get the window's current value (read-modify-write below) */
+	tx_cal0.u64 = csr_rd(CVMX_ILK_TXX_MEM_CAL0(interface));
+	tx_cal1.u64 = csr_rd(CVMX_ILK_TXX_MEM_CAL1(interface));
+
+	/* Force every sixteenth entry as link flow control state */
+	if ((window & 1) == 0)
+		tx_cal0.s.entry_ctl0 = LINK;
+
+	/* Update only the slot for this channel within the window */
+	switch (window_entry) {
+	case 0:
+		tx_cal0.s.entry_ctl0 = 0;
+		tx_cal0.s.bpid0 = bpid;
+		break;
+	case 1:
+		tx_cal0.s.entry_ctl1 = 0;
+		tx_cal0.s.bpid1 = bpid;
+		break;
+	case 2:
+		tx_cal0.s.entry_ctl2 = 0;
+		tx_cal0.s.bpid2 = bpid;
+		break;
+	case 3:
+		tx_cal0.s.entry_ctl3 = 0;
+		tx_cal0.s.bpid3 = bpid;
+		break;
+	case 4:
+		tx_cal1.s.entry_ctl4 = 0;
+		tx_cal1.s.bpid4 = bpid;
+		break;
+	case 5:
+		tx_cal1.s.entry_ctl5 = 0;
+		tx_cal1.s.bpid5 = bpid;
+		break;
+	case 6:
+		tx_cal1.s.entry_ctl6 = 0;
+		tx_cal1.s.bpid6 = bpid;
+		break;
+	case 7:
+		tx_cal1.s.entry_ctl7 = 0;
+		tx_cal1.s.bpid7 = bpid;
+		break;
+	}
+
+	/* Write the window new value */
+	csr_wr(CVMX_ILK_TXX_MEM_CAL0(interface), tx_cal0.u64);
+	csr_wr(CVMX_ILK_TXX_MEM_CAL1(interface), tx_cal1.u64);
+}
+
+void __cvmx_ilk_write_tx_cal_entry_cn78xx(int intf, int channel,
+					  unsigned char bpid)
+{
+	cvmx_ilk_txx_cal_entryx_t entry;
+	/* Each group of 16 entries holds one link slot plus 15 channels */
+	int cal_block = channel / 15;
+	int cal_slot = channel % 15 + 1;
+	int index = cal_block * 16 + cal_slot;
+	int node = (intf >> 4) & 0xf;
+	int interface = intf & 0xf;
+
+	/* The first slot of each block carries the link flow-control state */
+	if (cal_slot == 1) {
+		entry.u64 = 0;
+		entry.s.ctl = 1;
+		csr_wr_node(node, CVMX_ILK_TXX_CAL_ENTRYX(index - 1, interface),
+			    entry.u64);
+	}
+
+	entry.u64 = 0;
+	entry.s.ctl = 0;
+	entry.s.channel = channel;
+	csr_wr_node(node, CVMX_ILK_TXX_CAL_ENTRYX(index, interface),
+		    entry.u64);
+}
+
+/**
+ * @INTERNAL
+ * Setup the channel's tx calendar entry.
+ *
+ * @param interface channel belongs to
+ * @param channel whose calendar entry is to be updated
+ * @param bpid assigned to the channel
+ */
+void __cvmx_ilk_write_tx_cal_entry(int interface, int channel,
+				   unsigned char bpid)
+{
+	if (!OCTEON_IS_MODEL(OCTEON_CN68XX)) {
+		__cvmx_ilk_write_tx_cal_entry_cn78xx(interface, channel, bpid);
+		return;
+	}
+
+	__cvmx_ilk_write_tx_cal_entry_cn68xx(interface, channel, bpid);
+}
+
+void __cvmx_ilk_write_rx_cal_entry_cn78xx(int intf, int channel,
+					  unsigned char bpid)
+{
+	cvmx_ilk_rxx_cal_entryx_t entry;
+	/* Each group of 16 entries holds one link slot plus 15 channels */
+	int cal_block = channel / 15;
+	int cal_slot = channel % 15 + 1;
+	int index = cal_block * 16 + cal_slot;
+	int node = (intf >> 4) & 0xf;
+	int interface = intf & 0xf;
+
+	/* The first slot of each block carries the link flow-control state */
+	if (cal_slot == 1) {
+		entry.u64 = 0;
+		entry.s.ctl = 1;
+		csr_wr_node(node, CVMX_ILK_RXX_CAL_ENTRYX(index - 1, interface),
+			    entry.u64);
+	}
+
+	entry.u64 = 0;
+	entry.s.ctl = 0;
+	entry.s.channel = channel;
+	csr_wr_node(node, CVMX_ILK_RXX_CAL_ENTRYX(index, interface),
+		    entry.u64);
+}
+
+/**
+ * @INTERNAL
+ * Program a cn68xx rx calendar entry for a channel, mapping the
+ * channel to the given PKO port pipe.
+ *
+ * @param interface	ILK interface the channel belongs to
+ * @param channel	Channel whose calendar entry is written
+ * @param pipe		PKO port pipe assigned to the channel
+ */
+void __cvmx_ilk_write_rx_cal_entry_cn68xx(int interface, int channel,
+					  unsigned char pipe)
+{
+	cvmx_ilk_rxx_idx_cal_t rx_idx;
+	cvmx_ilk_rxx_mem_cal0_t rx_cal0;
+	cvmx_ilk_rxx_mem_cal1_t rx_cal1;
+	int entry;
+	int window;
+	int window_entry;
+
+	/*
+	 * The calendar has 288 entries. Each calendar entry represents a
+	 * channel's flow control state or the link flow control state.
+	 * Starting with the first entry, every sixteenth entry is used for the
+	 * link flow control state. The other 15 entries are used for the
+	 * channels flow control state:
+	 * entry 0 ----> link flow control state
+	 * entry 1 ----> channel 0 flow control state
+	 * entry 2 ----> channel 1 flow control state
+	 * ...
+	 * entry 15 ----> channel 14 flow control state
+	 * entry 16 ----> link flow control state
+	 * entry 17 ----> channel 15 flow control state
+	 *
+	 * Also, the calendar is accessed via windows into it. Each window maps
+	 * to 8 entries.
+	 */
+	entry = 1 + channel + (channel / 15);
+	window = entry / 8;
+	window_entry = entry % 8;
+
+	/* Indicate the window we need to access */
+	rx_idx.u64 = 0;
+	rx_idx.s.index = window;
+	csr_wr(CVMX_ILK_RXX_IDX_CAL(interface), rx_idx.u64);
+
+	/* Get the window's current value (read-modify-write below) */
+	rx_cal0.u64 = csr_rd(CVMX_ILK_RXX_MEM_CAL0(interface));
+	rx_cal1.u64 = csr_rd(CVMX_ILK_RXX_MEM_CAL1(interface));
+
+	/* Force every sixteenth entry as link flow control state */
+	if ((window & 1) == 0)
+		rx_cal0.s.entry_ctl0 = LINK;
+
+	/* Update only the slot for this channel within the window */
+	switch (window_entry) {
+	case 0:
+		rx_cal0.s.entry_ctl0 = 0;
+		rx_cal0.s.port_pipe0 = pipe;
+		break;
+	case 1:
+		rx_cal0.s.entry_ctl1 = 0;
+		rx_cal0.s.port_pipe1 = pipe;
+		break;
+	case 2:
+		rx_cal0.s.entry_ctl2 = 0;
+		rx_cal0.s.port_pipe2 = pipe;
+		break;
+	case 3:
+		rx_cal0.s.entry_ctl3 = 0;
+		rx_cal0.s.port_pipe3 = pipe;
+		break;
+	case 4:
+		rx_cal1.s.entry_ctl4 = 0;
+		rx_cal1.s.port_pipe4 = pipe;
+		break;
+	case 5:
+		rx_cal1.s.entry_ctl5 = 0;
+		rx_cal1.s.port_pipe5 = pipe;
+		break;
+	case 6:
+		rx_cal1.s.entry_ctl6 = 0;
+		rx_cal1.s.port_pipe6 = pipe;
+		break;
+	case 7:
+		rx_cal1.s.entry_ctl7 = 0;
+		rx_cal1.s.port_pipe7 = pipe;
+		break;
+	}
+
+	/* Write the window new value */
+	csr_wr(CVMX_ILK_RXX_MEM_CAL0(interface), rx_cal0.u64);
+	csr_wr(CVMX_ILK_RXX_MEM_CAL1(interface), rx_cal1.u64);
+}
+
+/**
+ * @INTERNAL
+ * Setup the channel's rx calendar entry.
+ *
+ * @param interface channel belongs to
+ * @param channel whose calendar entry is to be updated
+ * @param pipe PKO assigned to the channel
+ */
+void __cvmx_ilk_write_rx_cal_entry(int interface, int channel,
+				   unsigned char pipe)
+{
+	if (!OCTEON_IS_MODEL(OCTEON_CN68XX)) {
+		__cvmx_ilk_write_rx_cal_entry_cn78xx(interface, channel, pipe);
+		return;
+	}
+
+	__cvmx_ilk_write_rx_cal_entry_cn68xx(interface, channel, pipe);
+}
+
+/**
+ * @INTERNAL
+ * Probe a ILK interface and determine the number of ports
+ * connected to it. The ILK interface should still be down
+ * after this call.
+ *
+ * @param xiface Interface to probe
+ *
+ * @return Number of ports on the interface. Zero to disable.
+ */
+int __cvmx_helper_ilk_probe(int xiface)
+{
+	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
+	int ilk_intf;
+
+	if (!octeon_has_feature(OCTEON_FEATURE_ILK))
+		return 0;
+
+	ilk_intf = xi.interface - CVMX_ILK_GBL_BASE();
+	if (ilk_intf >= CVMX_NUM_ILK_INTF)
+		return 0;
+
+	/* The configuration should be done only once */
+	if (cvmx_ilk_get_intf_ena(xiface))
+		return cvmx_ilk_chans[xi.node][ilk_intf];
+
+	/* Configure lanes and enable the link */
+	if (cvmx_ilk_start_interface((xi.node << 4) | ilk_intf,
+				     cvmx_ilk_lane_mask[xi.node][ilk_intf]) < 0)
+		return 0;
+
+	return __cvmx_helper_ilk_enumerate(xiface);
+}
+
+/**
+ * @INTERNAL
+ * cn68xx-specific ILK port initialization: maps PKO pipes and packet
+ * kinds (pknds) to the interface's channels and programs the tx/rx
+ * flow-control calendars.
+ *
+ * @param xiface	Interface to initialize
+ * @return The (non-negative) result of the final rx calendar setup on
+ *	   success, 0 on any allocation or configuration failure.
+ *
+ * NOTE(review): the bookkeeping mixes cvmx_ilk_chans[0][...] and
+ * cvmx_ilk_chans[xi.node][...]; on cn68xx (single node, node 0) these
+ * refer to the same entry — confirm before reusing on multi-node
+ * parts. The static pipe_base/pknd_base/pch/chpknd/calent state makes
+ * this function non-reentrant; allocations come from cvmx_bootmem and
+ * are never freed (hence the empty err_free_* labels).
+ */
+static int __cvmx_helper_ilk_init_port_cn68xx(int xiface)
+{
+	int i, j, res = -1;
+	static int pipe_base = 0, pknd_base;
+	static cvmx_ilk_pipe_chan_t *pch = NULL, *tmp;
+	static cvmx_ilk_chan_pknd_t *chpknd = NULL, *tmp1;
+	static cvmx_ilk_cal_entry_t *calent = NULL, *tmp2;
+	int enable_rx_cal = 1;
+	int interface;
+	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
+	int intf;
+	int num_chans;
+
+	interface = xi.interface - CVMX_ILK_GBL_BASE();
+	intf = (xi.node << 4) | interface;
+	if (interface >= CVMX_NUM_ILK_INTF)
+		return 0;
+
+	num_chans = cvmx_ilk_chans[0][interface];
+
+	/* set up channel to pkind mapping */
+	if (pknd_base == 0)
+		pknd_base = cvmx_helper_get_pknd(xiface, 0);
+
+	/* set up the group of pipes available to ilk */
+	if (pipe_base == 0)
+		pipe_base =
+			__cvmx_pko_get_pipe(interface + CVMX_ILK_GBL_BASE(), 0);
+
+	if (pipe_base == -1) {
+		pipe_base = 0;
+		return 0;
+	}
+
+	res = cvmx_ilk_set_pipe(xiface, pipe_base,
+				cvmx_ilk_chans[0][interface]);
+	if (res < 0)
+		return 0;
+
+	/* set up pipe to channel mapping (one pipe per channel) */
+	i = pipe_base;
+	if (!pch) {
+		pch = (cvmx_ilk_pipe_chan_t *)cvmx_bootmem_alloc(
+			num_chans * sizeof(cvmx_ilk_pipe_chan_t),
+			sizeof(cvmx_ilk_pipe_chan_t));
+		if (!pch)
+			return 0;
+	}
+
+	memset(pch, 0, num_chans * sizeof(cvmx_ilk_pipe_chan_t));
+	tmp = pch;
+	for (j = 0; j < num_chans; j++) {
+		tmp->pipe = i++;
+		tmp->chan = j;
+		tmp++;
+	}
+	res = cvmx_ilk_tx_set_channel(interface, pch,
+				      cvmx_ilk_chans[0][interface]);
+	if (res < 0) {
+		res = 0;
+		goto err_free_pch;
+	}
+	pipe_base += cvmx_ilk_chans[0][interface];
+	/* set up channel to pknd mapping (one pknd per channel) */
+	i = pknd_base;
+	if (!chpknd) {
+		chpknd = (cvmx_ilk_chan_pknd_t *)cvmx_bootmem_alloc(
+			CVMX_ILK_MAX_PKNDS * sizeof(cvmx_ilk_chan_pknd_t),
+			sizeof(cvmx_ilk_chan_pknd_t));
+		if (!chpknd) {
+			/* Roll back the pipe reservation made above */
+			pipe_base -= cvmx_ilk_chans[xi.node][interface];
+			res = 0;
+			goto err_free_pch;
+		}
+	}
+
+	memset(chpknd, 0, CVMX_ILK_MAX_PKNDS * sizeof(cvmx_ilk_chan_pknd_t));
+	tmp1 = chpknd;
+	for (j = 0; j < cvmx_ilk_chans[xi.node][interface]; j++) {
+		tmp1->chan = j;
+		tmp1->pknd = i++;
+		tmp1++;
+	}
+
+	res = cvmx_ilk_rx_set_pknd(xiface, chpknd,
+				   cvmx_ilk_chans[xi.node][interface]);
+	if (res < 0) {
+		pipe_base -= cvmx_ilk_chans[xi.node][interface];
+		res = 0;
+		goto err_free_chpknd;
+	}
+	pknd_base += cvmx_ilk_chans[xi.node][interface];
+
+	/* Set up tx calendar */
+	if (!calent) {
+		calent = (cvmx_ilk_cal_entry_t *)cvmx_bootmem_alloc(
+			CVMX_ILK_MAX_PIPES * sizeof(cvmx_ilk_cal_entry_t),
+			sizeof(cvmx_ilk_cal_entry_t));
+		if (!calent) {
+			/* Roll back both reservations made above */
+			pipe_base -= cvmx_ilk_chans[xi.node][interface];
+			pknd_base -= cvmx_ilk_chans[xi.node][interface];
+			res = 0;
+			goto err_free_chpknd;
+		}
+	}
+
+	memset(calent, 0, CVMX_ILK_MAX_PIPES * sizeof(cvmx_ilk_cal_entry_t));
+	tmp1 = chpknd;
+	tmp2 = calent;
+	for (j = 0; j < cvmx_ilk_chans[xi.node][interface]; j++) {
+		tmp2->pipe_bpid = tmp1->pknd;
+		tmp2->ent_ctrl = PIPE_BPID;
+		tmp1++;
+		tmp2++;
+	}
+	res = cvmx_ilk_cal_setup_tx(intf, cvmx_ilk_chans[xi.node][interface],
+				    calent, 1);
+	if (res < 0) {
+		pipe_base -= cvmx_ilk_chans[xi.node][interface];
+		pknd_base -= cvmx_ilk_chans[xi.node][interface];
+		res = 0;
+		goto err_free_calent;
+	}
+
+	/* set up rx calendar. allocated memory can be reused.
+	 * this is because max pkind is always less than max pipe
+	 */
+	memset(calent, 0, CVMX_ILK_MAX_PIPES * sizeof(cvmx_ilk_cal_entry_t));
+	tmp = pch;
+	tmp2 = calent;
+	for (j = 0; j < cvmx_ilk_chans[0][interface]; j++) {
+		tmp2->pipe_bpid = tmp->pipe;
+		tmp2->ent_ctrl = PIPE_BPID;
+		tmp++;
+		tmp2++;
+	}
+	if (cvmx_ilk_use_la_mode(interface, 0))
+		enable_rx_cal = cvmx_ilk_la_mode_enable_rx_calendar(interface);
+	else
+		enable_rx_cal = 1;
+
+	res = cvmx_ilk_cal_setup_rx(intf, cvmx_ilk_chans[xi.node][interface],
+				    calent, CVMX_ILK_RX_FIFO_WM, enable_rx_cal);
+	if (res < 0) {
+		pipe_base -= cvmx_ilk_chans[xi.node][interface];
+		pknd_base -= cvmx_ilk_chans[xi.node][interface];
+		res = 0;
+		goto err_free_calent;
+	}
+	goto out;
+
+err_free_calent:
+	/* no free() for cvmx_bootmem_alloc() */
+
+err_free_chpknd:
+	/* no free() for cvmx_bootmem_alloc() */
+
+err_free_pch:
+	/* no free() for cvmx_bootmem_alloc() */
+out:
+	return res;
+}
+
+/**
+ * @INTERNAL
+ * cn78xx-specific ILK port initialization: assigns a pknd to each
+ * channel, widens the PKI QPG port mapping (qpg_port_msb = 8, i.e.
+ * 256 channels), sets the jabber threshold, and programs the tx/rx
+ * calendars in hardware-managed mode (NULL entry tables).
+ *
+ * @param xiface	Interface to initialize
+ * @return Always 0 (including for out-of-range interfaces).
+ *
+ * NOTE(review): cvmx_pki_read/write_style_config() are called with
+ * node 0 rather than xi.node — confirm intent for multi-node setups.
+ */
+static int __cvmx_helper_ilk_init_port_cn78xx(int xiface)
+{
+	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
+	int interface;
+	int intf;
+
+	interface = xi.interface - CVMX_ILK_GBL_BASE();
+	intf = (xi.node << 4) | interface;
+	if (interface >= CVMX_NUM_ILK_INTF)
+		return 0;
+
+	if (OCTEON_IS_MODEL(OCTEON_CN78XX)) {
+		struct cvmx_pki_style_config style_cfg;
+		int num_channels = cvmx_ilk_chans[xi.node][interface];
+		int index, i;
+
+		for (i = 0; i < num_channels; i++) {
+			int pknd;
+
+			/* pknds repeat every 8 channels — presumably one
+			 * per interface port index; confirm against caller
+			 */
+			index = (i % 8);
+
+			/* Set jabber to allow max sized packets */
+			if (i == 0)
+				csr_wr_node(xi.node,
+					    CVMX_ILK_RXX_JABBER(interface),
+					    0xfff8);
+
+			/* Setup PKND */
+			pknd = cvmx_helper_get_pknd(xiface, index);
+			csr_wr_node(xi.node, CVMX_ILK_RXX_CHAX(i, interface),
+				    pknd);
+			cvmx_pki_read_style_config(
+				0, pknd, CVMX_PKI_CLUSTER_ALL, &style_cfg);
+			style_cfg.parm_cfg.qpg_port_sh = 0;
+			/* 256 channels */
+			style_cfg.parm_cfg.qpg_port_msb = 8;
+			cvmx_pki_write_style_config(
+				0, pknd, CVMX_PKI_CLUSTER_ALL, &style_cfg);
+		}
+
+		/* NULL tables: hardware manages the calendar entries */
+		cvmx_ilk_cal_setup_tx(intf, num_channels, NULL, 1);
+		cvmx_ilk_cal_setup_rx(intf, num_channels, NULL,
+				      CVMX_ILK_RX_FIFO_WM, 1);
+	}
+	return 0;
+}
+
+/* Chip-generation dispatcher for per-port ILK initialization */
+static int __cvmx_helper_ilk_init_port(int xiface)
+{
+	if (!OCTEON_IS_MODEL(OCTEON_CN68XX))
+		return __cvmx_helper_ilk_init_port_cn78xx(xiface);
+
+	return __cvmx_helper_ilk_init_port_cn68xx(xiface);
+}
+
+/**
+ * @INTERNAL
+ * Bringup and enable ILK interface. After this call packet
+ * I/O should be fully functional. This is called with IPD
+ * enabled but PKO disabled.
+ *
+ * @param xiface Interface to bring up
+ *
+ * @return Zero on success, negative on failure
+ */
+int __cvmx_helper_ilk_enable(int xiface)
+{
+	int ret = __cvmx_helper_ilk_init_port(xiface);
+
+	if (ret < 0)
+		return -1;
+
+	return cvmx_ilk_enable(xiface);
+}
+
+/**
+ * @INTERNAL
+ * Return the link state of an IPD/PKO port as returned by ILK link status.
+ *
+ * Walks the ILK RX bring-up sequence visible below: word boundary lock,
+ * then lane alignment, then packet enable.  Each stage polls with a
+ * 50 us delay and the whole sequence gives up after 200 retries.  On a
+ * fully aligned link, error interrupts are enabled and the speed is
+ * derived from the QLM baud rate scaled by the 64/67 line-code overhead
+ * and the number of locked lanes.
+ *
+ * @param ipd_port IPD/PKO port to query
+ *
+ * @return Link state
+ */
+cvmx_helper_link_info_t __cvmx_helper_ilk_link_get(int ipd_port)
+{
+ cvmx_helper_link_info_t result;
+ int xiface = cvmx_helper_get_interface_num(ipd_port);
+ struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
+ int interface;
+ int retry_count = 0;
+ cvmx_ilk_rxx_cfg1_t ilk_rxx_cfg1;
+ cvmx_ilk_rxx_int_t ilk_rxx_int;
+ int lane_mask = 0;
+ int i;
+ int node = xi.node;
+
+ result.u64 = 0;
+ interface = xi.interface - CVMX_ILK_GBL_BASE();
+
+retry:
+ retry_count++;
+ /* Bound the polling: at most 200 passes (each later stage waits 50 us) */
+ if (retry_count > 200)
+ goto fail;
+
+ /* Read RX config and status bits */
+ ilk_rxx_cfg1.u64 = csr_rd_node(node, CVMX_ILK_RXX_CFG1(interface));
+ ilk_rxx_int.u64 = csr_rd_node(node, CVMX_ILK_RXX_INT(interface));
+
+ if (ilk_rxx_cfg1.s.rx_bdry_lock_ena == 0) {
+ /* (GSER-21957) GSER RX Equalization may make >= 5gbaud non-KR
+ * channel better
+ */
+ if (OCTEON_IS_MODEL(OCTEON_CN78XX)) {
+ /* NOTE(review): this inner lane_mask shadows the
+ * function-scope lane_mask declared above.
+ */
+ int qlm, lane_mask;
+
+ for (qlm = 4; qlm < 8; qlm++) {
+ lane_mask = 1 << (qlm - 4) * 4;
+ if (lane_mask &
+ cvmx_ilk_lane_mask[node][interface]) {
+ if (__cvmx_qlm_rx_equalization(
+ node, qlm, -1))
+ goto retry;
+ }
+ }
+ }
+
+ /* Clear the boundary lock status bit */
+ ilk_rxx_int.u64 = 0;
+ ilk_rxx_int.s.word_sync_done = 1;
+ csr_wr_node(node, CVMX_ILK_RXX_INT(interface), ilk_rxx_int.u64);
+
+ /* We need to start looking for word boundary lock */
+ ilk_rxx_cfg1.s.rx_bdry_lock_ena =
+ cvmx_ilk_lane_mask[node][interface];
+ ilk_rxx_cfg1.s.rx_align_ena = 0;
+ csr_wr_node(node, CVMX_ILK_RXX_CFG1(interface),
+ ilk_rxx_cfg1.u64);
+ //debug("ILK%d: Looking for word boundary lock\n", interface);
+ udelay(50);
+ goto retry;
+ }
+
+ if (ilk_rxx_cfg1.s.rx_align_ena == 0) {
+ /* Boundary lock is armed; once word sync completes, start the
+ * lane alignment stage.
+ */
+ if (ilk_rxx_int.s.word_sync_done) {
+ /* Clear the lane align status bits */
+ ilk_rxx_int.u64 = 0;
+ ilk_rxx_int.s.lane_align_fail = 1;
+ ilk_rxx_int.s.lane_align_done = 1;
+ csr_wr_node(node, CVMX_ILK_RXX_INT(interface),
+ ilk_rxx_int.u64);
+
+ ilk_rxx_cfg1.s.rx_align_ena = 1;
+ csr_wr_node(node, CVMX_ILK_RXX_CFG1(interface),
+ ilk_rxx_cfg1.u64);
+ //printf("ILK%d: Looking for lane alignment\n", interface);
+ }
+ udelay(50);
+ goto retry;
+ }
+
+ if (ilk_rxx_int.s.lane_align_fail) {
+ /* Alignment failed: disable both stages and report link down */
+ ilk_rxx_cfg1.s.rx_bdry_lock_ena = 0;
+ ilk_rxx_cfg1.s.rx_align_ena = 0;
+ csr_wr_node(node, CVMX_ILK_RXX_CFG1(interface),
+ ilk_rxx_cfg1.u64);
+ //debug("ILK%d: Lane alignment failed\n", interface);
+ goto fail;
+ }
+
+ lane_mask = ilk_rxx_cfg1.s.rx_bdry_lock_ena;
+
+ if (ilk_rxx_cfg1.s.pkt_ena == 0 && ilk_rxx_int.s.lane_align_done) {
+ /* Alignment done but RX packets not yet enabled: mirror the TX
+ * pkt_ena setting into RX and enable error interrupts.
+ */
+ cvmx_ilk_txx_cfg1_t ilk_txx_cfg1;
+
+ ilk_txx_cfg1.u64 =
+ csr_rd_node(node, CVMX_ILK_TXX_CFG1(interface));
+ ilk_rxx_cfg1.u64 =
+ csr_rd_node(node, CVMX_ILK_RXX_CFG1(interface));
+ ilk_rxx_cfg1.s.pkt_ena = ilk_txx_cfg1.s.pkt_ena;
+ csr_wr_node(node, CVMX_ILK_RXX_CFG1(interface),
+ ilk_rxx_cfg1.u64);
+
+ if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
+ /*
+ * Enable rxf_ctl_perr, rxf_lnk0_perr, rxf_lnk1_perr,
+ * pop_empty, push_full.
+ */
+ csr_wr(CVMX_ILK_GBL_INT_EN, 0x1f);
+ /* Enable bad_pipe, bad_seq, txf_err */
+ csr_wr(CVMX_ILK_TXX_INT_EN(interface), 0x7);
+
+ /*
+ * Enable crc24_err, lane_bad_word,
+ * pkt_drop_{rid,rxf,sop}
+ */
+ csr_wr(CVMX_ILK_RXX_INT_EN(interface), 0x1e2);
+ }
+ /* Need to enable ILK interrupts for 78xx */
+
+ for (i = 0; i < CVMX_ILK_MAX_LANES(); i++) {
+ if ((1 << i) & lane_mask) {
+ /* clear pending interrupts, before enabling. */
+ csr_wr_node(node, CVMX_ILK_RX_LNEX_INT(i),
+ 0x1ff);
+ /* Enable bad_64b67b, bdry_sync_loss, crc32_err,
+ * dskew_fifo_ovfl, scrm_sync_loss,
+ * serdes_lock_loss, stat_msg, ukwn_cntl_word
+ */
+ if (OCTEON_IS_MODEL(OCTEON_CN68XX))
+ csr_wr(CVMX_ILK_RX_LNEX_INT_EN(i),
+ 0x1ff);
+ }
+ }
+
+ //debug("ILK%d: Lane alignment complete\n", interface);
+ }
+
+ /* Enable error interrupts, now link is up */
+ cvmx_error_enable_group(CVMX_ERROR_GROUP_ILK,
+ node | (interface << 2) | (lane_mask << 4));
+
+ result.s.link_up = 1;
+ result.s.full_duplex = 1;
+ if (OCTEON_IS_MODEL(OCTEON_CN78XX)) {
+ int qlm = cvmx_qlm_lmac(xiface, 0);
+
+ result.s.speed = cvmx_qlm_get_gbaud_mhz(qlm) * 64 / 67;
+ } else {
+ result.s.speed =
+ cvmx_qlm_get_gbaud_mhz(1 + interface) * 64 / 67;
+ }
+ /* Aggregate speed scales with the number of locked lanes */
+ result.s.speed *= cvmx_pop(lane_mask);
+
+ return result;
+
+fail:
+ /* ilk_rxx_cfg1 holds the value from the most recent read above */
+ if (ilk_rxx_cfg1.s.pkt_ena) {
+ /* Disable the interface */
+ ilk_rxx_cfg1.s.pkt_ena = 0;
+ csr_wr_node(node, CVMX_ILK_RXX_CFG1(interface),
+ ilk_rxx_cfg1.u64);
+
+ /* Disable error interrupts */
+ for (i = 0; i < CVMX_ILK_MAX_LANES(); i++) {
+ /* Disable bad_64b67b, bdry_sync_loss, crc32_err,
+ * dskew_fifo_ovfl, scrm_sync_loss, serdes_lock_loss,
+ * stat_msg, ukwn_cntl_word
+ */
+ if ((1 << i) & lane_mask) {
+ csr_wr_node(node, CVMX_ILK_RX_LNEX_INT(i),
+ 0x1ff);
+ if (OCTEON_IS_MODEL(OCTEON_CN68XX))
+ csr_wr(CVMX_ILK_RX_LNEX_INT_EN(i),
+ ~0x1ff);
+ }
+ }
+ /* Disable error interrupts */
+ cvmx_error_enable_group(CVMX_ERROR_GROUP_ILK, 0);
+ }
+
+ /* result is still all-zero here: link down */
+ return result;
+}
+
+/**
+ * @INTERNAL
+ * Set the link state of an IPD/PKO port.
+ *
+ * ILK link state is brought up by the polling code in
+ * __cvmx_helper_ilk_link_get(), so there is nothing to program here
+ * and the requested state is ignored.
+ *
+ * @param ipd_port IPD/PKO port to configure
+ * @param link_info The new link state
+ *
+ * @return Zero on success, negative on failure
+ */
+int __cvmx_helper_ilk_link_set(int ipd_port, cvmx_helper_link_info_t link_info)
+{
+    /* Intentionally a no-op for ILK interfaces. */
+    return 0;
+}
+
+/**
+ * Display ilk interface statistics.
+ *
+ * For every ILK interface, builds an identity channel list
+ * (0 .. num_chans-1) and prints the per-channel statistics without
+ * clearing the counters on read.
+ */
+void __cvmx_helper_ilk_show_stats(void)
+{
+ int i, j;
+ int chan_tmp[CVMX_ILK_MAX_CHANS];
+ cvmx_ilk_stats_ctrl_t ilk_stats_ctrl;
+
+ for (i = 0; i < CVMX_NUM_ILK_INTF; i++) {
+ /* NOTE(review): cvmx_ilk_chans is indexed with node 0 here, so
+ * on a multi-node system only node 0's channel counts are shown
+ * -- confirm this is intended.
+ */
+ int num_chans = cvmx_ilk_chans[0][i];
+
+ memset(chan_tmp, 0, num_chans * sizeof(int));
+ for (j = 0; j < num_chans; j++)
+ chan_tmp[j] = j;
+
+ /* Channel list on the stack; consumed before it goes out of scope */
+ ilk_stats_ctrl.chan_list = chan_tmp;
+ ilk_stats_ctrl.num_chans = num_chans;
+ ilk_stats_ctrl.clr_on_rd = 0;
+ cvmx_ilk_show_stats(i, &ilk_stats_ctrl);
+ }
+}
--
2.35.1
^ permalink raw reply related [flat|nested] 52+ messages in thread* [PATCH 14/52] mips: octeon: Add cvmx-helper-ipd.c
2022-03-30 10:06 [PATCH 00/52] mips: octeon: Add ethernet support Stefan Roese
` (12 preceding siblings ...)
2022-03-30 10:06 ` [PATCH 13/52] mips: octeon: Add cvmx-helper-igl.c Stefan Roese
@ 2022-03-30 10:06 ` Stefan Roese
2022-03-30 10:06 ` [PATCH 15/52] mips: octeon: Add cvmx-helper-loop.c Stefan Roese
` (35 subsequent siblings)
49 siblings, 0 replies; 52+ messages in thread
From: Stefan Roese @ 2022-03-30 10:06 UTC (permalink / raw)
To: u-boot; +Cc: awilliams, cchavva
From: Aaron Williams <awilliams@marvell.com>
Import cvmx-helper-ipd.c from 2013 U-Boot. It will be used by the later
added drivers to support networking on the MIPS Octeon II / III
platforms.
Signed-off-by: Aaron Williams <awilliams@marvell.com>
Signed-off-by: Stefan Roese <sr@denx.de>
---
arch/mips/mach-octeon/cvmx-helper-ipd.c | 313 ++++++++++++++++++++++++
1 file changed, 313 insertions(+)
create mode 100644 arch/mips/mach-octeon/cvmx-helper-ipd.c
diff --git a/arch/mips/mach-octeon/cvmx-helper-ipd.c b/arch/mips/mach-octeon/cvmx-helper-ipd.c
new file mode 100644
index 000000000000..1b1b6c84232b
--- /dev/null
+++ b/arch/mips/mach-octeon/cvmx-helper-ipd.c
@@ -0,0 +1,313 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018-2022 Marvell International Ltd.
+ *
+ * IPD helper functions.
+ */
+
+#include <errno.h>
+#include <log.h>
+#include <time.h>
+#include <linux/delay.h>
+
+#include <mach/cvmx-regs.h>
+#include <mach/cvmx-csr.h>
+#include <mach/cvmx-bootmem.h>
+#include <mach/octeon-model.h>
+#include <mach/cvmx-fuse.h>
+#include <mach/octeon-feature.h>
+#include <mach/cvmx-qlm.h>
+#include <mach/octeon_qlm.h>
+#include <mach/cvmx-pcie.h>
+#include <mach/cvmx-coremask.h>
+#include <mach/cvmx-range.h>
+#include <mach/cvmx-global-resources.h>
+
+#include <mach/cvmx-agl-defs.h>
+#include <mach/cvmx-bgxx-defs.h>
+#include <mach/cvmx-ciu-defs.h>
+#include <mach/cvmx-gmxx-defs.h>
+#include <mach/cvmx-gserx-defs.h>
+#include <mach/cvmx-ilk-defs.h>
+#include <mach/cvmx-ipd-defs.h>
+#include <mach/cvmx-pcsx-defs.h>
+#include <mach/cvmx-pcsxx-defs.h>
+#include <mach/cvmx-pki-defs.h>
+#include <mach/cvmx-pko-defs.h>
+#include <mach/cvmx-xcv-defs.h>
+
+#include <mach/cvmx-hwpko.h>
+#include <mach/cvmx-ilk.h>
+#include <mach/cvmx-ipd.h>
+#include <mach/cvmx-pki.h>
+#include <mach/cvmx-pko3.h>
+#include <mach/cvmx-pko3-queue.h>
+#include <mach/cvmx-pko3-resources.h>
+#include <mach/cvmx-pip.h>
+
+#include <mach/cvmx-helper.h>
+#include <mach/cvmx-helper-board.h>
+#include <mach/cvmx-helper-cfg.h>
+
+#include <mach/cvmx-helper-bgx.h>
+#include <mach/cvmx-helper-cfg.h>
+#include <mach/cvmx-helper-util.h>
+#include <mach/cvmx-helper-pki.h>
+
+/**
+ * Allocate the packet and WQE FPA pools used by IPD and program the
+ * FPA hardware.  A pool whose configured buffer count is zero is
+ * treated as unused and stops the setup at that point (the WQE pool
+ * is only set up when the packet pool is).
+ *
+ * @return Zero on success.
+ */
+int __cvmx_helper_ipd_setup_fpa_pools(void)
+{
+    cvmx_fpa_global_initialize();
+
+    if (cvmx_ipd_cfg.packet_pool.buffer_count != 0) {
+        __cvmx_helper_initialize_fpa_pool(cvmx_ipd_cfg.packet_pool.pool_num,
+                                          cvmx_ipd_cfg.packet_pool.buffer_size,
+                                          cvmx_ipd_cfg.packet_pool.buffer_count,
+                                          "Packet Buffers");
+        if (cvmx_ipd_cfg.wqe_pool.buffer_count != 0)
+            __cvmx_helper_initialize_fpa_pool(cvmx_ipd_cfg.wqe_pool.pool_num,
+                                              cvmx_ipd_cfg.wqe_pool.buffer_size,
+                                              cvmx_ipd_cfg.wqe_pool.buffer_count,
+                                              "WQE Buffers");
+    }
+
+    return 0;
+}
+
+/**
+ * @INTERNAL
+ * Setup global setting for IPD/PIP not related to a specific
+ * interface or port. This must be called before IPD is enabled.
+ *
+ * Creates the packet/WQE FPA pools and then programs the global packet
+ * input options from the cvmx_ipd_cfg structure.
+ *
+ * @return Zero on success, negative on failure.
+ */
+int __cvmx_helper_ipd_global_setup(void)
+{
+ /* Setup the packet and wqe pools*/
+ __cvmx_helper_ipd_setup_fpa_pools();
+ /* Setup the global packet input options */
+ /* NOTE(review): sizes/skips are passed scaled by 8 and 128; these
+  * presumably match cvmx_ipd_config()'s expected units (8-byte words
+  * and 128-byte cache lines) -- confirm against its prototype.
+  */
+ cvmx_ipd_config(cvmx_ipd_cfg.packet_pool.buffer_size / 8,
+ cvmx_ipd_cfg.first_mbuf_skip / 8,
+ cvmx_ipd_cfg.not_first_mbuf_skip / 8,
+ /* The +8 is to account for the next ptr */
+ (cvmx_ipd_cfg.first_mbuf_skip + 8) / 128,
+ /* The +8 is to account for the next ptr */
+ (cvmx_ipd_cfg.not_first_mbuf_skip + 8) / 128,
+ cvmx_ipd_cfg.wqe_pool.pool_num,
+ (cvmx_ipd_mode_t)(cvmx_ipd_cfg.cache_mode), 1);
+ return 0;
+}
+
+/**
+ * Enable or disable FCS stripping for all the ports on an interface.
+ *
+ * No-op on chips without port-kinds (PKND).  On PKI chips the work is
+ * delegated to the PKI helper; otherwise the PIP FCS-strip bitmap and
+ * per-pknd CRC check are updated directly.
+ *
+ * @param xiface
+ * @param nports number of ports
+ * @param has_fcs 0 for disable and !0 for enable
+ */
+static int cvmx_helper_fcs_op(int xiface, int nports, int has_fcs)
+{
+ u64 port_bit;
+ int index;
+ int pknd;
+ union cvmx_pip_sub_pkind_fcsx pkind_fcsx;
+ union cvmx_pip_prt_cfgx port_cfg;
+ struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
+
+ if (!octeon_has_feature(OCTEON_FEATURE_PKND))
+ return 0;
+ if (octeon_has_feature(OCTEON_FEATURE_PKI)) {
+ cvmx_helper_pki_set_fcs_op(xi.node, xi.interface, nports,
+ has_fcs);
+ return 0;
+ }
+
+ /* Build a bitmap with one bit per port-kind on this interface */
+ port_bit = 0;
+ for (index = 0; index < nports; index++)
+ port_bit |= ((u64)1 << cvmx_helper_get_pknd(xiface, index));
+
+ /* Set or clear those pknds in the FCS-strip register */
+ pkind_fcsx.u64 = csr_rd(CVMX_PIP_SUB_PKIND_FCSX(0));
+ if (has_fcs)
+ pkind_fcsx.s.port_bit |= port_bit;
+ else
+ pkind_fcsx.s.port_bit &= ~port_bit;
+ csr_wr(CVMX_PIP_SUB_PKIND_FCSX(0), pkind_fcsx.u64);
+
+ /* Enable/disable the CRC check on each affected port-kind */
+ for (pknd = 0; pknd < 64; pknd++) {
+ if ((1ull << pknd) & port_bit) {
+ port_cfg.u64 = csr_rd(CVMX_PIP_PRT_CFGX(pknd));
+ port_cfg.s.crc_en = (has_fcs) ? 1 : 0;
+ csr_wr(CVMX_PIP_PRT_CFGX(pknd), port_cfg.u64);
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * @INTERNAL
+ * Configure the IPD/PIP tagging and QoS options for a specific
+ * port. This function determines the POW work queue entry
+ * contents for a port. The setup performed here is controlled by
+ * the defines in executive-config.h.
+ *
+ * The tag/QoS settings are copied from the global cvmx_ipd_cfg
+ * defaults; boards may override per port via
+ * cvmx_override_ipd_port_setup at the end.
+ *
+ * @param ipd_port Port/Port kind to configure. This follows the IPD numbering,
+ * not the per interface numbering
+ *
+ * @return Zero on success, negative on failure
+ */
+static int __cvmx_helper_ipd_port_setup(int ipd_port)
+{
+ union cvmx_pip_prt_cfgx port_config;
+ union cvmx_pip_prt_tagx tag_config;
+
+ if (octeon_has_feature(OCTEON_FEATURE_PKND)) {
+ /* Port-kind based chips: registers are indexed by pknd */
+ int xiface, index, pknd;
+ union cvmx_pip_prt_cfgbx prt_cfgbx;
+
+ xiface = cvmx_helper_get_interface_num(ipd_port);
+ index = cvmx_helper_get_interface_index_num(ipd_port);
+ pknd = cvmx_helper_get_pknd(xiface, index);
+
+ port_config.u64 = csr_rd(CVMX_PIP_PRT_CFGX(pknd));
+ tag_config.u64 = csr_rd(CVMX_PIP_PRT_TAGX(pknd));
+
+ port_config.s.qos = pknd & 0x7;
+
+ /* Default BPID to use for packets on this port-kind */
+ prt_cfgbx.u64 = csr_rd(CVMX_PIP_PRT_CFGBX(pknd));
+ prt_cfgbx.s.bpid = pknd;
+ csr_wr(CVMX_PIP_PRT_CFGBX(pknd), prt_cfgbx.u64);
+ } else {
+ /* Legacy chips: registers are indexed by IPD port number */
+ port_config.u64 = csr_rd(CVMX_PIP_PRT_CFGX(ipd_port));
+ tag_config.u64 = csr_rd(CVMX_PIP_PRT_TAGX(ipd_port));
+
+ /* Have each port go to a different POW queue */
+ port_config.s.qos = ipd_port & 0x7;
+ }
+
+ /* Process the headers and place the IP header in the work queue */
+ port_config.s.mode =
+ (cvmx_pip_port_parse_mode_t)cvmx_ipd_cfg.port_config.parse_mode;
+
+ /* Copy the configured tag-field selections (which packet fields
+  * contribute to the POW tag) from the global defaults.
+  */
+ tag_config.s.ip6_src_flag =
+ cvmx_ipd_cfg.port_config.tag_fields.ipv6_src_ip;
+ tag_config.s.ip6_dst_flag =
+ cvmx_ipd_cfg.port_config.tag_fields.ipv6_dst_ip;
+ tag_config.s.ip6_sprt_flag =
+ cvmx_ipd_cfg.port_config.tag_fields.ipv6_src_port;
+ tag_config.s.ip6_dprt_flag =
+ cvmx_ipd_cfg.port_config.tag_fields.ipv6_dst_port;
+ tag_config.s.ip6_nxth_flag =
+ cvmx_ipd_cfg.port_config.tag_fields.ipv6_next_header;
+ tag_config.s.ip4_src_flag =
+ cvmx_ipd_cfg.port_config.tag_fields.ipv4_src_ip;
+ tag_config.s.ip4_dst_flag =
+ cvmx_ipd_cfg.port_config.tag_fields.ipv4_dst_ip;
+ tag_config.s.ip4_sprt_flag =
+ cvmx_ipd_cfg.port_config.tag_fields.ipv4_src_port;
+ tag_config.s.ip4_dprt_flag =
+ cvmx_ipd_cfg.port_config.tag_fields.ipv4_dst_port;
+ tag_config.s.ip4_pctl_flag =
+ cvmx_ipd_cfg.port_config.tag_fields.ipv4_protocol;
+ tag_config.s.inc_prt_flag =
+ cvmx_ipd_cfg.port_config.tag_fields.input_port;
+ /* The same default tag type is applied to every traffic class */
+ tag_config.s.tcp6_tag_type =
+ (cvmx_pow_tag_type_t)cvmx_ipd_cfg.port_config.tag_type;
+ tag_config.s.tcp4_tag_type =
+ (cvmx_pow_tag_type_t)cvmx_ipd_cfg.port_config.tag_type;
+ tag_config.s.ip6_tag_type =
+ (cvmx_pow_tag_type_t)cvmx_ipd_cfg.port_config.tag_type;
+ tag_config.s.ip4_tag_type =
+ (cvmx_pow_tag_type_t)cvmx_ipd_cfg.port_config.tag_type;
+ tag_config.s.non_tag_type =
+ (cvmx_pow_tag_type_t)cvmx_ipd_cfg.port_config.tag_type;
+
+ /* Put all packets in group 0. Other groups can be used by the app */
+ tag_config.s.grp = 0;
+
+ cvmx_pip_config_port(ipd_port, port_config, tag_config);
+
+ /* Give the user a chance to override our setting for each port */
+ if (cvmx_override_ipd_port_setup)
+ cvmx_override_ipd_port_setup(ipd_port);
+
+ return 0;
+}
+
+/**
+ * @INTERNAL
+ * Setup the IPD/PIP for the ports on an interface. Packet
+ * classification and tagging are set for every port on the
+ * interface. The number of ports on the interface must already
+ * have been probed.
+ *
+ * @param xiface to setup IPD/PIP for
+ *
+ * @return Zero on success, negative on failure
+ */
+int __cvmx_helper_ipd_setup_interface(int xiface)
+{
+ cvmx_helper_interface_mode_t mode;
+ int num_ports = cvmx_helper_ports_on_interface(xiface);
+ struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
+ int ipd_port = cvmx_helper_get_ipd_port(xiface, 0);
+ int delta;
+
+ if (num_ports == CVMX_HELPER_CFG_INVALID_VALUE)
+ return 0;
+
+ /* On PKND chips, GMX interfaces space their IPD ports 16 apart */
+ delta = 1;
+ if (octeon_has_feature(OCTEON_FEATURE_PKND)) {
+ if (xi.interface < CVMX_HELPER_MAX_GMX)
+ delta = 16;
+ }
+
+ /* NOTE(review): validity is checked against index num_ports (counting
+  * down) while ipd_port counts up from index 0, and "continue" skips
+  * the ipd_port increment -- presumably correct only when validity is
+  * uniform across the interface; confirm against upstream.
+  */
+ while (num_ports--) {
+ if (!cvmx_helper_is_port_valid(xiface, num_ports))
+ continue;
+ if (octeon_has_feature(OCTEON_FEATURE_PKI))
+ __cvmx_helper_pki_port_setup(xi.node, ipd_port);
+ else
+ __cvmx_helper_ipd_port_setup(ipd_port);
+ ipd_port += delta;
+ }
+ /* FCS settings */
+ cvmx_helper_fcs_op(xiface, cvmx_helper_ports_on_interface(xiface),
+ __cvmx_helper_get_has_fcs(xiface));
+
+ mode = cvmx_helper_interface_get_mode(xiface);
+
+ if (mode == CVMX_HELPER_INTERFACE_MODE_LOOP)
+ __cvmx_helper_loop_enable(xiface);
+
+ return 0;
+}
+
+/**
+ * Set or clear the IPD "no WQE pointer" mode bit.
+ *
+ * Only meaningful on IPD-based (non-PKI) chips; on PKI chips this is
+ * a no-op.
+ *
+ * @param mode value written to IPD_CTL_STATUS[NO_WPTR]
+ */
+void cvmx_helper_ipd_set_wqe_no_ptr_mode(bool mode)
+{
+    cvmx_ipd_ctl_status_t ctl;
+
+    if (octeon_has_feature(OCTEON_FEATURE_PKI))
+        return;
+
+    ctl.u64 = csr_rd(CVMX_IPD_CTL_STATUS);
+    ctl.s.no_wptr = mode;
+    csr_wr(CVMX_IPD_CTL_STATUS, ctl.u64);
+}
+
+/**
+ * Configure little-endian mode for packet data and WQEs.
+ *
+ * On IPD chips both IPD_CTL_STATUS[PKT_LEND] and [WQE_LEND] are set to
+ * the requested mode.  On PKI chips the PKI helper is invoked for the
+ * current node (the mode argument is not consulted on that path, same
+ * as the original implementation).
+ *
+ * @param mode endianness selector written to the IPD control bits
+ */
+void cvmx_helper_ipd_pkt_wqe_le_mode(bool mode)
+{
+    cvmx_ipd_ctl_status_t ctl;
+
+    if (octeon_has_feature(OCTEON_FEATURE_PKI)) {
+        cvmx_helper_pki_set_little_endian(cvmx_get_node_num());
+        return;
+    }
+
+    ctl.u64 = csr_rd(CVMX_IPD_CTL_STATUS);
+    ctl.s.pkt_lend = mode;
+    ctl.s.wqe_lend = mode;
+    csr_wr(CVMX_IPD_CTL_STATUS, ctl.u64);
+}
--
2.35.1
^ permalink raw reply related [flat|nested] 52+ messages in thread* [PATCH 15/52] mips: octeon: Add cvmx-helper-loop.c
2022-03-30 10:06 [PATCH 00/52] mips: octeon: Add ethernet support Stefan Roese
` (13 preceding siblings ...)
2022-03-30 10:06 ` [PATCH 14/52] mips: octeon: Add cvmx-helper-ipd.c Stefan Roese
@ 2022-03-30 10:06 ` Stefan Roese
2022-03-30 10:06 ` [PATCH 17/52] mips: octeon: Add cvmx-helper-pki.c Stefan Roese
` (34 subsequent siblings)
49 siblings, 0 replies; 52+ messages in thread
From: Stefan Roese @ 2022-03-30 10:06 UTC (permalink / raw)
To: u-boot; +Cc: awilliams, cchavva
From: Aaron Williams <awilliams@marvell.com>
Import cvmx-helper-loop.c from 2013 U-Boot. It will be used by the later
added drivers to support networking on the MIPS Octeon II / III
platforms.
Signed-off-by: Aaron Williams <awilliams@marvell.com>
Signed-off-by: Stefan Roese <sr@denx.de>
---
arch/mips/mach-octeon/cvmx-helper-loop.c | 178 +++++++++++++++++++++++
1 file changed, 178 insertions(+)
create mode 100644 arch/mips/mach-octeon/cvmx-helper-loop.c
diff --git a/arch/mips/mach-octeon/cvmx-helper-loop.c b/arch/mips/mach-octeon/cvmx-helper-loop.c
new file mode 100644
index 000000000000..8eaeac387df4
--- /dev/null
+++ b/arch/mips/mach-octeon/cvmx-helper-loop.c
@@ -0,0 +1,178 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018-2022 Marvell International Ltd.
+ *
+ * Functions for LOOP initialization, configuration,
+ * and monitoring.
+ */
+
+#include <log.h>
+#include <malloc.h>
+#include <net.h>
+#include <linux/delay.h>
+
+#include <mach/cvmx-regs.h>
+#include <mach/cvmx-csr.h>
+#include <mach/cvmx-bootmem.h>
+#include <mach/octeon-model.h>
+#include <mach/octeon_fdt.h>
+#include <mach/cvmx-helper.h>
+#include <mach/cvmx-helper-board.h>
+#include <mach/cvmx-helper-cfg.h>
+#include <mach/cvmx-helper-fdt.h>
+#include <mach/cvmx-helper-gpio.h>
+
+#include <mach/cvmx-agl-defs.h>
+#include <mach/cvmx-bgxx-defs.h>
+#include <mach/cvmx-ciu-defs.h>
+#include <mach/cvmx-gmxx-defs.h>
+#include <mach/cvmx-gserx-defs.h>
+#include <mach/cvmx-ilk-defs.h>
+#include <mach/cvmx-ipd-defs.h>
+#include <mach/cvmx-lbk-defs.h>
+#include <mach/cvmx-pcsx-defs.h>
+#include <mach/cvmx-pcsxx-defs.h>
+#include <mach/cvmx-pki-defs.h>
+#include <mach/cvmx-pko-defs.h>
+#include <mach/cvmx-xcv-defs.h>
+
+#include <mach/cvmx-hwpko.h>
+#include <mach/cvmx-ilk.h>
+#include <mach/cvmx-pki.h>
+
+/**
+ * @INTERNAL
+ * Return the number of LOOP (loopback) ports on this chip:
+ * 8 on CN68XX, 2 on CNF71XX, 4 on everything else.
+ *
+ * @param xiface Interface (unused; the count is model-fixed)
+ */
+int __cvmx_helper_loop_enumerate(int xiface)
+{
+    if (OCTEON_IS_MODEL(OCTEON_CN68XX))
+        return 8;
+    if (OCTEON_IS_MODEL(OCTEON_CNF71XX))
+        return 2;
+    return 4;
+}
+
+/**
+ * @INTERNAL
+ * Probe a LOOP interface and determine the number of ports
+ * connected to it. The LOOP interface should still be down
+ * after this call.
+ *
+ * The LOOP port count is fixed per chip model, so probing is simply
+ * enumeration.
+ *
+ * @param xiface Interface to probe
+ *
+ * @return Number of ports on the interface. Zero to disable.
+ */
+int __cvmx_helper_loop_probe(int xiface)
+{
+    return __cvmx_helper_loop_enumerate(xiface);
+}
+
+/**
+ * @INTERNAL
+ * Bringup and enable a LOOP interface. After this call packet
+ * I/O should be fully functional. This is called with IPD
+ * enabled but PKO disabled.
+ *
+ * Disables min/max length checks and FCS stripping on the loopback
+ * ports, then programs the per-port PKND/BPID mapping for the chip
+ * family in use (CN68XX PKO registers or BGX-style LBK registers).
+ *
+ * @param interface to bring up
+ *
+ * @return Zero on success, negative on failure
+ */
+int __cvmx_helper_loop_enable(int xiface)
+{
+ cvmx_pip_prt_cfgx_t port_cfg;
+ int num_ports, index;
+ unsigned long offset;
+ struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
+
+ num_ports = __cvmx_helper_get_num_ipd_ports(xiface);
+ /*
+ * We need to disable length checking so packet < 64 bytes and jumbo
+ * frames don't get errors
+ */
+ for (index = 0; index < num_ports; index++) {
+ /* PKND chips index these checks by pknd, legacy by ipd port */
+ offset = ((octeon_has_feature(OCTEON_FEATURE_PKND)) ?
+ cvmx_helper_get_pknd(xiface, index) :
+ cvmx_helper_get_ipd_port(xiface, index));
+
+ if (octeon_has_feature(OCTEON_FEATURE_PKI)) {
+ cvmx_pki_endis_l2_errs(xi.node, offset, 1, 0, 0);
+ cvmx_pki_endis_fcs_check(xi.node, offset, 0, 0);
+ } else {
+ port_cfg.u64 = csr_rd(CVMX_PIP_PRT_CFGX(offset));
+ port_cfg.s.maxerr_en = 0;
+ port_cfg.s.minerr_en = 0;
+ csr_wr(CVMX_PIP_PRT_CFGX(offset), port_cfg.u64);
+ }
+ }
+
+ /*
+ * Disable FCS stripping for loopback ports
+ */
+ if (!octeon_has_feature(OCTEON_FEATURE_PKND)) {
+ cvmx_ipd_sub_port_fcs_t ipd_sub_port_fcs;
+
+ ipd_sub_port_fcs.u64 = csr_rd(CVMX_IPD_SUB_PORT_FCS);
+ ipd_sub_port_fcs.s.port_bit2 = 0;
+ csr_wr(CVMX_IPD_SUB_PORT_FCS, ipd_sub_port_fcs.u64);
+ }
+ /*
+ * Set PKND and BPID for loopback ports.
+ */
+ if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
+ cvmx_pko_reg_loopback_pkind_t lp_pknd;
+ cvmx_pko_reg_loopback_bpid_t lp_bpid;
+
+ /* The register has one fixed field per port, hence the
+  * per-index switch below.
+  */
+ for (index = 0; index < num_ports; index++) {
+ int pknd = cvmx_helper_get_pknd(xiface, index);
+ int bpid = cvmx_helper_get_bpid(xiface, index);
+
+ lp_pknd.u64 = csr_rd(CVMX_PKO_REG_LOOPBACK_PKIND);
+ lp_bpid.u64 = csr_rd(CVMX_PKO_REG_LOOPBACK_BPID);
+
+ if (index == 0)
+ lp_pknd.s.num_ports = num_ports;
+
+ switch (index) {
+ case 0:
+ lp_pknd.s.pkind0 = pknd;
+ lp_bpid.s.bpid0 = bpid;
+ break;
+ case 1:
+ lp_pknd.s.pkind1 = pknd;
+ lp_bpid.s.bpid1 = bpid;
+ break;
+ case 2:
+ lp_pknd.s.pkind2 = pknd;
+ lp_bpid.s.bpid2 = bpid;
+ break;
+ case 3:
+ lp_pknd.s.pkind3 = pknd;
+ lp_bpid.s.bpid3 = bpid;
+ break;
+ case 4:
+ lp_pknd.s.pkind4 = pknd;
+ lp_bpid.s.bpid4 = bpid;
+ break;
+ case 5:
+ lp_pknd.s.pkind5 = pknd;
+ lp_bpid.s.bpid5 = bpid;
+ break;
+ case 6:
+ lp_pknd.s.pkind6 = pknd;
+ lp_bpid.s.bpid6 = bpid;
+ break;
+ case 7:
+ lp_pknd.s.pkind7 = pknd;
+ lp_bpid.s.bpid7 = bpid;
+ break;
+ }
+ csr_wr(CVMX_PKO_REG_LOOPBACK_PKIND, lp_pknd.u64);
+ csr_wr(CVMX_PKO_REG_LOOPBACK_BPID, lp_bpid.u64);
+ }
+ } else if (octeon_has_feature(OCTEON_FEATURE_BGX)) {
+ /* BGX chips have a per-channel PKND register instead */
+ cvmx_lbk_chx_pkind_t lbk_pkind;
+
+ for (index = 0; index < num_ports; index++) {
+ lbk_pkind.u64 = 0;
+ lbk_pkind.s.pkind = cvmx_helper_get_pknd(xiface, index);
+ csr_wr_node(xi.node, CVMX_LBK_CHX_PKIND(index),
+ lbk_pkind.u64);
+ }
+ }
+
+ return 0;
+}
--
2.35.1
^ permalink raw reply related [flat|nested] 52+ messages in thread* [PATCH 17/52] mips: octeon: Add cvmx-helper-pki.c
2022-03-30 10:06 [PATCH 00/52] mips: octeon: Add ethernet support Stefan Roese
` (14 preceding siblings ...)
2022-03-30 10:06 ` [PATCH 15/52] mips: octeon: Add cvmx-helper-loop.c Stefan Roese
@ 2022-03-30 10:06 ` Stefan Roese
2022-03-30 10:06 ` [PATCH 18/52] mips: octeon: Add cvmx-helper-pko.c Stefan Roese
` (33 subsequent siblings)
49 siblings, 0 replies; 52+ messages in thread
From: Stefan Roese @ 2022-03-30 10:06 UTC (permalink / raw)
To: u-boot; +Cc: awilliams, cchavva
From: Aaron Williams <awilliams@marvell.com>
Import cvmx-helper-pki.c from 2013 U-Boot. It will be used by the later
added drivers to support networking on the MIPS Octeon II / III
platforms.
Signed-off-by: Aaron Williams <awilliams@marvell.com>
Signed-off-by: Stefan Roese <sr@denx.de>
---
arch/mips/mach-octeon/cvmx-helper-pki.c | 2156 +++++++++++++++++++++++
1 file changed, 2156 insertions(+)
create mode 100644 arch/mips/mach-octeon/cvmx-helper-pki.c
diff --git a/arch/mips/mach-octeon/cvmx-helper-pki.c b/arch/mips/mach-octeon/cvmx-helper-pki.c
new file mode 100644
index 000000000000..d68c7dac0087
--- /dev/null
+++ b/arch/mips/mach-octeon/cvmx-helper-pki.c
@@ -0,0 +1,2156 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018-2022 Marvell International Ltd.
+ *
+ * PKI helper functions.
+ */
+
+#include <time.h>
+#include <log.h>
+#include <linux/delay.h>
+
+#include <mach/cvmx-regs.h>
+#include <mach/cvmx-csr.h>
+#include <mach/cvmx-bootmem.h>
+#include <mach/octeon-model.h>
+#include <mach/cvmx-fuse.h>
+#include <mach/octeon-feature.h>
+#include <mach/cvmx-qlm.h>
+#include <mach/octeon_qlm.h>
+#include <mach/cvmx-pcie.h>
+#include <mach/cvmx-coremask.h>
+
+#include <mach/cvmx-agl-defs.h>
+#include <mach/cvmx-bgxx-defs.h>
+#include <mach/cvmx-ciu-defs.h>
+#include <mach/cvmx-gmxx-defs.h>
+#include <mach/cvmx-gserx-defs.h>
+#include <mach/cvmx-ilk-defs.h>
+#include <mach/cvmx-ipd-defs.h>
+#include <mach/cvmx-pexp-defs.h>
+#include <mach/cvmx-pcsx-defs.h>
+#include <mach/cvmx-pcsxx-defs.h>
+#include <mach/cvmx-pki-defs.h>
+#include <mach/cvmx-pko-defs.h>
+#include <mach/cvmx-sli-defs.h>
+#include <mach/cvmx-xcv-defs.h>
+
+#include <mach/cvmx-hwpko.h>
+#include <mach/cvmx-ilk.h>
+#include <mach/cvmx-pki.h>
+
+#include <mach/cvmx-helper.h>
+#include <mach/cvmx-helper-board.h>
+#include <mach/cvmx-helper-cfg.h>
+#include <mach/cvmx-helper-pki.h>
+
+#include <mach/cvmx-global-resources.h>
+#include <mach/cvmx-pko-internal-ports-range.h>
+#include <mach/cvmx-ilk.h>
+#include <mach/cvmx-pip.h>
+
+/* Set non-zero to enable verbose debug output from the PKI helpers */
+static int pki_helper_debug;
+
+/* Per-node flag: apply the default PKI initialization on that node */
+bool cvmx_pki_dflt_init[CVMX_MAX_NODES] = { [0 ... CVMX_MAX_NODES - 1] = 1 };
+
+/* Per-node default backpressure enable */
+static bool cvmx_pki_dflt_bp_en[CVMX_MAX_NODES] = { [0 ... CVMX_MAX_NODES - 1] =
+ true };
+/* Default cluster group per node.
+ * NOTE(review): only two nodes are explicitly initialized here; any
+ * further nodes get a zeroed entry (grp 0, mask 0) -- confirm that is
+ * intended when CVMX_MAX_NODES > 2.
+ */
+static struct cvmx_pki_cluster_grp_config pki_dflt_clgrp[CVMX_MAX_NODES] = {
+ { 0, 0xf },
+ { 0, 0xf }
+};
+
+/* Default FPA pool settings per node (-1 pool_num = allocate dynamically) */
+struct cvmx_pki_pool_config pki_dflt_pool[CVMX_MAX_NODES] = {
+ [0 ... CVMX_MAX_NODES -
+ 1] = { .pool_num = -1, .buffer_size = 2048, .buffer_count = 0 }
+};
+
+/* Default aura settings per node */
+struct cvmx_pki_aura_config pki_dflt_aura[CVMX_MAX_NODES] = {
+ [0 ... CVMX_MAX_NODES -
+ 1] = { .aura_num = 0, .pool_num = -1, .buffer_count = 0 }
+};
+
+/* Default parse style: length/FCS checks on, strip FCS, 40-byte skip */
+struct cvmx_pki_style_config pki_dflt_style[CVMX_MAX_NODES] = {
+ [0 ... CVMX_MAX_NODES - 1] = { .parm_cfg = { .lenerr_en = 1,
+ .maxerr_en = 1,
+ .minerr_en = 1,
+ .fcs_strip = 1,
+ .fcs_chk = 1,
+ .first_skip = 40,
+ .mbuff_size = 2048 } }
+};
+
+/* Default SSO group / QPG / pkind configuration, filled in at runtime */
+struct cvmx_pki_sso_grp_config pki_dflt_sso_grp[CVMX_MAX_NODES];
+struct cvmx_pki_qpg_config pki_dflt_qpg[CVMX_MAX_NODES];
+struct cvmx_pki_pkind_config pki_dflt_pkind[CVMX_MAX_NODES];
+/* Identity pkind-to-style mapping (pkind N uses style N) */
+u64 pkind_style_map[CVMX_MAX_NODES][CVMX_PKI_NUM_PKIND] = {
+ [0 ... CVMX_MAX_NODES -
+ 1] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63 }
+};
+
+/* Storage for the legacy QoS watcher values before they are written to
+ * the PCAM when a watcher is enabled.  There is no cvmx-pip.c file, so
+ * this PIP-related state lives here.
+ */
+struct cvmx_pki_legacy_qos_watcher qos_watcher[8];
+/* NOTE(review): "{ -1 }" initializes only element 0 to -1; the
+ * remaining entries are zero.  Confirm whether all entries were meant
+ * to be -1 (unused markers).
+ */
+u64 pcam_dmach[CVMX_PKI_NUM_PCAM_ENTRY] = { -1 };
+u64 pcam_dmacl[CVMX_PKI_NUM_PCAM_ENTRY] = { -1 };
+
+/** @INTERNAL
+ * Program the default layer-type (ltype) to backend-layer-type
+ * (beltype) translation map for one node.  The pairs are written in a
+ * fixed order from a static table.
+ *
+ * @param node node number
+ */
+void __cvmx_helper_pki_set_dflt_ltype_map(int node)
+{
+    static const int dflt_ltype_map[][2] = {
+        { CVMX_PKI_LTYPE_E_NONE, CVMX_PKI_BELTYPE_NONE },
+        { CVMX_PKI_LTYPE_E_ENET, CVMX_PKI_BELTYPE_MISC },
+        { CVMX_PKI_LTYPE_E_VLAN, CVMX_PKI_BELTYPE_MISC },
+        { CVMX_PKI_LTYPE_E_SNAP_PAYLD, CVMX_PKI_BELTYPE_MISC },
+        { CVMX_PKI_LTYPE_E_ARP, CVMX_PKI_BELTYPE_MISC },
+        { CVMX_PKI_LTYPE_E_RARP, CVMX_PKI_BELTYPE_MISC },
+        { CVMX_PKI_LTYPE_E_IP4, CVMX_PKI_BELTYPE_IP4 },
+        { CVMX_PKI_LTYPE_E_IP4_OPT, CVMX_PKI_BELTYPE_IP4 },
+        { CVMX_PKI_LTYPE_E_IP6, CVMX_PKI_BELTYPE_IP6 },
+        { CVMX_PKI_LTYPE_E_IP6_OPT, CVMX_PKI_BELTYPE_IP6 },
+        { CVMX_PKI_LTYPE_E_IPSEC_ESP, CVMX_PKI_BELTYPE_MISC },
+        { CVMX_PKI_LTYPE_E_IPFRAG, CVMX_PKI_BELTYPE_MISC },
+        { CVMX_PKI_LTYPE_E_IPCOMP, CVMX_PKI_BELTYPE_MISC },
+        { CVMX_PKI_LTYPE_E_TCP, CVMX_PKI_BELTYPE_TCP },
+        { CVMX_PKI_LTYPE_E_UDP, CVMX_PKI_BELTYPE_UDP },
+        { CVMX_PKI_LTYPE_E_SCTP, CVMX_PKI_BELTYPE_SCTP },
+        { CVMX_PKI_LTYPE_E_UDP_VXLAN, CVMX_PKI_BELTYPE_UDP },
+        { CVMX_PKI_LTYPE_E_GRE, CVMX_PKI_BELTYPE_MISC },
+        { CVMX_PKI_LTYPE_E_NVGRE, CVMX_PKI_BELTYPE_MISC },
+        { CVMX_PKI_LTYPE_E_GTP, CVMX_PKI_BELTYPE_MISC },
+        { CVMX_PKI_LTYPE_E_SW28, CVMX_PKI_BELTYPE_MISC },
+        { CVMX_PKI_LTYPE_E_SW29, CVMX_PKI_BELTYPE_MISC },
+        { CVMX_PKI_LTYPE_E_SW30, CVMX_PKI_BELTYPE_MISC },
+        { CVMX_PKI_LTYPE_E_SW31, CVMX_PKI_BELTYPE_MISC },
+    };
+    unsigned int i;
+
+    for (i = 0; i < sizeof(dflt_ltype_map) / sizeof(dflt_ltype_map[0]); i++)
+        cvmx_pki_write_ltype_map(node, dflt_ltype_map[i][0],
+                                 dflt_ltype_map[i][1]);
+}
+
+/** @INTERNAL
+ * This function installs the default VLAN entries to identify
+ * the VLAN and set WQE[vv], WQE[vs] if VLAN is found. In 78XX
+ * hardware (PKI) is not hardwired to recognize any 802.1Q VLAN
+ * Ethertypes.  One PCAM entry is installed per known TPID
+ * (0x8100, 0x88a8, 0x9200, 0x9100) for each ETHTYPE term.
+ *
+ * @param node node number
+ *
+ * @return Zero on success, -1 if a PCAM entry could not be allocated.
+ */
+int __cvmx_helper_pki_install_dflt_vlan(int node)
+{
+    /* TPID values in the upper 16 bits of the PCAM data word */
+    static const u64 vlan_tpids[] = { 0x81000000, 0x88a80000,
+                                      0x92000000, 0x91000000 };
+    struct cvmx_pki_pcam_input pcam_input;
+    struct cvmx_pki_pcam_action pcam_action;
+    enum cvmx_pki_term field;
+    u64 cl_mask = CVMX_PKI_CLUSTER_ALL;
+    unsigned int t;
+
+    memset(&pcam_input, 0, sizeof(pcam_input));
+    memset(&pcam_action, 0, sizeof(pcam_action));
+
+    if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X)) {
+        /* PKI-20858: disable PCAM ECC on pass-1 silicon */
+        int cl;
+
+        for (cl = 0; cl < 4; cl++) {
+            union cvmx_pki_clx_ecc_ctl ecc_ctl;
+
+            ecc_ctl.u64 = csr_rd_node(node, CVMX_PKI_CLX_ECC_CTL(cl));
+            ecc_ctl.s.pcam_en = 0;
+            ecc_ctl.s.pcam0_cdis = 1;
+            ecc_ctl.s.pcam1_cdis = 1;
+            csr_wr_node(node, CVMX_PKI_CLX_ECC_CTL(cl), ecc_ctl.u64);
+        }
+    }
+
+    for (field = CVMX_PKI_PCAM_TERM_ETHTYPE0;
+         field < CVMX_PKI_PCAM_TERM_ETHTYPE2; field++) {
+        int bank = field & 0x01;
+
+        /* Match any style; match the TPID in the upper 16 data bits */
+        pcam_input.style = 0;
+        pcam_input.style_mask = 0;
+        pcam_input.field = field;
+        pcam_input.field_mask = 0xfd;
+        pcam_input.data_mask = 0xffff0000;
+        /* On match: mark layer as VLAN and advance past the tag */
+        pcam_action.parse_mode_chg = CVMX_PKI_PARSE_NO_CHG;
+        pcam_action.layer_type_set = CVMX_PKI_LTYPE_E_VLAN;
+        pcam_action.style_add = 0;
+        pcam_action.pointer_advance = 4;
+
+        for (t = 0; t < sizeof(vlan_tpids) / sizeof(vlan_tpids[0]); t++) {
+            int index = cvmx_pki_pcam_entry_alloc(
+                node, CVMX_PKI_FIND_AVAL_ENTRY, bank, cl_mask);
+
+            if (index < 0) {
+                debug("ERROR: Allocating pcam entry node=%d bank=%d\n",
+                      node, bank);
+                return -1;
+            }
+            pcam_input.data = vlan_tpids[t];
+            /* cluster_mask in pass2 */
+            cvmx_pki_pcam_write_entry(node, index, cl_mask,
+                                      pcam_input, pcam_action);
+        }
+    }
+    return 0;
+}
+
+/**
+ * @INTERNAL
+ * Reserve the default PKI cluster group for a node and attach the
+ * configured cluster mask to it.  If the group is already reserved it
+ * is shared (success) unless no specific group was requested.
+ *
+ * @param node node number
+ *
+ * @return Zero on success, -1 on allocation failure.
+ */
+static int __cvmx_helper_setup_pki_cluster_groups(int node)
+{
+    u64 mask;
+    int grp;
+
+    grp = cvmx_pki_cluster_grp_alloc(node, pki_dflt_clgrp[node].grp_num);
+    if (grp == CVMX_RESOURCE_ALLOC_FAILED)
+        return -1;
+    if (grp == CVMX_RESOURCE_ALREADY_RESERVED) {
+        /* Already configured elsewhere: share it if one was requested */
+        return pki_dflt_clgrp[node].grp_num == -1 ? -1 : 0;
+    }
+
+    mask = pki_dflt_clgrp[node].cluster_mask;
+    if (pki_helper_debug)
+        debug("pki-helper: setup pki cluster grp %d with cl_mask 0x%llx\n",
+              (int)grp, (unsigned long long)mask);
+    cvmx_pki_attach_cluster_to_group(node, grp, mask);
+    return 0;
+}
+
+/**
+ * This function reserves and configures the default SSO group used by
+ * PKI: its priority, weight and core affinity.  (The original comment
+ * saying "pools/auras" was a copy-paste error.)  If the group was
+ * already reserved it is shared unchanged.
+ *
+ * @param node node number
+ */
+int __cvmx_helper_pki_setup_sso_groups(int node)
+{
+ struct cvmx_coremask core_mask = CVMX_COREMASK_EMPTY;
+ cvmx_xgrp_t xgrp;
+ int grp;
+ int priority;
+ int weight;
+ int affinity;
+ u64 modify_mask;
+ u8 core_mask_set;
+
+ /* try to reserve sso groups and configure them if they are not configured */
+ grp = cvmx_sso_reserve_group_range(node, &pki_dflt_sso_grp[node].group,
+ 1);
+ if (grp == CVMX_RESOURCE_ALLOC_FAILED)
+ return -1;
+ else if (grp == CVMX_RESOURCE_ALREADY_RESERVED)
+ return 0; /* sso group already configured, share it */
+
+ xgrp.xgrp = grp;
+ priority = pki_dflt_sso_grp[node].priority;
+ weight = pki_dflt_sso_grp[node].weight;
+ affinity = pki_dflt_sso_grp[node].affinity;
+ core_mask_set = pki_dflt_sso_grp[node].core_mask_set;
+ cvmx_coremask_set64_node(&core_mask, node,
+ pki_dflt_sso_grp[node].core_mask);
+ /* Only priority/weight/affinity are updated by the call below */
+ modify_mask = CVMX_SSO_MODIFY_GROUP_PRIORITY |
+ CVMX_SSO_MODIFY_GROUP_WEIGHT |
+ CVMX_SSO_MODIFY_GROUP_AFFINITY;
+ if (pki_helper_debug)
+ debug("pki-helper: set sso grp %d with priority %d weight %d core_mask 0x%llx\n",
+ grp, priority, weight,
+ (unsigned long long)pki_dflt_sso_grp[node].core_mask);
+ cvmx_sso_set_group_priority(node, xgrp, priority, weight, affinity,
+ modify_mask);
+ cvmx_sso_set_group_core_affinity(xgrp, &core_mask, core_mask_set);
+ return 0;
+}
+
+/**
+ * This function sets up the default FPA pool and aura to be used by
+ * PKI on a node.  If the aura is already valid it is shared as-is.
+ * The pool is only created when a non-zero buffer count is configured;
+ * likewise the aura.
+ *
+ * @param node node number
+ *
+ * @return Zero on success (or nothing to do), -1 on allocation failure.
+ */
+static int __cvmx_helper_pki_setup_fpa_pools(int node)
+{
+ u64 buffer_count;
+ u64 buffer_size;
+
+ if (__cvmx_fpa3_aura_valid(pki_dflt_aura[node].aura))
+ return 0; /* aura already configured, share it */
+
+ buffer_count = pki_dflt_pool[node].buffer_count;
+ buffer_size = pki_dflt_pool[node].buffer_size;
+
+ if (buffer_count != 0) {
+ pki_dflt_pool[node].pool = cvmx_fpa3_setup_fill_pool(
+ node, pki_dflt_pool[node].pool_num, "PKI POOL DFLT",
+ buffer_size, buffer_count, NULL);
+ if (!__cvmx_fpa3_pool_valid(pki_dflt_pool[node].pool)) {
+ cvmx_printf("ERROR: %s: Failed to allocate pool %d\n",
+ __func__, pki_dflt_pool[node].pool_num);
+ return -1;
+ }
+ /* Record the pool number actually assigned by the allocator */
+ pki_dflt_pool[node].pool_num = pki_dflt_pool[node].pool.lpool;
+
+ if (pki_helper_debug)
+ debug("%s pool %d with buffer size %d cnt %d\n",
+ __func__, pki_dflt_pool[node].pool_num,
+ (int)buffer_size, (int)buffer_count);
+
+ pki_dflt_aura[node].pool_num = pki_dflt_pool[node].pool_num;
+ pki_dflt_aura[node].pool = pki_dflt_pool[node].pool;
+ }
+
+ buffer_count = pki_dflt_aura[node].buffer_count;
+
+ if (buffer_count != 0) {
+ pki_dflt_aura[node].aura = cvmx_fpa3_set_aura_for_pool(
+ pki_dflt_aura[node].pool, pki_dflt_aura[node].aura_num,
+ "PKI DFLT AURA", buffer_size, buffer_count);
+
+ if (!__cvmx_fpa3_aura_valid(pki_dflt_aura[node].aura)) {
+ /* Fixed garbled format string: was "ERROR: %sL Failed..." */
+ debug("ERROR: %s: Failed to allocate aura %d\n",
+ __func__, pki_dflt_aura[node].aura_num);
+ return -1;
+ }
+ }
+ return 0;
+}
+
+/**
+ * Reserve and write the node's default QPG table entry.
+ *
+ * @param node node number
+ * @return 0 on success or when the entry was already reserved and is
+ *         shared, -1 when no QPG entry could be allocated.
+ */
+static int __cvmx_helper_setup_pki_qpg_table(int node)
+{
+ int offset;
+
+ offset = cvmx_pki_qpg_entry_alloc(node, pki_dflt_qpg[node].qpg_base, 1);
+ if (offset == CVMX_RESOURCE_ALLOC_FAILED)
+ return -1;
+ else if (offset == CVMX_RESOURCE_ALREADY_RESERVED)
+ return 0; /* share the qpg table entry */
+ if (pki_helper_debug)
+ debug("pki-helper: set qpg entry at offset %d with port add %d aura %d grp_ok %d grp_bad %d\n",
+ offset, pki_dflt_qpg[node].port_add,
+ pki_dflt_qpg[node].aura_num, pki_dflt_qpg[node].grp_ok,
+ pki_dflt_qpg[node].grp_bad);
+ cvmx_pki_write_qpg_entry(node, offset, &pki_dflt_qpg[node]);
+ return 0;
+}
+
+/**
+ * Per-port PKI setup: reserve/configure the port's style and write its
+ * pkind configuration based on the node defaults.  No-op when default
+ * initialization is disabled for the node.
+ *
+ * @param node node number
+ * @param ipd_port ipd port number to configure
+ * @return 0 on success, -1 when the style could not be allocated.
+ */
+int __cvmx_helper_pki_port_setup(int node, int ipd_port)
+{
+ int xiface, index;
+ int pknd, style_num;
+ int rs;
+ struct cvmx_pki_pkind_config pkind_cfg;
+
+ if (!cvmx_pki_dflt_init[node])
+ return 0;
+ xiface = cvmx_helper_get_interface_num(ipd_port);
+ index = cvmx_helper_get_interface_index_num(ipd_port);
+
+ pknd = cvmx_helper_get_pknd(xiface, index);
+ style_num = pkind_style_map[node][pknd];
+
+ /*
+ * Try to reserve the style; if it is not configured already,
+ * reserve and configure it.  ALREADY_RESERVED falls through and
+ * shares the existing configuration.
+ */
+ rs = cvmx_pki_style_alloc(node, style_num);
+ if (rs < 0) {
+ if (rs == CVMX_RESOURCE_ALLOC_FAILED)
+ return -1;
+ } else {
+ if (pki_helper_debug)
+ debug("pki-helper: set style %d with default parameters\n",
+ style_num);
+ pkind_style_map[node][pknd] = style_num;
+ /* configure style with default parameters */
+ cvmx_pki_write_style_config(node, style_num,
+ CVMX_PKI_CLUSTER_ALL,
+ &pki_dflt_style[node]);
+ }
+ if (pki_helper_debug)
+ debug("pki-helper: set pkind %d with initial style %d\n", pknd,
+ style_num);
+ /* write pkind configuration */
+ pkind_cfg = pki_dflt_pkind[node];
+ pkind_cfg.initial_style = style_num;
+ cvmx_pki_write_pkind_config(node, pknd, &pkind_cfg);
+ return 0;
+}
+
+/**
+ * One-time global PKI setup for a node: default ltype map, FPA
+ * pools/auras, cluster groups and the default QPG table entry, plus
+ * the CN78XX pass-1 PKI-19103 workaround.
+ *
+ * NOTE(review): the return values of the sub-setup helpers are
+ * ignored, so allocation failures are silent here -- confirm whether
+ * callers rely on this always returning 0 before propagating errors.
+ *
+ * @param node node number
+ * @return always 0.
+ */
+int __cvmx_helper_pki_global_setup(int node)
+{
+ __cvmx_helper_pki_set_dflt_ltype_map(node);
+ if (!cvmx_pki_dflt_init[node])
+ return 0;
+ /* Setup the packet pools*/
+ __cvmx_helper_pki_setup_fpa_pools(node);
+ /*set up default cluster*/
+ __cvmx_helper_setup_pki_cluster_groups(node);
+ //__cvmx_helper_pki_setup_sso_groups(node);
+ __cvmx_helper_setup_pki_qpg_table(node);
+ /*
+ * errata PKI-19103 backward compat has only 1 aura
+ * no head line blocking
+ */
+ if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X)) {
+ cvmx_pki_buf_ctl_t buf_ctl;
+
+ buf_ctl.u64 = csr_rd_node(node, CVMX_PKI_BUF_CTL);
+ buf_ctl.s.fpa_wait = 1;
+ csr_wr_node(node, CVMX_PKI_BUF_CTL, buf_ctl.u64);
+ }
+ return 0;
+}
+
+/**
+ * Set the default PKI packet pool parameters for a node.
+ * A pool number of 0 is remapped to -1 (dynamic allocation) --
+ * presumably pool 0 is reserved; confirm against the FPA3 driver.
+ */
+void cvmx_helper_pki_set_dflt_pool(int node, int pool, int buffer_size,
+ int buffer_count)
+{
+ if (pool == 0)
+ pool = -1;
+ pki_dflt_pool[node].pool_num = pool;
+ pki_dflt_pool[node].buffer_size = buffer_size;
+ pki_dflt_pool[node].buffer_count = buffer_count;
+}
+
+/** Set the default PKI aura number, backing pool and buffer count for a node. */
+void cvmx_helper_pki_set_dflt_aura(int node, int aura, int pool,
+ int buffer_count)
+{
+ pki_dflt_aura[node].aura_num = aura;
+ pki_dflt_aura[node].pool_num = pool;
+ pki_dflt_aura[node].buffer_count = buffer_count;
+}
+
+/** Override only the default pool's buffer count for a node. */
+void cvmx_helper_pki_set_dflt_pool_buffer(int node, int buffer_count)
+{
+ pki_dflt_pool[node].buffer_count = buffer_count;
+}
+
+/** Override only the default aura's buffer count for a node. */
+void cvmx_helper_pki_set_dflt_aura_buffer(int node, int buffer_count)
+{
+ pki_dflt_aura[node].buffer_count = buffer_count;
+}
+
+/** Replace the node's default style configuration (copied by value). */
+void cvmx_helper_pki_set_dflt_style(int node,
+ struct cvmx_pki_style_config *style_cfg)
+{
+ pki_dflt_style[node] = *style_cfg;
+}
+
+/** Copy the node's default style configuration into *style_cfg. */
+void cvmx_helper_pki_get_dflt_style(int node,
+ struct cvmx_pki_style_config *style_cfg)
+{
+ *style_cfg = pki_dflt_style[node];
+}
+
+/** Replace the node's default QPG configuration (copied by value). */
+void cvmx_helper_pki_set_dflt_qpg(int node, struct cvmx_pki_qpg_config *qpg_cfg)
+{
+ pki_dflt_qpg[node] = *qpg_cfg;
+}
+
+/** Copy the node's default QPG configuration into *qpg_cfg. */
+void cvmx_helper_pki_get_dflt_qpg(int node, struct cvmx_pki_qpg_config *qpg_cfg)
+{
+ *qpg_cfg = pki_dflt_qpg[node];
+}
+
+/** Map a pkind to an initial style for the given node. */
+void cvmx_helper_pki_set_dflt_pkind_map(int node, int pkind, int style)
+{
+ pkind_style_map[node][pkind] = style;
+}
+
+/** Disable default PKI initialization for a node (see cvmx_pki_dflt_init). */
+void cvmx_helper_pki_no_dflt_init(int node)
+{
+ cvmx_pki_dflt_init[node] = 0;
+}
+
+/** Enable/disable default PKI backpressure for a node (applied in cvmx_helper_pki_enable). */
+void cvmx_helper_pki_set_dflt_bp_en(int node, bool bp_en)
+{
+ cvmx_pki_dflt_bp_en[node] = bp_en;
+}
+
+/**
+ * This function enables the PKI hardware to
+ * start accepting/processing packets.
+ *
+ * Installs the default VLAN PCAM entries, configures the clusters,
+ * optionally enables backpressure, then turns on the parser and PKI.
+ *
+ * @param node node number
+ */
+void cvmx_helper_pki_enable(int node)
+{
+ if (pki_helper_debug)
+ debug("enable PKI on node %d\n", node);
+ __cvmx_helper_pki_install_dflt_vlan(node);
+ cvmx_pki_setup_clusters(node);
+ if (cvmx_pki_dflt_bp_en[node])
+ cvmx_pki_enable_backpressure(node);
+ cvmx_pki_parse_enable(node, 0);
+ cvmx_pki_enable(node);
+}
+
+/**
+ * This function frees up PKI resources consumed by that port.
+ * This function should only be called if port resources
+ * (fpa pools aura, style qpg entry pcam entry etc.) are not shared.
+ * Currently a stub: per-port resource freeing is not implemented.
+ * @param ipd_port ipd port number for which resources need to
+ * be freed.
+ * @return always 0.
+ */
+int cvmx_helper_pki_port_shutdown(int ipd_port)
+{
+ /* remove pcam entries */
+ /* implement if needed */
+ /* __cvmx_pki_port_rsrc_free(node); */
+ return 0;
+}
+
+/**
+ * This function shuts down the complete PKI hardware
+ * and software resources.
+ *
+ * Disables PKI, frees prefetched buffers, resets the block, releases
+ * the allocated PKI resources (FPA pools/auras are released by the FPA
+ * code), then returns the per-pkind and per-style configuration
+ * registers to their reset values.
+ *
+ * @param node node number where PKI needs to shutdown.
+ */
+void cvmx_helper_pki_shutdown(int node)
+{
+ int i, k;
+ /* remove pcam entries */
+ /* Disable PKI */
+ cvmx_pki_disable(node);
+ /* Free all prefetched buffers */
+ __cvmx_pki_free_ptr(node);
+ /* Reset PKI */
+ cvmx_pki_reset(node);
+ /* Free all the allocated PKI resources
+ except fpa pools & aura which will be done in fpa block */
+ __cvmx_pki_global_rsrc_free(node);
+ /* Setup some configuration registers to the reset state.*/
+ for (i = 0; i < CVMX_PKI_NUM_PKIND; i++) {
+ for (k = 0; k < (int)CVMX_PKI_NUM_CLUSTER; k++) {
+ csr_wr_node(node, CVMX_PKI_CLX_PKINDX_CFG(i, k), 0);
+ csr_wr_node(node, CVMX_PKI_CLX_PKINDX_STYLE(i, k), 0);
+ csr_wr_node(node, CVMX_PKI_CLX_PKINDX_SKIP(i, k), 0);
+ csr_wr_node(node, CVMX_PKI_CLX_PKINDX_L2_CUSTOM(i, k),
+ 0);
+ csr_wr_node(node, CVMX_PKI_CLX_PKINDX_LG_CUSTOM(i, k),
+ 0);
+ }
+ /*
+ * Bug fix: index by the pkind loop variable 'i'.  The
+ * original used 'k', which always equals
+ * CVMX_PKI_NUM_CLUSTER after the inner loop, so only a
+ * single out-of-range ICGSEL register was ever written.
+ */
+ csr_wr_node(node, CVMX_PKI_PKINDX_ICGSEL(i), 0);
+ }
+ for (i = 0; i < CVMX_PKI_NUM_FINAL_STYLE; i++) {
+ for (k = 0; k < (int)CVMX_PKI_NUM_CLUSTER; k++) {
+ csr_wr_node(node, CVMX_PKI_CLX_STYLEX_CFG(i, k), 0);
+ csr_wr_node(node, CVMX_PKI_CLX_STYLEX_CFG2(i, k), 0);
+ csr_wr_node(node, CVMX_PKI_CLX_STYLEX_ALG(i, k), 0);
+ }
+ /* Same fix as above: was indexed by 'k' instead of 'i'. */
+ csr_wr_node(node, CVMX_PKI_STYLEX_BUF(i), (0x5 << 22) | 0x20);
+ }
+}
+
+/**
+ * This function calculates how many qpg entries will be needed for
+ * a particular QOS.
+ * @param qpg_qos qos value for which entries need to be calculated.
+ * @return the number of QPG entries required, or 0 for an
+ *         unrecognized qos value.
+ */
+int cvmx_helper_pki_get_num_qpg_entry(enum cvmx_pki_qpg_qos qpg_qos)
+{
+ switch (qpg_qos) {
+ case CVMX_PKI_QPG_QOS_NONE:
+ return 1;
+ case CVMX_PKI_QPG_QOS_VLAN:
+ case CVMX_PKI_QPG_QOS_MPLS:
+ return 8;
+ case CVMX_PKI_QPG_QOS_DSA_SRC:
+ return 32;
+ case CVMX_PKI_QPG_QOS_DIFFSERV:
+ case CVMX_PKI_QPG_QOS_HIGIG:
+ return 64;
+ default:
+ debug("ERROR: unrecognized qpg_qos = %d", qpg_qos);
+ return 0;
+ }
+}
+
+/**
+ * This function setups the qos table by allocating qpg entry and writing
+ * the provided parameters to that entry (offset).
+ * @param node node number.
+ * @param qpg_cfg pointer to struct containing qpg configuration;
+ * qpg_base is updated in place with the allocated offset.
+ * @return the allocated offset on success, or the
+ * CVMX_RESOURCE_ALREADY_RESERVED / CVMX_RESOURCE_ALLOC_FAILED
+ * sentinel on the corresponding condition.
+ */
+int cvmx_helper_pki_set_qpg_entry(int node, struct cvmx_pki_qpg_config *qpg_cfg)
+{
+ int offset;
+
+ offset = cvmx_pki_qpg_entry_alloc(node, qpg_cfg->qpg_base, 1);
+ if (pki_helper_debug)
+ debug("pki-helper:set qpg entry at offset %d\n", offset);
+ if (offset == CVMX_RESOURCE_ALREADY_RESERVED) {
+ debug("INFO:setup_qpg_table: offset %d already reserved\n",
+ qpg_cfg->qpg_base);
+ return CVMX_RESOURCE_ALREADY_RESERVED;
+ } else if (offset == CVMX_RESOURCE_ALLOC_FAILED) {
+ debug("ERROR:setup_qpg_table: no more entries available\n");
+ return CVMX_RESOURCE_ALLOC_FAILED;
+ }
+ qpg_cfg->qpg_base = offset;
+ cvmx_pki_write_qpg_entry(node, offset, qpg_cfg);
+ return offset;
+}
+
+/**
+ * This function sets up aura QOS for RED, backpressure and tail-drop.
+ *
+ * @param node node number.
+ * @param aura aura to configure.
+ * @param ena_red enable RED based on [DROP] and [PASS] levels
+ * 1: enable 0:disable
+ * @param pass_thresh pass threshold for RED.
+ * @param drop_thresh drop threshold for RED
+ * @param ena_bp enable backpressure based on [BP] level.
+ * 1:enable 0:disable
+ * @param bp_thresh backpressure threshold.
+ * @param ena_drop enable tail drop.
+ * 1:enable 0:disable
+ * @return Zero on success. Negative on failure
+ * @note the 'node' and 'aura' arguments may be combined in the future
+ * to use a compound cvmx_fpa3_gaura_t structure argument.
+ */
+int cvmx_helper_setup_aura_qos(int node, int aura, bool ena_red, bool ena_drop,
+ u64 pass_thresh, u64 drop_thresh, bool ena_bp,
+ u64 bp_thresh)
+{
+ cvmx_fpa3_gaura_t gaura;
+
+ gaura = __cvmx_fpa3_gaura(node, aura);
+
+ /* tail-drop implies RED levels must be programmed in the FPA */
+ ena_red = ena_red | ena_drop;
+ cvmx_fpa3_setup_aura_qos(gaura, ena_red, pass_thresh, drop_thresh,
+ ena_bp, bp_thresh);
+ cvmx_pki_enable_aura_qos(node, aura, ena_red, ena_drop, ena_bp);
+ return 0;
+}
+
+/**
+ * This function maps specified bpid to all the auras from which it can receive bp and
+ * then maps that bpid to all the channels, that bpid can assert bp on.
+ *
+ * @param node node number.
+ * @param aura aura number which will back pressure specified bpid.
+ * @param bpid bpid to map.
+ * @param chl_map array of channels to map to that bpid.
+ * @param chl_cnt number of channel/ports to map to that bpid.
+ * @return Zero on success. Negative on failure
+ */
+int cvmx_helper_pki_map_aura_chl_bpid(int node, uint16_t aura, uint16_t bpid,
+ u16 chl_map[], uint16_t chl_cnt)
+{
+ u16 channel;
+
+ if (aura >= CVMX_PKI_NUM_AURA) {
+ debug("ERROR: aura %d is > supported in hw\n", aura);
+ return -1;
+ }
+ if (bpid >= CVMX_PKI_NUM_BPID) {
+ debug("ERROR: bpid %d is > supported in hw\n", bpid);
+ return -1;
+ }
+ cvmx_pki_write_aura_bpid(node, aura, bpid);
+ /* walk the channel list back-to-front; any invalid channel aborts */
+ while (chl_cnt--) {
+ channel = chl_map[chl_cnt];
+ if (channel >= CVMX_PKI_NUM_CHANNEL) {
+ debug("ERROR: channel %d is > supported in hw\n",
+ channel);
+ return -1;
+ }
+ cvmx_pki_write_channel_bpid(node, channel, bpid);
+ }
+ return 0;
+}
+
+/** @INTERNAL
+ * This function returns the value of port shift required
+ * if all the ports on that interface are using same style and
+ * configuring qpg_qos != NONE.
+ *
+ * @param xiface interface number (with node).
+ * @param qpg_qos qos scheme in use on the interface.
+ * @return number of bits to shift the port index by when indexing
+ * the QPG table.
+ */
+int __cvmx_helper_pki_port_shift(int xiface, enum cvmx_pki_qpg_qos qpg_qos)
+{
+ u8 num_qos;
+ cvmx_helper_interface_mode_t mode =
+ cvmx_helper_interface_get_mode(xiface);
+
+ num_qos = cvmx_helper_pki_get_num_qpg_entry(qpg_qos);
+ /* non-SGMII/NPI/LOOP interfaces: shift is log2 of the qos count */
+ if ((mode != CVMX_HELPER_INTERFACE_MODE_SGMII) &&
+ (mode != CVMX_HELPER_INTERFACE_MODE_NPI) &&
+ (mode != CVMX_HELPER_INTERFACE_MODE_LOOP)) {
+ return ffs(num_qos) - 1;
+ } else if (num_qos <= 16)
+ return 0;
+ else if (num_qos <= 32)
+ return 1;
+ else
+ return 2;
+}
+
+/** @INTERNAL
+ * Reserve the per-QOS resources (FPA pool, aura, SSO group) described
+ * by @qossch.  Each resource is only allocated when its *_per_qos flag
+ * is set and its number is still negative (not yet assigned); the
+ * allocated numbers are written back into @qossch.
+ *
+ * @param node node number.
+ * @param qossch QOS scheduling parameters, updated in place.
+ * @return 0 on success, negative on allocation failure.
+ */
+int __cvmx_helper_pki_qos_rsrcs(int node, struct cvmx_pki_qos_schd *qossch)
+{
+ int rs;
+
+ /* Reserve pool resources */
+ if (qossch->pool_per_qos && qossch->pool_num < 0) {
+ if (pki_helper_debug)
+ debug("pki-helper:qos-rsrc: setup pool %d buff_size %d blocks %d\n",
+ qossch->pool_num, (int)qossch->pool_buff_size,
+ (int)qossch->pool_max_buff);
+
+ qossch->_pool = cvmx_fpa3_setup_fill_pool(
+ node, qossch->pool_num, qossch->pool_name,
+ qossch->pool_buff_size, qossch->pool_max_buff, NULL);
+
+ if (!__cvmx_fpa3_pool_valid(qossch->_pool)) {
+ cvmx_printf("ERROR: %s POOL %d init failed\n", __func__,
+ qossch->pool_num);
+ return -1;
+ }
+
+ qossch->pool_num = qossch->_pool.lpool;
+ if (pki_helper_debug)
+ debug("pool alloced is %d\n", qossch->pool_num);
+ }
+ /* Reserve aura resources */
+ if (qossch->aura_per_qos && qossch->aura_num < 0) {
+ if (pki_helper_debug)
+ debug("pki-helper:qos-rsrc: setup aura %d pool %d blocks %d\n",
+ qossch->aura_num, qossch->pool_num,
+ (int)qossch->aura_buff_cnt);
+
+ qossch->_aura = cvmx_fpa3_set_aura_for_pool(
+ qossch->_pool, qossch->aura_num, qossch->aura_name,
+ qossch->pool_buff_size, qossch->aura_buff_cnt);
+
+ if (!__cvmx_fpa3_aura_valid(qossch->_aura)) {
+ cvmx_printf("ERROR: %s AURA %d init failed\n", __func__,
+ qossch->aura_num);
+ return -1;
+ }
+
+ qossch->aura_num = qossch->_aura.laura;
+ if (pki_helper_debug)
+ debug("aura alloced is %d\n", qossch->aura_num);
+ }
+ /* Reserve sso group resources */
+ /* Find which node work needs to be schedules vinita_to_do to extract node*/
+ if (qossch->sso_grp_per_qos && qossch->sso_grp < 0) {
+ //unsigned grp_node;
+ //grp_node = (abs)(qossch->sso_grp + CVMX_PKI_FIND_AVAILABLE_RSRC);
+ rs = cvmx_sso_reserve_group(node);
+ if (rs < 0) {
+ debug("pki-helper:qos-rsrc: ERROR: sso grp not available\n");
+ return rs;
+ }
+ /* encode the node in bits [15:8] of the group id */
+ qossch->sso_grp = rs | (node << 8);
+ if (pki_helper_debug)
+ debug("pki-helper:qos-rsrc: sso grp alloced is %d\n",
+ qossch->sso_grp);
+ }
+ return 0;
+}
+
+/** @INTERNAL
+ * Reserve the per-port resources (FPA pool, aura, SSO group) described
+ * by @prtsch.  Each resource is only allocated when its *_per_prt flag
+ * is set and its number is still negative; the allocated numbers are
+ * written back into @prtsch.  On CN78XX pass 1.x, per-port pools are
+ * force-disabled per erratum 22557.
+ *
+ * @param node node number.
+ * @param prtsch port scheduling parameters, updated in place.
+ * @return 0 on success, negative on allocation failure.
+ */
+int __cvmx_helper_pki_port_rsrcs(int node, struct cvmx_pki_prt_schd *prtsch)
+{
+ int rs;
+
+ /* Erratum 22557: Disable per-port allocation for CN78XX pass 1.X */
+ if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X)) {
+ static bool warned;
+
+ prtsch->pool_per_prt = 0;
+ if (!warned)
+ cvmx_printf(
+ "WARNING: %s: Ports configured in single-pool mode per erratum 22557.\n",
+ __func__);
+ warned = true;
+ }
+ /* Reserve pool resources */
+ if (prtsch->pool_per_prt && prtsch->pool_num < 0) {
+ if (pki_helper_debug)
+ debug("pki-helper:port-rsrc: setup pool %d buff_size %d blocks %d\n",
+ prtsch->pool_num, (int)prtsch->pool_buff_size,
+ (int)prtsch->pool_max_buff);
+
+ prtsch->_pool = cvmx_fpa3_setup_fill_pool(
+ node, prtsch->pool_num, prtsch->pool_name,
+ prtsch->pool_buff_size, prtsch->pool_max_buff, NULL);
+
+ if (!__cvmx_fpa3_pool_valid(prtsch->_pool)) {
+ cvmx_printf("ERROR: %s: POOL %d init failed\n",
+ __func__, prtsch->pool_num);
+ return -1;
+ }
+ prtsch->pool_num = prtsch->_pool.lpool;
+ if (pki_helper_debug)
+ debug("pool alloced is %d\n", prtsch->pool_num);
+ }
+ /* Reserve aura resources */
+ if (prtsch->aura_per_prt && prtsch->aura_num < 0) {
+ if (pki_helper_debug)
+ debug("pki-helper:port-rsrc; setup aura %d pool %d blocks %d\n",
+ prtsch->aura_num, prtsch->pool_num,
+ (int)prtsch->aura_buff_cnt);
+ prtsch->_aura = cvmx_fpa3_set_aura_for_pool(
+ prtsch->_pool, prtsch->aura_num, prtsch->aura_name,
+ prtsch->pool_buff_size, prtsch->aura_buff_cnt);
+
+ if (!__cvmx_fpa3_aura_valid(prtsch->_aura)) {
+ cvmx_printf("ERROR: %s: AURA %d init failed\n",
+ __func__, prtsch->aura_num);
+ return -1;
+ }
+ prtsch->aura_num = prtsch->_aura.laura;
+
+ if (pki_helper_debug)
+ debug("aura alloced is %d\n", prtsch->aura_num);
+ }
+ /* Reserve sso group resources , vinita_to_do to extract node*/
+ if (prtsch->sso_grp_per_prt && prtsch->sso_grp < 0) {
+ //unsigned grp_node;
+ //grp_node = (abs)(prtsch->sso_grp + CVMX_PKI_FIND_AVAILABLE_RSRC);
+ rs = cvmx_sso_reserve_group(node);
+ if (rs < 0) {
+ cvmx_printf("ERROR: %s: sso grp not available\n",
+ __func__);
+ return rs;
+ }
+ /* encode the node in bits [15:8] of the group id */
+ prtsch->sso_grp = rs | (node << 8);
+ if (pki_helper_debug)
+ debug("pki-helper:port-rsrc: sso grp alloced is %d\n",
+ prtsch->sso_grp);
+ }
+ return 0;
+}
+
+/** @INTERNAL
+ * Reserve the per-interface resources (FPA pool, aura, SSO group)
+ * described by @intf.  Each resource is only allocated when its
+ * *_per_intf flag is set and its number is still negative; the
+ * allocated numbers are written back into @intf.
+ *
+ * @param node node number.
+ * @param intf interface scheduling parameters, updated in place.
+ * @return 0 on success, negative on allocation failure.
+ */
+int __cvmx_helper_pki_intf_rsrcs(int node, struct cvmx_pki_intf_schd *intf)
+{
+ int rs;
+
+ if (intf->pool_per_intf && intf->pool_num < 0) {
+ if (pki_helper_debug)
+ debug("pki-helper:intf-rsrc: setup pool %d buff_size %d blocks %d\n",
+ intf->pool_num, (int)intf->pool_buff_size,
+ (int)intf->pool_max_buff);
+ intf->_pool = cvmx_fpa3_setup_fill_pool(
+ node, intf->pool_num, intf->pool_name,
+ intf->pool_buff_size, intf->pool_max_buff, NULL);
+
+ if (!__cvmx_fpa3_pool_valid(intf->_pool)) {
+ cvmx_printf("ERROR: %s: POOL %d init failed\n",
+ __func__, intf->pool_num);
+ return -1;
+ }
+ intf->pool_num = intf->_pool.lpool;
+
+ if (pki_helper_debug)
+ debug("pool alloced is %d\n", intf->pool_num);
+ }
+ if (intf->aura_per_intf && intf->aura_num < 0) {
+ if (pki_helper_debug)
+ debug("pki-helper:intf-rsrc: setup aura %d pool %d blocks %d\n",
+ intf->aura_num, intf->pool_num,
+ (int)intf->aura_buff_cnt);
+ intf->_aura = cvmx_fpa3_set_aura_for_pool(
+ intf->_pool, intf->aura_num, intf->aura_name,
+ intf->pool_buff_size, intf->aura_buff_cnt);
+
+ if (!__cvmx_fpa3_aura_valid(intf->_aura)) {
+ cvmx_printf("ERROR: %s: AURA %d init failed\n",
+ __func__, intf->aura_num);
+
+ return -1;
+ }
+
+ intf->aura_num = intf->_aura.laura;
+
+ if (pki_helper_debug)
+ debug("aura alloced is %d\n", intf->aura_num);
+ }
+ /* vinita_to_do to extract node */
+ if (intf->sso_grp_per_intf && intf->sso_grp < 0) {
+ //unsigned grp_node;
+ //grp_node = (abs)(intf->sso_grp + CVMX_PKI_FIND_AVAILABLE_RSRC);
+ rs = cvmx_sso_reserve_group(node);
+ if (rs < 0) {
+ cvmx_printf("ERROR: %s: sso grp not available\n",
+ __func__);
+ return rs;
+ }
+ /* encode the node in bits [15:8] of the group id */
+ intf->sso_grp = rs | (node << 8);
+ if (pki_helper_debug)
+ debug("pki-helper:intf-rsrc: sso grp alloced is %d\n",
+ intf->sso_grp);
+ }
+ return 0;
+}
+
+/** @INTERNAL
+ * Allocate @num_entry QPG entries starting at @qpg_base and program
+ * them from the per-port per-QOS parameters of @intfsch.
+ *
+ * @param node node number.
+ * @param port port index within the interface whose qos_s[] entries
+ * provide the QPG parameters.
+ * @param qpg_base requested base QPG entry.
+ * @param num_entry number of consecutive entries to allocate/write.
+ * @param intfsch interface scheduling parameters; qpg_base is
+ * recorded when not yet assigned.
+ * @return the allocated offset, or the ALREADY_RESERVED /
+ * ALLOC_FAILED sentinel from the allocator.
+ */
+int __cvmx_helper_pki_set_intf_qpg(int node, int port, int qpg_base,
+ int num_entry,
+ struct cvmx_pki_intf_schd *intfsch)
+{
+ int offset;
+ int entry;
+ struct cvmx_pki_qpg_config qpg_cfg;
+
+ memset(&qpg_cfg, 0, sizeof(qpg_cfg));
+ if (pki_helper_debug)
+ /* Fix: debug format was missing its trailing newline */
+ debug("pki-helper:intf_qpg port %d qpg_base %d num_entry %d\n",
+ port, qpg_base, num_entry);
+ offset = cvmx_pki_qpg_entry_alloc(node, qpg_base, num_entry);
+ if (offset == CVMX_RESOURCE_ALREADY_RESERVED) {
+ debug("pki-helper: INFO: qpg entries will be shared\n");
+ return offset;
+ } else if (offset == CVMX_RESOURCE_ALLOC_FAILED) {
+ debug("pki-helper: ERROR: qpg entries not available\n");
+ return offset;
+ } else if (intfsch->qpg_base < 0) {
+ intfsch->qpg_base = offset;
+ }
+ if (pki_helper_debug)
+ debug("qpg_base allocated is %d\n", offset);
+ for (entry = 0; entry < num_entry; entry++) {
+ qpg_cfg.port_add = intfsch->prt_s[port].qos_s[entry].port_add;
+ qpg_cfg.aura_num = intfsch->prt_s[port].qos_s[entry].aura_num;
+ qpg_cfg.grp_ok = intfsch->prt_s[port].qos_s[entry].sso_grp;
+ qpg_cfg.grp_bad = intfsch->prt_s[port].qos_s[entry].sso_grp;
+ cvmx_pki_write_qpg_entry(node, (offset + entry), &qpg_cfg);
+ }
+ return offset;
+}
+
+/**
+ * This function sets up the global pool, aura and sso group
+ * resources which application can use between any interfaces
+ * and ports.  Each resource is only allocated when its setup_* flag
+ * is set and its number is still negative (not yet assigned).
+ *
+ * NOTE(review): 'node' (an int) is printed with %u below -- harmless
+ * for non-negative nodes, but %d would match the type.
+ *
+ * @param node node number
+ * @param gblsch pointer to struct containing global
+ * scheduling parameters; updated in place.
+ * @return 0 on success, negative on allocation failure.
+ */
+int cvmx_helper_pki_set_gbl_schd(int node, struct cvmx_pki_global_schd *gblsch)
+{
+ int rs;
+
+ if (gblsch->setup_pool && gblsch->pool_num < 0) {
+ if (pki_helper_debug)
+ debug("%s: gbl setup global pool %d buff_size %d blocks %d\n",
+ __func__, gblsch->pool_num,
+ (int)gblsch->pool_buff_size,
+ (int)gblsch->pool_max_buff);
+
+ gblsch->_pool = cvmx_fpa3_setup_fill_pool(
+ node, gblsch->pool_num, gblsch->pool_name,
+ gblsch->pool_buff_size, gblsch->pool_max_buff, NULL);
+
+ if (!__cvmx_fpa3_pool_valid(gblsch->_pool)) {
+ cvmx_printf("ERROR: %s: POOL %u:%d unavailable\n",
+ __func__, node, gblsch->pool_num);
+ return -1;
+ }
+
+ gblsch->pool_num = gblsch->_pool.lpool;
+
+ if (pki_helper_debug)
+ debug("pool alloced is %d\n", gblsch->pool_num);
+ }
+ if (gblsch->setup_aura && gblsch->aura_num < 0) {
+ if (pki_helper_debug)
+ debug("%s: gbl setup global aura %d pool %d blocks %d\n",
+ __func__, gblsch->aura_num, gblsch->pool_num,
+ (int)gblsch->aura_buff_cnt);
+
+ gblsch->_aura = cvmx_fpa3_set_aura_for_pool(
+ gblsch->_pool, gblsch->aura_num, gblsch->aura_name,
+ gblsch->pool_buff_size, gblsch->aura_buff_cnt);
+
+ if (!__cvmx_fpa3_aura_valid(gblsch->_aura)) {
+ cvmx_printf("ERROR: %s: AURA %u:%d unavailable\n",
+ __func__, node, gblsch->aura_num);
+ return -1;
+ }
+
+ gblsch->aura_num = gblsch->_aura.laura;
+
+ if (pki_helper_debug)
+ debug("aura alloced is %d\n", gblsch->aura_num);
+ }
+ if (gblsch->setup_sso_grp && gblsch->sso_grp < 0) {
+ rs = cvmx_sso_reserve_group(node);
+ if (rs < 0) {
+ debug("pki-helper:gbl: ERROR: sso grp not available\n");
+ return rs;
+ }
+ /* encode the node in bits [15:8] of the group id */
+ gblsch->sso_grp = rs | (node << 8);
+ if (pki_helper_debug)
+ debug("pki-helper:gbl: sso grp alloced is %d\n",
+ gblsch->sso_grp);
+ }
+ return 0;
+}
+
+/**
+ * This function sets up scheduling parameters (pool, aura, sso group etc)
+ * of an ipd port: reserves per-port and per-QOS resources, allocates
+ * and writes the QPG entries, allocates/configures a style and maps
+ * the port's pkind to it.
+ * @param ipd_port ipd port number
+ * @param prtsch pointer to struct containing port's
+ * scheduling parameters; updated in place.
+ * @return 0 on success, negative on allocation failure.
+ */
+int cvmx_helper_pki_init_port(int ipd_port, struct cvmx_pki_prt_schd *prtsch)
+{
+ int num_qos;
+ int qos;
+ struct cvmx_pki_qpg_config qpg_cfg;
+ struct cvmx_pki_qos_schd *qossch;
+ struct cvmx_pki_style_config style_cfg;
+ struct cvmx_pki_pkind_config pknd_cfg;
+ int xiface = cvmx_helper_get_interface_num(ipd_port);
+ int pknd;
+ u16 mbuff_size;
+ int rs;
+
+ struct cvmx_xport xp = cvmx_helper_ipd_port_to_xport(ipd_port);
+
+ num_qos = cvmx_helper_pki_get_num_qpg_entry(prtsch->qpg_qos);
+ mbuff_size = prtsch->pool_buff_size;
+ memset(&qpg_cfg, 0, sizeof(qpg_cfg));
+
+ /* Reserve port resources */
+ rs = __cvmx_helper_pki_port_rsrcs(xp.node, prtsch);
+ if (rs)
+ return rs;
+ /* Reserve qpg resources */
+ if (prtsch->qpg_base < 0) {
+ rs = cvmx_pki_qpg_entry_alloc(xp.node, prtsch->qpg_base,
+ num_qos);
+ if (rs < 0) {
+ debug("pki-helper:port%d:ERROR: qpg entries not available\n",
+ ipd_port);
+ return CVMX_RESOURCE_ALLOC_FAILED;
+ }
+ prtsch->qpg_base = rs;
+ if (pki_helper_debug)
+ debug("pki-helper:port-init: to port %d, qpg_base %d allocated\n",
+ ipd_port, prtsch->qpg_base);
+ }
+
+ if (prtsch->qpg_qos) {
+ /* One QPG entry per qos level; inherit port-level resources
+ * for any qos level without its own pool/aura/group.
+ */
+ for (qos = 0; qos < num_qos; qos++) {
+ qossch = &prtsch->qos_s[qos];
+ if (!qossch->pool_per_qos)
+ qossch->pool_num = prtsch->pool_num;
+ else if (qossch->pool_buff_size < mbuff_size)
+ mbuff_size = qossch->pool_buff_size;
+ if (!qossch->aura_per_qos)
+ qossch->aura_num = prtsch->aura_num;
+ if (!qossch->sso_grp_per_qos)
+ qossch->sso_grp = prtsch->sso_grp;
+
+ /* Reserve qos resources */
+ rs = __cvmx_helper_pki_qos_rsrcs(xp.node, qossch);
+ if (rs)
+ return rs;
+ qpg_cfg.port_add = qossch->port_add;
+ qpg_cfg.aura_num = qossch->aura_num;
+ qpg_cfg.grp_ok = qossch->sso_grp;
+ qpg_cfg.grp_bad = qossch->sso_grp;
+ cvmx_pki_write_qpg_entry(
+ xp.node, prtsch->qpg_base + qos, &qpg_cfg);
+ if (pki_helper_debug)
+ debug("%s: port %d qos %d has port_add %d aura %d grp %d\n",
+ __func__, ipd_port, qos, qossch->port_add,
+ qossch->aura_num, qossch->sso_grp);
+ } /* for qos 0 ... num_qos */
+ } else {
+ qpg_cfg.port_add = 0;
+ qpg_cfg.aura_num = prtsch->aura_num;
+ qpg_cfg.grp_ok = prtsch->sso_grp;
+ qpg_cfg.grp_bad = prtsch->sso_grp;
+ cvmx_pki_write_qpg_entry(xp.node, prtsch->qpg_base, &qpg_cfg);
+
+ if (pki_helper_debug)
+ debug("%s: non-qos port %d has aura %d grp %d\n",
+ __func__, ipd_port, prtsch->aura_num,
+ prtsch->sso_grp);
+ }
+
+ /* LR: The rest of code is common for qos and non-qos ports */
+
+ /* Allocate style here and map it to the port */
+ rs = cvmx_pki_style_alloc(xp.node, prtsch->style);
+ if (rs == CVMX_RESOURCE_ALREADY_RESERVED) {
+ debug("%s INFO: style will be shared\n", __func__);
+ } else if (rs == CVMX_RESOURCE_ALLOC_FAILED) {
+ debug("%s ERROR: style not available\n", __func__);
+ return CVMX_RESOURCE_ALLOC_FAILED;
+ }
+ /*
+ * NOTE(review): when rs == CVMX_RESOURCE_ALREADY_RESERVED this
+ * stores the sentinel into prtsch->style rather than keeping the
+ * requested style number -- confirm the sentinel's value and the
+ * intended sharing semantics against cvmx_pki_style_alloc().
+ */
+ prtsch->style = rs;
+
+ if (pki_helper_debug)
+ debug("%s: port %d has style %d\n", __func__, ipd_port,
+ prtsch->style);
+
+ /* Config STYLE to above QPG table base entry */
+ style_cfg = pki_dflt_style[xp.node];
+ style_cfg.parm_cfg.qpg_qos = prtsch->qpg_qos;
+ style_cfg.parm_cfg.qpg_base = prtsch->qpg_base;
+ style_cfg.parm_cfg.qpg_port_msb = 0;
+ style_cfg.parm_cfg.qpg_port_sh = 0;
+ style_cfg.parm_cfg.mbuff_size = mbuff_size;
+ cvmx_pki_write_style_config(xp.node, prtsch->style,
+ CVMX_PKI_CLUSTER_ALL, &style_cfg);
+
+ /* Update PKND with initial STYLE */
+ pknd = cvmx_helper_get_pknd(
+ xiface, cvmx_helper_get_interface_index_num(ipd_port));
+ cvmx_pki_read_pkind_config(xp.node, pknd, &pknd_cfg);
+ pknd_cfg.initial_style = prtsch->style;
+ pknd_cfg.fcs_pres = __cvmx_helper_get_has_fcs(xiface);
+ cvmx_pki_write_pkind_config(xp.node, pknd, &pknd_cfg);
+
+ return 0;
+}
+
+/**
+ * This function sets up scheduling parameters (pool, aura, sso group etc)
+ * of an interface (all ports/channels on that interface).
+ * @param xiface interface number with node.
+ * @param intfsch pointer to struct containing interface
+ * scheduling parameters.
+ * @param gblsch pointer to struct containing global scheduling parameters
+ * (can be NULL if not used)
+ */
+int cvmx_helper_pki_init_interface(const int xiface,
+ struct cvmx_pki_intf_schd *intfsch,
+ struct cvmx_pki_global_schd *gblsch)
+{
+ const u16 num_ports = cvmx_helper_ports_on_interface(xiface);
+ u8 qos;
+ u16 port = num_ports;
+ u8 port_msb = 0;
+ u8 port_shift = 0;
+ u16 num_entry = 0;
+ u8 num_qos;
+ int pknd;
+ int rs;
+ int has_fcs;
+ int ipd_port;
+ int qpg_base;
+ u64 mbuff_size = 0;
+ struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
+ enum cvmx_pki_qpg_qos qpg_qos = CVMX_PKI_QPG_QOS_NONE;
+ struct cvmx_pki_qpg_config qpg_cfg;
+ struct cvmx_pki_prt_schd *prtsch;
+ struct cvmx_pki_qos_schd *qossch;
+ struct cvmx_pki_style_config style_cfg;
+ struct cvmx_pki_pkind_config pknd_cfg;
+
+ has_fcs = __cvmx_helper_get_has_fcs(xiface);
+ memset(&qpg_cfg, 0, sizeof(qpg_cfg));
+
+ if (pki_helper_debug)
+ debug("pki-helper:intf-init:intf0x%x initialize--------------------------------\n",
+ xiface);
+
+ if (!intfsch->pool_per_intf) {
+ if (gblsch) {
+ intfsch->_pool = gblsch->_pool;
+ intfsch->pool_num = gblsch->pool_num;
+ } else {
+ debug("ERROR:pki-helper:intf-init:intf0x%x: global scheduling is in use but is NULL\n",
+ xiface);
+ return -1;
+ }
+ } else {
+ if (!intfsch) {
+ debug("ERROR:pki-helper:intf-init:intf0x%x: interface scheduling pointer is NULL\n",
+ xiface);
+ return -1;
+ }
+ mbuff_size = intfsch->pool_buff_size;
+ }
+ if (!intfsch->aura_per_intf) {
+ intfsch->_aura = gblsch->_aura;
+ intfsch->aura_num = gblsch->aura_num;
+ }
+ if (!intfsch->sso_grp_per_intf)
+ intfsch->sso_grp = gblsch->sso_grp;
+
+ /* Allocate interface resources */
+ rs = __cvmx_helper_pki_intf_rsrcs(xi.node, intfsch);
+ if (rs)
+ return rs;
+
+ for (port = 0; port < num_ports; port++) {
+ prtsch = &intfsch->prt_s[port];
+
+ /* Skip invalid/disabled ports */
+ if (!cvmx_helper_is_port_valid(xiface, port) ||
+ prtsch->cfg_port)
+ continue;
+
+ if (!prtsch->pool_per_prt) {
+ prtsch->pool_num = intfsch->pool_num;
+ prtsch->_pool = intfsch->_pool;
+ prtsch->pool_buff_size = intfsch->pool_buff_size;
+ } else if (prtsch->pool_buff_size < mbuff_size || !mbuff_size) {
+ mbuff_size = prtsch->pool_buff_size;
+ }
+ if (!prtsch->aura_per_prt) {
+ prtsch->aura_num = intfsch->aura_num;
+ prtsch->_aura = intfsch->_aura;
+ }
+ if (!prtsch->sso_grp_per_prt)
+ prtsch->sso_grp = intfsch->sso_grp;
+
+ rs = __cvmx_helper_pki_port_rsrcs(xi.node, prtsch);
+ if (rs)
+ return rs;
+
+ /* Port is using qpg qos to schedule packets to differnet aura or sso group */
+ num_qos = cvmx_helper_pki_get_num_qpg_entry(prtsch->qpg_qos);
+ if (pki_helper_debug)
+ debug("pki-helper:intf-init:intf%d: port %d used qpg_qos=%d\n",
+ xiface, port, prtsch->qpg_qos);
+
+ /* All ports will share the aura from port 0 for the respective qos */
+ /* Port 0 should never have this set to TRUE **/
+ if (intfsch->qos_share_aura && (port != 0)) {
+ if (pki_helper_debug)
+ debug("pki-helper:intf-init:intf0x%x All ports will share same aura for all qos\n",
+ xiface);
+ for (qos = 0; qos < num_qos; qos++) {
+ qossch = &prtsch->qos_s[qos];
+ prtsch->qpg_qos = intfsch->prt_s[0].qpg_qos;
+ qossch->pool_per_qos = intfsch->prt_s[0]
+ .qos_s[qos]
+ .pool_per_qos;
+ qossch->aura_per_qos = intfsch->prt_s[0]
+ .qos_s[qos]
+ .aura_per_qos;
+ qossch->pool_num =
+ intfsch->prt_s[0].qos_s[qos].pool_num;
+ qossch->_pool =
+ intfsch->prt_s[0].qos_s[qos]._pool;
+ qossch->aura_num =
+ intfsch->prt_s[0].qos_s[qos].aura_num;
+ qossch->_aura =
+ intfsch->prt_s[0].qos_s[qos]._aura;
+ }
+ }
+ if (intfsch->qos_share_grp && port != 0) {
+ if (pki_helper_debug)
+ debug("pki-helper:intf-init:intf0x%x: All ports will share same sso group for all qos\n",
+ xiface);
+ for (qos = 0; qos < num_qos; qos++) {
+ qossch = &prtsch->qos_s[qos];
+ qossch->sso_grp_per_qos =
+ intfsch->prt_s[0]
+ .qos_s[qos]
+ .sso_grp_per_qos;
+ qossch->sso_grp =
+ intfsch->prt_s[0].qos_s[qos].sso_grp;
+ }
+ }
+ for (qos = 0; qos < num_qos; qos++) {
+ qossch = &prtsch->qos_s[qos];
+ if (!qossch->pool_per_qos) {
+ qossch->pool_num = prtsch->pool_num;
+ qossch->_pool = prtsch->_pool;
+ if (pki_helper_debug)
+ debug("pki-helper:intf-init:intf0x%x: qos %d has pool %d\n",
+ xiface, qos, prtsch->pool_num);
+ } else if (qossch->pool_buff_size < mbuff_size ||
+ !mbuff_size)
+ mbuff_size = qossch->pool_buff_size;
+ if (!qossch->aura_per_qos) {
+ qossch->aura_num = prtsch->aura_num;
+ qossch->_aura = prtsch->_aura;
+ }
+ if (!qossch->sso_grp_per_qos)
+ qossch->sso_grp = prtsch->sso_grp;
+ rs = __cvmx_helper_pki_qos_rsrcs(xi.node, qossch);
+ if (rs)
+ return rs;
+ }
+ }
+ /* Using port shift and port msb to schedule packets from differnt
+ * port to differnt auras and different sso group
+ */
+ /* Using QPG_QOS to schedule packets to different aura and sso group */
+ /* If ports needs to send packets to different aura and sso group
+ * depending on packet qos
+ */
+ /* We will need to set up aura and sso group for each port and each qos
+ */
+ /* If all ports are using same style, they will be using same qpg_qos
+ * so check only for port 0
+ */
+ if (intfsch->style_per_intf) {
+ if (intfsch->prt_s[0].qpg_qos) {
+ /* all ports using same style will use same qos
+ * defined in port 0 config
+ */
+ qpg_qos = intfsch->prt_s[0].qpg_qos;
+ num_qos = cvmx_helper_pki_get_num_qpg_entry(
+ intfsch->prt_s[0].qpg_qos);
+ if (intfsch->qos_share_aura && intfsch->qos_share_grp) {
+ /* All ports will use same qpg offset so no
+ * need for port_msb or port shift
+ */
+ port_msb = 0;
+ port_shift = 0;
+ num_entry = num_qos;
+ qpg_base = intfsch->qpg_base;
+ rs = __cvmx_helper_pki_set_intf_qpg(xi.node, 0,
+ qpg_base,
+ num_entry,
+ intfsch);
+ if (rs == -1)
+ return rs;
+ intfsch->qpg_base = rs;
+ } else {
+ port_msb = 8;
+ port_shift = __cvmx_helper_pki_port_shift(
+ xiface, intfsch->prt_s[0].qpg_qos);
+ if (pki_helper_debug) {
+ debug("pki-helper: num qpg entry needed %d\n",
+ (int)num_entry);
+ debug("pki-helper:port_msb=%d port_shift=%d\n",
+ port_msb, port_shift);
+ }
+ num_entry = num_qos;
+ for (port = 0; port < num_ports; port++) {
+ /* Skip invalid/disabled ports */
+ prtsch = &intfsch->prt_s[port];
+ if (!cvmx_helper_is_port_valid(xiface,
+ port) ||
+ prtsch->cfg_port)
+ continue;
+ ipd_port = cvmx_helper_get_ipd_port(
+ xiface, port);
+ qpg_base = intfsch->qpg_base +
+ ((ipd_port & 0xff)
+ << port_shift);
+ rs = __cvmx_helper_pki_set_intf_qpg(
+ xi.node, port, qpg_base,
+ num_entry, intfsch);
+ if (rs == -1)
+ return rs;
+ prtsch->qpg_base = rs;
+ }
+ intfsch->qpg_base = intfsch->prt_s[0].qpg_base;
+ }
+ } else if (intfsch->prt_s[0].aura_per_prt ||
+ intfsch->prt_s[0].sso_grp_per_prt) {
+ /* Every port is using their own aura or group but no qos */
+ port_msb = 8;
+ port_shift = 0;
+ num_entry = 1;
+ if (pki_helper_debug)
+ debug("pki-helper: aura/grp_per_prt: num qpg entry needed %d\n",
+ (int)num_entry);
+ for (port = 0; port < num_ports; port++) {
+ prtsch = &intfsch->prt_s[port];
+ /* Skip invalid/disabled ports */
+ if (!cvmx_helper_is_port_valid(xiface, port) ||
+ prtsch->cfg_port)
+ continue;
+ ipd_port =
+ cvmx_helper_get_ipd_port(xiface, port);
+ qpg_base = intfsch->qpg_base +
+ ((ipd_port & 0xff) << port_shift);
+ if (pki_helper_debug)
+ debug("port %d intf_q_base=%d q_base= %d\n",
+ port, intfsch->qpg_base,
+ qpg_base);
+ qpg_base = cvmx_pki_qpg_entry_alloc(
+ xi.node, qpg_base, num_entry);
+ if (qpg_base ==
+ CVMX_RESOURCE_ALREADY_RESERVED) {
+ debug("pki-helper: INFO: qpg entries will be shared\n");
+ } else if (qpg_base ==
+ CVMX_RESOURCE_ALLOC_FAILED) {
+ debug("pki-helper: ERROR: qpg entries not available\n");
+ return qpg_base;
+ }
+
+ if (intfsch->qpg_base < 0)
+ intfsch->qpg_base = qpg_base;
+ prtsch->qpg_base = qpg_base;
+
+ qpg_cfg.port_add = 0;
+ qpg_cfg.aura_num = prtsch->aura_num;
+ qpg_cfg.grp_ok = prtsch->sso_grp;
+ qpg_cfg.grp_bad = prtsch->sso_grp;
+ cvmx_pki_write_qpg_entry(xi.node, qpg_base,
+ &qpg_cfg);
+ }
+ intfsch->qpg_base = intfsch->prt_s[0].qpg_base;
+ } else {
+ /* All ports on that intf use same port_add,
+ * aura & sso grps
+ */
+ /* All ports will use same qpg offset so no need for
+ * port_msb or port shift
+ */
+ port_msb = 0;
+ port_shift = 0;
+ num_entry = 1;
+ qpg_base = intfsch->qpg_base;
+ qpg_base = cvmx_pki_qpg_entry_alloc(xi.node, qpg_base,
+ num_entry);
+ if (qpg_base == CVMX_RESOURCE_ALREADY_RESERVED) {
+ debug("pki-helper: INFO: qpg entries will be shared\n");
+ } else if (qpg_base == CVMX_RESOURCE_ALLOC_FAILED) {
+ debug("pki-helper: ERROR: qpg entries not available\n");
+ return qpg_base;
+ }
+ intfsch->qpg_base = qpg_base;
+
+ qpg_cfg.port_add = 0;
+ qpg_cfg.aura_num = intfsch->aura_num;
+ qpg_cfg.grp_ok = intfsch->sso_grp;
+ qpg_cfg.grp_bad = intfsch->sso_grp;
+ cvmx_pki_write_qpg_entry(xi.node, qpg_base, &qpg_cfg);
+ }
+ if (!mbuff_size) {
+ if (!gblsch->setup_pool) {
+ debug("No pool has setup for intf 0x%x\n",
+ xiface);
+ return -1;
+ }
+ mbuff_size = gblsch->pool_buff_size;
+ debug("interface %d on node %d is using global pool\n",
+ xi.interface, xi.node);
+ }
+ /* Allocate style here and map it to all ports on interface */
+ rs = cvmx_pki_style_alloc(xi.node, intfsch->style);
+ if (rs == CVMX_RESOURCE_ALREADY_RESERVED) {
+ debug("passthrough: INFO: style will be shared\n");
+ } else if (rs == CVMX_RESOURCE_ALLOC_FAILED) {
+ debug("passthrough: ERROR: style not available\n");
+ return CVMX_RESOURCE_ALLOC_FAILED;
+ }
+
+ intfsch->style = rs;
+ if (pki_helper_debug)
+ debug("style %d allocated intf 0x%x qpg_base %d\n",
+ intfsch->style, xiface, intfsch->qpg_base);
+ style_cfg = pki_dflt_style[xi.node];
+ style_cfg.parm_cfg.qpg_qos = qpg_qos;
+ style_cfg.parm_cfg.qpg_base = intfsch->qpg_base;
+ style_cfg.parm_cfg.qpg_port_msb = port_msb;
+ style_cfg.parm_cfg.qpg_port_sh = port_shift;
+ style_cfg.parm_cfg.mbuff_size = mbuff_size;
+ cvmx_pki_write_style_config(xi.node, intfsch->style,
+ CVMX_PKI_CLUSTER_ALL, &style_cfg);
+
+ for (port = 0; port < num_ports; port++) {
+ prtsch = &intfsch->prt_s[port];
+ /* Skip invalid/disabled ports */
+ if (!cvmx_helper_is_port_valid(xiface, port) ||
+ prtsch->cfg_port)
+ continue;
+ prtsch->style = intfsch->style;
+ pknd = cvmx_helper_get_pknd(xiface, port);
+ cvmx_pki_read_pkind_config(xi.node, pknd, &pknd_cfg);
+ pknd_cfg.initial_style = intfsch->style;
+ pknd_cfg.fcs_pres = has_fcs;
+ cvmx_pki_write_pkind_config(xi.node, pknd, &pknd_cfg);
+ }
+ } else {
+ port_msb = 0;
+ port_shift = 0;
+ for (port = 0; port < num_ports; port++) {
+ prtsch = &intfsch->prt_s[port];
+ /* Skip invalid/disabled ports */
+ if (!cvmx_helper_is_port_valid(xiface, port) ||
+ prtsch->cfg_port)
+ continue;
+ if (prtsch->qpg_qos && intfsch->qos_share_aura &&
+ intfsch->qos_share_grp && port != 0) {
+ if (pki_helper_debug)
+ debug("intf 0x%x has all ports share qos aura n grps\n",
+ xiface);
+ /* Ports have differnet styles but want
+ * to share same qpg entries.
+ * this might never be the case
+ */
+ prtsch->qpg_base = intfsch->prt_s[0].qpg_base;
+ }
+ ipd_port = cvmx_helper_get_ipd_port(xiface, port);
+ cvmx_helper_pki_init_port(ipd_port, prtsch);
+ }
+ }
+ return 0;
+}
+
+/**
+ * Read the complete PKI configuration of one port back from hardware.
+ *
+ * Resolves the pkind used by the port, reads its pkind configuration,
+ * then reads the style configuration from the clusters attached to the
+ * pkind's cluster group.
+ *
+ * @param xipd_port ipd port number (with node) to query
+ * @param port_cfg  structure that receives the configuration read
+ */
+void cvmx_pki_get_port_config(int xipd_port,
+			      struct cvmx_pki_port_config *port_cfg)
+{
+	struct cvmx_xport xp = cvmx_helper_ipd_port_to_xport(xipd_port);
+	cvmx_pki_icgx_cfg_t icg_cfg;
+	int iface, idx, pkind, istyle, clusters;
+
+	/* Resolve the pkind this ipd port uses */
+	iface = cvmx_helper_get_interface_num(xipd_port);
+	idx = cvmx_helper_get_interface_index_num(xipd_port);
+	pkind = cvmx_helper_get_pknd(iface, idx);
+
+	cvmx_pki_read_pkind_config(xp.node, pkind, &port_cfg->pkind_cfg);
+	istyle = port_cfg->pkind_cfg.initial_style;
+	icg_cfg.u64 = csr_rd_node(
+		xp.node, CVMX_PKI_ICGX_CFG(port_cfg->pkind_cfg.cluster_grp));
+	clusters = icg_cfg.s.clusters;
+	cvmx_pki_read_style_config(xp.node, istyle, clusters,
+				   &port_cfg->style_cfg);
+}
+
+/**
+ * Write the complete PKI configuration of one port to hardware.
+ *
+ * The pkind configuration is written first; if that fails the style
+ * configuration is left untouched. Otherwise the style configuration
+ * is written to the clusters attached to the pkind's cluster group.
+ *
+ * @param xipd_port ipd port number (with node) to configure
+ * @param port_cfg  structure containing the parameters to write
+ */
+void cvmx_pki_set_port_config(int xipd_port,
+			      struct cvmx_pki_port_config *port_cfg)
+{
+	struct cvmx_xport xp = cvmx_helper_ipd_port_to_xport(xipd_port);
+	cvmx_pki_icgx_cfg_t icg_cfg;
+	int iface, idx, pkind, istyle, clusters;
+
+	/* Resolve the pkind this ipd port uses */
+	iface = cvmx_helper_get_interface_num(xipd_port);
+	idx = cvmx_helper_get_interface_index_num(xipd_port);
+	pkind = cvmx_helper_get_pknd(iface, idx);
+
+	if (cvmx_pki_write_pkind_config(xp.node, pkind, &port_cfg->pkind_cfg))
+		return;
+	istyle = port_cfg->pkind_cfg.initial_style;
+	icg_cfg.u64 = csr_rd_node(
+		xp.node, CVMX_PKI_ICGX_CFG(port_cfg->pkind_cfg.cluster_grp));
+	clusters = icg_cfg.s.clusters;
+	cvmx_pki_write_style_config(xp.node, istyle, clusters,
+				    &port_cfg->style_cfg);
+}
+
+/**
+ * This function displays all the PKI parameters related to that
+ * particular port.
+ *
+ * @param xipd_port ipd port number to display parameters of
+ */
+void cvmx_helper_pki_show_port_config(int xipd_port)
+{
+	int xiface, index, pknd;
+	struct cvmx_xport xp = cvmx_helper_ipd_port_to_xport(xipd_port);
+
+	xiface = cvmx_helper_get_interface_num(xipd_port);
+	index = cvmx_helper_get_interface_index_num(xipd_port);
+	pknd = cvmx_helper_get_pknd(xiface, index);
+	debug("Showing stats for intf 0x%x port %d------------------\n", xiface,
+	      index);
+	cvmx_pki_show_pkind_attributes(xp.node, pknd);
+	/* "STAUS" typo fixed in the trailer banner */
+	debug("END STATUS------------------------\n\n");
+}
+
+/**
+ * Apply PKI errata workarounds.
+ *
+ * On CN78XX pass 1.x, force the PKI to wait for a packet before
+ * requesting FPA buffers (fpa_wait = CVMX_PKI_WAIT_PKT).
+ *
+ * @param node node number to apply the workaround on
+ */
+void cvmx_helper_pki_errata(int node)
+{
+	struct cvmx_pki_global_config gbl_cfg;
+
+	/* Workaround applies only to CN78XX pass 1.x */
+	if (!OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X))
+		return;
+
+	cvmx_pki_read_global_config(node, &gbl_cfg);
+	gbl_cfg.fpa_wait = CVMX_PKI_WAIT_PKT;
+	cvmx_pki_write_global_config(node, &gbl_cfg);
+}
+
+/**
+ * Map a PKI layer-type code to a printable name for debug output.
+ *
+ * @param ltype CVMX_PKI_LTYPE_E_* value
+ * @return parenthesized name string, or "" for unknown types
+ */
+static const char *pki_ltype_sprint(int ltype)
+{
+	static const struct {
+		int type;
+		const char *name;
+	} names[] = {
+		{ CVMX_PKI_LTYPE_E_ENET, "(ENET)" },
+		{ CVMX_PKI_LTYPE_E_VLAN, "(VLAN)" },
+		{ CVMX_PKI_LTYPE_E_SNAP_PAYLD, "(SNAP_PAYLD)" },
+		{ CVMX_PKI_LTYPE_E_ARP, "(ARP)" },
+		{ CVMX_PKI_LTYPE_E_RARP, "(RARP)" },
+		{ CVMX_PKI_LTYPE_E_IP4, "(IP4)" },
+		{ CVMX_PKI_LTYPE_E_IP4_OPT, "(IP4_OPT)" },
+		{ CVMX_PKI_LTYPE_E_IP6, "(IP6)" },
+		{ CVMX_PKI_LTYPE_E_IP6_OPT, "(IP6_OPT)" },
+		{ CVMX_PKI_LTYPE_E_IPSEC_ESP, "(IPSEC_ESP)" },
+		{ CVMX_PKI_LTYPE_E_IPFRAG, "(IPFRAG)" },
+		{ CVMX_PKI_LTYPE_E_IPCOMP, "(IPCOMP)" },
+		{ CVMX_PKI_LTYPE_E_TCP, "(TCP)" },
+		{ CVMX_PKI_LTYPE_E_UDP, "(UDP)" },
+		{ CVMX_PKI_LTYPE_E_SCTP, "(SCTP)" },
+		{ CVMX_PKI_LTYPE_E_UDP_VXLAN, "(UDP_VXLAN)" },
+		{ CVMX_PKI_LTYPE_E_GRE, "(GRE)" },
+		{ CVMX_PKI_LTYPE_E_NVGRE, "(NVGRE)" },
+		{ CVMX_PKI_LTYPE_E_GTP, "(GTP)" },
+	};
+	unsigned int i;
+
+	for (i = 0; i < sizeof(names) / sizeof(names[0]); i++)
+		if (names[i].type == ltype)
+			return names[i].name;
+
+	return "";
+}
+
+/**
+ * Dump a CN78XX-format work queue entry (WQE) to the debug log.
+ * Words 0 and 1 are always printed with their decoded fields; words
+ * 2-4 and the extended data words are printed only when non-zero.
+ *
+ * @param wqp pointer to the 78xx work queue entry to dump
+ */
+void cvmx_pki_dump_wqe(const cvmx_wqe_78xx_t *wqp)
+{
+ int i;
+ /* it is not cvmx_shared so per core only */
+ static u64 count;
+
+ debug("Wqe entry for packet %lld\n", (unsigned long long)count++);
+ debug(" WORD%02d: %016llx", 0, (unsigned long long)wqp->word0.u64);
+ debug(" aura=0x%x", wqp->word0.aura);
+ debug(" apad=%d", wqp->word0.apad);
+ debug(" chan=0x%x", wqp->word0.channel);
+ debug(" bufs=%d", wqp->word0.bufs);
+ debug(" style=0x%x", wqp->word0.style);
+ debug(" pknd=0x%x", wqp->word0.pknd);
+ debug("\n");
+ debug(" WORD%02d: %016llx", 1, (unsigned long long)wqp->word1.u64);
+ debug(" len=%d", wqp->word1.len);
+ debug(" grp=0x%x", wqp->word1.grp);
+ debug(" tt=%s", OCT_TAG_TYPE_STRING(wqp->word1.tag_type));
+ debug(" tag=0x%08x", wqp->word1.tag);
+ debug("\n");
+ if (wqp->word2.u64) {
+ debug(" WORD%02d: %016llx", 2,
+ (unsigned long long)wqp->word2.u64);
+ /* NOTE(review): le_hdr_type is tested here for "[LAE]" and
+ * again below for "lety" -- this first check looks like it was
+ * meant for a layer-A field; confirm against the word2 layout.
+ */
+ if (wqp->word2.le_hdr_type)
+ debug(" [LAE]");
+ if (wqp->word2.lb_hdr_type)
+ debug(" lbty=%d%s", wqp->word2.lb_hdr_type,
+ pki_ltype_sprint(wqp->word2.lb_hdr_type));
+ if (wqp->word2.lc_hdr_type)
+ debug(" lcty=%d%s", wqp->word2.lc_hdr_type,
+ pki_ltype_sprint(wqp->word2.lc_hdr_type));
+ if (wqp->word2.ld_hdr_type)
+ debug(" ldty=%d%s", wqp->word2.ld_hdr_type,
+ pki_ltype_sprint(wqp->word2.ld_hdr_type));
+ if (wqp->word2.le_hdr_type)
+ debug(" lety=%d%s", wqp->word2.le_hdr_type,
+ pki_ltype_sprint(wqp->word2.le_hdr_type));
+ if (wqp->word2.lf_hdr_type)
+ debug(" lfty=%d%s", wqp->word2.lf_hdr_type,
+ pki_ltype_sprint(wqp->word2.lf_hdr_type));
+ if (wqp->word2.lg_hdr_type)
+ debug(" lgty=%d%s", wqp->word2.lg_hdr_type,
+ pki_ltype_sprint(wqp->word2.lg_hdr_type));
+ /* PCAM match flags set by parser rules */
+ if (wqp->word2.pcam_flag1)
+ debug(" PF1");
+ if (wqp->word2.pcam_flag2)
+ debug(" PF2");
+ if (wqp->word2.pcam_flag3)
+ debug(" PF3");
+ if (wqp->word2.pcam_flag4)
+ debug(" PF4");
+ if (wqp->word2.vlan_valid || wqp->word2.vlan_stacked) {
+ if (wqp->word2.vlan_valid)
+ debug(" vlan valid");
+ if (wqp->word2.vlan_stacked)
+ debug(" vlan stacked");
+ debug(" ");
+ }
+ if (wqp->word2.stat_inc)
+ debug(" stat_inc");
+ if (wqp->word2.is_frag)
+ debug(" L3 Fragment");
+ if (wqp->word2.is_l3_bcast)
+ debug(" L3 Broadcast");
+ if (wqp->word2.is_l3_mcast)
+ debug(" L3 Multicast");
+ if (wqp->word2.is_l2_bcast)
+ debug(" L2 Broadcast");
+ if (wqp->word2.is_l2_mcast)
+ debug(" L2 Multicast");
+ if (wqp->word2.is_raw)
+ debug(" RAW");
+ if (wqp->word2.err_level || wqp->word2.err_code) {
+ debug(" errlev=%d", wqp->word2.err_level);
+ debug(" opcode=0x%x", wqp->word2.err_code);
+ }
+ debug("\n");
+ }
+ /* Word 3 is the first packet buffer pointer */
+ debug(" WORD%02d: %016llx", 3,
+ (unsigned long long)wqp->packet_ptr.u64);
+
+ debug(" size=%d", wqp->packet_ptr.size);
+ debug(" addr=0x%llx", (unsigned long long)wqp->packet_ptr.addr);
+
+ debug("\n");
+ if (wqp->word4.u64) {
+ /* Word 4 holds per-layer header offsets */
+ debug(" WORD%02d: %016llx", 4,
+ (unsigned long long)wqp->word4.u64);
+ if (wqp->word4.ptr_layer_a)
+ debug(" laptr=%d", wqp->word4.ptr_layer_a);
+ if (wqp->word4.ptr_layer_b)
+ debug(" lbptr=%d", wqp->word4.ptr_layer_b);
+ if (wqp->word4.ptr_layer_c)
+ debug(" lcptr=%d", wqp->word4.ptr_layer_c);
+ if (wqp->word4.ptr_layer_d)
+ debug(" ldptr=%d", wqp->word4.ptr_layer_d);
+ if (wqp->word4.ptr_layer_e)
+ debug(" leptr=%d", wqp->word4.ptr_layer_e);
+ if (wqp->word4.ptr_layer_f)
+ debug(" lfptr=%d", wqp->word4.ptr_layer_f);
+ if (wqp->word4.ptr_layer_g)
+ debug(" lgptr=%d", wqp->word4.ptr_layer_g);
+ if (wqp->word4.ptr_vlan)
+ debug(" vlptr=%d", wqp->word4.ptr_vlan);
+ debug("\n");
+ }
+ /* Remaining extended data words, printed only when non-zero */
+ for (i = 0; i < 10; ++i) {
+ if (wqp->wqe_data[i])
+ debug(" WORD%02d: %016llx\n", i + 5,
+ (unsigned long long)wqp->wqe_data[i]);
+ }
+}
+
+/**
+ * Modifies maximum frame length to check.
+ * It modifies the global frame length set used by this port; any other
+ * port using the same set will be affected too.
+ * @param ipd_port ipd port (with node encoded, i.e. an xipd port --
+ * it is converted via cvmx_helper_ipd_port_to_xport below)
+ * for which to modify max len
+ * @param max_size maximum frame length
+ */
+void cvmx_pki_set_max_frm_len(int ipd_port, uint32_t max_size)
+{
+ /* On CN78XX frame check is enabled for a style n and
+ * PKI_CLX_STYLE_CFG[minmax_sel] selects which set of
+ * MAXLEN/MINLEN to use.
+ */
+ int xiface, index, pknd;
+ cvmx_pki_clx_stylex_cfg_t style_cfg;
+ cvmx_pki_frm_len_chkx_t frame_len;
+ struct cvmx_xport xp = cvmx_helper_ipd_port_to_xport(ipd_port);
+ int cluster = 0;
+ int style;
+ int sel;
+
+ /* get the pkind used by this ipd port */
+ xiface = cvmx_helper_get_interface_num(ipd_port);
+ index = cvmx_helper_get_interface_index_num(ipd_port);
+ pknd = cvmx_helper_get_pknd(xiface, index);
+
+ /* Find which MAXLEN/MINLEN register set the port's style selects,
+ * then update only the MAXLEN field of that shared set.
+ */
+ style = cvmx_pki_get_pkind_style(xp.node, pknd);
+ style_cfg.u64 =
+ csr_rd_node(xp.node, CVMX_PKI_CLX_STYLEX_CFG(style, cluster));
+ sel = style_cfg.s.minmax_sel;
+ frame_len.u64 = csr_rd_node(xp.node, CVMX_PKI_FRM_LEN_CHKX(sel));
+ frame_len.s.maxlen = max_size;
+ csr_wr_node(xp.node, CVMX_PKI_FRM_LEN_CHKX(sel), frame_len.u64);
+}
+
+/**
+ * Configure FCS handling for all the ports of a particular interface.
+ * (Only kept for backward compatibility; new applications can control
+ * this via the init_interface calls.)
+ *
+ * @param node      node number
+ * @param interface interface number
+ * @param nports    number of ports
+ * @param has_fcs   1 -- enable fcs check and fcs strip
+ *                  0 -- disable fcs check
+ */
+void cvmx_helper_pki_set_fcs_op(int node, int interface, int nports,
+				int has_fcs)
+{
+	cvmx_pki_clx_pkindx_cfg_t pkind_cfg;
+	unsigned int cl;
+	int xiface, idx, pkind;
+
+	xiface = cvmx_helper_node_interface_to_xiface(node, interface);
+	for (idx = 0; idx < nports; idx++) {
+		pkind = cvmx_helper_get_pknd(xiface, idx);
+		/* Update the pkind config in every cluster in use (pass2) */
+		for (cl = 0; cl < CVMX_PKI_NUM_CLUSTER; cl++) {
+			pkind_cfg.u64 = csr_rd_node(
+				node, CVMX_PKI_CLX_PKINDX_CFG(pkind, cl));
+			pkind_cfg.s.fcs_pres = has_fcs;
+			csr_wr_node(node,
+				    CVMX_PKI_CLX_PKINDX_CFG(pkind, cl),
+				    pkind_cfg.u64);
+		}
+		/* make sure fcs_strip and fcs_check is also enable/disable
+		 * for the style used by that port
+		 */
+		cvmx_pki_endis_fcs_check(node, pkind, has_fcs, has_fcs);
+	}
+}
+
+/**
+ * Select where the first packet data buffer lives relative to the WQE,
+ * for every configured port/style on the node.
+ *
+ * @param node            node number
+ * @param pkt_outside_wqe 0 = The packet link pointer will be at word
+ *                        [FIRST_SKIP] immediately followed by packet data,
+ *                        in the same buffer as the work queue entry.
+ *                        1 = The packet link pointer will be at word
+ *                        [FIRST_SKIP] in a new buffer separate from the
+ *                        work queue entry. Words following the WQE in the
+ *                        same cache line will be zeroed, other lines in
+ *                        the buffer will not be modified and will retain
+ *                        stale data (from the buffer's previous use). This
+ *                        setting may decrease the peak PKI performance by
+ *                        up to half on small packets. Make sure enough
+ *                        buffers are allocated for this mode.
+ */
+void cvmx_helper_pki_set_wqe_mode(int node, bool pkt_outside_wqe)
+{
+	int num_intf = cvmx_helper_get_number_of_interfaces();
+	int intf;
+
+	for (intf = 0; intf < num_intf; intf++) {
+		int nports = cvmx_helper_ports_on_interface(intf);
+		int xiface, prt;
+
+		/* Skip invalid/disabled interfaces */
+		if (nports <= 0)
+			continue;
+		xiface = cvmx_helper_node_interface_to_xiface(node, intf);
+		for (prt = 0; prt < nports; prt++) {
+			int pkind = cvmx_helper_get_pknd(xiface, prt);
+			u64 style = cvmx_pki_get_pkind_style(node, pkind);
+
+			cvmx_pki_set_wqe_mode(node, style, pkt_outside_wqe);
+		}
+	}
+}
+
+/**
+ * This function sets the Packet mode of all ports and styles to little-endian.
+ * It changes write operations of packet data to L2C to
+ * be in little-endian. Does not change the WQE header format, which is
+ * properly endian neutral.
+ * @param node node number.
+ */
+void cvmx_helper_pki_set_little_endian(int node)
+{
+ int interface, xiface, port, pknd;
+ int num_intf, num_ports;
+ u64 style;
+
+ /* Walk every port of every interface and apply the setting to the
+ * style its pkind uses
+ */
+ num_intf = cvmx_helper_get_number_of_interfaces();
+ for (interface = 0; interface < num_intf; interface++) {
+ num_ports = cvmx_helper_ports_on_interface(interface);
+ /*Skip invalid/disabled interfaces */
+ if (num_ports <= 0)
+ continue;
+ xiface = cvmx_helper_node_interface_to_xiface(node, interface);
+ for (port = 0; port < num_ports; port++) {
+ pknd = cvmx_helper_get_pknd(xiface, port);
+ style = cvmx_pki_get_pkind_style(node, pknd);
+ cvmx_pki_set_little_endian(node, style);
+ }
+ }
+}
+
+/**
+ * Re-route packets from one port to different SSO groups by editing
+ * the qpg entry referenced by the port's style.
+ *
+ * @param xipd_port pki port number (with node)
+ * @param grp_ok    sso group where good packets are routed
+ * @param grp_bad   sso group where errored packets are routed
+ *
+ * NOTE: assumes the port has its own style/profile and is not using
+ * qpg qos.
+ */
+void cvmx_helper_pki_modify_prtgrp(int xipd_port, int grp_ok, int grp_bad)
+{
+	struct cvmx_xport xp = cvmx_helper_ipd_port_to_xport(xipd_port);
+	struct cvmx_pki_port_config pcfg;
+	struct cvmx_pki_qpg_config qpg;
+	int qpg_index;
+
+	cvmx_pki_get_port_config(xipd_port, &pcfg);
+	/* TODO: expand to calculate the index in other cases, hrm:10.5.3 */
+	qpg_index = pcfg.style_cfg.parm_cfg.qpg_base;
+	cvmx_pki_read_qpg_entry(xp.node, qpg_index, &qpg);
+	qpg.grp_ok = grp_ok;
+	qpg.grp_bad = grp_bad;
+	cvmx_pki_write_qpg_entry(xp.node, qpg_index, &qpg);
+}
+
+/**
+ * Duplicate an existing style into a newly allocated style, copying
+ * its configuration to the given clusters.
+ *
+ * @param node         node number
+ * @param style        style to copy from
+ * @param cluster_mask clusters to read the config from / write it to
+ * @return the new style number, or -1 if none could be allocated
+ */
+int cvmx_pki_clone_style(int node, int style, u64 cluster_mask)
+{
+	struct cvmx_pki_style_config cfg;
+	int dup_style;
+
+	cvmx_pki_read_style_config(node, style, cluster_mask, &cfg);
+	dup_style = cvmx_pki_style_alloc(node, CVMX_PKI_FIND_AVAL_ENTRY);
+	if (dup_style < 0)
+		return -1;
+
+	cvmx_pki_write_style_config(node, dup_style, cluster_mask, &cfg);
+	return dup_style;
+}
+
+/* Optimize if use at runtime */
+/**
+ * Record a match value in a software PCAM shadow array.
+ *
+ * @param array       shadow array to write into
+ * @param match       value to store
+ * @param index       slot to store it in
+ * @param num_entries size of the array
+ * @return 0 on success, -1 if index is out of range
+ */
+int cvmx_pki_add_entry(u64 *array, u64 match, int index, int num_entries)
+{
+	/* Reject out-of-range slots, including negative indices (the
+	 * original check missed index < 0, allowing an out-of-bounds
+	 * write)
+	 */
+	if (index < 0 || index >= num_entries)
+		return -1;
+	array[index] = match;
+	return 0;
+}
+
+/* Optimize if use at runtime */
+/**
+ * Look up a match value in a software PCAM shadow array. Only the low
+ * 40 bits of each stored entry take part in the comparison.
+ *
+ * @param array       shadow array to search
+ * @param match       value to look for
+ * @param num_entries number of entries to scan
+ * @return index of the first matching entry, or -1 when not found
+ */
+int cvmx_pki_find_entry(u64 *array, u64 match, int num_entries)
+{
+	int idx;
+
+	for (idx = 0; idx < num_entries; idx++)
+		if ((array[idx] & 0xffffffffff) == match)
+			return idx;
+
+	return -1;
+}
+
+/**
+ * This function sends the packets to the specified style/profile if
+ * the specified mac address and specified input style/profile match.
+ * @param node node number.
+ * @param style style/profile to match against
+ * @param mac_addr mac address to match
+ * @param mac_addr_mask mask of mac address bits
+ * 1: exact match
+ * 0: don't care
+ * ex: to exactly match mac address 0x0a0203040506
+ * mask = 0xffffffffffff
+ * to match only first 2 bytes 0x0a02xxxxxxxx
+ * mask = 0xffff00000000
+ * @param final_style final style (contains aura/sso_grp etc) to
+ * route matched packet to.
+ * @return 0 on success, -1 if a pcam entry or interim style could not
+ * be allocated.
+ */
+int cvmx_helper_pki_route_dmac(int node, int style, u64 mac_addr,
+ u64 mac_addr_mask, int final_style)
+{
+ struct cvmx_pki_pcam_input pcam_input;
+ struct cvmx_pki_pcam_action pcam_action;
+ int bank;
+ int index;
+ int interim_style = style;
+ u64 cl_mask = CVMX_PKI_CLUSTER_ALL;
+ u32 data_to_match;
+ u32 data_to_mask;
+ u64 match_h;
+ u64 match_l;
+
+ memset(&pcam_input, 0, sizeof(pcam_input));
+ memset(&pcam_action, 0, sizeof(pcam_action));
+ /* Stage 1: match the high 16 bits of the DMAC (DMACH term, bank 0).
+ * A matching packet is moved to an interim style so stage 2 can
+ * distinguish it.
+ */
+ data_to_match =
+ (mac_addr >> CVMX_PKI_DMACH_SHIFT) & CVMX_PKI_DMACH_MASK;
+ data_to_mask =
+ (mac_addr_mask >> CVMX_PKI_DMACH_SHIFT) & CVMX_PKI_DMACH_MASK;
+ match_h = (u64)(data_to_match & data_to_mask) | (u64)(style << 16);
+ if (!data_to_mask)
+ goto pcam_dmacl;
+ /* Reuse an already-programmed DMACH entry (and its interim style)
+ * when the same high-half match was installed before
+ */
+ index = cvmx_pki_find_entry(pcam_dmach, match_h,
+ CVMX_PKI_NUM_PCAM_ENTRY);
+
+ if (index >= 0) {
+ interim_style = (pcam_dmach[index] >> 40) & 0xffffffffff;
+ goto pcam_dmacl;
+ }
+ bank = 0;
+ index = cvmx_pki_pcam_entry_alloc(node, CVMX_PKI_FIND_AVAL_ENTRY, bank,
+ cl_mask);
+ if (index < 0) {
+ debug("ERROR: Allocating pcam entry node=%d bank=%d\n", node,
+ bank);
+ return -1;
+ }
+ pcam_input.style = style;
+ pcam_input.style_mask = 0xffffffffffffffff;
+ pcam_input.field = CVMX_PKI_PCAM_TERM_DMACH;
+ pcam_input.field_mask = 0xff;
+ pcam_input.data = data_to_match;
+ pcam_input.data_mask = data_to_mask;
+ pcam_action.parse_mode_chg = CVMX_PKI_PARSE_NO_CHG;
+ pcam_action.parse_flag_set = 0;
+ pcam_action.layer_type_set = CVMX_PKI_LTYPE_E_NONE;
+ /* Clone the input style so the DMACH hit can be expressed as a
+ * style delta (style_add)
+ */
+ interim_style = cvmx_pki_clone_style(node, style, cl_mask);
+ if (interim_style < 0) {
+ debug("ERROR: Failed to allocate interim style\n");
+ return -1;
+ }
+ pcam_action.style_add = interim_style - style;
+ pcam_action.pointer_advance = 0;
+ cvmx_pki_pcam_write_entry(node, index, cl_mask, pcam_input,
+ pcam_action); /*cluster_mask in pass2*/
+ /* Remember the interim style in bits 47:40 of the shadow entry */
+ match_h |= (u64)(((u64)interim_style << 40) & 0xff0000000000);
+ cvmx_pki_add_entry(pcam_dmach, match_h, index, CVMX_PKI_NUM_PCAM_ENTRY);
+pcam_dmacl:
+ /* Stage 2: match the low 32 bits of the DMAC (DMACL term, bank 1)
+ * against the interim style, routing hits to final_style
+ */
+ bank = 1;
+ data_to_match = (mac_addr & CVMX_PKI_DMACL_MASK);
+ data_to_mask = (mac_addr_mask & CVMX_PKI_DMACL_MASK);
+ if (!data_to_mask)
+ return 0;
+ match_l = (u64)(data_to_match & data_to_mask) |
+ ((u64)interim_style << 32);
+ if (cvmx_pki_find_entry(pcam_dmacl, match_l, CVMX_PKI_NUM_PCAM_ENTRY) >=
+ 0)
+ return 0;
+ index = cvmx_pki_pcam_entry_alloc(node, CVMX_PKI_FIND_AVAL_ENTRY, bank,
+ cl_mask);
+ if (index < 0) {
+ debug("ERROR: Allocating pcam entry node=%d bank=%d\n", node,
+ bank);
+ return -1;
+ }
+ cvmx_pki_add_entry(pcam_dmacl, match_l, index, CVMX_PKI_NUM_PCAM_ENTRY);
+ pcam_input.style = interim_style;
+ pcam_input.style_mask = 0xffffffffffffffff;
+ pcam_input.field = CVMX_PKI_PCAM_TERM_DMACL;
+ pcam_input.field_mask = 0xff;
+ pcam_input.data = data_to_match;
+ pcam_input.data_mask = data_to_mask;
+ /* customer need to decide if they want to resume parsing or terminate
+ * it, if further match found in pcam it will take precedence
+ */
+ pcam_action.parse_mode_chg = CVMX_PKI_PARSE_NO_CHG;
+ pcam_action.parse_flag_set = 0;
+ pcam_action.layer_type_set = CVMX_PKI_LTYPE_E_NONE;
+ pcam_action.style_add = final_style - interim_style;
+ pcam_action.pointer_advance = 0;
+ cvmx_pki_pcam_write_entry(node, index, cl_mask, pcam_input,
+ pcam_action); /*cluster_mask in pass2*/
+
+ return 0;
+}
+
+/**
+ * This function sends the packets to the specified sso group if
+ * the specified mac address and specified input port match.
+ * NOTE: This function will always create a new style/profile for the specified
+ * sso group even if style/profile already exist and if the style used by this ipd port is
+ * shared all the ports using that style will get affected.
+ * similar function to use: cvmx_helper_pki_route_dmac()
+ * @param xipd_port ipd port on which mac address match needs to be performed.
+ * @param mac_addr mac address to match
+ * @param mac_addr_mask mask of mac address bits
+ * 1: exact match
+ * 0: don't care
+ * ex: to exactly match mac address 0x0a0203040506
+ * mask = 0xffffffffffff
+ * to match only first 2 bytes 0x0a02xxxxxxxx
+ * mask = 0xffff00000000
+ * @param grp sso group to route matched packet to.
+ * @return success: final style containing routed sso group
+ * fail: -1
+ */
+int cvmx_helper_pki_route_prt_dmac(int xipd_port, u64 mac_addr,
+ u64 mac_addr_mask, int grp)
+{
+ int style;
+ int new_style;
+ int offset, index;
+ struct cvmx_pki_style_config st_cfg;
+ struct cvmx_pki_port_config port_cfg;
+ struct cvmx_pki_qpg_config qpg_cfg;
+ struct cvmx_xport xp = cvmx_helper_ipd_port_to_xport(xipd_port);
+ int node = xp.node;
+
+ /* 1. Get the current/initial style config used by this port */
+ cvmx_pki_get_port_config(xipd_port, &port_cfg);
+ style = port_cfg.pkind_cfg.initial_style;
+ st_cfg = port_cfg.style_cfg;
+
+ /* 2. Create new style/profile from current and modify it to steer
+ * traffic to specified grp
+ */
+ new_style = cvmx_pki_style_alloc(node, CVMX_PKI_FIND_AVAL_ENTRY);
+ if (new_style < 0) {
+ cvmx_printf("ERROR: %s: new style not available\n", __func__);
+ return -1;
+ }
+ /* 3. Allocate a qpg entry that routes both good and bad packets to
+ * the requested sso group, and point the new style at it
+ */
+ offset = st_cfg.parm_cfg.qpg_base;
+ cvmx_pki_read_qpg_entry(node, offset, &qpg_cfg);
+ qpg_cfg.qpg_base = CVMX_PKI_FIND_AVAL_ENTRY;
+ qpg_cfg.grp_ok = grp;
+ qpg_cfg.grp_bad = grp;
+ index = cvmx_helper_pki_set_qpg_entry(node, &qpg_cfg);
+ if (index < 0) {
+ cvmx_printf("ERROR: %s: new qpg entry not available\n",
+ __func__);
+ return -1;
+ }
+ st_cfg.parm_cfg.qpg_base = index;
+ cvmx_pki_write_style_config(node, new_style, CVMX_PKI_CLUSTER_ALL,
+ &st_cfg);
+ /* 4. Install the pcam dmac match routing to the new style */
+ cvmx_helper_pki_route_dmac(node, style, mac_addr, mac_addr_mask,
+ new_style);
+ return new_style;
+}
--
2.35.1
^ permalink raw reply related [flat|nested] 52+ messages in thread* [PATCH 18/52] mips: octeon: Add cvmx-helper-pko.c
2022-03-30 10:06 [PATCH 00/52] mips: octeon: Add ethernet support Stefan Roese
` (15 preceding siblings ...)
2022-03-30 10:06 ` [PATCH 17/52] mips: octeon: Add cvmx-helper-pki.c Stefan Roese
@ 2022-03-30 10:06 ` Stefan Roese
2022-03-30 10:06 ` [PATCH 19/52] mips: octeon: Add cvmx-helper-pko3.c Stefan Roese
` (32 subsequent siblings)
49 siblings, 0 replies; 52+ messages in thread
From: Stefan Roese @ 2022-03-30 10:06 UTC (permalink / raw)
To: u-boot; +Cc: awilliams, cchavva
From: Aaron Williams <awilliams@marvell.com>
Import cvmx-helper-pko.c from 2013 U-Boot. It will be used by the later
added drivers to support networking on the MIPS Octeon II / III
platforms.
Signed-off-by: Aaron Williams <awilliams@marvell.com>
Signed-off-by: Stefan Roese <sr@denx.de>
---
arch/mips/mach-octeon/cvmx-helper-pko.c | 312 ++++++++++++++++++++++++
1 file changed, 312 insertions(+)
create mode 100644 arch/mips/mach-octeon/cvmx-helper-pko.c
diff --git a/arch/mips/mach-octeon/cvmx-helper-pko.c b/arch/mips/mach-octeon/cvmx-helper-pko.c
new file mode 100644
index 000000000000..0dc7980b25f2
--- /dev/null
+++ b/arch/mips/mach-octeon/cvmx-helper-pko.c
@@ -0,0 +1,312 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018-2022 Marvell International Ltd.
+ *
+ * Helper Functions for the PKO
+ */
+
+#include <errno.h>
+#include <log.h>
+#include <time.h>
+#include <linux/delay.h>
+
+#include <mach/cvmx-regs.h>
+#include <mach/cvmx-csr.h>
+#include <mach/cvmx-bootmem.h>
+#include <mach/octeon-model.h>
+#include <mach/cvmx-fuse.h>
+#include <mach/octeon-feature.h>
+#include <mach/cvmx-qlm.h>
+#include <mach/octeon_qlm.h>
+#include <mach/cvmx-pcie.h>
+#include <mach/cvmx-coremask.h>
+#include <mach/cvmx-range.h>
+#include <mach/cvmx-global-resources.h>
+
+#include <mach/cvmx-agl-defs.h>
+#include <mach/cvmx-bgxx-defs.h>
+#include <mach/cvmx-ciu-defs.h>
+#include <mach/cvmx-gmxx-defs.h>
+#include <mach/cvmx-gserx-defs.h>
+#include <mach/cvmx-ilk-defs.h>
+#include <mach/cvmx-ipd-defs.h>
+#include <mach/cvmx-pcsx-defs.h>
+#include <mach/cvmx-pcsxx-defs.h>
+#include <mach/cvmx-pki-defs.h>
+#include <mach/cvmx-pko-defs.h>
+#include <mach/cvmx-xcv-defs.h>
+
+#include <mach/cvmx-hwpko.h>
+#include <mach/cvmx-ilk.h>
+#include <mach/cvmx-ipd.h>
+#include <mach/cvmx-pki.h>
+#include <mach/cvmx-pko3.h>
+#include <mach/cvmx-pko3-queue.h>
+#include <mach/cvmx-pko3-resources.h>
+
+#include <mach/cvmx-helper.h>
+#include <mach/cvmx-helper-board.h>
+#include <mach/cvmx-helper-cfg.h>
+
+#include <mach/cvmx-helper-bgx.h>
+#include <mach/cvmx-helper-cfg.h>
+#include <mach/cvmx-helper-util.h>
+#include <mach/cvmx-helper-pki.h>
+
+/* FPA pool used for PKO command-queue buffers; -1 = not yet configured */
+static s64 pko_fpa_config_pool = -1;
+/* Size in bytes of each PKO command-queue buffer */
+static u64 pko_fpa_config_size = 1024;
+/* Buffer count requested via the setters below; 0 until configured */
+static u64 pko_fpa_config_count;
+
+/**
+ * cvmx_override_pko_queue_priority(int pko_port, u64
+ * priorities[16]) is a function pointer. It is meant to allow
+ * customization of the PKO queue priorities based on the port
+ * number. Users should set this pointer to a function before
+ * calling any cvmx-helper operations.
+ */
+void (*cvmx_override_pko_queue_priority)(int ipd_port,
+ uint8_t *priorities) = NULL;
+
+/**
+ * Set the FPA pool, buffer size and buffer count to use for the PKO
+ * command queues.
+ */
+void cvmx_pko_set_cmd_que_pool_config(s64 pool, u64 buffer_size,
+ u64 buffer_count)
+{
+ pko_fpa_config_pool = pool;
+ pko_fpa_config_size = buffer_size;
+ pko_fpa_config_count = buffer_count;
+}
+
+/* Set only the buffer count of the PKO command queue pool. */
+void cvmx_pko_set_cmd_queue_pool_buffer_count(u64 buffer_count)
+{
+ pko_fpa_config_count = buffer_count;
+}
+
+/* Copy the PKO command queue pool configuration into @pko_pool. */
+void cvmx_pko_get_cmd_que_pool_config(cvmx_fpa_pool_config_t *pko_pool)
+{
+ pko_pool->pool_num = pko_fpa_config_pool;
+ pko_pool->buffer_size = pko_fpa_config_size;
+ pko_pool->buffer_count = pko_fpa_config_count;
+}
+
+/* Get the FPA pool number used for PKO command queues (-1 = unset). */
+int64_t cvmx_fpa_get_pko_pool(void)
+{
+ return pko_fpa_config_pool;
+}
+
+/**
+ * Gets the buffer size of the pko command queue pool
+ */
+u64 cvmx_fpa_get_pko_pool_block_size(void)
+{
+ return pko_fpa_config_size;
+}
+
+/**
+ * Gets the buffer count of the pko command queue pool
+ */
+u64 cvmx_fpa_get_pko_pool_buffer_count(void)
+{
+ return pko_fpa_config_count;
+}
+
+/**
+ * Initialize PKO command queue buffer pool
+ *
+ * Reserves the configured FPA pool unless it already has a block size
+ * set, computing the buffer count from the number of output queues
+ * plus an estimate of in-flight 3-word commands.
+ *
+ * @return the pool number; on allocation failure the negative rc from
+ * __cvmx_helper_initialize_fpa_pool is returned (truncated
+ * through a u8 -- see note below)
+ */
+static int cvmx_helper_pko_pool_init(void)
+{
+ u8 pool;
+ unsigned int buf_count;
+ unsigned int pkt_buf_count;
+ int rc;
+
+ /* Reserve pool */
+ /* NOTE(review): cvmx_fpa_get_pko_pool() returns s64 and may be -1
+ * (unconfigured); assigning it to a u8 wraps to 255 -- confirm the
+ * pool is always configured before this is called.
+ */
+ pool = cvmx_fpa_get_pko_pool();
+
+ /* Avoid redundant pool creation */
+ if (cvmx_fpa_get_block_size(pool) > 0) {
+#ifdef DEBUG
+ debug("WARNING: %s: pool %d already initialized\n", __func__,
+ pool);
+#endif
+ /* It is up to the app to have sufficient buffer count */
+ return pool;
+ }
+
+ /* Calculate buffer count: one per queue + 3-word-cmds * max_pkts */
+ pkt_buf_count = cvmx_fpa_get_packet_pool_buffer_count();
+ buf_count = CVMX_PKO_MAX_OUTPUT_QUEUES + (pkt_buf_count * 3) / 8;
+
+ /* Allocate pools for pko command queues */
+ rc = __cvmx_helper_initialize_fpa_pool(pool,
+ cvmx_fpa_get_pko_pool_block_size(),
+ buf_count, "PKO Cmd-bufs");
+
+ if (rc < 0)
+ debug("%s: ERROR: in PKO buffer pool\n", __func__);
+
+ /* On success rc is the pool number; a negative rc also flows
+ * through the u8 below (see note above)
+ */
+ pool = rc;
+ return pool;
+}
+
+/**
+ * Initialize the PKO: set up the command-queue buffer pool and the
+ * per-port configuration data, then bring up the PKO hardware.
+ *
+ * @return 0 on success, negative when the pool setup fails
+ */
+int cvmx_helper_pko_init(void)
+{
+	int ret = cvmx_helper_pko_pool_init();
+
+	if (ret < 0)
+		return ret;
+
+	__cvmx_helper_init_port_config_data(0);
+
+	cvmx_pko_hw_init(cvmx_fpa_get_pko_pool(),
+			 cvmx_fpa_get_pko_pool_block_size());
+	return 0;
+}
+
+/**
+ * @INTERNAL
+ * Setup the PKO for the ports on an interface. The number of
+ * queues per port and the priority of each PKO output queue
+ * is set here. PKO must be disabled when this function is called.
+ *
+ * @param interface to setup PKO for
+ *
+ * @return Zero on success, negative on failure
+ *
+ * @note This is for PKO1/PKO2, and is not used for PKO3.
+ */
+int __cvmx_helper_interface_setup_pko(int interface)
+{
+	/*
+	 * Each packet output queue has an associated priority. The
+	 * higher the priority, the more often it can send a packet. A
+	 * priority of 8 means it can send in all 8 rounds of
+	 * contention. We're going to make each queue one less than
+	 * the last. The vector of priorities has been extended to
+	 * support CN5xxx CPUs, where up to 16 queues can be
+	 * associated to a port. To keep backward compatibility we
+	 * don't change the initial 8 priorities and replicate them in
+	 * the second half. With per-core PKO queues (PKO lockless
+	 * operation) all queues have the same priority.
+	 */
+	/* uint8_t priorities[16] = {8,7,6,5,4,3,2,1,8,7,6,5,4,3,2,1}; */
+	u8 priorities[16] = { [0 ... 15] = 8 };
+
+	/*
+	 * Setup the IPD/PIP and PKO for the ports discovered
+	 * above. Here packet classification, tagging and output
+	 * priorities are set.
+	 */
+	int num_ports = cvmx_helper_ports_on_interface(interface);
+
+	while (num_ports--) {
+		int ipd_port;
+
+		if (!cvmx_helper_is_port_valid(interface, num_ports))
+			continue;
+
+		ipd_port = cvmx_helper_get_ipd_port(interface, num_ports);
+		/*
+		 * Give the user a chance to override the per queue
+		 * priorities.
+		 */
+		if (cvmx_override_pko_queue_priority)
+			cvmx_override_pko_queue_priority(ipd_port, priorities);
+
+		cvmx_pko_config_port(ipd_port,
+				     cvmx_pko_get_base_queue(ipd_port),
+				     cvmx_pko_get_num_queues(ipd_port),
+				     priorities);
+		/*
+		 * A dead "ipd_port++" was removed here: ipd_port is
+		 * recomputed from num_ports on every iteration, so the
+		 * increment had no effect.
+		 */
+	}
+	return 0;
+	/* NOTE:
+	 * Now this function is called for all chips including 68xx,
+	 * but on the 68xx it does not enable multiple pko_iports per
+	 * eport, while before it was doing 3 pko_iport per eport
+	 * buf the reason for that is not clear.
+	 */
+}
+
+/**
+ * Wait (up to 5 seconds) for a PKO queue to drain.
+ *
+ * @param queue a valid pko queue
+ * @return remaining queue length when giving up (0 when fully
+ *         drained), or a negative error from the pend-count read
+ */
+static int cvmx_helper_wait_pko_queue_drain(int queue)
+{
+	const int timeout_ms = 5 * 1000; /* Wait up to 5 seconds */
+	u64 start;
+	int pending;
+
+	pending = cvmx_pko_queue_pend_count(queue);
+	if (pending < 0)
+		return pending;
+
+	start = get_timer(0);
+	while (pending > 0) {
+		if (get_timer(start) >= timeout_ms)
+			break;
+		mdelay(1);
+		pending = cvmx_pko_queue_pend_count(queue);
+	}
+
+	return pending;
+}
+
+/**
+ * @INTERNAL
+ *
+ * Drain and wait until all PKO queues are empty.
+ *
+ * @return 0 when every queue drained, -1 if any queue failed to
+ * drain within the per-queue timeout
+ */
+int __cvmx_helper_pko_drain(void)
+{
+ int result = 0;
+
+ if (octeon_has_feature(OCTEON_FEATURE_PKND)) {
+ int queue, max_queue;
+
+ /* PKO2: queues are numbered globally, walk them directly */
+ max_queue = __cvmx_helper_cfg_pko_max_queue();
+ for (queue = 0; queue < max_queue; queue++) {
+ if (cvmx_helper_wait_pko_queue_drain(queue)) {
+ result = -1;
+ return result;
+ }
+ }
+ } else {
+ int num_interfaces = cvmx_helper_get_number_of_interfaces();
+ int interface, num_ports, index;
+
+ /* PKO1: walk every valid port and drain its queue range */
+ for (interface = 0; interface < num_interfaces; interface++) {
+ num_ports = cvmx_helper_ports_on_interface(interface);
+ for (index = 0; index < num_ports; index++) {
+ int pko_port;
+ int queue;
+ int max_queue;
+
+ if (!cvmx_helper_is_port_valid(interface,
+ index))
+ continue;
+ pko_port = cvmx_helper_get_ipd_port(interface,
+ index);
+ queue = cvmx_pko_get_base_queue(pko_port);
+ max_queue = queue +
+ cvmx_pko_get_num_queues(pko_port);
+ while (queue < max_queue) {
+ if (cvmx_helper_wait_pko_queue_drain(queue)) {
+ result = -1;
+ return result;
+ }
+ queue++;
+ }
+ }
+ }
+ }
+ return result;
+}
--
2.35.1
^ permalink raw reply related [flat|nested] 52+ messages in thread* [PATCH 19/52] mips: octeon: Add cvmx-helper-pko3.c
2022-03-30 10:06 [PATCH 00/52] mips: octeon: Add ethernet support Stefan Roese
` (16 preceding siblings ...)
2022-03-30 10:06 ` [PATCH 18/52] mips: octeon: Add cvmx-helper-pko.c Stefan Roese
@ 2022-03-30 10:06 ` Stefan Roese
2022-03-30 10:06 ` [PATCH 20/52] mips: octeon: Add cvmx-helper-rgmii.c Stefan Roese
` (31 subsequent siblings)
49 siblings, 0 replies; 52+ messages in thread
From: Stefan Roese @ 2022-03-30 10:06 UTC (permalink / raw)
To: u-boot; +Cc: awilliams, cchavva
From: Aaron Williams <awilliams@marvell.com>
Import cvmx-helper-pko3.c from 2013 U-Boot. It will be used by the later
added drivers to support networking on the MIPS Octeon II / III
platforms.
Signed-off-by: Aaron Williams <awilliams@marvell.com>
Signed-off-by: Stefan Roese <sr@denx.de>
---
arch/mips/mach-octeon/cvmx-helper-pko3.c | 1252 ++++++++++++++++++++++
1 file changed, 1252 insertions(+)
create mode 100644 arch/mips/mach-octeon/cvmx-helper-pko3.c
diff --git a/arch/mips/mach-octeon/cvmx-helper-pko3.c b/arch/mips/mach-octeon/cvmx-helper-pko3.c
new file mode 100644
index 000000000000..de9b2462111d
--- /dev/null
+++ b/arch/mips/mach-octeon/cvmx-helper-pko3.c
@@ -0,0 +1,1252 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018-2022 Marvell International Ltd.
+ *
+ * PKOv3 helper file
+ */
+
+#include <errno.h>
+#include <log.h>
+#include <time.h>
+#include <linux/delay.h>
+
+#include <mach/cvmx-regs.h>
+#include <mach/cvmx-csr.h>
+#include <mach/cvmx-bootmem.h>
+#include <mach/octeon-model.h>
+#include <mach/cvmx-fuse.h>
+#include <mach/octeon-feature.h>
+#include <mach/cvmx-qlm.h>
+#include <mach/octeon_qlm.h>
+#include <mach/cvmx-pcie.h>
+#include <mach/cvmx-coremask.h>
+#include <mach/cvmx-range.h>
+#include <mach/cvmx-global-resources.h>
+
+#include <mach/cvmx-agl-defs.h>
+#include <mach/cvmx-bgxx-defs.h>
+#include <mach/cvmx-ciu-defs.h>
+#include <mach/cvmx-gmxx-defs.h>
+#include <mach/cvmx-gserx-defs.h>
+#include <mach/cvmx-ilk-defs.h>
+#include <mach/cvmx-ipd-defs.h>
+#include <mach/cvmx-pcsx-defs.h>
+#include <mach/cvmx-pcsxx-defs.h>
+#include <mach/cvmx-pki-defs.h>
+#include <mach/cvmx-pko-defs.h>
+#include <mach/cvmx-xcv-defs.h>
+
+#include <mach/cvmx-hwpko.h>
+#include <mach/cvmx-ilk.h>
+#include <mach/cvmx-ipd.h>
+#include <mach/cvmx-pki.h>
+#include <mach/cvmx-pko3.h>
+#include <mach/cvmx-pko3-queue.h>
+#include <mach/cvmx-pko3-resources.h>
+
+#include <mach/cvmx-helper.h>
+#include <mach/cvmx-helper-board.h>
+#include <mach/cvmx-helper-cfg.h>
+
+#include <mach/cvmx-helper-bgx.h>
+#include <mach/cvmx-helper-cfg.h>
+#include <mach/cvmx-helper-util.h>
+#include <mach/cvmx-helper-pki.h>
+
+/* channels are present at L2 queue level by default */
+static const enum cvmx_pko3_level_e cvmx_pko_default_channel_level =
+ CVMX_PKO_L2_QUEUES;
+
+/* Zero-initialized const: debug output is compiled in but disabled */
+static const int debug;
+
+/*
+ * __pko_pkt_quota: per-interface packet limit derived from the AURA size
+ * __pko_pkt_budget: remaining packets across all interfaces; decremented
+ * as DQ limits are handed out and restored on shutdown
+ */
+static int __pko_pkt_budget, __pko_pkt_quota;
+
+/* These global variables are relevant for boot CPU only */
+static cvmx_fpa3_gaura_t __cvmx_pko3_aura[CVMX_MAX_NODES];
+
+/* This constant can not be modified, defined here for clarity only */
+#define CVMX_PKO3_POOL_BUFFER_SIZE 4096 /* 78XX PKO requires 4KB */
+
+/**
+ * @INTERNAL
+ *
+ * Build an owner tag based on interface/port
+ *
+ * The tag is used to mark global PKO queue resources so they can later be
+ * released as a group (see cvmx_pko_free_queues() in the shutdown path).
+ *
+ * @param ipd_port IPD port number (node + local CHAN_E value)
+ * @return owner tag: a fixed prefix with the masked IPD port in the
+ * low bits
+ */
+static int __cvmx_helper_pko3_res_owner(int ipd_port)
+{
+ int res_owner;
+ /* Arbitrary distinctive prefix — presumably just a recognizable
+ * marker for PKO3 helper ownership; confirm no other subsystem
+ * uses the same value
+ */
+ const int res_owner_pfix = 0x19d0 << 14;
+
+ ipd_port &= 0x3fff; /* 12-bit for local CHAN_E value + node */
+
+ res_owner = res_owner_pfix | ipd_port;
+
+ return res_owner;
+}
+
+/**
+ * Configure an AURA/POOL designated for PKO internal use.
+ *
+ * This pool is used for (a) memory buffers that store PKO descriptor queues,
+ * (b) buffers for use with PKO_SEND_JUMP_S sub-header.
+ *
+ * The buffers of type (a) are never accessed by software, and their number
+ * should be at least equal to 4 times the number of descriptor queues
+ * in use.
+ *
+ * Type (b) buffers are consumed by PKO3 command-composition code,
+ * and are released by the hardware upon completion of transmission.
+ *
+ * @returns -1 if the pool could not be established or 12-bit AURA
+ * that includes the node number for use in PKO3 initialization call.
+ *
+ * NOTE: Linux kernel should pass its own aura to PKO3 initialization
+ * function so that the buffers can be mapped into kernel space
+ * for when software needs to access their contents.
+ *
+ */
+static int __cvmx_pko3_config_memory(unsigned int node)
+{
+ cvmx_fpa3_gaura_t aura;
+ int aura_num;
+ unsigned int buf_count;
+ bool small_mem;
+ int i, num_intf = 0;
+ const unsigned int pkt_per_buf =
+ (CVMX_PKO3_POOL_BUFFER_SIZE / sizeof(u64) / 16);
+ const unsigned int base_buf_count = 1024 * 4;
+
+ /* Simulator has limited memory, but uses one interface at a time */
+ // small_mem = cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_SIM;
+ /* NOTE(review): the simulator detection above is disabled in this
+ * U-Boot port; small_mem is forced false and, along with
+ * base_buf_count, is only referenced via (void) casts below to
+ * silence unused-variable warnings
+ */
+ small_mem = false;
+
+ /* Count the number of live interfaces */
+ for (i = 0; i < cvmx_helper_get_number_of_interfaces(); i++) {
+ int xiface = cvmx_helper_node_interface_to_xiface(node, i);
+
+ if (CVMX_HELPER_INTERFACE_MODE_DISABLED !=
+ cvmx_helper_interface_get_mode(xiface))
+ num_intf++;
+ }
+
+ /* Derive per-interface packet quota and total budget from a fixed
+ * buffer count
+ */
+ buf_count = 1024;
+ __pko_pkt_quota = buf_count * pkt_per_buf;
+ __pko_pkt_budget = __pko_pkt_quota * num_intf;
+ (void)small_mem;
+ (void)base_buf_count;
+
+ if (debug)
+ debug("%s: Creating AURA with %u buffers for up to %d total packets, %d packets per interface\n",
+ __func__, buf_count, __pko_pkt_budget, __pko_pkt_quota);
+
+ aura = cvmx_fpa3_setup_aura_and_pool(node, -1, "PKO3 AURA", NULL,
+ CVMX_PKO3_POOL_BUFFER_SIZE,
+ buf_count);
+
+ if (!__cvmx_fpa3_aura_valid(aura)) {
+ printf("ERROR: %s AURA create failed\n", __func__);
+ return -1;
+ }
+
+ /* Encode node number into the returned 12-bit AURA handle */
+ aura_num = aura.node << 10 | aura.laura;
+
+ /* Store handle for destruction */
+ __cvmx_pko3_aura[node] = aura;
+
+ return aura_num;
+}
+
+/** Initialize a channelized port
+ * This is intended for LOOP, ILK and NPI interfaces which have one MAC
+ * per interface and need a channel per subinterface (e.g. ring).
+ * Each channel then may have 'num_queues' descriptor queues
+ * attached to it, which can also be prioritized or fair.
+ *
+ * @param xiface global interface number
+ * @param num_chans number of channels (one L2 queue each)
+ * @param num_queues descriptor queues per channel
+ * @param prioritized true for static priority among a channel's DQs,
+ * false for fair (round-robin) scheduling
+ * @return 0 on success, -1 on failure
+ */
+static int __cvmx_pko3_config_chan_interface(int xiface, unsigned int num_chans,
+ u8 num_queues, bool prioritized)
+{
+ int l1_q_num;
+ int l2_q_base;
+ enum cvmx_pko3_level_e level;
+ int res;
+ int parent_q, child_q;
+ unsigned int chan, dq;
+ int pko_mac_num;
+ u16 ipd_port;
+ int res_owner, prio;
+ unsigned int i;
+ struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
+ unsigned int node = xi.node;
+ char b1[12];
+
+ /* Clamp queue count: at least 1, and fall back to 1 per channel
+ * when the DQ pool cannot give each channel at least 3 queues
+ */
+ if (num_queues == 0)
+ num_queues = 1;
+ if ((cvmx_pko3_num_level_queues(CVMX_PKO_DESCR_QUEUES) / num_chans) < 3)
+ num_queues = 1;
+
+ if (prioritized && num_queues > 1)
+ prio = num_queues;
+ else
+ prio = -1;
+
+ if (debug)
+ debug("%s: configuring xiface %u:%u with %u chans %u queues each\n",
+ __func__, xi.node, xi.interface, num_chans, num_queues);
+
+ /* all channels all go to the same mac */
+ pko_mac_num = __cvmx_pko3_get_mac_num(xiface, 0);
+ if (pko_mac_num < 0) {
+ printf("ERROR: %s: Invalid interface\n", __func__);
+ return -1;
+ }
+
+ /* Resources of all channels on this port have common owner */
+ ipd_port = cvmx_helper_get_ipd_port(xiface, 0);
+
+ /* Build an identifiable owner */
+ res_owner = __cvmx_helper_pko3_res_owner(ipd_port);
+
+ /* Start configuration at L1/PQ */
+ level = CVMX_PKO_PORT_QUEUES;
+
+ /* Reserve port queue to make sure the MAC is not already configured */
+ l1_q_num = cvmx_pko_alloc_queues(node, level, res_owner, -1, 1);
+
+ if (l1_q_num < 0) {
+ printf("ERROR: %s: Reserving L1 PQ\n", __func__);
+ return -1;
+ }
+
+ res = cvmx_pko3_pq_config(node, pko_mac_num, l1_q_num);
+ if (res < 0) {
+ printf("ERROR: %s: Configuring L1 PQ\n", __func__);
+ return -1;
+ }
+
+ /* next queue level = L2/SQ */
+ level = __cvmx_pko3_sq_lvl_next(level);
+
+ /* allocate level 2 queues, one per channel */
+ l2_q_base =
+ cvmx_pko_alloc_queues(node, level, res_owner, -1, num_chans);
+ if (l2_q_base < 0) {
+ printf("ERROR: %s: allocation L2 SQ\n", __func__);
+ return -1;
+ }
+
+ /* Configure <num_chans> L2 children for PQ, non-prioritized */
+ res = cvmx_pko3_sq_config_children(node, level, l1_q_num, l2_q_base,
+ num_chans, -1);
+
+ if (res < 0) {
+ printf("ERROR: %s: Failed channel queues\n", __func__);
+ return -1;
+ }
+
+ /* map channels to l2 queues */
+ for (chan = 0; chan < num_chans; chan++) {
+ ipd_port = cvmx_helper_get_ipd_port(xiface, chan);
+ cvmx_pko3_map_channel(node, l1_q_num, l2_q_base + chan,
+ ipd_port);
+ }
+
+ /* next queue level = L3/SQ */
+ level = __cvmx_pko3_sq_lvl_next(level);
+ parent_q = l2_q_base;
+
+ /* Chain each intermediate level 1:1 below its parent, num_chans
+ * queues wide, until the DQ level is reached
+ */
+ do {
+ child_q = cvmx_pko_alloc_queues(node, level, res_owner, -1,
+ num_chans);
+
+ if (child_q < 0) {
+ printf("ERROR: %s: allocating %s\n", __func__,
+ __cvmx_pko3_sq_str(b1, level, child_q));
+ return -1;
+ }
+
+ for (i = 0; i < num_chans; i++) {
+ res = cvmx_pko3_sq_config_children(
+ node, level, parent_q + i, child_q + i, 1, 1);
+
+ if (res < 0) {
+ printf("ERROR: %s: configuring %s\n", __func__,
+ __cvmx_pko3_sq_str(b1, level, child_q));
+ return -1;
+ }
+
+ } /* for i */
+
+ parent_q = child_q;
+ level = __cvmx_pko3_sq_lvl_next(level);
+
+ /* Terminate loop on DQ level, it has special handling */
+ } while (level != CVMX_PKO_DESCR_QUEUES &&
+ level != CVMX_PKO_LEVEL_INVAL);
+
+ if (level != CVMX_PKO_DESCR_QUEUES) {
+ printf("ERROR: %s: level sequence error\n", __func__);
+ return -1;
+ }
+
+ /* Configure DQs, num_dqs per chan */
+ for (chan = 0; chan < num_chans; chan++) {
+ res = cvmx_pko_alloc_queues(node, level, res_owner, -1,
+ num_queues);
+
+ if (res < 0)
+ goto _fail;
+ dq = res;
+
+ if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_0) && (dq & 7))
+ debug("WARNING: %s: DQ# %u not integral of 8\n",
+ __func__, dq);
+
+ res = cvmx_pko3_sq_config_children(node, level, parent_q + chan,
+ dq, num_queues, prio);
+ if (res < 0)
+ goto _fail;
+
+ /* register DQ range with the translation table */
+ res = __cvmx_pko3_ipd_dq_register(xiface, chan, dq, num_queues);
+ if (res < 0)
+ goto _fail;
+ }
+
+ return 0;
+_fail:
+ /* NOTE(review): this message prints 'i', but the failures that jump
+ * here occur in the loop indexed by 'chan'; 'i' still holds its
+ * value from the earlier level-chaining loop — the reported channel
+ * number is likely wrong. Confirm and switch to 'chan' upstream.
+ */
+ debug("ERROR: %s: configuring queues for xiface %u:%u chan %u\n",
+ __func__, xi.node, xi.interface, i);
+ return -1;
+}
+
+/** Initialize a single Ethernet port with PFC-style channels
+ *
+ * One interface can contain multiple ports, this function is per-port
+ * Here, a physical port is allocated 8 logical channel, one per VLAN
+ * tag priority, one DQ is assigned to each channel, and all 8 DQs
+ * are registered for that IPD port.
+ * Note that the DQs are arranged such that the Ethernet QoS/PCP field
+ * can be used as an offset to the value returned by cvmx_pko_base_queue_get().
+ *
+ * For HighGig2 mode, 16 channels may be desired, instead of 8,
+ * but this function does not support that.
+ *
+ * @param xiface global interface number
+ * @param port port (sub-interface) index within the interface
+ * @return 0 on success, -1 on failure
+ */
+static int __cvmx_pko3_config_pfc_interface(int xiface, unsigned int port)
+{
+ enum cvmx_pko3_level_e level;
+ int pko_mac_num;
+ int l1_q_num, l2_q_base;
+ int child_q, parent_q;
+ int dq_base;
+ int res;
+ const unsigned int num_chans = 8;
+ cvmx_xiface_t xi = cvmx_helper_xiface_to_node_interface(xiface);
+ unsigned int node = xi.node;
+ u16 ipd_port;
+ int res_owner;
+ char b1[12];
+ unsigned int i;
+
+ if (debug)
+ debug("%s: configuring xiface %u:%u port %u with %u PFC channels\n",
+ __func__, node, xi.interface, port, num_chans);
+
+ /* Get MAC number for the iface/port */
+ pko_mac_num = __cvmx_pko3_get_mac_num(xiface, port);
+ if (pko_mac_num < 0) {
+ printf("ERROR: %s: Invalid interface\n", __func__);
+ return -1;
+ }
+
+ ipd_port = cvmx_helper_get_ipd_port(xiface, port);
+
+ /* Build an identifiable owner identifier */
+ res_owner = __cvmx_helper_pko3_res_owner(ipd_port);
+
+ level = CVMX_PKO_PORT_QUEUES;
+
+ /* Allocate port queue to make sure the MAC is not already configured */
+ l1_q_num = cvmx_pko_alloc_queues(node, level, res_owner, -1, 1);
+
+ if (l1_q_num < 0) {
+ printf("ERROR: %s: allocation L1 PQ\n", __func__);
+ return -1;
+ }
+
+ res = cvmx_pko3_pq_config(xi.node, pko_mac_num, l1_q_num);
+ if (res < 0) {
+ printf("ERROR: %s: Configuring %s\n", __func__,
+ __cvmx_pko3_sq_str(b1, level, l1_q_num));
+ return -1;
+ }
+
+ /* Determine the next queue level */
+ level = __cvmx_pko3_sq_lvl_next(level);
+
+ /* Allocate 'num_chans' L2 queues, one per channel */
+ l2_q_base =
+ cvmx_pko_alloc_queues(node, level, res_owner, -1, num_chans);
+ if (l2_q_base < 0) {
+ printf("ERROR: %s: allocation L2 SQ\n", __func__);
+ return -1;
+ }
+
+ /* Configure <num_chans> L2 children for PQ, with static priority */
+ res = cvmx_pko3_sq_config_children(node, level, l1_q_num, l2_q_base,
+ num_chans, num_chans);
+
+ if (res < 0) {
+ printf("ERROR: %s: Configuring %s for PFC\n", __func__,
+ __cvmx_pko3_sq_str(b1, level, l1_q_num));
+ return -1;
+ }
+
+ /* Map each of the allocated channels */
+ for (i = 0; i < num_chans; i++) {
+ u16 chan;
+
+ /* Get CHAN_E value for this PFC channel, PCP in low 3 bits */
+ chan = ipd_port | cvmx_helper_prio2qos(i);
+
+ cvmx_pko3_map_channel(node, l1_q_num, l2_q_base + i, chan);
+ }
+
+ /* Iterate through the levels until DQ and allocate 'num_chans'
+ * consecutive queues at each level and hook them up
+ * one-to-one with the parent level queues
+ */
+
+ parent_q = l2_q_base;
+ level = __cvmx_pko3_sq_lvl_next(level);
+
+ do {
+ child_q = cvmx_pko_alloc_queues(node, level, res_owner, -1,
+ num_chans);
+
+ if (child_q < 0) {
+ printf("ERROR: %s: allocating %s\n", __func__,
+ __cvmx_pko3_sq_str(b1, level, child_q));
+ return -1;
+ }
+
+ for (i = 0; i < num_chans; i++) {
+ res = cvmx_pko3_sq_config_children(
+ node, level, parent_q + i, child_q + i, 1, 1);
+
+ if (res < 0) {
+ printf("ERROR: %s: configuring %s\n", __func__,
+ __cvmx_pko3_sq_str(b1, level, child_q));
+ return -1;
+ }
+
+ } /* for i */
+
+ parent_q = child_q;
+ level = __cvmx_pko3_sq_lvl_next(level);
+
+ /* Terminate loop on DQ level, it has special handling */
+ } while (level != CVMX_PKO_DESCR_QUEUES &&
+ level != CVMX_PKO_LEVEL_INVAL);
+
+ if (level != CVMX_PKO_DESCR_QUEUES) {
+ printf("ERROR: %s: level sequence error\n", __func__);
+ return -1;
+ }
+
+ dq_base = cvmx_pko_alloc_queues(node, level, res_owner, -1, num_chans);
+ if (dq_base < 0) {
+ printf("ERROR: %s: allocating %s\n", __func__,
+ __cvmx_pko3_sq_str(b1, level, dq_base));
+ return -1;
+ }
+
+ /* Configure DQs in QoS order, so that QoS/PCP can be index */
+ for (i = 0; i < num_chans; i++) {
+ int dq_num = dq_base + cvmx_helper_prio2qos(i);
+
+ res = cvmx_pko3_sq_config_children(node, level, parent_q + i,
+ dq_num, 1, 1);
+ if (res < 0) {
+ printf("ERROR: %s: configuring %s\n", __func__,
+ __cvmx_pko3_sq_str(b1, level, dq_num));
+ return -1;
+ }
+ }
+
+ /* register entire DQ range with the IPD translation table */
+ __cvmx_pko3_ipd_dq_register(xiface, port, dq_base, num_chans);
+
+ return 0;
+}
+
+/**
+ * Initialize a simple interface with a given number of
+ * fair or prioritized queues.
+ * This function will assign one channel per sub-interface.
+ *
+ * @param xiface global interface number
+ * @param subif sub-interface (port) index
+ * @param num_queues requested descriptor queues (see NOTE below)
+ * @param prioritized true for prioritized scheduling, false for fair
+ * @return 0 on success, -1 on failure
+ */
+int __cvmx_pko3_config_gen_interface(int xiface, uint8_t subif, u8 num_queues,
+ bool prioritized)
+{
+ cvmx_xiface_t xi = cvmx_helper_xiface_to_node_interface(xiface);
+ u8 node = xi.node;
+ int l1_q_num;
+ int parent_q, child_q;
+ int dq;
+ int res, res_owner;
+ int pko_mac_num;
+ enum cvmx_pko3_level_e level;
+ u16 ipd_port;
+ int static_pri;
+ char b1[12];
+
+ /* NOTE(review): num_queues is unconditionally overridden to 1 here,
+ * ignoring the caller's value and making the zero-check below dead
+ * code — presumably a deliberate U-Boot simplification (one DQ per
+ * port); confirm intent before removing
+ */
+ num_queues = 1;
+
+ if (num_queues == 0) {
+ num_queues = 1;
+ printf("WARNING: %s: xiface %#x misconfigured\n", __func__,
+ xiface);
+ }
+
+ /* Configure DQs relative priority (a.k.a. scheduling) */
+ if (prioritized) {
+ /* With 8 queues or fewer, use static priority, else WRR */
+ static_pri = (num_queues < 9) ? num_queues : 0;
+ } else {
+ /* Set equal-RR scheduling among queues */
+ static_pri = -1;
+ }
+
+ if (debug)
+ debug("%s: configuring xiface %u:%u/%u nq=%u %s\n", __func__,
+ xi.node, xi.interface, subif, num_queues,
+ (prioritized) ? "qos" : "fair");
+
+ /* Get MAC number for the iface/port */
+ pko_mac_num = __cvmx_pko3_get_mac_num(xiface, subif);
+ if (pko_mac_num < 0) {
+ printf("ERROR: %s: Invalid interface %u:%u\n", __func__,
+ xi.node, xi.interface);
+ return -1;
+ }
+
+ ipd_port = cvmx_helper_get_ipd_port(xiface, subif);
+
+ if (debug)
+ debug("%s: xiface %u:%u/%u ipd_port=%#03x\n", __func__, xi.node,
+ xi.interface, subif, ipd_port);
+
+ /* Build an identifiable owner identifier */
+ res_owner = __cvmx_helper_pko3_res_owner(ipd_port);
+
+ level = CVMX_PKO_PORT_QUEUES;
+
+ /* Reserve port queue to make sure the MAC is not already configured */
+ l1_q_num = cvmx_pko_alloc_queues(node, level, res_owner, -1, 1);
+
+ if (l1_q_num < 0) {
+ printf("ERROR %s: xiface %u:%u/%u failed allocation L1 PQ\n",
+ __func__, xi.node, xi.interface, subif);
+ return -1;
+ }
+
+ res = cvmx_pko3_pq_config(node, pko_mac_num, l1_q_num);
+ if (res < 0) {
+ printf("ERROR %s: Configuring L1 PQ\n", __func__);
+ return -1;
+ }
+
+ parent_q = l1_q_num;
+
+ /* Determine the next queue level */
+ level = __cvmx_pko3_sq_lvl_next(level);
+
+ /* Simply chain queues 1-to-1 from L2 to one before DQ level */
+ do {
+ /* allocate next level queue */
+ child_q = cvmx_pko_alloc_queues(node, level, res_owner, -1, 1);
+
+ if (child_q < 0) {
+ printf("ERROR: %s: allocating %s\n", __func__,
+ __cvmx_pko3_sq_str(b1, level, child_q));
+ return -1;
+ }
+
+ /* Configure newly allocated queue */
+ res = cvmx_pko3_sq_config_children(node, level, parent_q,
+ child_q, 1, 1);
+
+ if (res < 0) {
+ printf("ERROR: %s: configuring %s\n", __func__,
+ __cvmx_pko3_sq_str(b1, level, child_q));
+ return -1;
+ }
+
+ /* map IPD/channel to L2/L3 queues */
+ if (level == cvmx_pko_default_channel_level)
+ cvmx_pko3_map_channel(node, l1_q_num, child_q,
+ ipd_port);
+
+ /* Prepare for next level */
+ level = __cvmx_pko3_sq_lvl_next(level);
+ parent_q = child_q;
+
+ /* Terminate loop on DQ level, it has special handling */
+ } while (level != CVMX_PKO_DESCR_QUEUES &&
+ level != CVMX_PKO_LEVEL_INVAL);
+
+ if (level != CVMX_PKO_DESCR_QUEUES) {
+ printf("ERROR: %s: level sequence error\n", __func__);
+ return -1;
+ }
+
+ /* Allocate descriptor queues for the port */
+ dq = cvmx_pko_alloc_queues(node, level, res_owner, -1, num_queues);
+ if (dq < 0) {
+ printf("ERROR: %s: could not reserve DQs\n", __func__);
+ return -1;
+ }
+
+ res = cvmx_pko3_sq_config_children(node, level, parent_q, dq,
+ num_queues, static_pri);
+ if (res < 0) {
+ printf("ERROR: %s: configuring %s\n", __func__,
+ __cvmx_pko3_sq_str(b1, level, dq));
+ return -1;
+ }
+
+ /* register DQ/IPD translation */
+ __cvmx_pko3_ipd_dq_register(xiface, subif, dq, num_queues);
+
+ if (debug)
+ debug("%s: xiface %u:%u/%u qs %u-%u\n", __func__, xi.node,
+ xi.interface, subif, dq, dq + num_queues - 1);
+ return 0;
+}
+
+/** Initialize the NULL interface
+ *
+ * A NULL interface is a special case in that it is not
+ * one of the enumerated interfaces in the system, and does
+ * not apply to input either. Still, it can be very handy
+ * for dealing with packets that should be discarded in
+ * a generic, streamlined way.
+ *
+ * The Descriptor Queue 0 will be reserved for the NULL interface
+ * and the normalized (i.e. IPD) port number has the all-ones value.
+ *
+ * @param node OCX node number
+ * @return 0 on success, -1 on failure or unsupported model
+ */
+static int __cvmx_pko3_config_null_interface(unsigned int node)
+{
+ int l1_q_num;
+ int parent_q, child_q;
+ enum cvmx_pko3_level_e level;
+ int i, res, res_owner;
+ int xiface, ipd_port;
+ int num_dq = 1; /* # of DQs for NULL */
+ const int dq = 0; /* Reserve DQ#0 for NULL */
+ char pko_mac_num;
+ char b1[12];
+
+ /* Per-model virtual MAC number reserved for the NULL interface */
+ if (OCTEON_IS_MODEL(OCTEON_CN78XX))
+ pko_mac_num = 0x1C; /* MAC# 28 virtual MAC for NULL */
+ else if (OCTEON_IS_MODEL(OCTEON_CN73XX))
+ /* NOTE(review): 0x0F is 15, yet the original comment says
+ * "MAC# 16" — verify against the CN73XX HRM
+ */
+ pko_mac_num = 0x0F; /* MAC# 16 virtual MAC for NULL */
+ else if (OCTEON_IS_MODEL(OCTEON_CNF75XX))
+ pko_mac_num = 0x0A; /* MAC# 10 virtual MAC for NULL */
+ else
+ return -1;
+
+ if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_0))
+ num_dq = 8;
+
+ if (debug)
+ debug("%s: null iface dq=%u-%u\n", __func__, dq,
+ dq + num_dq - 1);
+
+ ipd_port = cvmx_helper_node_to_ipd_port(node, CVMX_PKO3_IPD_PORT_NULL);
+
+ /* Build an identifiable owner identifier by MAC# for easy release */
+ res_owner = __cvmx_helper_pko3_res_owner(ipd_port);
+ if (res_owner < 0) {
+ debug("%s: ERROR Invalid interface\n", __func__);
+ return -1;
+ }
+
+ level = CVMX_PKO_PORT_QUEUES;
+
+ /* Allocate a port queue */
+ l1_q_num = cvmx_pko_alloc_queues(node, level, res_owner, -1, 1);
+
+ if (l1_q_num < 0) {
+ debug("%s: ERROR reserving L1 SQ\n", __func__);
+ return -1;
+ }
+
+ res = cvmx_pko3_pq_config(node, pko_mac_num, l1_q_num);
+ if (res < 0) {
+ printf("ERROR: %s: PQ/L1 queue configuration\n", __func__);
+ return -1;
+ }
+
+ parent_q = l1_q_num;
+
+ /* Determine the next queue level */
+ level = __cvmx_pko3_sq_lvl_next(level);
+
+ /* Simply chain queues 1-to-1 from L2 to one before DQ level */
+ do {
+ /* allocate next level queue */
+ child_q = cvmx_pko_alloc_queues(node, level, res_owner, -1, 1);
+
+ if (child_q < 0) {
+ printf("ERROR: %s: allocating %s\n", __func__,
+ __cvmx_pko3_sq_str(b1, level, child_q));
+ return -1;
+ }
+
+ /* Configure newly allocated queue */
+ res = cvmx_pko3_sq_config_children(node, level, parent_q,
+ child_q, 1, 1);
+
+ if (res < 0) {
+ printf("ERROR: %s: configuring %s\n", __func__,
+ __cvmx_pko3_sq_str(b1, level, child_q));
+ return -1;
+ }
+
+ /* Prepare for next level */
+ level = __cvmx_pko3_sq_lvl_next(level);
+ parent_q = child_q;
+
+ /* Terminate loop on DQ level, it has special handling */
+ } while (level != CVMX_PKO_DESCR_QUEUES &&
+ level != CVMX_PKO_LEVEL_INVAL);
+
+ if (level != CVMX_PKO_DESCR_QUEUES) {
+ printf("ERROR: %s: level sequence error\n", __func__);
+ return -1;
+ }
+
+ /* Reserve 'num_dq' DQ's at 0 by convention */
+ res = cvmx_pko_alloc_queues(node, level, res_owner, dq, num_dq);
+ if (dq != res) {
+ debug("%s: ERROR: could not reserve DQs\n", __func__);
+ return -1;
+ }
+
+ res = cvmx_pko3_sq_config_children(node, level, parent_q, dq, num_dq,
+ num_dq);
+ if (res < 0) {
+ printf("ERROR: %s: configuring %s\n", __func__,
+ __cvmx_pko3_sq_str(b1, level, dq));
+ return -1;
+ }
+
+ /* NULL interface does not need to map to a CHAN_E */
+
+ /* register DQ/IPD translation */
+ xiface = cvmx_helper_node_interface_to_xiface(node, __CVMX_XIFACE_NULL);
+ __cvmx_pko3_ipd_dq_register(xiface, 0, dq, num_dq);
+
+ /* open the null DQs here */
+ for (i = 0; i < num_dq; i++) {
+ unsigned int limit = 128; /* NULL never really uses much */
+
+ cvmx_pko_dq_open(node, dq + i);
+ cvmx_pko3_dq_set_limit(node, dq + i, limit);
+ }
+
+ return 0;
+}
+
+/** Open all descriptor queues belonging to an interface/port
+ * @INTERNAL
+ *
+ * Opens each DQ registered for the port, applies padding options and a
+ * per-DQ packet limit derived from the interface packet quota, and
+ * debits the global packet budget accordingly.
+ *
+ * @param xiface global interface number
+ * @param index port index within the interface
+ * @param min_pad enable minimum-size padding on the DQs
+ * @return number of DQs activated, or -1 on error
+ */
+int __cvmx_pko3_helper_dqs_activate(int xiface, int index, bool min_pad)
+{
+ int ipd_port, dq_base, dq_count, i;
+ struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
+ unsigned int limit;
+
+ /* Get local IPD port for the interface */
+ ipd_port = cvmx_helper_get_ipd_port(xiface, index);
+ if (ipd_port < 0) {
+ printf("ERROR: %s: No IPD port for interface %d port %d\n",
+ __func__, xiface, index);
+ return -1;
+ }
+
+ /* Get DQ# range for the IPD port */
+ dq_base = cvmx_pko3_get_queue_base(ipd_port);
+ dq_count = cvmx_pko3_get_queue_num(ipd_port);
+ if (dq_base < 0 || dq_count <= 0) {
+ printf("ERROR: %s: No descriptor queues for interface %d port %d\n",
+ __func__, xiface, index);
+ return -1;
+ }
+
+ /* Mask out node from global DQ# */
+ dq_base &= (1 << 10) - 1;
+
+ /* Split the interface packet quota evenly across all DQs/ports */
+ limit = __pko_pkt_quota / dq_count /
+ cvmx_helper_interface_enumerate(xiface);
+
+ for (i = 0; i < dq_count; i++) {
+ /* FIXME: 2ms at 1Gbps max packet rate, make speed dependent */
+ cvmx_pko_dq_open(xi.node, dq_base + i);
+ cvmx_pko3_dq_options(xi.node, dq_base + i, min_pad);
+
+ if (debug)
+ debug("%s: DQ%u limit %d\n", __func__, dq_base + i,
+ limit);
+
+ cvmx_pko3_dq_set_limit(xi.node, dq_base + i, limit);
+ __pko_pkt_budget -= limit;
+ }
+
+ /* A negative budget means the AURA was sized too small for the
+ * limits handed out so far
+ */
+ if (__pko_pkt_budget < 0)
+ printf("WARNING: %s: PKO buffer deficit %d\n", __func__,
+ __pko_pkt_budget);
+ else if (debug)
+ debug("%s: PKO remaining packet budget: %d\n", __func__,
+ __pko_pkt_budget);
+
+ return i;
+}
+
+/** Configure and initialize PKO3 for an interface
+ *
+ * Picks queue topology per interface mode (LOOP/NPI/ILK are channelized,
+ * PFC-enabled Ethernet gets 8 prioritized queues per port, everything
+ * else follows the static per-interface configuration), then activates
+ * the DQs and programs FCS/padding options either in PKO or in BGX.
+ *
+ * @param xiface is the interface number to configure
+ * @return 0 on success.
+ */
+int cvmx_helper_pko3_init_interface(int xiface)
+{
+ cvmx_helper_interface_mode_t mode;
+ int node, iface, subif, num_ports;
+ bool fcs_enable, pad_enable, pad_enable_pko;
+ u8 fcs_sof_off = 0;
+ u8 num_queues = 1;
+ bool qos = false, pfc = false;
+ int res = -1;
+ cvmx_xiface_t xi = cvmx_helper_xiface_to_node_interface(xiface);
+
+ node = xi.node;
+ iface = xi.interface;
+ mode = cvmx_helper_interface_get_mode(xiface);
+ num_ports = cvmx_helper_interface_enumerate(xiface);
+ subif = 0;
+
+ /* Pull PFC/queue-count/QoS settings from the static config table,
+ * when this interface has an entry
+ */
+ if ((unsigned int)iface <
+ NUM_ELEMENTS(__cvmx_pko_queue_static_config[node].pknd.pko_cfg_iface)) {
+ pfc = __cvmx_pko_queue_static_config[node]
+ .pknd.pko_cfg_iface[iface]
+ .pfc_enable;
+ num_queues = __cvmx_pko_queue_static_config[node]
+ .pknd.pko_cfg_iface[iface]
+ .queues_per_port;
+ qos = __cvmx_pko_queue_static_config[node]
+ .pknd.pko_cfg_iface[iface]
+ .qos_enable;
+ }
+
+ /* Force 8 DQs per port for pass 1.0 to circumvent limitations */
+ if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_0))
+ num_queues = 8;
+
+ /* For ILK there is one IPD port per channel */
+ if (mode == CVMX_HELPER_INTERFACE_MODE_ILK)
+ num_ports = __cvmx_helper_ilk_enumerate(xiface);
+
+ /* Skip non-existent interfaces */
+ if (num_ports < 1) {
+ debug("ERROR: %s: invalid iface %u:%u\n", __func__, node,
+ iface);
+ return -1;
+ }
+
+ if (mode == CVMX_HELPER_INTERFACE_MODE_LOOP) {
+ num_queues = __cvmx_pko_queue_static_config[node]
+ .pknd.pko_cfg_loop.queues_per_port;
+ qos = __cvmx_pko_queue_static_config[node]
+ .pknd.pko_cfg_loop.qos_enable;
+
+ if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_0))
+ num_queues = 8;
+
+ res = __cvmx_pko3_config_chan_interface(xiface, num_ports,
+ num_queues, qos);
+ if (res < 0)
+ goto __cfg_error;
+ } else if (mode == CVMX_HELPER_INTERFACE_MODE_NPI) {
+ num_queues = __cvmx_pko_queue_static_config[node]
+ .pknd.pko_cfg_npi.queues_per_port;
+ qos = __cvmx_pko_queue_static_config[node]
+ .pknd.pko_cfg_npi.qos_enable;
+
+ if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_0))
+ num_queues = 8;
+
+ res = __cvmx_pko3_config_chan_interface(xiface, num_ports,
+ num_queues, qos);
+ if (res < 0)
+ goto __cfg_error;
+ }
+ /* ILK-specific queue configuration */
+ else if (mode == CVMX_HELPER_INTERFACE_MODE_ILK) {
+ unsigned int num_chans = __cvmx_helper_ilk_enumerate(xiface);
+
+ num_queues = 8;
+ qos = true;
+ pfc = false;
+
+ /* Scale queues-per-channel down as channel count grows */
+ if (num_chans >= 128)
+ num_queues = 1;
+ else if (num_chans >= 64)
+ num_queues = 2;
+ else if (num_chans >= 32)
+ num_queues = 4;
+ else
+ num_queues = 8;
+
+ res = __cvmx_pko3_config_chan_interface(xiface, num_chans,
+ num_queues, qos);
+ }
+ /* Setup all ethernet configured for PFC */
+ else if (pfc) {
+ /* PFC interfaces have 8 prioritized queues */
+ for (subif = 0; subif < num_ports; subif++) {
+ res = __cvmx_pko3_config_pfc_interface(xiface, subif);
+ if (res < 0)
+ goto __cfg_error;
+
+ /* Enable PFC/CBFC on BGX */
+ __cvmx_helper_bgx_xaui_config_pfc(node, iface, subif,
+ true);
+ }
+ } else {
+ /* All other interfaces follow static configuration */
+ for (subif = 0; subif < num_ports; subif++) {
+ res = __cvmx_pko3_config_gen_interface(xiface, subif,
+ num_queues, qos);
+ if (res < 0)
+ goto __cfg_error;
+ }
+ }
+
+ fcs_enable = __cvmx_helper_get_has_fcs(xiface);
+ pad_enable = __cvmx_helper_get_pko_padding(xiface);
+
+ /* Do not use PKO PAD/FCS generation on o78p1.x on BGX interfaces */
+ if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X))
+ pad_enable_pko = false;
+ else
+ pad_enable_pko = pad_enable;
+
+ if (debug)
+ debug("%s: iface %u:%u FCS=%d pad=%d pko=%d\n", __func__, node,
+ iface, fcs_enable, pad_enable, pad_enable_pko);
+
+ /* Setup interface options */
+ for (subif = 0; subif < num_ports; subif++) {
+ /* Open interface/port DQs to allow transmission to begin */
+ res = __cvmx_pko3_helper_dqs_activate(xiface, subif,
+ pad_enable_pko);
+
+ if (res < 0)
+ goto __cfg_error;
+
+ /* ILK has only one MAC, subif == logical-channel */
+ if (mode == CVMX_HELPER_INTERFACE_MODE_ILK && subif > 0)
+ continue;
+
+ /* LOOP has only one MAC, subif == logical-channel */
+ if (mode == CVMX_HELPER_INTERFACE_MODE_LOOP && subif > 0)
+ continue;
+
+ /* NPI has only one MAC, subif == 'ring' */
+ if (mode == CVMX_HELPER_INTERFACE_MODE_NPI && subif > 0)
+ continue;
+
+ /* for sRIO there is 16 byte sRIO header, outside of FCS */
+ if (mode == CVMX_HELPER_INTERFACE_MODE_SRIO)
+ fcs_sof_off = 16;
+
+ if (iface >= CVMX_HELPER_MAX_GMX) {
+ /* Non-BGX interface, use PKO for FCS/PAD */
+ res = cvmx_pko3_interface_options(xiface, subif,
+ fcs_enable,
+ pad_enable_pko,
+ fcs_sof_off);
+ } else if (pad_enable == pad_enable_pko) {
+ /* BGX interface: FCS/PAD done by PKO */
+ res = cvmx_pko3_interface_options(xiface, subif,
+ fcs_enable,
+ pad_enable,
+ fcs_sof_off);
+ cvmx_helper_bgx_tx_options(node, iface, subif, false,
+ false);
+ } else {
+ /* BGX interface: FCS/PAD done by BGX */
+ res = cvmx_pko3_interface_options(xiface, subif, false,
+ false, fcs_sof_off);
+ cvmx_helper_bgx_tx_options(node, iface, subif,
+ fcs_enable, pad_enable);
+ }
+
+ if (res < 0)
+ debug("WARNING: %s: option set failed on iface %u:%u/%u\n",
+ __func__, node, iface, subif);
+ if (debug)
+ debug("%s: face %u:%u/%u fifo size %d\n", __func__,
+ node, iface, subif,
+ cvmx_pko3_port_fifo_size(xiface, subif));
+ }
+ return 0;
+
+__cfg_error:
+ debug("ERROR: %s: failed on iface %u:%u/%u\n", __func__, node, iface,
+ subif);
+ return -1;
+}
+
+/**
+ * Global initialization for PKO3
+ *
+ * Should only be called once on each node
+ *
+ * Initializes the PKO3 hardware block with the given AURA, selects the
+ * queue level at which channels are attached, and creates the NULL
+ * interface used to discard packets.
+ *
+ * @param node OCX node number
+ * @param gaura 12-bit AURA handle (includes node bits) for PKO use
+ * @return 0 on success, negative on failure
+ *
+ * TBD: Resolve the kernel case.
+ * When Linux eats up the entire memory, bootmem will be unable to
+ * satisfy our request, and the memory needs to come from Linux free pages.
+ */
+int __cvmx_helper_pko3_init_global(unsigned int node, uint16_t gaura)
+{
+ int res;
+
+ res = cvmx_pko3_hw_init_global(node, gaura);
+ if (res < 0) {
+ debug("ERROR: %s:failed block initialization\n", __func__);
+ return res;
+ }
+
+ /* configure channel level */
+ cvmx_pko3_channel_credit_level(node, cvmx_pko_default_channel_level);
+
+ /* add NULL MAC/DQ setup */
+ res = __cvmx_pko3_config_null_interface(node);
+ if (res < 0)
+ debug("ERROR: %s: creating NULL interface\n", __func__);
+
+ return res;
+}
+
+/**
+ * Global initialization for PKO3
+ *
+ * Should only be called once on each node
+ *
+ * Allocates the PKO3 AURA/POOL, sanity-checks it with one alloc/free
+ * round-trip, then performs the block-level initialization.
+ *
+ * @param node OCX node number
+ * @return 0 on success, negative on failure
+ *
+ * When Linux eats up the entire memory, bootmem will be unable to
+ * satisfy our request, and the memory needs to come from Linux free pages.
+ */
+int cvmx_helper_pko3_init_global(unsigned int node)
+{
+ void *ptr;
+ int res = -1;
+ unsigned int aura_num = ~0;
+ cvmx_fpa3_gaura_t aura;
+
+ /* Allocate memory required by PKO3 */
+ res = __cvmx_pko3_config_memory(node);
+ if (res < 0) {
+ debug("ERROR: %s: PKO3 memory allocation error\n", __func__);
+ return res;
+ }
+
+ aura_num = res;
+ aura = __cvmx_pko3_aura[node];
+
+ /* Exercise the FPA to make sure the AURA is functional */
+ ptr = cvmx_fpa3_alloc(aura);
+
+ if (!ptr) {
+ res = -1;
+ } else {
+ cvmx_fpa3_free_nosync(ptr, aura, 0);
+ res = 0;
+ }
+
+ if (res < 0) {
+ debug("ERROR: %s: FPA failure AURA=%u:%d\n", __func__,
+ aura.node, aura.laura);
+ return -1;
+ }
+
+ res = __cvmx_helper_pko3_init_global(node, aura_num);
+
+ if (res < 0)
+ debug("ERROR: %s: failed to start PPKO\n", __func__);
+
+ return res;
+}
+
+/**
+ * Uninitialize PKO3 interface
+ *
+ * Release all resources held by PKO for an interface.
+ * The shutdown code is the same for all supported interfaces.
+ *
+ * For each port: unregister the DQ range (stops traffic), drain and
+ * close every DQ, return the DQ packet limits to the global budget,
+ * and finally free all queue resources tagged with the port's owner id.
+ *
+ * NOTE: The NULL virtual interface is identified by interface
+ * number -1, which translates into IPD port 0xfff, MAC#28. [Kludge]
+ *
+ * @param xiface global interface number (or the NULL virtual interface)
+ * @return 0 on success, -1 for a non-existent interface
+ */
+int cvmx_helper_pko3_shut_interface(int xiface)
+{
+ int index, num_ports;
+ int dq_base, dq_count;
+ u16 ipd_port;
+ int i, res_owner, res;
+ enum cvmx_pko3_level_e level;
+ cvmx_pko3_dq_params_t *p_param;
+ const unsigned int timeout = 10; /* milliseconds */
+ cvmx_xiface_t xi = cvmx_helper_xiface_to_node_interface(xiface);
+
+ if (__cvmx_helper_xiface_is_null(xiface)) {
+ /* Special case for NULL interface */
+ num_ports = 1;
+ } else {
+ cvmx_helper_interface_mode_t mode;
+
+ mode = cvmx_helper_interface_get_mode(xiface);
+ num_ports = cvmx_helper_interface_enumerate(xiface);
+ (void)mode;
+ }
+
+ /* Skip non-existent interfaces silently */
+ if (num_ports < 1)
+ return -1;
+
+ if (debug)
+ debug("%s: xiface %u:%d ports %d\n", __func__, xi.node,
+ xi.interface, num_ports);
+
+ for (index = 0; index < num_ports; index++) {
+ if (__cvmx_helper_xiface_is_null(xiface))
+ ipd_port = cvmx_helper_node_to_ipd_port(
+ xi.node, CVMX_PKO3_IPD_PORT_NULL);
+ else
+ ipd_port = cvmx_helper_get_ipd_port(xiface, index);
+
+ /* Retrieve DQ range for the index */
+ dq_base = cvmx_pko3_get_queue_base(ipd_port);
+ dq_count = cvmx_pko3_get_queue_num(ipd_port);
+
+ if (dq_base < 0 || dq_count < 0) {
+ debug("ERROR: %s: No DQs for iface %u:%d/%u\n",
+ __func__, xi.node, xi.interface, index);
+ continue;
+ }
+
+ /* Get rid of node-number in DQ# */
+ dq_base &= (1 << 10) - 1;
+
+ if (debug)
+ debug("%s: xiface %u:%d/%d dq %u-%u\n", __func__,
+ xi.node, xi.interface, index, dq_base,
+ dq_base + dq_count - 1);
+
+ /* Unregister the DQs for the port, should stop traffic */
+ res = __cvmx_pko3_ipd_dq_unregister(xiface, index);
+ if (res < 0) {
+ debug("ERROR: %s: failed to unregister DQs iface %u/%d/%u\n",
+ __func__, xi.node, xi.interface, index);
+ continue;
+ }
+
+ /* Begin draining all queues */
+ for (i = 0; i < dq_count; i++)
+ cvmx_pko3_dq_drain(xi.node, dq_base + i);
+
+ /* Wait for all queues to drain, and close them */
+ for (i = 0; i < dq_count; i++) {
+ /* Prepare timeout */
+ u64 start = get_timer(0);
+
+ /* Wait for queue to drain */
+ do {
+ res = cvmx_pko3_dq_query(xi.node, dq_base + i);
+ if (get_timer(start) > timeout)
+ break;
+ } while (res > 0);
+
+ if (res != 0)
+ debug("ERROR: %s: querying queue %u\n",
+ __func__, dq_base + i);
+
+ /* Close the queue, free internal buffers */
+ res = cvmx_pko3_dq_close(xi.node, dq_base + i);
+
+ if (res < 0)
+ debug("ERROR: %s: closing queue %u\n", __func__,
+ dq_base + i);
+
+ /* Return DQ packet budget */
+ p_param = cvmx_pko3_dq_parameters(xi.node, dq_base + i);
+ __pko_pkt_budget += p_param->limit;
+ p_param->limit = 0;
+ }
+
+ /* Release all global resources owned by this interface/port */
+
+ res_owner = __cvmx_helper_pko3_res_owner(ipd_port);
+ if (res_owner < 0) {
+ debug("ERROR: %s: no resource owner ticket\n",
+ __func__);
+ continue;
+ }
+
+ /* Actual PQ/SQ/DQ associations left intact */
+ for (level = CVMX_PKO_PORT_QUEUES;
+ level != CVMX_PKO_LEVEL_INVAL;
+ level = __cvmx_pko3_sq_lvl_next(level)) {
+ cvmx_pko_free_queues(xi.node, level, res_owner);
+ }
+
+ } /* for port */
+
+ return 0;
+}
+
+/**
+ * Shutdown PKO3
+ *
+ * Should be called after all interfaces have been shut down on the PKO3.
+ *
+ * Disables the PKO, frees all its buffers.
+ *
+ * @param node OCX node number
+ * @return result of cvmx_pko3_hw_disable()
+ */
+int cvmx_helper_pko3_shutdown(unsigned int node)
+{
+ unsigned int dq;
+ int res;
+
+ /* destroy NULL interface here, only PKO knows about it */
+ cvmx_helper_pko3_shut_interface(
+ cvmx_helper_node_interface_to_xiface(node, __CVMX_XIFACE_NULL));
+
+#ifdef __PKO_DQ_CLOSE_ERRATA_FIXED
+ /* Check that all DQs are closed */
+ /* this seems to cause issue on HW:
+ * the error code differs from expected
+ */
+ for (dq = 0; dq < (1 << 10); dq++) {
+ res = cvmx_pko3_dq_close(node, dq);
+ if (res != 0) {
+ debug("ERROR: %s: PKO3 descriptor queue %u could not be closed\n",
+ __func__, dq);
+ return -1;
+ }
+ }
+#endif
+ /* Silence unused-variable warning when the errata loop above is
+ * compiled out
+ */
+ (void)dq;
+ res = cvmx_pko3_hw_disable(node);
+
+ /* shut down AURA/POOL we created, and free its resources */
+ cvmx_fpa3_shutdown_aura_and_pool(__cvmx_pko3_aura[node]);
+ return res;
+}
--
2.35.1
^ permalink raw reply related [flat|nested] 52+ messages in thread* [PATCH 20/52] mips: octeon: Add cvmx-helper-rgmii.c
2022-03-30 10:06 [PATCH 00/52] mips: octeon: Add ethernet support Stefan Roese
` (17 preceding siblings ...)
2022-03-30 10:06 ` [PATCH 19/52] mips: octeon: Add cvmx-helper-pko3.c Stefan Roese
@ 2022-03-30 10:06 ` Stefan Roese
2022-03-30 10:06 ` [PATCH 21/52] mips: octeon: Add cvmx-helper-sgmii.c Stefan Roese
` (30 subsequent siblings)
49 siblings, 0 replies; 52+ messages in thread
From: Stefan Roese @ 2022-03-30 10:06 UTC (permalink / raw)
To: u-boot; +Cc: awilliams, cchavva
From: Aaron Williams <awilliams@marvell.com>
Import cvmx-helper-rgmii.c from 2013 U-Boot. It will be used by the later
added drivers to support networking on the MIPS Octeon II / III
platforms.
Signed-off-by: Aaron Williams <awilliams@marvell.com>
Signed-off-by: Stefan Roese <sr@denx.de>
---
arch/mips/mach-octeon/cvmx-helper-rgmii.c | 431 ++++++++++++++++++++++
1 file changed, 431 insertions(+)
create mode 100644 arch/mips/mach-octeon/cvmx-helper-rgmii.c
diff --git a/arch/mips/mach-octeon/cvmx-helper-rgmii.c b/arch/mips/mach-octeon/cvmx-helper-rgmii.c
new file mode 100644
index 000000000000..a85b6a3cd4e8
--- /dev/null
+++ b/arch/mips/mach-octeon/cvmx-helper-rgmii.c
@@ -0,0 +1,431 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018-2022 Marvell International Ltd.
+ *
+ * Functions for RGMII/GMII/MII initialization, configuration,
+ * and monitoring.
+ */
+
+#include <log.h>
+#include <time.h>
+#include <linux/delay.h>
+
+#include <mach/cvmx-regs.h>
+#include <mach/cvmx-csr.h>
+#include <mach/cvmx-bootmem.h>
+#include <mach/octeon-model.h>
+#include <mach/cvmx-fuse.h>
+#include <mach/octeon-feature.h>
+#include <mach/cvmx-qlm.h>
+#include <mach/octeon_qlm.h>
+#include <mach/cvmx-pcie.h>
+#include <mach/cvmx-coremask.h>
+#include <mach/cvmx-helper.h>
+#include <mach/cvmx-helper-board.h>
+
+#include <mach/cvmx-hwpko.h>
+
+#include <mach/cvmx-asxx-defs.h>
+#include <mach/cvmx-dbg-defs.h>
+#include <mach/cvmx-gmxx-defs.h>
+#include <mach/cvmx-npi-defs.h>
+#include <mach/cvmx-pko-defs.h>
+
+/**
+ * @INTERNAL
+ * Probe RGMII ports and determine the number present
+ *
+ * NOTE(review): both branches below print the same "unsupported model"
+ * error and num_ports is always 0, so this probe currently reports no
+ * RGMII ports on any model. This looks like stripped legacy-model
+ * support from the 2013 import — confirm this is intended.
+ *
+ * @param xiface Interface to probe
+ *
+ * @return Number of RGMII/GMII/MII ports (0-4).
+ */
+int __cvmx_helper_rgmii_probe(int xiface)
+{
+ struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
+ int num_ports = 0;
+ union cvmx_gmxx_inf_mode mode;
+
+ mode.u64 = csr_rd(CVMX_GMXX_INF_MODE(xi.interface));
+
+ if (mode.s.type)
+ debug("ERROR: Unsupported Octeon model in %s\n", __func__);
+ else
+ debug("ERROR: Unsupported Octeon model in %s\n", __func__);
+ return num_ports;
+}
+
+/**
+ * Put an RGMII interface in loopback mode. Internal packets sent
+ * out will be received back again on the same port. Externally
+ * received packets will echo back out.
+ *
+ * @param port IPD port number to loop.
+ */
+void cvmx_helper_rgmii_internal_loopback(int port)
+{
+ int interface = (port >> 4) & 1;
+ int index = port & 0xf;
+ u64 tmp;
+
+ union cvmx_gmxx_prtx_cfg gmx_cfg;
+
+ /* Force 1Gbps full duplex with gigabit slot/burst timing */
+ gmx_cfg.u64 = 0;
+ gmx_cfg.s.duplex = 1;
+ gmx_cfg.s.slottime = 1;
+ gmx_cfg.s.speed = 1;
+ csr_wr(CVMX_GMXX_TXX_CLK(index, interface), 1);
+ csr_wr(CVMX_GMXX_TXX_SLOT(index, interface), 0x200);
+ csr_wr(CVMX_GMXX_TXX_BURST(index, interface), 0x2000);
+ csr_wr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
+ /* Set the internal loopback bit and enable TX/RX for this index */
+ tmp = csr_rd(CVMX_ASXX_PRT_LOOP(interface));
+ csr_wr(CVMX_ASXX_PRT_LOOP(interface), (1 << index) | tmp);
+ tmp = csr_rd(CVMX_ASXX_TX_PRT_EN(interface));
+ csr_wr(CVMX_ASXX_TX_PRT_EN(interface), (1 << index) | tmp);
+ tmp = csr_rd(CVMX_ASXX_RX_PRT_EN(interface));
+ csr_wr(CVMX_ASXX_RX_PRT_EN(interface), (1 << index) | tmp);
+ /* Finally enable the port with the forced configuration */
+ gmx_cfg.s.en = 1;
+ csr_wr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
+}
+
+/**
+ * @INTERNAL
+ * Configure all of the ASX, GMX, and PKO registers required
+ * to get RGMII to function on the supplied interface.
+ *
+ * @param xiface PKO Interface to configure (0 or 1)
+ *
+ * @return Zero on success, -1 if the interface has no ports or the
+ *	   interface is not enabled in GMX
+ */
+int __cvmx_helper_rgmii_enable(int xiface)
+{
+ struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
+ int interface = xi.interface;
+ int num_ports = cvmx_helper_ports_on_interface(interface);
+ int port;
+ union cvmx_gmxx_inf_mode mode;
+ union cvmx_asxx_tx_prt_en asx_tx;
+ union cvmx_asxx_rx_prt_en asx_rx;
+
+ mode.u64 = csr_rd(CVMX_GMXX_INF_MODE(interface));
+
+ if (num_ports == -1)
+ return -1;
+ if (mode.s.en == 0)
+ return -1;
+
+ /* Configure the ASX registers needed to use the RGMII ports */
+ asx_tx.u64 = 0;
+ asx_tx.s.prt_en = cvmx_build_mask(num_ports);
+ csr_wr(CVMX_ASXX_TX_PRT_EN(interface), asx_tx.u64);
+
+ asx_rx.u64 = 0;
+ asx_rx.s.prt_en = cvmx_build_mask(num_ports);
+ csr_wr(CVMX_ASXX_RX_PRT_EN(interface), asx_rx.u64);
+
+ /* Configure the GMX registers needed to use the RGMII ports */
+ for (port = 0; port < num_ports; port++) {
+ /*
+ * Configure more flexible RGMII preamble
+ * checking. Pass 1 doesn't support this feature.
+ */
+ union cvmx_gmxx_rxx_frm_ctl frm_ctl;
+
+ frm_ctl.u64 = csr_rd(CVMX_GMXX_RXX_FRM_CTL(port, interface));
+ /* New field, so must be compile time */
+ frm_ctl.s.pre_free = 1;
+ csr_wr(CVMX_GMXX_RXX_FRM_CTL(port, interface), frm_ctl.u64);
+
+ /*
+ * Each pause frame transmitted will ask for about 10M
+ * bit times before resume. If buffer space comes
+ * available before that time has expired, an XON
+ * pause frame (0 time) will be transmitted to restart
+ * the flow.
+ */
+ csr_wr(CVMX_GMXX_TXX_PAUSE_PKT_TIME(port, interface), 20000);
+ csr_wr(CVMX_GMXX_TXX_PAUSE_PKT_INTERVAL(port, interface),
+ 19000);
+
+ /* NOTE(review): 24 presumably the RGMII clock delay
+ * setting — confirm against the hardware manual.
+ */
+ csr_wr(CVMX_ASXX_TX_CLK_SETX(port, interface), 24);
+ csr_wr(CVMX_ASXX_RX_CLK_SETX(port, interface), 24);
+ }
+
+ __cvmx_helper_setup_gmx(interface, num_ports);
+
+ /* enable the ports now */
+ for (port = 0; port < num_ports; port++) {
+ union cvmx_gmxx_prtx_cfg gmx_cfg;
+
+ cvmx_helper_link_autoconf(
+ cvmx_helper_get_ipd_port(interface, port));
+ gmx_cfg.u64 = csr_rd(CVMX_GMXX_PRTX_CFG(port, interface));
+ gmx_cfg.s.en = 1;
+ csr_wr(CVMX_GMXX_PRTX_CFG(port, interface), gmx_cfg.u64);
+ }
+ return 0;
+}
+
+/**
+ * @INTERNAL
+ * Return the link state of an IPD/PKO port as returned by
+ * auto negotiation. The result of this function may not match
+ * Octeon's link config if auto negotiation has changed since
+ * the last call to cvmx_helper_link_set().
+ *
+ * @param ipd_port IPD/PKO port to query
+ *
+ * @return Link state
+ */
+cvmx_helper_link_info_t __cvmx_helper_rgmii_link_get(int ipd_port)
+{
+ int iface = cvmx_helper_get_interface_num(ipd_port);
+ int index = cvmx_helper_get_interface_index_num(ipd_port);
+ union cvmx_asxx_prt_loop prt_loop;
+ cvmx_helper_link_info_t result;
+
+ /* A port in internal loopback has no PHY to query; anything
+ * else is resolved by the board-specific link helper.
+ */
+ prt_loop.u64 = csr_rd(CVMX_ASXX_PRT_LOOP(iface));
+ if (!(prt_loop.s.int_loop & (1 << index)))
+ return __cvmx_helper_board_link_get(ipd_port);
+
+ /* Internal loopback: report a fixed 1Gbps full-duplex link */
+ result.u64 = 0;
+ result.s.link_up = 1;
+ result.s.full_duplex = 1;
+ result.s.speed = 1000;
+ return result;
+}
+
+/**
+ * @INTERNAL
+ * Return the link state of an IPD/PKO port as returned by
+ * auto negotiation. The result of this function may not match
+ * Octeon's link config if auto negotiation has changed since
+ * the last call to cvmx_helper_link_set().
+ *
+ * @param ipd_port IPD/PKO port to query
+ *
+ * @return Link state (index 0 queries the RGMII helper, other
+ *	   indices report a fixed 1Gbps full-duplex link)
+ */
+cvmx_helper_link_info_t __cvmx_helper_gmii_link_get(int ipd_port)
+{
+ cvmx_helper_link_info_t result;
+ int index = cvmx_helper_get_interface_index_num(ipd_port);
+
+ if (index == 0) {
+ result = __cvmx_helper_rgmii_link_get(ipd_port);
+ } else {
+ /* Zero the whole union first: without this the bits not
+ * covered by the three fields below would be returned
+ * uninitialized.
+ */
+ result.u64 = 0;
+ result.s.full_duplex = 1;
+ result.s.link_up = 1;
+ result.s.speed = 1000;
+ }
+
+ return result;
+}
+
+/**
+ * @INTERNAL
+ * Configure an IPD/PKO port for the specified link state. This
+ * function does not influence auto negotiation at the PHY level.
+ * The passed link state must always match the link state returned
+ * by cvmx_helper_link_get(). It is normally best to use
+ * cvmx_helper_link_autoconf() instead.
+ *
+ * Sequence: quiesce RX/TX and backpressure, wait for GMX idle,
+ * disable the port, apply speed/duplex, then restore everything.
+ *
+ * @param ipd_port IPD/PKO port to configure
+ * @param link_info The new link state
+ *
+ * @return Zero on success, negative on failure
+ */
+int __cvmx_helper_rgmii_link_set(int ipd_port,
+ cvmx_helper_link_info_t link_info)
+{
+ int result = 0;
+ int interface = cvmx_helper_get_interface_num(ipd_port);
+ int index = cvmx_helper_get_interface_index_num(ipd_port);
+ union cvmx_gmxx_prtx_cfg original_gmx_cfg;
+ union cvmx_gmxx_prtx_cfg new_gmx_cfg;
+ union cvmx_pko_mem_queue_qos pko_mem_queue_qos;
+ /* NOTE(review): assumes at most 16 PKO queues per port — confirm */
+ union cvmx_pko_mem_queue_qos pko_mem_queue_qos_save[16];
+ union cvmx_gmxx_tx_ovr_bp gmx_tx_ovr_bp;
+ union cvmx_gmxx_tx_ovr_bp gmx_tx_ovr_bp_save;
+ int i;
+
+ /* Read the current settings so we know the current enable state */
+ original_gmx_cfg.u64 = csr_rd(CVMX_GMXX_PRTX_CFG(index, interface));
+ new_gmx_cfg = original_gmx_cfg;
+
+ /* Disable the lowest level RX */
+ csr_wr(CVMX_ASXX_RX_PRT_EN(interface),
+ csr_rd(CVMX_ASXX_RX_PRT_EN(interface)) & ~(1 << index));
+
+ memset(pko_mem_queue_qos_save, 0, sizeof(pko_mem_queue_qos_save));
+ /* Disable all queues so that TX should become idle */
+ for (i = 0; i < cvmx_pko_get_num_queues(ipd_port); i++) {
+ int queue = cvmx_pko_get_base_queue(ipd_port) + i;
+
+ csr_wr(CVMX_PKO_REG_READ_IDX, queue);
+ pko_mem_queue_qos.u64 = csr_rd(CVMX_PKO_MEM_QUEUE_QOS);
+ pko_mem_queue_qos.s.pid = ipd_port;
+ pko_mem_queue_qos.s.qid = queue;
+ /* Save the original QoS mask, then zero it to stall TX */
+ pko_mem_queue_qos_save[i] = pko_mem_queue_qos;
+ pko_mem_queue_qos.s.qos_mask = 0;
+ csr_wr(CVMX_PKO_MEM_QUEUE_QOS, pko_mem_queue_qos.u64);
+ }
+
+ /* Disable backpressure */
+ gmx_tx_ovr_bp.u64 = csr_rd(CVMX_GMXX_TX_OVR_BP(interface));
+ gmx_tx_ovr_bp_save = gmx_tx_ovr_bp;
+ gmx_tx_ovr_bp.s.bp &= ~(1 << index);
+ gmx_tx_ovr_bp.s.en |= 1 << index;
+ csr_wr(CVMX_GMXX_TX_OVR_BP(interface), gmx_tx_ovr_bp.u64);
+ /* Read back to flush the write before polling */
+ csr_rd(CVMX_GMXX_TX_OVR_BP(interface));
+
+ /*
+ * Poll the GMX state machine waiting for it to become
+ * idle. Preferably we should only change speed when it is
+ * idle. If it doesn't become idle we will still do the speed
+ * change, but there is a slight chance that GMX will
+ * lockup.
+ */
+ csr_wr(CVMX_NPI_DBG_SELECT, interface * 0x800 + index * 0x100 + 0x880);
+ CVMX_WAIT_FOR_FIELD64(CVMX_DBG_DATA, cvmx_dbg_data_t, data & 7, ==, 0,
+ 10000);
+ CVMX_WAIT_FOR_FIELD64(CVMX_DBG_DATA, cvmx_dbg_data_t, data & 0xf, ==, 0,
+ 10000);
+
+ /* Disable the port before we make any changes */
+ new_gmx_cfg.s.en = 0;
+ csr_wr(CVMX_GMXX_PRTX_CFG(index, interface), new_gmx_cfg.u64);
+ csr_rd(CVMX_GMXX_PRTX_CFG(index, interface));
+
+ /* Set full/half duplex */
+ if (!link_info.s.link_up)
+ /* Force full duplex on down links */
+ new_gmx_cfg.s.duplex = 1;
+ else
+ new_gmx_cfg.s.duplex = link_info.s.full_duplex;
+
+ /* Set the link speed. Anything unknown is set to 1Gbps.
+ * NOTE(review): the 10 and 100 branches program identical
+ * slottime/speed bits; the rates differ only via the TXX_CLK
+ * divider written below — confirm against the hardware manual.
+ */
+ if (link_info.s.speed == 10) {
+ new_gmx_cfg.s.slottime = 0;
+ new_gmx_cfg.s.speed = 0;
+ } else if (link_info.s.speed == 100) {
+ new_gmx_cfg.s.slottime = 0;
+ new_gmx_cfg.s.speed = 0;
+ } else {
+ new_gmx_cfg.s.slottime = 1;
+ new_gmx_cfg.s.speed = 1;
+ }
+
+ /* Adjust the clocks */
+ if (link_info.s.speed == 10) {
+ csr_wr(CVMX_GMXX_TXX_CLK(index, interface), 50);
+ csr_wr(CVMX_GMXX_TXX_SLOT(index, interface), 0x40);
+ csr_wr(CVMX_GMXX_TXX_BURST(index, interface), 0);
+ } else if (link_info.s.speed == 100) {
+ csr_wr(CVMX_GMXX_TXX_CLK(index, interface), 5);
+ csr_wr(CVMX_GMXX_TXX_SLOT(index, interface), 0x40);
+ csr_wr(CVMX_GMXX_TXX_BURST(index, interface), 0);
+ } else {
+ csr_wr(CVMX_GMXX_TXX_CLK(index, interface), 1);
+ csr_wr(CVMX_GMXX_TXX_SLOT(index, interface), 0x200);
+ csr_wr(CVMX_GMXX_TXX_BURST(index, interface), 0x2000);
+ }
+
+ /* Do a read to make sure all setup stuff is complete */
+ csr_rd(CVMX_GMXX_PRTX_CFG(index, interface));
+
+ /* Save the new GMX setting without enabling the port */
+ csr_wr(CVMX_GMXX_PRTX_CFG(index, interface), new_gmx_cfg.u64);
+
+ /* Enable the lowest level RX */
+ if (link_info.s.link_up)
+ csr_wr(CVMX_ASXX_RX_PRT_EN(interface),
+ csr_rd(CVMX_ASXX_RX_PRT_EN(interface)) | (1 << index));
+
+ /* Re-enable the TX path */
+ for (i = 0; i < cvmx_pko_get_num_queues(ipd_port); i++) {
+ int queue = cvmx_pko_get_base_queue(ipd_port) + i;
+
+ csr_wr(CVMX_PKO_REG_READ_IDX, queue);
+ csr_wr(CVMX_PKO_MEM_QUEUE_QOS, pko_mem_queue_qos_save[i].u64);
+ }
+
+ /* Restore backpressure */
+ csr_wr(CVMX_GMXX_TX_OVR_BP(interface), gmx_tx_ovr_bp_save.u64);
+
+ /* Restore the GMX enable state. Port config is complete */
+ new_gmx_cfg.s.en = original_gmx_cfg.s.en;
+ csr_wr(CVMX_GMXX_PRTX_CFG(index, interface), new_gmx_cfg.u64);
+
+ return result;
+}
+
+/**
+ * @INTERNAL
+ * Configure a port for internal and/or external loopback. Internal loopback
+ * causes packets sent by the port to be received by Octeon. External loopback
+ * causes packets received from the wire to sent out again.
+ *
+ * @param ipd_port IPD/PKO port to loopback.
+ * @param enable_internal
+ * Non zero if you want internal loopback
+ * @param enable_external
+ * Non zero if you want external loopback
+ *
+ * @return Zero on success, negative on failure.
+ */
+int __cvmx_helper_rgmii_configure_loopback(int ipd_port, int enable_internal,
+ int enable_external)
+{
+ int interface = cvmx_helper_get_interface_num(ipd_port);
+ int index = cvmx_helper_get_interface_index_num(ipd_port);
+ int original_enable;
+ union cvmx_gmxx_prtx_cfg gmx_cfg;
+ union cvmx_asxx_prt_loop asxx_prt_loop;
+
+ /* Read the current enable state and save it */
+ gmx_cfg.u64 = csr_rd(CVMX_GMXX_PRTX_CFG(index, interface));
+ original_enable = gmx_cfg.s.en;
+ /* Force port to be disabled */
+ gmx_cfg.s.en = 0;
+ if (enable_internal) {
+ /* Force speed if we're doing internal loopback */
+ gmx_cfg.s.duplex = 1;
+ gmx_cfg.s.slottime = 1;
+ gmx_cfg.s.speed = 1;
+ csr_wr(CVMX_GMXX_TXX_CLK(index, interface), 1);
+ csr_wr(CVMX_GMXX_TXX_SLOT(index, interface), 0x200);
+ csr_wr(CVMX_GMXX_TXX_BURST(index, interface), 0x2000);
+ }
+ csr_wr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
+
+ /* Set the loopback bits */
+ asxx_prt_loop.u64 = csr_rd(CVMX_ASXX_PRT_LOOP(interface));
+ if (enable_internal)
+ asxx_prt_loop.s.int_loop |= 1 << index;
+ else
+ asxx_prt_loop.s.int_loop &= ~(1 << index);
+ if (enable_external)
+ asxx_prt_loop.s.ext_loop |= 1 << index;
+ else
+ asxx_prt_loop.s.ext_loop &= ~(1 << index);
+ csr_wr(CVMX_ASXX_PRT_LOOP(interface), asxx_prt_loop.u64);
+
+ /* Force enables in internal loopback */
+ if (enable_internal) {
+ u64 tmp;
+
+ tmp = csr_rd(CVMX_ASXX_TX_PRT_EN(interface));
+ csr_wr(CVMX_ASXX_TX_PRT_EN(interface), (1 << index) | tmp);
+ tmp = csr_rd(CVMX_ASXX_RX_PRT_EN(interface));
+ csr_wr(CVMX_ASXX_RX_PRT_EN(interface), (1 << index) | tmp);
+ /* Internal loopback always leaves the port enabled */
+ original_enable = 1;
+ }
+
+ /* Restore the enable state */
+ gmx_cfg.s.en = original_enable;
+ csr_wr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
+ return 0;
+}
--
2.35.1
^ permalink raw reply related [flat|nested] 52+ messages in thread* [PATCH 21/52] mips: octeon: Add cvmx-helper-sgmii.c
2022-03-30 10:06 [PATCH 00/52] mips: octeon: Add ethernet support Stefan Roese
` (18 preceding siblings ...)
2022-03-30 10:06 ` [PATCH 20/52] mips: octeon: Add cvmx-helper-rgmii.c Stefan Roese
@ 2022-03-30 10:06 ` Stefan Roese
2022-03-30 10:06 ` [PATCH 22/52] mips: octeon: Add cvmx-helper-sfp.c Stefan Roese
` (29 subsequent siblings)
49 siblings, 0 replies; 52+ messages in thread
From: Stefan Roese @ 2022-03-30 10:06 UTC (permalink / raw)
To: u-boot; +Cc: awilliams, cchavva
From: Aaron Williams <awilliams@marvell.com>
Import cvmx-helper-sgmii.c from 2013 U-Boot. It will be used by the later
added drivers to support networking on the MIPS Octeon II / III
platforms.
Signed-off-by: Aaron Williams <awilliams@marvell.com>
Signed-off-by: Stefan Roese <sr@denx.de>
---
arch/mips/mach-octeon/cvmx-helper-sgmii.c | 781 ++++++++++++++++++++++
1 file changed, 781 insertions(+)
create mode 100644 arch/mips/mach-octeon/cvmx-helper-sgmii.c
diff --git a/arch/mips/mach-octeon/cvmx-helper-sgmii.c b/arch/mips/mach-octeon/cvmx-helper-sgmii.c
new file mode 100644
index 000000000000..b789ad5d1913
--- /dev/null
+++ b/arch/mips/mach-octeon/cvmx-helper-sgmii.c
@@ -0,0 +1,781 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018-2022 Marvell International Ltd.
+ *
+ * Functions for SGMII initialization, configuration,
+ * and monitoring.
+ */
+
+#include <time.h>
+#include <log.h>
+#include <linux/delay.h>
+
+#include <mach/cvmx-regs.h>
+#include <mach/cvmx-csr.h>
+#include <mach/cvmx-bootmem.h>
+#include <mach/octeon-model.h>
+#include <mach/cvmx-fuse.h>
+#include <mach/octeon-feature.h>
+#include <mach/cvmx-qlm.h>
+#include <mach/octeon_qlm.h>
+#include <mach/cvmx-pcie.h>
+#include <mach/cvmx-coremask.h>
+
+#include <mach/cvmx-agl-defs.h>
+#include <mach/cvmx-bgxx-defs.h>
+#include <mach/cvmx-ciu-defs.h>
+#include <mach/cvmx-gmxx-defs.h>
+#include <mach/cvmx-ipd-defs.h>
+#include <mach/cvmx-pcsx-defs.h>
+#include <mach/cvmx-pki-defs.h>
+#include <mach/cvmx-xcv-defs.h>
+
+#include <mach/cvmx-helper.h>
+#include <mach/cvmx-helper-board.h>
+#include <mach/cvmx-helper-cfg.h>
+
+/**
+ * @INTERNAL
+ * Perform initialization required only once for an SGMII port.
+ *
+ * @param interface to init
+ * @param index Index of port on the interface
+ *
+ * @return Zero on success, negative on failure
+ */
+static int __cvmx_helper_sgmii_hardware_init_one_time(int interface, int index)
+{
+ const u64 clock_mhz = 1200; /* todo: fixme — hard-coded clock; should query the real SCLK */
+ union cvmx_pcsx_miscx_ctl_reg pcsx_miscx_ctl_reg;
+ union cvmx_pcsx_linkx_timer_count_reg pcsx_linkx_timer_count_reg;
+ union cvmx_gmxx_prtx_cfg gmxx_prtx_cfg;
+
+ if (!cvmx_helper_is_port_valid(interface, index))
+ return 0;
+
+ /* Disable GMX */
+ gmxx_prtx_cfg.u64 = csr_rd(CVMX_GMXX_PRTX_CFG(index, interface));
+ gmxx_prtx_cfg.s.en = 0;
+ csr_wr(CVMX_GMXX_PRTX_CFG(index, interface), gmxx_prtx_cfg.u64);
+
+ /*
+ * Write PCS*_LINK*_TIMER_COUNT_REG[COUNT] with the
+ * appropriate value. 1000BASE-X specifies a 10ms
+ * interval. SGMII specifies a 1.6ms interval.
+ */
+ pcsx_miscx_ctl_reg.u64 =
+ csr_rd(CVMX_PCSX_MISCX_CTL_REG(index, interface));
+ /* Adjust the MAC mode if requested by device tree */
+ pcsx_miscx_ctl_reg.s.mac_phy =
+ cvmx_helper_get_mac_phy_mode(interface, index);
+ pcsx_miscx_ctl_reg.s.mode =
+ cvmx_helper_get_1000x_mode(interface, index);
+ csr_wr(CVMX_PCSX_MISCX_CTL_REG(index, interface),
+ pcsx_miscx_ctl_reg.u64);
+
+ pcsx_linkx_timer_count_reg.u64 =
+ csr_rd(CVMX_PCSX_LINKX_TIMER_COUNT_REG(index, interface));
+ if (pcsx_miscx_ctl_reg.s.mode)
+ /* 1000BASE-X: 10ms timer (10000us * clock, scaled by 2^10) */
+ pcsx_linkx_timer_count_reg.s.count =
+ (10000ull * clock_mhz) >> 10;
+ else
+ /* SGMII: 1.6ms timer */
+ pcsx_linkx_timer_count_reg.s.count =
+ (1600ull * clock_mhz) >> 10;
+
+ csr_wr(CVMX_PCSX_LINKX_TIMER_COUNT_REG(index, interface),
+ pcsx_linkx_timer_count_reg.u64);
+
+ /*
+ * Write the advertisement register to be used as the
+ * tx_Config_Reg<D15:D0> of the autonegotiation. In
+ * 1000BASE-X mode, tx_Config_Reg<D15:D0> is PCS*_AN*_ADV_REG.
+ * In SGMII PHY mode, tx_Config_Reg<D15:D0> is
+ * PCS*_SGM*_AN_ADV_REG. In SGMII MAC mode,
+ * tx_Config_Reg<D15:D0> is the fixed value 0x4001, so this
+ * step can be skipped.
+ */
+ if (pcsx_miscx_ctl_reg.s.mode) {
+ /* 1000BASE-X */
+ union cvmx_pcsx_anx_adv_reg pcsx_anx_adv_reg;
+
+ pcsx_anx_adv_reg.u64 =
+ csr_rd(CVMX_PCSX_ANX_ADV_REG(index, interface));
+ pcsx_anx_adv_reg.s.rem_flt = 0;
+ /* Advertise symmetric + asymmetric pause, half/full duplex */
+ pcsx_anx_adv_reg.s.pause = 3;
+ pcsx_anx_adv_reg.s.hfd = 1;
+ pcsx_anx_adv_reg.s.fd = 1;
+ csr_wr(CVMX_PCSX_ANX_ADV_REG(index, interface),
+ pcsx_anx_adv_reg.u64);
+ } else {
+ if (pcsx_miscx_ctl_reg.s.mac_phy) {
+ /* PHY Mode */
+ union cvmx_pcsx_sgmx_an_adv_reg pcsx_sgmx_an_adv_reg;
+
+ pcsx_sgmx_an_adv_reg.u64 = csr_rd(
+ CVMX_PCSX_SGMX_AN_ADV_REG(index, interface));
+ pcsx_sgmx_an_adv_reg.s.dup = 1;
+ pcsx_sgmx_an_adv_reg.s.speed = 2;
+ csr_wr(CVMX_PCSX_SGMX_AN_ADV_REG(index, interface),
+ pcsx_sgmx_an_adv_reg.u64);
+ } else {
+ /* MAC Mode - Nothing to do */
+ }
+ }
+ return 0;
+}
+
+/**
+ * @INTERNAL
+ * Report whether this model needs the errata G-15618 workaround
+ * (skip the PCS soft reset).
+ *
+ * @return 1 if the workaround is required, 0 otherwise
+ */
+static int __cvmx_helper_need_g15618(void)
+{
+ return (OCTEON_IS_MODEL(OCTEON_CN63XX) ||
+ OCTEON_IS_MODEL(OCTEON_CN66XX_PASS1_X) ||
+ OCTEON_IS_MODEL(OCTEON_CN68XX)) ? 1 : 0;
+}
+
<doc_update>
+/**
+ * @INTERNAL
+ * Initialize the SERDES link for the first time or after a loss
+ * of link.
+ *
+ * @param interface to init
+ * @param index Index of port on the interface
+ *
+ * @return Zero on success, negative on failure
+ */
+static int __cvmx_helper_sgmii_hardware_init_link(int interface, int index)
+{
+ union cvmx_pcsx_mrx_control_reg control_reg;
+ union cvmx_pcsx_miscx_ctl_reg pcsx_miscx_ctl_reg;
+ bool phy_mode;
+ bool an_disable; /** Disable autonegotiation */
+ bool mode_1000x; /** 1000Base-X mode */
+
+ if (!cvmx_helper_is_port_valid(interface, index))
+ return 0;
+
+ /*
+ * Take PCS through a reset sequence.
+ * PCS*_MR*_CONTROL_REG[PWR_DN] should be cleared to zero.
+ * Write PCS*_MR*_CONTROL_REG[RESET]=1 (while not changing the
+ * value of the other PCS*_MR*_CONTROL_REG bits). Read
+ * PCS*_MR*_CONTROL_REG[RESET] until it changes value to
+ * zero.
+ */
+ control_reg.u64 = csr_rd(CVMX_PCSX_MRX_CONTROL_REG(index, interface));
+
+ /*
+ * Errata G-15618 requires disabling PCS soft reset in CN63XX
+ * passes up to 2.1.
+ */
+ if (!__cvmx_helper_need_g15618()) {
+ control_reg.s.reset = 1;
+ csr_wr(CVMX_PCSX_MRX_CONTROL_REG(index, interface),
+ control_reg.u64);
+ if (CVMX_WAIT_FOR_FIELD64(
+ CVMX_PCSX_MRX_CONTROL_REG(index, interface),
+ cvmx_pcsx_mrx_control_reg_t, reset, ==, 0, 10000)) {
+ debug("SGMII%x: Timeout waiting for port %d to finish reset\n",
+ interface, index);
+ return -1;
+ }
+ }
+
+ /*
+ * Write PCS*_MR*_CONTROL_REG[RST_AN]=1 to ensure a fresh
+ * sgmii negotiation starts.
+ */
+ phy_mode = cvmx_helper_get_mac_phy_mode(interface, index);
+ an_disable = (phy_mode ||
+ !cvmx_helper_get_port_autonegotiation(interface, index));
+
+ control_reg.s.an_en = !an_disable;
+
+ /* Force a PCS reset by powering down the PCS interface
+ * This is needed to deal with broken Qualcomm/Atheros PHYs and switches
+ * which never recover if PCS is not power cycled. The alternative
+ * is to power cycle or hardware reset the Qualcomm devices whenever
+ * SGMII is initialized.
+ *
+ * This is needed for the QCA8033 PHYs as well as the QCA833X switches
+ * to work. The QCA8337 switch has additional SGMII problems and is
+ * best avoided if at all possible. Failure to power cycle PCS prevents
+ * any traffic from flowing between Octeon and Qualcomm devices if there
+ * is a warm reset. Even a software reset to the Qualcomm device will
+ * not work.
+ *
+ * Note that this problem has been reported between Qualcomm and other
+ * vendor's processors as well so this problem is not unique to
+ * Qualcomm and Octeon.
+ *
+ * Power cycling PCS doesn't hurt anything with non-Qualcomm devices
+ * other than adding a 25ms delay during initialization.
+ */
+ control_reg.s.pwr_dn = 1;
+ csr_wr(CVMX_PCSX_MRX_CONTROL_REG(index, interface), control_reg.u64);
+ /* Read back to flush the power-down write before the delay */
+ csr_rd(CVMX_PCSX_MRX_CONTROL_REG(index, interface));
+
+ /* 25ms should be enough, 10ms is too short */
+ mdelay(25);
+
+ control_reg.s.pwr_dn = 0;
+ csr_wr(CVMX_PCSX_MRX_CONTROL_REG(index, interface), control_reg.u64);
+
+ /* The Cortina PHY runs in 1000base-X mode */
+ mode_1000x = cvmx_helper_get_1000x_mode(interface, index);
+ pcsx_miscx_ctl_reg.u64 =
+ csr_rd(CVMX_PCSX_MISCX_CTL_REG(index, interface));
+ pcsx_miscx_ctl_reg.s.mode = mode_1000x;
+ pcsx_miscx_ctl_reg.s.mac_phy = phy_mode;
+ csr_wr(CVMX_PCSX_MISCX_CTL_REG(index, interface),
+ pcsx_miscx_ctl_reg.u64);
+ if (an_disable)
+ /* In PHY mode we can't query the link status so we just
+ * assume that the link is up.
+ */
+ return 0;
+
+ /*
+ * Wait for PCS*_MR*_STATUS_REG[AN_CPT] to be set, indicating
+ * that sgmii autonegotiation is complete. In MAC mode this
+ * isn't an ethernet link, but a link between Octeon and the
+ * PHY.
+ */
+ if (CVMX_WAIT_FOR_FIELD64(CVMX_PCSX_MRX_STATUS_REG(index, interface),
+ union cvmx_pcsx_mrx_status_reg, an_cpt, ==, 1,
+ 10000)) {
+ debug("SGMII%x: Port %d link timeout\n", interface, index);
+ return -1;
+ }
+ return 0;
+}
+
+/**
+ * @INTERNAL
+ * Configure an SGMII link to the specified speed after the SERDES
+ * link is up.
+ *
+ * @param interface to init
+ * @param index Index of port on the interface
+ * @param link_info Link state to configure
+ *
+ * @return Zero on success, negative on failure
+ */
+static int
+__cvmx_helper_sgmii_hardware_init_link_speed(int interface, int index,
+ cvmx_helper_link_info_t link_info)
+{
+ int is_enabled;
+ union cvmx_gmxx_prtx_cfg gmxx_prtx_cfg;
+ union cvmx_pcsx_miscx_ctl_reg pcsx_miscx_ctl_reg;
+
+ if (!cvmx_helper_is_port_valid(interface, index))
+ return 0;
+
+ /* Disable GMX before we make any changes. Remember the enable state */
+ gmxx_prtx_cfg.u64 = csr_rd(CVMX_GMXX_PRTX_CFG(index, interface));
+ is_enabled = gmxx_prtx_cfg.s.en;
+ gmxx_prtx_cfg.s.en = 0;
+ csr_wr(CVMX_GMXX_PRTX_CFG(index, interface), gmxx_prtx_cfg.u64);
+
+ /* Wait for GMX to be idle */
+ if (CVMX_WAIT_FOR_FIELD64(CVMX_GMXX_PRTX_CFG(index, interface),
+ cvmx_gmxx_prtx_cfg_t, rx_idle, ==, 1,
+ 10000) ||
+ CVMX_WAIT_FOR_FIELD64(CVMX_GMXX_PRTX_CFG(index, interface),
+ cvmx_gmxx_prtx_cfg_t, tx_idle, ==, 1,
+ 10000)) {
+ debug("SGMII%d: Timeout waiting for port %d to be idle\n",
+ interface, index);
+ return -1;
+ }
+
+ /* Read GMX CFG again to make sure the disable completed */
+ gmxx_prtx_cfg.u64 = csr_rd(CVMX_GMXX_PRTX_CFG(index, interface));
+
+ /*
+ * Get the misc control for PCS. We will need to set the
+ * duplication amount.
+ */
+ pcsx_miscx_ctl_reg.u64 =
+ csr_rd(CVMX_PCSX_MISCX_CTL_REG(index, interface));
+
+ /*
+ * Use GMXENO to force the link down if the status we get says
+ * it should be down.
+ */
+ pcsx_miscx_ctl_reg.s.gmxeno = !link_info.s.link_up;
+
+ /* Only change the duplex setting if the link is up */
+ if (link_info.s.link_up)
+ gmxx_prtx_cfg.s.duplex = link_info.s.full_duplex;
+
+ /* Do speed based setting for GMX */
+ switch (link_info.s.speed) {
+ case 10:
+ gmxx_prtx_cfg.s.speed = 0;
+ gmxx_prtx_cfg.s.speed_msb = 1;
+ gmxx_prtx_cfg.s.slottime = 0;
+ /* Setting from GMX-603 */
+ pcsx_miscx_ctl_reg.s.samp_pt = 25;
+ csr_wr(CVMX_GMXX_TXX_SLOT(index, interface), 64);
+ csr_wr(CVMX_GMXX_TXX_BURST(index, interface), 0);
+ break;
+ case 100:
+ gmxx_prtx_cfg.s.speed = 0;
+ gmxx_prtx_cfg.s.speed_msb = 0;
+ gmxx_prtx_cfg.s.slottime = 0;
+ pcsx_miscx_ctl_reg.s.samp_pt = 0x5;
+ csr_wr(CVMX_GMXX_TXX_SLOT(index, interface), 64);
+ csr_wr(CVMX_GMXX_TXX_BURST(index, interface), 0);
+ break;
+ case 1000:
+ gmxx_prtx_cfg.s.speed = 1;
+ gmxx_prtx_cfg.s.speed_msb = 0;
+ gmxx_prtx_cfg.s.slottime = 1;
+ pcsx_miscx_ctl_reg.s.samp_pt = 1;
+ csr_wr(CVMX_GMXX_TXX_SLOT(index, interface), 512);
+ if (gmxx_prtx_cfg.s.duplex)
+ /* full duplex */
+ csr_wr(CVMX_GMXX_TXX_BURST(index, interface), 0);
+ else
+ /* half duplex */
+ csr_wr(CVMX_GMXX_TXX_BURST(index, interface), 8192);
+ break;
+ default:
+ /* Unknown speed: leave the current GMX settings in place */
+ break;
+ }
+
+ /* Write the new misc control for PCS */
+ csr_wr(CVMX_PCSX_MISCX_CTL_REG(index, interface),
+ pcsx_miscx_ctl_reg.u64);
+
+ /* Write the new GMX settings with the port still disabled */
+ csr_wr(CVMX_GMXX_PRTX_CFG(index, interface), gmxx_prtx_cfg.u64);
+
+ /* Read GMX CFG again to make sure the config completed */
+ gmxx_prtx_cfg.u64 = csr_rd(CVMX_GMXX_PRTX_CFG(index, interface));
+
+ /* Restore the enabled / disabled state */
+ gmxx_prtx_cfg.s.en = is_enabled;
+ csr_wr(CVMX_GMXX_PRTX_CFG(index, interface), gmxx_prtx_cfg.u64);
+
+ return 0;
+}
+
+/**
+ * @INTERNAL
+ * Bring up the SGMII interface to be ready for packet I/O but
+ * leave I/O disabled using the GMX override. This function
+ * follows the bringup documented in 10.6.3 of the manual.
+ *
+ * @param interface to bringup
+ * @param num_ports Number of ports on the interface
+ *
+ * @return Zero on success, negative on failure
+ */
+static int __cvmx_helper_sgmii_hardware_init(int interface, int num_ports)
+{
+ int index;
+ int do_link_set = 1; /* NOTE(review): always 1 — dead flag from the import? */
+
+ /*
+ * CN63XX Pass 1.0 errata G-14395 requires the QLM De-emphasis
+ * be programmed.
+ */
+ if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_0)) {
+ union cvmx_ciu_qlm2 ciu_qlm;
+
+ ciu_qlm.u64 = csr_rd(CVMX_CIU_QLM2);
+ ciu_qlm.s.txbypass = 1;
+ ciu_qlm.s.txdeemph = 0xf;
+ ciu_qlm.s.txmargin = 0xd;
+ csr_wr(CVMX_CIU_QLM2, ciu_qlm.u64);
+ }
+
+ /*
+ * CN63XX Pass 2.x errata G-15273 requires the QLM De-emphasis
+ * be programmed when using a 156.25Mhz ref clock.
+ */
+ if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS2_X)) {
+ /* Read the QLM speed pins */
+ union cvmx_mio_rst_boot mio_rst_boot;
+
+ mio_rst_boot.u64 = csr_rd(CVMX_MIO_RST_BOOT);
+
+ if (mio_rst_boot.cn63xx.qlm2_spd == 4) {
+ union cvmx_ciu_qlm2 ciu_qlm;
+
+ ciu_qlm.u64 = csr_rd(CVMX_CIU_QLM2);
+ ciu_qlm.s.txbypass = 1;
+ ciu_qlm.s.txdeemph = 0x0;
+ ciu_qlm.s.txmargin = 0xf;
+ csr_wr(CVMX_CIU_QLM2, ciu_qlm.u64);
+ }
+ }
+
+ __cvmx_helper_setup_gmx(interface, num_ports);
+
+ /* One-time PCS init, then sync link state into the hardware */
+ for (index = 0; index < num_ports; index++) {
+ int ipd_port = cvmx_helper_get_ipd_port(interface, index);
+
+ if (!cvmx_helper_is_port_valid(interface, index))
+ continue;
+ __cvmx_helper_sgmii_hardware_init_one_time(interface, index);
+ if (do_link_set)
+ __cvmx_helper_sgmii_link_set(ipd_port,
+ __cvmx_helper_sgmii_link_get(ipd_port));
+ }
+
+ return 0;
+}
+
+/**
+ * @INTERNAL
+ * Report how many SGMII ports the given interface carries on this model.
+ *
+ * @param xiface Interface to enumerate
+ *
+ * @return Port count: 2 on CNF71XX; on CN70XX 1 for SGMII DLM mode,
+ *	   4 for QSGMII, otherwise 0; 4 on all other models.
+ */
+int __cvmx_helper_sgmii_enumerate(int xiface)
+{
+ struct cvmx_xiface xi;
+ enum cvmx_qlm_mode dlm_mode;
+
+ if (OCTEON_IS_MODEL(OCTEON_CNF71XX))
+ return 2;
+
+ if (!OCTEON_IS_MODEL(OCTEON_CN70XX))
+ return 4;
+
+ /* CN70XX: the DLM mode decides between SGMII and QSGMII */
+ xi = cvmx_helper_xiface_to_node_interface(xiface);
+ dlm_mode = cvmx_qlm_get_dlm_mode(0, xi.interface);
+ switch (dlm_mode) {
+ case CVMX_QLM_MODE_SGMII:
+ return 1;
+ case CVMX_QLM_MODE_QSGMII:
+ return 4;
+ default:
+ return 0;
+ }
+}
+
+/**
+ * @INTERNAL
+ * Probe a SGMII interface and determine the number of ports
+ * connected to it. The SGMII interface should still be down after
+ * this call.
+ *
+ * @param xiface Interface to probe
+ *
+ * @return Number of ports on the interface. Zero to disable.
+ */
+int __cvmx_helper_sgmii_probe(int xiface)
+{
+ struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
+ int interface = xi.interface;
+ union cvmx_gmxx_inf_mode mode;
+ int ports;
+
+ /*
+ * Check if QLM is configured correct for SGMII, verify the
+ * speed as well as mode.
+ */
+ if (OCTEON_IS_OCTEON2()) {
+ int qlm = cvmx_qlm_interface(xiface);
+
+ if (cvmx_qlm_get_mode(qlm) != CVMX_QLM_MODE_SGMII)
+ return 0;
+ }
+
+ /* Do not enable the interface if is not in SGMII mode */
+ ports = __cvmx_helper_sgmii_enumerate(xiface);
+
+ if (ports <= 0)
+ return 0;
+
+ /*
+ * Due to errata GMX-700 on CN56XXp1.x and CN52XXp1.x, the
+ * interface needs to be enabled before IPD otherwise per port
+ * backpressure may not work properly.
+ */
+ mode.u64 = csr_rd(CVMX_GMXX_INF_MODE(interface));
+ mode.s.en = 1;
+ csr_wr(CVMX_GMXX_INF_MODE(interface), mode.u64);
+
+ return ports;
+}
+
+/**
+ * @INTERNAL
+ * Bringup and enable a SGMII interface. After this call packet
+ * I/O should be fully functional. This is called with IPD
+ * enabled but PKO disabled.
+ *
+ * @param xiface Interface to bring up
+ *
+ * @return Zero on success, negative on failure
+ */
+int __cvmx_helper_sgmii_enable(int xiface)
+{
+ int num_ports = cvmx_helper_ports_on_interface(xiface);
+ struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
+ int interface = xi.interface;
+ int index;
+
+ /* Setup PKND and BPID */
+ if (octeon_has_feature(OCTEON_FEATURE_PKND)) {
+ for (index = 0; index < num_ports; index++) {
+ union cvmx_gmxx_bpid_msk bpid_msk;
+ union cvmx_gmxx_bpid_mapx bpid_map;
+ union cvmx_gmxx_prtx_cfg gmxx_prtx_cfg;
+
+ if (!cvmx_helper_is_port_valid(interface, index))
+ continue;
+ /* Setup PKIND */
+ gmxx_prtx_cfg.u64 =
+ csr_rd(CVMX_GMXX_PRTX_CFG(index, interface));
+ gmxx_prtx_cfg.s.pknd =
+ cvmx_helper_get_pknd(interface, index);
+ csr_wr(CVMX_GMXX_PRTX_CFG(index, interface),
+ gmxx_prtx_cfg.u64);
+
+ /* Setup BPID */
+ bpid_map.u64 =
+ csr_rd(CVMX_GMXX_BPID_MAPX(index, interface));
+ bpid_map.s.val = 1;
+ bpid_map.s.bpid =
+ cvmx_helper_get_bpid(interface, index);
+ csr_wr(CVMX_GMXX_BPID_MAPX(index, interface),
+ bpid_map.u64);
+
+ bpid_msk.u64 = csr_rd(CVMX_GMXX_BPID_MSK(interface));
+ bpid_msk.s.msk_or |= (1 << index);
+ bpid_msk.s.msk_and &= ~(1 << index);
+ csr_wr(CVMX_GMXX_BPID_MSK(interface), bpid_msk.u64);
+ }
+ }
+
+ __cvmx_helper_sgmii_hardware_init(interface, num_ports);
+
+ /* CN68XX adds the padding and FCS in PKO, not GMX */
+ if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
+ union cvmx_gmxx_txx_append gmxx_txx_append_cfg;
+
+ for (index = 0; index < num_ports; index++) {
+ if (!cvmx_helper_is_port_valid(interface, index))
+ continue;
+ gmxx_txx_append_cfg.u64 =
+ csr_rd(CVMX_GMXX_TXX_APPEND(index, interface));
+ gmxx_txx_append_cfg.s.fcs = 0;
+ gmxx_txx_append_cfg.s.pad = 0;
+ csr_wr(CVMX_GMXX_TXX_APPEND(index, interface),
+ gmxx_txx_append_cfg.u64);
+ }
+ }
+
+ /* Enable running disparity check for QSGMII interface */
+ if (OCTEON_IS_MODEL(OCTEON_CN70XX) && num_ports > 1) {
+ union cvmx_gmxx_qsgmii_ctl qsgmii_ctl;
+
+ qsgmii_ctl.u64 = 0;
+ qsgmii_ctl.s.disparity = 1;
+ csr_wr(CVMX_GMXX_QSGMII_CTL(interface), qsgmii_ctl.u64);
+ }
+
+ /* Final per-port setup, then enable each port in GMX */
+ for (index = 0; index < num_ports; index++) {
+ union cvmx_gmxx_txx_append append_cfg;
+ union cvmx_gmxx_txx_sgmii_ctl sgmii_ctl;
+ union cvmx_gmxx_prtx_cfg gmxx_prtx_cfg;
+
+ if (!cvmx_helper_is_port_valid(interface, index))
+ continue;
+ /*
+ * Clear the align bit if preamble is set to attain
+ * maximum tx rate.
+ */
+ append_cfg.u64 = csr_rd(CVMX_GMXX_TXX_APPEND(index, interface));
+ sgmii_ctl.u64 =
+ csr_rd(CVMX_GMXX_TXX_SGMII_CTL(index, interface));
+ sgmii_ctl.s.align = append_cfg.s.preamble ? 0 : 1;
+ csr_wr(CVMX_GMXX_TXX_SGMII_CTL(index, interface),
+ sgmii_ctl.u64);
+
+ gmxx_prtx_cfg.u64 =
+ csr_rd(CVMX_GMXX_PRTX_CFG(index, interface));
+ gmxx_prtx_cfg.s.en = 1;
+ csr_wr(CVMX_GMXX_PRTX_CFG(index, interface), gmxx_prtx_cfg.u64);
+ }
+ return 0;
+}
+
+/**
+ * @INTERNAL
+ * Return the link state of an IPD/PKO port as returned by
+ * auto negotiation. The result of this function may not match
+ * Octeon's link config if auto negotiation has changed since
+ * the last call to cvmx_helper_link_set().
+ *
+ * @param ipd_port IPD/PKO port to query
+ *
+ * @return Link state
+ */
+cvmx_helper_link_info_t __cvmx_helper_sgmii_link_get(int ipd_port)
+{
+ cvmx_helper_link_info_t result;
+ union cvmx_pcsx_miscx_ctl_reg pcsx_miscx_ctl_reg;
+ int interface = cvmx_helper_get_interface_num(ipd_port);
+ int index = cvmx_helper_get_interface_index_num(ipd_port);
+ union cvmx_pcsx_mrx_control_reg pcsx_mrx_control_reg;
+ int speed = 1000;
+ int qlm;
+
+ result.u64 = 0;
+
+ /* Invalid ports report link down (result is all zeroes) */
+ if (!cvmx_helper_is_port_valid(interface, index))
+ return result;
+
+ /* Derive the port speed from the chip model / QLM baud rate */
+ if (OCTEON_IS_MODEL(OCTEON_CN66XX)) {
+ union cvmx_gmxx_inf_mode inf_mode;
+
+ /* CN66XX: per-port rate bit selects 2.5G vs 1G */
+ inf_mode.u64 = csr_rd(CVMX_GMXX_INF_MODE(interface));
+ if (inf_mode.s.rate & (1 << index))
+ speed = 2500;
+ else
+ speed = 1000;
+ } else if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
+ /* 8b/10b line coding: payload rate is baud * 8 / 10 */
+ qlm = cvmx_qlm_interface(interface);
+ speed = cvmx_qlm_get_gbaud_mhz(qlm) * 8 / 10;
+ } else if (OCTEON_IS_MODEL(OCTEON_CNF71XX)) {
+ speed = cvmx_qlm_get_gbaud_mhz(0) * 8 / 10;
+ } else if (OCTEON_IS_MODEL(OCTEON_CN70XX)) {
+ /*
+ * CN70XX: the DLM is shared, so divide the lane rate by
+ * 2 for SGMII, otherwise by 4 (presumably QSGMII --
+ * NOTE(review): confirm)
+ */
+ speed = cvmx_qlm_get_gbaud_mhz(0) * 8 / 10;
+ if (cvmx_qlm_get_dlm_mode(0, interface) == CVMX_QLM_MODE_SGMII)
+ speed >>= 1;
+ else
+ speed >>= 2;
+ }
+
+ pcsx_mrx_control_reg.u64 =
+ csr_rd(CVMX_PCSX_MRX_CONTROL_REG(index, interface));
+ if (pcsx_mrx_control_reg.s.loopbck1) {
+ /* Force 1Gbps full duplex link for internal loopback */
+ result.s.link_up = 1;
+ result.s.full_duplex = 1;
+ result.s.speed = speed;
+ return result;
+ }
+
+ pcsx_miscx_ctl_reg.u64 =
+ csr_rd(CVMX_PCSX_MISCX_CTL_REG(index, interface));
+ if (pcsx_miscx_ctl_reg.s.mac_phy ||
+ cvmx_helper_get_port_force_link_up(interface, index)) {
+ /* PHY Mode */
+ /* Note that this also works for 1000base-X mode */
+
+ result.s.speed = speed;
+ result.s.full_duplex = 1;
+ result.s.link_up = 1;
+ return result;
+ }
+
+ /* MAC Mode: ask the board support code / external PHY */
+ return __cvmx_helper_board_link_get(ipd_port);
+}
+
+/**
+ * @INTERNAL
+ * Configure an IPD/PKO port for the specified link state. This
+ * function does not influence auto negotiation at the PHY level.
+ * The passed link state must always match the link state returned
+ * by cvmx_helper_link_get(). It is normally best to use
+ * cvmx_helper_link_autoconf() instead.
+ *
+ * @param ipd_port IPD/PKO port to configure
+ * @param link_info The new link state
+ *
+ * @return Zero on success, negative on failure
+ */
+int __cvmx_helper_sgmii_link_set(int ipd_port,
+ cvmx_helper_link_info_t link_info)
+{
+ union cvmx_pcsx_mrx_control_reg control_reg;
+ int interface = cvmx_helper_get_interface_num(ipd_port);
+ int index = cvmx_helper_get_interface_index_num(ipd_port);
+
+ /* Nothing to do for invalid ports */
+ if (!cvmx_helper_is_port_valid(interface, index))
+ return 0;
+
+ /* For some devices, i.e. the Qualcomm QCA8337 switch we need to power
+ * down the PCS interface when the link goes down and power it back
+ * up when the link returns.
+ */
+ if (link_info.s.link_up || !__cvmx_helper_need_g15618()) {
+ __cvmx_helper_sgmii_hardware_init_link(interface, index);
+ } else {
+ union cvmx_pcsx_miscx_ctl_reg pcsx_miscx_ctl_reg;
+
+ pcsx_miscx_ctl_reg.u64 =
+ csr_rd(CVMX_PCSX_MISCX_CTL_REG(index, interface));
+
+ /* Disable autonegotiation when MAC mode is enabled or
+ * autonegotiation is disabled.
+ */
+ control_reg.u64 =
+ csr_rd(CVMX_PCSX_MRX_CONTROL_REG(index, interface));
+ if (pcsx_miscx_ctl_reg.s.mac_phy == 0 ||
+ !cvmx_helper_get_port_autonegotiation(interface, index)) {
+ /* Force 1G (spdmsb=1/spdlsb=0) full duplex */
+ control_reg.s.an_en = 0;
+ control_reg.s.spdmsb = 1;
+ control_reg.s.spdlsb = 0;
+ control_reg.s.dup = 1;
+ }
+ csr_wr(CVMX_PCSX_MRX_CONTROL_REG(index, interface),
+ control_reg.u64);
+ /* Read back to make sure the write has been committed */
+ csr_rd(CVMX_PCSX_MRX_CONTROL_REG(index, interface));
+ /*
+ * Use GMXENO to force the link down it will get
+ * reenabled later...
+ */
+ pcsx_miscx_ctl_reg.s.gmxeno = 1;
+ csr_wr(CVMX_PCSX_MISCX_CTL_REG(index, interface),
+ pcsx_miscx_ctl_reg.u64);
+ csr_rd(CVMX_PCSX_MISCX_CTL_REG(index, interface));
+ return 0;
+ }
+ return __cvmx_helper_sgmii_hardware_init_link_speed(interface, index,
+ link_info);
+}
+
+/**
+ * @INTERNAL
+ * Configure a port for internal and/or external loopback. Internal loopback
+ * causes packets sent by the port to be received by Octeon. External loopback
+ * causes packets received from the wire to sent out again.
+ *
+ * @param ipd_port IPD/PKO port to loopback.
+ * @param enable_internal
+ * Non zero if you want internal loopback
+ * @param enable_external
+ * Non zero if you want external loopback
+ *
+ * @return Zero on success, negative on failure.
+ */
+int __cvmx_helper_sgmii_configure_loopback(int ipd_port, int enable_internal,
+ int enable_external)
+{
+ int interface = cvmx_helper_get_interface_num(ipd_port);
+ int index = cvmx_helper_get_interface_index_num(ipd_port);
+ union cvmx_pcsx_mrx_control_reg pcsx_mrx_control_reg;
+ union cvmx_pcsx_miscx_ctl_reg pcsx_miscx_ctl_reg;
+
+ if (!cvmx_helper_is_port_valid(interface, index))
+ return 0;
+
+ /* Internal loopback is controlled by the PCS MR control register */
+ pcsx_mrx_control_reg.u64 =
+ csr_rd(CVMX_PCSX_MRX_CONTROL_REG(index, interface));
+ pcsx_mrx_control_reg.s.loopbck1 = enable_internal;
+ csr_wr(CVMX_PCSX_MRX_CONTROL_REG(index, interface),
+ pcsx_mrx_control_reg.u64);
+
+ /* External loopback is controlled by the PCS misc control register */
+ pcsx_miscx_ctl_reg.u64 =
+ csr_rd(CVMX_PCSX_MISCX_CTL_REG(index, interface));
+ pcsx_miscx_ctl_reg.s.loopbck2 = enable_external;
+ csr_wr(CVMX_PCSX_MISCX_CTL_REG(index, interface),
+ pcsx_miscx_ctl_reg.u64);
+
+ /* Re-run link init so the new loopback settings take effect */
+ __cvmx_helper_sgmii_hardware_init_link(interface, index);
+ return 0;
+}
--
2.35.1
^ permalink raw reply related [flat|nested] 52+ messages in thread* [PATCH 22/52] mips: octeon: Add cvmx-helper-sfp.c
2022-03-30 10:06 [PATCH 00/52] mips: octeon: Add ethernet support Stefan Roese
` (19 preceding siblings ...)
2022-03-30 10:06 ` [PATCH 21/52] mips: octeon: Add cvmx-helper-sgmii.c Stefan Roese
@ 2022-03-30 10:06 ` Stefan Roese
2022-03-30 10:07 ` [PATCH 24/52] mips: octeon: Add cvmx-agl.c Stefan Roese
` (28 subsequent siblings)
49 siblings, 0 replies; 52+ messages in thread
From: Stefan Roese @ 2022-03-30 10:06 UTC (permalink / raw)
To: u-boot; +Cc: awilliams, cchavva
From: Aaron Williams <awilliams@marvell.com>
Import cvmx-helper-sfp.c from 2013 U-Boot. It will be used by the
drivers added later to support networking on the MIPS Octeon II / III
platforms.
Signed-off-by: Aaron Williams <awilliams@marvell.com>
Signed-off-by: Stefan Roese <sr@denx.de>
---
arch/mips/mach-octeon/cvmx-helper-sfp.c | 1877 +++++++++++++++++++++++
1 file changed, 1877 insertions(+)
create mode 100644 arch/mips/mach-octeon/cvmx-helper-sfp.c
diff --git a/arch/mips/mach-octeon/cvmx-helper-sfp.c b/arch/mips/mach-octeon/cvmx-helper-sfp.c
new file mode 100644
index 000000000000..a17ac542fcf8
--- /dev/null
+++ b/arch/mips/mach-octeon/cvmx-helper-sfp.c
@@ -0,0 +1,1877 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018-2022 Marvell International Ltd.
+ */
+
+#include <errno.h>
+#include <i2c.h>
+#include <log.h>
+#include <malloc.h>
+#include <linux/delay.h>
+#include <display_options.h>
+
+#include <mach/cvmx-regs.h>
+#include <mach/cvmx-csr.h>
+#include <mach/cvmx-bootmem.h>
+#include <mach/octeon-model.h>
+#include <mach/cvmx-fuse.h>
+#include <mach/octeon-feature.h>
+#include <mach/cvmx-qlm.h>
+#include <mach/octeon_qlm.h>
+#include <mach/cvmx-pcie.h>
+#include <mach/cvmx-coremask.h>
+
+#include <mach/cvmx-helper.h>
+#include <mach/cvmx-helper-board.h>
+#include <mach/cvmx-helper-fdt.h>
+#include <mach/cvmx-helper-cfg.h>
+#include <mach/cvmx-helper-gpio.h>
+#include <mach/cvmx-helper-util.h>
+
+extern void octeon_i2c_unblock(int bus);
+
+static struct cvmx_fdt_sfp_info *sfp_list;
+
+/**
+ * Allocate zero-initialized memory; used by both SE and U-Boot builds.
+ *
+ * @param size number of bytes to allocate
+ *
+ * @return pointer to zeroed memory, or NULL when out of memory.
+ * Alignment is set to 8-bytes.
+ */
+static void *cvm_sfp_alloc(size_t size)
+{
+ /* calloc() allocates and clears the buffer in one step */
+ return calloc(1, size);
+}
+
+/**
+ * Release memory previously obtained from cvm_sfp_alloc().
+ *
+ * @param ptr pointer to the buffer to release
+ * @param size size of the buffer (unused here)
+ *
+ * NOTE: Freeing only takes effect in U-Boot since SE does not really
+ * have a freeing mechanism. In SE the memory is zeroed out and not
+ * freed so this is a memory leak if errors occur.
+ */
+static inline void cvm_sfp_free(void *ptr, size_t size)
+{
+ free(ptr);
+}
+
+/**
+ * Select a QSFP device before accessing the EEPROM
+ *
+ * @param sfp handle for sfp/qsfp connector
+ * @param enable Set true to select, false to deselect
+ *
+ * @return 0 on success or if SFP or no select GPIO, -1 on GPIO error
+ */
+static int cvmx_qsfp_select(const struct cvmx_fdt_sfp_info *sfp, bool enable)
+{
+ /* Plain SFP slots have no select line, so there is nothing to do */
+ if (!sfp->is_qsfp) {
+ debug("%s(%s, %d): not QSFP\n", __func__, sfp->name, enable);
+ return 0;
+ }
+
+ if (!dm_gpio_is_valid(&sfp->select)) {
+ debug("%s: select GPIO unknown\n", __func__);
+ return 0;
+ }
+
+ /* Note that select is active low */
+ return dm_gpio_set_value(&sfp->select, !enable);
+}
+
+/**
+ * Parse a raw SFF-8472 style SFP EEPROM image into sfp_info.
+ *
+ * @param sfp_info output module information structure (fields filled in)
+ * @param buffer raw 256-byte EEPROM contents (lower page at offset 0)
+ *
+ * @return 0 on success, -1 if the checksums are bad or the module
+ * type / extended identifier is not supported
+ */
+static int cvmx_sfp_parse_sfp_buffer(struct cvmx_sfp_mod_info *sfp_info,
+ const uint8_t *buffer)
+{
+ u8 csum = 0;
+ bool csum_good = false;
+ int i;
+
+ /* Validate the checksum (CC_BASE covers 0x00-0x3e, CC_EXT 0x40-0x5e) */
+ for (i = 0; i < 0x3f; i++)
+ csum += buffer[i];
+ csum_good = csum == buffer[0x3f];
+ debug("%s: Lower checksum: 0x%02x, expected: 0x%02x\n", __func__, csum,
+ buffer[0x3f]);
+ csum = 0;
+ for (i = 0x40; i < 0x5f; i++)
+ csum += buffer[i];
+ debug("%s: Upper checksum: 0x%02x, expected: 0x%02x\n", __func__, csum,
+ buffer[0x5f]);
+ if (csum != buffer[0x5f] || !csum_good) {
+ debug("Error: SFP EEPROM checksum information is incorrect\n");
+ return -1;
+ }
+
+ sfp_info->conn_type = buffer[0];
+ if (buffer[1] < 1 || buffer[1] > 7) { /* Extended ID */
+ debug("Error: Unknown SFP extended identifier 0x%x\n",
+ buffer[1]);
+ return -1;
+ }
+ if (buffer[1] != 4) {
+ debug("Module is not SFP/SFP+/SFP28/QSFP+\n");
+ return -1;
+ }
+ sfp_info->mod_type = buffer[2];
+ sfp_info->eth_comp = buffer[3] & 0xf0;
+ sfp_info->cable_comp = buffer[0x24];
+
+ /* There are several ways a cable can be marked as active or
+ * passive. 8.[2-3] specify the SFP+ cable technology. Some
+ * modules also use 3.[0-1] for Infiniband, though it's
+ * redundant.
+ */
+ if ((buffer[8] & 0x0C) == 0x08) {
+ sfp_info->limiting = true;
+ sfp_info->active_cable = true;
+ } else if ((buffer[8] & 0xC) == 0x4) {
+ sfp_info->limiting = false;
+ sfp_info->active_cable = false;
+ }
+ if ((buffer[3] & 3) == 2) {
+ sfp_info->active_cable = true;
+ sfp_info->limiting = true;
+ }
+
+ /* Classify copper vs. optical from the module/cable type codes */
+ switch (sfp_info->mod_type) {
+ case CVMX_SFP_MOD_OPTICAL_LC:
+ case CVMX_SFP_MOD_OPTICAL_PIGTAIL:
+ sfp_info->copper_cable = false;
+ break;
+ case CVMX_SFP_MOD_COPPER_PIGTAIL:
+ sfp_info->copper_cable = true;
+ break;
+ case CVMX_SFP_MOD_NO_SEP_CONN:
+ switch (sfp_info->cable_comp) {
+ case CVMX_SFP_CABLE_100G_25GAUI_C2M_AOC_HIGH_BER:
+ case CVMX_SFP_CABLE_100G_25GAUI_C2M_AOC_LOW_BER:
+ case CVMX_SFP_CABLE_100G_25GAUI_C2M_ACC_LOW_BER:
+ sfp_info->copper_cable = false;
+ sfp_info->limiting = true;
+ sfp_info->active_cable = true;
+ break;
+
+ case CVMX_SFP_CABLE_100G_SR4_25G_SR:
+ case CVMX_SFP_CABLE_100G_LR4_25G_LR:
+ case CVMX_SFP_CABLE_100G_ER4_25G_ER:
+ case CVMX_SFP_CABLE_100G_SR10:
+ case CVMX_SFP_CABLE_100G_CWDM4_MSA:
+ case CVMX_SFP_CABLE_100G_PSM4:
+ case CVMX_SFP_CABLE_100G_CWDM4:
+ case CVMX_SFP_CABLE_40G_ER4:
+ case CVMX_SFP_CABLE_4X10G_SR:
+ case CVMX_SFP_CABLE_G959_1_P1I1_2D1:
+ case CVMX_SFP_CABLE_G959_1_P1S1_2D2:
+ case CVMX_SFP_CABLE_G959_1_P1L1_2D2:
+ case CVMX_SFP_CABLE_100G_CLR4:
+ case CVMX_SFP_CABLE_100G_2_LAMBDA_DWDM:
+ case CVMX_SFP_CABLE_40G_SWDM4:
+ case CVMX_SFP_CABLE_100G_SWDM4:
+ case CVMX_SFP_CABLE_100G_PAM4_BIDI:
+ sfp_info->copper_cable = false;
+ break;
+
+ case CVMX_SFP_CABLE_100G_25GAUI_C2M_ACC_HIGH_BER:
+ case CVMX_SFP_CABLE_10GBASE_T:
+ case CVMX_SFP_CABLE_10GBASE_T_SR:
+ case CVMX_SFP_CABLE_5GBASE_T:
+ case CVMX_SFP_CABLE_2_5GBASE_T:
+ sfp_info->copper_cable = true;
+ sfp_info->limiting = true;
+ sfp_info->active_cable = true;
+ break;
+
+ case CVMX_SFP_CABLE_100G_CR4_25G_CR_CA_L:
+ case CVMX_SFP_CABLE_25G_CR_CA_S:
+ case CVMX_SFP_CABLE_25G_CR_CA_N:
+ case CVMX_SFP_CABLE_40G_PSM4:
+ sfp_info->copper_cable = true;
+ break;
+
+ default:
+ /* Fall back to the Ethernet compliance code */
+ switch (sfp_info->eth_comp) {
+ case CVMX_SFP_CABLE_10GBASE_ER:
+ case CVMX_SFP_CABLE_10GBASE_LRM:
+ case CVMX_SFP_CABLE_10GBASE_LR:
+ case CVMX_SFP_CABLE_10GBASE_SR:
+ sfp_info->copper_cable = false;
+ break;
+ }
+ break;
+ }
+ break;
+
+ case CVMX_SFP_MOD_RJ45:
+ debug("%s: RJ45 adapter\n", __func__);
+ sfp_info->copper_cable = true;
+ sfp_info->active_cable = true;
+ sfp_info->limiting = true;
+ break;
+ case CVMX_SFP_MOD_UNKNOWN:
+ /* The Avago 1000Base-X to 1000Base-T module reports that it
+ * is an unknown module type but the Ethernet compliance code
+ * says it is 1000Base-T. We'll change the reporting to RJ45.
+ */
+ if (buffer[6] & 8) {
+ debug("RJ45 gigabit module detected\n");
+ sfp_info->mod_type = CVMX_SFP_MOD_RJ45;
+ sfp_info->copper_cable = false;
+ sfp_info->limiting = true;
+ sfp_info->active_cable = true;
+ sfp_info->max_copper_cable_len = buffer[0x12];
+ sfp_info->rate = CVMX_SFP_RATE_1G;
+ } else {
+ debug("Unknown module type 0x%x\n", sfp_info->mod_type);
+ }
+ sfp_info->limiting = true;
+ break;
+ case CVMX_SFP_MOD_MXC_2X16:
+ debug("%s: MXC 2X16\n", __func__);
+ break;
+ default:
+ sfp_info->limiting = true;
+ break;
+ }
+
+ /* Byte 0x12 holds copper length (m) or OM4 length (units of 10m) */
+ if (sfp_info->copper_cable)
+ sfp_info->max_copper_cable_len = buffer[0x12];
+ else
+ sfp_info->max_50um_om4_cable_length = buffer[0x12] * 10;
+
+ /* Single-mode length: byte 0xe in km, else byte 0xf in 100m units */
+ if (buffer[0xe])
+ sfp_info->max_single_mode_cable_length = buffer[0xe] * 1000;
+ else
+ sfp_info->max_single_mode_cable_length = buffer[0xf] * 100000;
+
+ sfp_info->max_50um_om2_cable_length = buffer[0x10] * 10;
+ sfp_info->max_62_5um_om1_cable_length = buffer[0x11] * 10;
+ sfp_info->max_50um_om3_cable_length = buffer[0x13] * 10;
+
+ /*
+ * Derive the rate class from the nominal bit rate: byte 0xc in
+ * units of 100Mbd, or byte 0x42 in units of 250Mbd when 0xc
+ * reads 0xff.
+ */
+ if (buffer[0xc] == 0xff) {
+ if (buffer[0x42] >= 255)
+ sfp_info->rate = CVMX_SFP_RATE_100G;
+ else if (buffer[0x42] >= 160)
+ sfp_info->rate = CVMX_SFP_RATE_40G;
+ else if (buffer[0x42] >= 100)
+ sfp_info->rate = CVMX_SFP_RATE_25G;
+ else
+ sfp_info->rate = CVMX_SFP_RATE_UNKNOWN;
+ } else if (buffer[0xc] >= 100) {
+ sfp_info->rate = CVMX_SFP_RATE_10G;
+ } else if (buffer[0xc] >= 10) {
+ sfp_info->rate = CVMX_SFP_RATE_1G;
+ } else {
+ sfp_info->rate = CVMX_SFP_RATE_UNKNOWN;
+ }
+
+ /* If the bit rate was inconclusive, fall back to the cable type */
+ if (sfp_info->rate == CVMX_SFP_RATE_UNKNOWN) {
+ switch (sfp_info->cable_comp) {
+ case CVMX_SFP_CABLE_100G_SR10:
+ case CVMX_SFP_CABLE_100G_CWDM4_MSA:
+ case CVMX_SFP_CABLE_100G_PSM4:
+ case CVMX_SFP_CABLE_100G_CWDM4:
+ case CVMX_SFP_CABLE_100G_CLR4:
+ case CVMX_SFP_CABLE_100G_2_LAMBDA_DWDM:
+ case CVMX_SFP_CABLE_100G_SWDM4:
+ case CVMX_SFP_CABLE_100G_PAM4_BIDI:
+ sfp_info->rate = CVMX_SFP_RATE_100G;
+ break;
+ case CVMX_SFP_CABLE_100G_25GAUI_C2M_AOC_HIGH_BER:
+ case CVMX_SFP_CABLE_100G_SR4_25G_SR:
+ case CVMX_SFP_CABLE_100G_LR4_25G_LR:
+ case CVMX_SFP_CABLE_100G_ER4_25G_ER:
+ case CVMX_SFP_CABLE_100G_25GAUI_C2M_ACC_HIGH_BER:
+ case CVMX_SFP_CABLE_100G_CR4_25G_CR_CA_L:
+ case CVMX_SFP_CABLE_25G_CR_CA_S:
+ case CVMX_SFP_CABLE_25G_CR_CA_N:
+ case CVMX_SFP_CABLE_100G_25GAUI_C2M_AOC_LOW_BER:
+ case CVMX_SFP_CABLE_100G_25GAUI_C2M_ACC_LOW_BER:
+ sfp_info->rate = CVMX_SFP_RATE_25G;
+ break;
+ case CVMX_SFP_CABLE_40G_ER4:
+ case CVMX_SFP_CABLE_4X10G_SR:
+ case CVMX_SFP_CABLE_40G_PSM4:
+ case CVMX_SFP_CABLE_40G_SWDM4:
+ sfp_info->rate = CVMX_SFP_RATE_40G;
+ break;
+ case CVMX_SFP_CABLE_G959_1_P1I1_2D1:
+ case CVMX_SFP_CABLE_G959_1_P1S1_2D2:
+ case CVMX_SFP_CABLE_G959_1_P1L1_2D2:
+ case CVMX_SFP_CABLE_10GBASE_T:
+ case CVMX_SFP_CABLE_10GBASE_T_SR:
+ case CVMX_SFP_CABLE_5GBASE_T:
+ case CVMX_SFP_CABLE_2_5GBASE_T:
+ sfp_info->rate = CVMX_SFP_RATE_10G;
+ break;
+ default:
+ switch (sfp_info->eth_comp) {
+ case CVMX_SFP_CABLE_10GBASE_ER:
+ case CVMX_SFP_CABLE_10GBASE_LRM:
+ case CVMX_SFP_CABLE_10GBASE_LR:
+ case CVMX_SFP_CABLE_10GBASE_SR:
+ sfp_info->rate = CVMX_SFP_RATE_10G;
+ break;
+ default:
+ sfp_info->rate = CVMX_SFP_RATE_UNKNOWN;
+ break;
+ }
+ break;
+ }
+ }
+
+ if (buffer[0xc] < 0xff)
+ sfp_info->bitrate_max = buffer[0xc] * 100;
+ else
+ sfp_info->bitrate_max = buffer[0x42] * 250;
+
+ if ((buffer[8] & 0xc) == 8) {
+ if (buffer[0x3c] & 0x4)
+ sfp_info->limiting = true;
+ }
+
+ /* Currently we only set this for 25G. FEC is required for CA-S cables
+ * and for cable lengths >= 5M as of this writing.
+ */
+ if ((sfp_info->rate == CVMX_SFP_RATE_25G &&
+ sfp_info->copper_cable) &&
+ (sfp_info->cable_comp == CVMX_SFP_CABLE_25G_CR_CA_S ||
+ sfp_info->max_copper_cable_len >= 5))
+ sfp_info->fec_required = true;
+
+ /* copy strings and vendor info, strings will be automatically NUL
+ * terminated.
+ */
+ memcpy(sfp_info->vendor_name, &buffer[0x14], 16);
+ memcpy(sfp_info->vendor_oui, &buffer[0x25], 3);
+ memcpy(sfp_info->vendor_pn, &buffer[0x28], 16);
+ memcpy(sfp_info->vendor_rev, &buffer[0x38], 4);
+ memcpy(sfp_info->vendor_sn, &buffer[0x44], 16);
+ memcpy(sfp_info->date_code, &buffer[0x54], 8);
+
+ /* Decode the option and diagnostic capability bit fields */
+ sfp_info->cooled_laser = !!(buffer[0x40] & 4);
+ sfp_info->internal_cdr = !!(buffer[0x40] & 8);
+
+ if (buffer[0x40] & 0x20)
+ sfp_info->power_level = 3;
+ else
+ sfp_info->power_level = (buffer[0x40] & 2) ? 2 : 1;
+
+ sfp_info->diag_paging = !!(buffer[0x40] & 0x10);
+ sfp_info->linear_rx_output = !(buffer[0x40] & 1);
+ sfp_info->los_implemented = !!(buffer[0x41] & 2);
+ sfp_info->los_inverted = !!(buffer[0x41] & 4);
+ sfp_info->tx_fault_implemented = !!(buffer[0x41] & 8);
+ sfp_info->tx_disable_implemented = !!(buffer[0x41] & 0x10);
+ sfp_info->rate_select_implemented = !!(buffer[0x41] & 0x20);
+ sfp_info->tuneable_transmitter = !!(buffer[0x41] & 0x40);
+ sfp_info->rx_decision_threshold_implemented = !!(buffer[0x41] & 0x80);
+
+ sfp_info->diag_monitoring = !!(buffer[0x5c] & 0x40);
+ sfp_info->diag_rx_power_averaged = !!(buffer[0x5c] & 0x8);
+ sfp_info->diag_externally_calibrated = !!(buffer[0x5c] & 0x10);
+ sfp_info->diag_internally_calibrated = !!(buffer[0x5c] & 0x20);
+ sfp_info->diag_addr_change_required = !!(buffer[0x5c] & 0x4);
+ sfp_info->diag_soft_rate_select_control = !!(buffer[0x5d] & 2);
+ sfp_info->diag_app_select_control = !!(buffer[0x5d] & 4);
+ /* NOTE(review): diag_soft_rate_select_control is assigned twice
+ * (bit 1 above, bit 3 here); the second store wins -- confirm
+ * which SFF-8472 bit is intended.
+ */
+ sfp_info->diag_soft_rate_select_control = !!(buffer[0x5d] & 8);
+ sfp_info->diag_soft_rx_los_implemented = !!(buffer[0x5d] & 0x10);
+ sfp_info->diag_soft_tx_fault_implemented = !!(buffer[0x5d] & 0x20);
+ sfp_info->diag_soft_tx_disable_implemented = !!(buffer[0x5d] & 0x40);
+ sfp_info->diag_alarm_warning_flags_implemented =
+ !!(buffer[0x5d] & 0x80);
+ sfp_info->diag_rev = buffer[0x5e];
+
+ return 0;
+}
+
+/**
+ * Parse a raw SFF-8636 style QSFP EEPROM image into sfp_info.
+ *
+ * All static module data lives in the upper page, i.e. at the SFP
+ * offsets plus 0x80.
+ *
+ * @param sfp_info output module information structure (fields filled in)
+ * @param buffer raw 256-byte EEPROM contents
+ *
+ * @return 0 on success, -1 if the checksums are bad
+ */
+static int cvmx_sfp_parse_qsfp_buffer(struct cvmx_sfp_mod_info *sfp_info,
+ const uint8_t *buffer)
+{
+ u8 csum = 0;
+ bool csum_good = false;
+ int i;
+
+ /* Validate the checksum (CC_BASE 0x80-0xbe, CC_EXT 0xc0-0xde) */
+ for (i = 0x80; i < 0xbf; i++)
+ csum += buffer[i];
+ csum_good = csum == buffer[0xbf];
+ debug("%s: Lower checksum: 0x%02x, expected: 0x%02x\n", __func__, csum,
+ buffer[0xbf]);
+ csum = 0;
+ for (i = 0xc0; i < 0xdf; i++)
+ csum += buffer[i];
+ debug("%s: Upper checksum: 0x%02x, expected: 0x%02x\n", __func__, csum,
+ buffer[0xdf]);
+ if (csum != buffer[0xdf] || !csum_good) {
+ debug("Error: SFP EEPROM checksum information is incorrect\n");
+ return -1;
+ }
+
+ sfp_info->conn_type = buffer[0x80];
+ sfp_info->mod_type = buffer[0x82];
+ sfp_info->eth_comp = buffer[0x83] & 0xf0;
+ sfp_info->cable_comp = buffer[0xa4];
+
+ /* Classify copper vs. optical from the module type code */
+ switch (sfp_info->mod_type) {
+ case CVMX_SFP_MOD_COPPER_PIGTAIL:
+ case CVMX_SFP_MOD_NO_SEP_CONN:
+ debug("%s: copper pigtail or no separable cable\n", __func__);
+ /* There are several ways a cable can be marked as active or
+ * passive. 8.[2-3] specify the SFP+ cable technology. Some
+ * modules also use 3.[0-1] for Infiniband, though it's
+ * redundant.
+ */
+ sfp_info->copper_cable = true;
+ if ((buffer[0x88] & 0x0C) == 0x08) {
+ sfp_info->limiting = true;
+ sfp_info->active_cable = true;
+ } else if ((buffer[0x88] & 0xC) == 0x4) {
+ sfp_info->limiting = false;
+ sfp_info->active_cable = false;
+ }
+ if ((buffer[0x83] & 3) == 2) {
+ sfp_info->active_cable = true;
+ sfp_info->limiting = true;
+ }
+ break;
+ case CVMX_SFP_MOD_RJ45:
+ debug("%s: RJ45 adapter\n", __func__);
+ sfp_info->copper_cable = true;
+ sfp_info->active_cable = true;
+ sfp_info->limiting = true;
+ break;
+ case CVMX_SFP_MOD_UNKNOWN:
+ debug("Unknown module type\n");
+ /* The Avago 1000Base-X to 1000Base-T module reports that it
+ * is an unknown module type but the Ethernet compliance code
+ * says it is 1000Base-T. We'll change the reporting to RJ45.
+ */
+ if (buffer[0x86] & 8) {
+ sfp_info->mod_type = CVMX_SFP_MOD_RJ45;
+ sfp_info->copper_cable = false;
+ sfp_info->limiting = true;
+ sfp_info->active_cable = true;
+ sfp_info->max_copper_cable_len = buffer[0x92];
+ sfp_info->rate = CVMX_SFP_RATE_1G;
+ }
+ fallthrough;
+ default:
+ sfp_info->limiting = true;
+ break;
+ }
+
+ /* Byte 0x92 holds copper length (m) or OM4 length (units of 10m) */
+ if (sfp_info->copper_cable)
+ sfp_info->max_copper_cable_len = buffer[0x92];
+ else
+ sfp_info->max_50um_om4_cable_length = buffer[0x92] * 10;
+
+ debug("%s: copper cable: %d, max copper cable len: %d\n", __func__,
+ sfp_info->copper_cable, sfp_info->max_copper_cable_len);
+ /* Single-mode length: byte 0x8e in km, else byte 0x8f in 100m
+ * units. Fix: the original code tested buffer[0xe] (the SFP
+ * offset) here instead of buffer[0x8e].
+ */
+ if (buffer[0x8e])
+ sfp_info->max_single_mode_cable_length = buffer[0x8e] * 1000;
+ else
+ sfp_info->max_single_mode_cable_length = buffer[0x8f] * 100000;
+
+ sfp_info->max_50um_om2_cable_length = buffer[0x90] * 10;
+ sfp_info->max_62_5um_om1_cable_length = buffer[0x91] * 10;
+ sfp_info->max_50um_om3_cable_length = buffer[0x93] * 10;
+
+ /* Nominal bit rate: byte 0x8c in 100Mbd units, 0xc2 in 250Mbd
+ * units when 0x8c reads 0xff
+ */
+ if (buffer[0x8c] == 12) {
+ sfp_info->rate = CVMX_SFP_RATE_1G;
+ } else if (buffer[0x8c] == 103) {
+ sfp_info->rate = CVMX_SFP_RATE_10G;
+ } else if (buffer[0x8c] == 0xff) {
+ if (buffer[0xc2] == 103)
+ sfp_info->rate = CVMX_SFP_RATE_100G;
+ }
+
+ if (buffer[0x8c] < 0xff)
+ sfp_info->bitrate_max = buffer[0x8c] * 100;
+ else
+ sfp_info->bitrate_max = buffer[0xc2] * 250;
+
+ if ((buffer[0x88] & 0xc) == 8) {
+ if (buffer[0xbc] & 0x4)
+ sfp_info->limiting = true;
+ }
+
+ /* Currently we only set this for 25G. FEC is required for CA-S cables
+ * and for cable lengths >= 5M as of this writing.
+ */
+ /* copy strings and vendor info, strings will be automatically NUL
+ * terminated.
+ */
+ memcpy(sfp_info->vendor_name, &buffer[0x94], 16);
+ memcpy(sfp_info->vendor_oui, &buffer[0xa5], 3);
+ memcpy(sfp_info->vendor_pn, &buffer[0xa8], 16);
+ memcpy(sfp_info->vendor_rev, &buffer[0xb8], 4);
+ memcpy(sfp_info->vendor_sn, &buffer[0xc4], 16);
+ memcpy(sfp_info->date_code, &buffer[0xd4], 8);
+
+ /* Decode the option and diagnostic capability bit fields */
+ sfp_info->linear_rx_output = !!(buffer[0xc0] & 1);
+ sfp_info->cooled_laser = !!(buffer[0xc0] & 4);
+ sfp_info->internal_cdr = !!(buffer[0xc0] & 8);
+
+ if (buffer[0xc0] & 0x20)
+ sfp_info->power_level = 3;
+ else
+ sfp_info->power_level = (buffer[0xc0] & 2) ? 2 : 1;
+
+ sfp_info->diag_paging = !!(buffer[0xc0] & 0x10);
+ sfp_info->los_implemented = !!(buffer[0xc1] & 2);
+ sfp_info->los_inverted = !!(buffer[0xc1] & 4);
+ sfp_info->tx_fault_implemented = !!(buffer[0xc1] & 8);
+ sfp_info->tx_disable_implemented = !!(buffer[0xc1] & 0x10);
+ sfp_info->rate_select_implemented = !!(buffer[0xc1] & 0x20);
+ sfp_info->tuneable_transmitter = !!(buffer[0xc1] & 0x40);
+ sfp_info->rx_decision_threshold_implemented = !!(buffer[0xc1] & 0x80);
+
+ sfp_info->diag_monitoring = !!(buffer[0xdc] & 0x40);
+ sfp_info->diag_rx_power_averaged = !!(buffer[0xdc] & 0x8);
+ sfp_info->diag_externally_calibrated = !!(buffer[0xdc] & 0x10);
+ sfp_info->diag_internally_calibrated = !!(buffer[0xdc] & 0x20);
+ sfp_info->diag_addr_change_required = !!(buffer[0xdc] & 0x4);
+ sfp_info->diag_soft_rate_select_control = !!(buffer[0xdd] & 2);
+ sfp_info->diag_app_select_control = !!(buffer[0xdd] & 4);
+ /* NOTE(review): diag_soft_rate_select_control is assigned twice
+ * (bit 1 above, bit 3 here); the second store wins -- confirm
+ * which bit is intended.
+ */
+ sfp_info->diag_soft_rate_select_control = !!(buffer[0xdd] & 8);
+ sfp_info->diag_soft_rx_los_implemented = !!(buffer[0xdd] & 0x10);
+ sfp_info->diag_soft_tx_fault_implemented = !!(buffer[0xdd] & 0x20);
+ sfp_info->diag_soft_tx_disable_implemented = !!(buffer[0xdd] & 0x40);
+ sfp_info->diag_alarm_warning_flags_implemented =
+ !!(buffer[0xdd] & 0x80);
+ sfp_info->diag_rev = buffer[0xde];
+
+ return 0;
+}
+
+/**
+ * Verify both EEPROM checksums (CC_BASE and CC_EXT) of an SFP or
+ * QSFP module image.
+ *
+ * @param buffer raw 256-byte EEPROM contents
+ *
+ * @return true when both checksums match, false otherwise
+ */
+static bool sfp_verify_checksum(const uint8_t *buffer)
+{
+ u8 base;
+ u8 sum;
+ int i;
+
+ /* QSFP variants keep their static data in the upper page (0x80+) */
+ switch (buffer[0]) {
+ case CVMX_SFP_CONN_QSFP:
+ case CVMX_SFP_CONN_QSFPP:
+ case CVMX_SFP_CONN_QSFP28:
+ case CVMX_SFP_CONN_MICRO_QSFP:
+ case CVMX_SFP_CONN_QSFP_DD:
+ base = 0x80;
+ break;
+ default:
+ base = 0;
+ break;
+ }
+
+ /* Lower (base) checksum covers bytes [base, base + 0x3e] */
+ sum = 0;
+ for (i = base; i < base + 0x3f; i++)
+ sum += buffer[i];
+ if (sum != buffer[base + 0x3f]) {
+ debug("%s: Lower checksum bad, got 0x%x, expected 0x%x\n",
+ __func__, sum, buffer[base + 0x3f]);
+ return false;
+ }
+
+ /* Upper (extended) checksum covers bytes [base + 0x40, base + 0x5e] */
+ sum = 0;
+ for (i = base + 0x40; i < base + 0x5f; i++)
+ sum += buffer[i];
+ if (sum != buffer[base + 0x5f]) {
+ debug("%s: Upper checksum bad, got 0x%x, expected 0x%x\n",
+ __func__, sum, buffer[base + 0x5f]);
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * Reads and parses SFP/QSFP EEPROM
+ *
+ * @param sfp sfp handle to read
+ *
+ * @return 0 for success, -1 on error.
+ */
+int cvmx_sfp_read_i2c_eeprom(struct cvmx_fdt_sfp_info *sfp)
+{
+ const struct cvmx_fdt_i2c_bus_info *bus = sfp->i2c_bus;
+ int oct_bus = cvmx_fdt_i2c_get_root_bus(bus);
+ struct udevice *dev;
+ u8 buffer[256];
+ bool is_qsfp;
+ int retry;
+ int err;
+
+ if (!bus) {
+ debug("%s(%s): Error: i2c bus undefined for eeprom\n", __func__,
+ sfp->name);
+ return -1;
+ }
+
+ /* QSFP either by previously-parsed connector type or DT flag */
+ is_qsfp = (sfp->sfp_info.conn_type == CVMX_SFP_CONN_QSFP ||
+ sfp->sfp_info.conn_type == CVMX_SFP_CONN_QSFPP ||
+ sfp->sfp_info.conn_type == CVMX_SFP_CONN_QSFP28 ||
+ sfp->sfp_info.conn_type == CVMX_SFP_CONN_MICRO_QSFP) ||
+ sfp->is_qsfp;
+
+ /* Assert the (active-low) select line for QSFP modules */
+ err = cvmx_qsfp_select(sfp, true);
+ if (err) {
+ debug("%s: Error selecting SFP/QSFP slot\n", __func__);
+ return err;
+ }
+
+ debug("%s: Reading eeprom from i2c address %d:0x%x\n", __func__,
+ oct_bus, sfp->i2c_eeprom_addr);
+ /* Retry up to 3 times; validate the checksum on every attempt */
+ for (retry = 0; retry < 3; retry++) {
+ err = i2c_get_chip(bus->i2c_bus, sfp->i2c_eeprom_addr, 1, &dev);
+ if (err) {
+ debug("Cannot find I2C device: %d\n", err);
+ goto error;
+ }
+
+ err = dm_i2c_read(dev, 0, buffer, 256);
+ if (err || !sfp_verify_checksum(buffer)) {
+ debug("%s: Error %d reading eeprom at 0x%x, bus %d\n",
+ __func__, err, sfp->i2c_eeprom_addr, oct_bus);
+ debug("%s: Retry %d\n", __func__, retry + 1);
+ mdelay(1000);
+ } else {
+ break;
+ }
+ }
+ /* NOTE(review): this early return skips the QSFP deselect and the
+ * valid-flag update done below -- confirm whether it should
+ * "goto error" instead.
+ */
+ if (err) {
+ debug("%s: Error reading eeprom from SFP %s\n", __func__,
+ sfp->name);
+ return -1;
+ }
+#ifdef DEBUG
+ print_buffer(0, buffer, 1, 256, 0);
+#endif
+ memset(&sfp->sfp_info, 0, sizeof(struct cvmx_sfp_mod_info));
+
+ /* Dispatch to the parser matching the connector type byte */
+ switch (buffer[0]) {
+ case CVMX_SFP_CONN_SFP:
+ err = cvmx_sfp_parse_sfp_buffer(&sfp->sfp_info, buffer);
+ break;
+ case CVMX_SFP_CONN_QSFP:
+ case CVMX_SFP_CONN_QSFPP:
+ case CVMX_SFP_CONN_QSFP28:
+ case CVMX_SFP_CONN_MICRO_QSFP:
+ err = cvmx_sfp_parse_qsfp_buffer(&sfp->sfp_info, buffer);
+ break;
+ default:
+ debug("%s: Unknown SFP transceiver type 0x%x\n", __func__,
+ buffer[0]);
+ err = -1;
+ break;
+ }
+
+error:
+ /* Always release the QSFP select line, preserving any error */
+ if (is_qsfp)
+ err |= cvmx_qsfp_select(sfp, false);
+
+ if (!err) {
+ sfp->valid = true;
+ sfp->sfp_info.valid = true;
+ } else {
+ sfp->valid = false;
+ sfp->sfp_info.valid = false;
+ }
+
+ return err;
+}
+
+/**
+ * Returns the information about a SFP/QSFP device
+ *
+ * @param sfp sfp handle, may be NULL
+ *
+ * @return pointer to the module info, or NULL when sfp is NULL
+ */
+const struct cvmx_sfp_mod_info *
+cvmx_phy_get_sfp_mod_info(const struct cvmx_fdt_sfp_info *sfp)
+{
+ if (!sfp)
+ return NULL;
+
+ return &sfp->sfp_info;
+}
+
+/**
+ * Function called to check and return the status of the mod_abs pin or
+ * mod_pres pin for QSFPs.
+ *
+ * @param sfp Handle to SFP information.
+ * @param data User-defined data passed to the function
+ *
+ * @return 0 if absent, 1 if present, -1 on error
+ */
+int cvmx_sfp_check_mod_abs(struct cvmx_fdt_sfp_info *sfp, void *data)
+{
+ int val;
+ int err = 0;
+ int mode;
+
+ if (!dm_gpio_is_valid(&sfp->mod_abs)) {
+ debug("%s: Error: mod_abs not set for %s\n", __func__,
+ sfp->name);
+ return -1;
+ }
+ val = dm_gpio_get_value(&sfp->mod_abs);
+ debug("%s(%s, %p) mod_abs: %d\n", __func__, sfp->name, data, val);
+ /*
+ * Only act when the pin changed since the last reading AND a
+ * change callback is registered.
+ */
+ if (val >= 0 && val != sfp->last_mod_abs && sfp->mod_abs_changed) {
+ err = 0;
+ /* Module became present (mod_abs low): re-read its EEPROM */
+ if (!val) {
+ err = cvmx_sfp_read_i2c_eeprom(sfp);
+ if (err)
+ debug("%s: Error reading SFP %s EEPROM\n",
+ __func__, sfp->name);
+ }
+ /* Notify the registered callback of the new state */
+ err = sfp->mod_abs_changed(sfp, val, sfp->mod_abs_changed_data);
+ }
+ debug("%s(%s (%p)): Last mod_abs: %d, current: %d, changed: %p, rc: %d, next: %p, caller: %p\n",
+ __func__, sfp->name, sfp, sfp->last_mod_abs, val,
+ sfp->mod_abs_changed, err, sfp->next_iface_sfp,
+ __builtin_return_address(0));
+
+ if (err >= 0) {
+ /* Remember the state and re-validate against the iface mode */
+ sfp->last_mod_abs = val;
+ mode = cvmx_helper_interface_get_mode(sfp->xiface);
+ cvmx_sfp_validate_module(sfp, mode);
+ } else {
+ debug("%s: mod_abs_changed for %s returned error\n", __func__,
+ sfp->name);
+ }
+
+ return err < 0 ? err : val;
+}
+
+/**
+ * Reads the EEPROMs of all SFP modules.
+ *
+ * Slots whose mod_abs GPIO reports an absent module are skipped, but
+ * their last known presence state is recorded.
+ *
+ * @return 0 for success, -1 when any module's EEPROM could not be read
+ */
+int cvmx_sfp_read_all_modules(void)
+{
+ struct cvmx_fdt_sfp_info *cur;
+ int failures = 0;
+
+ for (cur = sfp_list; cur; cur = cur->next) {
+ if (dm_gpio_is_valid(&cur->mod_abs)) {
+ int absent = dm_gpio_get_value(&cur->mod_abs);
+
+ cur->last_mod_abs = absent;
+ /* Nothing plugged in, skip this slot */
+ if (absent)
+ continue;
+ }
+ if (cvmx_sfp_read_i2c_eeprom(cur)) {
+ debug("%s: Error reading eeprom from SFP %s\n",
+ __func__, cur->name);
+ failures++;
+ }
+ }
+
+ return failures ? -1 : 0;
+}
+
+/**
+ * Registers a function to be called to check mod_abs/mod_pres for a SFP/QSFP
+ * slot.
+ *
+ * The handler is installed on every entry in the list starting at @sfp
+ * that shares the same interface and index.
+ *
+ * @param sfp Handle to SFP data structure
+ * @param check_mod_abs Function to be called or NULL to remove
+ * @param mod_abs_data User-defined data to be passed to check_mod_abs
+ *
+ * @return 0 for success
+ */
+int cvmx_sfp_register_check_mod_abs(struct cvmx_fdt_sfp_info *sfp,
+ int (*check_mod_abs)(struct cvmx_fdt_sfp_info *sfp,
+ void *data),
+ void *mod_abs_data)
+{
+ struct cvmx_fdt_sfp_info *cur;
+
+ for (cur = sfp; cur; cur = cur->next) {
+ if (cur->xiface == sfp->xiface && cur->index == sfp->index) {
+ cur->check_mod_abs = check_mod_abs;
+ cur->mod_abs_data = mod_abs_data;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * Registers a function to be called whenever the mod_abs/mod_pres signal
+ * changes.
+ *
+ * @param sfp			Handle to SFP data structure
+ * @param mod_abs_changed	Function called whenever mod_abs is changed
+ *				or NULL to remove.
+ * @param mod_abs_changed_data	User-defined data passed to
+ *				mod_abs_changed
+ *
+ * @return 0 for success
+ *
+ * @NOTE: If multiple SFP slots are linked together, all subsequent slots
+ *	  will also be registered for the same handler.
+ */
+int cvmx_sfp_register_mod_abs_changed(struct cvmx_fdt_sfp_info *sfp,
+				      int (*mod_abs_changed)(struct cvmx_fdt_sfp_info *sfp,
+							     int val, void *data),
+				      void *mod_abs_changed_data)
+{
+	/* Mark the cached state undefined so the next poll re-evaluates */
+	sfp->last_mod_abs = -2;
+
+	sfp->mod_abs_changed = mod_abs_changed;
+	sfp->mod_abs_changed_data = mod_abs_changed_data;
+
+	return 0;
+}
+
+/**
+ * Function called to check and return the status of the tx_fault pin
+ *
+ * @param sfp	Handle to SFP information.
+ * @param data	User-defined data passed to the function
+ *
+ * @return 0 if signal present, 1 if signal absent, -1 on error
+ */
+int cvmx_sfp_check_tx_fault(struct cvmx_fdt_sfp_info *sfp, void *data)
+{
+	int fault;
+
+	debug("%s(%s, %p)\n", __func__, sfp->name, data);
+
+	/* Without a tx_error GPIO there is nothing to sample */
+	if (!dm_gpio_is_valid(&sfp->tx_error)) {
+		printf("%s: Error: tx_error not set for %s\n", __func__,
+		       sfp->name);
+		return -1;
+	}
+
+	fault = dm_gpio_get_value(&sfp->tx_error);
+	debug("%s: tx_fault: %d\n", __func__, fault);
+	return fault;
+}
+
+/**
+ * Function called to check and return the status of the rx_los pin
+ *
+ * If the state changed since the last poll and a rx_los_changed handler is
+ * registered, the handler is invoked before the new state is cached.
+ *
+ * @param sfp	Handle to SFP information.
+ * @param data	User-defined data passed to the function
+ *
+ * @return 0 if signal present, 1 if signal absent, -1 on error
+ */
+int cvmx_sfp_check_rx_los(struct cvmx_fdt_sfp_info *sfp, void *data)
+{
+	int val;
+	/* Fix: err was read uninitialized in the debug() call below whenever
+	 * the rx_los state had not changed; default to 0 (no handler error).
+	 */
+	int err = 0;
+
+	debug("%s(%s, %p)\n", __func__, sfp->name, data);
+	if (!dm_gpio_is_valid(&sfp->rx_los)) {
+		printf("%s: Error: rx_los not set for %s\n", __func__,
+		       sfp->name);
+		return -1;
+	}
+	val = dm_gpio_get_value(&sfp->rx_los);
+	/* Only notify the registered handler on an actual state change */
+	if (val >= 0 && val != sfp->last_rx_los && sfp->rx_los_changed)
+		err = sfp->rx_los_changed(sfp, val, sfp->rx_los_changed_data);
+	debug("%s: Last rx_los: %d, current: %d, changed: %p, rc: %d\n",
+	      __func__, sfp->last_rx_los, val, sfp->rx_los_changed, err);
+	sfp->last_rx_los = val;
+
+	return val;
+}
+
+/**
+ * Registers a function to be called whenever rx_los changes
+ *
+ * @param sfp			Handle to SFP data structure
+ * @param rx_los_changed	Function to be called when rx_los changes
+ *				or NULL to remove the function
+ * @param rx_los_changed_data	User-defined data passed to
+ *				rx_los_changed
+ *
+ * @return 0 for success
+ */
+int cvmx_sfp_register_rx_los_changed(struct cvmx_fdt_sfp_info *sfp,
+				     int (*rx_los_changed)(struct cvmx_fdt_sfp_info *sfp,
+							   int val, void *data),
+				     void *rx_los_changed_data)
+{
+	/* Invalidate the cached state so the next poll re-evaluates */
+	sfp->last_rx_los = -2;
+
+	sfp->rx_los_changed = rx_los_changed;
+	sfp->rx_los_changed_data = rx_los_changed_data;
+
+	return 0;
+}
+
+/**
+ * Parses a SFP slot from the device tree
+ *
+ * @param sfp	SFP handle to store data in
+ * @param node	device tree node of the SFP slot
+ *
+ * @return 0 on success, negative on error
+ */
+static int cvmx_sfp_parse_sfp(struct cvmx_fdt_sfp_info *sfp, ofnode node)
+{
+	struct ofnode_phandle_args phandle;
+	int err;
+
+	sfp->name = ofnode_get_name(node);
+	sfp->of_offset = ofnode_to_offset(node);
+
+	err = gpio_request_by_name_nodev(node, "tx_disable", 0,
+					 &sfp->tx_disable, GPIOD_IS_OUT);
+	if (err) {
+		printf("%s: tx_disable not found in DT!\n", __func__);
+		return -ENODEV;
+	}
+	/* De-assert tx_disable so the transmitter is enabled */
+	dm_gpio_set_value(&sfp->tx_disable, 0);
+
+	/* Fix: later failures must free the GPIOs already requested above;
+	 * the original code leaked them by returning -ENODEV directly.
+	 */
+	err = gpio_request_by_name_nodev(node, "mod_abs", 0,
+					 &sfp->mod_abs, GPIOD_IS_IN);
+	if (err) {
+		printf("%s: mod_abs not found in DT!\n", __func__);
+		err = -ENODEV;
+		goto err_free_tx_disable;
+	}
+
+	err = gpio_request_by_name_nodev(node, "tx_error", 0,
+					 &sfp->tx_error, GPIOD_IS_IN);
+	if (err) {
+		printf("%s: tx_error not found in DT!\n", __func__);
+		err = -ENODEV;
+		goto err_free_mod_abs;
+	}
+
+	err = gpio_request_by_name_nodev(node, "rx_los", 0,
+					 &sfp->rx_los, GPIOD_IS_IN);
+	if (err) {
+		printf("%s: rx_los not found in DT!\n", __func__);
+		err = -ENODEV;
+		goto err_free_tx_error;
+	}
+
+	/* The EEPROM is referenced through a phandle to its i2c child node */
+	err = ofnode_parse_phandle_with_args(node, "eeprom", NULL, 0, 0,
+					     &phandle);
+	if (!err) {
+		sfp->i2c_eeprom_addr = ofnode_get_addr(phandle.node);
+		debug("%s: eeprom address: 0x%x\n", __func__,
+		      sfp->i2c_eeprom_addr);
+
+		debug("%s: Getting eeprom i2c bus for %s\n", __func__,
+		      sfp->name);
+		sfp->i2c_bus = cvmx_ofnode_get_i2c_bus(ofnode_get_parent(phandle.node));
+	}
+
+	/* The diagnostics device may live on the same bus as the EEPROM */
+	err = ofnode_parse_phandle_with_args(node, "diag", NULL, 0, 0,
+					     &phandle);
+	if (!err) {
+		sfp->i2c_diag_addr = ofnode_get_addr(phandle.node);
+		if (!sfp->i2c_bus)
+			sfp->i2c_bus = cvmx_ofnode_get_i2c_bus(ofnode_get_parent(phandle.node));
+	}
+
+	/* Mark cached mod_abs/rx_los state as undefined */
+	sfp->last_mod_abs = -2;
+	sfp->last_rx_los = -2;
+
+	if (!sfp->i2c_bus) {
+		debug("%s(%s): Error: could not get i2c bus from device tree\n",
+		      __func__, sfp->name);
+		err = -1;
+	}
+	if (err)
+		goto err_free_rx_los;
+
+	sfp->valid = true;
+	return 0;
+
+err_free_rx_los:
+	dm_gpio_free(sfp->rx_los.dev, &sfp->rx_los);
+err_free_tx_error:
+	dm_gpio_free(sfp->tx_error.dev, &sfp->tx_error);
+err_free_mod_abs:
+	dm_gpio_free(sfp->mod_abs.dev, &sfp->mod_abs);
+err_free_tx_disable:
+	dm_gpio_free(sfp->tx_disable.dev, &sfp->tx_disable);
+	return err;
+}
+
+/**
+ * Parses a QSFP slot from the device tree
+ *
+ * @param sfp	SFP handle to store data in
+ * @param node	device tree node of the QSFP slot
+ *
+ * @return 0 on success, negative on error
+ */
+static int cvmx_sfp_parse_qsfp(struct cvmx_fdt_sfp_info *sfp, ofnode node)
+{
+	struct ofnode_phandle_args phandle;
+	int err;
+
+	sfp->is_qsfp = true;
+	sfp->name = ofnode_get_name(node);
+	sfp->of_offset = ofnode_to_offset(node);
+
+	err = gpio_request_by_name_nodev(node, "lp_mode", 0,
+					 &sfp->lp_mode, GPIOD_IS_OUT);
+	if (err) {
+		printf("%s: lp_mode not found in DT!\n", __func__);
+		return -ENODEV;
+	}
+
+	/* Fix: later failures must free the GPIOs already requested above;
+	 * the original code leaked them by returning -ENODEV directly.
+	 */
+	err = gpio_request_by_name_nodev(node, "mod_prs", 0,
+					 &sfp->mod_abs, GPIOD_IS_IN);
+	if (err) {
+		printf("%s: mod_prs not found in DT!\n", __func__);
+		err = -ENODEV;
+		goto err_free_lp_mode;
+	}
+
+	err = gpio_request_by_name_nodev(node, "select", 0,
+					 &sfp->select, GPIOD_IS_IN);
+	if (err) {
+		printf("%s: select not found in DT!\n", __func__);
+		err = -ENODEV;
+		goto err_free_mod_abs;
+	}
+
+	err = gpio_request_by_name_nodev(node, "reset", 0,
+					 &sfp->reset, GPIOD_IS_OUT);
+	if (err) {
+		printf("%s: reset not found in DT!\n", __func__);
+		err = -ENODEV;
+		goto err_free_select;
+	}
+
+	err = gpio_request_by_name_nodev(node, "interrupt", 0,
+					 &sfp->interrupt, GPIOD_IS_IN);
+	if (err) {
+		printf("%s: interrupt not found in DT!\n", __func__);
+		err = -ENODEV;
+		goto err_free_reset;
+	}
+
+	/* The EEPROM is referenced through a phandle to its i2c child node */
+	err = ofnode_parse_phandle_with_args(node, "eeprom", NULL, 0, 0,
+					     &phandle);
+	if (!err) {
+		sfp->i2c_eeprom_addr = ofnode_get_addr(phandle.node);
+		sfp->i2c_bus = cvmx_ofnode_get_i2c_bus(ofnode_get_parent(phandle.node));
+	}
+
+	/* The diagnostics device may live on the same bus as the EEPROM */
+	err = ofnode_parse_phandle_with_args(node, "diag", NULL, 0, 0,
+					     &phandle);
+	if (!err) {
+		sfp->i2c_diag_addr = ofnode_get_addr(phandle.node);
+		if (!sfp->i2c_bus)
+			sfp->i2c_bus = cvmx_ofnode_get_i2c_bus(ofnode_get_parent(phandle.node));
+	}
+
+	/* Mark cached mod_abs/rx_los state as undefined */
+	sfp->last_mod_abs = -2;
+	sfp->last_rx_los = -2;
+
+	if (!sfp->i2c_bus) {
+		cvmx_printf("%s(%s): Error: could not get i2c bus from device tree\n",
+			    __func__, sfp->name);
+		err = -1;
+	}
+	if (err)
+		goto err_free_interrupt;
+
+	sfp->valid = true;
+	return 0;
+
+err_free_interrupt:
+	dm_gpio_free(sfp->interrupt.dev, &sfp->interrupt);
+err_free_reset:
+	dm_gpio_free(sfp->reset.dev, &sfp->reset);
+err_free_select:
+	dm_gpio_free(sfp->select.dev, &sfp->select);
+err_free_mod_abs:
+	dm_gpio_free(sfp->mod_abs.dev, &sfp->mod_abs);
+err_free_lp_mode:
+	dm_gpio_free(sfp->lp_mode.dev, &sfp->lp_mode);
+	return err;
+}
+
+/**
+ * Parses the device tree for SFP and QSFP slots
+ *
+ * Builds the global sfp_list from all "ethernet,sfp-slot" and
+ * "ethernet,qsfp-slot" nodes, then (on BGX-capable parts) links each slot to
+ * its BGX port and reads the module EEPROMs. Safe to call more than once;
+ * subsequent calls are no-ops.
+ *
+ * @param fdt_addr	Address of flat device-tree
+ *
+ * @return 0 for success, -1 on error
+ */
+int cvmx_sfp_parse_device_tree(const void *fdt_addr)
+{
+	struct cvmx_fdt_sfp_info *sfp, *first_sfp = NULL, *last_sfp = NULL;
+	ofnode node;
+	int err = 0;
+	int reg;
+	static bool parsed;
+
+	debug("%s(%p): Parsing...\n", __func__, fdt_addr);
+	if (parsed) {
+		debug("%s(%p): Already parsed\n", __func__, fdt_addr);
+		return 0;
+	}
+
+	ofnode_for_each_compatible_node(node, "ethernet,sfp-slot") {
+		if (!ofnode_valid(node))
+			continue;
+
+		sfp = cvm_sfp_alloc(sizeof(*sfp));
+		if (!sfp)
+			return -1;
+
+		err = cvmx_sfp_parse_sfp(sfp, node);
+		if (!err) {
+			/* Append to the global doubly-linked slot list */
+			if (!sfp_list)
+				sfp_list = sfp;
+			if (last_sfp)
+				last_sfp->next = sfp;
+			sfp->prev = last_sfp;
+			last_sfp = sfp;
+			debug("%s: parsed %s\n", __func__, sfp->name);
+		} else {
+			debug("%s: Error parsing SFP at node %s\n",
+			      __func__, ofnode_get_name(node));
+			return err;
+		}
+	}
+
+	ofnode_for_each_compatible_node(node, "ethernet,qsfp-slot") {
+		if (!ofnode_valid(node))
+			continue;
+
+		sfp = cvm_sfp_alloc(sizeof(*sfp));
+		if (!sfp)
+			return -1;
+
+		err = cvmx_sfp_parse_qsfp(sfp, node);
+		if (!err) {
+			if (!sfp_list)
+				sfp_list = sfp;
+			if (last_sfp)
+				last_sfp->next = sfp;
+			sfp->prev = last_sfp;
+			last_sfp = sfp;
+			debug("%s: parsed %s\n", __func__, sfp->name);
+		} else {
+			debug("%s: Error parsing QSFP at node %s\n",
+			      __func__, ofnode_get_name(node));
+			return err;
+		}
+	}
+
+	/* Only BGX-capable parts link slots to BGX ports below */
+	if (!octeon_has_feature(OCTEON_FEATURE_BGX))
+		return 0;
+
+	err = 0;
+	ofnode_for_each_compatible_node(node, "cavium,octeon-7890-bgx-port") {
+		ofnode sfp_ofnodes[4];
+		int num_sfp_nodes;
+		u64 reg_addr;
+		struct cvmx_xiface xi;
+		int xiface, index;
+		cvmx_helper_interface_mode_t mode;
+		int i;
+		int rc;
+
+		if (!ofnode_valid(node))
+			break;
+
+		num_sfp_nodes = ARRAY_SIZE(sfp_ofnodes);
+		rc = cvmx_ofnode_lookup_phandles(node, "sfp-slot",
+						 &num_sfp_nodes, sfp_ofnodes);
+		if (rc != 0 || num_sfp_nodes < 1)
+			rc = cvmx_ofnode_lookup_phandles(node, "qsfp-slot",
+							 &num_sfp_nodes,
+							 sfp_ofnodes);
+		/* If no SFP or QSFP slot found, go to next port */
+		if (rc < 0)
+			continue;
+
+		/* Fix: reset first_sfp as well as last_sfp, otherwise a port
+		 * whose slot lookup fails would reuse the previous port's
+		 * first_sfp and the error check below could never trigger.
+		 */
+		first_sfp = NULL;
+		last_sfp = NULL;
+		for (i = 0; i < num_sfp_nodes; i++) {
+			sfp = cvmx_sfp_find_slot_by_fdt_node(ofnode_to_offset(sfp_ofnodes[i]));
+			/* Fix: guard against a slot that was never parsed */
+			if (!sfp)
+				continue;
+			debug("%s: Adding sfp %s (%p) to BGX port\n",
+			      __func__, sfp->name, sfp);
+			if (last_sfp)
+				last_sfp->next_iface_sfp = sfp;
+			else
+				first_sfp = sfp;
+			last_sfp = sfp;
+		}
+		if (!first_sfp) {
+			/* Fix: the original printed the name of an
+			 * uninitialized node offset (sfp_nodes[0] was never
+			 * written); report the BGX port node instead.
+			 */
+			debug("%s: Error: could not find SFP slot for BGX port %s\n",
+			      __func__, ofnode_get_name(node));
+			err = -1;
+			break;
+		}
+
+		/* Get the port index */
+		reg = ofnode_get_addr(node);
+		if (reg < 0) {
+			debug("%s: Error: could not get BGX port reg value\n",
+			      __func__);
+			err = -1;
+			break;
+		}
+		index = reg;
+
+		/* Get BGX node and address */
+		reg_addr = ofnode_get_addr(ofnode_get_parent(node));
+		/* Extract node */
+		xi.node = cvmx_csr_addr_to_node(reg_addr);
+		/* Extract reg address */
+		reg_addr = cvmx_csr_addr_strip_node(reg_addr);
+		if ((reg_addr & 0xFFFFFFFFF0000000) !=
+		    0x00011800E0000000) {
+			debug("%s: Invalid BGX address 0x%llx\n",
+			      __func__, (unsigned long long)reg_addr);
+			xi.node = -1;
+			err = -1;
+			break;
+		}
+
+		/* Extract interface from address */
+		xi.interface = (reg_addr >> 24) & 0x0F;
+		/* Convert to xiface */
+		xiface = cvmx_helper_node_interface_to_xiface(xi.node,
+							      xi.interface);
+		debug("%s: Parsed %d SFP slots for interface 0x%x, index %d\n",
+		      __func__, num_sfp_nodes, xiface, index);
+
+		mode = cvmx_helper_interface_get_mode(xiface);
+		for (sfp = first_sfp; sfp; sfp = sfp->next_iface_sfp) {
+			sfp->xiface = xiface;
+			sfp->index = index;
+			/* Convert to IPD port */
+			sfp->ipd_port[0] =
+				cvmx_helper_get_ipd_port(xiface, index);
+			debug("%s: sfp %s (%p) xi: 0x%x, index: 0x%x, node: %d, mode: 0x%x, next: %p\n",
+			      __func__, sfp->name, sfp, sfp->xiface,
+			      sfp->index, xi.node, mode,
+			      sfp->next_iface_sfp);
+			/* 4-lane modes use a single IPD port per interface */
+			if (mode == CVMX_HELPER_INTERFACE_MODE_XLAUI ||
+			    mode == CVMX_HELPER_INTERFACE_MODE_40G_KR4)
+				for (i = 1; i < 4; i++)
+					sfp->ipd_port[i] = -1;
+			else
+				for (i = 1; i < 4; i++)
+					sfp->ipd_port[i] =
+						cvmx_helper_get_ipd_port(
+							xiface, i);
+		}
+		cvmx_helper_cfg_set_sfp_info(xiface, index, first_sfp);
+	}
+
+	if (!err) {
+		parsed = true;
+		cvmx_sfp_read_all_modules();
+	}
+
+	return err;
+}
+
+/**
+ * Given an IPD port number find the corresponding SFP or QSFP slot
+ *
+ * @param ipd_port	IPD port number to search for
+ *
+ * @return pointer to SFP data structure or NULL if not found
+ */
+struct cvmx_fdt_sfp_info *cvmx_sfp_find_slot_by_port(int ipd_port)
+{
+	struct cvmx_fdt_sfp_info *slot;
+	int lane;
+
+	/* A slot can cover up to four IPD ports (quad interfaces) */
+	for (slot = sfp_list; slot; slot = slot->next) {
+		for (lane = 0; lane < 4; lane++) {
+			if (slot->ipd_port[lane] == ipd_port)
+				return slot;
+		}
+	}
+
+	return NULL;
+}
+
+/**
+ * Given a fdt node offset find the corresponding SFP or QSFP slot
+ *
+ * @param of_offset	flat device tree node offset
+ *
+ * @return pointer to SFP data structure or NULL if not found
+ */
+struct cvmx_fdt_sfp_info *cvmx_sfp_find_slot_by_fdt_node(int of_offset)
+{
+	struct cvmx_fdt_sfp_info *slot;
+
+	for (slot = sfp_list; slot; slot = slot->next) {
+		if (slot->of_offset == of_offset)
+			return slot;
+	}
+
+	return NULL;
+}
+
+/**
+ * Validate every SFP module of a multi-lane (quad) interface.
+ *
+ * Walks the next_iface_sfp chain starting at @sfp. For each present module
+ * the EEPROM is (re)read and the module is rejected if its rate is below 10G
+ * or if it is a 10GBase-T-family copper module. LED handling depends on
+ * whether each slot has its own LED: with one LED per slot (multi_led) the
+ * per-slot LED is updated as the chain is walked; with a single shared LED
+ * the accumulated error is shown once at the end.
+ *
+ * @param sfp	first SFP slot of the chain to validate
+ * @param leds	LED list for the slots; may describe one LED per slot
+ *		(leds->next != NULL) or a single shared LED
+ *
+ * @return true if any module was invalid, false if all present modules are OK
+ *
+ * NOTE(review): assumes the leds list is at least as long as the sfp chain
+ * when multi_led — TODO confirm against cvmx_helper_get_port_phy_leds().
+ */
+static bool cvmx_sfp_validate_quad(struct cvmx_fdt_sfp_info *sfp,
+				   struct cvmx_phy_gpio_leds *leds)
+{
+	bool multi_led = leds && (leds->next);
+	bool error = false;
+	int mod_abs;
+
+	do {
+		/* Skip missing modules; without a mod_abs GPIO the module
+		 * is assumed present (mod_abs == 0).
+		 */
+		if (dm_gpio_is_valid(&sfp->mod_abs))
+			mod_abs = dm_gpio_get_value(&sfp->mod_abs);
+		else
+			mod_abs = 0;
+		if (!mod_abs) {
+			/* Read failure is logged but not fatal; validation
+			 * continues with whatever sfp_info holds.
+			 */
+			if (cvmx_sfp_read_i2c_eeprom(sfp)) {
+				debug("%s: Error reading eeprom for %s\n",
+				      __func__, sfp->name);
+			}
+			if (sfp->sfp_info.rate < CVMX_SFP_RATE_10G) {
+				cvmx_helper_leds_show_error(leds, true);
+				error = true;
+			} else if (sfp->sfp_info.rate >= CVMX_SFP_RATE_10G) {
+				/* We don't support 10GBase-T modules in
+				 * this mode.
+				 */
+				switch (sfp->sfp_info.cable_comp) {
+				case CVMX_SFP_CABLE_10GBASE_T:
+				case CVMX_SFP_CABLE_10GBASE_T_SR:
+				case CVMX_SFP_CABLE_5GBASE_T:
+				case CVMX_SFP_CABLE_2_5GBASE_T:
+					cvmx_helper_leds_show_error(leds, true);
+					error = true;
+					break;
+				default:
+					break;
+				}
+			}
+		} else if (multi_led) {
+			/* Absent module: clear this slot's own LED */
+			cvmx_helper_leds_show_error(leds, false);
+		}
+
+		/* Advance to the next per-slot LED, if any */
+		if (multi_led && leds->next)
+			leds = leds->next;
+		sfp = sfp->next_iface_sfp;
+	} while (sfp);
+
+	/* Shared LED: report the accumulated result once */
+	if (!multi_led)
+		cvmx_helper_leds_show_error(leds, error);
+
+	return error;
+}
+
+/**
+ * Validates if the module is correct for the specified port
+ *
+ * @param[in]	sfp	SFP port to check
+ * @param	mode	interface mode
+ *
+ * @return true if module is valid, false if invalid
+ * NOTE: This will also toggle the error LED, if present
+ */
+bool cvmx_sfp_validate_module(struct cvmx_fdt_sfp_info *sfp, int mode)
+{
+	const struct cvmx_sfp_mod_info *mod_info;
+	int xiface, index;
+	struct cvmx_phy_gpio_leds *leds;
+	bool error = false;
+	bool quad_mode = false;
+
+	/* Fix: the NULL check must come before any dereference of sfp; the
+	 * original read sfp->sfp_info, sfp->xiface, sfp->index and sfp->name
+	 * before testing the pointer.
+	 */
+	if (!sfp) {
+		debug("%s: Error: sfp is NULL\n", __func__);
+		return false;
+	}
+
+	mod_info = &sfp->sfp_info;
+	xiface = sfp->xiface;
+	index = sfp->index;
+
+	debug("%s(%s, 0x%x, 0x%x, 0x%x)\n", __func__, sfp->name, xiface, index,
+	      mode);
+
+	/* No module is valid */
+	leds = cvmx_helper_get_port_phy_leds(xiface, index);
+	if (!leds)
+		debug("%s: No leds for 0x%x:0x%x\n", __func__, xiface, index);
+
+	/* For single-lane SFP slots an absent module is not an error */
+	if (mode != CVMX_HELPER_INTERFACE_MODE_XLAUI &&
+	    mode != CVMX_HELPER_INTERFACE_MODE_40G_KR4 && !sfp->is_qsfp &&
+	    sfp->last_mod_abs && leds) {
+		cvmx_helper_leds_show_error(leds, false);
+		debug("%s: %s: last_mod_abs: %d, no error\n", __func__,
+		      sfp->name, sfp->last_mod_abs);
+		return true;
+	}
+
+	switch (mode) {
+	case CVMX_HELPER_INTERFACE_MODE_RGMII:
+	case CVMX_HELPER_INTERFACE_MODE_GMII:
+	case CVMX_HELPER_INTERFACE_MODE_SGMII:
+	case CVMX_HELPER_INTERFACE_MODE_QSGMII:
+	case CVMX_HELPER_INTERFACE_MODE_AGL:
+	case CVMX_HELPER_INTERFACE_MODE_SPI:
+		/* 1G modes: active cables must be exactly 1G */
+		if ((mod_info->active_cable &&
+		     mod_info->rate != CVMX_SFP_RATE_1G) ||
+		    mod_info->rate < CVMX_SFP_RATE_1G)
+			error = true;
+		break;
+	case CVMX_HELPER_INTERFACE_MODE_RXAUI:
+	case CVMX_HELPER_INTERFACE_MODE_XAUI:
+	case CVMX_HELPER_INTERFACE_MODE_10G_KR:
+	case CVMX_HELPER_INTERFACE_MODE_XFI:
+		/* 10G modes: active cables must be exactly 10G */
+		if ((mod_info->active_cable &&
+		     mod_info->rate != CVMX_SFP_RATE_10G) ||
+		    mod_info->rate < CVMX_SFP_RATE_10G)
+			error = true;
+		break;
+	case CVMX_HELPER_INTERFACE_MODE_XLAUI:
+	case CVMX_HELPER_INTERFACE_MODE_40G_KR4:
+		if (!sfp->is_qsfp) {
+			/* 40G over four SFP slots: validate the whole chain */
+			quad_mode = true;
+			error = cvmx_sfp_validate_quad(sfp, leds);
+		} else {
+			if ((mod_info->active_cable &&
+			     mod_info->rate != CVMX_SFP_RATE_40G) ||
+			    mod_info->rate < CVMX_SFP_RATE_25G)
+				error = true;
+		}
+		break;
+	default:
+		debug("%s: Unsupported interface mode %d on xiface 0x%x\n",
+		      __func__, mode, xiface);
+		return false;
+	}
+	debug("%s: %s: error: %d\n", __func__, sfp->name, error);
+	/* cvmx_sfp_validate_quad() already updated the LEDs in quad mode */
+	if (leds && !quad_mode)
+		cvmx_helper_leds_show_error(leds, error);
+
+	return !error;
+}
+
+/**
+ * Prints information about the SFP module
+ *
+ * Always prints the module/connector/compliance summary; all further detail
+ * is emitted via debug() only.
+ *
+ * @param[in]	sfp	sfp data structure
+ */
+void cvmx_sfp_print_info(const struct cvmx_fdt_sfp_info *sfp)
+{
+	const struct cvmx_sfp_mod_info *mi;
+	const char *conn_type;
+	const char *mod_type;
+	const char *rate_str;
+	const char *cable_comp;
+
+	/* Fix: check before dereferencing; mi was computed from sfp before
+	 * the NULL test in the original code.
+	 */
+	if (!sfp) {
+		printf("Invalid SFP (NULL)\n");
+		return;
+	}
+	mi = &sfp->sfp_info;
+
+	/* Please refer to the SFF-8024 and SFF-8472 documents */
+	switch (mi->conn_type) {
+	case CVMX_SFP_CONN_GBIC:
+		conn_type = "GBIC";
+		break;
+	case CVMX_SFP_CONN_SFP:
+		conn_type = "SFP/SFP+/SFP28";
+		break;
+	case CVMX_SFP_CONN_QSFP:
+		conn_type = "QSFP";
+		break;
+	case CVMX_SFP_CONN_QSFPP:
+		conn_type = "QSFP+";
+		break;
+	case CVMX_SFP_CONN_QSFP28:
+		conn_type = "QSFP28";
+		break;
+	case CVMX_SFP_CONN_MICRO_QSFP:
+		conn_type = "Micro QSFP";
+		break;
+	case CVMX_SFP_CONN_QSFP_DD:
+		conn_type = "QSFP-DD";
+		break;
+	case CVMX_SFP_CONN_SFP_DD:
+		conn_type = "SFP-DD";
+		break;
+	default:
+		conn_type = "Unknown";
+		break;
+	}
+
+	switch (mi->mod_type) {
+	case CVMX_SFP_MOD_UNKNOWN:
+		mod_type = "Unknown";
+		break;
+	case CVMX_SFP_MOD_OPTICAL_LC:
+		mod_type = "Optical LC";
+		break;
+	case CVMX_SFP_MOD_MULTIPLE_OPTICAL:
+		mod_type = "Multiple Optical";
+		break;
+	case CVMX_SFP_MOD_OPTICAL_PIGTAIL:
+		mod_type = "Optical Pigtail";
+		break;
+	case CVMX_SFP_MOD_COPPER_PIGTAIL:
+		mod_type = "Copper Pigtail";
+		break;
+	case CVMX_SFP_MOD_RJ45:
+		mod_type = "Copper RJ45";
+		break;
+	case CVMX_SFP_MOD_NO_SEP_CONN:
+		mod_type = "No Separable Connector";
+		break;
+	case CVMX_SFP_MOD_MXC_2X16:
+		mod_type = "MXC 2X16";
+		break;
+	case CVMX_SFP_MOD_CS_OPTICAL:
+		mod_type = "CS Optical";
+		break;
+	case CVMX_SFP_MOD_MINI_CS_OPTICAL:
+		mod_type = "Mini CS Optical";
+		break;
+	case CVMX_SFP_MOD_OTHER:
+		mod_type = "Unknown/Other";
+		break;
+	default:
+		mod_type = "Undefined";
+		break;
+	}
+
+	/* Extended compliance codes; several codes are shared between 25G
+	 * and 100G variants and are disambiguated by the module rate.
+	 */
+	switch (mi->cable_comp) {
+	case CVMX_SFP_CABLE_UNSPEC:
+		cable_comp = "";
+		break;
+	case CVMX_SFP_CABLE_100G_25GAUI_C2M_AOC_HIGH_BER:
+		if (mi->rate == CVMX_SFP_RATE_25G)
+			cable_comp = " 25GAUI-C2M AOC HIGH BER";
+		else
+			cable_comp = " 100G AOC HIGH BER";
+		break;
+	case CVMX_SFP_CABLE_100G_SR4_25G_SR:
+		if (mi->rate == CVMX_SFP_RATE_25G)
+			cable_comp = " 25GBASE-SR";
+		else
+			cable_comp = " 100GBASE-SR4";
+		break;
+	case CVMX_SFP_CABLE_100G_LR4_25G_LR:
+		if (mi->rate == CVMX_SFP_RATE_25G)
+			cable_comp = " 25GBASE-LR";
+		else
+			cable_comp = " 100GBASE-LR4";
+		break;
+	case CVMX_SFP_CABLE_100G_ER4_25G_ER:
+		if (mi->rate == CVMX_SFP_RATE_25G)
+			cable_comp = " 25GBASE-ER";
+		else
+			cable_comp = " 100GBASE-ER4";
+		break;
+	case CVMX_SFP_CABLE_100G_SR10:
+		cable_comp = " 100GBASE-SR10";
+		break;
+	case CVMX_SFP_CABLE_100G_CWDM4_MSA:
+		cable_comp = " 100G CWDM4";
+		break;
+	case CVMX_SFP_CABLE_100G_PSM4:
+		cable_comp = " 100G PSM4 Parallel SMF";
+		break;
+	case CVMX_SFP_CABLE_100G_25GAUI_C2M_ACC_HIGH_BER:
+		if (mi->rate == CVMX_SFP_RATE_25G)
+			cable_comp = " 25GAUI C2M ACC";
+		else
+			cable_comp = " 100G ACC";
+		break;
+	case CVMX_SFP_CABLE_100G_CWDM4:
+		cable_comp = " 100G CWDM4 MSA";
+		break;
+	case CVMX_SFP_CABLE_100G_CR4_25G_CR_CA_L:
+		if (mi->rate == CVMX_SFP_RATE_25G)
+			cable_comp = " 25GBASE-CR CA-L";
+		else
+			cable_comp = " 100GBASE-CR4";
+		break;
+	case CVMX_SFP_CABLE_25G_CR_CA_S:
+		cable_comp = " 25GBase-CR CA-S";
+		break;
+	case CVMX_SFP_CABLE_25G_CR_CA_N:
+		cable_comp = " 25GBase-CR CA-N";
+		break;
+	case CVMX_SFP_CABLE_40G_ER4:
+		cable_comp = " 40GBASE-ER4";
+		break;
+	case CVMX_SFP_CABLE_4X10G_SR:
+		cable_comp = " 4 X 10GBASE-T SR";
+		break;
+	case CVMX_SFP_CABLE_40G_PSM4:
+		cable_comp = " 40G PSM4 Parallel SMF";
+		break;
+	case CVMX_SFP_CABLE_G959_1_P1I1_2D1:
+		cable_comp = " G959.1 profile P1I1-2D1";
+		break;
+	case CVMX_SFP_CABLE_G959_1_P1S1_2D2:
+		cable_comp = " G959.1 profile P1S1-2D2";
+		break;
+	case CVMX_SFP_CABLE_G959_1_P1L1_2D2:
+		cable_comp = " G959.1 profile P1L1-2D2";
+		break;
+	case CVMX_SFP_CABLE_10GBASE_T:
+		cable_comp = " 10GBase-T";
+		break;
+	case CVMX_SFP_CABLE_100G_CLR4:
+		/* Fix: was " 10G CLR4" (copy-paste); this code is 100G CLR4 */
+		cable_comp = " 100G CLR4";
+		break;
+	case CVMX_SFP_CABLE_100G_25GAUI_C2M_AOC_LOW_BER:
+		if (mi->rate == CVMX_SFP_RATE_25G)
+			cable_comp = " 25GAUI C2M AOC LOW BER";
+		else
+			cable_comp = " 100G AOC LOW BER";
+		break;
+	case CVMX_SFP_CABLE_100G_25GAUI_C2M_ACC_LOW_BER:
+		if (mi->rate == CVMX_SFP_RATE_25G)
+			cable_comp = " 25GAUI C2M ACC LOW BER";
+		else
+			cable_comp = " 100G ACC LOW BER";
+		break;
+	case CVMX_SFP_CABLE_100G_2_LAMBDA_DWDM:
+		cable_comp = " 100GE-DWDM2";
+		break;
+	case CVMX_SFP_CABLE_100G_1550NM_WDM:
+		cable_comp = " 100GE 1550nm WDM";
+		break;
+	case CVMX_SFP_CABLE_10GBASE_T_SR:
+		cable_comp = " 10GBase-T short reach (30 meters)";
+		break;
+	case CVMX_SFP_CABLE_5GBASE_T:
+		cable_comp = " 5GBase-T";
+		break;
+	case CVMX_SFP_CABLE_2_5GBASE_T:
+		cable_comp = " 2.5GBase-T";
+		break;
+	case CVMX_SFP_CABLE_40G_SWDM4:
+		cable_comp = " 40G SWDM4";
+		break;
+	case CVMX_SFP_CABLE_100G_SWDM4:
+		cable_comp = " 100G SWDM4";
+		break;
+	case CVMX_SFP_CABLE_100G_PAM4_BIDI:
+		/* Fix: was " 100G SWDM4" (copy-paste from previous case) */
+		cable_comp = " 100G PAM4 BiDi";
+		break;
+	case CVMX_SFP_CABLE_100G_4WDM_10_FEC_HOST:
+		cable_comp = " 4WDM-10 MSA (10KM CWDM4) FEC in host";
+		break;
+	case CVMX_SFP_CABLE_100G_4WDM_20_FEC_HOST:
+		cable_comp = " 4WDM-20 MSA (20KM 100GBASE-LR4) FEC in host";
+		break;
+	case CVMX_SFP_CABLE_100G_4WDM_40_FEC_HOST:
+		cable_comp = " 4WDM-40 MSA (40KM APD receiver) FEC in host";
+		break;
+	case CVMX_SFP_CABLE_100GBASE_DR_CAUI4_NO_FEC:
+		cable_comp = " 100GBASE-DR with CAUI-4, no FEC";
+		break;
+	case CVMX_SFP_CABLE_100G_FR_CAUI4_NO_FEC:
+		cable_comp = " 100G-FR with CAUI-4, no FEC";
+		break;
+	case CVMX_SFP_CABLE_100G_LR_CAUI4_NO_FEC:
+		cable_comp = " 100G-LR with CAUI-4, no FEC";
+		break;
+	case CVMX_SFP_CABLE_ACTIVE_COPPER_50_100_200GAUI_LOW_BER:
+		cable_comp = " Active Copper Cable with 50GAUI, 100GAUI-2 or 200GAUI-4 C2M, BER 10^(-6) or lower";
+		break;
+	case CVMX_SFP_CABLE_ACTIVE_OPTICAL_50_100_200GAUI_LOW_BER:
+		cable_comp = " Active Optical Cable with 50GAUI, 100GAUI-2 or 200GAUI-4 C2M, BER 10^(-6) or lower";
+		break;
+	case CVMX_SFP_CABLE_ACTIVE_COPPER_50_100_200GAUI_HI_BER:
+		cable_comp = " Active Copper Cable with 50GAUI, 100GAUI-2 or 200GAUI-4 AUI, BER 2.6 * 10^(-4) or lower";
+		break;
+	case CVMX_SFP_CABLE_ACTIVE_OPTICAL_50_100_200GAUI_HI_BER:
+		cable_comp = " Active Optical Cable with 50GAUI, 100GAUI-2 or 200GAUI-4 AUI, BER 2.6 10^(-4) or lower";
+		break;
+	case CVMX_SFP_CABLE_50_100_200G_CR:
+		cable_comp = " 50GBASE-CR, 100G-CR2 or 200G-CR4";
+		break;
+	case CVMX_SFP_CABLE_50_100_200G_SR:
+		cable_comp = " 50GBASE-SR, 100G-SR2 or 200G-SR4";
+		break;
+	case CVMX_SFP_CABLE_50GBASE_FR_200GBASE_DR4:
+		cable_comp = " 50GBASE-FR or 200GBASE-DR4";
+		break;
+	case CVMX_SFP_CABLE_200GBASE_FR4:
+		cable_comp = " 200GBASE-FR4";
+		break;
+	case CVMX_SFP_CABLE_200G_1550NM_PSM4:
+		cable_comp = " 200G 1550nm PSM4";
+		break;
+	case CVMX_SFP_CABLE_50GBASE_LR:
+		/* Fix: leading space added for consistent output formatting */
+		cable_comp = " 50GBASE-LR";
+		break;
+	case CVMX_SFP_CABLE_200GBASE_LR4:
+		cable_comp = " 200GBASE-LR4";
+		break;
+	case CVMX_SFP_CABLE_64GFC_EA:
+		cable_comp = " 64GFC EA";
+		break;
+	case CVMX_SFP_CABLE_64GFC_SW:
+		cable_comp = " 64GFC SW";
+		break;
+	case CVMX_SFP_CABLE_64GFC_LW:
+		cable_comp = " 64GFC LW";
+		break;
+	/* Fix: the three 128GFC cases printed "64GFC" (copy-paste) */
+	case CVMX_SFP_CABLE_128GFC_EA:
+		cable_comp = " 128GFC EA";
+		break;
+	case CVMX_SFP_CABLE_128GFC_SW:
+		cable_comp = " 128GFC SW";
+		break;
+	case CVMX_SFP_CABLE_128GFC_LW:
+		cable_comp = " 128GFC LW";
+		break;
+	default:
+		cable_comp = " Unknown or Unsupported";
+		break;
+	}
+
+	switch (mi->rate) {
+	case CVMX_SFP_RATE_UNKNOWN:
+	default:
+		rate_str = "Unknown";
+		break;
+	case CVMX_SFP_RATE_1G:
+		rate_str = "1G";
+		break;
+	case CVMX_SFP_RATE_10G:
+		/* 2.5G/5GBase-T modules are grouped under the 10G rate */
+		switch (mi->cable_comp) {
+		case CVMX_SFP_CABLE_5GBASE_T:
+			rate_str = "5G";
+			break;
+		case CVMX_SFP_CABLE_2_5GBASE_T:
+			rate_str = "2.5G";
+			break;
+		default:
+			rate_str = "10G";
+			break;
+		}
+		break;
+	case CVMX_SFP_RATE_25G:
+		rate_str = "25G";
+		break;
+	case CVMX_SFP_RATE_40G:
+		rate_str = "40G";
+		break;
+	case CVMX_SFP_RATE_100G:
+		rate_str = "100G";
+		break;
+	}
+
+	cvmx_printf("%s %s%s module detected\n", mod_type, conn_type,
+		    cable_comp);
+	debug("Vendor: %s\n", mi->vendor_name);
+	debug("Vendor OUI: %02X:%02X:%02X\n", mi->vendor_oui[0],
+	      mi->vendor_oui[1], mi->vendor_oui[2]);
+	debug("Vendor part number: %s Revision: %s\n", mi->vendor_pn,
+	      mi->vendor_rev);
+	debug("Manufacturing date code: %s\n", mi->date_code);
+	debug("Rate: %s\n", rate_str);
+	if (mi->copper_cable) {
+		debug("Copper cable type: %s\n",
+		      mi->active_cable ? "Active" : "Passive");
+		debug("Cable length: %u meters\n", mi->max_copper_cable_len);
+		if (mi->rate == CVMX_SFP_RATE_25G)
+			debug("Forward error correction is%s %s\n",
+			      mi->fec_required ? "" : " not",
+			      (mi->max_copper_cable_len >= 5 ||
+			       !mi->fec_required) ?
+					"required" :
+					"suggested");
+	} else {
+		bool more = false;
+
+		debug("Supported optical types: ");
+		if (mi->eth_comp & CVMX_SFP_CABLE_10GBASE_ER) {
+			more = !!(mi->eth_comp & ~CVMX_SFP_CABLE_10GBASE_ER);
+			debug("10GBase-ER%s", more ? ", " : "");
+		}
+		if (mi->eth_comp & CVMX_SFP_CABLE_10GBASE_LRM) {
+			more = !!(mi->eth_comp & ~(CVMX_SFP_CABLE_10GBASE_ER |
+						   CVMX_SFP_CABLE_10GBASE_LRM));
+			debug("10GBase-LRM%s", more ? ", " : "");
+		}
+		if (mi->eth_comp & CVMX_SFP_CABLE_10GBASE_LR) {
+			more = !!(mi->eth_comp & ~(CVMX_SFP_CABLE_10GBASE_ER |
+						   CVMX_SFP_CABLE_10GBASE_LRM |
+						   CVMX_SFP_CABLE_10GBASE_LR));
+			debug("10GBase-LR%s", more ? ", " : "");
+		}
+		if (mi->eth_comp & CVMX_SFP_CABLE_10GBASE_SR)
+			debug("10GBase-SR");
+		debug("\nMaximum single mode cable length: %d meters\n",
+		      mi->max_single_mode_cable_length);
+		debug("Maximum 62.5um OM1 cable length: %d meters\n",
+		      mi->max_62_5um_om1_cable_length);
+		debug("Maximum 50um OM2 cable length: %d meters\n",
+		      mi->max_50um_om2_cable_length);
+		debug("Maximum 50um OM3 cable length: %d meters\n",
+		      mi->max_50um_om3_cable_length);
+		debug("Maximum 50um OM4 cable length: %d meters\n",
+		      mi->max_50um_om4_cable_length);
+		debug("Laser is%s cooled\n", mi->cooled_laser ? "" : " not");
+	}
+	debug("Limiting is %sabled\n", mi->limiting ? "en" : "dis");
+	debug("Power level: %d\n", mi->power_level);
+	debug("RX LoS is%s implemented and is%s inverted\n",
+	      mi->los_implemented ? "" : " not",
+	      mi->los_inverted ? "" : " not");
+	debug("TX fault is%s implemented\n",
+	      mi->tx_fault_implemented ? "" : " not");
+	debug("TX disable is%s implemented\n",
+	      mi->tx_disable_implemented ? "" : " not");
+	debug("Rate select is%s implemented\n",
+	      mi->rate_select_implemented ? "" : " not");
+	debug("RX output is %s\n",
+	      mi->linear_rx_output ? "linear" : "limiting");
+	debug("Diagnostic monitoring is%s implemented\n",
+	      mi->diag_monitoring ? "" : " not");
+	if (mi->diag_monitoring) {
+		const char *diag_rev;
+
+		/* SFF-8472 revision compliance field */
+		switch (mi->diag_rev) {
+		case CVMX_SFP_SFF_8472_NO_DIAG:
+			diag_rev = "none";
+			break;
+		case CVMX_SFP_SFF_8472_REV_9_3:
+			diag_rev = "9.3";
+			break;
+		case CVMX_SFP_SFF_8472_REV_9_5:
+			diag_rev = "9.5";
+			break;
+		case CVMX_SFP_SFF_8472_REV_10_2:
+			diag_rev = "10.2";
+			break;
+		case CVMX_SFP_SFF_8472_REV_10_4:
+			diag_rev = "10.4";
+			break;
+		case CVMX_SFP_SFF_8472_REV_11_0:
+			diag_rev = "11.0";
+			break;
+		case CVMX_SFP_SFF_8472_REV_11_3:
+			diag_rev = "11.3";
+			break;
+		case CVMX_SFP_SFF_8472_REV_11_4:
+			diag_rev = "11.4";
+			break;
+		case CVMX_SFP_SFF_8472_REV_12_0:
+			diag_rev = "12.0";
+			break;
+		case CVMX_SFP_SFF_8472_REV_UNALLOCATED:
+			diag_rev = "Unallocated";
+			break;
+		default:
+			diag_rev = "Unknown";
+			break;
+		}
+		debug("Diagnostics revision: %s\n", diag_rev);
+		debug("Diagnostic address change is%s required\n",
+		      mi->diag_addr_change_required ? "" : " not");
+		debug("Diagnostics are%s internally calibrated\n",
+		      mi->diag_internally_calibrated ? "" : " not");
+		debug("Diagnostics are%s externally calibrated\n",
+		      mi->diag_externally_calibrated ? "" : " not");
+		debug("Receive power measurement type: %s\n",
+		      mi->diag_rx_power_averaged ? "Averaged" : "OMA");
+	}
+}
--
2.35.1
^ permalink raw reply related [flat|nested] 52+ messages in thread* [PATCH 24/52] mips: octeon: Add cvmx-agl.c
2022-03-30 10:06 [PATCH 00/52] mips: octeon: Add ethernet support Stefan Roese
` (20 preceding siblings ...)
2022-03-30 10:06 ` [PATCH 22/52] mips: octeon: Add cvmx-helper-sfp.c Stefan Roese
@ 2022-03-30 10:07 ` Stefan Roese
2022-03-30 10:07 ` [PATCH 25/52] mips: octeon: Add cvmx-cmd-queue.c Stefan Roese
` (27 subsequent siblings)
49 siblings, 0 replies; 52+ messages in thread
From: Stefan Roese @ 2022-03-30 10:07 UTC (permalink / raw)
To: u-boot; +Cc: awilliams, cchavva
From: Aaron Williams <awilliams@marvell.com>
Import cvmx-agl.c from 2013 U-Boot. It will be used by the later
added drivers to support networking on the MIPS Octeon II / III
platforms.
Signed-off-by: Aaron Williams <awilliams@marvell.com>
Signed-off-by: Stefan Roese <sr@denx.de>
---
arch/mips/mach-octeon/cvmx-agl.c | 216 +++++++++++++++++++++++++++++++
1 file changed, 216 insertions(+)
create mode 100644 arch/mips/mach-octeon/cvmx-agl.c
diff --git a/arch/mips/mach-octeon/cvmx-agl.c b/arch/mips/mach-octeon/cvmx-agl.c
new file mode 100644
index 000000000000..9eea857e47c7
--- /dev/null
+++ b/arch/mips/mach-octeon/cvmx-agl.c
@@ -0,0 +1,216 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018-2022 Marvell International Ltd.
+ *
+ * Functions for RGMII (MGMT) initialization, configuration,
+ * and monitoring.
+ */
+
+#include <log.h>
+#include <time.h>
+#include <linux/delay.h>
+
+#include <mach/cvmx-regs.h>
+#include <mach/cvmx-csr.h>
+#include <mach/cvmx-bootmem.h>
+#include <mach/octeon-model.h>
+#include <mach/cvmx-fuse.h>
+#include <mach/octeon-feature.h>
+#include <mach/cvmx-qlm.h>
+#include <mach/octeon_qlm.h>
+#include <mach/cvmx-pcie.h>
+#include <mach/cvmx-coremask.h>
+
+#include <mach/cvmx-pki.h>
+#include <mach/cvmx-helper.h>
+#include <mach/cvmx-helper-board.h>
+#include <mach/cvmx-helper-cfg.h>
+
+#include <mach/cvmx-agl.h>
+#include <mach/cvmx-agl-defs.h>
+
+/*
+ * Enable RX frame checking on an AGL (MGMT) port.
+ *
+ * @param port to enable
+ *
+ * @return Zero on success, negative on failure
+ */
+int cvmx_agl_enable(int port)
+{
+ cvmx_agl_gmx_rxx_frm_ctl_t frm_ctl;
+
+ frm_ctl.u64 = 0;
+ /* Every frame begins with PREAMBLE+SFD; the PREAMBLE is checked
+ * (although checking is less strict), aligned and stripped off.
+ */
+ frm_ctl.s.pre_chk = 1;
+ frm_ctl.s.pre_free = 1;
+ frm_ctl.s.pre_strp = 1;
+ frm_ctl.s.pre_align = 1;
+ /* Skip the length check both for VLAN packets and for non-min
+ * sized packets with padding in the client data.
+ */
+ frm_ctl.s.pad_len = 1;
+ frm_ctl.s.vlan_len = 1;
+ /* Control pause frames match the globally assigned multicast
+ * address (not the station SMAC); pause information is forwarded
+ * to the TX block and the control frames themselves are dropped.
+ */
+ frm_ctl.s.ctl_smac = 0;
+ frm_ctl.s.ctl_mcst = 1;
+ frm_ctl.s.ctl_bck = 1;
+ frm_ctl.s.ctl_drp = 1;
+ csr_wr(CVMX_AGL_GMX_RXX_FRM_CTL(port), frm_ctl.u64);
+
+ return 0;
+}
+
+/*
+ * Query the link state of an AGL port.
+ *
+ * @param port IPD port number (negative selects a fake IPD port)
+ *
+ * @return link state as reported by the PHY / board code
+ */
+cvmx_helper_link_info_t cvmx_agl_link_get(int port)
+{
+ cvmx_helper_link_info_t info;
+ int xiface, index;
+
+ /* Fake IPD ports (older models) go straight to the board code */
+ if (port >= 0) {
+ xiface = cvmx_helper_get_interface_num(port);
+ index = cvmx_helper_get_interface_index_num(port);
+ /* Forced-up ports report a fixed 1Gbps full-duplex link */
+ if (cvmx_helper_get_port_force_link_up(xiface, index)) {
+ info.u64 = 0;
+ info.s.link_up = 1;
+ info.s.full_duplex = 1;
+ info.s.speed = 1000;
+ return info;
+ }
+ }
+
+ return __cvmx_helper_board_link_get(port);
+}
+
+/*
+ * Set MII/RGMII link based on mode.
+ *
+ * The port is disabled first, the new duplex/speed settings are
+ * applied while it is still down, the TX clock divider is updated,
+ * and the port is finally re-enabled.
+ *
+ * @param port interface port to set the link.
+ * @param link_info Link status
+ *
+ * @return 0 on success and -1 on failure
+ */
+int cvmx_agl_link_set(int port, cvmx_helper_link_info_t link_info)
+{
+ cvmx_agl_gmx_prtx_cfg_t agl_gmx_prtx;
+
+ /* Disable GMX before we make any changes. */
+ agl_gmx_prtx.u64 = csr_rd(CVMX_AGL_GMX_PRTX_CFG(port));
+ agl_gmx_prtx.s.en = 0;
+ agl_gmx_prtx.s.tx_en = 0;
+ agl_gmx_prtx.s.rx_en = 0;
+ csr_wr(CVMX_AGL_GMX_PRTX_CFG(port), agl_gmx_prtx.u64);
+
+ if (OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_OCTEON3()) {
+ u64 one_second = 0x1000000; /* todo: this needs checking */
+
+ /* Wait for GMX to be idle */
+ if (CVMX_WAIT_FOR_FIELD64(CVMX_AGL_GMX_PRTX_CFG(port),
+ cvmx_agl_gmx_prtx_cfg_t, rx_idle, ==,
+ 1, one_second) ||
+ CVMX_WAIT_FOR_FIELD64(CVMX_AGL_GMX_PRTX_CFG(port),
+ cvmx_agl_gmx_prtx_cfg_t, tx_idle, ==,
+ 1, one_second)) {
+ debug("AGL%d: Timeout waiting for GMX to be idle\n",
+ port);
+ return -1;
+ }
+ }
+
+ agl_gmx_prtx.u64 = csr_rd(CVMX_AGL_GMX_PRTX_CFG(port));
+
+ /* Set duplex mode */
+ if (!link_info.s.link_up)
+ agl_gmx_prtx.s.duplex = 1; /* Force full duplex on down links */
+ else
+ agl_gmx_prtx.s.duplex = link_info.s.full_duplex;
+
+ switch (link_info.s.speed) {
+ case 10:
+ agl_gmx_prtx.s.speed = 0;
+ agl_gmx_prtx.s.slottime = 0;
+ if (OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_OCTEON3()) {
+ agl_gmx_prtx.s.speed_msb = 1;
+ agl_gmx_prtx.s.burst = 1;
+ }
+ break;
+
+ case 100:
+ agl_gmx_prtx.s.speed = 0;
+ agl_gmx_prtx.s.slottime = 0;
+ if (OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_OCTEON3()) {
+ agl_gmx_prtx.s.speed_msb = 0;
+ agl_gmx_prtx.s.burst = 1;
+ }
+ break;
+
+ case 1000:
+ /* 1000 MBits is only supported on 6XXX chips */
+ if (OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_OCTEON3()) {
+ agl_gmx_prtx.s.speed_msb = 0;
+ agl_gmx_prtx.s.speed = 1;
+ agl_gmx_prtx.s.slottime =
+ 1; /* Only matters for half-duplex */
+ agl_gmx_prtx.s.burst = agl_gmx_prtx.s.duplex;
+ }
+ break;
+
+ /* No link */
+ case 0:
+ default:
+ break;
+ }
+
+ /* Write the new GMX setting with the port still disabled. */
+ csr_wr(CVMX_AGL_GMX_PRTX_CFG(port), agl_gmx_prtx.u64);
+
+ /* Read GMX CFG again to make sure the config is completed. */
+ agl_gmx_prtx.u64 = csr_rd(CVMX_AGL_GMX_PRTX_CFG(port));
+
+ if (OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_OCTEON3()) {
+ cvmx_agl_gmx_txx_clk_t agl_clk;
+ cvmx_agl_prtx_ctl_t prt_ctl;
+
+ prt_ctl.u64 = csr_rd(CVMX_AGL_PRTX_CTL(port));
+ agl_clk.u64 = csr_rd(CVMX_AGL_GMX_TXX_CLK(port));
+ /* MII (both speeds) and RGMII 1000 setting */
+ agl_clk.s.clk_cnt = 1;
+ /* Check other speeds for RGMII mode */
+ if (prt_ctl.s.mode == 0 || OCTEON_IS_OCTEON3()) {
+ if (link_info.s.speed == 10)
+ agl_clk.s.clk_cnt = 50;
+ else if (link_info.s.speed == 100)
+ agl_clk.s.clk_cnt = 5;
+ }
+ csr_wr(CVMX_AGL_GMX_TXX_CLK(port), agl_clk.u64);
+ }
+
+ /* Enable transmit and receive ports */
+ agl_gmx_prtx.s.tx_en = 1;
+ agl_gmx_prtx.s.rx_en = 1;
+ csr_wr(CVMX_AGL_GMX_PRTX_CFG(port), agl_gmx_prtx.u64);
+
+ /* Enable the link. */
+ agl_gmx_prtx.s.en = 1;
+ csr_wr(CVMX_AGL_GMX_PRTX_CFG(port), agl_gmx_prtx.u64);
+
+ if (OCTEON_IS_OCTEON3()) {
+ union cvmx_agl_prtx_ctl agl_prtx_ctl;
+ /* Enable the interface, set clkrst */
+ agl_prtx_ctl.u64 = csr_rd(CVMX_AGL_PRTX_CTL(port));
+ agl_prtx_ctl.s.clkrst = 1;
+ csr_wr(CVMX_AGL_PRTX_CTL(port), agl_prtx_ctl.u64);
+ csr_rd(CVMX_AGL_PRTX_CTL(port));
+ agl_prtx_ctl.s.enable = 1;
+ csr_wr(CVMX_AGL_PRTX_CTL(port), agl_prtx_ctl.u64);
+ /* Read the value back to force the previous write */
+ csr_rd(CVMX_AGL_PRTX_CTL(port));
+ }
+
+ return 0;
+}
--
2.35.1
^ permalink raw reply related [flat|nested] 52+ messages in thread* [PATCH 25/52] mips: octeon: Add cvmx-cmd-queue.c
2022-03-30 10:06 [PATCH 00/52] mips: octeon: Add ethernet support Stefan Roese
` (21 preceding siblings ...)
2022-03-30 10:07 ` [PATCH 24/52] mips: octeon: Add cvmx-agl.c Stefan Roese
@ 2022-03-30 10:07 ` Stefan Roese
2022-03-30 10:07 ` [PATCH 26/52] mips: octeon: Add cvmx-fau-compat.c Stefan Roese
` (26 subsequent siblings)
49 siblings, 0 replies; 52+ messages in thread
From: Stefan Roese @ 2022-03-30 10:07 UTC (permalink / raw)
To: u-boot; +Cc: awilliams, cchavva
From: Aaron Williams <awilliams@marvell.com>
Import cvmx-cmd-queue.c from 2013 U-Boot. It will be used by the later
added drivers to support networking on the MIPS Octeon II / III
platforms.
Signed-off-by: Aaron Williams <awilliams@marvell.com>
Signed-off-by: Stefan Roese <sr@denx.de>
---
arch/mips/mach-octeon/cvmx-cmd-queue.c | 449 +++++++++++++++++++++++++
1 file changed, 449 insertions(+)
create mode 100644 arch/mips/mach-octeon/cvmx-cmd-queue.c
diff --git a/arch/mips/mach-octeon/cvmx-cmd-queue.c b/arch/mips/mach-octeon/cvmx-cmd-queue.c
new file mode 100644
index 000000000000..041f2390ca18
--- /dev/null
+++ b/arch/mips/mach-octeon/cvmx-cmd-queue.c
@@ -0,0 +1,449 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018-2022 Marvell International Ltd.
+ *
+ * Support functions for managing command queues used for
+ * various hardware blocks.
+ */
+
+#include <errno.h>
+#include <log.h>
+#include <time.h>
+#include <linux/delay.h>
+
+#include <mach/cvmx-regs.h>
+#include <mach/cvmx-csr.h>
+#include <mach/cvmx-bootmem.h>
+#include <mach/octeon-model.h>
+#include <mach/cvmx-fuse.h>
+#include <mach/octeon-feature.h>
+#include <mach/cvmx-qlm.h>
+#include <mach/octeon_qlm.h>
+#include <mach/cvmx-pcie.h>
+#include <mach/cvmx-coremask.h>
+
+#include <mach/cvmx-fpa.h>
+#include <mach/cvmx-cmd-queue.h>
+
+#include <mach/cvmx-agl-defs.h>
+#include <mach/cvmx-bgxx-defs.h>
+#include <mach/cvmx-ciu-defs.h>
+#include <mach/cvmx-gmxx-defs.h>
+#include <mach/cvmx-gserx-defs.h>
+#include <mach/cvmx-ilk-defs.h>
+#include <mach/cvmx-ipd-defs.h>
+#include <mach/cvmx-pcsx-defs.h>
+#include <mach/cvmx-pcsxx-defs.h>
+#include <mach/cvmx-pki-defs.h>
+#include <mach/cvmx-pko-defs.h>
+#include <mach/cvmx-xcv-defs.h>
+
+#include <mach/cvmx-hwpko.h>
+#include <mach/cvmx-ilk.h>
+#include <mach/cvmx-pki.h>
+#include <mach/cvmx-pko3.h>
+#include <mach/cvmx-pko3-queue.h>
+#include <mach/cvmx-pko3-resources.h>
+
+#include <mach/cvmx-helper.h>
+#include <mach/cvmx-helper-board.h>
+#include <mach/cvmx-helper-cfg.h>
+
+#include <mach/cvmx-helper-bgx.h>
+#include <mach/cvmx-helper-cfg.h>
+#include <mach/cvmx-helper-util.h>
+#include <mach/cvmx-helper-pki.h>
+
+#include <mach/cvmx-helper-util.h>
+#include <mach/cvmx-dpi-defs.h>
+#include <mach/cvmx-npei-defs.h>
+#include <mach/cvmx-pexp-defs.h>
+
+/**
+ * This application uses this pointer to access the global queue
+ * state. It points to a bootmem named block.
+ */
+__cvmx_cmd_queue_all_state_t *__cvmx_cmd_queue_state_ptrs[CVMX_MAX_NODES];
+
+/**
+ * @INTERNAL
+ * Initialize the Global queue state pointer.
+ *
+ * @param node node number to initialize the state pointer for
+ *
+ * @return CVMX_CMD_QUEUE_SUCCESS or a failure code
+ */
+cvmx_cmd_queue_result_t __cvmx_cmd_queue_init_state_ptr(unsigned int node)
+{
+ /* Writable buffer with room for the "_<node>" suffix appended
+ * below. Note: this must be an array, not a pointer to a string
+ * literal -- strcat() into a string literal is undefined behavior.
+ */
+ char alloc_name[32] = "cvmx_cmd_queues";
+ char s[4] = "_0";
+ const struct cvmx_bootmem_named_block_desc *block_desc = NULL;
+ unsigned int size;
+ u64 paddr_min = 0, paddr_max = 0;
+ void *ptr;
+
+ /* Fast path: state for this node is already mapped */
+ if (cvmx_likely(__cvmx_cmd_queue_state_ptrs[node]))
+ return CVMX_CMD_QUEUE_SUCCESS;
+
+ /* Add node# to block name */
+ if (node > 0) {
+ s[1] += node;
+ strcat(alloc_name, s);
+ }
+
+ /* Find the named block in case it has been created already */
+ block_desc = cvmx_bootmem_find_named_block(alloc_name);
+ if (block_desc) {
+ __cvmx_cmd_queue_state_ptrs[node] =
+ (__cvmx_cmd_queue_all_state_t *)cvmx_phys_to_ptr(
+ block_desc->base_addr);
+ return CVMX_CMD_QUEUE_SUCCESS;
+ }
+
+ size = sizeof(*__cvmx_cmd_queue_state_ptrs[node]);
+
+ /* Rest of the code is to allocate a new named block */
+
+ /* Atomically allocate named block once, and zero it by default */
+ ptr = cvmx_bootmem_alloc_named_range_once(size, paddr_min, paddr_max,
+ 128, alloc_name, NULL);
+
+ if (ptr) {
+ __cvmx_cmd_queue_state_ptrs[node] =
+ (__cvmx_cmd_queue_all_state_t *)ptr;
+ } else {
+ debug("ERROR: %s: Unable to get named block %s.\n", __func__,
+ alloc_name);
+ return CVMX_CMD_QUEUE_NO_MEMORY;
+ }
+ return CVMX_CMD_QUEUE_SUCCESS;
+}
+
+/**
+ * Initialize a command queue for use. The initial FPA buffer is
+ * allocated and the hardware unit is configured to point to the
+ * new command queue.
+ *
+ * @param queue_id Hardware command queue to initialize.
+ * @param max_depth Maximum outstanding commands that can be queued.
+ * @param fpa_pool FPA pool the command queues should come from.
+ * @param pool_size Size of each buffer in the FPA pool (bytes);
+ * consumed in 8-byte words, so it should be a
+ * multiple of 8 in the range [128, 1 << 17]
+ *
+ * @return CVMX_CMD_QUEUE_SUCCESS or a failure code
+ */
+cvmx_cmd_queue_result_t cvmx_cmd_queue_initialize(cvmx_cmd_queue_id_t queue_id,
+ int max_depth, int fpa_pool,
+ int pool_size)
+{
+ __cvmx_cmd_queue_state_t *qstate;
+ cvmx_cmd_queue_result_t result;
+ unsigned int node;
+ unsigned int index;
+ int fpa_pool_min, fpa_pool_max;
+ union cvmx_fpa_ctl_status status;
+ void *buffer;
+
+ node = __cvmx_cmd_queue_get_node(queue_id);
+
+ index = __cvmx_cmd_queue_get_index(queue_id);
+ if (index >= NUM_ELEMENTS(__cvmx_cmd_queue_state_ptrs[node]->state)) {
+ printf("ERROR: %s: queue %#x out of range\n", __func__,
+ queue_id);
+ return CVMX_CMD_QUEUE_INVALID_PARAM;
+ }
+
+ result = __cvmx_cmd_queue_init_state_ptr(node);
+ if (result != CVMX_CMD_QUEUE_SUCCESS)
+ return result;
+
+ qstate = __cvmx_cmd_queue_get_state(queue_id);
+ if (!qstate)
+ return CVMX_CMD_QUEUE_INVALID_PARAM;
+
+ /*
+ * We artificially limit max_depth to 1<<20 words. It is an
+ * arbitrary limit.
+ */
+ if (CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH) {
+ if (max_depth < 0 || max_depth > 1 << 20)
+ return CVMX_CMD_QUEUE_INVALID_PARAM;
+ } else if (max_depth != 0) {
+ return CVMX_CMD_QUEUE_INVALID_PARAM;
+ }
+
+ /* CVMX_FPA_NUM_POOLS maps to cvmx_fpa3_num_auras for FPA3 */
+ fpa_pool_min = node << 10;
+ fpa_pool_max = fpa_pool_min + CVMX_FPA_NUM_POOLS;
+
+ if (fpa_pool < fpa_pool_min || fpa_pool >= fpa_pool_max)
+ return CVMX_CMD_QUEUE_INVALID_PARAM;
+
+ if (pool_size < 128 || pool_size > (1 << 17))
+ return CVMX_CMD_QUEUE_INVALID_PARAM;
+
+ /* pool_size is consumed in units of 8-byte words (pool_size >> 3
+ * below), so warn on any non-multiple of 8. The previous check
+ * used (pool_size & 3), which only caught non-multiples of 4 and
+ * contradicted the warning text.
+ */
+ if (pool_size & 7)
+ debug("WARNING: %s: pool_size %d not multiple of 8\n", __func__,
+ pool_size);
+
+ /* See if someone else has already initialized the queue */
+ if (qstate->base_paddr) {
+ int depth;
+ static const char emsg[] = /* Common error message part */
+ "Queue already initialized with different ";
+
+ depth = (max_depth + qstate->pool_size_m1 - 1) /
+ qstate->pool_size_m1;
+ if (depth != qstate->max_depth) {
+ depth = qstate->max_depth * qstate->pool_size_m1;
+ debug("ERROR: %s: %s max_depth (%d).\n", __func__, emsg,
+ depth);
+ return CVMX_CMD_QUEUE_INVALID_PARAM;
+ }
+ if (fpa_pool != qstate->fpa_pool) {
+ debug("ERROR: %s: %s FPA pool (%d).\n", __func__, emsg,
+ (int)qstate->fpa_pool);
+ return CVMX_CMD_QUEUE_INVALID_PARAM;
+ }
+ if ((pool_size >> 3) - 1 != qstate->pool_size_m1) {
+ debug("ERROR: %s: %s FPA pool size (%u).\n", __func__,
+ emsg, (qstate->pool_size_m1 + 1) << 3);
+ return CVMX_CMD_QUEUE_INVALID_PARAM;
+ }
+ return CVMX_CMD_QUEUE_ALREADY_SETUP;
+ }
+
+ /* FPA1 must be enabled before the first buffer can be allocated */
+ if (!(octeon_has_feature(OCTEON_FEATURE_FPA3))) {
+ status.u64 = csr_rd(CVMX_FPA_CTL_STATUS);
+ if (!status.s.enb) {
+ debug("ERROR: %s: FPA is not enabled.\n",
+ __func__);
+ return CVMX_CMD_QUEUE_NO_MEMORY;
+ }
+ }
+ buffer = cvmx_fpa_alloc(fpa_pool);
+ if (!buffer) {
+ debug("ERROR: %s: allocating first buffer.\n", __func__);
+ return CVMX_CMD_QUEUE_NO_MEMORY;
+ }
+
+ /* Commit the queue geometry; sizes are stored in 8-byte words */
+ index = (pool_size >> 3) - 1;
+ qstate->pool_size_m1 = index;
+ qstate->max_depth = (max_depth + index - 1) / index;
+ qstate->index = 0;
+ qstate->fpa_pool = fpa_pool;
+ qstate->base_paddr = cvmx_ptr_to_phys(buffer);
+
+ /* Initialize lock */
+ __cvmx_cmd_queue_lock_init(queue_id);
+ return CVMX_CMD_QUEUE_SUCCESS;
+}
+
+/**
+ * Shutdown a queue and free its command buffers to the FPA. The
+ * hardware connected to the queue must be stopped before this
+ * function is called.
+ *
+ * @param queue_id Queue to shutdown
+ *
+ * @return CVMX_CMD_QUEUE_SUCCESS or a failure code
+ */
+cvmx_cmd_queue_result_t cvmx_cmd_queue_shutdown(cvmx_cmd_queue_id_t queue_id)
+{
+ __cvmx_cmd_queue_state_t *qptr = __cvmx_cmd_queue_get_state(queue_id);
+
+ /* FIXME: This will not complain if the queue was never initialized */
+ if (!qptr) {
+ debug("ERROR: %s: Unable to get queue information.\n",
+ __func__);
+ return CVMX_CMD_QUEUE_INVALID_PARAM;
+ }
+
+ /* Refuse to tear down a queue that still holds commands */
+ if (cvmx_cmd_queue_length(queue_id) > 0) {
+ debug("ERROR: %s: Queue still has data in it.\n", __func__);
+ return CVMX_CMD_QUEUE_FULL;
+ }
+
+ /* Return the current command buffer to its FPA pool under the
+ * queue lock, and mark the queue uninitialized
+ */
+ __cvmx_cmd_queue_lock(queue_id);
+ if (qptr->base_paddr) {
+ cvmx_fpa_free(cvmx_phys_to_ptr((u64)qptr->base_paddr),
+ qptr->fpa_pool, 0);
+ qptr->base_paddr = 0;
+ }
+ __cvmx_cmd_queue_unlock(queue_id);
+
+ return CVMX_CMD_QUEUE_SUCCESS;
+}
+
+/**
+ * Return the number of command words pending in the queue. This
+ * function may be relatively slow for some hardware units.
+ *
+ * @param queue_id Hardware command queue to query
+ *
+ * @return Number of outstanding commands, or the negative
+ * CVMX_CMD_QUEUE_INVALID_PARAM for unknown queues
+ */
+int cvmx_cmd_queue_length(cvmx_cmd_queue_id_t queue_id)
+{
+ union cvmx_dpi_dmax_counts dmax_counts;
+ union cvmx_pko_mem_debug8 debug8;
+
+ if (!__cvmx_cmd_queue_get_state(queue_id))
+ return CVMX_CMD_QUEUE_INVALID_PARAM;
+
+ /*
+ * The cast is here so gcc will check that all values in the
+ * cvmx_cmd_queue_id_t enumeration are here.
+ */
+ switch ((cvmx_cmd_queue_id_t)(queue_id & 0xff0000)) {
+ case CVMX_CMD_QUEUE_PKO_BASE:
+ /*
+ * Really need atomic lock on
+ * CVMX_PKO_REG_READ_IDX. Right now we are normally
+ * called with the queue lock, so that is a SLIGHT
+ * amount of protection.
+ */
+ csr_wr(CVMX_PKO_REG_READ_IDX, queue_id & 0xffff);
+ debug8.u64 = csr_rd(CVMX_PKO_MEM_DEBUG8);
+ if (octeon_has_feature(OCTEON_FEATURE_PKND))
+ return debug8.cn68xx.doorbell;
+ else
+ return debug8.cn58xx.doorbell;
+ case CVMX_CMD_QUEUE_ZIP:
+ case CVMX_CMD_QUEUE_DFA:
+ case CVMX_CMD_QUEUE_HNA:
+ case CVMX_CMD_QUEUE_RAID:
+ /* Still need to implement other lengths */
+ return 0;
+ case CVMX_CMD_QUEUE_DMA_BASE:
+ /* NPEI and DPI expose the doorbell count in different CSRs */
+ if (octeon_has_feature(OCTEON_FEATURE_NPEI)) {
+ dmax_counts.u64 = csr_rd(
+ CVMX_PEXP_NPEI_DMAX_COUNTS(queue_id & 0x7));
+ return dmax_counts.s.dbell;
+ }
+
+ dmax_counts.u64 = csr_rd(CVMX_DPI_DMAX_COUNTS(queue_id & 0x7));
+ return dmax_counts.s.dbell;
+ case CVMX_CMD_QUEUE_BCH:
+ /* Not available */
+ return 0;
+ case CVMX_CMD_QUEUE_END:
+ return CVMX_CMD_QUEUE_INVALID_PARAM;
+ }
+ return CVMX_CMD_QUEUE_INVALID_PARAM;
+}
+
+/**
+ * Return the command buffer to be written to. The purpose of this
+ * function is to allow CVMX routine access to the low level buffer
+ * for initial hardware setup. User applications should not call this
+ * function directly.
+ *
+ * @param queue_id Command queue to query
+ *
+ * @return Command buffer or NULL on failure
+ */
+void *cvmx_cmd_queue_buffer(cvmx_cmd_queue_id_t queue_id)
+{
+ __cvmx_cmd_queue_state_t *state;
+
+ state = __cvmx_cmd_queue_get_state(queue_id);
+ if (!state || !state->base_paddr)
+ return NULL;
+
+ return cvmx_phys_to_ptr((u64)state->base_paddr);
+}
+
+/*
+ * Append a fresh command buffer to a queue whose current buffer is
+ * full, chaining the old block to the new one and making the new
+ * block current.
+ *
+ * @return virtual base of the new block, or NULL on allocation failure
+ */
+static u64 *__cvmx_cmd_queue_add_blk(__cvmx_cmd_queue_state_t *qptr)
+{
+ u64 *full_blk;
+ u64 *blk;
+ u64 blk_paddr;
+
+ /* Grab a replacement block from the per-queue pool */
+ blk = (u64 *)cvmx_fpa_alloc(qptr->fpa_pool);
+ if (cvmx_unlikely(!blk))
+ return NULL;
+
+ /* Invalidate the new block's own link word, in case it too gets
+ * filled to the rim
+ */
+ blk[qptr->pool_size_m1] = ~0ull;
+
+ blk_paddr = cvmx_ptr_to_phys(blk);
+
+ /* Chain the current (full) block to the new one via the link word
+ * at the end of the full block
+ */
+ full_blk = (u64 *)cvmx_phys_to_ptr((u64)qptr->base_paddr);
+ full_blk[qptr->pool_size_m1] = blk_paddr;
+
+ /* Make the new block current and rewind the write index */
+ qptr->base_paddr = blk_paddr;
+ qptr->index = 0;
+
+ return blk;
+}
+
+/**
+ * @INTERNAL
+ * Add command words into a queue, handles all the corner cases
+ * where only some of the words might fit into the current block,
+ * and a new block may need to be allocated.
+ * Locking and argument checks are done in the front-end in-line
+ * functions that call this one for the rare corner cases.
+ */
+cvmx_cmd_queue_result_t
+__cvmx_cmd_queue_write_raw(cvmx_cmd_queue_id_t queue_id,
+ __cvmx_cmd_queue_state_t *qptr, int cmd_count,
+ const u64 *cmds)
+{
+ u64 *cmd_ptr;
+ unsigned int index;
+
+ cmd_ptr = (u64 *)cvmx_phys_to_ptr((u64)qptr->base_paddr);
+ index = qptr->index;
+
+ /* Enforce queue depth limit, if enabled, once per block */
+ if (CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH && cvmx_unlikely(qptr->max_depth)) {
+ unsigned int depth = cvmx_cmd_queue_length(queue_id);
+
+ /* max_depth is stored in blocks, convert words to blocks */
+ depth /= qptr->pool_size_m1;
+
+ if (cvmx_unlikely(depth > qptr->max_depth))
+ return CVMX_CMD_QUEUE_FULL;
+ }
+
+ /*
+ * If the block allocation fails, even the words that we wrote
+ * to the current block will not count because the 'index' will
+ * not be committed.
+ * The loop is run 'count + 1' times to take care of the tail
+ * case, where the buffer is full to the rim, so the link
+ * pointer must be filled with a valid address.
+ */
+ while (cmd_count >= 0) {
+ if (index >= qptr->pool_size_m1) {
+ /* Block is full, get another one and proceed */
+ cmd_ptr = __cvmx_cmd_queue_add_blk(qptr);
+
+ /* Bail on allocation error w/o committing anything */
+ if (cvmx_unlikely(!cmd_ptr))
+ return CVMX_CMD_QUEUE_NO_MEMORY;
+
+ /* Reset index for start of new block */
+ index = 0;
+ }
+ /* Exit Loop on 'count + 1' iterations */
+ if (cmd_count <= 0)
+ break;
+ /* Store commands into queue block while there is space */
+ cmd_ptr[index++] = *cmds++;
+ cmd_count--;
+ } /* while cmd_count */
+
+ /* Commit added words if all is well */
+ qptr->index = index;
+
+ return CVMX_CMD_QUEUE_SUCCESS;
+}
--
2.35.1
^ permalink raw reply related [flat|nested] 52+ messages in thread* [PATCH 26/52] mips: octeon: Add cvmx-fau-compat.c
2022-03-30 10:06 [PATCH 00/52] mips: octeon: Add ethernet support Stefan Roese
` (22 preceding siblings ...)
2022-03-30 10:07 ` [PATCH 25/52] mips: octeon: Add cvmx-cmd-queue.c Stefan Roese
@ 2022-03-30 10:07 ` Stefan Roese
2022-03-30 10:07 ` [PATCH 28/52] mips: octeon: Add cvmx-fpa-resource.c Stefan Roese
` (25 subsequent siblings)
49 siblings, 0 replies; 52+ messages in thread
From: Stefan Roese @ 2022-03-30 10:07 UTC (permalink / raw)
To: u-boot; +Cc: awilliams, cchavva
From: Aaron Williams <awilliams@marvell.com>
Import cvmx-fau-compat.c from 2013 U-Boot. It will be used by the later
added drivers to support networking on the MIPS Octeon II / III
platforms.
Signed-off-by: Aaron Williams <awilliams@marvell.com>
Signed-off-by: Stefan Roese <sr@denx.de>
---
arch/mips/mach-octeon/cvmx-fau-compat.c | 53 +++++++++++++++++++++++++
1 file changed, 53 insertions(+)
create mode 100644 arch/mips/mach-octeon/cvmx-fau-compat.c
diff --git a/arch/mips/mach-octeon/cvmx-fau-compat.c b/arch/mips/mach-octeon/cvmx-fau-compat.c
new file mode 100644
index 000000000000..9c2ff763ad53
--- /dev/null
+++ b/arch/mips/mach-octeon/cvmx-fau-compat.c
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018-2022 Marvell International Ltd.
+ */
+
+#include <log.h>
+#include <time.h>
+#include <linux/delay.h>
+
+#include <mach/cvmx-regs.h>
+#include <mach/cvmx-csr.h>
+#include <mach/cvmx-bootmem.h>
+#include <mach/octeon-model.h>
+#include <mach/cvmx-fuse.h>
+#include <mach/octeon-feature.h>
+#include <mach/cvmx-qlm.h>
+#include <mach/octeon_qlm.h>
+#include <mach/cvmx-pcie.h>
+#include <mach/cvmx-coremask.h>
+
+#include <mach/cvmx-global-resources.h>
+
+#include <mach/cvmx-pki.h>
+#include <mach/cvmx-helper.h>
+#include <mach/cvmx-helper-board.h>
+#include <mach/cvmx-helper-cfg.h>
+
+#include <mach/cvmx-hwfau.h>
+
+/* Software FAU register area used on devices without an FAU unit;
+ * set up by cvmx_fau_init() below
+ */
+u8 *cvmx_fau_regs_ptr;
+
+/* Bootmem init callback: zero-fill the software FAU register area */
+void cvmx_fau_bootmem_init(void *bootmem)
+{
+ memset(bootmem, 0, CVMX_FAU_MAX_REGISTERS_8);
+}
+
+/**
+ * Initializes FAU region for devices without FAU unit.
+ *
+ * Allocates (at most once) a named bootmem block, zeroed via the
+ * cvmx_fau_bootmem_init() callback, and points cvmx_fau_regs_ptr
+ * at it.
+ *
+ * @return 0 on success -1 on failure
+ */
+int cvmx_fau_init(void)
+{
+ cvmx_fau_regs_ptr = (u8 *)cvmx_bootmem_alloc_named_range_once(
+ CVMX_FAU_MAX_REGISTERS_8, 0, 1ull << 31, 128,
+ "cvmx_fau_registers", cvmx_fau_bootmem_init);
+
+ /* Idiomatic NULL test instead of comparing a pointer to 0ull */
+ if (!cvmx_fau_regs_ptr) {
+ debug("ERROR: Failed to alloc named block for software FAU.\n");
+ return -1;
+ }
+
+ return 0;
+}
--
2.35.1
^ permalink raw reply related [flat|nested] 52+ messages in thread* [PATCH 28/52] mips: octeon: Add cvmx-fpa-resource.c
2022-03-30 10:06 [PATCH 00/52] mips: octeon: Add ethernet support Stefan Roese
` (23 preceding siblings ...)
2022-03-30 10:07 ` [PATCH 26/52] mips: octeon: Add cvmx-fau-compat.c Stefan Roese
@ 2022-03-30 10:07 ` Stefan Roese
2022-03-30 10:07 ` [PATCH 29/52] mips: octeon: Add cvmx-global-resource.c Stefan Roese
` (24 subsequent siblings)
49 siblings, 0 replies; 52+ messages in thread
From: Stefan Roese @ 2022-03-30 10:07 UTC (permalink / raw)
To: u-boot; +Cc: awilliams, cchavva
From: Aaron Williams <awilliams@marvell.com>
Import cvmx-fpa-resource.c from 2013 U-Boot. It will be used by the later
added drivers to support networking on the MIPS Octeon II / III
platforms.
Signed-off-by: Aaron Williams <awilliams@marvell.com>
Signed-off-by: Stefan Roese <sr@denx.de>
---
arch/mips/mach-octeon/cvmx-fpa-resource.c | 305 ++++++++++++++++++++++
1 file changed, 305 insertions(+)
create mode 100644 arch/mips/mach-octeon/cvmx-fpa-resource.c
diff --git a/arch/mips/mach-octeon/cvmx-fpa-resource.c b/arch/mips/mach-octeon/cvmx-fpa-resource.c
new file mode 100644
index 000000000000..743518850088
--- /dev/null
+++ b/arch/mips/mach-octeon/cvmx-fpa-resource.c
@@ -0,0 +1,305 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018-2022 Marvell International Ltd.
+ */
+
+#include <errno.h>
+#include <log.h>
+#include <time.h>
+#include <linux/delay.h>
+
+#include <mach/cvmx-regs.h>
+#include <mach/cvmx-csr.h>
+#include <mach/cvmx-bootmem.h>
+#include <mach/octeon-model.h>
+#include <mach/cvmx-fuse.h>
+#include <mach/octeon-feature.h>
+#include <mach/cvmx-qlm.h>
+#include <mach/octeon_qlm.h>
+#include <mach/cvmx-pcie.h>
+#include <mach/cvmx-coremask.h>
+#include <mach/cvmx-range.h>
+#include <mach/cvmx-global-resources.h>
+
+#include <mach/cvmx-agl-defs.h>
+#include <mach/cvmx-bgxx-defs.h>
+#include <mach/cvmx-ciu-defs.h>
+#include <mach/cvmx-gmxx-defs.h>
+#include <mach/cvmx-gserx-defs.h>
+#include <mach/cvmx-ilk-defs.h>
+#include <mach/cvmx-ipd-defs.h>
+#include <mach/cvmx-pcsx-defs.h>
+#include <mach/cvmx-pcsxx-defs.h>
+#include <mach/cvmx-pki-defs.h>
+#include <mach/cvmx-pko-defs.h>
+#include <mach/cvmx-xcv-defs.h>
+
+#include <mach/cvmx-hwpko.h>
+#include <mach/cvmx-ilk.h>
+#include <mach/cvmx-ipd.h>
+#include <mach/cvmx-pki.h>
+#include <mach/cvmx-pko3.h>
+#include <mach/cvmx-pko3-queue.h>
+#include <mach/cvmx-pko3-resources.h>
+
+#include <mach/cvmx-helper.h>
+#include <mach/cvmx-helper-board.h>
+#include <mach/cvmx-helper-cfg.h>
+
+#include <mach/cvmx-helper-bgx.h>
+#include <mach/cvmx-helper-cfg.h>
+#include <mach/cvmx-helper-util.h>
+#include <mach/cvmx-helper-pki.h>
+
+/* Tag identifying the single global FPA1 pool resource range */
+static struct global_resource_tag get_fpa1_resource_tag(void)
+{
+ return CVMX_GR_TAG_FPA;
+}
+
+/* Build the per-node global resource tag for FPA3 auras
+ * ("cvm_aura_<node>" padded with dots)
+ */
+static struct global_resource_tag get_fpa3_aura_resource_tag(int node)
+{
+ return cvmx_get_gr_tag('c', 'v', 'm', '_', 'a', 'u', 'r', 'a', '_',
+ node + '0', '.', '.', '.', '.', '.', '.');
+}
+
+/* Build the per-node global resource tag for FPA3 pools
+ * ("cvm_pool_<node>" padded with dots)
+ */
+static struct global_resource_tag get_fpa3_pool_resource_tag(int node)
+{
+ return cvmx_get_gr_tag('c', 'v', 'm', '_', 'p', 'o', 'o', 'l', '_',
+ node + '0', '.', '.', '.', '.', '.', '.');
+}
+
+/**
+ * Get the number of FPA pools (auras when the FPA3 unit is present)
+ * that the API can hand out.
+ *
+ * @return pool/aura count for this model
+ */
+int cvmx_fpa_get_max_pools(void)
+{
+ if (octeon_has_feature(OCTEON_FEATURE_FPA3))
+ return cvmx_fpa3_num_auras();
+
+ /* Every FPA1 model reports the same count; on CN68XX pool 8
+ * exists in hardware but is not available via the API.
+ */
+ return CVMX_FPA1_NUM_POOLS;
+}
+
+/* Return the owner id recorded in the global resource database for
+ * the given FPA3 aura
+ */
+u64 cvmx_fpa3_get_aura_owner(cvmx_fpa3_gaura_t aura)
+{
+ return cvmx_get_global_resource_owner(
+ get_fpa3_aura_resource_tag(aura.node), aura.laura);
+}
+
+/* Return the owner id recorded in the global resource database for
+ * the given FPA1 pool
+ */
+u64 cvmx_fpa1_get_pool_owner(cvmx_fpa1_pool_t pool)
+{
+ return cvmx_get_global_resource_owner(get_fpa1_resource_tag(), pool);
+}
+
+/* Look up the owner of a pool number, dispatching to the aura-based
+ * lookup when the FPA3 unit is present
+ */
+u64 cvmx_fpa_get_pool_owner(int pool_num)
+{
+ if (!octeon_has_feature(OCTEON_FEATURE_FPA3))
+ return cvmx_fpa1_get_pool_owner(pool_num);
+
+ return cvmx_fpa3_get_aura_owner(
+ cvmx_fpa1_pool_to_fpa3_aura(pool_num));
+}
+
+/**
+ * Reserve an FPA3 aura on the given node.
+ *
+ * @param node node to reserve the aura on, or -1 for the local node
+ * @param desired_aura_num specific aura to reserve, or a negative
+ * value to let the allocator pick one (allocated in
+ * reverse order)
+ *
+ * @return the reserved aura, or CVMX_FPA3_INVALID_GAURA on failure
+ */
+cvmx_fpa3_gaura_t cvmx_fpa3_reserve_aura(int node, int desired_aura_num)
+{
+ u64 owner = cvmx_get_app_id();
+ int rv = 0;
+ struct global_resource_tag tag;
+ cvmx_fpa3_gaura_t aura;
+
+ if (node == -1)
+ node = cvmx_get_node_num();
+
+ tag = get_fpa3_aura_resource_tag(node);
+
+ /* Create the resource range if this is the first reservation */
+ if (cvmx_create_global_resource_range(tag, cvmx_fpa3_num_auras()) !=
+ 0) {
+ printf("ERROR: %s: global resource create node=%u\n", __func__,
+ node);
+ return CVMX_FPA3_INVALID_GAURA;
+ }
+
+ if (desired_aura_num >= 0)
+ rv = cvmx_reserve_global_resource_range(tag, owner,
+ desired_aura_num, 1);
+ else
+ rv = cvmx_resource_alloc_reverse(tag, owner);
+
+ if (rv < 0) {
+ printf("ERROR: %s: node=%u desired aura=%d\n", __func__, node,
+ desired_aura_num);
+ return CVMX_FPA3_INVALID_GAURA;
+ }
+
+ aura = __cvmx_fpa3_gaura(node, rv);
+
+ return aura;
+}
+
+/**
+ * Release a previously reserved FPA3 aura back to the global pool.
+ *
+ * @return 0 on success, -1 if the aura handle is invalid
+ */
+int cvmx_fpa3_release_aura(cvmx_fpa3_gaura_t aura)
+{
+ struct global_resource_tag tag;
+ int laura;
+
+ if (!__cvmx_fpa3_aura_valid(aura))
+ return -1;
+
+ tag = get_fpa3_aura_resource_tag(aura.node);
+ laura = aura.laura;
+
+ return cvmx_free_global_resource_range_multiple(tag, &laura, 1);
+}
+
+/**
+ * Reserve an FPA3 pool on the given node.
+ *
+ * @param node node to reserve the pool on, or -1 for the local node
+ * @param desired_pool_num specific pool to reserve, or a negative
+ * value to let the allocator pick one (allocated in
+ * reverse order)
+ *
+ * @return the reserved pool, or CVMX_FPA3_INVALID_POOL on failure
+ */
+cvmx_fpa3_pool_t cvmx_fpa3_reserve_pool(int node, int desired_pool_num)
+{
+ u64 owner = cvmx_get_app_id();
+ int rv = 0;
+ struct global_resource_tag tag;
+ cvmx_fpa3_pool_t pool;
+
+ if (node == -1)
+ node = cvmx_get_node_num();
+
+ tag = get_fpa3_pool_resource_tag(node);
+
+ /* Create the resource range if this is the first reservation */
+ if (cvmx_create_global_resource_range(tag, cvmx_fpa3_num_pools()) !=
+ 0) {
+ printf("ERROR: %s: global resource create node=%u\n", __func__,
+ node);
+ return CVMX_FPA3_INVALID_POOL;
+ }
+
+ if (desired_pool_num >= 0)
+ rv = cvmx_reserve_global_resource_range(tag, owner,
+ desired_pool_num, 1);
+ else
+ rv = cvmx_resource_alloc_reverse(tag, owner);
+
+ if (rv < 0) {
+ /* Desired pool is already in use */
+ return CVMX_FPA3_INVALID_POOL;
+ }
+
+ pool = __cvmx_fpa3_pool(node, rv);
+
+ return pool;
+}
+
+/**
+ * Release a previously reserved FPA3 pool.
+ *
+ * @return 0 on success, -1 on failure
+ */
+int cvmx_fpa3_release_pool(cvmx_fpa3_pool_t pool)
+{
+ struct global_resource_tag tag = get_fpa3_pool_resource_tag(pool.node);
+ int lpool = pool.lpool;
+
+ if (!__cvmx_fpa3_pool_valid(pool))
+ return -1;
+
+ /* NOTE(review): creating the resource range on the release path
+ * looks redundant (the reserve path already creates it); presumably
+ * kept so a release before any reserve does not fail — confirm.
+ */
+ if (cvmx_create_global_resource_range(tag, cvmx_fpa3_num_pools()) !=
+ 0) {
+ printf("ERROR: %s: global resource create node=%u\n", __func__,
+ pool.node);
+ return -1;
+ }
+
+ return cvmx_free_global_resource_range_multiple(tag, &lpool, 1);
+}
+
+/**
+ * Reserve an FPA1 pool number.
+ *
+ * @param desired_pool_num specific pool to reserve, or a negative
+ * value to let the allocator pick one (allocated in
+ * reverse order)
+ *
+ * @return the reserved pool number, or a negative value on failure
+ * (-1 when the resource range cannot be created,
+ * CVMX_RESOURCE_ALREADY_RESERVED when the pool is taken)
+ */
+cvmx_fpa1_pool_t cvmx_fpa1_reserve_pool(int desired_pool_num)
+{
+ u64 owner = cvmx_get_app_id();
+ struct global_resource_tag tag;
+ int rv;
+
+ tag = get_fpa1_resource_tag();
+
+ /* Create the resource range if this is the first reservation */
+ if (cvmx_create_global_resource_range(tag, CVMX_FPA1_NUM_POOLS) != 0) {
+ printf("ERROR: %s: global resource not created\n", __func__);
+ return -1;
+ }
+
+ if (desired_pool_num >= 0) {
+ rv = cvmx_reserve_global_resource_range(tag, owner,
+ desired_pool_num, 1);
+ } else {
+ rv = cvmx_resource_alloc_reverse(tag, owner);
+ }
+
+ if (rv < 0) {
+ printf("ERROR: %s: FPA_POOL %d unavailable\n", __func__,
+ desired_pool_num);
+ return CVMX_RESOURCE_ALREADY_RESERVED;
+ }
+ return (cvmx_fpa1_pool_t)rv;
+}
+
+/**
+ * Return an FPA1 pool number to the global free range.
+ *
+ * @return result of the global resource free operation
+ */
+int cvmx_fpa1_release_pool(cvmx_fpa1_pool_t pool)
+{
+ return cvmx_free_global_resource_range_multiple(
+ get_fpa1_resource_tag(), &pool, 1);
+}
+
+/**
+ * Query if an FPA pool is available for reservation
+ * using global resources, by trying a reserve/release cycle.
+ * @note This function is no longer in use, and will be removed in a future release
+ */
+int cvmx_fpa1_is_pool_available(cvmx_fpa1_pool_t pool)
+{
+ if (cvmx_fpa1_reserve_pool(pool) != -1) {
+ cvmx_fpa1_release_pool(pool);
+ return 1;
+ }
+
+ return 0;
+}
+
+/**
+ * @INTERNAL
+ * Probe availability of an FPA3 pool by trying a reserve/release
+ * cycle.
+ *
+ * This function is no longer in use, and will be removed in a future release
+ */
+int cvmx_fpa3_is_pool_available(int node, int lpool)
+{
+ cvmx_fpa3_pool_t probe = cvmx_fpa3_reserve_pool(node, lpool);
+
+ if (__cvmx_fpa3_pool_valid(probe)) {
+ cvmx_fpa3_release_pool(probe);
+ return 1;
+ }
+
+ return 0;
+}
+
+/**
+ * @INTERNAL
+ * Probe availability of an FPA3 aura by trying a reserve/release
+ * cycle.
+ *
+ * This function is no longer in use, and will be removed in a future release
+ */
+int cvmx_fpa3_is_aura_available(int node, int laura)
+{
+ cvmx_fpa3_gaura_t probe = cvmx_fpa3_reserve_aura(node, laura);
+
+ if (__cvmx_fpa3_aura_valid(probe)) {
+ cvmx_fpa3_release_aura(probe);
+ return 1;
+ }
+
+ return 0;
+}
+
+/**
+ * Return if aura/pool is already reserved
+ * @param pool_num - pool to check (aura for o78+)
+ * @return 0 if reserved, 1 if available
+ *
+ * @note This function is no longer in use, and will be removed in a future release
+ */
+int cvmx_fpa_is_pool_available(int pool_num)
+{
+ if (!octeon_has_feature(OCTEON_FEATURE_FPA3))
+ return cvmx_fpa1_is_pool_available(pool_num);
+
+ return cvmx_fpa3_is_aura_available(0, pool_num);
+}
--
2.35.1
^ permalink raw reply related [flat|nested] 52+ messages in thread* [PATCH 29/52] mips: octeon: Add cvmx-global-resource.c
2022-03-30 10:06 [PATCH 00/52] mips: octeon: Add ethernet support Stefan Roese
` (24 preceding siblings ...)
2022-03-30 10:07 ` [PATCH 28/52] mips: octeon: Add cvmx-fpa-resource.c Stefan Roese
@ 2022-03-30 10:07 ` Stefan Roese
2022-03-30 10:07 ` [PATCH 30/52] mips: octeon: Add cvmx-ilk.c Stefan Roese
` (23 subsequent siblings)
49 siblings, 0 replies; 52+ messages in thread
From: Stefan Roese @ 2022-03-30 10:07 UTC (permalink / raw)
To: u-boot; +Cc: awilliams, cchavva
From: Aaron Williams <awilliams@marvell.com>
Import cvmx-global-resource.c from 2013 U-Boot. It will be used by the later
added drivers to support networking on the MIPS Octeon II / III
platforms.
Signed-off-by: Aaron Williams <awilliams@marvell.com>
Signed-off-by: Stefan Roese <sr@denx.de>
---
arch/mips/mach-octeon/cvmx-global-resources.c | 639 ++++++++++++++++++
1 file changed, 639 insertions(+)
create mode 100644 arch/mips/mach-octeon/cvmx-global-resources.c
diff --git a/arch/mips/mach-octeon/cvmx-global-resources.c b/arch/mips/mach-octeon/cvmx-global-resources.c
new file mode 100644
index 000000000000..14a2ed524714
--- /dev/null
+++ b/arch/mips/mach-octeon/cvmx-global-resources.c
@@ -0,0 +1,639 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018-2022 Marvell International Ltd.
+ *
+ */
+
+#include <log.h>
+#include <time.h>
+#include <linux/delay.h>
+
+#include <mach/cvmx-regs.h>
+#include <mach/cvmx-csr.h>
+#include <mach/cvmx-bootmem.h>
+#include <mach/octeon-model.h>
+#include <mach/cvmx-fuse.h>
+#include <mach/octeon-feature.h>
+#include <mach/cvmx-qlm.h>
+#include <mach/octeon_qlm.h>
+#include <mach/cvmx-pcie.h>
+#include <mach/cvmx-coremask.h>
+#include <mach/cvmx-range.h>
+
+#include <mach/cvmx-global-resources.h>
+#include <mach/cvmx-bootmem.h>
+
+#include <mach/cvmx-pki.h>
+#include <mach/cvmx-helper.h>
+#include <mach/cvmx-helper-board.h>
+#include <mach/cvmx-helper-cfg.h>
+
+#define CVMX_MAX_GLOBAL_RESOURCES 128
+#define CVMX_RESOURCES_ENTRIES_SIZE \
+ (sizeof(struct cvmx_global_resource_entry) * CVMX_MAX_GLOBAL_RESOURCES)
+
+/**
+ * This macro returns a member of the data
+ * structure. The argument "field" is the member name of the
+ * structure to read. The return type is a u64.
+ */
+#define CVMX_GLOBAL_RESOURCES_GET_FIELD(field) \
+ __cvmx_struct_get_unsigned_field( \
+ __cvmx_global_resources_addr, \
+ offsetof(struct cvmx_global_resources, field), \
+ SIZEOF_FIELD(struct cvmx_global_resources, field))
+
+/**
+ * This macro writes a member of the struct cvmx_global_resources
+ * structure. The argument "field" is the member name of the
+ * struct cvmx_global_resources to write.
+ */
+#define CVMX_GLOBAL_RESOURCES_SET_FIELD(field, value) \
+ __cvmx_struct_set_unsigned_field( \
+ __cvmx_global_resources_addr, \
+ offsetof(struct cvmx_global_resources, field), \
+ SIZEOF_FIELD(struct cvmx_global_resources, field), value)
+
+/**
+ * This macro returns a member of the struct cvmx_global_resource_entry.
+ * The argument "field" is the member name of this structure.
+ * the return type is a u64. The "addr" parameter is the physical
+ * address of the structure.
+ */
+#define CVMX_RESOURCE_ENTRY_GET_FIELD(addr, field) \
+ __cvmx_struct_get_unsigned_field( \
+ addr, offsetof(struct cvmx_global_resource_entry, field), \
+ SIZEOF_FIELD(struct cvmx_global_resource_entry, field))
+
+/**
+ * This macro writes a member of the struct cvmx_global_resource_entry
+ * structure. The argument "field" is the member name of the
+ * struct cvmx_global_resource_entry to write. The "addr" parameter
+ * is the physical address of the structure.
+ */
+#define CVMX_RESOURCE_ENTRY_SET_FIELD(addr, field, value) \
+ __cvmx_struct_set_unsigned_field( \
+ addr, offsetof(struct cvmx_global_resource_entry, field), \
+ SIZEOF_FIELD(struct cvmx_global_resource_entry, field), value)
+
+#define CVMX_GET_RESOURCE_ENTRY(count) \
+ (__cvmx_global_resources_addr + \
+ offsetof(struct cvmx_global_resources, resource_entry) + \
+ (count * sizeof(struct cvmx_global_resource_entry)))
+
+#define CVMX_RESOURCE_TAG_SET_FIELD(addr, field, value) \
+ __cvmx_struct_set_unsigned_field( \
+ addr, offsetof(struct global_resource_tag, field), \
+ SIZEOF_FIELD(struct global_resource_tag, field), value)
+
+#define CVMX_RESOURCE_TAG_GET_FIELD(addr, field) \
+ __cvmx_struct_get_unsigned_field( \
+ addr, offsetof(struct global_resource_tag, field), \
+ SIZEOF_FIELD(struct global_resource_tag, field))
+
+#define MAX_RESOURCE_TAG_LEN 16
+#define CVMX_GLOBAL_RESOURCE_NO_LOCKING (1)
+
+/* One registered global resource: its 128-bit identifying tag plus the
+ * physical address and size of the bootmem region backing it.
+ */
+struct cvmx_global_resource_entry {
+	struct global_resource_tag tag;
+	u64 phys_addr;
+	u64 size;
+};
+
+/* Shared resource-table header kept in named bootmem.  rlock is the
+ * 32-bit word used by the ll/sc spinlock guarding the flexible array
+ * of entries; entry_cnt is the number of populated entries.
+ */
+struct cvmx_global_resources {
+	u32 pad;
+	u32 rlock;
+	u64 entry_cnt;
+	struct cvmx_global_resource_entry resource_entry[];
+};
+
+/* Not the right place, putting it here for now */
+int cvmx_enable_helper_flag;
+u64 cvmx_app_id;
+
+/*
+ * Global named memory can be accessed anywhere even in 32-bit mode
+ */
+static u64 __cvmx_global_resources_addr;
+
+/**
+ * This macro returns the size of a member of a structure.
+ */
+#define SIZEOF_FIELD(s, field) sizeof(((s *)NULL)->field)
+
+/**
+ * Implementation behind the per-member GET macros: read a single
+ * 4- or 8-byte structure member directly from physical memory.
+ *
+ * @param base   64-bit physical address of the complete structure
+ * @param offset byte offset of the member within the structure
+ * @param size   size of the member in bytes (4 or 8; anything else
+ *               yields 0)
+ *
+ * @return member value promoted to u64
+ */
+static inline u64 __cvmx_struct_get_unsigned_field(u64 base, int offset,
+						   int size)
+{
+	/* Access through the xkphys window (bit 63 set) */
+	u64 addr = (1ull << 63) | (base + offset);
+
+	if (size == 4)
+		return cvmx_read64_uint32(addr);
+	if (size == 8)
+		return cvmx_read64_uint64(addr);
+
+	return 0;
+}
+
+/**
+ * Implementation behind the per-member SET macros: write a single
+ * 4- or 8-byte structure member directly to physical memory.
+ *
+ * @param base   64-bit physical address of the complete structure
+ * @param offset byte offset of the member within the structure
+ * @param size   size of the member in bytes (4 or 8; anything else is
+ *               silently ignored)
+ * @param value  value to store
+ */
+static inline void __cvmx_struct_set_unsigned_field(u64 base, int offset,
+						    int size, u64 value)
+{
+	/* Access through the xkphys window (bit 63 set) */
+	u64 addr = (1ull << 63) | (base + offset);
+
+	if (size == 4)
+		cvmx_write64_uint32(addr, value);
+	else if (size == 8)
+		cvmx_write64_uint64(addr, value);
+}
+
+/* Get the global resource lock.
+ *
+ * ll/sc test-and-set spin loop on the 32-bit rlock word of the shared
+ * cvmx_global_resources structure, addressed through xkphys (bit 63)
+ * so the same physical word is used by all cores.
+ */
+static inline void __cvmx_global_resource_lock(void)
+{
+	u64 lock_addr =
+		(1ull << 63) | (__cvmx_global_resources_addr +
+				offsetof(struct cvmx_global_resources, rlock));
+	unsigned int tmp;
+
+	/* Spin while the word is non-zero; retry the whole sequence if
+	 * the sc (store-conditional) fails.
+	 */
+	__asm__ __volatile__(".set noreorder\n"
+			     "1: ll %[tmp], 0(%[addr])\n"
+			     " bnez %[tmp], 1b\n"
+			     " li %[tmp], 1\n"
+			     " sc %[tmp], 0(%[addr])\n"
+			     " beqz %[tmp], 1b\n"
+			     " nop\n"
+			     ".set reorder\n"
+			     : [tmp] "=&r"(tmp)
+			     : [addr] "r"(lock_addr)
+			     : "memory");
+}
+
+/* Release the global resource lock.
+ *
+ * A plain word store of zero; SYNCW barriers before and after order
+ * the critical-section writes relative to the release.
+ */
+static inline void __cvmx_global_resource_unlock(void)
+{
+	u64 lock_addr =
+		(1ull << 63) | (__cvmx_global_resources_addr +
+				offsetof(struct cvmx_global_resources, rlock));
+	CVMX_SYNCW;
+	__asm__ __volatile__("sw $0, 0(%[addr])\n"
+			     :
+			     : [addr] "r"(lock_addr)
+			     : "memory");
+	CVMX_SYNCW;
+}
+
+/**
+ * Carve @sz bytes of cache-line-aligned bootmem for a global resource.
+ *
+ * @return physical address of the allocation
+ */
+static u64 __cvmx_alloc_bootmem_for_global_resources(int sz)
+{
+	void *mem = cvmx_bootmem_alloc_range(sz, CVMX_CACHE_LINE_SIZE, 0, 0);
+
+	return cvmx_ptr_to_phys(mem);
+}
+
+/**
+ * Decode a resource tag into a printable name.
+ *
+ * The 16 tag bytes are stored big-endian across the lo/hi words of the
+ * tag; they are unpacked into @tagname, which must be at least
+ * MAX_RESOURCE_TAG_LEN + 1 bytes long.
+ *
+ * The output is explicitly NUL-terminated.  Previously only the 16 tag
+ * bytes were written, so callers that did not pre-zero their buffer
+ * (the tagname[256] locals used for error reporting below) handed an
+ * unterminated string to a "%s" format.
+ *
+ * @param rtag     tag to decode
+ * @param tagname  output buffer (>= MAX_RESOURCE_TAG_LEN + 1 bytes)
+ */
+static inline void __cvmx_get_tagname(struct global_resource_tag *rtag,
+				      char *tagname)
+{
+	int i, j, k;
+
+	j = 0;
+	k = 8;
+	for (i = 7; i >= 0; i--, j++, k++) {
+		tagname[j] = (rtag->lo >> (i * 8)) & 0xff;
+		tagname[k] = (rtag->hi >> (i * 8)) & 0xff;
+	}
+	/* Guarantee termination regardless of the caller's buffer state */
+	tagname[MAX_RESOURCE_TAG_LEN] = '\0';
+}
+
+/**
+ * Find or create the named bootmem block that holds the shared global
+ * resource table, recording its physical address in
+ * __cvmx_global_resources_addr.
+ *
+ * Called lazily by the public entry points whenever the cached address
+ * is still zero.
+ *
+ * @return physical address of the table, or 0 on allocation failure
+ */
+static u64 __cvmx_global_resources_init(void)
+{
+	struct cvmx_bootmem_named_block_desc *block_desc;
+	int sz = sizeof(struct cvmx_global_resources) +
+		 CVMX_RESOURCES_ENTRIES_SIZE;
+	s64 tmp_phys;
+	int count = 0;
+	u64 base = 0;
+
+	cvmx_bootmem_lock();
+
+	/* Bootmem lock is held, so search and allocate with NO_LOCKING */
+	block_desc = (struct cvmx_bootmem_named_block_desc *)
+		__cvmx_bootmem_find_named_block_flags(
+			CVMX_GLOBAL_RESOURCES_DATA_NAME,
+			CVMX_BOOTMEM_FLAG_NO_LOCKING);
+	if (!block_desc) {
+		debug("%s: allocating global resources\n", __func__);
+
+		tmp_phys = cvmx_bootmem_phy_named_block_alloc(
+			sz, 0, 0, CVMX_CACHE_LINE_SIZE,
+			CVMX_GLOBAL_RESOURCES_DATA_NAME,
+			CVMX_BOOTMEM_FLAG_NO_LOCKING);
+		if (tmp_phys < 0) {
+			cvmx_printf(
+				"ERROR: %s: failed to allocate global resource name block. sz=%d\n",
+				__func__, sz);
+			goto end;
+		}
+		__cvmx_global_resources_addr = (u64)tmp_phys;
+
+		debug("%s: memset global resources %llu\n", __func__,
+		      CAST_ULL(__cvmx_global_resources_addr));
+
+		/* Zero the new table 8 bytes at a time through xkphys */
+		base = (1ull << 63) | __cvmx_global_resources_addr;
+		for (count = 0; count < (sz / 8); count++) {
+			cvmx_write64_uint64(base, 0);
+			base += 8;
+		}
+	} else {
+		debug("%s:found global resource\n", __func__);
+		__cvmx_global_resources_addr = block_desc->base_addr;
+	}
+end:
+	cvmx_bootmem_unlock();
+	debug("__cvmx_global_resources_addr=%llu sz=%d\n",
+	      CAST_ULL(__cvmx_global_resources_addr), sz);
+	return __cvmx_global_resources_addr;
+}
+
+/**
+ * Look up the physical address of a named global resource.
+ *
+ * Linearly scans the shared entry table for an entry whose 128-bit tag
+ * matches @tag.
+ *
+ * @param tag      resource identifier (16 tag bytes packed into lo/hi)
+ * @param no_lock  non-zero when the caller already holds the global
+ *                 resource lock
+ *
+ * @return physical address of the resource, or 0 when not found
+ */
+u64 cvmx_get_global_resource(struct global_resource_tag tag, int no_lock)
+{
+	u64 entry_cnt = 0;
+	u64 resource_entry_addr = 0;
+	int count = 0;
+	u64 rphys_addr = 0;
+	u64 tag_lo = 0, tag_hi = 0;
+
+	/* Lazily create/attach the shared table on first use */
+	if (__cvmx_global_resources_addr == 0)
+		__cvmx_global_resources_init();
+	if (!no_lock)
+		__cvmx_global_resource_lock();
+
+	entry_cnt = CVMX_GLOBAL_RESOURCES_GET_FIELD(entry_cnt);
+	while (entry_cnt > 0) {
+		resource_entry_addr = CVMX_GET_RESOURCE_ENTRY(count);
+		tag_lo = CVMX_RESOURCE_TAG_GET_FIELD(resource_entry_addr, lo);
+		tag_hi = CVMX_RESOURCE_TAG_GET_FIELD(resource_entry_addr, hi);
+
+		if (tag_lo == tag.lo && tag_hi == tag.hi) {
+			debug("%s: Found global resource entry\n", __func__);
+			break;
+		}
+		entry_cnt--;
+		count++;
+	}
+
+	/* entry_cnt reaches 0 only when the scan exhausted all entries */
+	if (entry_cnt == 0) {
+		debug("%s: no matching global resource entry found\n",
+		      __func__);
+		if (!no_lock)
+			__cvmx_global_resource_unlock();
+		return 0;
+	}
+	rphys_addr =
+		CVMX_RESOURCE_ENTRY_GET_FIELD(resource_entry_addr, phys_addr);
+	if (!no_lock)
+		__cvmx_global_resource_unlock();
+
+	return rphys_addr;
+}
+
+/**
+ * Return an existing resource matching @tag, or create a new one of
+ * @size bytes backed by bootmem.
+ *
+ * @param tag      resource identifier
+ * @param size     bytes of bootmem to allocate when creating
+ * @param no_lock  non-zero when the caller already holds the global
+ *                 resource lock
+ * @param _new_    out: 1 when a new entry was created, 0 when an
+ *                 existing one was returned
+ *
+ * @return physical address of the resource, or 0 on failure (table
+ *         full or out of memory)
+ */
+u64 cvmx_create_global_resource(struct global_resource_tag tag, u64 size,
+				int no_lock, int *_new_)
+{
+	u64 entry_count = 0;
+	u64 resource_entry_addr = 0;
+	u64 phys_addr;
+
+	if (__cvmx_global_resources_addr == 0)
+		__cvmx_global_resources_init();
+
+	if (!no_lock)
+		__cvmx_global_resource_lock();
+
+	/* Lock is held here, so look up with NO_LOCKING */
+	phys_addr =
+		cvmx_get_global_resource(tag, CVMX_GLOBAL_RESOURCE_NO_LOCKING);
+	if (phys_addr != 0) {
+		/* we already have the resource, return it */
+		*_new_ = 0;
+		goto end;
+	}
+
+	*_new_ = 1;
+	entry_count = CVMX_GLOBAL_RESOURCES_GET_FIELD(entry_cnt);
+	if (entry_count >= CVMX_MAX_GLOBAL_RESOURCES) {
+		char tagname[MAX_RESOURCE_TAG_LEN + 1];
+
+		__cvmx_get_tagname(&tag, tagname);
+		cvmx_printf(
+			"ERROR: %s: reached global resources limit for %s\n",
+			__func__, tagname);
+		phys_addr = 0;
+		goto end;
+	}
+
+	/* Allocate bootmem for the resource*/
+	phys_addr = __cvmx_alloc_bootmem_for_global_resources(size);
+	if (!phys_addr) {
+		char tagname[MAX_RESOURCE_TAG_LEN + 1];
+
+		__cvmx_get_tagname(&tag, tagname);
+		debug("ERROR: %s: out of memory %s, size=%d\n", __func__,
+		      tagname, (int)size);
+		goto end;
+	}
+
+	/* Record the new entry: backing memory first, then the tag */
+	resource_entry_addr = CVMX_GET_RESOURCE_ENTRY(entry_count);
+	CVMX_RESOURCE_ENTRY_SET_FIELD(resource_entry_addr, phys_addr,
+				      phys_addr);
+	CVMX_RESOURCE_ENTRY_SET_FIELD(resource_entry_addr, size, size);
+	CVMX_RESOURCE_TAG_SET_FIELD(resource_entry_addr, lo, tag.lo);
+	CVMX_RESOURCE_TAG_SET_FIELD(resource_entry_addr, hi, tag.hi);
+	/* update entry_cnt */
+	entry_count += 1;
+	CVMX_GLOBAL_RESOURCES_SET_FIELD(entry_cnt, entry_count);
+
+end:
+	if (!no_lock)
+		__cvmx_global_resource_unlock();
+
+	return phys_addr;
+}
+
+/**
+ * Create (once) a range-allocator resource capable of tracking
+ * @nelements elements, initializing the range state on first creation.
+ *
+ * @param tag        resource identifier
+ * @param nelements  number of elements the range must track
+ *
+ * @return 0 on success, -1 when backing memory cannot be created
+ */
+int cvmx_create_global_resource_range(struct global_resource_tag tag,
+				      int nelements)
+{
+	int sz = cvmx_range_memory_size(nelements);
+	int _new_;
+	u64 addr;
+	int rv = 0;
+
+	if (__cvmx_global_resources_addr == 0)
+		__cvmx_global_resources_init();
+
+	__cvmx_global_resource_lock();
+	/* Lock is held, so pass no_lock = 1 down */
+	addr = cvmx_create_global_resource(tag, sz, 1, &_new_);
+	if (!addr) {
+		__cvmx_global_resource_unlock();
+		return -1;
+	}
+	/* Only initialize the range state the first time it is created */
+	if (_new_)
+		rv = cvmx_range_init(addr, nelements);
+	__cvmx_global_resource_unlock();
+	return rv;
+}
+
+/**
+ * Allocate a contiguous, aligned run of elements from a named global
+ * resource range.
+ *
+ * @param tag        identifies the resource range
+ * @param owner      value recorded as the owner of the elements
+ * @param nelements  number of contiguous elements wanted
+ * @param alignment  required alignment of the base element
+ *
+ * @return base element number on success, -1 on failure
+ */
+int cvmx_allocate_global_resource_range(struct global_resource_tag tag,
+					u64 owner, int nelements, int alignment)
+{
+	u64 addr = cvmx_get_global_resource(tag, 1);
+	int base;
+
+	if (addr == 0) {
+		/*
+		 * Zero-initialize so the buffer is always NUL-terminated:
+		 * __cvmx_get_tagname() fills only the first 16 bytes, and
+		 * an uninitialized buffer here fed garbage to "%s".
+		 */
+		char tagname[256] = { 0 };
+
+		__cvmx_get_tagname(&tag, tagname);
+		cvmx_printf("ERROR: %s: cannot find resource %s\n", __func__,
+			    tagname);
+		return -1;
+	}
+	__cvmx_global_resource_lock();
+	base = cvmx_range_alloc(addr, owner, nelements, alignment);
+	__cvmx_global_resource_unlock();
+	return base;
+}
+
+/**
+ * Allocate @nelements possibly non-contiguous elements from a named
+ * global resource range.
+ *
+ * @param tag                identifies the resource range
+ * @param owner              value recorded as the owner
+ * @param nelements          number of elements wanted
+ * @param allocated_elements out: element numbers actually allocated
+ *
+ * @return result of the range allocation, or -1 when the resource
+ *         does not exist
+ */
+int cvmx_resource_alloc_many(struct global_resource_tag tag, u64 owner,
+			     int nelements, int allocated_elements[])
+{
+	u64 addr = cvmx_get_global_resource(tag, 1);
+	int rv;
+
+	if (addr == 0) {
+		/*
+		 * Zero-initialize so the buffer is always NUL-terminated:
+		 * __cvmx_get_tagname() fills only the first 16 bytes.
+		 */
+		char tagname[256] = { 0 };
+
+		__cvmx_get_tagname(&tag, tagname);
+		debug("ERROR: cannot find resource %s\n", tagname);
+		return -1;
+	}
+	__cvmx_global_resource_lock();
+	rv = cvmx_range_alloc_non_contiguos(addr, owner, nelements,
+					    allocated_elements);
+	__cvmx_global_resource_unlock();
+	return rv;
+}
+
+/**
+ * Allocate one element from a named global resource range, searching
+ * from the top of the range downward.
+ *
+ * @param tag    identifies the resource range
+ * @param owner  value recorded as the owner
+ *
+ * @return allocated element number, or -1 when the resource does not
+ *         exist
+ */
+int cvmx_resource_alloc_reverse(struct global_resource_tag tag, u64 owner)
+{
+	u64 addr = cvmx_get_global_resource(tag, 1);
+	int rv;
+
+	if (addr == 0) {
+		/*
+		 * Zero-initialize so the buffer is always NUL-terminated:
+		 * __cvmx_get_tagname() fills only the first 16 bytes.
+		 */
+		char tagname[256] = { 0 };
+
+		__cvmx_get_tagname(&tag, tagname);
+		debug("ERROR: cannot find resource %s\n", tagname);
+		return -1;
+	}
+	__cvmx_global_resource_lock();
+	rv = cvmx_range_alloc_ordered(addr, owner, 1, 1, 1);
+	__cvmx_global_resource_unlock();
+	return rv;
+}
+
+/**
+ * Reserve the specific range [base, base + nelements) of a named
+ * global resource for @owner.
+ *
+ * NOTE(review): unlike the alloc/free helpers in this file, there is
+ * no guard for addr == 0 (resource never created); cvmx_range_reserve()
+ * would then be handed a null range address — confirm all callers
+ * create the range first.
+ *
+ * @return starting element on success, negative on failure
+ */
+int cvmx_reserve_global_resource_range(struct global_resource_tag tag,
+				       u64 owner, int base, int nelements)
+{
+	u64 addr = cvmx_get_global_resource(tag, 1);
+	int start;
+
+	__cvmx_global_resource_lock();
+	start = cvmx_range_reserve(addr, owner, base, nelements);
+	__cvmx_global_resource_unlock();
+	return start;
+}
+
+/**
+ * Free the range [base, base + nelements) of a named global resource.
+ *
+ * @return result of the range free, or 0 when the resource was never
+ *         created (nothing to release)
+ */
+int cvmx_free_global_resource_range_with_base(struct global_resource_tag tag,
+					      int base, int nelements)
+{
+	int ret;
+	u64 range_addr = cvmx_get_global_resource(tag, 1);
+
+	/* Resource was not created, nothing to release */
+	if (!range_addr)
+		return 0;
+
+	__cvmx_global_resource_lock();
+	ret = cvmx_range_free_with_base(range_addr, base, nelements);
+	__cvmx_global_resource_unlock();
+	return ret;
+}
+
+/**
+ * Free @nelements individually listed elements of a named global
+ * resource.
+ *
+ * @param tag        identifies the resource range
+ * @param bases      element numbers to release
+ * @param nelements  number of entries in @bases
+ *
+ * @return result of the range free, or 0 when the resource was never
+ *         created (nothing to release)
+ */
+int cvmx_free_global_resource_range_multiple(struct global_resource_tag tag,
+					     int bases[], int nelements)
+{
+	int ret;
+	u64 range_addr = cvmx_get_global_resource(tag, 1);
+
+	/* Resource was not created, nothing to release */
+	if (!range_addr)
+		return 0;
+
+	__cvmx_global_resource_lock();
+	ret = cvmx_range_free_mutiple(range_addr, bases, nelements);
+	__cvmx_global_resource_unlock();
+	return ret;
+}
+
+/**
+ * Free every element of a named global resource currently held by
+ * @owner.
+ *
+ * @return result of the range free, or 0 when the resource was never
+ *         created (nothing to release)
+ */
+int cvmx_free_global_resource_range_with_owner(struct global_resource_tag tag,
+					       int owner)
+{
+	int ret;
+	u64 range_addr = cvmx_get_global_resource(tag, 1);
+
+	/* Resource was not created, nothing to release */
+	if (!range_addr)
+		return 0;
+
+	__cvmx_global_resource_lock();
+	ret = cvmx_range_free_with_owner(range_addr, owner);
+	__cvmx_global_resource_unlock();
+	return ret;
+}
+
+/**
+ * Debug helper: dump the allocation state of a named global resource
+ * range.
+ */
+void cvmx_show_global_resource_range(struct global_resource_tag tag)
+{
+	cvmx_range_show(cvmx_get_global_resource(tag, 1));
+}
+
+/**
+ * Release every registered global resource back to bootmem and free
+ * the named table block itself, forcing re-initialization on next use.
+ *
+ * @return always 0
+ */
+int free_global_resources(void)
+{
+	int rc;
+	int i, entry_cnt;
+	u64 resource_entry_addr, phys_addr, size;
+
+	if (__cvmx_global_resources_addr == 0)
+		__cvmx_global_resources_init();
+
+	__cvmx_global_resource_lock();
+
+	entry_cnt = CVMX_GLOBAL_RESOURCES_GET_FIELD(entry_cnt);
+
+	/* get and free all the global resources */
+	for (i = 0; i < entry_cnt; i++) {
+		resource_entry_addr = CVMX_GET_RESOURCE_ENTRY(i);
+		phys_addr = CVMX_RESOURCE_ENTRY_GET_FIELD(resource_entry_addr,
+							  phys_addr);
+		size = CVMX_RESOURCE_ENTRY_GET_FIELD(resource_entry_addr, size);
+		/* free the resource */
+		/* NOTE(review): a zero return is treated as failure below —
+		 * confirm __cvmx_bootmem_phy_free() returns non-zero on
+		 * success.
+		 */
+		rc = __cvmx_bootmem_phy_free(phys_addr, size, 0);
+		if (!rc) {
+			debug("ERROR: %s: could not free memory to bootmem\n",
+			      __func__);
+		}
+	}
+
+	__cvmx_global_resource_unlock();
+
+	rc = cvmx_bootmem_free_named(CVMX_GLOBAL_RESOURCES_DATA_NAME);
+	debug("freed global resources named block rc=%d\n", rc);
+
+	/* Force re-initialization of the table on next use */
+	__cvmx_global_resources_addr = 0;
+
+	return 0;
+}
+
+/**
+ * Query the recorded owner of element @base in a named global resource
+ * range.
+ *
+ * @return the owner value, or -88LL (the "available" special owner
+ *         code) when the resource was never created
+ */
+u64 cvmx_get_global_resource_owner(struct global_resource_tag tag, int base)
+{
+	u64 range_addr = cvmx_get_global_resource(tag, 1);
+
+	/* Resource was not created, return "available" special owner code */
+	if (!range_addr)
+		return -88LL;
+
+	return cvmx_range_get_owner(range_addr, base);
+}
+
+/**
+ * Debug helper: print the tag name and physical address of every
+ * registered global resource.
+ */
+void cvmx_global_resources_show(void)
+{
+	u64 entry_cnt;
+	u64 p;
+	char tagname[MAX_RESOURCE_TAG_LEN + 1];
+	struct global_resource_tag rtag;
+	u64 count;
+	u64 phys_addr;
+
+	if (__cvmx_global_resources_addr == 0)
+		__cvmx_global_resources_init();
+
+	__cvmx_global_resource_lock();
+
+	entry_cnt = CVMX_GLOBAL_RESOURCES_GET_FIELD(entry_cnt);
+	/* Pre-zero so the decoded 16-byte tag is always NUL-terminated */
+	memset(tagname, 0, MAX_RESOURCE_TAG_LEN + 1);
+
+	for (count = 0; count < entry_cnt; count++) {
+		p = CVMX_GET_RESOURCE_ENTRY(count);
+		phys_addr = CVMX_RESOURCE_ENTRY_GET_FIELD(p, phys_addr);
+		rtag.lo = CVMX_RESOURCE_TAG_GET_FIELD(p, lo);
+		rtag.hi = CVMX_RESOURCE_TAG_GET_FIELD(p, hi);
+		__cvmx_get_tagname(&rtag, tagname);
+		debug("Global Resource tag name: %s Resource Address: %llx\n",
+		      tagname, CAST_ULL(phys_addr));
+	}
+	debug("<End of Global Resources>\n");
+	__cvmx_global_resource_unlock();
+}
+
+/**
+ * One-time initializer for the shared application-id counter, passed
+ * as the init callback to cvmx_bootmem_alloc_named_range_once().
+ *
+ * @param bootmem  freshly allocated counter storage
+ */
+void cvmx_app_id_init(void *bootmem)
+{
+	*(u64 *)bootmem = 0;
+}
+
+/**
+ * Allocate (once) the shared application-id counter and atomically
+ * increment it, caching the result in cvmx_app_id.
+ *
+ * NOTE(review): vptr is used unchecked — confirm
+ * cvmx_bootmem_alloc_named_range_once() cannot return NULL here.
+ *
+ * @return the newly assigned application id
+ */
+u64 cvmx_allocate_app_id(void)
+{
+	u64 *vptr;
+
+	/*
+	 * Upper address bound is 2 GiB so 32-bit applications can reach
+	 * the counter.  Use an unsigned long long constant: the previous
+	 * "1 << 31" shifted into the sign bit of an int, which is
+	 * undefined behavior in C.
+	 */
+	vptr = (u64 *)cvmx_bootmem_alloc_named_range_once(sizeof(cvmx_app_id),
+							  0, 1ull << 31, 128,
+							  "cvmx_app_id",
+							  cvmx_app_id_init);
+
+	cvmx_app_id = __atomic_add_fetch(vptr, 1, __ATOMIC_SEQ_CST);
+
+	debug("CVMX_APP_ID = %lx\n", (unsigned long)cvmx_app_id);
+	return cvmx_app_id;
+}
+
+/**
+ * Return the cached application id, allocating one on first call.
+ */
+u64 cvmx_get_app_id(void)
+{
+	if (!cvmx_app_id)
+		cvmx_allocate_app_id();
+	return cvmx_app_id;
+}
--
2.35.1
^ permalink raw reply related [flat|nested] 52+ messages in thread* [PATCH 30/52] mips: octeon: Add cvmx-ilk.c
2022-03-30 10:06 [PATCH 00/52] mips: octeon: Add ethernet support Stefan Roese
` (25 preceding siblings ...)
2022-03-30 10:07 ` [PATCH 29/52] mips: octeon: Add cvmx-global-resource.c Stefan Roese
@ 2022-03-30 10:07 ` Stefan Roese
2022-03-30 10:07 ` [PATCH 31/52] mips: octeon: Add cvmx-ipd.c Stefan Roese
` (22 subsequent siblings)
49 siblings, 0 replies; 52+ messages in thread
From: Stefan Roese @ 2022-03-30 10:07 UTC (permalink / raw)
To: u-boot; +Cc: awilliams, cchavva
From: Aaron Williams <awilliams@marvell.com>
Import cvmx-ilk.c from 2013 U-Boot. It will be used by the later
added drivers to support networking on the MIPS Octeon II / III
platforms.
Signed-off-by: Aaron Williams <awilliams@marvell.com>
Signed-off-by: Stefan Roese <sr@denx.de>
---
arch/mips/mach-octeon/cvmx-ilk.c | 1618 ++++++++++++++++++++++++++++++
1 file changed, 1618 insertions(+)
create mode 100644 arch/mips/mach-octeon/cvmx-ilk.c
diff --git a/arch/mips/mach-octeon/cvmx-ilk.c b/arch/mips/mach-octeon/cvmx-ilk.c
new file mode 100644
index 000000000000..e9d0cfc2413f
--- /dev/null
+++ b/arch/mips/mach-octeon/cvmx-ilk.c
@@ -0,0 +1,1618 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018-2022 Marvell International Ltd.
+ *
+ * Support library for the ILK
+ */
+
+#include <time.h>
+#include <log.h>
+#include <linux/delay.h>
+
+#include <mach/cvmx-regs.h>
+#include <mach/cvmx-csr.h>
+#include <mach/cvmx-bootmem.h>
+#include <mach/octeon-model.h>
+#include <mach/cvmx-fuse.h>
+#include <mach/octeon-feature.h>
+#include <mach/cvmx-qlm.h>
+#include <mach/octeon_qlm.h>
+#include <mach/cvmx-pcie.h>
+#include <mach/cvmx-coremask.h>
+
+#include <mach/cvmx-agl-defs.h>
+#include <mach/cvmx-bgxx-defs.h>
+#include <mach/cvmx-ciu-defs.h>
+#include <mach/cvmx-gmxx-defs.h>
+#include <mach/cvmx-gserx-defs.h>
+#include <mach/cvmx-ilk-defs.h>
+#include <mach/cvmx-ipd-defs.h>
+#include <mach/cvmx-pcsx-defs.h>
+#include <mach/cvmx-pcsxx-defs.h>
+#include <mach/cvmx-pki-defs.h>
+#include <mach/cvmx-pko-defs.h>
+#include <mach/cvmx-xcv-defs.h>
+
+#include <mach/cvmx-hwpko.h>
+#include <mach/cvmx-ilk.h>
+#include <mach/cvmx-pki.h>
+
+#include <mach/cvmx-helper.h>
+#include <mach/cvmx-helper-board.h>
+#include <mach/cvmx-helper-cfg.h>
+
+/*
+ * global configurations.
+ *
+ * for cn68, the default is {0xf, 0xf0}. to disable the 2nd ILK, set
+ * cvmx_ilk_lane_mask[CVMX_NUM_ILK_INTF] = {0xff, 0x0} and
+ * cvmx_ilk_chans[CVMX_NUM_ILK_INTF] = {8, 0}
+ */
+/* Per-node bitmask of SerDes lanes assigned to each ILK interface */
+unsigned short cvmx_ilk_lane_mask[CVMX_MAX_NODES][CVMX_NUM_ILK_INTF] = {
+	[0 ... CVMX_MAX_NODES - 1] = { 0x000f, 0x00f0 }
+};
+
+/* Per-node channel count configured for each ILK interface */
+int cvmx_ilk_chans[CVMX_MAX_NODES][CVMX_NUM_ILK_INTF] = {
+	[0 ... CVMX_MAX_NODES - 1] = { 8, 8 }
+};
+
+/* Cached per-interface state written by cvmx_ilk_start_interface() */
+static cvmx_ilk_intf_t cvmx_ilk_intf_cfg[CVMX_MAX_NODES][CVMX_NUM_ILK_INTF];
+
+/* Per-interface look-aside mode settings (overridable by board code) */
+cvmx_ilk_LA_mode_t cvmx_ilk_LA_mode[CVMX_NUM_ILK_INTF] = { { 0, 0 }, { 0, 0 } };
+/**
+ * User-overrideable callback function that returns whether or not an
+ * interface should use look-aside mode.
+ *
+ * @param interface - interface being checked
+ * @param channel - channel number, can be 0 or 1 or -1 to see if LA
+ *                  mode should be enabled for the interface.
+ * @return 0 to not use LA-mode, 1 to use LA-mode, -1 on a bad
+ *         interface number.
+ */
+int cvmx_ilk_use_la_mode(int interface, int channel)
+{
+	/* LA mode is never available on CN68XX pass 1.0 */
+	if (OCTEON_IS_MODEL(OCTEON_CN68XX_PASS1_0))
+		return 0;
+
+	if (interface < CVMX_NUM_ILK_INTF)
+		return cvmx_ilk_LA_mode[interface].ilk_LA_mode;
+
+	debug("ERROR: invalid interface=%d in %s\n", interface, __func__);
+	return -1;
+}
+
+/**
+ * User-overrideable callback function that returns whether or not an
+ * interface in look-aside mode should enable the RX calendar.
+ *
+ * @param interface - interface to check
+ * @return 1 to enable RX calendar, 0 to disable RX calendar, -1 on a
+ *         bad interface number.
+ *
+ * NOTE: For the CN68XX pass 2.0 this will enable the RX calendar for
+ * interface 0 and not interface 1. It is up to the customer to
+ * override this behavior.
+ */
+int cvmx_ilk_la_mode_enable_rx_calendar(int interface)
+{
+	/* There is an errata in the CN68XX pass 2.0 where if connected
+	 * in a loopback configuration or back to back then only one
+	 * interface can have the RX calendar enabled.
+	 */
+	if (interface < CVMX_NUM_ILK_INTF)
+		return cvmx_ilk_LA_mode[interface].ilk_LA_mode_cal_ena;
+
+	debug("ERROR: invalid interface=%d in %s\n", interface, __func__);
+	return -1;
+}
+
+/**
+ * Initialize and start the ILK interface.
+ *
+ * Validates the lane assignment against the other interface, powers up
+ * the SerDes, applies the G-16467 errata workaround where needed,
+ * clears the calendar, optionally enables look-aside mode, and enables
+ * the requested lanes.
+ *
+ * @param interface The identifier of the packet interface to configure
+ *                  and use as a ILK interface. cn68xx has 2 interfaces:
+ *                  ilk0 and ilk1.  Bits 7..4 carry the node number.
+ *
+ * @param lane_mask the lane group for this interface
+ *
+ * @return Zero on success, negative on failure.
+ */
+int cvmx_ilk_start_interface(int interface, unsigned short lane_mask)
+{
+	int res = -1;
+	int other_intf, this_qlm, other_qlm;
+	unsigned short uni_mask;
+	cvmx_ilk_txx_cfg0_t ilk_txx_cfg0;
+	cvmx_ilk_rxx_cfg0_t ilk_rxx_cfg0;
+	cvmx_ilk_ser_cfg_t ilk_ser_cfg;
+	/* Node number is encoded in bits 7..4 of the interface argument */
+	int node = (interface >> 4) & 0xf;
+
+	interface &= 0xf;
+
+	if (!octeon_has_feature(OCTEON_FEATURE_ILK))
+		return res;
+
+	if (interface >= CVMX_NUM_ILK_INTF)
+		return res;
+
+	if (lane_mask == 0)
+		return res;
+
+	/* check conflicts between 2 ilk interfaces. 1 lane can be assigned to 1
+	 * interface only
+	 */
+	other_intf = !interface;
+	if (cvmx_ilk_lane_mask[node][other_intf] & lane_mask) {
+		debug("ILK%d:%d: %s: lane assignment conflict\n", node,
+		      interface, __func__);
+		return res;
+	}
+
+	/* check the legality of the lane mask. interface 0 can have 8 lanes,
+	 * while interface 1 can have 4 lanes at most
+	 */
+	uni_mask = lane_mask >> (interface * 4);
+	if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
+		cvmx_mio_qlmx_cfg_t mio_qlmx_cfg, other_mio_qlmx_cfg;
+
+		if ((uni_mask != 0x1 && uni_mask != 0x3 && uni_mask != 0xf &&
+		     uni_mask != 0xff) ||
+		    (interface == 1 && lane_mask > 0xf0)) {
+			debug("ILK%d: %s: incorrect lane mask: 0x%x\n",
+			      interface, __func__, uni_mask);
+			return res;
+		}
+		/* check the availability of qlms. qlm_cfg = 001 means the chip
+		 * is fused to give this qlm to ilk
+		 */
+		this_qlm = interface + CVMX_ILK_QLM_BASE();
+		other_qlm = other_intf + CVMX_ILK_QLM_BASE();
+		mio_qlmx_cfg.u64 = csr_rd(CVMX_MIO_QLMX_CFG(this_qlm));
+		other_mio_qlmx_cfg.u64 = csr_rd(CVMX_MIO_QLMX_CFG(other_qlm));
+		if (mio_qlmx_cfg.s.qlm_cfg != 1 ||
+		    (uni_mask == 0xff && other_mio_qlmx_cfg.s.qlm_cfg != 1)) {
+			debug("ILK%d: %s: qlm unavailable\n", interface,
+			      __func__);
+			return res;
+		}
+		/* Has 8 lanes */
+		lane_mask &= 0xff;
+	}
+
+	if (OCTEON_IS_MODEL(OCTEON_CN78XX)) {
+		int qlm;
+		unsigned short lane_mask_all = 0;
+
+		/* QLM 4 - QLM 7 can be configured for ILK. Get the lane mask
+		 * of all the qlms that are configured for ilk
+		 */
+		for (qlm = 4; qlm < 8; qlm++) {
+			cvmx_gserx_cfg_t gserx_cfg;
+			cvmx_gserx_phy_ctl_t phy_ctl;
+
+			/* Make sure QLM is powered and out of reset */
+			phy_ctl.u64 =
+				csr_rd_node(node, CVMX_GSERX_PHY_CTL(qlm));
+			if (phy_ctl.s.phy_pd || phy_ctl.s.phy_reset)
+				continue;
+
+			/* Make sure QLM is in ILK mode */
+			gserx_cfg.u64 = csr_rd_node(node, CVMX_GSERX_CFG(qlm));
+			if (gserx_cfg.s.ila)
+				lane_mask_all |= ((1 << 4) - 1)
+						 << (4 * (qlm - 4));
+		}
+
+		if ((lane_mask_all & lane_mask) != lane_mask) {
+			debug("ILK%d: %s: incorrect lane mask: 0x%x\n",
+			      interface, __func__, lane_mask);
+			return res;
+		}
+	}
+
+	/* power up the serdes */
+	ilk_ser_cfg.u64 = csr_rd_node(node, CVMX_ILK_SER_CFG);
+	if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
+		/* Only set defaults on the first power-up of the SerDes */
+		if (ilk_ser_cfg.cn68xx.ser_pwrup == 0) {
+			ilk_ser_cfg.cn68xx.ser_rxpol_auto = 1;
+			ilk_ser_cfg.cn68xx.ser_rxpol = 0;
+			ilk_ser_cfg.cn68xx.ser_txpol = 0;
+			ilk_ser_cfg.cn68xx.ser_reset_n = 0xff;
+			ilk_ser_cfg.cn68xx.ser_haul = 0;
+		}
+		ilk_ser_cfg.cn68xx.ser_pwrup |=
+			((interface == 0) && (lane_mask > 0xf)) ?
+			0x3 :
+			(1 << interface);
+	}
+	if (OCTEON_IS_MODEL(OCTEON_CN78XX)) {
+		ilk_ser_cfg.cn78xx.ser_rxpol_auto = 1;
+		ilk_ser_cfg.cn78xx.ser_rxpol = 0;
+		ilk_ser_cfg.cn78xx.ser_txpol = 0;
+		ilk_ser_cfg.cn78xx.ser_reset_n = 0xffff;
+	}
+	csr_wr_node(node, CVMX_ILK_SER_CFG, ilk_ser_cfg.u64);
+
+	if (OCTEON_IS_MODEL(OCTEON_CN68XX_PASS2_X)) {
+		/* Workaround for Errata (G-16467) */
+		int qlm = (interface) ? 2 : 1;
+		int start_qlm, end_qlm;
+
+		/* Apply the workaround to both the QLMs if configured for x8 lanes */
+		if (cvmx_pop(lane_mask) > 4) {
+			start_qlm = 1;
+			end_qlm = 2;
+		} else {
+			start_qlm = qlm;
+			end_qlm = qlm;
+		}
+
+		for (qlm = start_qlm; qlm <= end_qlm; qlm++) {
+#ifdef CVMX_QLM_DUMP_STATE
+			debug("%s:%d: ILK%d: Applying workaround for Errata G-16467\n",
+			      __func__, __LINE__, qlm);
+			cvmx_qlm_display_registers(qlm);
+			debug("\n");
+#endif
+			/* This workaround only applies to QLMs running ILK at 6.25Ghz */
+			if ((cvmx_qlm_get_gbaud_mhz(qlm) == 6250) &&
+			    (cvmx_qlm_jtag_get(qlm, 0, "clkf_byp") != 20)) {
+				udelay(100); /* Wait 100us for links to stabilize */
+				cvmx_qlm_jtag_set(qlm, -1, "clkf_byp", 20);
+				/* Allow the QLM to exit reset */
+				cvmx_qlm_jtag_set(qlm, -1, "cfg_rst_n_clr", 0);
+				udelay(100); /* Wait 100us for links to stabilize */
+				/* Allow TX on QLM */
+				cvmx_qlm_jtag_set(qlm, -1, "cfg_tx_idle_set",
+						  0);
+			}
+#ifdef CVMX_QLM_DUMP_STATE
+			debug("%s:%d: ILK%d: Done applying workaround for Errata G-16467\n",
+			      __func__, __LINE__, qlm);
+			cvmx_qlm_display_registers(qlm);
+			debug("\n\n");
+#endif
+		}
+	}
+
+	/* Initialize all calendar entries to xoff state */
+	__cvmx_ilk_clear_cal((node << 4) | interface);
+
+	/* Enable ILK LA mode if configured. */
+	if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
+		if (cvmx_ilk_use_la_mode(interface, 0)) {
+			cvmx_ilk_txx_cfg1_t ilk_txx_cfg1;
+			cvmx_ilk_rxx_cfg1_t ilk_rxx_cfg1;
+
+			ilk_txx_cfg1.u64 = csr_rd(CVMX_ILK_TXX_CFG1(interface));
+			ilk_rxx_cfg1.u64 = csr_rd(CVMX_ILK_RXX_CFG1(interface));
+			ilk_txx_cfg1.s.la_mode = 1;
+			ilk_txx_cfg1.s.tx_link_fc_jam = 1;
+			ilk_txx_cfg1.s.rx_link_fc_ign = 1;
+			ilk_rxx_cfg1.s.la_mode = 1;
+			csr_wr(CVMX_ILK_TXX_CFG1(interface), ilk_txx_cfg1.u64);
+			csr_wr(CVMX_ILK_RXX_CFG1(interface), ilk_rxx_cfg1.u64);
+			cvmx_ilk_intf_cfg[node][interface].la_mode =
+				1; /* Enable look-aside mode */
+		} else {
+			cvmx_ilk_intf_cfg[node][interface].la_mode =
+				0; /* Disable look-aside mode */
+		}
+	}
+	if (OCTEON_IS_MODEL(OCTEON_CN78XX))
+		cvmx_ilk_intf_cfg[node][interface].la_mode = 0;
+
+	/* configure the lane enable of the interface */
+	ilk_txx_cfg0.u64 = csr_rd_node(node, CVMX_ILK_TXX_CFG0(interface));
+	ilk_rxx_cfg0.u64 = csr_rd_node(node, CVMX_ILK_RXX_CFG0(interface));
+	ilk_rxx_cfg0.s.lane_ena = lane_mask;
+	ilk_txx_cfg0.s.lane_ena = lane_mask;
+	csr_wr_node(node, CVMX_ILK_TXX_CFG0(interface), ilk_txx_cfg0.u64);
+	csr_wr_node(node, CVMX_ILK_RXX_CFG0(interface), ilk_rxx_cfg0.u64);
+
+	/* For 10.3125Gbs data rate, set SER_LIMIT to 0x3ff for x8 & x12 mode */
+	if (OCTEON_IS_MODEL(OCTEON_CN78XX)) {
+		cvmx_gserx_lane_mode_t lmode0, lmode1;
+
+		lmode0.u64 = csr_rd_node(node, CVMX_GSERX_LANE_MODE(5));
+		lmode1.u64 = csr_rd_node(node, CVMX_GSERX_LANE_MODE(7));
+		if ((lmode0.s.lmode == 5 || lmode1.s.lmode == 5) &&
+		    (lane_mask == 0xfff || lane_mask == 0xfff0 ||
+		     lane_mask == 0xff || lane_mask == 0xff00)) {
+			cvmx_ilk_txx_cfg1_t ilk_txx_cfg1;
+
+			ilk_txx_cfg1.u64 =
+				csr_rd_node(node, CVMX_ILK_TXX_CFG1(interface));
+			ilk_txx_cfg1.s.ser_limit = 0x3ff;
+			csr_wr_node(node, CVMX_ILK_TXX_CFG1(interface),
+				    ilk_txx_cfg1.u64);
+		}
+	}
+
+	/* write to local cache. for lane speed, if interface 0 has 8 lanes,
+	 * assume both qlms have the same speed
+	 */
+	cvmx_ilk_intf_cfg[node][interface].intf_en = 1;
+	res = 0;
+
+	return res;
+}
+
+/**
+ * set pipe group base and length for the interface
+ *
+ * @param xiface The identifier of the packet interface to configure
+ *               and use as a ILK interface. cn68xx has 2 interfaces:
+ *               ilk0 and ilk1.
+ *
+ * @param pipe_base the base of the pipe group
+ * @param pipe_len the length of the pipe group
+ *
+ * @return Zero on success, negative on failure.
+ */
+int cvmx_ilk_set_pipe(int xiface, int pipe_base, unsigned int pipe_len)
+{
+	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
+	int interface = xi.interface - CVMX_ILK_GBL_BASE();
+	cvmx_ilk_txx_pipe_t txx_pipe;
+
+	/* Only the CN68XX PKO uses pipes */
+	if (!OCTEON_IS_MODEL(OCTEON_CN68XX) || interface >= CVMX_NUM_ILK_INTF)
+		return -1;
+
+	/* set them in ilk tx section */
+	txx_pipe.u64 = csr_rd_node(xi.node, CVMX_ILK_TXX_PIPE(interface));
+	txx_pipe.s.base = pipe_base;
+	txx_pipe.s.nump = pipe_len;
+	csr_wr_node(xi.node, CVMX_ILK_TXX_PIPE(interface), txx_pipe.u64);
+
+	return 0;
+}
+
+/**
+ * set logical channels for tx
+ *
+ * @param interface The identifier of the packet interface to configure and
+ *                  use as a ILK interface. cn68xx has 2 interfaces: ilk0 and
+ *                  ilk1.
+ *
+ * @param pch pointer to an array of pipe-channel pair
+ * @param num_chs the number of entries in the pipe-channel array
+ *
+ * @return Zero on success, negative on failure.
+ */
+int cvmx_ilk_tx_set_channel(int interface, cvmx_ilk_pipe_chan_t *pch,
+			    unsigned int num_chs)
+{
+	int res = -1;
+	cvmx_ilk_txx_idx_pmap_t ilk_txx_idx_pmap;
+	cvmx_ilk_txx_mem_pmap_t ilk_txx_mem_pmap;
+	unsigned int i;
+
+	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+		return res;
+
+	if (interface >= CVMX_NUM_ILK_INTF)
+		return res;
+
+	if (!pch || num_chs > CVMX_ILK_MAX_PIPES)
+		return res;
+
+	/* The two branches below differ only in the remap bit: look-aside
+	 * mode additionally sets remap = 1 for every pipe-channel pair.
+	 * Each write is indirect: the IDX register selects the table
+	 * index, the MEM register carries the payload.
+	 */
+	if (cvmx_ilk_use_la_mode(interface, 0)) {
+		ilk_txx_idx_pmap.u64 = 0;
+		ilk_txx_mem_pmap.u64 = 0;
+		for (i = 0; i < num_chs; i++) {
+			ilk_txx_idx_pmap.s.index = pch->pipe;
+			ilk_txx_mem_pmap.s.channel = pch->chan;
+			ilk_txx_mem_pmap.s.remap = 1;
+			csr_wr(CVMX_ILK_TXX_IDX_PMAP(interface),
+			       ilk_txx_idx_pmap.u64);
+			csr_wr(CVMX_ILK_TXX_MEM_PMAP(interface),
+			       ilk_txx_mem_pmap.u64);
+			pch++;
+		}
+	} else {
+		/* write the pair to ilk tx */
+		ilk_txx_mem_pmap.u64 = 0;
+		ilk_txx_idx_pmap.u64 = 0;
+		for (i = 0; i < num_chs; i++) {
+			ilk_txx_idx_pmap.s.index = pch->pipe;
+			ilk_txx_mem_pmap.s.channel = pch->chan;
+			csr_wr(CVMX_ILK_TXX_IDX_PMAP(interface),
+			       ilk_txx_idx_pmap.u64);
+			csr_wr(CVMX_ILK_TXX_MEM_PMAP(interface),
+			       ilk_txx_mem_pmap.u64);
+			pch++;
+		}
+	}
+	res = 0;
+
+	return res;
+}
+
+/**
+ * set pkind for rx
+ *
+ * @param xiface The identifier of the packet interface to configure and
+ * use as a ILK interface. cn68xx has 2 interfaces: ilk0 and
+ * ilk1.
+ *
+ * @param chpknd pointer to an array of channel-pkind pair
+ * @param num_pknd the number of entries in the channel-pkind array
+ *
+ * @return Zero on success, negative on failure.
+ */
+int cvmx_ilk_rx_set_pknd(int xiface, cvmx_ilk_chan_pknd_t *chpknd,
+ unsigned int num_pknd)
+{
+ int res = -1;
+ cvmx_ilk_rxf_idx_pmap_t ilk_rxf_idx_pmap;
+ unsigned int i;
+ struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
+ int interface = xi.interface - CVMX_ILK_GBL_BASE();
+
+ /* The RXF pkind mapping registers exist only on cn68xx */
+ if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+ return res;
+
+ if (interface >= CVMX_NUM_ILK_INTF)
+ return res;
+
+ if (!chpknd || num_pknd > CVMX_ILK_MAX_PKNDS)
+ return res;
+
+ res = 0;
+
+ for (i = 0; i < num_pknd; i++) {
+ ilk_rxf_idx_pmap.u64 = 0;
+ /* write the pair to ilk rx. note the channels for different
+ * interfaces are given in *chpknd and interface is not used
+ * as a param
+ */
+ /*
+ * In LA mode the first two channels get an additional mapping
+ * at table offset 128 (presumably the LA-mode lookup window --
+ * TODO confirm against the CN68XX HRM); the regular mapping at
+ * interface * 256 + chan is still written below either way.
+ */
+ if (chpknd->chan < 2 &&
+ cvmx_ilk_use_la_mode(interface, chpknd->chan)) {
+ ilk_rxf_idx_pmap.s.index =
+ interface * 256 + 128 + chpknd->chan;
+ csr_wr(CVMX_ILK_RXF_IDX_PMAP, ilk_rxf_idx_pmap.u64);
+ csr_wr(CVMX_ILK_RXF_MEM_PMAP, chpknd->pknd);
+ }
+ ilk_rxf_idx_pmap.s.index = interface * 256 + chpknd->chan;
+ csr_wr(CVMX_ILK_RXF_IDX_PMAP, ilk_rxf_idx_pmap.u64);
+ csr_wr(CVMX_ILK_RXF_MEM_PMAP, chpknd->pknd);
+ chpknd++;
+ }
+
+ return res;
+}
+
+/**
+ * configure calendar for rx
+ *
+ * @param intf The identifier of the packet interface to configure and
+ * use as a ILK interface. cn68xx has 2 interfaces: ilk0 and
+ * ilk1.
+ *
+ * @param cal_depth the number of calendar entries
+ * @param pent pointer to calendar entries
+ *
+ * @return Zero on success, negative on failure.
+ */
+int cvmx_ilk_rx_cal_conf(int intf, int cal_depth, cvmx_ilk_cal_entry_t *pent)
+{
+ int res = -1, i;
+ cvmx_ilk_rxx_cfg0_t ilk_rxx_cfg0;
+ int num_entries;
+ /* intf packs the node in bits [7:4] and the ILK interface in [3:0] */
+ int node = (intf >> 4) & 0xf;
+ int interface = intf & 0xf;
+
+ if (!octeon_has_feature(OCTEON_FEATURE_ILK))
+ return res;
+
+ if (interface >= CVMX_NUM_ILK_INTF)
+ return res;
+
+ /* pent entries are only consumed on cn68xx; cn78xx programs zeros */
+ if (cal_depth < CVMX_ILK_RX_MIN_CAL || cal_depth > CVMX_ILK_MAX_CAL ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && !pent))
+ return res;
+
+ /* mandatory link-level fc as workarounds for ILK-15397 and
+ * ILK-15479
+ */
+ /* TODO: test effectiveness */
+
+ if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
+ /* Update the calendar for each channel */
+ /* Skip programming entries when LA mode has the RX calendar off */
+ if ((cvmx_ilk_use_la_mode(interface, 0) == 0) ||
+ (cvmx_ilk_use_la_mode(interface, 0) &&
+ cvmx_ilk_la_mode_enable_rx_calendar(interface))) {
+ for (i = 0; i < cal_depth; i++) {
+ __cvmx_ilk_write_rx_cal_entry(
+ interface, i, pent[i].pipe_bpid);
+ }
+ }
+
+ /* Update the depth */
+ /*
+ * 1 + cal_depth + (cal_depth - 1) / 15: presumably one extra
+ * control word per 15 calendar entries plus a terminator --
+ * TODO confirm against the Interlaken spec / CN68XX HRM.
+ */
+ ilk_rxx_cfg0.u64 = csr_rd(CVMX_ILK_RXX_CFG0(interface));
+ num_entries = 1 + cal_depth + (cal_depth - 1) / 15;
+ ilk_rxx_cfg0.s.cal_depth = num_entries;
+ if (cvmx_ilk_use_la_mode(interface, 0)) {
+ ilk_rxx_cfg0.s.mproto_ign = 1;
+ ilk_rxx_cfg0.s.lnk_stats_ena = 1;
+ ilk_rxx_cfg0.s.lnk_stats_wrap = 1;
+ }
+ csr_wr(CVMX_ILK_RXX_CFG0(interface), ilk_rxx_cfg0.u64);
+ }
+
+ if (OCTEON_IS_MODEL(OCTEON_CN78XX)) {
+ ilk_rxx_cfg0.u64 =
+ csr_rd_node(node, CVMX_ILK_RXX_CFG0(interface));
+ /*
+ * Make sure cal_ena is 0 for programming the calendar table,
+ * as per Errata ILK-19398
+ */
+ ilk_rxx_cfg0.s.cal_ena = 0;
+ csr_wr_node(node, CVMX_ILK_RXX_CFG0(interface),
+ ilk_rxx_cfg0.u64);
+
+ for (i = 0; i < cal_depth; i++)
+ __cvmx_ilk_write_rx_cal_entry(intf, i, 0);
+
+ ilk_rxx_cfg0.u64 =
+ csr_rd_node(node, CVMX_ILK_RXX_CFG0(interface));
+ num_entries = 1 + cal_depth + (cal_depth - 1) / 15;
+ ilk_rxx_cfg0.s.cal_depth = num_entries;
+ csr_wr_node(node, CVMX_ILK_RXX_CFG0(interface),
+ ilk_rxx_cfg0.u64);
+ }
+
+ return 0;
+}
+
+/**
+ * set high water mark for rx
+ *
+ * @param intf The identifier of the packet interface to configure and
+ * use as a ILK interface. cn68xx has 2 interfaces: ilk0 and
+ * ilk1.
+ *
+ * @param hi_wm high water mark for this interface
+ *
+ * @return Zero on success, negative on failure.
+ */
+int cvmx_ilk_rx_set_hwm(int intf, int hi_wm)
+{
+	cvmx_ilk_rxx_cfg1_t cfg1;
+	/* intf packs the node in bits [7:4], the interface in [3:0] */
+	int node = (intf >> 4) & 0xf;
+	int interface = intf & 0xf;
+
+	if (!octeon_has_feature(OCTEON_FEATURE_ILK))
+		return -1;
+
+	if (interface >= CVMX_NUM_ILK_INTF)
+		return -1;
+
+	if (hi_wm <= 0)
+		return -1;
+
+	/* Program the RX FIFO high water mark */
+	cfg1.u64 = csr_rd_node(node, CVMX_ILK_RXX_CFG1(interface));
+	cfg1.s.rx_fifo_hwm = hi_wm;
+	csr_wr_node(node, CVMX_ILK_RXX_CFG1(interface), cfg1.u64);
+
+	return 0;
+}
+
+/**
+ * enable calendar for rx
+ *
+ * @param intf The identifier of the packet interface to configure and
+ * use as a ILK interface. cn68xx has 2 interfaces: ilk0 and
+ * ilk1.
+ *
+ * @param cal_ena enable or disable calendar
+ *
+ * @return Zero on success, negative on failure.
+ */
+int cvmx_ilk_rx_cal_ena(int intf, unsigned char cal_ena)
+{
+	cvmx_ilk_rxx_cfg0_t cfg0;
+	/* intf packs the node in bits [7:4], the interface in [3:0] */
+	int node = (intf >> 4) & 0xf;
+	int interface = intf & 0xf;
+
+	if (!octeon_has_feature(OCTEON_FEATURE_ILK))
+		return -1;
+
+	if (interface >= CVMX_NUM_ILK_INTF)
+		return -1;
+
+	/* In LA mode the RX calendar may be configured to stay disabled */
+	if (cvmx_ilk_use_la_mode(interface, 0) &&
+	    !cvmx_ilk_la_mode_enable_rx_calendar(interface))
+		return 0;
+
+	/* Set the enable bit, then read back to flush the write */
+	cfg0.u64 = csr_rd_node(node, CVMX_ILK_RXX_CFG0(interface));
+	cfg0.s.cal_ena = cal_ena;
+	csr_wr_node(node, CVMX_ILK_RXX_CFG0(interface), cfg0.u64);
+	csr_rd_node(node, CVMX_ILK_RXX_CFG0(interface));
+
+	return 0;
+}
+
+/**
+ * set up calendar for rx
+ *
+ * @param intf The identifier of the packet interface to configure and
+ * use as a ILK interface. cn68xx has 2 interfaces: ilk0 and
+ * ilk1.
+ *
+ * @param cal_depth the number of calendar entries
+ * @param pent pointer to calendar entries
+ * @param hi_wm high water mark for this interface
+ * @param cal_ena enable or disable calendar
+ *
+ * @return Zero on success, negative on failure.
+ */
+int cvmx_ilk_cal_setup_rx(int intf, int cal_depth, cvmx_ilk_cal_entry_t *pent,
+			  int hi_wm, unsigned char cal_ena)
+{
+	int rc;
+
+	if (!octeon_has_feature(OCTEON_FEATURE_ILK))
+		return -1;
+
+	/* Configure the calendar, set the high water mark, then enable;
+	 * stop at the first step that fails.
+	 */
+	rc = cvmx_ilk_rx_cal_conf(intf, cal_depth, pent);
+	if (rc >= 0)
+		rc = cvmx_ilk_rx_set_hwm(intf, hi_wm);
+	if (rc >= 0)
+		rc = cvmx_ilk_rx_cal_ena(intf, cal_ena);
+
+	return rc;
+}
+
+/**
+ * configure calendar for tx
+ *
+ * @param intf The identifier of the packet interface to configure and
+ * use as a ILK interface. cn68xx has 2 interfaces: ilk0 and
+ * ilk1.
+ *
+ * @param cal_depth the number of calendar entries
+ * @param pent pointer to calendar entries
+ *
+ * @return Zero on success, negative on failure.
+ */
+int cvmx_ilk_tx_cal_conf(int intf, int cal_depth, cvmx_ilk_cal_entry_t *pent)
+{
+ int res = -1, i;
+ cvmx_ilk_txx_cfg0_t ilk_txx_cfg0;
+ int num_entries;
+ /* intf packs the node in bits [7:4] and the ILK interface in [3:0] */
+ int node = (intf >> 4) & 0xf;
+ int interface = intf & 0xf;
+
+ if (!octeon_has_feature(OCTEON_FEATURE_ILK))
+ return res;
+
+ if (interface >= CVMX_NUM_ILK_INTF)
+ return res;
+
+ /* pent entries are only consumed on cn68xx; cn78xx programs zeros */
+ if (cal_depth < CVMX_ILK_TX_MIN_CAL || cal_depth > CVMX_ILK_MAX_CAL ||
+ (OCTEON_IS_MODEL(OCTEON_CN68XX) && !pent))
+ return res;
+
+ /* mandatory link-level fc as workarounds for ILK-15397 and
+ * ILK-15479
+ */
+ /* TODO: test effectiveness */
+
+ if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
+ /* Update the calendar for each channel */
+ for (i = 0; i < cal_depth; i++) {
+ __cvmx_ilk_write_tx_cal_entry(interface, i,
+ pent[i].pipe_bpid);
+ }
+
+ /* Set the depth (must be multiple of 8)*/
+ /*
+ * 1 + cal_depth + (cal_depth - 1) / 15: presumably one extra
+ * control word per 15 entries plus a terminator -- TODO
+ * confirm against the Interlaken spec / CN68XX HRM.
+ */
+ ilk_txx_cfg0.u64 = csr_rd(CVMX_ILK_TXX_CFG0(interface));
+ num_entries = 1 + cal_depth + (cal_depth - 1) / 15;
+ ilk_txx_cfg0.s.cal_depth = (num_entries + 7) & ~7;
+ csr_wr(CVMX_ILK_TXX_CFG0(interface), ilk_txx_cfg0.u64);
+ }
+
+ if (OCTEON_IS_MODEL(OCTEON_CN78XX)) {
+ ilk_txx_cfg0.u64 =
+ csr_rd_node(node, CVMX_ILK_TXX_CFG0(interface));
+ /*
+ * Make sure cal_ena is 0 for programming the calendar table,
+ * as per Errata ILK-19398
+ */
+ ilk_txx_cfg0.s.cal_ena = 0;
+ csr_wr_node(node, CVMX_ILK_TXX_CFG0(interface),
+ ilk_txx_cfg0.u64);
+
+ for (i = 0; i < cal_depth; i++)
+ __cvmx_ilk_write_tx_cal_entry(intf, i, 0);
+
+ ilk_txx_cfg0.u64 =
+ csr_rd_node(node, CVMX_ILK_TXX_CFG0(interface));
+ num_entries = 1 + cal_depth + (cal_depth - 1) / 15;
+ /* cal_depth[2:0] needs to be zero, round up */
+ ilk_txx_cfg0.s.cal_depth = (num_entries + 7) & 0x1f8;
+ csr_wr_node(node, CVMX_ILK_TXX_CFG0(interface),
+ ilk_txx_cfg0.u64);
+ }
+
+ return 0;
+}
+
+/**
+ * enable calendar for tx
+ *
+ * @param intf The identifier of the packet interface to configure and
+ * use as a ILK interface. cn68xx has 2 interfaces: ilk0 and
+ * ilk1.
+ *
+ * @param cal_ena enable or disable calendar
+ *
+ * @return Zero on success, negative on failure.
+ */
+int cvmx_ilk_tx_cal_ena(int intf, unsigned char cal_ena)
+{
+	cvmx_ilk_txx_cfg0_t cfg0;
+	/* intf packs the node in bits [7:4], the interface in [3:0] */
+	int node = (intf >> 4) & 0xf;
+	int interface = intf & 0xf;
+
+	if (!octeon_has_feature(OCTEON_FEATURE_ILK))
+		return -1;
+
+	if (interface >= CVMX_NUM_ILK_INTF)
+		return -1;
+
+	/* Set the enable bit, then read back to flush the write */
+	cfg0.u64 = csr_rd_node(node, CVMX_ILK_TXX_CFG0(interface));
+	cfg0.s.cal_ena = cal_ena;
+	csr_wr_node(node, CVMX_ILK_TXX_CFG0(interface), cfg0.u64);
+	csr_rd_node(node, CVMX_ILK_TXX_CFG0(interface));
+
+	return 0;
+}
+
+/**
+ * set up calendar for tx
+ *
+ * @param intf The identifier of the packet interface to configure and
+ * use as a ILK interface. cn68xx has 2 interfaces: ilk0 and
+ * ilk1.
+ *
+ * @param cal_depth the number of calendar entries
+ * @param pent pointer to calendar entries
+ * @param cal_ena enable or disable calendar
+ *
+ * @return Zero on success, negative on failure.
+ */
+int cvmx_ilk_cal_setup_tx(int intf, int cal_depth, cvmx_ilk_cal_entry_t *pent,
+			  unsigned char cal_ena)
+{
+	int rc;
+
+	if (!octeon_has_feature(OCTEON_FEATURE_ILK))
+		return -1;
+
+	/* Configure the TX calendar, then enable it; stop on failure */
+	rc = cvmx_ilk_tx_cal_conf(intf, cal_depth, pent);
+	if (rc >= 0)
+		rc = cvmx_ilk_tx_cal_ena(intf, cal_ena);
+
+	return rc;
+}
+
+/* #define CVMX_ILK_STATS_ENA 1 */
+#ifdef CVMX_ILK_STATS_ENA
+/*
+ * Dump the main RX-side ILK CSRs of @intf (node in bits [7:4], interface in
+ * bits [3:0]) via debug().  Compiled only when CVMX_ILK_STATS_ENA is defined.
+ * NOTE(review): u64 fields are printed with %16lx; on configurations where
+ * u64 is unsigned long long this format does not match -- verify.
+ */
+static void cvmx_ilk_reg_dump_rx(int intf)
+{
+ int i;
+ cvmx_ilk_rxx_cfg0_t ilk_rxx_cfg0;
+ cvmx_ilk_rxx_cfg1_t ilk_rxx_cfg1;
+ cvmx_ilk_rxx_int_t ilk_rxx_int;
+ cvmx_ilk_rxx_jabber_t ilk_rxx_jabber;
+ cvmx_ilk_rx_lnex_cfg_t ilk_rx_lnex_cfg;
+ cvmx_ilk_rx_lnex_int_t ilk_rx_lnex_int;
+ cvmx_ilk_gbl_cfg_t ilk_gbl_cfg;
+ cvmx_ilk_ser_cfg_t ilk_ser_cfg;
+ cvmx_ilk_rxf_idx_pmap_t ilk_rxf_idx_pmap;
+ cvmx_ilk_rxf_mem_pmap_t ilk_rxf_mem_pmap;
+ cvmx_ilk_rxx_idx_cal_t ilk_rxx_idx_cal;
+ cvmx_ilk_rxx_mem_cal0_t ilk_rxx_mem_cal0;
+ cvmx_ilk_rxx_mem_cal1_t ilk_rxx_mem_cal1;
+ int node = (intf >> 4) & 0xf;
+ int interface = intf & 0xf;
+
+ ilk_rxx_cfg0.u64 = csr_rd_node(node, CVMX_ILK_RXX_CFG0(interface));
+ debug("ilk rxx cfg0: 0x%16lx\n", ilk_rxx_cfg0.u64);
+
+ ilk_rxx_cfg1.u64 = csr_rd_node(node, CVMX_ILK_RXX_CFG1(interface));
+ debug("ilk rxx cfg1: 0x%16lx\n", ilk_rxx_cfg1.u64);
+
+ /* Interrupt bits are write-1-to-clear: dumping also clears them */
+ ilk_rxx_int.u64 = csr_rd_node(node, CVMX_ILK_RXX_INT(interface));
+ debug("ilk rxx int: 0x%16lx\n", ilk_rxx_int.u64);
+ csr_wr_node(node, CVMX_ILK_RXX_INT(interface), ilk_rxx_int.u64);
+
+ ilk_rxx_jabber.u64 = csr_rd_node(node, CVMX_ILK_RXX_JABBER(interface));
+ debug("ilk rxx jabber: 0x%16lx\n", ilk_rxx_jabber.u64);
+
+/* Number of lanes dumped below (not all lanes of the interface) */
+#define LNE_NUM_DBG 4
+ for (i = 0; i < LNE_NUM_DBG; i++) {
+ ilk_rx_lnex_cfg.u64 =
+ csr_rd_node(node, CVMX_ILK_RX_LNEX_CFG(i));
+ debug("ilk rx lnex cfg lane: %d 0x%16lx\n", i,
+ ilk_rx_lnex_cfg.u64);
+ }
+
+ for (i = 0; i < LNE_NUM_DBG; i++) {
+ ilk_rx_lnex_int.u64 =
+ csr_rd_node(node, CVMX_ILK_RX_LNEX_INT(i));
+ debug("ilk rx lnex int lane: %d 0x%16lx\n", i,
+ ilk_rx_lnex_int.u64);
+ csr_wr_node(node, CVMX_ILK_RX_LNEX_INT(i), ilk_rx_lnex_int.u64);
+ }
+
+ ilk_gbl_cfg.u64 = csr_rd_node(node, CVMX_ILK_GBL_CFG);
+ debug("ilk gbl cfg: 0x%16lx\n", ilk_gbl_cfg.u64);
+
+ ilk_ser_cfg.u64 = csr_rd_node(node, CVMX_ILK_SER_CFG);
+ debug("ilk ser cfg: 0x%16lx\n", ilk_ser_cfg.u64);
+
+/* Number of channel pmap entries dumped below */
+#define CHAN_NUM_DBG 8
+ if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
+ /* Auto-incrementing index: each MEM read steps to the next entry */
+ ilk_rxf_idx_pmap.u64 = 0;
+ ilk_rxf_idx_pmap.s.index = interface * 256;
+ ilk_rxf_idx_pmap.s.inc = 1;
+ csr_wr(CVMX_ILK_RXF_IDX_PMAP, ilk_rxf_idx_pmap.u64);
+ for (i = 0; i < CHAN_NUM_DBG; i++) {
+ ilk_rxf_mem_pmap.u64 = csr_rd(CVMX_ILK_RXF_MEM_PMAP);
+ debug("ilk rxf mem pmap chan: %3d 0x%16lx\n", i,
+ ilk_rxf_mem_pmap.u64);
+ }
+ }
+ if (OCTEON_IS_MODEL(OCTEON_CN78XX)) {
+ cvmx_ilk_rxx_chax_t rxx_chax;
+
+ for (i = 0; i < CHAN_NUM_DBG; i++) {
+ rxx_chax.u64 = csr_rd_node(
+ node, CVMX_ILK_RXX_CHAX(i, interface));
+ debug("ilk chan: %d pki chan: 0x%x\n", i,
+ rxx_chax.s.port_kind);
+ }
+ }
+
+/* Number of calendar entries dumped below */
+#define CAL_NUM_DBG 2
+ if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
+ ilk_rxx_idx_cal.u64 = 0;
+ ilk_rxx_idx_cal.s.inc = 1;
+ csr_wr(CVMX_ILK_RXX_IDX_CAL(interface), ilk_rxx_idx_cal.u64);
+ for (i = 0; i < CAL_NUM_DBG; i++) {
+ ilk_rxx_idx_cal.u64 =
+ csr_rd(CVMX_ILK_RXX_IDX_CAL(interface));
+ debug("ilk rxx idx cal: 0x%16lx\n",
+ ilk_rxx_idx_cal.u64);
+
+ ilk_rxx_mem_cal0.u64 =
+ csr_rd(CVMX_ILK_RXX_MEM_CAL0(interface));
+ debug("ilk rxx mem cal0: 0x%16lx\n",
+ ilk_rxx_mem_cal0.u64);
+ ilk_rxx_mem_cal1.u64 =
+ csr_rd(CVMX_ILK_RXX_MEM_CAL1(interface));
+ debug("ilk rxx mem cal1: 0x%16lx\n",
+ ilk_rxx_mem_cal1.u64);
+ }
+ }
+ if (OCTEON_IS_MODEL(OCTEON_CN78XX)) {
+ cvmx_ilk_rxx_cal_entryx_t rxx_cal_entryx;
+
+ for (i = 0; i < CAL_NUM_DBG; i++) {
+ rxx_cal_entryx.u64 = csr_rd_node(
+ node, CVMX_ILK_RXX_CAL_ENTRYX(i, interface));
+ debug("ilk rxx cal idx: %d\n", i);
+ debug("ilk rxx cal ctl: 0x%x\n", rxx_cal_entryx.s.ctl);
+ debug("ilk rxx cal pko chan: 0x%x\n",
+ rxx_cal_entryx.s.channel);
+ }
+ }
+}
+
+/*
+ * Dump the main TX-side ILK CSRs of @intf (node in bits [7:4], interface in
+ * bits [3:0]) via debug().  Compiled only when CVMX_ILK_STATS_ENA is defined.
+ * Relies on CHAN_NUM_DBG / CAL_NUM_DBG defined in cvmx_ilk_reg_dump_rx().
+ */
+static void cvmx_ilk_reg_dump_tx(int intf)
+{
+ int i;
+ cvmx_ilk_txx_cfg0_t ilk_txx_cfg0;
+ cvmx_ilk_txx_cfg1_t ilk_txx_cfg1;
+ cvmx_ilk_txx_idx_pmap_t ilk_txx_idx_pmap;
+ cvmx_ilk_txx_mem_pmap_t ilk_txx_mem_pmap;
+ cvmx_ilk_txx_int_t ilk_txx_int;
+ cvmx_ilk_txx_pipe_t ilk_txx_pipe;
+ cvmx_ilk_txx_idx_cal_t ilk_txx_idx_cal;
+ cvmx_ilk_txx_mem_cal0_t ilk_txx_mem_cal0;
+ cvmx_ilk_txx_mem_cal1_t ilk_txx_mem_cal1;
+ int node = (intf >> 4) & 0xf;
+ int interface = intf & 0xf;
+
+ ilk_txx_cfg0.u64 = csr_rd_node(node, CVMX_ILK_TXX_CFG0(interface));
+ debug("ilk txx cfg0: 0x%16lx\n", ilk_txx_cfg0.u64);
+
+ ilk_txx_cfg1.u64 = csr_rd_node(node, CVMX_ILK_TXX_CFG1(interface));
+ debug("ilk txx cfg1: 0x%16lx\n", ilk_txx_cfg1.u64);
+
+ if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
+ ilk_txx_pipe.u64 = csr_rd(CVMX_ILK_TXX_PIPE(interface));
+ debug("ilk txx pipe: 0x%16lx\n", ilk_txx_pipe.u64);
+
+ /* Auto-incrementing index: each MEM read steps to the next entry */
+ ilk_txx_idx_pmap.u64 = 0;
+ ilk_txx_idx_pmap.s.index = ilk_txx_pipe.s.base;
+ ilk_txx_idx_pmap.s.inc = 1;
+ csr_wr(CVMX_ILK_TXX_IDX_PMAP(interface), ilk_txx_idx_pmap.u64);
+ for (i = 0; i < CHAN_NUM_DBG; i++) {
+ ilk_txx_mem_pmap.u64 =
+ csr_rd(CVMX_ILK_TXX_MEM_PMAP(interface));
+ debug("ilk txx mem pmap pipe: %3d 0x%16lx\n",
+ ilk_txx_pipe.s.base + i, ilk_txx_mem_pmap.u64);
+ }
+ }
+
+ ilk_txx_int.u64 = csr_rd_node(node, CVMX_ILK_TXX_INT(interface));
+ debug("ilk txx int: 0x%16lx\n", ilk_txx_int.u64);
+
+ if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
+ ilk_txx_idx_cal.u64 = 0;
+ ilk_txx_idx_cal.s.inc = 1;
+ csr_wr(CVMX_ILK_TXX_IDX_CAL(interface), ilk_txx_idx_cal.u64);
+ for (i = 0; i < CAL_NUM_DBG; i++) {
+ ilk_txx_idx_cal.u64 =
+ csr_rd(CVMX_ILK_TXX_IDX_CAL(interface));
+ debug("ilk txx idx cal: 0x%16lx\n",
+ ilk_txx_idx_cal.u64);
+
+ ilk_txx_mem_cal0.u64 =
+ csr_rd(CVMX_ILK_TXX_MEM_CAL0(interface));
+ debug("ilk txx mem cal0: 0x%16lx\n",
+ ilk_txx_mem_cal0.u64);
+ ilk_txx_mem_cal1.u64 =
+ csr_rd(CVMX_ILK_TXX_MEM_CAL1(interface));
+ debug("ilk txx mem cal1: 0x%16lx\n",
+ ilk_txx_mem_cal1.u64);
+ }
+ }
+ if (OCTEON_IS_MODEL(OCTEON_CN78XX)) {
+ cvmx_ilk_txx_cal_entryx_t txx_cal_entryx;
+
+ for (i = 0; i < CAL_NUM_DBG; i++) {
+ txx_cal_entryx.u64 = csr_rd_node(
+ node, CVMX_ILK_TXX_CAL_ENTRYX(i, interface));
+ debug("ilk txx cal idx: %d\n", i);
+ debug("ilk txx cal ctl: 0x%x\n", txx_cal_entryx.s.ctl);
+ debug("ilk txx cal pki chan: 0x%x\n",
+ txx_cal_entryx.s.channel);
+ }
+ }
+}
+#endif
+
+/**
+ * show run time status
+ *
+ * @param interface The identifier of the packet interface to enable. cn68xx
+ * has 2 interfaces: ilk0 and ilk1.
+ *
+ * @return nothing
+ */
+#ifdef CVMX_ILK_RUNTIME_DBG
+/*
+ * Dump run-time link/flow-control status of an ILK interface via debug().
+ * @interface packs the node in bits [7:4] and the interface in bits [3:0],
+ * matching the other intf-based helpers in this file.
+ */
+void cvmx_ilk_runtime_status(int interface)
+{
+	cvmx_ilk_txx_cfg1_t ilk_txx_cfg1;
+	cvmx_ilk_txx_flow_ctl0_t ilk_txx_flow_ctl0;
+	cvmx_ilk_rxx_cfg1_t ilk_rxx_cfg1;
+	cvmx_ilk_rxx_int_t ilk_rxx_int;
+	cvmx_ilk_rxx_flow_ctl0_t ilk_rxx_flow_ctl0;
+	cvmx_ilk_rxx_flow_ctl1_t ilk_rxx_flow_ctl1;
+	cvmx_ilk_gbl_int_t ilk_gbl_int;
+	/*
+	 * Fix: 'node' was used below but never declared, so the build broke
+	 * whenever CVMX_ILK_RUNTIME_DBG was defined.  Decode node/interface
+	 * from the identifier like cvmx_ilk_rx_set_hwm() etc. do.
+	 */
+	int node = (interface >> 4) & 0xf;
+
+	interface &= 0xf;
+
+	debug("\nilk run-time status: interface: %d\n", interface);
+
+	ilk_txx_cfg1.u64 = csr_rd_node(node, CVMX_ILK_TXX_CFG1(interface));
+	debug("\nilk txx cfg1: 0x%16lx\n", ilk_txx_cfg1.u64);
+	if (ilk_txx_cfg1.s.rx_link_fc)
+		debug("link flow control received\n");
+	if (ilk_txx_cfg1.s.tx_link_fc)
+		debug("link flow control sent\n");
+
+	if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
+		ilk_txx_flow_ctl0.u64 =
+			csr_rd(CVMX_ILK_TXX_FLOW_CTL0(interface));
+		debug("\nilk txx flow ctl0: 0x%16lx\n", ilk_txx_flow_ctl0.u64);
+	}
+	if (OCTEON_IS_MODEL(OCTEON_CN78XX)) {
+		int i;
+		cvmx_ilk_txx_cha_xonx_t txx_cha_xonx;
+
+		for (i = 0; i < 4; i++) {
+			txx_cha_xonx.u64 = csr_rd_node(
+				node, CVMX_ILK_TXX_CHA_XONX(i, interface));
+			debug("\nilk txx cha xon: 0x%16lx\n", txx_cha_xonx.u64);
+		}
+	}
+
+	ilk_rxx_cfg1.u64 = csr_rd_node(node, CVMX_ILK_RXX_CFG1(interface));
+	debug("\nilk rxx cfg1: 0x%16lx\n", ilk_rxx_cfg1.u64);
+	debug("rx fifo count: %d\n", ilk_rxx_cfg1.s.rx_fifo_cnt);
+
+	/* Interrupt bits are write-1-to-clear: dumping also clears them */
+	ilk_rxx_int.u64 = csr_rd_node(node, CVMX_ILK_RXX_INT(interface));
+	debug("\nilk rxx int: 0x%16lx\n", ilk_rxx_int.u64);
+	if (ilk_rxx_int.s.pkt_drop_rxf)
+		debug("rx fifo packet drop\n");
+	if (ilk_rxx_int.u64)
+		csr_wr_node(node, CVMX_ILK_RXX_INT(interface), ilk_rxx_int.u64);
+
+	if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
+		ilk_rxx_flow_ctl0.u64 =
+			csr_rd(CVMX_ILK_RXX_FLOW_CTL0(interface));
+		debug("\nilk rxx flow ctl0: 0x%16lx\n", ilk_rxx_flow_ctl0.u64);
+
+		ilk_rxx_flow_ctl1.u64 =
+			csr_rd(CVMX_ILK_RXX_FLOW_CTL1(interface));
+		debug("\nilk rxx flow ctl1: 0x%16lx\n", ilk_rxx_flow_ctl1.u64);
+	}
+	if (OCTEON_IS_MODEL(OCTEON_CN78XX)) {
+		int i;
+		cvmx_ilk_rxx_cha_xonx_t rxx_cha_xonx;
+
+		for (i = 0; i < 4; i++) {
+			rxx_cha_xonx.u64 = csr_rd_node(
+				node, CVMX_ILK_RXX_CHA_XONX(i, interface));
+			debug("\nilk rxx cha xon: 0x%16lx\n", rxx_cha_xonx.u64);
+		}
+	}
+
+	ilk_gbl_int.u64 = csr_rd_node(node, CVMX_ILK_GBL_INT);
+	debug("\nilk gbl int: 0x%16lx\n", ilk_gbl_int.u64);
+	if (ilk_gbl_int.s.rxf_push_full)
+		debug("rx fifo overflow\n");
+	if (ilk_gbl_int.u64)
+		csr_wr_node(node, CVMX_ILK_GBL_INT, ilk_gbl_int.u64);
+}
+#endif
+
+/**
+ * enable interface
+ *
+ * @param xiface The identifier of the packet interface to enable. cn68xx
+ * has 2 interfaces: ilk0 and ilk1.
+ *
+ * @return Zero on success, negative on failure.
+ */
+int cvmx_ilk_enable(int xiface)
+{
+	int res = -1;
+	int retry_count = 0;
+	cvmx_helper_link_info_t result;
+	cvmx_ilk_txx_cfg1_t ilk_txx_cfg1;
+	cvmx_ilk_rxx_cfg1_t ilk_rxx_cfg1;
+#ifdef CVMX_ILK_STATS_ENA
+	cvmx_ilk_rxx_cfg0_t ilk_rxx_cfg0;
+	cvmx_ilk_txx_cfg0_t ilk_txx_cfg0;
+#endif
+	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
+	int node = xi.node;
+	int interface = xi.interface - CVMX_ILK_GBL_BASE();
+
+	if (!octeon_has_feature(OCTEON_FEATURE_ILK))
+		return res;
+
+	if (interface >= CVMX_NUM_ILK_INTF)
+		return res;
+
+	result.u64 = 0;
+
+#ifdef CVMX_ILK_STATS_ENA
+	debug("\n");
+	debug("<<<< ILK%d: Before enabling ilk\n", interface);
+	/*
+	 * Fix: these dump calls referenced an undeclared 'intf', breaking
+	 * the build with CVMX_ILK_STATS_ENA.  The dump helpers expect the
+	 * combined identifier (node in bits [7:4], interface in [3:0]).
+	 */
+	cvmx_ilk_reg_dump_rx((node << 4) | interface);
+	cvmx_ilk_reg_dump_tx((node << 4) | interface);
+#endif
+
+	/* RX packet will be enabled only if link is up */
+
+	/* TX side */
+	ilk_txx_cfg1.u64 = csr_rd_node(node, CVMX_ILK_TXX_CFG1(interface));
+	ilk_txx_cfg1.s.pkt_ena = 1;
+	if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
+		if (cvmx_ilk_use_la_mode(interface, 0)) {
+			ilk_txx_cfg1.s.la_mode = 1;
+			ilk_txx_cfg1.s.tx_link_fc_jam = 1;
+		}
+	}
+	csr_wr_node(node, CVMX_ILK_TXX_CFG1(interface), ilk_txx_cfg1.u64);
+	/* Read back to flush the write */
+	csr_rd_node(node, CVMX_ILK_TXX_CFG1(interface));
+
+#ifdef CVMX_ILK_STATS_ENA
+	/* RX side stats */
+	ilk_rxx_cfg0.u64 = csr_rd_node(node, CVMX_ILK_RXX_CFG0(interface));
+	ilk_rxx_cfg0.s.lnk_stats_ena = 1;
+	csr_wr_node(node, CVMX_ILK_RXX_CFG0(interface), ilk_rxx_cfg0.u64);
+
+	/* TX side stats */
+	ilk_txx_cfg0.u64 = csr_rd_node(node, CVMX_ILK_TXX_CFG0(interface));
+	ilk_txx_cfg0.s.lnk_stats_ena = 1;
+	csr_wr_node(node, CVMX_ILK_TXX_CFG0(interface), ilk_txx_cfg0.u64);
+#endif
+
+	/* Poll (up to 10 times) until RX packet reception is enabled,
+	 * which only happens once the link is up.
+	 */
+retry:
+	retry_count++;
+	if (retry_count > 10)
+		goto out;
+
+	/* Make sure the link is up, so that packets can be sent. */
+	result = __cvmx_helper_ilk_link_get(
+		cvmx_helper_get_ipd_port((interface + CVMX_ILK_GBL_BASE()), 0));
+
+	/* Small delay before another retry. */
+	udelay(100);
+
+	ilk_rxx_cfg1.u64 = csr_rd_node(node, CVMX_ILK_RXX_CFG1(interface));
+	if (ilk_rxx_cfg1.s.pkt_ena == 0)
+		goto retry;
+
+out:
+
+#ifdef CVMX_ILK_STATS_ENA
+	debug(">>>> ILK%d: After ILK is enabled\n", interface);
+	/* Fix: same undeclared-'intf' problem as above */
+	cvmx_ilk_reg_dump_rx((node << 4) | interface);
+	cvmx_ilk_reg_dump_tx((node << 4) | interface);
+#endif
+
+	if (result.s.link_up)
+		return 0;
+
+	return -1;
+}
+
+/**
+ * Disable interface
+ *
+ * @param intf The identifier of the packet interface to disable. cn68xx
+ * has 2 interfaces: ilk0 and ilk1.
+ *
+ * @return Zero on success, negative on failure.
+ */
+int cvmx_ilk_disable(int intf)
+{
+ int res = -1;
+ cvmx_ilk_txx_cfg1_t ilk_txx_cfg1;
+ cvmx_ilk_rxx_cfg1_t ilk_rxx_cfg1;
+#ifdef CVMX_ILK_STATS_ENA
+ cvmx_ilk_rxx_cfg0_t ilk_rxx_cfg0;
+ cvmx_ilk_txx_cfg0_t ilk_txx_cfg0;
+#endif
+ /* intf packs the node in bits [7:4], the interface in [3:0] */
+ int node = (intf >> 4) & 0xf;
+ int interface = intf & 0xf;
+
+ if (!octeon_has_feature(OCTEON_FEATURE_ILK))
+ return res;
+
+ if (interface >= CVMX_NUM_ILK_INTF)
+ return res;
+
+ /* TX side */
+ ilk_txx_cfg1.u64 = csr_rd_node(node, CVMX_ILK_TXX_CFG1(interface));
+ ilk_txx_cfg1.s.pkt_ena = 0;
+ csr_wr_node(node, CVMX_ILK_TXX_CFG1(interface), ilk_txx_cfg1.u64);
+
+ /* RX side */
+ ilk_rxx_cfg1.u64 = csr_rd_node(node, CVMX_ILK_RXX_CFG1(interface));
+ ilk_rxx_cfg1.s.pkt_ena = 0;
+ csr_wr_node(node, CVMX_ILK_RXX_CFG1(interface), ilk_rxx_cfg1.u64);
+
+#ifdef CVMX_ILK_STATS_ENA
+ /* RX side stats */
+ ilk_rxx_cfg0.u64 = csr_rd_node(node, CVMX_ILK_RXX_CFG0(interface));
+ ilk_rxx_cfg0.s.lnk_stats_ena = 0;
+ csr_wr_node(node, CVMX_ILK_RXX_CFG0(interface), ilk_rxx_cfg0.u64);
+
+ /* TX side stats */
+ ilk_txx_cfg0.u64 = csr_rd_node(node, CVMX_ILK_TXX_CFG0(interface));
+ ilk_txx_cfg0.s.lnk_stats_ena = 0;
+ csr_wr_node(node, CVMX_ILK_TXX_CFG0(interface), ilk_txx_cfg0.u64);
+#endif
+
+ return 0;
+}
+
+/**
+ * Provide interface enable status
+ *
+ * @param xiface The identifier of the packet xiface to disable. cn68xx
+ * has 2 interfaces: ilk0 and ilk1.
+ *
+ * @return Zero, not enabled; One, enabled.
+ */
+int cvmx_ilk_get_intf_ena(int xiface)
+{
+	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
+	int ilk_intf = xi.interface - CVMX_ILK_GBL_BASE();
+
+	/* Look up the cached per-node/per-interface enable flag */
+	return cvmx_ilk_intf_cfg[xi.node][ilk_intf].intf_en;
+}
+
+/**
+ * Enable or disable LA mode in ILK header.
+ * For normal ILK mode, enable CRC and skip = 0.
+ * For ILK LA mode, disable CRC and set skip to size of ILK header.
+ *
+ * @param ipd_port IPD port of the ILK header
+ * @param mode If set, enable LA mode in ILK header, else disable
+ *
+ * @return ILK header
+ */
+cvmx_ilk_la_nsp_compact_hdr_t cvmx_ilk_enable_la_header(int ipd_port, int mode)
+{
+ cvmx_ilk_la_nsp_compact_hdr_t ilk_header;
+ cvmx_pip_prt_cfgx_t pip_config;
+ struct cvmx_xport xp = cvmx_helper_ipd_port_to_xport(ipd_port);
+ int xiface = cvmx_helper_get_interface_num(ipd_port);
+ struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
+
+ int ilk_interface = xi.interface - CVMX_ILK_GBL_BASE();
+ int skip = 0;
+ int crc = 1;
+ int len_chk = 1;
+
+ ilk_header.u64 = 0;
+
+ /*
+ * NOTE(review): an invalid interface only logs an error and then
+ * continues into cvmx_ilk_use_la_mode() with the bad index --
+ * confirm whether it should return the zeroed header instead.
+ */
+ if (ilk_interface >= CVMX_NUM_ILK_INTF)
+ debug("ERROR: Invalid interface %d\n", ilk_interface);
+ if (!cvmx_ilk_use_la_mode(ilk_interface, 0))
+ return ilk_header;
+
+ /* LA mode: carry the channel in the header, skip it on RX, no CRC */
+ if (mode) {
+ ilk_header.s.la_mode = 1;
+ ilk_header.s.ilk_channel = xp.port & 1;
+ skip = sizeof(ilk_header);
+ crc = 0;
+ }
+ /* There is a bug in the CN68XX pass 2.x where the CRC erroneously is
+ * computed over the ILK header when it should not be so we ignore it.
+ */
+ if (OCTEON_IS_MODEL(OCTEON_CN68XX_PASS2_X)) {
+ crc = 0;
+ if (!mode)
+ len_chk = 0;
+ skip = sizeof(ilk_header);
+ }
+
+ /* SKIP ILK header only for first 2 ports */
+ if ((xp.port & 0x7) < 2) {
+ int pknd = cvmx_helper_get_pknd(xiface, xp.port & 1);
+ int ipko_port;
+ cvmx_pko_reg_read_idx_t pko_reg;
+ cvmx_pko_mem_iport_ptrs_t pko_mem_iport;
+ cvmx_pip_sub_pkind_fcsx_t pknd_fcs;
+
+ /* Enable/Disable CRC in IPD and set skip */
+ pip_config.u64 = csr_rd(CVMX_PIP_PRT_CFGX(pknd));
+ pip_config.s.skip = skip;
+ pip_config.s.crc_en = crc;
+ pip_config.s.lenerr_en = len_chk;
+ pip_config.s.minerr_en = (len_chk && !mode);
+ csr_wr(CVMX_PIP_PRT_CFGX(pknd), pip_config.u64);
+
+ /* Exclude this pkind from FCS checking */
+ pknd_fcs.u64 = csr_rd(CVMX_PIP_SUB_PKIND_FCSX(0));
+ pknd_fcs.s.port_bit &= ~(1ull << pknd);
+ csr_wr(CVMX_PIP_SUB_PKIND_FCSX(0), pknd_fcs.u64);
+
+ /* Enable/Disable CRC in PKO */
+
+ /* Get PKO internal port */
+ /* 0x1c: base of the ILK internal PKO ports -- TODO confirm */
+ ipko_port = ilk_interface + 0x1c;
+
+ /* Select the PKO iport entry via the indirect read index */
+ pko_reg.u64 = csr_rd(CVMX_PKO_REG_READ_IDX);
+ pko_reg.s.index = cvmx_helper_get_pko_port(xiface, xp.port & 1);
+ csr_wr(CVMX_PKO_REG_READ_IDX, pko_reg.u64);
+
+ pko_mem_iport.u64 = csr_rd(CVMX_PKO_MEM_IPORT_PTRS);
+ pko_mem_iport.s.crc = crc;
+ pko_mem_iport.s.intr = ipko_port;
+ csr_wr(CVMX_PKO_MEM_IPORT_PTRS, pko_mem_iport.u64);
+ }
+
+ return ilk_header;
+}
+
+/**
+ * Show channel statistics
+ *
+ * @param interface The identifier of the packet interface to disable. cn68xx
+ * has 2 interfaces: ilk0 and ilk1.
+ * @param pstats A pointer to cvmx_ilk_stats_ctrl_t that specifies which
+ * logical channels to access
+ *
+ * @return nothing
+ */
+void cvmx_ilk_show_stats(int interface, cvmx_ilk_stats_ctrl_t *pstats)
+{
+	unsigned int i;
+	cvmx_ilk_rxx_idx_stat0_t ilk_rxx_idx_stat0;
+	cvmx_ilk_rxx_idx_stat1_t ilk_rxx_idx_stat1;
+	cvmx_ilk_rxx_mem_stat0_t ilk_rxx_mem_stat0;
+	cvmx_ilk_rxx_mem_stat1_t ilk_rxx_mem_stat1;
+
+	cvmx_ilk_txx_idx_stat0_t ilk_txx_idx_stat0;
+	cvmx_ilk_txx_idx_stat1_t ilk_txx_idx_stat1;
+	cvmx_ilk_txx_mem_stat0_t ilk_txx_mem_stat0;
+	cvmx_ilk_txx_mem_stat1_t ilk_txx_mem_stat1;
+	/* NOTE(review): node is hard-coded to 0 -- confirm whether
+	 * multi-node CN78XX systems need it derived from the caller.
+	 */
+	int node = 0;
+
+	if (!octeon_has_feature(OCTEON_FEATURE_ILK))
+		return;
+
+	if (interface >= CVMX_NUM_ILK_INTF)
+		return;
+
+	if (!pstats)
+		return;
+
+	/* discrete channels */
+	if (pstats->chan_list) {
+		int *chan_list = pstats->chan_list;
+
+		for (i = 0; i < pstats->num_chans; i++) {
+			if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
+				/* get the number of rx packets */
+				ilk_rxx_idx_stat0.u64 = 0;
+				/*
+				 * Fix: index with *chan_list (the walking
+				 * cursor); *pstats->chan_list stays stuck on
+				 * the first channel for every iteration.
+				 */
+				ilk_rxx_idx_stat0.s.index = *chan_list;
+				ilk_rxx_idx_stat0.s.clr = pstats->clr_on_rd;
+				csr_wr(CVMX_ILK_RXX_IDX_STAT0(interface),
+				       ilk_rxx_idx_stat0.u64);
+				ilk_rxx_mem_stat0.u64 = csr_rd(
+					CVMX_ILK_RXX_MEM_STAT0(interface));
+
+				/* get the number of rx bytes */
+				ilk_rxx_idx_stat1.u64 = 0;
+				ilk_rxx_idx_stat1.s.index = *chan_list;
+				ilk_rxx_idx_stat1.s.clr = pstats->clr_on_rd;
+				csr_wr(CVMX_ILK_RXX_IDX_STAT1(interface),
+				       ilk_rxx_idx_stat1.u64);
+				ilk_rxx_mem_stat1.u64 = csr_rd(
+					CVMX_ILK_RXX_MEM_STAT1(interface));
+
+				debug("ILK%d Channel%d Rx: %d packets %d bytes\n",
+				      interface, *chan_list,
+				      ilk_rxx_mem_stat0.s.rx_pkt,
+				      (unsigned int)
+				      ilk_rxx_mem_stat1.s.rx_bytes);
+			}
+			if (OCTEON_IS_MODEL(OCTEON_CN78XX)) {
+				cvmx_ilk_rxx_pkt_cntx_t rxx_pkt_cntx;
+				cvmx_ilk_rxx_byte_cntx_t rxx_byte_cntx;
+
+				rxx_pkt_cntx.u64 = csr_rd_node(
+					node, CVMX_ILK_RXX_PKT_CNTX(*chan_list,
+								    interface));
+				rxx_byte_cntx.u64 = csr_rd_node(
+					node, CVMX_ILK_RXX_BYTE_CNTX(
+						      *chan_list, interface));
+				debug("ILK%d Channel%d Rx: %llu packets %llu bytes\n",
+				      interface, *chan_list,
+				      (unsigned long long)rxx_pkt_cntx.s.rx_pkt,
+				      (unsigned long long)
+				      rxx_byte_cntx.s.rx_bytes);
+			}
+
+			if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
+				/* get the number of tx packets */
+				ilk_txx_idx_stat0.u64 = 0;
+				ilk_txx_idx_stat0.s.index = *chan_list;
+				ilk_txx_idx_stat0.s.clr = pstats->clr_on_rd;
+				csr_wr(CVMX_ILK_TXX_IDX_STAT0(interface),
+				       ilk_txx_idx_stat0.u64);
+				ilk_txx_mem_stat0.u64 = csr_rd(
+					CVMX_ILK_TXX_MEM_STAT0(interface));
+
+				/* get the number of tx bytes */
+				ilk_txx_idx_stat1.u64 = 0;
+				/* Fix: was *pstats->chan_list (first channel) */
+				ilk_txx_idx_stat1.s.index = *chan_list;
+				ilk_txx_idx_stat1.s.clr = pstats->clr_on_rd;
+				csr_wr(CVMX_ILK_TXX_IDX_STAT1(interface),
+				       ilk_txx_idx_stat1.u64);
+				ilk_txx_mem_stat1.u64 = csr_rd(
+					CVMX_ILK_TXX_MEM_STAT1(interface));
+
+				debug("ILK%d Channel%d Tx: %d packets %d bytes\n",
+				      interface, *chan_list,
+				      ilk_txx_mem_stat0.s.tx_pkt,
+				      (unsigned int)
+				      ilk_txx_mem_stat1.s.tx_bytes);
+			}
+			if (OCTEON_IS_MODEL(OCTEON_CN78XX)) {
+				cvmx_ilk_txx_pkt_cntx_t txx_pkt_cntx;
+				cvmx_ilk_txx_byte_cntx_t txx_byte_cntx;
+
+				txx_pkt_cntx.u64 = csr_rd_node(
+					node, CVMX_ILK_TXX_PKT_CNTX(*chan_list,
+								    interface));
+				txx_byte_cntx.u64 = csr_rd_node(
+					node, CVMX_ILK_TXX_BYTE_CNTX(
+						      *chan_list, interface));
+				debug("ILK%d Channel%d Tx: %llu packets %llu bytes\n",
+				      interface, *chan_list,
+				      (unsigned long long)txx_pkt_cntx.s.tx_pkt,
+				      (unsigned long long)
+				      txx_byte_cntx.s.tx_bytes);
+			}
+
+			chan_list++;
+		}
+		return;
+	}
+
+	/* continuous channels */
+	if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
+		/* Program auto-incrementing indexes once, then read the
+		 * MEM registers repeatedly to walk the channel range.
+		 */
+		ilk_rxx_idx_stat0.u64 = 0;
+		ilk_rxx_idx_stat0.s.index = pstats->chan_start;
+		ilk_rxx_idx_stat0.s.inc = pstats->chan_step;
+		ilk_rxx_idx_stat0.s.clr = pstats->clr_on_rd;
+		csr_wr(CVMX_ILK_RXX_IDX_STAT0(interface),
+		       ilk_rxx_idx_stat0.u64);
+
+		ilk_rxx_idx_stat1.u64 = 0;
+		ilk_rxx_idx_stat1.s.index = pstats->chan_start;
+		ilk_rxx_idx_stat1.s.inc = pstats->chan_step;
+		ilk_rxx_idx_stat1.s.clr = pstats->clr_on_rd;
+		csr_wr(CVMX_ILK_RXX_IDX_STAT1(interface),
+		       ilk_rxx_idx_stat1.u64);
+
+		ilk_txx_idx_stat0.u64 = 0;
+		ilk_txx_idx_stat0.s.index = pstats->chan_start;
+		ilk_txx_idx_stat0.s.inc = pstats->chan_step;
+		ilk_txx_idx_stat0.s.clr = pstats->clr_on_rd;
+		csr_wr(CVMX_ILK_TXX_IDX_STAT0(interface),
+		       ilk_txx_idx_stat0.u64);
+
+		ilk_txx_idx_stat1.u64 = 0;
+		ilk_txx_idx_stat1.s.index = pstats->chan_start;
+		ilk_txx_idx_stat1.s.inc = pstats->chan_step;
+		ilk_txx_idx_stat1.s.clr = pstats->clr_on_rd;
+		csr_wr(CVMX_ILK_TXX_IDX_STAT1(interface),
+		       ilk_txx_idx_stat1.u64);
+
+		for (i = pstats->chan_start; i <= pstats->chan_end;
+		     i += pstats->chan_step) {
+			ilk_rxx_mem_stat0.u64 =
+				csr_rd(CVMX_ILK_RXX_MEM_STAT0(interface));
+			ilk_rxx_mem_stat1.u64 =
+				csr_rd(CVMX_ILK_RXX_MEM_STAT1(interface));
+			debug("ILK%d Channel%d Rx: %d packets %d bytes\n",
+			      interface, i, ilk_rxx_mem_stat0.s.rx_pkt,
+			      (unsigned int)ilk_rxx_mem_stat1.s.rx_bytes);
+
+			ilk_txx_mem_stat0.u64 =
+				csr_rd(CVMX_ILK_TXX_MEM_STAT0(interface));
+			ilk_txx_mem_stat1.u64 =
+				csr_rd(CVMX_ILK_TXX_MEM_STAT1(interface));
+			/* Fix: the Tx line printed the Rx counters */
+			debug("ILK%d Channel%d Tx: %d packets %d bytes\n",
+			      interface, i, ilk_txx_mem_stat0.s.tx_pkt,
+			      (unsigned int)ilk_txx_mem_stat1.s.tx_bytes);
+		}
+	}
+	if (OCTEON_IS_MODEL(OCTEON_CN78XX)) {
+		for (i = pstats->chan_start; i <= pstats->chan_end;
+		     i += pstats->chan_step) {
+			cvmx_ilk_rxx_pkt_cntx_t rxx_pkt_cntx;
+			cvmx_ilk_rxx_byte_cntx_t rxx_byte_cntx;
+			cvmx_ilk_txx_pkt_cntx_t txx_pkt_cntx;
+			cvmx_ilk_txx_byte_cntx_t txx_byte_cntx;
+
+			rxx_pkt_cntx.u64 = csr_rd_node(
+				node, CVMX_ILK_RXX_PKT_CNTX(i, interface));
+			rxx_byte_cntx.u64 = csr_rd_node(
+				node, CVMX_ILK_RXX_BYTE_CNTX(i, interface));
+			debug("ILK%d Channel%d Rx: %llu packets %llu bytes\n",
+			      interface, i,
+			      (unsigned long long)rxx_pkt_cntx.s.rx_pkt,
+			      (unsigned long long)rxx_byte_cntx.s.rx_bytes);
+
+			txx_pkt_cntx.u64 = csr_rd_node(
+				node, CVMX_ILK_TXX_PKT_CNTX(i, interface));
+			txx_byte_cntx.u64 = csr_rd_node(
+				node, CVMX_ILK_TXX_BYTE_CNTX(i, interface));
+			debug("ILK%d Channel%d Tx: %llu packets %llu bytes\n",
+			      interface, i,
+			      (unsigned long long)txx_pkt_cntx.s.tx_pkt,
+			      (unsigned long long)txx_byte_cntx.s.tx_bytes);
+		}
+	}
+}
+
+/**
+ * enable or disable loopbacks
+ *
+ * @param xiface The identifier of the packet xiface to disable. cn68xx
+ * has 2 interfaces: ilk0 and ilk1.
+ * @param enable Enable or disable loopback
+ * @param mode Internal or external loopback
+ *
+ * @return Zero on success, negative on failure.
+ */
+int cvmx_ilk_lpbk(int xiface, cvmx_ilk_lpbk_ena_t enable,
+		  cvmx_ilk_lpbk_mode_t mode)
+{
+	cvmx_ilk_txx_cfg0_t ilk_txx_cfg0;
+	cvmx_ilk_rxx_cfg0_t ilk_rxx_cfg0;
+	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
+	int node = xi.node;
+	int interface = xi.interface - CVMX_ILK_GBL_BASE();
+
+	/* (stray empty statement ';' removed) */
+	if (!octeon_has_feature(OCTEON_FEATURE_ILK))
+		return -1;
+
+	if (interface >= CVMX_NUM_ILK_INTF)
+		return -1;
+
+	/* internal loopback. only 1 type of loopback can be on at 1 time */
+	if (mode == CVMX_ILK_LPBK_INT) {
+		if (enable == CVMX_ILK_LPBK_ENA) {
+			/* Turn external loopback off first */
+			ilk_txx_cfg0.u64 =
+				csr_rd_node(node, CVMX_ILK_TXX_CFG0(interface));
+			ilk_txx_cfg0.s.ext_lpbk = CVMX_ILK_LPBK_DISA;
+			ilk_txx_cfg0.s.ext_lpbk_fc = CVMX_ILK_LPBK_DISA;
+			csr_wr_node(node, CVMX_ILK_TXX_CFG0(interface),
+				    ilk_txx_cfg0.u64);
+
+			ilk_rxx_cfg0.u64 =
+				csr_rd_node(node, CVMX_ILK_RXX_CFG0(interface));
+			ilk_rxx_cfg0.s.ext_lpbk = CVMX_ILK_LPBK_DISA;
+			ilk_rxx_cfg0.s.ext_lpbk_fc = CVMX_ILK_LPBK_DISA;
+			csr_wr_node(node, CVMX_ILK_RXX_CFG0(interface),
+				    ilk_rxx_cfg0.u64);
+		}
+
+		ilk_txx_cfg0.u64 =
+			csr_rd_node(node, CVMX_ILK_TXX_CFG0(interface));
+		ilk_txx_cfg0.s.int_lpbk = enable;
+		csr_wr_node(node, CVMX_ILK_TXX_CFG0(interface),
+			    ilk_txx_cfg0.u64);
+
+		return 0;
+	}
+
+	/* external loopback. only 1 type of loopback can be on at 1 time */
+	if (enable == CVMX_ILK_LPBK_ENA) {
+		/* Turn internal loopback off first */
+		ilk_txx_cfg0.u64 =
+			csr_rd_node(node, CVMX_ILK_TXX_CFG0(interface));
+		ilk_txx_cfg0.s.int_lpbk = CVMX_ILK_LPBK_DISA;
+		csr_wr_node(node, CVMX_ILK_TXX_CFG0(interface),
+			    ilk_txx_cfg0.u64);
+	}
+
+	ilk_txx_cfg0.u64 = csr_rd_node(node, CVMX_ILK_TXX_CFG0(interface));
+	ilk_txx_cfg0.s.ext_lpbk = enable;
+	ilk_txx_cfg0.s.ext_lpbk_fc = enable;
+	csr_wr_node(node, CVMX_ILK_TXX_CFG0(interface), ilk_txx_cfg0.u64);
+
+	ilk_rxx_cfg0.u64 = csr_rd_node(node, CVMX_ILK_RXX_CFG0(interface));
+	ilk_rxx_cfg0.s.ext_lpbk = enable;
+	ilk_rxx_cfg0.s.ext_lpbk_fc = enable;
+	csr_wr_node(node, CVMX_ILK_RXX_CFG0(interface), ilk_rxx_cfg0.u64);
+
+	return 0;
+}
--
2.35.1
^ permalink raw reply related [flat|nested] 52+ messages in thread* [PATCH 31/52] mips: octeon: Add cvmx-ipd.c
2022-03-30 10:06 [PATCH 00/52] mips: octeon: Add ethernet support Stefan Roese
` (26 preceding siblings ...)
2022-03-30 10:07 ` [PATCH 30/52] mips: octeon: Add cvmx-ilk.c Stefan Roese
@ 2022-03-30 10:07 ` Stefan Roese
2022-03-30 10:07 ` [PATCH 32/52] mips: octeon: Add cvmx-pki.c Stefan Roese
` (21 subsequent siblings)
49 siblings, 0 replies; 52+ messages in thread
From: Stefan Roese @ 2022-03-30 10:07 UTC (permalink / raw)
To: u-boot; +Cc: awilliams, cchavva
From: Aaron Williams <awilliams@marvell.com>
Import cvmx-ipd.c from 2013 U-Boot. It will be used by the later
added drivers to support networking on the MIPS Octeon II / III
platforms.
Signed-off-by: Aaron Williams <awilliams@marvell.com>
Signed-off-by: Stefan Roese <sr@denx.de>
---
arch/mips/mach-octeon/cvmx-ipd.c | 690 +++++++++++++++++++++++++++++++
1 file changed, 690 insertions(+)
create mode 100644 arch/mips/mach-octeon/cvmx-ipd.c
diff --git a/arch/mips/mach-octeon/cvmx-ipd.c b/arch/mips/mach-octeon/cvmx-ipd.c
new file mode 100644
index 000000000000..0b5aca56e399
--- /dev/null
+++ b/arch/mips/mach-octeon/cvmx-ipd.c
@@ -0,0 +1,690 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018-2022 Marvell International Ltd.
+ *
+ * IPD Support.
+ */
+
+#include <errno.h>
+#include <log.h>
+#include <time.h>
+#include <linux/delay.h>
+
+#include <mach/cvmx-regs.h>
+#include <mach/cvmx-csr.h>
+#include <mach/cvmx-bootmem.h>
+#include <mach/octeon-model.h>
+#include <mach/cvmx-fuse.h>
+#include <mach/octeon-feature.h>
+#include <mach/cvmx-qlm.h>
+#include <mach/octeon_qlm.h>
+#include <mach/cvmx-pcie.h>
+#include <mach/cvmx-coremask.h>
+#include <mach/cvmx-range.h>
+#include <mach/cvmx-global-resources.h>
+
+#include <mach/cvmx-agl-defs.h>
+#include <mach/cvmx-bgxx-defs.h>
+#include <mach/cvmx-ciu-defs.h>
+#include <mach/cvmx-gmxx-defs.h>
+#include <mach/cvmx-gserx-defs.h>
+#include <mach/cvmx-ilk-defs.h>
+#include <mach/cvmx-ipd-defs.h>
+#include <mach/cvmx-pcsx-defs.h>
+#include <mach/cvmx-pcsxx-defs.h>
+#include <mach/cvmx-pki-defs.h>
+#include <mach/cvmx-pko-defs.h>
+#include <mach/cvmx-xcv-defs.h>
+
+#include <mach/cvmx-hwpko.h>
+#include <mach/cvmx-ilk.h>
+#include <mach/cvmx-ipd.h>
+#include <mach/cvmx-pki.h>
+#include <mach/cvmx-pko3.h>
+#include <mach/cvmx-pko3-queue.h>
+#include <mach/cvmx-pko3-resources.h>
+
+#include <mach/cvmx-helper.h>
+#include <mach/cvmx-helper-board.h>
+#include <mach/cvmx-helper-cfg.h>
+
+#include <mach/cvmx-helper-bgx.h>
+#include <mach/cvmx-helper-cfg.h>
+#include <mach/cvmx-helper-util.h>
+#include <mach/cvmx-helper-pki.h>
+
+/*
+ * Global IPD configuration defaults, applied unless overridden via
+ * cvmx_ipd_set_config().  The pool initializers are positional;
+ * presumably { pool_num, buffer_size, buffer_count } to match the
+ * assignments in cvmx_ipd_set_packet_pool_config() -- confirm against
+ * the cvmx_ipd_config_t declaration.
+ */
+cvmx_ipd_config_t cvmx_ipd_cfg = {
+ .first_mbuf_skip = 184,
+ .ipd_enable = 1,
+ .cache_mode = CVMX_IPD_OPC_MODE_STT,
+ .packet_pool = { 0, 2048, 0 },
+ .wqe_pool = { 1, 128, 0 },
+ .port_config = { CVMX_PIP_PORT_CFG_MODE_SKIPL2,
+ CVMX_POW_TAG_TYPE_ORDERED, CVMX_PIP_TAG_MODE_TUPLE,
+ /* only the last tag field (input_port) is enabled by default */
+ .tag_fields = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1 } }
+};
+
+#define IPD_RED_AVG_DLY 1000
+#define IPD_RED_PRB_DLY 1000
+
+/**
+ * Translate a legacy IPD configuration into the new-style PKI per-node
+ * defaults (default style and pkind parameters, the pkind->style map,
+ * and the default packet pool/aura) so legacy callers keep working on
+ * PKI-capable chips.
+ *
+ * @param ipd_config Legacy configuration to translate.
+ */
+void cvmx_ipd_convert_to_newcfg(cvmx_ipd_config_t ipd_config)
+{
+ int pkind;
+ unsigned int node = cvmx_get_node_num();
+
+ /*Set all the styles to same parameters since old config does not have per port config*/
+ pki_dflt_style[node].parm_cfg.cache_mode = ipd_config.cache_mode;
+ pki_dflt_style[node].parm_cfg.first_skip = ipd_config.first_mbuf_skip;
+ pki_dflt_style[node].parm_cfg.later_skip =
+ ipd_config.not_first_mbuf_skip;
+ pki_dflt_style[node].parm_cfg.mbuff_size =
+ ipd_config.packet_pool.buffer_size;
+ pki_dflt_style[node].parm_cfg.tag_type =
+ ipd_config.port_config.tag_type;
+
+ /*
+ * Each PKI layer tag field covers both the IPv4 and the IPv6 variant
+ * of the corresponding legacy tag flag, hence the OR of the two.
+ */
+ pki_dflt_style[node].tag_cfg.tag_fields.layer_c_src =
+ ipd_config.port_config.tag_fields.ipv6_src_ip |
+ ipd_config.port_config.tag_fields.ipv4_src_ip;
+ pki_dflt_style[node].tag_cfg.tag_fields.layer_c_dst =
+ ipd_config.port_config.tag_fields.ipv6_dst_ip |
+ ipd_config.port_config.tag_fields.ipv4_dst_ip;
+ pki_dflt_style[node].tag_cfg.tag_fields.ip_prot_nexthdr =
+ ipd_config.port_config.tag_fields.ipv6_next_header |
+ ipd_config.port_config.tag_fields.ipv4_protocol;
+ pki_dflt_style[node].tag_cfg.tag_fields.layer_f_src =
+ ipd_config.port_config.tag_fields.ipv6_src_port |
+ ipd_config.port_config.tag_fields.ipv4_src_port;
+ pki_dflt_style[node].tag_cfg.tag_fields.layer_f_dst =
+ ipd_config.port_config.tag_fields.ipv6_dst_port |
+ ipd_config.port_config.tag_fields.ipv4_dst_port;
+ pki_dflt_style[node].tag_cfg.tag_fields.input_port =
+ ipd_config.port_config.tag_fields.input_port;
+
+ /* Legacy parse mode 0x1 maps to LA-to-LG parsing, 0x2 to LC-to-LG;
+ * any other value disables parsing.
+ */
+ if (ipd_config.port_config.parse_mode == 0x1)
+ pki_dflt_pkind[node].initial_parse_mode =
+ CVMX_PKI_PARSE_LA_TO_LG;
+ else if (ipd_config.port_config.parse_mode == 0x2)
+ pki_dflt_pkind[node].initial_parse_mode =
+ CVMX_PKI_PARSE_LC_TO_LG;
+ else
+ pki_dflt_pkind[node].initial_parse_mode =
+ CVMX_PKI_PARSE_NOTHING;
+
+ /* For compatibility make style = pkind so old software can modify style */
+ for (pkind = 0; pkind < CVMX_PKI_NUM_PKIND; pkind++)
+ pkind_style_map[node][pkind] = pkind;
+ /*setup packet pool*/
+ cvmx_helper_pki_set_dflt_pool(node, ipd_config.packet_pool.pool_num,
+ ipd_config.packet_pool.buffer_size,
+ ipd_config.packet_pool.buffer_count);
+ cvmx_helper_pki_set_dflt_aura(node, ipd_config.packet_pool.pool_num,
+ ipd_config.packet_pool.pool_num,
+ ipd_config.packet_pool.buffer_count);
+}
+
+/**
+ * Install a new global IPD configuration.
+ *
+ * On PKI-capable chips the legacy settings are additionally translated
+ * into the new-style PKI defaults.
+ *
+ * @param ipd_config Configuration to install.
+ * @return Zero on success.
+ */
+int cvmx_ipd_set_config(cvmx_ipd_config_t ipd_config)
+{
+	cvmx_ipd_cfg = ipd_config;
+
+	if (octeon_has_feature(OCTEON_FEATURE_PKI))
+		cvmx_ipd_convert_to_newcfg(ipd_config);
+
+	return 0;
+}
+
+/**
+ * Copy the current global IPD configuration to the caller.
+ *
+ * @param ipd_config Where to store a copy of the configuration.
+ */
+void cvmx_ipd_get_config(cvmx_ipd_config_t *ipd_config)
+{
+	*ipd_config = cvmx_ipd_cfg;
+}
+
+/**
+ * Override the packet pool buffer count in the global IPD configuration.
+ *
+ * @param buffer_count New number of packet buffers.
+ */
+void cvmx_ipd_set_packet_pool_buffer_count(u64 buffer_count)
+{
+	cvmx_ipd_cfg.packet_pool.buffer_count = buffer_count;
+}
+
+/**
+ * Set the packet pool parameters in the global IPD configuration.
+ *
+ * On PKI-capable chips the same values are mirrored into the default
+ * PKI pool and aura (the aura number equals the pool number).
+ *
+ * @param pool         FPA pool number for packet buffers.
+ * @param buffer_size  Size of each packet buffer.
+ * @param buffer_count Number of packet buffers.
+ */
+void cvmx_ipd_set_packet_pool_config(s64 pool, u64 buffer_size,
+				     u64 buffer_count)
+{
+	cvmx_ipd_cfg.packet_pool.pool_num = pool;
+	cvmx_ipd_cfg.packet_pool.buffer_size = buffer_size;
+	cvmx_ipd_cfg.packet_pool.buffer_count = buffer_count;
+
+	if (!octeon_has_feature(OCTEON_FEATURE_PKI))
+		return;
+
+	{
+		int node = cvmx_get_node_num();
+
+		cvmx_helper_pki_set_dflt_pool(node, pool, buffer_size,
+					      buffer_count);
+		cvmx_helper_pki_set_dflt_aura(node, pool, pool, buffer_count);
+	}
+}
+
+/**
+ * Override the WQE pool buffer count in the global IPD configuration.
+ *
+ * @param buffer_count New number of work queue entry buffers.
+ */
+void cvmx_ipd_set_wqe_pool_buffer_count(u64 buffer_count)
+{
+	cvmx_ipd_cfg.wqe_pool.buffer_count = buffer_count;
+}
+
+/**
+ * Set the work queue entry pool parameters in the global IPD
+ * configuration.
+ *
+ * @param pool         FPA pool number for WQE buffers.
+ * @param buffer_size  Size of each WQE buffer.
+ * @param buffer_count Number of WQE buffers.
+ */
+void cvmx_ipd_set_wqe_pool_config(s64 pool, u64 buffer_size, u64 buffer_count)
+{
+	cvmx_ipd_cfg.wqe_pool.pool_num = pool;
+	cvmx_ipd_cfg.wqe_pool.buffer_size = buffer_size;
+	cvmx_ipd_cfg.wqe_pool.buffer_count = buffer_count;
+}
+
+/**
+ * @INTERNAL
+ * Return all FPA buffers prefetched by IPD back to their pools, using
+ * the pre-cn68xx register layout: the prefetched WQE and packet
+ * pointers, the WQE/packet pointer FIFOs, the per-port prefetch FIFO
+ * and the holding FIFO are all drained and freed.
+ *
+ * NOTE(review): the ptr fields are shifted left by 7 before freeing,
+ * so they presumably hold 128-byte-unit physical addresses -- confirm
+ * against the hardware reference manual.
+ */
+static void __cvmx_ipd_free_ptr_v1(void)
+{
+ unsigned int wqe_pool = cvmx_fpa_get_wqe_pool();
+ int i;
+ union cvmx_ipd_ptr_count ptr_count;
+ union cvmx_ipd_prc_port_ptr_fifo_ctl prc_port_fifo;
+ int packet_pool = (int)cvmx_fpa_get_packet_pool();
+
+ ptr_count.u64 = csr_rd(CVMX_IPD_PTR_COUNT);
+
+ /* Handle Work Queue Entry in cn56xx and cn52xx */
+ if (octeon_has_feature(OCTEON_FEATURE_NO_WPTR)) {
+ union cvmx_ipd_ctl_status ctl_status;
+
+ /* With no_wptr the WQE lives in the packet buffer, so free
+ * everything into the packet pool.
+ */
+ ctl_status.u64 = csr_rd(CVMX_IPD_CTL_STATUS);
+ if (ctl_status.s.no_wptr)
+ wqe_pool = packet_pool;
+ }
+
+ /* Free the prefetched WQE */
+ if (ptr_count.s.wqev_cnt) {
+ union cvmx_ipd_wqe_ptr_valid wqe_ptr_valid;
+
+ wqe_ptr_valid.u64 = csr_rd(CVMX_IPD_WQE_PTR_VALID);
+ cvmx_fpa1_free(cvmx_phys_to_ptr((u64)wqe_ptr_valid.s.ptr << 7),
+ wqe_pool, 0);
+ }
+
+ /* Free all WQE in the fifo */
+ if (ptr_count.s.wqe_pcnt) {
+ int i;
+ union cvmx_ipd_pwp_ptr_fifo_ctl pwp_fifo;
+
+ /* NOTE(review): cena is cleared while raddr is stepped
+ * through each FIFO entry, apparently so the read-back does
+ * not pop the FIFO -- confirm against the HRM.
+ */
+ pwp_fifo.u64 = csr_rd(CVMX_IPD_PWP_PTR_FIFO_CTL);
+ for (i = 0; i < ptr_count.s.wqe_pcnt; i++) {
+ pwp_fifo.s.cena = 0;
+ pwp_fifo.s.raddr =
+ pwp_fifo.s.max_cnts +
+ (pwp_fifo.s.wraddr + i) % pwp_fifo.s.max_cnts;
+ csr_wr(CVMX_IPD_PWP_PTR_FIFO_CTL, pwp_fifo.u64);
+ pwp_fifo.u64 = csr_rd(CVMX_IPD_PWP_PTR_FIFO_CTL);
+ cvmx_fpa1_free(cvmx_phys_to_ptr((u64)pwp_fifo.s.ptr
+ << 7),
+ wqe_pool, 0);
+ }
+ pwp_fifo.s.cena = 1;
+ csr_wr(CVMX_IPD_PWP_PTR_FIFO_CTL, pwp_fifo.u64);
+ }
+
+ /* Free the prefetched packet */
+ if (ptr_count.s.pktv_cnt) {
+ union cvmx_ipd_pkt_ptr_valid pkt_ptr_valid;
+
+ pkt_ptr_valid.u64 = csr_rd(CVMX_IPD_PKT_PTR_VALID);
+ cvmx_fpa1_free(cvmx_phys_to_ptr((u64)pkt_ptr_valid.s.ptr << 7),
+ packet_pool, 0);
+ }
+
+ /* Free the per port prefetched packets */
+ prc_port_fifo.u64 = csr_rd(CVMX_IPD_PRC_PORT_PTR_FIFO_CTL);
+
+ for (i = 0; i < prc_port_fifo.s.max_pkt; i++) {
+ prc_port_fifo.s.cena = 0;
+ prc_port_fifo.s.raddr = i % prc_port_fifo.s.max_pkt;
+ csr_wr(CVMX_IPD_PRC_PORT_PTR_FIFO_CTL, prc_port_fifo.u64);
+ prc_port_fifo.u64 = csr_rd(CVMX_IPD_PRC_PORT_PTR_FIFO_CTL);
+ cvmx_fpa1_free(cvmx_phys_to_ptr((u64)prc_port_fifo.s.ptr << 7),
+ packet_pool, 0);
+ }
+ prc_port_fifo.s.cena = 1;
+ csr_wr(CVMX_IPD_PRC_PORT_PTR_FIFO_CTL, prc_port_fifo.u64);
+
+ /* Free all packets in the holding fifo */
+ if (ptr_count.s.pfif_cnt) {
+ int i;
+ union cvmx_ipd_prc_hold_ptr_fifo_ctl prc_hold_fifo;
+
+ prc_hold_fifo.u64 = csr_rd(CVMX_IPD_PRC_HOLD_PTR_FIFO_CTL);
+
+ for (i = 0; i < ptr_count.s.pfif_cnt; i++) {
+ prc_hold_fifo.s.cena = 0;
+ prc_hold_fifo.s.raddr = (prc_hold_fifo.s.praddr + i) %
+ prc_hold_fifo.s.max_pkt;
+ csr_wr(CVMX_IPD_PRC_HOLD_PTR_FIFO_CTL,
+ prc_hold_fifo.u64);
+ prc_hold_fifo.u64 =
+ csr_rd(CVMX_IPD_PRC_HOLD_PTR_FIFO_CTL);
+ cvmx_fpa1_free(cvmx_phys_to_ptr((u64)prc_hold_fifo.s.ptr
+ << 7),
+ packet_pool, 0);
+ }
+ prc_hold_fifo.s.cena = 1;
+ csr_wr(CVMX_IPD_PRC_HOLD_PTR_FIFO_CTL, prc_hold_fifo.u64);
+ }
+
+ /* Free all packets in the fifo */
+ if (ptr_count.s.pkt_pcnt) {
+ int i;
+ union cvmx_ipd_pwp_ptr_fifo_ctl pwp_fifo;
+
+ pwp_fifo.u64 = csr_rd(CVMX_IPD_PWP_PTR_FIFO_CTL);
+
+ for (i = 0; i < ptr_count.s.pkt_pcnt; i++) {
+ pwp_fifo.s.cena = 0;
+ pwp_fifo.s.raddr =
+ (pwp_fifo.s.praddr + i) % pwp_fifo.s.max_cnts;
+ csr_wr(CVMX_IPD_PWP_PTR_FIFO_CTL, pwp_fifo.u64);
+ pwp_fifo.u64 = csr_rd(CVMX_IPD_PWP_PTR_FIFO_CTL);
+ cvmx_fpa1_free(cvmx_phys_to_ptr((u64)pwp_fifo.s.ptr
+ << 7),
+ packet_pool, 0);
+ }
+ pwp_fifo.s.cena = 1;
+ csr_wr(CVMX_IPD_PWP_PTR_FIFO_CTL, pwp_fifo.u64);
+ }
+}
+
+/**
+ * @INTERNAL
+ * Return all FPA buffers prefetched by IPD back to their pools, using
+ * the cn68xx-style register layout (NEXT_WQE_PTR / FREE_PTR_* /
+ * PORT_PTR / HOLD_PTR registers).  Same drain sequence as the v1
+ * variant, but WQEs are freed into the packet pool when the no_wptr
+ * mode is active.
+ */
+static void __cvmx_ipd_free_ptr_v2(void)
+{
+ int no_wptr = 0;
+ int i;
+ union cvmx_ipd_port_ptr_fifo_ctl port_ptr_fifo;
+ union cvmx_ipd_ptr_count ptr_count;
+ int packet_pool = (int)cvmx_fpa_get_packet_pool();
+ int wqe_pool = (int)cvmx_fpa_get_wqe_pool();
+
+ ptr_count.u64 = csr_rd(CVMX_IPD_PTR_COUNT);
+
+ /* Handle Work Queue Entry in cn68xx */
+ if (octeon_has_feature(OCTEON_FEATURE_NO_WPTR)) {
+ union cvmx_ipd_ctl_status ctl_status;
+
+ ctl_status.u64 = csr_rd(CVMX_IPD_CTL_STATUS);
+ if (ctl_status.s.no_wptr)
+ no_wptr = 1;
+ }
+
+ /* Free the prefetched WQE */
+ if (ptr_count.s.wqev_cnt) {
+ union cvmx_ipd_next_wqe_ptr next_wqe_ptr;
+
+ next_wqe_ptr.u64 = csr_rd(CVMX_IPD_NEXT_WQE_PTR);
+ if (no_wptr)
+ cvmx_fpa1_free(cvmx_phys_to_ptr((u64)next_wqe_ptr.s.ptr
+ << 7),
+ packet_pool, 0);
+ else
+ cvmx_fpa1_free(cvmx_phys_to_ptr((u64)next_wqe_ptr.s.ptr
+ << 7),
+ wqe_pool, 0);
+ }
+
+ /* Free all WQE in the fifo */
+ if (ptr_count.s.wqe_pcnt) {
+ union cvmx_ipd_free_ptr_fifo_ctl free_fifo;
+ union cvmx_ipd_free_ptr_value free_ptr_value;
+
+ /* cena is cleared while stepping raddr through the FIFO so
+ * entries can be read without popping them; re-enabled after.
+ */
+ free_fifo.u64 = csr_rd(CVMX_IPD_FREE_PTR_FIFO_CTL);
+ for (i = 0; i < ptr_count.s.wqe_pcnt; i++) {
+ free_fifo.s.cena = 0;
+ free_fifo.s.raddr =
+ free_fifo.s.max_cnts +
+ (free_fifo.s.wraddr + i) % free_fifo.s.max_cnts;
+ csr_wr(CVMX_IPD_FREE_PTR_FIFO_CTL, free_fifo.u64);
+ free_fifo.u64 = csr_rd(CVMX_IPD_FREE_PTR_FIFO_CTL);
+ free_ptr_value.u64 = csr_rd(CVMX_IPD_FREE_PTR_VALUE);
+ if (no_wptr)
+ cvmx_fpa1_free(cvmx_phys_to_ptr(
+ (u64)free_ptr_value.s.ptr
+ << 7),
+ packet_pool, 0);
+ else
+ cvmx_fpa1_free(cvmx_phys_to_ptr(
+ (u64)free_ptr_value.s.ptr
+ << 7),
+ wqe_pool, 0);
+ }
+ free_fifo.s.cena = 1;
+ csr_wr(CVMX_IPD_FREE_PTR_FIFO_CTL, free_fifo.u64);
+ }
+
+ /* Free the prefetched packet */
+ if (ptr_count.s.pktv_cnt) {
+ union cvmx_ipd_next_pkt_ptr next_pkt_ptr;
+
+ next_pkt_ptr.u64 = csr_rd(CVMX_IPD_NEXT_PKT_PTR);
+ cvmx_fpa1_free(cvmx_phys_to_ptr((u64)next_pkt_ptr.s.ptr << 7),
+ packet_pool, 0);
+ }
+
+ /* Free the per port prefetched packets */
+ port_ptr_fifo.u64 = csr_rd(CVMX_IPD_PORT_PTR_FIFO_CTL);
+
+ for (i = 0; i < port_ptr_fifo.s.max_pkt; i++) {
+ port_ptr_fifo.s.cena = 0;
+ port_ptr_fifo.s.raddr = i % port_ptr_fifo.s.max_pkt;
+ csr_wr(CVMX_IPD_PORT_PTR_FIFO_CTL, port_ptr_fifo.u64);
+ port_ptr_fifo.u64 = csr_rd(CVMX_IPD_PORT_PTR_FIFO_CTL);
+ cvmx_fpa1_free(cvmx_phys_to_ptr((u64)port_ptr_fifo.s.ptr << 7),
+ packet_pool, 0);
+ }
+ port_ptr_fifo.s.cena = 1;
+ csr_wr(CVMX_IPD_PORT_PTR_FIFO_CTL, port_ptr_fifo.u64);
+
+ /* Free all packets in the holding fifo */
+ if (ptr_count.s.pfif_cnt) {
+ union cvmx_ipd_hold_ptr_fifo_ctl hold_ptr_fifo;
+
+ hold_ptr_fifo.u64 = csr_rd(CVMX_IPD_HOLD_PTR_FIFO_CTL);
+
+ for (i = 0; i < ptr_count.s.pfif_cnt; i++) {
+ hold_ptr_fifo.s.cena = 0;
+ hold_ptr_fifo.s.raddr = (hold_ptr_fifo.s.praddr + i) %
+ hold_ptr_fifo.s.max_pkt;
+ csr_wr(CVMX_IPD_HOLD_PTR_FIFO_CTL, hold_ptr_fifo.u64);
+ hold_ptr_fifo.u64 = csr_rd(CVMX_IPD_HOLD_PTR_FIFO_CTL);
+ cvmx_fpa1_free(cvmx_phys_to_ptr((u64)hold_ptr_fifo.s.ptr
+ << 7),
+ packet_pool, 0);
+ }
+ hold_ptr_fifo.s.cena = 1;
+ csr_wr(CVMX_IPD_HOLD_PTR_FIFO_CTL, hold_ptr_fifo.u64);
+ }
+
+ /* Free all packets in the fifo */
+ if (ptr_count.s.pkt_pcnt) {
+ union cvmx_ipd_free_ptr_fifo_ctl free_fifo;
+ union cvmx_ipd_free_ptr_value free_ptr_value;
+
+ free_fifo.u64 = csr_rd(CVMX_IPD_FREE_PTR_FIFO_CTL);
+
+ for (i = 0; i < ptr_count.s.pkt_pcnt; i++) {
+ free_fifo.s.cena = 0;
+ free_fifo.s.raddr =
+ (free_fifo.s.praddr + i) % free_fifo.s.max_cnts;
+ csr_wr(CVMX_IPD_FREE_PTR_FIFO_CTL, free_fifo.u64);
+ free_fifo.u64 = csr_rd(CVMX_IPD_FREE_PTR_FIFO_CTL);
+ free_ptr_value.u64 = csr_rd(CVMX_IPD_FREE_PTR_VALUE);
+ cvmx_fpa1_free(cvmx_phys_to_ptr(
+ (u64)free_ptr_value.s.ptr << 7),
+ packet_pool, 0);
+ }
+ free_fifo.s.cena = 1;
+ csr_wr(CVMX_IPD_FREE_PTR_FIFO_CTL, free_fifo.u64);
+ }
+}
+
+/**
+ * @INTERNAL
+ * Called by cvmx_helper_shutdown() to extract all FPA buffers out of
+ * the IPD and PIP.  After this function completes, all FPA buffers
+ * that were prefetched by IPD and PIP will be back in the appropriate
+ * FPA pool.  This function does not reset PIP or IPD, as FPA pool
+ * zero must be empty before the reset can be performed.
+ * WARNING: IPD and PIP must be reset soon after a call to this
+ * function.
+ */
+void __cvmx_ipd_free_ptr(void)
+{
+	/* PKND-capable (cn68xx-style) chips use a different register layout */
+	if (!octeon_has_feature(OCTEON_FEATURE_PKND)) {
+		__cvmx_ipd_free_ptr_v1();
+		return;
+	}
+
+	__cvmx_ipd_free_ptr_v2();
+}
+
+/**
+ * Program the legacy IPD unit registers.
+ *
+ * @param mbuff_size            Packet buffer size (IPD_PACKET_MBUFF_SIZE).
+ * @param first_mbuff_skip      Bytes skipped in the first buffer
+ *                              (IPD_1ST_MBUFF_SKIP).
+ * @param not_first_mbuff_skip  Bytes skipped in subsequent buffers
+ *                              (IPD_NOT_1ST_MBUFF_SKIP).
+ * @param first_back            Back value for the first buffer
+ *                              (IPD_1st_NEXT_PTR_BACK).
+ * @param second_back           Back value for the second buffer
+ *                              (IPD_2nd_NEXT_PTR_BACK).
+ * @param wqe_fpa_pool          FPA pool used for work queue entries
+ *                              (IPD_WQE_FPA_QUEUE).
+ * @param cache_mode            Cache operation mode (IPD_CTL_STATUS.opc_mode).
+ * @param back_pres_enable_flag Port backpressure enable (IPD_CTL_STATUS.pbp_en).
+ */
+void cvmx_ipd_config(u64 mbuff_size, u64 first_mbuff_skip,
+ u64 not_first_mbuff_skip, u64 first_back, u64 second_back,
+ u64 wqe_fpa_pool, cvmx_ipd_mode_t cache_mode,
+ u64 back_pres_enable_flag)
+{
+ cvmx_ipd_1st_mbuff_skip_t first_skip;
+ cvmx_ipd_mbuff_not_first_skip_t not_first_skip;
+ cvmx_ipd_packet_mbuff_size_t size;
+ cvmx_ipd_1st_next_ptr_back_t first_back_struct;
+ cvmx_ipd_second_next_ptr_back_t second_back_struct;
+ cvmx_ipd_wqe_fpa_queue_t wqe_pool;
+ cvmx_ipd_ctl_status_t ipd_ctl_reg;
+
+ /* Enforce 1st skip minimum if WQE shares the buffer with packet */
+ if (octeon_has_feature(OCTEON_FEATURE_NO_WPTR)) {
+ union cvmx_ipd_ctl_status ctl_status;
+
+ ctl_status.u64 = csr_rd(CVMX_IPD_CTL_STATUS);
+ if (ctl_status.s.no_wptr != 0 && first_mbuff_skip < 16)
+ first_mbuff_skip = 16;
+ }
+
+ first_skip.u64 = 0;
+ first_skip.s.skip_sz = first_mbuff_skip;
+ csr_wr(CVMX_IPD_1ST_MBUFF_SKIP, first_skip.u64);
+
+ not_first_skip.u64 = 0;
+ not_first_skip.s.skip_sz = not_first_mbuff_skip;
+ csr_wr(CVMX_IPD_NOT_1ST_MBUFF_SKIP, not_first_skip.u64);
+
+ size.u64 = 0;
+ size.s.mb_size = mbuff_size;
+ csr_wr(CVMX_IPD_PACKET_MBUFF_SIZE, size.u64);
+
+ first_back_struct.u64 = 0;
+ first_back_struct.s.back = first_back;
+ csr_wr(CVMX_IPD_1st_NEXT_PTR_BACK, first_back_struct.u64);
+
+ second_back_struct.u64 = 0;
+ second_back_struct.s.back = second_back;
+ csr_wr(CVMX_IPD_2nd_NEXT_PTR_BACK, second_back_struct.u64);
+
+ wqe_pool.u64 = 0;
+ wqe_pool.s.wqe_pool = wqe_fpa_pool;
+ csr_wr(CVMX_IPD_WQE_FPA_QUEUE, wqe_pool.u64);
+
+ /* CTL_STATUS is read-modify-write to preserve unrelated fields */
+ ipd_ctl_reg.u64 = csr_rd(CVMX_IPD_CTL_STATUS);
+ ipd_ctl_reg.s.opc_mode = cache_mode;
+ ipd_ctl_reg.s.pbp_en = back_pres_enable_flag;
+ csr_wr(CVMX_IPD_CTL_STATUS, ipd_ctl_reg.u64);
+
+ /* Note: the example RED code is below */
+}
+
+/**
+ * Setup Random Early Drop on a specific input queue
+ *
+ * @param queue Input queue to setup RED on (0-7)
+ * @param pass_thresh
+ * Packets will begin slowly dropping when there are less than
+ * this many packet buffers free in FPA 0.  Must be greater than
+ * drop_thresh.
+ * @param drop_thresh
+ * All incoming packets will be dropped when there are less
+ * than this many free packet buffers in FPA 0.
+ * @return Zero on success. Negative on failure
+ */
+int cvmx_ipd_setup_red_queue(int queue, int pass_thresh, int drop_thresh)
+{
+	union cvmx_ipd_qosx_red_marks red_marks;
+	union cvmx_ipd_red_quex_param red_param;
+
+	/* Only queues 0-7 exist (see function contract above) */
+	if (queue < 0 || queue > 7)
+		return -1;
+
+	/*
+	 * prb_con divides by (pass - drop); reject thresholds that would
+	 * make the divisor zero or negative (division by zero is UB).
+	 */
+	if (pass_thresh <= drop_thresh)
+		return -1;
+
+	/*
+	 * Set RED to begin dropping packets when there are
+	 * pass_thresh buffers left. It will linearly drop more
+	 * packets until reaching drop_thresh buffers.
+	 */
+	red_marks.u64 = 0;
+	red_marks.s.drop = drop_thresh;
+	red_marks.s.pass = pass_thresh;
+	csr_wr(CVMX_IPD_QOSX_RED_MARKS(queue), red_marks.u64);
+
+	/* Use the actual queue 0 counter, not the average */
+	red_param.u64 = 0;
+	red_param.s.prb_con =
+		(255ul << 24) / (red_marks.s.pass - red_marks.s.drop);
+	red_param.s.avg_con = 1;
+	red_param.s.new_con = 255;
+	red_param.s.use_pcnt = 1;
+	csr_wr(CVMX_IPD_RED_QUEX_PARAM(queue), red_param.u64);
+	return 0;
+}
+
+/**
+ * Setup Random Early Drop to automatically begin dropping packets.
+ *
+ * Disables buffer-count based backpressure for all GMX ports,
+ * programs RED thresholds on all 8 input queues, then enables RED
+ * with the register layout appropriate for the chip generation
+ * (PKND/BPID based on cn68xx-style chips, per-port otherwise).
+ *
+ * @param pass_thresh
+ * Packets will begin slowly dropping when there are less than
+ * this many packet buffers free in FPA 0.
+ * @param drop_thresh
+ * All incoming packets will be dropped when there are less
+ * than this many free packet buffers in FPA 0.
+ * @return Zero on success. Negative on failure
+ */
+int cvmx_ipd_setup_red(int pass_thresh, int drop_thresh)
+{
+ int queue;
+ int interface;
+ int port;
+
+ /* PKI-capable chips do not use these IPD RED registers */
+ if (octeon_has_feature(OCTEON_FEATURE_PKI))
+ return -1;
+ /*
+ * Disable backpressure based on queued buffers. It needs SW support
+ */
+ if (octeon_has_feature(OCTEON_FEATURE_PKND)) {
+ int bpid;
+
+ for (interface = 0; interface < CVMX_HELPER_MAX_GMX;
+ interface++) {
+ int num_ports;
+
+ num_ports = cvmx_helper_ports_on_interface(interface);
+ for (port = 0; port < num_ports; port++) {
+ bpid = cvmx_helper_get_bpid(interface, port);
+ if (bpid == CVMX_INVALID_BPID)
+ debug("setup_red: cvmx_helper_get_bpid(%d, %d) = %d\n",
+ interface, port,
+ cvmx_helper_get_bpid(interface,
+ port));
+ else
+ csr_wr(CVMX_IPD_BPIDX_MBUF_TH(bpid), 0);
+ }
+ }
+ } else {
+ union cvmx_ipd_portx_bp_page_cnt page_cnt;
+
+ /* bp_enb = 0 disables the per-port page-count backpressure */
+ page_cnt.u64 = 0;
+ page_cnt.s.bp_enb = 0;
+ page_cnt.s.page_cnt = 100;
+ for (interface = 0; interface < CVMX_HELPER_MAX_GMX;
+ interface++) {
+ for (port = cvmx_helper_get_first_ipd_port(interface);
+ port < cvmx_helper_get_last_ipd_port(interface);
+ port++)
+ csr_wr(CVMX_IPD_PORTX_BP_PAGE_CNT(port),
+ page_cnt.u64);
+ }
+ }
+
+ for (queue = 0; queue < 8; queue++)
+ cvmx_ipd_setup_red_queue(queue, pass_thresh, drop_thresh);
+
+ /*
+ * Shutoff the dropping based on the per port page count. SW isn't
+ * decrementing it right now
+ */
+ if (octeon_has_feature(OCTEON_FEATURE_PKND))
+ csr_wr(CVMX_IPD_ON_BP_DROP_PKTX(0), 0);
+ else
+ csr_wr(CVMX_IPD_BP_PRT_RED_END, 0);
+
+ /*
+ * Setting up avg_dly and prb_dly, enable bits
+ */
+ if (octeon_has_feature(OCTEON_FEATURE_PKND)) {
+ union cvmx_ipd_red_delay red_delay;
+ union cvmx_ipd_red_bpid_enablex red_bpid_enable;
+
+ red_delay.u64 = 0;
+ red_delay.s.avg_dly = IPD_RED_AVG_DLY;
+ red_delay.s.prb_dly = IPD_RED_PRB_DLY;
+ csr_wr(CVMX_IPD_RED_DELAY, red_delay.u64);
+
+ /*
+ * Only enable the gmx ports
+ */
+ red_bpid_enable.u64 = 0;
+ for (interface = 0; interface < CVMX_HELPER_MAX_GMX;
+ interface++) {
+ int num_ports =
+ cvmx_helper_ports_on_interface(interface);
+ for (port = 0; port < num_ports; port++)
+ red_bpid_enable.u64 |=
+ (((u64)1) << cvmx_helper_get_bpid(
+ interface, port));
+ }
+ csr_wr(CVMX_IPD_RED_BPID_ENABLEX(0), red_bpid_enable.u64);
+ } else {
+ union cvmx_ipd_red_port_enable red_port_enable;
+
+ red_port_enable.u64 = 0;
+ red_port_enable.s.prt_enb = 0xfffffffffull;
+ red_port_enable.s.avg_dly = IPD_RED_AVG_DLY;
+ red_port_enable.s.prb_dly = IPD_RED_PRB_DLY;
+ csr_wr(CVMX_IPD_RED_PORT_ENABLE, red_port_enable.u64);
+
+ /*
+ * Shutoff the dropping of packets based on RED for SRIO ports
+ */
+ if (octeon_has_feature(OCTEON_FEATURE_SRIO)) {
+ union cvmx_ipd_red_port_enable2 red_port_enable2;
+
+ red_port_enable2.u64 = 0;
+ red_port_enable2.s.prt_enb = 0xf0;
+ csr_wr(CVMX_IPD_RED_PORT_ENABLE2, red_port_enable2.u64);
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * Enable IPD.
+ *
+ * On CN68XX this first busy-waits (without a timeout) until the
+ * rst_done flag in IPD_CTL_STATUS clears.  Warns if IPD is already
+ * enabled, and applies the len_m8 setting when enable_len_M8_fix is
+ * set in the global configuration.
+ */
+void cvmx_ipd_enable(void)
+{
+ cvmx_ipd_ctl_status_t ipd_reg;
+
+ ipd_reg.u64 = csr_rd(CVMX_IPD_CTL_STATUS);
+
+ /*
+ * busy-waiting for rst_done in o68
+ */
+ if (OCTEON_IS_MODEL(OCTEON_CN68XX))
+ while (ipd_reg.s.rst_done != 0)
+ ipd_reg.u64 = csr_rd(CVMX_IPD_CTL_STATUS);
+
+ if (ipd_reg.s.ipd_en)
+ debug("Warning: Enabling IPD when IPD already enabled.\n");
+
+ ipd_reg.s.ipd_en = 1;
+
+ if (cvmx_ipd_cfg.enable_len_M8_fix)
+ ipd_reg.s.len_m8 = 1;
+
+ csr_wr(CVMX_IPD_CTL_STATUS, ipd_reg.u64);
+}
+
+/**
+ * Disable packet input.
+ *
+ * On PKI-capable chips the PKI block is disabled instead, since it
+ * replaces the IPD unit there.
+ */
+void cvmx_ipd_disable(void)
+{
+	cvmx_ipd_ctl_status_t ctl;
+
+	if (octeon_has_feature(OCTEON_FEATURE_PKI)) {
+		unsigned int node = cvmx_get_node_num();
+
+		cvmx_pki_disable(node);
+		return;
+	}
+
+	ctl.u64 = csr_rd(CVMX_IPD_CTL_STATUS);
+	ctl.s.ipd_en = 0;
+	csr_wr(CVMX_IPD_CTL_STATUS, ctl.u64);
+}
--
2.35.1
^ permalink raw reply related [flat|nested] 52+ messages in thread* [PATCH 32/52] mips: octeon: Add cvmx-pki.c
2022-03-30 10:06 [PATCH 00/52] mips: octeon: Add ethernet support Stefan Roese
` (27 preceding siblings ...)
2022-03-30 10:07 ` [PATCH 31/52] mips: octeon: Add cvmx-ipd.c Stefan Roese
@ 2022-03-30 10:07 ` Stefan Roese
2022-03-30 10:07 ` [PATCH 34/52] mips: octeon: Add cvmx-pko.c Stefan Roese
` (20 subsequent siblings)
49 siblings, 0 replies; 52+ messages in thread
From: Stefan Roese @ 2022-03-30 10:07 UTC (permalink / raw)
To: u-boot; +Cc: awilliams, cchavva
From: Aaron Williams <awilliams@marvell.com>
Import cvmx-pki.c from 2013 U-Boot. It will be used by the later
added drivers to support networking on the MIPS Octeon II / III
platforms.
Signed-off-by: Aaron Williams <awilliams@marvell.com>
Signed-off-by: Stefan Roese <sr@denx.de>
---
arch/mips/mach-octeon/cvmx-pki.c | 1619 ++++++++++++++++++++++++++++++
1 file changed, 1619 insertions(+)
create mode 100644 arch/mips/mach-octeon/cvmx-pki.c
diff --git a/arch/mips/mach-octeon/cvmx-pki.c b/arch/mips/mach-octeon/cvmx-pki.c
new file mode 100644
index 000000000000..136e17966b18
--- /dev/null
+++ b/arch/mips/mach-octeon/cvmx-pki.c
@@ -0,0 +1,1619 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018-2022 Marvell International Ltd.
+ *
+ * PKI Support.
+ */
+
+#include <time.h>
+#include <log.h>
+#include <linux/delay.h>
+
+#include <mach/cvmx-regs.h>
+#include <mach/cvmx-csr.h>
+#include <mach/cvmx-bootmem.h>
+#include <mach/octeon-model.h>
+#include <mach/cvmx-fuse.h>
+#include <mach/octeon-feature.h>
+#include <mach/cvmx-qlm.h>
+#include <mach/octeon_qlm.h>
+#include <mach/cvmx-pcie.h>
+#include <mach/cvmx-coremask.h>
+
+#include <mach/cvmx-agl-defs.h>
+#include <mach/cvmx-bgxx-defs.h>
+#include <mach/cvmx-ciu-defs.h>
+#include <mach/cvmx-gmxx-defs.h>
+#include <mach/cvmx-gserx-defs.h>
+#include <mach/cvmx-ilk-defs.h>
+#include <mach/cvmx-ipd-defs.h>
+#include <mach/cvmx-pcsx-defs.h>
+#include <mach/cvmx-pcsxx-defs.h>
+#include <mach/cvmx-pki-defs.h>
+#include <mach/cvmx-pko-defs.h>
+#include <mach/cvmx-xcv-defs.h>
+
+#include <mach/cvmx-hwpko.h>
+#include <mach/cvmx-ilk.h>
+#include <mach/cvmx-pki.h>
+#include <mach/cvmx-pki-cluster.h>
+#include <mach/cvmx-pki-resources.h>
+
+#include <mach/cvmx-helper.h>
+#include <mach/cvmx-helper-board.h>
+#include <mach/cvmx-helper-cfg.h>
+
+/**
+ * This function enables PKI.
+ *
+ * Spins until any pending soft reset has finished, then sets the
+ * enable bit in PKI_BUF_CTL.  Warns if PKI was already enabled.
+ *
+ * @param node Node to enable PKI.
+ */
+void cvmx_pki_enable(int node)
+{
+	cvmx_pki_sft_rst_t rst;
+	cvmx_pki_buf_ctl_t ctl;
+
+	/* Wait for an in-progress soft reset to complete */
+	do {
+		rst.u64 = csr_rd_node(node, CVMX_PKI_SFT_RST);
+	} while (rst.s.busy != 0);
+
+	ctl.u64 = csr_rd_node(node, CVMX_PKI_BUF_CTL);
+	if (ctl.s.pki_en)
+		debug("Warning: Enabling PKI when PKI already enabled.\n");
+
+	ctl.s.pki_en = 1;
+	csr_wr_node(node, CVMX_PKI_BUF_CTL, ctl.u64);
+}
+
+/**
+ * This function disables PKI.
+ *
+ * Clears the enable bit in PKI_BUF_CTL.
+ *
+ * @param node Node to disable PKI.
+ */
+void cvmx_pki_disable(int node)
+{
+	cvmx_pki_buf_ctl_t ctl;
+
+	ctl.u64 = csr_rd_node(node, CVMX_PKI_BUF_CTL);
+	ctl.s.pki_en = 0;
+	csr_wr_node(node, CVMX_PKI_BUF_CTL, ctl.u64);
+}
+
+/**
+ * This function soft resets PKI.
+ *
+ * Waits (up to 10000 cycles each) for the reset unit to become
+ * inactive, triggers the reset, then waits for it to finish.
+ *
+ * @param node Node to enable PKI.
+ */
+void cvmx_pki_reset(int node)
+{
+ cvmx_pki_sft_rst_t sft_rst;
+
+ /* NOTE(review): sft_rst is read once here and its value is reused
+ * after the wait below; if other fields of the register changed in
+ * the meantime the write-back would use stale values -- confirm
+ * this is intended.
+ */
+ sft_rst.u64 = csr_rd_node(node, CVMX_PKI_SFT_RST);
+ if (CVMX_WAIT_FOR_FIELD64_NODE(node, CVMX_PKI_SFT_RST,
+ cvmx_pki_sft_rst_t, active, ==, 0,
+ 10000)) {
+ debug("PKI_SFT_RST is not active\n");
+ }
+
+ sft_rst.s.rst = 1;
+ csr_wr_node(node, CVMX_PKI_SFT_RST, sft_rst.u64);
+ if (CVMX_WAIT_FOR_FIELD64_NODE(node, CVMX_PKI_SFT_RST,
+ cvmx_pki_sft_rst_t, busy, ==, 0,
+ 10000)) {
+ debug("PKI_SFT_RST is busy\n");
+ }
+}
+
+/**
+ * This function sets the clusters in PKI.
+ *
+ * Loads the default cluster parse-engine code image into the PKI
+ * instruction memory (PKI_IMEM) word by word.
+ *
+ * @param node Node to set clusters.
+ * @return Zero on success.
+ */
+int cvmx_pki_setup_clusters(int node)
+{
+	int word;
+
+	for (word = 0; word < cvmx_pki_cluster_code_length; word++)
+		csr_wr_node(node, CVMX_PKI_IMEMX(word),
+			    cvmx_pki_cluster_code_default[word]);
+
+	return 0;
+}
+
+/**
+ * @INTERNAL
+ * This function is called by cvmx_helper_shutdown() to extract all FPA buffers
+ * out of the PKI. After this function completes, all FPA buffers that were
+ * prefetched by PKI will be in the appropriate FPA pool. This functions does
+ * not reset PKI.
+ * WARNING: It is very important that PKI be reset soon after a call to this function.
+ */
+void __cvmx_pki_free_ptr(int node)
+{
+ cvmx_pki_buf_ctl_t buf_ctl;
+
+ buf_ctl.u64 = csr_rd_node(node, CVMX_PKI_BUF_CTL);
+ /* Disable buffering any data. */
+ buf_ctl.s.pkt_off = 1;
+ /* Disable caching of any data and return all the prefetched buffers to FPA. */
+ buf_ctl.s.fpa_cac_dis = 1;
+ csr_wr_node(node, CVMX_PKI_BUF_CTL, buf_ctl.u64);
+ /* Read back, presumably to make sure the write has taken effect
+ * before the caller proceeds -- the value is not used. Confirm.
+ */
+ buf_ctl.u64 = csr_rd_node(node, CVMX_PKI_BUF_CTL);
+}
+
+/**
+ * This function reads global configuration of PKI block.
+ *
+ * Fills in the statistics mode, per-group cluster masks, global parse
+ * enables, tag secrets, both frame-length check pairs and the
+ * fpa_wait flag from the corresponding PKI registers.
+ *
+ * @param node Node number.
+ * @param gbl_cfg Pointer to struct to read global configuration.
+ */
+void cvmx_pki_read_global_config(int node,
+ struct cvmx_pki_global_config *gbl_cfg)
+{
+ cvmx_pki_stat_ctl_t stat_ctl;
+ cvmx_pki_icgx_cfg_t icg_cfg;
+ cvmx_pki_gbl_pen_t gbl_pen;
+ cvmx_pki_tag_secret_t tag_secret;
+ cvmx_pki_frm_len_chkx_t frm_len_chk;
+ cvmx_pki_buf_ctl_t buf_ctl;
+ unsigned int cl_grp;
+ int id;
+
+ stat_ctl.u64 = csr_rd_node(node, CVMX_PKI_STAT_CTL);
+ gbl_cfg->stat_mode = stat_ctl.s.mode;
+
+ for (cl_grp = 0; cl_grp < CVMX_PKI_NUM_CLUSTER_GROUP; cl_grp++) {
+ icg_cfg.u64 = csr_rd_node(node, CVMX_PKI_ICGX_CFG(cl_grp));
+ gbl_cfg->cluster_mask[cl_grp] = icg_cfg.s.clusters;
+ }
+ gbl_pen.u64 = csr_rd_node(node, CVMX_PKI_GBL_PEN);
+ gbl_cfg->gbl_pen.virt_pen = gbl_pen.s.virt_pen;
+ gbl_cfg->gbl_pen.clg_pen = gbl_pen.s.clg_pen;
+ gbl_cfg->gbl_pen.cl2_pen = gbl_pen.s.cl2_pen;
+ gbl_cfg->gbl_pen.l4_pen = gbl_pen.s.l4_pen;
+ gbl_cfg->gbl_pen.il3_pen = gbl_pen.s.il3_pen;
+ gbl_cfg->gbl_pen.l3_pen = gbl_pen.s.l3_pen;
+ gbl_cfg->gbl_pen.mpls_pen = gbl_pen.s.mpls_pen;
+ gbl_cfg->gbl_pen.fulc_pen = gbl_pen.s.fulc_pen;
+ gbl_cfg->gbl_pen.dsa_pen = gbl_pen.s.dsa_pen;
+ gbl_cfg->gbl_pen.hg_pen = gbl_pen.s.hg_pen;
+
+ tag_secret.u64 = csr_rd_node(node, CVMX_PKI_TAG_SECRET);
+ gbl_cfg->tag_secret.dst6 = tag_secret.s.dst6;
+ gbl_cfg->tag_secret.src6 = tag_secret.s.src6;
+ gbl_cfg->tag_secret.dst = tag_secret.s.dst;
+ gbl_cfg->tag_secret.src = tag_secret.s.src;
+
+ for (id = 0; id < CVMX_PKI_NUM_FRAME_CHECK; id++) {
+ frm_len_chk.u64 = csr_rd_node(node, CVMX_PKI_FRM_LEN_CHKX(id));
+ gbl_cfg->frm_len[id].maxlen = frm_len_chk.s.maxlen;
+ gbl_cfg->frm_len[id].minlen = frm_len_chk.s.minlen;
+ }
+ buf_ctl.u64 = csr_rd_node(node, CVMX_PKI_BUF_CTL);
+ gbl_cfg->fpa_wait = buf_ctl.s.fpa_wait;
+}
+
+/**
+ * This function writes max and min frame lengths to hardware which can be used
+ * to check the size of frame arrived.There are 2 possible combination which are
+ * indicated by id field.
+ *
+ * @param node Node number.
+ * @param id Choose which frame len register to write to
+ * @param len_chk Struct containing byte count for max-sized/min-sized frame check.
+ */
+static void cvmx_pki_write_frame_len(int node, int id,
+				     struct cvmx_pki_frame_len len_chk)
+{
+	cvmx_pki_frm_len_chkx_t chk;
+
+	/* Read-modify-write to preserve unrelated register fields */
+	chk.u64 = csr_rd_node(node, CVMX_PKI_FRM_LEN_CHKX(id));
+	chk.s.maxlen = len_chk.maxlen;
+	chk.s.minlen = len_chk.minlen;
+	csr_wr_node(node, CVMX_PKI_FRM_LEN_CHKX(id), chk.u64);
+}
+
+/**
+ * This function writes global configuration of PKI into hw.
+ *
+ * Writes the cluster-group masks, statistics mode and fpa_wait flag,
+ * then the global parse enables, tag secret and both frame-length
+ * checks via the corresponding helpers.
+ *
+ * @param node Node number.
+ * @param gbl_cfg Pointer to struct to global configuration.
+ */
+void cvmx_pki_write_global_config(int node,
+ struct cvmx_pki_global_config *gbl_cfg)
+{
+ cvmx_pki_stat_ctl_t stat_ctl;
+ cvmx_pki_buf_ctl_t buf_ctl;
+ unsigned int cl_grp;
+
+ for (cl_grp = 0; cl_grp < CVMX_PKI_NUM_CLUSTER_GROUP; cl_grp++)
+ cvmx_pki_attach_cluster_to_group(node, cl_grp,
+ gbl_cfg->cluster_mask[cl_grp]);
+
+ stat_ctl.u64 = 0;
+ stat_ctl.s.mode = gbl_cfg->stat_mode;
+ csr_wr_node(node, CVMX_PKI_STAT_CTL, stat_ctl.u64);
+
+ buf_ctl.u64 = csr_rd_node(node, CVMX_PKI_BUF_CTL);
+ buf_ctl.s.fpa_wait = gbl_cfg->fpa_wait;
+ csr_wr_node(node, CVMX_PKI_BUF_CTL, buf_ctl.u64);
+
+ cvmx_pki_write_global_parse(node, gbl_cfg->gbl_pen);
+ cvmx_pki_write_tag_secret(node, gbl_cfg->tag_secret);
+ cvmx_pki_write_frame_len(node, 0, gbl_cfg->frm_len[0]);
+ cvmx_pki_write_frame_len(node, 1, gbl_cfg->frm_len[1]);
+}
+
+/**
+ * This function reads per pkind parameters in hardware which defines how
+ * the incoming packet is processed.
+ *
+ * @param node Node number.
+ * @param pkind PKI supports a large number of incoming interfaces and packets
+ * arriving on different interfaces or channels may want to be processed
+ * differently. PKI uses the pkind to determine how the incoming packet
+ * is processed.
+ * @param pkind_cfg Pointer to struct containing pkind configuration read
+ * from the hardware.
+ * @return Zero on success.
+ */
+int cvmx_pki_read_pkind_config(int node, int pkind,
+ struct cvmx_pki_pkind_config *pkind_cfg)
+{
+ int cluster = 0;
+ u64 cl_mask;
+ cvmx_pki_pkindx_icgsel_t icgsel;
+ cvmx_pki_clx_pkindx_style_t pstyle;
+ cvmx_pki_icgx_cfg_t icg_cfg;
+ cvmx_pki_clx_pkindx_cfg_t pcfg;
+ cvmx_pki_clx_pkindx_skip_t skip;
+ cvmx_pki_clx_pkindx_l2_custom_t l2cust;
+ cvmx_pki_clx_pkindx_lg_custom_t lgcust;
+
+ icgsel.u64 = csr_rd_node(node, CVMX_PKI_PKINDX_ICGSEL(pkind));
+ icg_cfg.u64 = csr_rd_node(node, CVMX_PKI_ICGX_CFG(icgsel.s.icg));
+ pkind_cfg->cluster_grp = (uint8_t)icgsel.s.icg;
+ cl_mask = (uint64_t)icg_cfg.s.clusters;
+ /* Read from the first (lowest-numbered) cluster in the group's mask;
+ * the per-cluster copies are assumed to be identical -- see
+ * cvmx_pki_write_pkind_config(), which writes all of them.
+ */
+ cluster = __builtin_ffsll(cl_mask) - 1;
+
+ pstyle.u64 =
+ csr_rd_node(node, CVMX_PKI_CLX_PKINDX_STYLE(pkind, cluster));
+ pkind_cfg->initial_parse_mode = pstyle.s.pm;
+ pkind_cfg->initial_style = pstyle.s.style;
+
+ pcfg.u64 = csr_rd_node(node, CVMX_PKI_CLX_PKINDX_CFG(pkind, cluster));
+ pkind_cfg->fcs_pres = pcfg.s.fcs_pres;
+ pkind_cfg->parse_en.inst_hdr = pcfg.s.inst_hdr;
+ pkind_cfg->parse_en.mpls_en = pcfg.s.mpls_en;
+ pkind_cfg->parse_en.lg_custom = pcfg.s.lg_custom;
+ pkind_cfg->parse_en.fulc_en = pcfg.s.fulc_en;
+ pkind_cfg->parse_en.dsa_en = pcfg.s.dsa_en;
+ pkind_cfg->parse_en.hg2_en = pcfg.s.hg2_en;
+ pkind_cfg->parse_en.hg_en = pcfg.s.hg_en;
+
+ skip.u64 = csr_rd_node(node, CVMX_PKI_CLX_PKINDX_SKIP(pkind, cluster));
+ pkind_cfg->fcs_skip = skip.s.fcs_skip;
+ pkind_cfg->inst_skip = skip.s.inst_skip;
+
+ l2cust.u64 = csr_rd_node(node,
+ CVMX_PKI_CLX_PKINDX_L2_CUSTOM(pkind, cluster));
+ pkind_cfg->l2_scan_offset = l2cust.s.offset;
+
+ lgcust.u64 = csr_rd_node(node,
+ CVMX_PKI_CLX_PKINDX_LG_CUSTOM(pkind, cluster));
+ pkind_cfg->lg_scan_offset = lgcust.s.offset;
+ return 0;
+}
+
+/**
+ * This function writes per pkind parameters in hardware which defines how
+ * the incoming packet is processed.
+ *
+ * @param node Node number.
+ * @param pkind PKI supports a large number of incoming interfaces and packets
+ * arriving on different interfaces or channels may want to be processed
+ * differently. PKI uses the pkind to determine how the incoming
+ * packet is processed.
+ * @param pkind_cfg Pointer to struct containing pkind configuration that
+ *	  needs to be written to the hardware.
+ */
+int cvmx_pki_write_pkind_config(int node, int pkind,
+				struct cvmx_pki_pkind_config *pkind_cfg)
+{
+	unsigned int cluster = 0;
+	u64 cluster_mask;
+	cvmx_pki_pkindx_icgsel_t icgsel;
+	cvmx_pki_clx_pkindx_style_t pstyle;
+	cvmx_pki_icgx_cfg_t icg_cfg;
+	cvmx_pki_clx_pkindx_cfg_t pcfg;
+	cvmx_pki_clx_pkindx_skip_t skip;
+	cvmx_pki_clx_pkindx_l2_custom_t l2cust;
+	cvmx_pki_clx_pkindx_lg_custom_t lgcust;
+
+	/* Reject out-of-range pkind, cluster group or style up front */
+	if (pkind >= CVMX_PKI_NUM_PKIND ||
+	    pkind_cfg->cluster_grp >= CVMX_PKI_NUM_CLUSTER_GROUP ||
+	    pkind_cfg->initial_style >= CVMX_PKI_NUM_FINAL_STYLE) {
+		debug("ERROR: Configuring PKIND pkind = %d cluster_group = %d style = %d\n",
+		      pkind, pkind_cfg->cluster_grp, pkind_cfg->initial_style);
+		return -1;
+	}
+	/* Bind the pkind to its cluster group */
+	icgsel.u64 = csr_rd_node(node, CVMX_PKI_PKINDX_ICGSEL(pkind));
+	icgsel.s.icg = pkind_cfg->cluster_grp;
+	csr_wr_node(node, CVMX_PKI_PKINDX_ICGSEL(pkind), icgsel.u64);
+
+	/* Program every cluster belonging to the group identically, so any
+	 * cluster that parses a packet from this pkind behaves the same.
+	 */
+	icg_cfg.u64 =
+		csr_rd_node(node, CVMX_PKI_ICGX_CFG(pkind_cfg->cluster_grp));
+	cluster_mask = (uint64_t)icg_cfg.s.clusters;
+	while (cluster < CVMX_PKI_NUM_CLUSTER) {
+		if (cluster_mask & (0x01L << cluster)) {
+			/* Initial parse mode and style */
+			pstyle.u64 = csr_rd_node(
+				node,
+				CVMX_PKI_CLX_PKINDX_STYLE(pkind, cluster));
+			pstyle.s.pm = pkind_cfg->initial_parse_mode;
+			pstyle.s.style = pkind_cfg->initial_style;
+			csr_wr_node(node,
+				    CVMX_PKI_CLX_PKINDX_STYLE(pkind, cluster),
+				    pstyle.u64);
+
+			/* Parser feature enables */
+			pcfg.u64 = csr_rd_node(
+				node, CVMX_PKI_CLX_PKINDX_CFG(pkind, cluster));
+			pcfg.s.fcs_pres = pkind_cfg->fcs_pres;
+			pcfg.s.inst_hdr = pkind_cfg->parse_en.inst_hdr;
+			pcfg.s.mpls_en = pkind_cfg->parse_en.mpls_en;
+			pcfg.s.lg_custom = pkind_cfg->parse_en.lg_custom;
+			pcfg.s.fulc_en = pkind_cfg->parse_en.fulc_en;
+			pcfg.s.dsa_en = pkind_cfg->parse_en.dsa_en;
+			pcfg.s.hg2_en = pkind_cfg->parse_en.hg2_en;
+			pcfg.s.hg_en = pkind_cfg->parse_en.hg_en;
+			csr_wr_node(node,
+				    CVMX_PKI_CLX_PKINDX_CFG(pkind, cluster),
+				    pcfg.u64);
+
+			/* Pre-parse skip amounts */
+			skip.u64 = csr_rd_node(
+				node, CVMX_PKI_CLX_PKINDX_SKIP(pkind, cluster));
+			skip.s.fcs_skip = pkind_cfg->fcs_skip;
+			skip.s.inst_skip = pkind_cfg->inst_skip;
+			csr_wr_node(node,
+				    CVMX_PKI_CLX_PKINDX_SKIP(pkind, cluster),
+				    skip.u64);
+
+			/* Custom L2/LG scan offsets */
+			l2cust.u64 = csr_rd_node(
+				node,
+				CVMX_PKI_CLX_PKINDX_L2_CUSTOM(pkind, cluster));
+			l2cust.s.offset = pkind_cfg->l2_scan_offset;
+			csr_wr_node(node,
+				    CVMX_PKI_CLX_PKINDX_L2_CUSTOM(pkind,
+								  cluster),
+				    l2cust.u64);
+
+			lgcust.u64 = csr_rd_node(
+				node,
+				CVMX_PKI_CLX_PKINDX_LG_CUSTOM(pkind, cluster));
+			lgcust.s.offset = pkind_cfg->lg_scan_offset;
+			csr_wr_node(node,
+				    CVMX_PKI_CLX_PKINDX_LG_CUSTOM(pkind,
+								  cluster),
+				    lgcust.u64);
+		}
+		cluster++;
+	}
+	return 0;
+}
+
+/**
+ * This function reads parameters associated with tag configuration in hardware.
+ * Only first cluster in the group is used.
+ *
+ * @param node Node number.
+ * @param style Style to configure tag for.
+ * @param cluster_mask Mask of clusters to configure the style for.
+ * @param tag_cfg Pointer to tag configuration struct.
+ */
+void cvmx_pki_read_tag_config(int node, int style, uint64_t cluster_mask,
+			      struct cvmx_pki_style_tag_cfg *tag_cfg)
+{
+	int mask, tag_idx, index;
+	cvmx_pki_clx_stylex_cfg2_t style_cfg2;
+	cvmx_pki_clx_stylex_alg_t style_alg;
+	cvmx_pki_stylex_tag_sel_t tag_sel;
+	cvmx_pki_tag_incx_ctl_t tag_ctl;
+	cvmx_pki_tag_incx_mask_t tag_mask;
+	/* Only the first cluster of the mask is read; __builtin_ffsll() is
+	 * 1-based so subtract 1 to get the bit index.
+	 */
+	int cluster = __builtin_ffsll(cluster_mask) - 1;
+
+	style_cfg2.u64 =
+		csr_rd_node(node, CVMX_PKI_CLX_STYLEX_CFG2(style, cluster));
+	style_alg.u64 =
+		csr_rd_node(node, CVMX_PKI_CLX_STYLEX_ALG(style, cluster));
+
+	/* 7-Tuple Tag: per-layer source/destination field enables plus the
+	 * protocol-specific tag selectors.
+	 */
+	tag_cfg->tag_fields.layer_g_src = style_cfg2.s.tag_src_lg;
+	tag_cfg->tag_fields.layer_f_src = style_cfg2.s.tag_src_lf;
+	tag_cfg->tag_fields.layer_e_src = style_cfg2.s.tag_src_le;
+	tag_cfg->tag_fields.layer_d_src = style_cfg2.s.tag_src_ld;
+	tag_cfg->tag_fields.layer_c_src = style_cfg2.s.tag_src_lc;
+	tag_cfg->tag_fields.layer_b_src = style_cfg2.s.tag_src_lb;
+	tag_cfg->tag_fields.layer_g_dst = style_cfg2.s.tag_dst_lg;
+	tag_cfg->tag_fields.layer_f_dst = style_cfg2.s.tag_dst_lf;
+	tag_cfg->tag_fields.layer_e_dst = style_cfg2.s.tag_dst_le;
+	tag_cfg->tag_fields.layer_d_dst = style_cfg2.s.tag_dst_ld;
+	tag_cfg->tag_fields.layer_c_dst = style_cfg2.s.tag_dst_lc;
+	tag_cfg->tag_fields.layer_b_dst = style_cfg2.s.tag_dst_lb;
+	tag_cfg->tag_fields.tag_vni = style_alg.s.tag_vni;
+	tag_cfg->tag_fields.tag_gtp = style_alg.s.tag_gtp;
+	tag_cfg->tag_fields.tag_spi = style_alg.s.tag_spi;
+	tag_cfg->tag_fields.tag_sync = style_alg.s.tag_syn;
+	tag_cfg->tag_fields.ip_prot_nexthdr = style_alg.s.tag_pctl;
+	tag_cfg->tag_fields.second_vlan = style_alg.s.tag_vs1;
+	tag_cfg->tag_fields.first_vlan = style_alg.s.tag_vs0;
+	tag_cfg->tag_fields.mpls_label = style_alg.s.tag_mpls0;
+	tag_cfg->tag_fields.input_port = style_alg.s.tag_prt;
+
+	/* Custom-Mask Tag: each of the 4 masks has its own TAG_SEL index
+	 * field, so the index must be picked with a switch.  All four
+	 * switch cases are covered, so tag_idx is always assigned.
+	 */
+	tag_sel.u64 = csr_rd_node(node, CVMX_PKI_STYLEX_TAG_SEL(style));
+	for (mask = 0; mask < 4; mask++) {
+		tag_cfg->mask_tag[mask].enable =
+			(style_cfg2.s.tag_inc & (1 << mask)) != 0;
+		switch (mask) {
+		case 0:
+			tag_idx = tag_sel.s.tag_idx0;
+			break;
+		case 1:
+			tag_idx = tag_sel.s.tag_idx1;
+			break;
+		case 2:
+			tag_idx = tag_sel.s.tag_idx2;
+			break;
+		case 3:
+			tag_idx = tag_sel.s.tag_idx3;
+			break;
+		}
+		/* TAG_INC registers are grouped 4 per tag index */
+		index = tag_idx * 4 + mask;
+		tag_mask.u64 = csr_rd_node(node, CVMX_PKI_TAG_INCX_MASK(index));
+		tag_cfg->mask_tag[mask].val = tag_mask.s.en;
+		tag_ctl.u64 = csr_rd_node(node, CVMX_PKI_TAG_INCX_CTL(index));
+		tag_cfg->mask_tag[mask].base = tag_ctl.s.ptr_sel;
+		tag_cfg->mask_tag[mask].offset = tag_ctl.s.offset;
+	}
+}
+
+/**
+ * This function writes/configures parameters associated with tag configuration in
+ * hardware. In Custom-Mask Tagging, all four masks use the same base index
+ * to access Tag Control and Tag Mask registers.
+ *
+ * @param node Node number.
+ * @param style Style to configure tag for.
+ * @param cluster_mask Mask of clusters to configure the style for.
+ * @param tag_cfg Pointer to tag configuration struct.
+ */
+void cvmx_pki_write_tag_config(int node, int style, uint64_t cluster_mask,
+			       struct cvmx_pki_style_tag_cfg *tag_cfg)
+{
+	int mask, index, tag_idx, mtag_en = 0;
+	unsigned int cluster = 0;
+	cvmx_pki_clx_stylex_cfg2_t scfg2;
+	cvmx_pki_clx_stylex_alg_t style_alg;
+	cvmx_pki_tag_incx_ctl_t tag_ctl;
+	cvmx_pki_tag_incx_mask_t tag_mask;
+	cvmx_pki_stylex_tag_sel_t tag_sel;
+
+	/* Phase 1: program the per-cluster style registers for every
+	 * cluster selected in cluster_mask.
+	 */
+	while (cluster < CVMX_PKI_NUM_CLUSTER) {
+		if (cluster_mask & (0x01L << cluster)) {
+			/* 7-Tuple Tag: layer source/destination enables */
+			scfg2.u64 = csr_rd_node(
+				node, CVMX_PKI_CLX_STYLEX_CFG2(style, cluster));
+			scfg2.s.tag_src_lg = tag_cfg->tag_fields.layer_g_src;
+			scfg2.s.tag_src_lf = tag_cfg->tag_fields.layer_f_src;
+			scfg2.s.tag_src_le = tag_cfg->tag_fields.layer_e_src;
+			scfg2.s.tag_src_ld = tag_cfg->tag_fields.layer_d_src;
+			scfg2.s.tag_src_lc = tag_cfg->tag_fields.layer_c_src;
+			scfg2.s.tag_src_lb = tag_cfg->tag_fields.layer_b_src;
+			scfg2.s.tag_dst_lg = tag_cfg->tag_fields.layer_g_dst;
+			scfg2.s.tag_dst_lf = tag_cfg->tag_fields.layer_f_dst;
+			scfg2.s.tag_dst_le = tag_cfg->tag_fields.layer_e_dst;
+			scfg2.s.tag_dst_ld = tag_cfg->tag_fields.layer_d_dst;
+			scfg2.s.tag_dst_lc = tag_cfg->tag_fields.layer_c_dst;
+			scfg2.s.tag_dst_lb = tag_cfg->tag_fields.layer_b_dst;
+			csr_wr_node(node,
+				    CVMX_PKI_CLX_STYLEX_CFG2(style, cluster),
+				    scfg2.u64);
+
+			/* Protocol-specific tag selectors */
+			style_alg.u64 = csr_rd_node(
+				node, CVMX_PKI_CLX_STYLEX_ALG(style, cluster));
+			style_alg.s.tag_vni = tag_cfg->tag_fields.tag_vni;
+			style_alg.s.tag_gtp = tag_cfg->tag_fields.tag_gtp;
+			style_alg.s.tag_spi = tag_cfg->tag_fields.tag_spi;
+			style_alg.s.tag_syn = tag_cfg->tag_fields.tag_sync;
+			style_alg.s.tag_pctl =
+				tag_cfg->tag_fields.ip_prot_nexthdr;
+			style_alg.s.tag_vs1 = tag_cfg->tag_fields.second_vlan;
+			style_alg.s.tag_vs0 = tag_cfg->tag_fields.first_vlan;
+			style_alg.s.tag_mpls0 = tag_cfg->tag_fields.mpls_label;
+			style_alg.s.tag_prt = tag_cfg->tag_fields.input_port;
+			csr_wr_node(node,
+				    CVMX_PKI_CLX_STYLEX_ALG(style, cluster),
+				    style_alg.u64);
+
+			/* Custom-Mask Tag (Part 1): set TAG_INC enable bits.
+			 * mtag_en accumulates across clusters - after the
+			 * loop it is simply nonzero iff any mask is enabled
+			 * for at least one selected cluster.
+			 */
+			for (mask = 0; mask < 4; mask++) {
+				if (tag_cfg->mask_tag[mask].enable)
+					mtag_en++;
+			}
+			if (mtag_en) {
+				scfg2.u64 = csr_rd_node(
+					node, CVMX_PKI_CLX_STYLEX_CFG2(
+						      style, cluster));
+				scfg2.s.tag_inc = 0;
+				for (mask = 0; mask < 4; mask++) {
+					if (tag_cfg->mask_tag[mask].enable)
+						scfg2.s.tag_inc |= 1 << mask;
+				}
+				csr_wr_node(node,
+					    CVMX_PKI_CLX_STYLEX_CFG2(style,
+								     cluster),
+					    scfg2.u64);
+			}
+		}
+		cluster++;
+	}
+	/* Custom-Mask Tag (Part 2): allocate one shared tag index and
+	 * program the (node-global, non-clustered) TAG_INC mask/control
+	 * registers.  All four masks use the same base index per the
+	 * function contract.  If allocation fails we bail out without
+	 * touching TAG_SEL.
+	 */
+	if (mtag_en) {
+		tag_idx = cvmx_pki_mtag_idx_alloc(node, -1);
+		if (tag_idx < 0)
+			return;
+
+		tag_sel.u64 = csr_rd_node(node, CVMX_PKI_STYLEX_TAG_SEL(style));
+		for (mask = 0; mask < 4; mask++) {
+			if (tag_cfg->mask_tag[mask].enable) {
+				switch (mask) {
+				case 0:
+					tag_sel.s.tag_idx0 = tag_idx;
+					break;
+				case 1:
+					tag_sel.s.tag_idx1 = tag_idx;
+					break;
+				case 2:
+					tag_sel.s.tag_idx2 = tag_idx;
+					break;
+				case 3:
+					tag_sel.s.tag_idx3 = tag_idx;
+					break;
+				}
+				/* TAG_INC registers are grouped 4 per index */
+				index = tag_idx * 4 + mask;
+				tag_mask.u64 = csr_rd_node(
+					node, CVMX_PKI_TAG_INCX_MASK(index));
+				tag_mask.s.en = tag_cfg->mask_tag[mask].val;
+				csr_wr_node(node, CVMX_PKI_TAG_INCX_MASK(index),
+					    tag_mask.u64);
+
+				tag_ctl.u64 = csr_rd_node(
+					node, CVMX_PKI_TAG_INCX_CTL(index));
+				tag_ctl.s.ptr_sel =
+					tag_cfg->mask_tag[mask].base;
+				tag_ctl.s.offset =
+					tag_cfg->mask_tag[mask].offset;
+				csr_wr_node(node, CVMX_PKI_TAG_INCX_CTL(index),
+					    tag_ctl.u64);
+			}
+		}
+		csr_wr_node(node, CVMX_PKI_STYLEX_TAG_SEL(style), tag_sel.u64);
+	}
+}
+
+/**
+ * This function reads parameters associated with style in hardware.
+ *
+ * @param node Node number.
+ * @param style Style to read from.
+ * @param cluster_mask Mask of clusters style belongs to.
+ * @param style_cfg Pointer to style config struct.
+ */
+void cvmx_pki_read_style_config(int node, int style, uint64_t cluster_mask,
+				struct cvmx_pki_style_config *style_cfg)
+{
+	cvmx_pki_clx_stylex_cfg_t scfg;
+	cvmx_pki_clx_stylex_cfg2_t scfg2;
+	cvmx_pki_clx_stylex_alg_t style_alg;
+	cvmx_pki_stylex_buf_t style_buf;
+	/* Only the first cluster of the mask is read back
+	 * (__builtin_ffsll() is 1-based, hence the -1).
+	 */
+	int cluster = __builtin_ffsll(cluster_mask) - 1;
+
+	scfg.u64 = csr_rd_node(node, CVMX_PKI_CLX_STYLEX_CFG(style, cluster));
+	scfg2.u64 = csr_rd_node(node, CVMX_PKI_CLX_STYLEX_CFG2(style, cluster));
+	style_alg.u64 =
+		csr_rd_node(node, CVMX_PKI_CLX_STYLEX_ALG(style, cluster));
+	/* STYLEX_BUF is per-node, not per-cluster */
+	style_buf.u64 = csr_rd_node(node, CVMX_PKI_STYLEX_BUF(style));
+
+	/* Error-check enables, QPG settings and drop policy from CFG */
+	style_cfg->parm_cfg.ip6_udp_opt = scfg.s.ip6_udp_opt;
+	style_cfg->parm_cfg.lenerr_en = scfg.s.lenerr_en;
+	style_cfg->parm_cfg.lenerr_eqpad = scfg.s.lenerr_eqpad;
+	style_cfg->parm_cfg.maxerr_en = scfg.s.maxerr_en;
+	style_cfg->parm_cfg.minerr_en = scfg.s.minerr_en;
+	style_cfg->parm_cfg.fcs_chk = scfg.s.fcs_chk;
+	style_cfg->parm_cfg.fcs_strip = scfg.s.fcs_strip;
+	style_cfg->parm_cfg.minmax_sel = scfg.s.minmax_sel;
+	style_cfg->parm_cfg.qpg_base = scfg.s.qpg_base;
+	style_cfg->parm_cfg.qpg_dis_padd = scfg.s.qpg_dis_padd;
+	style_cfg->parm_cfg.qpg_dis_aura = scfg.s.qpg_dis_aura;
+	style_cfg->parm_cfg.qpg_dis_grp = scfg.s.qpg_dis_grp;
+	style_cfg->parm_cfg.qpg_dis_grptag = scfg.s.qpg_dis_grptag;
+	style_cfg->parm_cfg.rawdrp = scfg.s.rawdrp;
+	style_cfg->parm_cfg.force_drop = scfg.s.drop;
+	style_cfg->parm_cfg.nodrop = scfg.s.nodrop;
+
+	/* Per-layer length and checksum check enables from CFG2 */
+	style_cfg->parm_cfg.len_lg = scfg2.s.len_lg;
+	style_cfg->parm_cfg.len_lf = scfg2.s.len_lf;
+	style_cfg->parm_cfg.len_le = scfg2.s.len_le;
+	style_cfg->parm_cfg.len_ld = scfg2.s.len_ld;
+	style_cfg->parm_cfg.len_lc = scfg2.s.len_lc;
+	style_cfg->parm_cfg.len_lb = scfg2.s.len_lb;
+	style_cfg->parm_cfg.csum_lg = scfg2.s.csum_lg;
+	style_cfg->parm_cfg.csum_lf = scfg2.s.csum_lf;
+	style_cfg->parm_cfg.csum_le = scfg2.s.csum_le;
+	style_cfg->parm_cfg.csum_ld = scfg2.s.csum_ld;
+	style_cfg->parm_cfg.csum_lc = scfg2.s.csum_lc;
+	style_cfg->parm_cfg.csum_lb = scfg2.s.csum_lb;
+
+	/* Tag type and QPG algorithm parameters from ALG */
+	style_cfg->parm_cfg.qpg_qos = style_alg.s.qpg_qos;
+	style_cfg->parm_cfg.tag_type = style_alg.s.tt;
+	style_cfg->parm_cfg.apad_nip = style_alg.s.apad_nip;
+	style_cfg->parm_cfg.qpg_port_sh = style_alg.s.qpg_port_sh;
+	style_cfg->parm_cfg.qpg_port_msb = style_alg.s.qpg_port_msb;
+	style_cfg->parm_cfg.wqe_vs = style_alg.s.wqe_vs;
+
+	/* Buffer layout - hardware stores several fields in units of
+	 * 8 bytes (128 for wqe_skip); convert back to bytes here.
+	 */
+	style_cfg->parm_cfg.pkt_lend = style_buf.s.pkt_lend;
+	style_cfg->parm_cfg.wqe_hsz = style_buf.s.wqe_hsz;
+	style_cfg->parm_cfg.wqe_skip = style_buf.s.wqe_skip * 128;
+	style_cfg->parm_cfg.first_skip = style_buf.s.first_skip * 8;
+	style_cfg->parm_cfg.later_skip = style_buf.s.later_skip * 8;
+	style_cfg->parm_cfg.cache_mode = style_buf.s.opc_mode;
+	style_cfg->parm_cfg.mbuff_size = style_buf.s.mb_size * 8;
+	style_cfg->parm_cfg.dis_wq_dat = style_buf.s.dis_wq_dat;
+
+	/* Tag generation settings are read by a dedicated helper */
+	cvmx_pki_read_tag_config(node, style, cluster_mask,
+				 &style_cfg->tag_cfg);
+}
+
+/**
+ * This function writes/configures parameters associated with style in hardware.
+ *
+ * @param node Node number.
+ * @param style Style to configure.
+ * @param cluster_mask Mask of clusters to configure the style for.
+ * @param style_cfg Pointer to style config struct.
+ */
+void cvmx_pki_write_style_config(int node, uint64_t style, u64 cluster_mask,
+				 struct cvmx_pki_style_config *style_cfg)
+{
+	cvmx_pki_clx_stylex_cfg_t scfg;
+	cvmx_pki_clx_stylex_cfg2_t scfg2;
+	cvmx_pki_clx_stylex_alg_t style_alg;
+	cvmx_pki_stylex_buf_t style_buf;
+	unsigned int cluster = 0;
+
+	/* Program the per-cluster style registers identically on every
+	 * cluster selected in cluster_mask.
+	 */
+	while (cluster < CVMX_PKI_NUM_CLUSTER) {
+		if (cluster_mask & (0x01L << cluster)) {
+			/* Error checks, QPG settings and drop policy */
+			scfg.u64 = csr_rd_node(
+				node, CVMX_PKI_CLX_STYLEX_CFG(style, cluster));
+			scfg.s.ip6_udp_opt = style_cfg->parm_cfg.ip6_udp_opt;
+			scfg.s.lenerr_en = style_cfg->parm_cfg.lenerr_en;
+			scfg.s.lenerr_eqpad = style_cfg->parm_cfg.lenerr_eqpad;
+			scfg.s.maxerr_en = style_cfg->parm_cfg.maxerr_en;
+			scfg.s.minerr_en = style_cfg->parm_cfg.minerr_en;
+			scfg.s.fcs_chk = style_cfg->parm_cfg.fcs_chk;
+			scfg.s.fcs_strip = style_cfg->parm_cfg.fcs_strip;
+			scfg.s.minmax_sel = style_cfg->parm_cfg.minmax_sel;
+			scfg.s.qpg_base = style_cfg->parm_cfg.qpg_base;
+			scfg.s.qpg_dis_padd = style_cfg->parm_cfg.qpg_dis_padd;
+			scfg.s.qpg_dis_aura = style_cfg->parm_cfg.qpg_dis_aura;
+			scfg.s.qpg_dis_grp = style_cfg->parm_cfg.qpg_dis_grp;
+			scfg.s.qpg_dis_grptag =
+				style_cfg->parm_cfg.qpg_dis_grptag;
+			scfg.s.rawdrp = style_cfg->parm_cfg.rawdrp;
+			scfg.s.drop = style_cfg->parm_cfg.force_drop;
+			scfg.s.nodrop = style_cfg->parm_cfg.nodrop;
+			csr_wr_node(node,
+				    CVMX_PKI_CLX_STYLEX_CFG(style, cluster),
+				    scfg.u64);
+
+			/* Per-layer length and checksum check enables */
+			scfg2.u64 = csr_rd_node(
+				node, CVMX_PKI_CLX_STYLEX_CFG2(style, cluster));
+			scfg2.s.len_lg = style_cfg->parm_cfg.len_lg;
+			scfg2.s.len_lf = style_cfg->parm_cfg.len_lf;
+			scfg2.s.len_le = style_cfg->parm_cfg.len_le;
+			scfg2.s.len_ld = style_cfg->parm_cfg.len_ld;
+			scfg2.s.len_lc = style_cfg->parm_cfg.len_lc;
+			scfg2.s.len_lb = style_cfg->parm_cfg.len_lb;
+			scfg2.s.csum_lg = style_cfg->parm_cfg.csum_lg;
+			scfg2.s.csum_lf = style_cfg->parm_cfg.csum_lf;
+			scfg2.s.csum_le = style_cfg->parm_cfg.csum_le;
+			scfg2.s.csum_ld = style_cfg->parm_cfg.csum_ld;
+			scfg2.s.csum_lc = style_cfg->parm_cfg.csum_lc;
+			scfg2.s.csum_lb = style_cfg->parm_cfg.csum_lb;
+			csr_wr_node(node,
+				    CVMX_PKI_CLX_STYLEX_CFG2(style, cluster),
+				    scfg2.u64);
+
+			/* Tag type and QPG algorithm parameters */
+			style_alg.u64 = csr_rd_node(
+				node, CVMX_PKI_CLX_STYLEX_ALG(style, cluster));
+			style_alg.s.qpg_qos = style_cfg->parm_cfg.qpg_qos;
+			style_alg.s.tt = style_cfg->parm_cfg.tag_type;
+			style_alg.s.apad_nip = style_cfg->parm_cfg.apad_nip;
+			style_alg.s.qpg_port_sh =
+				style_cfg->parm_cfg.qpg_port_sh;
+			style_alg.s.qpg_port_msb =
+				style_cfg->parm_cfg.qpg_port_msb;
+			style_alg.s.wqe_vs = style_cfg->parm_cfg.wqe_vs;
+			csr_wr_node(node,
+				    CVMX_PKI_CLX_STYLEX_ALG(style, cluster),
+				    style_alg.u64);
+		}
+		cluster++;
+	}
+	/* STYLEX_BUF is per-node (not clustered).  Hardware stores most
+	 * fields in units of 8 bytes (128 for wqe_skip), so convert from
+	 * bytes here.
+	 */
+	style_buf.u64 = csr_rd_node(node, CVMX_PKI_STYLEX_BUF(style));
+	style_buf.s.pkt_lend = style_cfg->parm_cfg.pkt_lend;
+	style_buf.s.wqe_hsz = style_cfg->parm_cfg.wqe_hsz;
+	style_buf.s.wqe_skip = (style_cfg->parm_cfg.wqe_skip) / 128;
+	style_buf.s.first_skip = (style_cfg->parm_cfg.first_skip) / 8;
+	style_buf.s.later_skip = style_cfg->parm_cfg.later_skip / 8;
+	style_buf.s.opc_mode = style_cfg->parm_cfg.cache_mode;
+	style_buf.s.mb_size = (style_cfg->parm_cfg.mbuff_size) / 8;
+	style_buf.s.dis_wq_dat = style_cfg->parm_cfg.dis_wq_dat;
+	csr_wr_node(node, CVMX_PKI_STYLEX_BUF(style), style_buf.u64);
+
+	/* Tag generation settings are written by a dedicated helper */
+	cvmx_pki_write_tag_config(node, style, cluster_mask,
+				  &style_cfg->tag_cfg);
+}
+
+/**
+ * This function reads qpg entry at specified offset from qpg table.
+ *
+ * @param node Node number.
+ * @param offset Offset in qpg table to read from.
+ * @param qpg_cfg Pointer to structure containing qpg values.
+ */
+int cvmx_pki_read_qpg_entry(int node, int offset,
+			    struct cvmx_pki_qpg_config *qpg_cfg)
+{
+	cvmx_pki_qpg_tblx_t entry;
+
+	/* Reject offsets beyond the end of the QPG table */
+	if (offset >= CVMX_PKI_NUM_QPG_ENTRY) {
+		debug("ERROR: qpg offset %d is >= 2048\n", offset);
+		return -1;
+	}
+
+	/* Fetch the entry and unpack its fields into the config struct */
+	entry.u64 = csr_rd_node(node, CVMX_PKI_QPG_TBLX(offset));
+	qpg_cfg->port_add = entry.s.padd;
+	qpg_cfg->aura_num = entry.s.laura;
+	qpg_cfg->grp_ok = entry.s.grp_ok;
+	qpg_cfg->grp_bad = entry.s.grp_bad;
+	qpg_cfg->grptag_ok = entry.s.grptag_ok;
+	qpg_cfg->grptag_bad = entry.s.grptag_bad;
+	return 0;
+}
+
+/**
+ * This function writes qpg entry at specified offset in qpg table.
+ *
+ * @param node Node number.
+ * @param offset Offset in qpg table to read from.
+ * @param qpg_cfg Pointer to structure containing qpg values.
+ */
+void cvmx_pki_write_qpg_entry(int node, int offset,
+			      struct cvmx_pki_qpg_config *qpg_cfg)
+{
+	cvmx_pki_qpg_tblx_t qpg_tbl;
+
+	/* Validate the offset exactly like cvmx_pki_read_qpg_entry() does;
+	 * an out-of-range offset would read-modify-write an invalid CSR
+	 * address.
+	 */
+	if (offset >= CVMX_PKI_NUM_QPG_ENTRY) {
+		debug("ERROR: qpg offset %d is >= 2048\n", offset);
+		return;
+	}
+
+	/* Read-modify-write the QPG table entry */
+	qpg_tbl.u64 = csr_rd_node(node, CVMX_PKI_QPG_TBLX(offset));
+	qpg_tbl.s.padd = qpg_cfg->port_add;
+	qpg_tbl.s.laura = qpg_cfg->aura_num;
+	qpg_tbl.s.grp_ok = qpg_cfg->grp_ok;
+	qpg_tbl.s.grp_bad = qpg_cfg->grp_bad;
+	qpg_tbl.s.grptag_ok = qpg_cfg->grptag_ok;
+	qpg_tbl.s.grptag_bad = qpg_cfg->grptag_bad;
+	csr_wr_node(node, CVMX_PKI_QPG_TBLX(offset), qpg_tbl.u64);
+}
+
+/**
+ * This function writes pcam entry at given offset in pcam table in hardware
+ *
+ * @param node Node number.
+ * @param index Offset in pcam table.
+ * @param cluster_mask Mask of clusters in which to write pcam entry.
+ * @param input Input keys to pcam match passed as struct.
+ * @param action PCAM match action passed as struct.
+ */
+int cvmx_pki_pcam_write_entry(int node, int index, uint64_t cluster_mask,
+			      struct cvmx_pki_pcam_input input,
+			      struct cvmx_pki_pcam_action action)
+{
+	int bank;
+	unsigned int cluster = 0;
+	cvmx_pki_clx_pcamx_termx_t term;
+	cvmx_pki_clx_pcamx_matchx_t match;
+	cvmx_pki_clx_pcamx_actionx_t act;
+
+	if (index >= CVMX_PKI_TOTAL_PCAM_ENTRY) {
+		debug("\nERROR: Invalid pcam entry %d\n", index);
+		return -1;
+	}
+	/* The low bit of the TERM field selects which of the two PCAM
+	 * banks the entry belongs to.
+	 */
+	bank = (int)(input.field & 0x01);
+	while (cluster < CVMX_PKI_NUM_CLUSTER) {
+		if (cluster_mask & (0x01L << cluster)) {
+			/* Clear the valid bit first so the entry cannot
+			 * match while its MATCH/ACTION words are being
+			 * reprogrammed.
+			 */
+			term.u64 = csr_rd_node(
+				node,
+				CVMX_PKI_CLX_PCAMX_TERMX(cluster, bank, index));
+			term.s.valid = 0;
+			csr_wr_node(node,
+				    CVMX_PKI_CLX_PCAMX_TERMX(cluster, bank,
+							     index),
+				    term.u64);
+			/* PCAM match data is stored as a (data1, data0)
+			 * pair: data1 holds the bits that must be 1,
+			 * data0 the bits that must be 0; unmasked bit
+			 * positions are don't-care.
+			 */
+			match.u64 = csr_rd_node(
+				node, CVMX_PKI_CLX_PCAMX_MATCHX(cluster, bank,
+								index));
+			match.s.data1 = input.data & input.data_mask;
+			match.s.data0 = (~input.data) & input.data_mask;
+			csr_wr_node(node,
+				    CVMX_PKI_CLX_PCAMX_MATCHX(cluster, bank,
+							      index),
+				    match.u64);
+
+			/* Action taken when the entry matches */
+			act.u64 = csr_rd_node(
+				node, CVMX_PKI_CLX_PCAMX_ACTIONX(cluster, bank,
+								 index));
+			act.s.pmc = action.parse_mode_chg;
+			act.s.style_add = action.style_add;
+			act.s.pf = action.parse_flag_set;
+			act.s.setty = action.layer_type_set;
+			act.s.advance = action.pointer_advance;
+			csr_wr_node(node,
+				    CVMX_PKI_CLX_PCAMX_ACTIONX(cluster, bank,
+							       index),
+				    act.u64);
+
+			/* Finally program TERM (field/style in the same
+			 * 1/0 pair encoding) and set valid=1 to arm the
+			 * entry in a single write.
+			 */
+			term.u64 = csr_rd_node(
+				node,
+				CVMX_PKI_CLX_PCAMX_TERMX(cluster, bank, index));
+			term.s.term1 = input.field & input.field_mask;
+			term.s.term0 = (~input.field) & input.field_mask;
+			term.s.style1 = input.style & input.style_mask;
+			term.s.style0 = (~input.style) & input.style_mask;
+			term.s.valid = 1;
+			csr_wr_node(node,
+				    CVMX_PKI_CLX_PCAMX_TERMX(cluster, bank,
+							     index),
+				    term.u64);
+		}
+		cluster++;
+	}
+	return 0;
+}
+
+/**
+ * Enables/Disables QoS (RED Drop, Tail Drop & backpressure) for the PKI aura.
+ *
+ * @param node Node number
+ * @param aura To enable/disable QoS on.
+ * @param ena_red Enable/Disable RED drop between pass and drop level
+ * 1-enable 0-disable
+ * @param ena_drop Enable/disable tail drop when max drop level exceeds
+ * 1-enable 0-disable
+ * @param ena_bp Enable/Disable asserting backpressure on bpid when max
+ * DROP level exceeds.
+ * 1-enable 0-disable
+ */
+int cvmx_pki_enable_aura_qos(int node, int aura, bool ena_red, bool ena_drop,
+			     bool ena_bp)
+{
+	cvmx_pki_aurax_cfg_t cfg;
+
+	/* Reject aura numbers beyond the supported range */
+	if (aura >= CVMX_PKI_NUM_AURA) {
+		debug("ERROR: %s aura = %d\n", __func__, aura);
+		return -1;
+	}
+
+	/* Read-modify-write the per-aura QoS enable bits */
+	cfg.u64 = csr_rd_node(node, CVMX_PKI_AURAX_CFG(aura));
+	cfg.s.ena_bp = ena_bp;
+	cfg.s.ena_drop = ena_drop;
+	cfg.s.ena_red = ena_red;
+	csr_wr_node(node, CVMX_PKI_AURAX_CFG(aura), cfg.u64);
+
+	return 0;
+}
+
+/**
+ * Configures the bpid on which, specified aura will assert backpressure.
+ * Each bpid receives backpressure from auras.
+ * Multiple auras can backpressure single bpid.
+ *
+ * @param node Node number.
+ * @param aura Number which will assert backpressure on that bpid.
+ * @param bpid To assert backpressure on.
+ */
+int cvmx_pki_write_aura_bpid(int node, int aura, int bpid)
+{
+	cvmx_pki_aurax_cfg_t aura_cfg;
+	int ena_bp;
+	int idx;
+	int users;
+
+	/* Range-check both the aura and the bpid */
+	if (aura >= CVMX_PKI_NUM_AURA || bpid >= CVMX_PKI_NUM_BPID) {
+		debug("ERROR: aura=%d or bpid=%d is out or range\n", aura,
+		      bpid);
+		return -1;
+	}
+
+	if (OCTEON_IS_MODEL(OCTEON_CN78XX)) {
+		/* Workaround for Errata PKI-24364:
+		 * Inform about assigning the same BPID to multiple auras
+		 * having different ENA_BP.
+		 */
+		aura_cfg.u64 = csr_rd_node(node, CVMX_PKI_AURAX_CFG(aura));
+		ena_bp = aura_cfg.s.ena_bp;
+		users = 1;
+		for (idx = 0; idx < CVMX_PKI_NUM_AURA; idx++) {
+			if (idx == aura)
+				continue;
+			aura_cfg.u64 = csr_rd_node(node,
+						   CVMX_PKI_AURAX_CFG(idx));
+			if (aura_cfg.s.bpid == bpid &&
+			    aura_cfg.s.ena_bp != ena_bp)
+				users++;
+		}
+		if (users > 1)
+			debug("WARNING: BPID(%d) is used by %d AURAs.\n"
+			      "\tEnable|disable backpressure for all AURAs on this BPID.\n",
+			      bpid, users);
+	}
+
+	/* Point this aura at the requested bpid */
+	aura_cfg.u64 = csr_rd_node(node, CVMX_PKI_AURAX_CFG(aura));
+	aura_cfg.s.bpid = bpid;
+	csr_wr_node(node, CVMX_PKI_AURAX_CFG(aura), aura_cfg.u64);
+	return 0;
+}
+
+/**
+ * Configures the channel which will receive backpressure
+ * from the specified bpid.
+ * Each channel listens for backpressure on a specific bpid.
+ * Each bpid can backpressure multiple channels.
+ *
+ * @param node Node number.
+ * @param bpid BPID from which, channel will receive backpressure.
+ * @param channel Channel number to receive backpressure.
+ */
+int cvmx_pki_write_channel_bpid(int node, int channel, int bpid)
+{
+	cvmx_pki_chanx_cfg_t cfg;
+
+	/* Validate the channel and bpid numbers */
+	if (channel >= CVMX_PKI_NUM_CHANNEL || bpid >= CVMX_PKI_NUM_BPID) {
+		debug("ERROR: %s channel = %d bpid = %d\n", __func__, channel,
+		      bpid);
+		return -1;
+	}
+
+	/* Read-modify-write the channel's listening bpid */
+	cfg.u64 = csr_rd_node(node, CVMX_PKI_CHANX_CFG(channel));
+	cfg.s.bpid = bpid;
+	csr_wr_node(node, CVMX_PKI_CHANX_CFG(channel), cfg.u64);
+	return 0;
+}
+
+/**
+ * This function gives the initial style used by that pkind.
+ *
+ * @param node Node number.
+ * @param pkind PKIND number.
+ */
+int cvmx_pki_get_pkind_style(int node, int pkind)
+{
+	cvmx_pki_clx_pkindx_style_t pstyle;
+
+	/* Only cluster 0 is consulted here; presumably all clusters are
+	 * programmed identically - TODO confirm for multi-group setups.
+	 */
+	pstyle.u64 = csr_rd_node(node, CVMX_PKI_CLX_PKINDX_STYLE(pkind, 0));
+	return pstyle.s.style;
+}
+
+/**
+ * This function sets the wqe buffer mode. First packet data buffer can
+ * reside either in same buffer as wqe OR it can go in separate buffer.
+ * If the latter mode is used, make sure software allocates enough buffers
+ * now that the wqe is separate from the packet data.
+ *
+ * @param node Node number.
+ * @param style Style to configure.
+ * @param pkt_outside_wqe 0 = The packet link pointer will be at word [FIRST_SKIP]
+ * immediately followed by packet data, in the same buffer as the work queue entry.
+ * 1 = The packet link pointer will be at word [FIRST_SKIP] in a new buffer
+ * separate from the work queue entry. Words following the WQE in the same
+ * cache line will be zeroed, other lines in the buffer will not be modified
+ * and will retain stale data (from the buffer’s previous use). This setting may
+ * decrease the peak PKI performance by up to half on small packets.
+ */
+void cvmx_pki_set_wqe_mode(int node, uint64_t style, bool pkt_outside_wqe)
+{
+	cvmx_pki_stylex_buf_t buf;
+
+	/* DIS_WQ_DAT selects whether packet data shares the WQE buffer */
+	buf.u64 = csr_rd_node(node, CVMX_PKI_STYLEX_BUF(style));
+	buf.s.dis_wq_dat = pkt_outside_wqe;
+	csr_wr_node(node, CVMX_PKI_STYLEX_BUF(style), buf.u64);
+}
+
+/**
+ * This function sets the Packet mode of all ports and styles to little-endian.
+ * It Changes write operations of packet data to L2C to be in little-endian.
+ * Does not change the WQE header format, which is properly endian neutral.
+ *
+ * @param node Node number.
+ * @param style Style to configure.
+ */
+void cvmx_pki_set_little_endian(int node, uint64_t style)
+{
+	cvmx_pki_stylex_buf_t buf;
+
+	/* PKT_LEND switches L2C packet-data writes to little-endian */
+	buf.u64 = csr_rd_node(node, CVMX_PKI_STYLEX_BUF(style));
+	buf.s.pkt_lend = 1;
+	csr_wr_node(node, CVMX_PKI_STYLEX_BUF(style), buf.u64);
+}
+
+/**
+ * Enables/Disables fcs check and fcs stripping on the pkind.
+ *
+ * @param node Node number
+ * @param pknd PKIND to apply settings on.
+ * @param fcs_chk Enable/disable fcs check.
+ * 1 = enable fcs error check.
+ * 0 = disable fcs error check.
+ * @param fcs_strip Strip L2 FCS bytes from packet, decrease WQE[LEN] by 4 bytes
+ * 1 = strip L2 FCS.
+ * 0 = Do not strip L2 FCS.
+ */
+void cvmx_pki_endis_fcs_check(int node, int pknd, bool fcs_chk, bool fcs_strip)
+{
+	cvmx_pki_clx_stylex_cfg_t scfg;
+	cvmx_pki_clx_pkindx_style_t pkind_style;
+	unsigned int cl;
+	int style;
+
+	/* Validate PKIND # */
+	if (pknd >= CVMX_PKI_NUM_PKIND) {
+		printf("%s: PKIND %d out of range\n", __func__, pknd);
+		return;
+	}
+
+	for (cl = 0; cl < CVMX_PKI_NUM_CLUSTER; cl++) {
+		/* Look up the style this pkind maps to on this cluster */
+		pkind_style.u64 = csr_rd_node(
+			node, CVMX_PKI_CLX_PKINDX_STYLE(pknd, cl));
+		style = pkind_style.s.style;
+		/* Skip invalid STYLE # */
+		if (style >= CVMX_PKI_NUM_INTERNAL_STYLE)
+			continue;
+		/* Update that style's FCS check/strip enables */
+		scfg.u64 = csr_rd_node(node,
+				       CVMX_PKI_CLX_STYLEX_CFG(style, cl));
+		scfg.s.fcs_chk = fcs_chk;
+		scfg.s.fcs_strip = fcs_strip;
+		csr_wr_node(node, CVMX_PKI_CLX_STYLEX_CFG(style, cl),
+			    scfg.u64);
+	}
+}
+
+/**
+ * Enables/Disables l2 length error check and max & min frame length checks
+ *
+ * @param node Node number
+ * @param pknd PKIND to disable error for.
+ * @param l2len_err L2 length error check enable.
+ * @param maxframe_err Max frame error check enable.
+ * @param minframe_err Min frame error check enable.
+ *	  1 = Enable error checks
+ *	  0 = Disable error checks
+ */
+void cvmx_pki_endis_l2_errs(int node, int pknd, bool l2len_err,
+			    bool maxframe_err, bool minframe_err)
+{
+	cvmx_pki_clx_stylex_cfg_t scfg;
+	cvmx_pki_clx_pkindx_style_t pkind_style;
+	unsigned int cl;
+	int style;
+
+	/* Validate PKIND # */
+	if (pknd >= CVMX_PKI_NUM_PKIND) {
+		printf("%s: PKIND %d out of range\n", __func__, pknd);
+		return;
+	}
+
+	for (cl = 0; cl < CVMX_PKI_NUM_CLUSTER; cl++) {
+		/* Look up the style this pkind maps to on this cluster */
+		pkind_style.u64 = csr_rd_node(
+			node, CVMX_PKI_CLX_PKINDX_STYLE(pknd, cl));
+		style = pkind_style.s.style;
+		/* Skip invalid STYLE # */
+		if (style >= CVMX_PKI_NUM_INTERNAL_STYLE)
+			continue;
+		/* Update the length and frame-size error-check enables */
+		scfg.u64 = csr_rd_node(node,
+				       CVMX_PKI_CLX_STYLEX_CFG(style, cl));
+		scfg.s.lenerr_en = l2len_err;
+		scfg.s.maxerr_en = maxframe_err;
+		scfg.s.minerr_en = minframe_err;
+		csr_wr_node(node, CVMX_PKI_CLX_STYLEX_CFG(style, cl),
+			    scfg.u64);
+	}
+}
+
+/**
+ * Disables maximum & minimum frame length checks
+ *
+ * @param node Node number.
+ * @param pknd PKIND to disable error for.
+ */
+void cvmx_pki_dis_frame_len_chk(int node, int pknd)
+{
+	int style;
+	unsigned int cluster;
+	cvmx_pki_clx_pkindx_style_t pstyle;
+	cvmx_pki_clx_stylex_cfg_t style_cfg;
+
+	/* Validate PKIND #, consistent with cvmx_pki_endis_l2_errs() */
+	if (pknd >= CVMX_PKI_NUM_PKIND) {
+		printf("%s: PKIND %d out of range\n", __func__, pknd);
+		return;
+	}
+
+	for (cluster = 0; cluster < CVMX_PKI_NUM_CLUSTER; cluster++) {
+		/* Look up the style this pkind maps to on this cluster */
+		pstyle.u64 = csr_rd_node(
+			node, CVMX_PKI_CLX_PKINDX_STYLE(pknd, cluster));
+		style = pstyle.s.style;
+		/* Skip invalid STYLE #, as the sibling helpers do */
+		if (style >= CVMX_PKI_NUM_INTERNAL_STYLE)
+			continue;
+		/* Clear both the max and min frame length check enables */
+		style_cfg.u64 = csr_rd_node(
+			node, CVMX_PKI_CLX_STYLEX_CFG(style, cluster));
+		style_cfg.s.maxerr_en = 0;
+		style_cfg.s.minerr_en = 0;
+		csr_wr_node(node, CVMX_PKI_CLX_STYLEX_CFG(style, cluster),
+			    style_cfg.u64);
+	}
+}
+
+/**
+ * This function shows the qpg table entries, read directly from hardware.
+ *
+ * @param node Node number
+ * @param num_entry Number of entries to show
+ */
+void cvmx_pki_show_qpg_entries(int node, uint16_t num_entry)
+{
+	cvmx_pki_qpg_tblx_t entry;
+	int i;
+
+	/* Clamp the request to the size of the QPG table */
+	if (num_entry > CVMX_PKI_NUM_QPG_ENTRY)
+		num_entry = CVMX_PKI_NUM_QPG_ENTRY;
+
+	for (i = 0; i < num_entry; i++) {
+		entry.u64 = csr_rd_node(node, CVMX_PKI_QPG_TBLX(i));
+		debug("\n%d ", i);
+		debug("PADD %-16lu", (unsigned long)entry.s.padd);
+		debug("GRP_OK %-16lu", (unsigned long)entry.s.grp_ok);
+		debug("GRP_BAD %-16lu", (unsigned long)entry.s.grp_bad);
+		debug("LAURA %-16lu", (unsigned long)entry.s.laura);
+	}
+}
+
+/**
+ * This function shows the pcam table in raw format,
+ * read directly from hardware.
+ *
+ * @param node Node number
+ */
+void cvmx_pki_show_pcam_entries(int node)
+{
+	int cluster;
+	int index;
+	int bank;
+
+	/* Dump the raw TERM/MATCH/ACTION words for every entry of both
+	 * PCAM banks on every cluster, valid or not.
+	 */
+	for (cluster = 0; cluster < (int)CVMX_PKI_NUM_CLUSTER; cluster++) {
+		for (bank = 0; bank < 2; bank++) {
+			debug("\n--------------Cluster %1d Bank %1d-------------\n",
+			      cluster, bank);
+			debug("index TERM DATA, ACTION");
+			for (index = 0; index < CVMX_PKI_NUM_PCAM_ENTRY;
+			     index++) {
+				debug("\n%d", index);
+				debug(" %-16lx",
+				      (unsigned long)csr_rd_node(
+					      node,
+					      CVMX_PKI_CLX_PCAMX_TERMX(
+						      cluster, bank, index)));
+				debug(" %-16lx",
+				      (unsigned long)csr_rd_node(
+					      node,
+					      CVMX_PKI_CLX_PCAMX_MATCHX(
+						      cluster, bank, index)));
+				debug(" %-16lx",
+				      (unsigned long)csr_rd_node(
+					      node,
+					      CVMX_PKI_CLX_PCAMX_ACTIONX(
+						      cluster, bank, index)));
+			}
+		}
+	}
+}
+
+/**
+ * This function shows the valid entries in readable format,
+ * read directly from hardware.
+ *
+ * @param node Node number.
+ */
+void cvmx_pki_show_valid_pcam_entries(int node)
+{
+	int cluster;
+	int index;
+	int bank;
+	cvmx_pki_clx_pcamx_termx_t term;
+	cvmx_pki_clx_pcamx_matchx_t match;
+	cvmx_pki_clx_pcamx_actionx_t act;
+
+	/* Walk both PCAM banks on every cluster, decoding and printing
+	 * only entries whose valid bit is set.
+	 */
+	for (cluster = 0; cluster < (int)CVMX_PKI_NUM_CLUSTER; cluster++) {
+		for (bank = 0; bank < 2; bank++) {
+			debug("\n--------------Cluster %1d Bank %1d---------------------\n",
+			      cluster, bank);
+			debug("%-10s%-17s%-19s%-18s", "index", "TERM1:TERM0",
+			      "Style1:Style0", "Data1:Data0");
+			debug("%-6s", "ACTION[pmc:style_add:pf:setty:advance]");
+			for (index = 0; index < CVMX_PKI_NUM_PCAM_ENTRY;
+			     index++) {
+				term.u64 = csr_rd_node(
+					node, CVMX_PKI_CLX_PCAMX_TERMX(
+						      cluster, bank, index));
+				/* MATCH/ACTION are only fetched for valid
+				 * entries to avoid pointless CSR reads.
+				 */
+				if (term.s.valid) {
+					match.u64 = csr_rd_node(
+						node,
+						CVMX_PKI_CLX_PCAMX_MATCHX(
+							cluster, bank, index));
+					act.u64 = csr_rd_node(
+						node,
+						CVMX_PKI_CLX_PCAMX_ACTIONX(
+							cluster, bank, index));
+					debug("\n%-13d", index);
+					debug("%-2x:%x", term.s.term1,
+					      term.s.term0);
+					debug(" %-2x:%x", term.s.style1,
+					      term.s.style0);
+					debug(" %-8x:%x", match.s.data1,
+					      match.s.data0);
+					debug(" %-2x:%-2x :%-1x :%2x :%-2x",
+					      act.s.pmc, act.s.style_add,
+					      act.s.pf, act.s.setty,
+					      act.s.advance);
+				}
+			}
+		}
+	}
+}
+
+/**
+ * This function shows the pkind attributes in readable format,
+ * read directly from hardware.
+ *
+ * @param node Node number
+ * @param pkind PKIND info to print
+ */
+void cvmx_pki_show_pkind_attributes(int node, int pkind)
+{
+	unsigned int cluster = 0;
+	int index;
+	cvmx_pki_pkindx_icgsel_t icgsel;
+	cvmx_pki_clx_pkindx_style_t pstyle;
+	cvmx_pki_icgx_cfg_t icg_cfg;
+	cvmx_pki_clx_stylex_cfg_t style_cfg;
+	cvmx_pki_clx_stylex_alg_t style_alg;
+
+	if (pkind >= CVMX_PKI_NUM_PKIND) {
+		debug("ERROR: PKIND %d is beyond range\n", pkind);
+		return;
+	}
+
+	/* Zero-initialize so the qpg prints at the end are well defined
+	 * even when the ICG cluster mask is empty and the loop below
+	 * matches no cluster.
+	 */
+	style_cfg.u64 = 0;
+	style_alg.u64 = 0;
+
+	debug("Showing stats for pkind %d------------------\n", pkind);
+	icgsel.u64 = csr_rd_node(node, CVMX_PKI_PKINDX_ICGSEL(pkind));
+	debug("cluster group: %d\n", icgsel.s.icg);
+	icg_cfg.u64 = csr_rd_node(node, CVMX_PKI_ICGX_CFG(icgsel.s.icg));
+	debug("cluster mask of the group: 0x%x\n", icg_cfg.s.clusters);
+
+	/* Dump the configuration from the first cluster that belongs to
+	 * this pkind's cluster group.
+	 */
+	while (cluster < CVMX_PKI_NUM_CLUSTER) {
+		if (icg_cfg.s.clusters & (0x01L << cluster)) {
+			debug("pkind %d config 0x%llx\n", pkind,
+			      (unsigned long long)csr_rd_node(
+				      node,
+				      CVMX_PKI_CLX_PKINDX_CFG(pkind, cluster)));
+			pstyle.u64 = csr_rd_node(
+				node,
+				CVMX_PKI_CLX_PKINDX_STYLE(pkind, cluster));
+			debug("initial parse Mode: %d\n", pstyle.s.pm);
+			debug("initial_style: %d\n", pstyle.s.style);
+			style_alg.u64 = csr_rd_node(
+				node, CVMX_PKI_CLX_STYLEX_ALG(pstyle.s.style,
+							      cluster));
+			debug("style_alg: 0x%llx\n",
+			      (unsigned long long)style_alg.u64);
+			style_cfg.u64 = csr_rd_node(
+				node, CVMX_PKI_CLX_STYLEX_CFG(pstyle.s.style,
+							      cluster));
+			debug("style_cfg: 0x%llx\n",
+			      (unsigned long long)style_cfg.u64);
+			debug("style_cfg2: 0x%llx\n",
+			      (unsigned long long)csr_rd_node(
+				      node, CVMX_PKI_CLX_STYLEX_CFG2(
+						    pstyle.s.style, cluster)));
+			debug("style_buf: 0x%llx\n",
+			      (unsigned long long)csr_rd_node(
+				      node,
+				      CVMX_PKI_STYLEX_BUF(pstyle.s.style)));
+			break;
+		}
+		/* Advance to the next cluster; without this increment the
+		 * loop spins forever when cluster 0 is not in the mask.
+		 */
+		cluster++;
+	}
+	debug("qpg base: %d\n", style_cfg.s.qpg_base);
+	debug("qpg qos: %d\n", style_alg.s.qpg_qos);
+	for (index = 0; index < 8; index++) {
+		debug("qpg index %d: 0x%llx\n", (index + style_cfg.s.qpg_base),
+		      (unsigned long long)csr_rd_node(
+			      node,
+			      CVMX_PKI_QPG_TBLX(style_cfg.s.qpg_base + index)));
+	}
+}
+
+/*
+ * Work around stuck PKI statistics reads: if @value (the result of a
+ * just-completed read of @addr) has any bit at or above bit 48 set, the
+ * read is treated as bogus and the register is re-read, up to 20 times,
+ * until a sane value comes back.
+ *
+ * NOTE(review): the corrected value is not returned to the caller; the
+ * caller keeps its original read. Confirm whether the hardware
+ * self-corrects so a later read observes the fixed value.
+ *
+ * @param node  Node the register lives on
+ * @param value Value obtained from the initial read of @addr
+ * @param addr  CSR address to re-read
+ */
+static void readcorrect(int node, u64 value, u64 addr)
+{
+	int cnt = 0;
+
+	while (value >= (1ull << 48) && cnt++ < 20)
+		value = csr_rd_node(node, addr);
+
+	/* Report only if the value is still bogus after all retries; the
+	 * previous check (cnt >= 20) also fired when the final retry had
+	 * actually succeeded.
+	 */
+	if (value >= (1ull << 48))
+		debug("count stuck for 0x%llx\n", (unsigned long long)addr);
+}
+
+/**
+ * Get the status counters for index from PKI.
+ *
+ * @param node Node number
+ * @param index PKIND number (if PKI_STATS_CTL:mode=0) or
+ *              style(flow) number (if PKI_STATS_CTL:mode=1)
+ * @param status Where to put the results.
+ */
+void cvmx_pki_get_stats(int node, int index, struct cvmx_pki_port_stats *status)
+{
+	/* One overlay union per PKI statistics CSR that is read below */
+	cvmx_pki_statx_stat0_t stat0;
+	cvmx_pki_statx_stat1_t stat1;
+	cvmx_pki_statx_stat2_t stat2;
+	cvmx_pki_statx_stat3_t stat3;
+	cvmx_pki_statx_stat4_t stat4;
+	cvmx_pki_statx_stat5_t stat5;
+	cvmx_pki_statx_stat6_t stat6;
+	cvmx_pki_statx_stat7_t stat7;
+	cvmx_pki_statx_stat8_t stat8;
+	cvmx_pki_statx_stat9_t stat9;
+	cvmx_pki_statx_stat10_t stat10;
+	cvmx_pki_statx_stat11_t stat11;
+	cvmx_pki_statx_stat14_t stat14;
+	cvmx_pki_statx_stat15_t stat15;
+	cvmx_pki_statx_stat16_t stat16;
+	cvmx_pki_statx_stat17_t stat17;
+	cvmx_pki_statx_hist0_t hist0;
+	cvmx_pki_statx_hist1_t hist1;
+	cvmx_pki_statx_hist2_t hist2;
+	cvmx_pki_statx_hist3_t hist3;
+	cvmx_pki_statx_hist4_t hist4;
+	cvmx_pki_statx_hist5_t hist5;
+	cvmx_pki_statx_hist6_t hist6;
+	cvmx_pki_pkndx_inb_stat0_t inb_stat0;
+	cvmx_pki_pkndx_inb_stat1_t inb_stat1;
+	cvmx_pki_pkndx_inb_stat2_t inb_stat2;
+
+	/* Accessing PKI stat registers can timeout based on the Errata
+	 * PKI-20775, disable SLI_INT_SUM[RML_TO] before reading the stats
+	 * enable back after clearing the interrupt.
+	 */
+	cvmx_error_intsn_disable_v3(node, 0x1f000);
+	/* Read each counter once; readcorrect() re-reads it while the
+	 * returned value looks stuck (bit 48 or above set), as part of the
+	 * same errata workaround.
+	 */
+	stat0.u64 = csr_rd_node(node, CVMX_PKI_STATX_STAT0(index));
+	readcorrect(node, stat0.u64, CVMX_PKI_STATX_STAT0(index));
+
+	stat1.u64 = csr_rd_node(node, CVMX_PKI_STATX_STAT1(index));
+	readcorrect(node, stat1.u64, CVMX_PKI_STATX_STAT1(index));
+
+	stat2.u64 = csr_rd_node(node, CVMX_PKI_STATX_STAT2(index));
+	readcorrect(node, stat2.u64, CVMX_PKI_STATX_STAT2(index));
+
+	stat3.u64 = csr_rd_node(node, CVMX_PKI_STATX_STAT3(index));
+	readcorrect(node, stat3.u64, CVMX_PKI_STATX_STAT3(index));
+
+	stat4.u64 = csr_rd_node(node, CVMX_PKI_STATX_STAT4(index));
+	readcorrect(node, stat4.u64, CVMX_PKI_STATX_STAT4(index));
+
+	stat5.u64 = csr_rd_node(node, CVMX_PKI_STATX_STAT5(index));
+	readcorrect(node, stat5.u64, CVMX_PKI_STATX_STAT5(index));
+
+	stat6.u64 = csr_rd_node(node, CVMX_PKI_STATX_STAT6(index));
+	readcorrect(node, stat6.u64, CVMX_PKI_STATX_STAT6(index));
+
+	stat7.u64 = csr_rd_node(node, CVMX_PKI_STATX_STAT7(index));
+	readcorrect(node, stat7.u64, CVMX_PKI_STATX_STAT7(index));
+
+	stat8.u64 = csr_rd_node(node, CVMX_PKI_STATX_STAT8(index));
+	readcorrect(node, stat8.u64, CVMX_PKI_STATX_STAT8(index));
+
+	stat9.u64 = csr_rd_node(node, CVMX_PKI_STATX_STAT9(index));
+	readcorrect(node, stat9.u64, CVMX_PKI_STATX_STAT9(index));
+
+	stat10.u64 = csr_rd_node(node, CVMX_PKI_STATX_STAT10(index));
+	readcorrect(node, stat10.u64, CVMX_PKI_STATX_STAT10(index));
+
+	stat11.u64 = csr_rd_node(node, CVMX_PKI_STATX_STAT11(index));
+	readcorrect(node, stat11.u64, CVMX_PKI_STATX_STAT11(index));
+
+	stat14.u64 = csr_rd_node(node, CVMX_PKI_STATX_STAT14(index));
+	readcorrect(node, stat14.u64, CVMX_PKI_STATX_STAT14(index));
+
+	stat15.u64 = csr_rd_node(node, CVMX_PKI_STATX_STAT15(index));
+	readcorrect(node, stat15.u64, CVMX_PKI_STATX_STAT15(index));
+
+	stat16.u64 = csr_rd_node(node, CVMX_PKI_STATX_STAT16(index));
+	readcorrect(node, stat16.u64, CVMX_PKI_STATX_STAT16(index));
+
+	stat17.u64 = csr_rd_node(node, CVMX_PKI_STATX_STAT17(index));
+	readcorrect(node, stat17.u64, CVMX_PKI_STATX_STAT17(index));
+
+	hist0.u64 = csr_rd_node(node, CVMX_PKI_STATX_HIST0(index));
+	readcorrect(node, hist0.u64, CVMX_PKI_STATX_HIST0(index));
+
+	hist1.u64 = csr_rd_node(node, CVMX_PKI_STATX_HIST1(index));
+	readcorrect(node, hist1.u64, CVMX_PKI_STATX_HIST1(index));
+
+	hist2.u64 = csr_rd_node(node, CVMX_PKI_STATX_HIST2(index));
+	readcorrect(node, hist2.u64, CVMX_PKI_STATX_HIST2(index));
+
+	hist3.u64 = csr_rd_node(node, CVMX_PKI_STATX_HIST3(index));
+	readcorrect(node, hist3.u64, CVMX_PKI_STATX_HIST3(index));
+
+	hist4.u64 = csr_rd_node(node, CVMX_PKI_STATX_HIST4(index));
+	readcorrect(node, hist4.u64, CVMX_PKI_STATX_HIST4(index));
+
+	hist5.u64 = csr_rd_node(node, CVMX_PKI_STATX_HIST5(index));
+	readcorrect(node, hist5.u64, CVMX_PKI_STATX_HIST5(index));
+
+	hist6.u64 = csr_rd_node(node, CVMX_PKI_STATX_HIST6(index));
+	readcorrect(node, hist6.u64, CVMX_PKI_STATX_HIST6(index));
+
+	/* Inbound counters are not subject to the re-read workaround */
+	inb_stat0.u64 = csr_rd_node(node, CVMX_PKI_PKNDX_INB_STAT0(index));
+	inb_stat1.u64 = csr_rd_node(node, CVMX_PKI_PKNDX_INB_STAT1(index));
+	inb_stat2.u64 = csr_rd_node(node, CVMX_PKI_PKNDX_INB_STAT2(index));
+
+	/* Unpack the raw CSR fields into the generic port-stats struct */
+	status->dropped_octets = stat4.s.drp_octs;
+	status->dropped_packets = stat3.s.drp_pkts;
+	status->octets = stat1.s.octs;
+	status->pci_raw_packets = stat2.s.raw;
+	status->packets = stat0.s.pkts;
+	status->multicast_packets = stat6.s.mcast;
+	status->broadcast_packets = stat5.s.bcast;
+	/* NOTE(review): the hist0 field is named h1to63 (1-63 byte frames)
+	 * but is reported as len_64_packets -- confirm the intended bucket
+	 * mapping against the hardware manual.
+	 */
+	status->len_64_packets = hist0.s.h1to63;
+	status->len_65_127_packets = hist1.s.h64to127;
+	status->len_128_255_packets = hist2.s.h128to255;
+	status->len_256_511_packets = hist3.s.h256to511;
+	status->len_512_1023_packets = hist4.s.h512to1023;
+	status->len_1024_1518_packets = hist5.s.h1024to1518;
+	status->len_1519_max_packets = hist6.s.h1519;
+	status->fcs_align_err_packets = stat7.s.fcs;
+	status->runt_packets = stat9.s.undersz;
+	status->runt_crc_packets = stat8.s.frag;
+	status->oversize_packets = stat11.s.oversz;
+	status->oversize_crc_packets = stat10.s.jabber;
+	status->mcast_l2_red_packets = stat15.s.drp_mcast;
+	status->bcast_l2_red_packets = stat14.s.drp_bcast;
+	status->mcast_l3_red_packets = stat17.s.drp_mcast;
+	status->bcast_l3_red_packets = stat16.s.drp_bcast;
+	status->inb_packets = inb_stat0.s.pkts;
+	status->inb_octets = inb_stat1.s.octs;
+	status->inb_errors = inb_stat2.s.errs;
+	/* Enable SLI_INT_SUM[RML_TO] interrupt after clear the pending interrupt. */
+	csr_wr_node(node, CVMX_CIU3_ISCX_W1C(0x1f000), 1);
+	cvmx_error_intsn_enable_v3(node, 0x1f000);
+}
+
+/**
+ * Clear the statistics counters for a port.
+ *
+ * @param node Node number
+ * @param port Port number (ipd_port) to get statistics for.
+ * Make sure PKI_STATS_CTL:mode is set to 0 for collecting per port/pkind stats.
+ */
+void cvmx_pki_clear_port_stats(int node, uint64_t port)
+{
+	/* Map the ipd_port to its pkind, which indexes the stat registers */
+	int xipd = cvmx_helper_node_to_ipd_port(node, port);
+	int xiface = cvmx_helper_get_interface_num(xipd);
+	/* NOTE(review): index is derived from the raw port argument rather
+	 * than the node-adjusted xipd -- confirm this is intended on
+	 * multi-node systems.
+	 */
+	int index = cvmx_helper_get_interface_index_num(port);
+	int pknd = cvmx_helper_get_pknd(xiface, index);
+
+	/* Overlay unions for every PKI statistics CSR handled below */
+	cvmx_pki_statx_stat0_t stat0;
+	cvmx_pki_statx_stat1_t stat1;
+	cvmx_pki_statx_stat2_t stat2;
+	cvmx_pki_statx_stat3_t stat3;
+	cvmx_pki_statx_stat4_t stat4;
+	cvmx_pki_statx_stat5_t stat5;
+	cvmx_pki_statx_stat6_t stat6;
+	cvmx_pki_statx_stat7_t stat7;
+	cvmx_pki_statx_stat8_t stat8;
+	cvmx_pki_statx_stat9_t stat9;
+	cvmx_pki_statx_stat10_t stat10;
+	cvmx_pki_statx_stat11_t stat11;
+	cvmx_pki_statx_stat14_t stat14;
+	cvmx_pki_statx_stat15_t stat15;
+	cvmx_pki_statx_stat16_t stat16;
+	cvmx_pki_statx_stat17_t stat17;
+	cvmx_pki_statx_hist0_t hist0;
+	cvmx_pki_statx_hist1_t hist1;
+	cvmx_pki_statx_hist2_t hist2;
+	cvmx_pki_statx_hist3_t hist3;
+	cvmx_pki_statx_hist4_t hist4;
+	cvmx_pki_statx_hist5_t hist5;
+	cvmx_pki_statx_hist6_t hist6;
+	cvmx_pki_pkndx_inb_stat0_t inb_stat0;
+	cvmx_pki_pkndx_inb_stat1_t inb_stat1;
+	cvmx_pki_pkndx_inb_stat2_t inb_stat2;
+
+	/* Read-modify-write: read every counter register first ... */
+	stat0.u64 = csr_rd_node(node, CVMX_PKI_STATX_STAT0(pknd));
+	stat1.u64 = csr_rd_node(node, CVMX_PKI_STATX_STAT1(pknd));
+	stat2.u64 = csr_rd_node(node, CVMX_PKI_STATX_STAT2(pknd));
+	stat3.u64 = csr_rd_node(node, CVMX_PKI_STATX_STAT3(pknd));
+	stat4.u64 = csr_rd_node(node, CVMX_PKI_STATX_STAT4(pknd));
+	stat5.u64 = csr_rd_node(node, CVMX_PKI_STATX_STAT5(pknd));
+	stat6.u64 = csr_rd_node(node, CVMX_PKI_STATX_STAT6(pknd));
+	stat7.u64 = csr_rd_node(node, CVMX_PKI_STATX_STAT7(pknd));
+	stat8.u64 = csr_rd_node(node, CVMX_PKI_STATX_STAT8(pknd));
+	stat9.u64 = csr_rd_node(node, CVMX_PKI_STATX_STAT9(pknd));
+	stat10.u64 = csr_rd_node(node, CVMX_PKI_STATX_STAT10(pknd));
+	stat11.u64 = csr_rd_node(node, CVMX_PKI_STATX_STAT11(pknd));
+	stat14.u64 = csr_rd_node(node, CVMX_PKI_STATX_STAT14(pknd));
+	stat15.u64 = csr_rd_node(node, CVMX_PKI_STATX_STAT15(pknd));
+	stat16.u64 = csr_rd_node(node, CVMX_PKI_STATX_STAT16(pknd));
+	stat17.u64 = csr_rd_node(node, CVMX_PKI_STATX_STAT17(pknd));
+	hist0.u64 = csr_rd_node(node, CVMX_PKI_STATX_HIST0(pknd));
+	hist1.u64 = csr_rd_node(node, CVMX_PKI_STATX_HIST1(pknd));
+	hist2.u64 = csr_rd_node(node, CVMX_PKI_STATX_HIST2(pknd));
+	hist3.u64 = csr_rd_node(node, CVMX_PKI_STATX_HIST3(pknd));
+	hist4.u64 = csr_rd_node(node, CVMX_PKI_STATX_HIST4(pknd));
+	hist5.u64 = csr_rd_node(node, CVMX_PKI_STATX_HIST5(pknd));
+	hist6.u64 = csr_rd_node(node, CVMX_PKI_STATX_HIST6(pknd));
+	inb_stat0.u64 = csr_rd_node(node, CVMX_PKI_PKNDX_INB_STAT0(pknd));
+	inb_stat1.u64 = csr_rd_node(node, CVMX_PKI_PKNDX_INB_STAT1(pknd));
+	inb_stat2.u64 = csr_rd_node(node, CVMX_PKI_PKNDX_INB_STAT2(pknd));
+
+	/* ... zero only the count fields, leaving any other bits intact ... */
+	stat4.s.drp_octs = 0;
+	stat3.s.drp_pkts = 0;
+	stat1.s.octs = 0;
+	stat2.s.raw = 0;
+	stat0.s.pkts = 0;
+	stat6.s.mcast = 0;
+	stat5.s.bcast = 0;
+	hist0.s.h1to63 = 0;
+	hist1.s.h64to127 = 0;
+	hist2.s.h128to255 = 0;
+	hist3.s.h256to511 = 0;
+	hist4.s.h512to1023 = 0;
+	hist5.s.h1024to1518 = 0;
+	hist6.s.h1519 = 0;
+	stat7.s.fcs = 0;
+	stat9.s.undersz = 0;
+	stat8.s.frag = 0;
+	stat11.s.oversz = 0;
+	stat10.s.jabber = 0;
+	stat15.s.drp_mcast = 0;
+	stat14.s.drp_bcast = 0;
+	stat17.s.drp_mcast = 0;
+	stat16.s.drp_bcast = 0;
+	inb_stat0.s.pkts = 0;
+	inb_stat1.s.octs = 0;
+	inb_stat2.s.errs = 0;
+
+	/* ... and write the cleared values back to hardware. */
+	csr_wr_node(node, CVMX_PKI_STATX_STAT0(pknd), stat0.u64);
+	csr_wr_node(node, CVMX_PKI_STATX_STAT1(pknd), stat1.u64);
+	csr_wr_node(node, CVMX_PKI_STATX_STAT2(pknd), stat2.u64);
+	csr_wr_node(node, CVMX_PKI_STATX_STAT3(pknd), stat3.u64);
+	csr_wr_node(node, CVMX_PKI_STATX_STAT4(pknd), stat4.u64);
+	csr_wr_node(node, CVMX_PKI_STATX_STAT5(pknd), stat5.u64);
+	csr_wr_node(node, CVMX_PKI_STATX_STAT6(pknd), stat6.u64);
+	csr_wr_node(node, CVMX_PKI_STATX_STAT7(pknd), stat7.u64);
+	csr_wr_node(node, CVMX_PKI_STATX_STAT8(pknd), stat8.u64);
+	csr_wr_node(node, CVMX_PKI_STATX_STAT9(pknd), stat9.u64);
+	csr_wr_node(node, CVMX_PKI_STATX_STAT10(pknd), stat10.u64);
+	csr_wr_node(node, CVMX_PKI_STATX_STAT11(pknd), stat11.u64);
+	csr_wr_node(node, CVMX_PKI_STATX_STAT14(pknd), stat14.u64);
+	csr_wr_node(node, CVMX_PKI_STATX_STAT15(pknd), stat15.u64);
+	csr_wr_node(node, CVMX_PKI_STATX_STAT16(pknd), stat16.u64);
+	csr_wr_node(node, CVMX_PKI_STATX_STAT17(pknd), stat17.u64);
+	csr_wr_node(node, CVMX_PKI_STATX_HIST0(pknd), hist0.u64);
+	csr_wr_node(node, CVMX_PKI_STATX_HIST1(pknd), hist1.u64);
+	csr_wr_node(node, CVMX_PKI_STATX_HIST2(pknd), hist2.u64);
+	csr_wr_node(node, CVMX_PKI_STATX_HIST3(pknd), hist3.u64);
+	csr_wr_node(node, CVMX_PKI_STATX_HIST4(pknd), hist4.u64);
+	csr_wr_node(node, CVMX_PKI_STATX_HIST5(pknd), hist5.u64);
+	csr_wr_node(node, CVMX_PKI_STATX_HIST6(pknd), hist6.u64);
+	csr_wr_node(node, CVMX_PKI_PKNDX_INB_STAT0(pknd), inb_stat0.u64);
+	csr_wr_node(node, CVMX_PKI_PKNDX_INB_STAT1(pknd), inb_stat1.u64);
+	csr_wr_node(node, CVMX_PKI_PKNDX_INB_STAT2(pknd), inb_stat2.u64);
+}
--
2.35.1
^ permalink raw reply related [flat|nested] 52+ messages in thread* [PATCH 34/52] mips: octeon: Add cvmx-pko.c
2022-03-30 10:06 [PATCH 00/52] mips: octeon: Add ethernet support Stefan Roese
` (28 preceding siblings ...)
2022-03-30 10:07 ` [PATCH 32/52] mips: octeon: Add cvmx-pki.c Stefan Roese
@ 2022-03-30 10:07 ` Stefan Roese
2022-03-30 10:07 ` [PATCH 35/52] mips: octeon: Add cvmx-pko3.c Stefan Roese
` (19 subsequent siblings)
49 siblings, 0 replies; 52+ messages in thread
From: Stefan Roese @ 2022-03-30 10:07 UTC (permalink / raw)
To: u-boot; +Cc: awilliams, cchavva
From: Aaron Williams <awilliams@marvell.com>
Import cvmx-pko.c from 2013 U-Boot. It will be used by the later
added drivers to support networking on the MIPS Octeon II / III
platforms.
Signed-off-by: Aaron Williams <awilliams@marvell.com>
Signed-off-by: Stefan Roese <sr@denx.de>
---
arch/mips/mach-octeon/cvmx-pko.c | 1110 ++++++++++++++++++++++++++++++
1 file changed, 1110 insertions(+)
create mode 100644 arch/mips/mach-octeon/cvmx-pko.c
diff --git a/arch/mips/mach-octeon/cvmx-pko.c b/arch/mips/mach-octeon/cvmx-pko.c
new file mode 100644
index 000000000000..6340020410b0
--- /dev/null
+++ b/arch/mips/mach-octeon/cvmx-pko.c
@@ -0,0 +1,1110 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018-2022 Marvell International Ltd.
+ *
+ * Support library for the hardware Packet Output unit.
+ */
+
+#include <errno.h>
+#include <log.h>
+#include <time.h>
+#include <linux/delay.h>
+
+#include <mach/cvmx-regs.h>
+#include <mach/cvmx-csr.h>
+#include <mach/cvmx-bootmem.h>
+#include <mach/octeon-model.h>
+#include <mach/cvmx-fuse.h>
+#include <mach/octeon-feature.h>
+#include <mach/cvmx-qlm.h>
+#include <mach/octeon_qlm.h>
+#include <mach/cvmx-pcie.h>
+#include <mach/cvmx-coremask.h>
+
+#include <mach/cvmx-agl-defs.h>
+#include <mach/cvmx-bgxx-defs.h>
+#include <mach/cvmx-ciu-defs.h>
+#include <mach/cvmx-gmxx-defs.h>
+#include <mach/cvmx-gserx-defs.h>
+#include <mach/cvmx-ilk-defs.h>
+#include <mach/cvmx-iob-defs.h>
+#include <mach/cvmx-ipd-defs.h>
+#include <mach/cvmx-pcsx-defs.h>
+#include <mach/cvmx-pcsxx-defs.h>
+#include <mach/cvmx-pki-defs.h>
+#include <mach/cvmx-pko-defs.h>
+#include <mach/cvmx-xcv-defs.h>
+
+#include <mach/cvmx-hwpko.h>
+#include <mach/cvmx-ilk.h>
+#include <mach/cvmx-pki.h>
+#include <mach/cvmx-pko3.h>
+#include <mach/cvmx-pko3-queue.h>
+#include <mach/cvmx-pko3-resources.h>
+
+#include <mach/cvmx-helper.h>
+#include <mach/cvmx-helper-board.h>
+#include <mach/cvmx-helper-cfg.h>
+
+#include <mach/cvmx-helper-bgx.h>
+#include <mach/cvmx-helper-cfg.h>
+#include <mach/cvmx-helper-util.h>
+#include <mach/cvmx-helper-pki.h>
+#include <mach/cvmx-helper-pko.h>
+
+DECLARE_GLOBAL_DATA_PTR;
+
+#define CVMX_PKO_NQ_PER_PORT_MAX 32
+
+/* Forward declaration: CN68XX (PKO2) specific port configuration, used
+ * by cvmx_pko_config_port() below.
+ */
+static cvmx_pko_return_value_t cvmx_pko2_config_port(short ipd_port,
+						     int base_queue,
+						     int num_queues,
+						     const u8 priority[]);
+
+/* Compile-time switch for the verbose debug() traces in this file */
+static const int debug;
+
+/**
+ * Internal state of packet output
+ */
+
+/*
+ * PKO port iterator
+ * XXX this macro only works for 68XX
+ */
+
+/* Iterates over PKO ports that have a valid queue base configured */
+#define pko_for_each_port(__p)                                                \
+	for (__p = 0; __p < CVMX_HELPER_CFG_MAX_PKO_PORT; __p++)              \
+		if (__cvmx_helper_cfg_pko_queue_base(__p) !=                  \
+		    CVMX_HELPER_CFG_INVALID_VALUE)
+
+/*
+ * @INTERNAL
+ *
+ * Get INT for a port
+ *
+ * @param interface
+ * @param index
+ * @return the INT value on success and -1 on error
+ *
+ * This function is only for CN68XX.
+ */
+static int __cvmx_pko_int(int interface, int index)
+{
+	/* Per-interface INT encoding: the highest legal index (exclusive),
+	 * the base INT value, and whether the index is added to the base.
+	 */
+	static const struct {
+		int index_limit;
+		int base;
+		int add_index;
+	} int_map[] = {
+		{ 4, 0x00, 1 },		/* interface 0 */
+		{ 1, 0x04, 0 },		/* interface 1 */
+		{ 4, 0x08, 1 },		/* interface 2 */
+		{ 4, 0x0C, 1 },		/* interface 3 */
+		{ 4, 0x10, 1 },		/* interface 4 */
+		{ 256, 0x1C, 0 },	/* interface 5 */
+		{ 256, 0x1D, 0 },	/* interface 6 */
+		{ 32, 0x1E, 0 },	/* interface 7 */
+		{ 8, 0x1F, 0 },		/* interface 8 */
+	};
+
+	cvmx_helper_cfg_assert(interface < CVMX_HELPER_MAX_IFACE);
+	cvmx_helper_cfg_assert(index >= 0);
+
+	if (interface < 0 || interface > 8)
+		return -1;
+
+	cvmx_helper_cfg_assert(index < int_map[interface].index_limit);
+
+	return int_map[interface].base +
+	       (int_map[interface].add_index ? index : 0);
+}
+
+int cvmx_pko_get_base_pko_port(int interface, int index)
+{
+	/* Only PKND-style chips that are not PKO3 (CN78XX) use the config
+	 * table; everything else maps straight to the IPD port number.
+	 */
+	if (octeon_has_feature(OCTEON_FEATURE_PKND) &&
+	    !octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE))
+		return __cvmx_helper_cfg_pko_port_base(interface, index);
+
+	return cvmx_helper_get_ipd_port(interface, index);
+}
+
+int cvmx_pko_get_num_pko_ports(int interface, int index)
+{
+	/* Only PKND-style chips that are not PKO3 (CN78XX) may map more
+	 * than one PKO port onto an interface/index pair.
+	 */
+	if (octeon_has_feature(OCTEON_FEATURE_PKND) &&
+	    !octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE))
+		return __cvmx_helper_cfg_pko_port_num(interface, index);
+
+	return 1;
+}
+
+int cvmx_pko_get_base_queue(int port)
+{
+	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE))
+		return cvmx_pko3_get_queue_base(port);
+
+	if (octeon_has_feature(OCTEON_FEATURE_PKND))
+		return __cvmx_helper_cfg_pko_queue_base(
+			cvmx_helper_cfg_ipd2pko_port_base(port));
+
+	/* Legacy PKO: only ports 0-47 have a static queue-table entry */
+	return port < 48 ? cvmx_pko_queue_table[port].ccppp_queue_base :
+			   CVMX_PKO_ILLEGAL_QUEUE;
+}
+
+/**
+ * For a given PKO port number, return the base output queue
+ * for the port.
+ *
+ * @param pko_port PKO port number
+ * @return Base output queue
+ */
+int cvmx_pko_get_base_queue_pkoid(int pko_port)
+{
+	return octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE) ?
+		       cvmx_pko3_get_queue_base(pko_port) :
+		       __cvmx_helper_cfg_pko_queue_base(pko_port);
+}
+
+/**
+ * For a given PKO port number, return the number of output queues
+ * for the port.
+ *
+ * @param pko_port PKO port number
+ * @return the number of output queues
+ */
+int cvmx_pko_get_num_queues_pkoid(int pko_port)
+{
+	return octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE) ?
+		       cvmx_pko3_get_queue_num(pko_port) :
+		       __cvmx_helper_cfg_pko_queue_num(pko_port);
+}
+
+int cvmx_pko_get_num_queues(int port)
+{
+	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE))
+		return cvmx_pko3_get_queue_num(port);
+
+	if (octeon_has_feature(OCTEON_FEATURE_PKND))
+		return __cvmx_helper_cfg_pko_queue_num(
+			cvmx_helper_cfg_ipd2pko_port_base(port));
+
+	/* Legacy PKO: queue counts exist only for ports 0-47 */
+	return port < 48 ? cvmx_pko_queue_table[port].ccppp_num_queues : 0;
+}
+
+/**
+ * Show queues for the internal ports
+ */
+void cvmx_pko_show_queue_map(void)
+{
+	int port;
+
+	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
+		debug("%s: not supported on this chip\n", __func__);
+		return;
+	}
+
+	if (octeon_has_feature(OCTEON_FEATURE_PKND)) {
+		/* Dump every configured PKO port from the config table */
+		pko_for_each_port(port) {
+			debug("pko_port %d (interface%d index%d) has %d queues (queue base = %d)\n",
+			      port, __cvmx_helper_cfg_pko_port_interface(port),
+			      __cvmx_helper_cfg_pko_port_index(port),
+			      __cvmx_helper_cfg_pko_queue_num(port),
+			      __cvmx_helper_cfg_pko_queue_base(port));
+		}
+		return;
+	}
+
+	/* Legacy PKO: print the queue range for each of the 40 ports */
+	debug("pko queue info\n");
+	for (port = 0; port < 40; port++) {
+		debug("%3d=%3d-%3d ", port,
+		      cvmx_pko_get_base_queue(port),
+		      cvmx_pko_get_base_queue(port) +
+			      cvmx_pko_get_num_queues(port) - 1);
+		if (((port + 1) % 4) == 0)
+			debug("\n");
+	}
+	debug("\n");
+}
+
+/*
+ * Allocate memory for PKO engines.
+ *
+ * @param engine is the PKO engine ID.
+ * @return # of 2KB-chunks allocated to this PKO engine.
+ */
+static int __cvmx_pko_memory_per_engine_o68(int engine)
+{
+	/* CN68XX provides 40KB, divided between the engines in 2KB chunks */
+	const int total_chunks = 40 / 2;
+	int num_engines = __cvmx_helper_cfg_pko_max_engine();
+	int chunks_per_engine = total_chunks / num_engines;
+
+	/* Unused engines get no space */
+	if (engine >= num_engines)
+		return 0;
+
+	/*
+	 * The last engine absorbs the space lost by rounding. This means
+	 * the ILK gets the most space
+	 */
+	if (engine == num_engines - 1)
+		return total_chunks - engine * chunks_per_engine;
+
+	/* All other engines get the same space */
+	return chunks_per_engine;
+}
+
+/*
+ * Setup one-to-one mapping between PKO2 iport and eport.
+ * @INTERNAL
+ *
+ * First invalidates every iport, then programs PKO_MEM_IPORT_PTRS for
+ * each configured PKO port. CN68XX only (called from cvmx_pko_hw_init()).
+ */
+static void __cvmx_pko2_chip_init(void)
+{
+	int i;
+	int interface, index, port;
+	cvmx_helper_interface_mode_t mode;
+	union cvmx_pko_mem_iport_ptrs config;
+
+	/*
+	 * Initialize every iport with the invalid eid.
+	 */
+#define CVMX_O68_PKO2_INVALID_EID 31
+	config.u64 = 0;
+	config.s.eid = CVMX_O68_PKO2_INVALID_EID;
+	for (i = 0; i < CVMX_HELPER_CFG_MAX_PKO_PORT; i++) {
+		config.s.ipid = i;
+		csr_wr(CVMX_PKO_MEM_IPORT_PTRS, config.u64);
+	}
+
+	/*
+	 * Set up PKO_MEM_IPORT_PTRS
+	 */
+	pko_for_each_port(port) {
+		interface = __cvmx_helper_cfg_pko_port_interface(port);
+		index = __cvmx_helper_cfg_pko_port_index(port);
+		mode = cvmx_helper_interface_get_mode(interface);
+
+		/* Skip ports whose interface is not in use */
+		if (mode == CVMX_HELPER_INTERFACE_MODE_DISABLED)
+			continue;
+
+		config.s.ipid = port;
+		config.s.qos_mask = 0xff;	/* enable all 8 QOS rounds */
+		config.s.crc = __cvmx_helper_get_has_fcs(interface);
+		config.s.min_pkt = __cvmx_helper_get_pko_padding(interface);
+		config.s.intr = __cvmx_pko_int(interface, index);
+		config.s.eid = __cvmx_helper_cfg_pko_port_eid(port);
+		/* LOOP interfaces have no pipes (see __cvmx_pko_get_pipe());
+		 * use the index there, the pko_port everywhere else.
+		 */
+		config.s.pipe = (mode == CVMX_HELPER_INTERFACE_MODE_LOOP) ?
+					index :
+					port;
+		csr_wr(CVMX_PKO_MEM_IPORT_PTRS, config.u64);
+	}
+}
+
+int __cvmx_pko_get_pipe(int interface, int index)
+{
+	cvmx_helper_interface_mode_t mode =
+		cvmx_helper_interface_get_mode(interface);
+
+	/* The loopback ports do not have pipes */
+	if (mode == CVMX_HELPER_INTERFACE_MODE_LOOP)
+		return -1;
+
+	/* We use pko_port as the pipe. See __cvmx_pko_port_map_o68(). */
+	return cvmx_helper_get_pko_port(interface, index);
+}
+
+/* Legacy (pre-PKO2) chip setup: park every output queue on the illegal
+ * PID so nothing transmits until cvmx_pko_config_port() runs. Called
+ * from cvmx_pko_hw_init() for all non-CN68XX chips.
+ */
+static void __cvmx_pko1_chip_init(void)
+{
+	int queue;
+	union cvmx_pko_mem_queue_ptrs config;
+	union cvmx_pko_reg_queue_ptrs1 config1;
+	const int port = CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID;
+
+	/* Initialize all queues to connect to port 63 (ILLEGAL_PID) */
+	for (queue = 0; queue < CVMX_PKO_MAX_OUTPUT_QUEUES; queue++) {
+		/* PTRS1 carries the high bits of the queue id and index */
+		config1.u64 = 0;
+		config1.s.idx3 = 0;
+		config1.s.qid7 = queue >> 7;
+
+		config.u64 = 0;
+		config.s.tail = 1;
+		config.s.index = 0;
+		config.s.port = port;
+		config.s.queue = queue;
+		config.s.buf_ptr = 0;
+
+		/* PTRS1 is written first, then the PTRS entry it qualifies */
+		csr_wr(CVMX_PKO_REG_QUEUE_PTRS1, config1.u64);
+		csr_wr(CVMX_PKO_MEM_QUEUE_PTRS, config.u64);
+	}
+}
+
+/**
+ * Call before any other calls to initialize the packet
+ * output system. This does chip global config, and should only be
+ * done by one core.
+ *
+ * @param pool FPA pool number holding the PKO command buffers
+ * @param bufsize size of one command buffer in bytes
+ */
+void cvmx_pko_hw_init(u8 pool, unsigned int bufsize)
+{
+	union cvmx_pko_reg_cmd_buf config;
+	union cvmx_iob_fau_timeout fau_to;
+	int i;
+
+	if (debug)
+		debug("%s: pool=%u bufsz=%u\n", __func__, pool, bufsize);
+
+	/* chip-specific setup. */
+	if (OCTEON_IS_MODEL(OCTEON_CN68XX))
+		__cvmx_pko2_chip_init();
+	else
+		__cvmx_pko1_chip_init();
+
+	/*
+	 * Set the size of the PKO command buffers to an odd number of
+	 * 64bit words. This allows the normal two word send to stay
+	 * aligned and never span a command word buffer.
+	 */
+	config.u64 = 0;
+	config.s.pool = pool;
+	config.s.size = bufsize / 8 - 1;
+	csr_wr(CVMX_PKO_REG_CMD_BUF, config.u64);
+
+	/*
+	 * Disable tagwait FAU timeout. This needs to be done before
+	 * anyone might start packet output using tags.
+	 */
+	fau_to.u64 = 0;
+	fau_to.s.tout_val = 0xfff;
+	fau_to.s.tout_enb = 0;
+	csr_wr(CVMX_IOB_FAU_TIMEOUT, fau_to.u64);
+
+	if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
+		union cvmx_pko_reg_min_pkt min_pkt;
+
+		/* NOTE(review): sets minimum packet sizes 1-7 to 59; the
+		 * meaning of 59 is inferred from the register name only --
+		 * confirm against the CN68XX HRM.
+		 */
+		min_pkt.u64 = 0;
+		min_pkt.s.size1 = 59;
+		min_pkt.s.size2 = 59;
+		min_pkt.s.size3 = 59;
+		min_pkt.s.size4 = 59;
+		min_pkt.s.size5 = 59;
+		min_pkt.s.size6 = 59;
+		min_pkt.s.size7 = 59;
+		csr_wr(CVMX_PKO_REG_MIN_PKT, min_pkt.u64);
+	}
+
+	/*
+	 * If we aren't using all of the queues optimize PKO's
+	 * internal memory.
+	 */
+	if (OCTEON_IS_OCTEON2() || OCTEON_IS_MODEL(OCTEON_CN70XX)) {
+		int max_queues = __cvmx_helper_cfg_pko_max_queue();
+
+		/* Fewer queues allow a more compact internal queue mode */
+		if (OCTEON_IS_MODEL(OCTEON_CN68XX) && max_queues <= 32)
+			csr_wr(CVMX_PKO_REG_QUEUE_MODE, 3);
+		else if (max_queues <= 64)
+			csr_wr(CVMX_PKO_REG_QUEUE_MODE, 2);
+		else if (max_queues <= 128)
+			csr_wr(CVMX_PKO_REG_QUEUE_MODE, 1);
+		else
+			csr_wr(CVMX_PKO_REG_QUEUE_MODE, 0);
+		if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
+			/* Distribute the engine storage across the two
+			 * ENGINE_STORAGEX registers, 16 engines each; see
+			 * __cvmx_pko_memory_per_engine_o68().
+			 */
+			for (i = 0; i < 2; i++) {
+				union cvmx_pko_reg_engine_storagex
+					engine_storage;
+
+#define PKO_ASSIGN_ENGINE_STORAGE(index)                                      \
+	engine_storage.s.engine##index =                                      \
+		__cvmx_pko_memory_per_engine_o68(16 * i + (index))
+
+				engine_storage.u64 = 0;
+				PKO_ASSIGN_ENGINE_STORAGE(0);
+				PKO_ASSIGN_ENGINE_STORAGE(1);
+				PKO_ASSIGN_ENGINE_STORAGE(2);
+				PKO_ASSIGN_ENGINE_STORAGE(3);
+				PKO_ASSIGN_ENGINE_STORAGE(4);
+				PKO_ASSIGN_ENGINE_STORAGE(5);
+				PKO_ASSIGN_ENGINE_STORAGE(6);
+				PKO_ASSIGN_ENGINE_STORAGE(7);
+				PKO_ASSIGN_ENGINE_STORAGE(8);
+				PKO_ASSIGN_ENGINE_STORAGE(9);
+				PKO_ASSIGN_ENGINE_STORAGE(10);
+				PKO_ASSIGN_ENGINE_STORAGE(11);
+				PKO_ASSIGN_ENGINE_STORAGE(12);
+				PKO_ASSIGN_ENGINE_STORAGE(13);
+				PKO_ASSIGN_ENGINE_STORAGE(14);
+				PKO_ASSIGN_ENGINE_STORAGE(15);
+				csr_wr(CVMX_PKO_REG_ENGINE_STORAGEX(i),
+				       engine_storage.u64);
+			}
+		}
+	}
+}
+
+/**
+ * Enables the packet output hardware. It must already be
+ * configured.
+ */
+void cvmx_pko_enable(void)
+{
+	union cvmx_pko_reg_flags flags;
+
+	/* Read-modify-write PKO_REG_FLAGS; warn on a double enable */
+	flags.u64 = csr_rd(CVMX_PKO_REG_FLAGS);
+	if (flags.s.ena_pko)
+		debug("Warning: Enabling PKO when PKO already enabled.\n");
+
+	/* Don't-write-back is a helper-config option */
+	flags.s.ena_dwb = cvmx_helper_cfg_opt_get(CVMX_HELPER_CFG_OPT_USE_DWB);
+	flags.s.ena_pko = 1;
+	/*
+	 * always enable big endian for 3-word command. Does nothing
+	 * for 2-word.
+	 */
+	flags.s.store_be = 1;
+	csr_wr(CVMX_PKO_REG_FLAGS, flags.u64);
+}
+
+/**
+ * Disables the packet output. Does not affect any configuration.
+ */
+void cvmx_pko_disable(void)
+{
+	union cvmx_pko_reg_flags flags;
+
+	/* Read-modify-write: clear only the enable bit */
+	flags.u64 = csr_rd(CVMX_PKO_REG_FLAGS);
+	flags.s.ena_pko = 0;
+	csr_wr(CVMX_PKO_REG_FLAGS, flags.u64);
+}
+
+/**
+ * @INTERNAL
+ * Reset the packet output.
+ */
+static void __cvmx_pko_reset(void)
+{
+	union cvmx_pko_reg_flags flags;
+
+	/* Read-modify-write: set only the reset bit */
+	flags.u64 = csr_rd(CVMX_PKO_REG_FLAGS);
+	flags.s.reset = 1;
+	csr_wr(CVMX_PKO_REG_FLAGS, flags.u64);
+}
+
+/**
+ * Shutdown and free resources required by packet output.
+ *
+ * Disables PKO, detaches every output queue from its port, shuts down
+ * the per-queue command queues, resets the unit, and frees the queue
+ * range allocations.
+ */
+void cvmx_pko_shutdown(void)
+{
+	int queue;
+
+	cvmx_pko_disable();
+
+	if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
+		/* PKO2 (CN68XX): invalidate each internal queue pointer,
+		 * then shut its command queue down.
+		 */
+		union cvmx_pko_mem_iqueue_ptrs config;
+
+		config.u64 = 0;
+		for (queue = 0; queue < CVMX_PKO_MAX_OUTPUT_QUEUES; queue++) {
+			config.s.qid = queue;
+			csr_wr(CVMX_PKO_MEM_IQUEUE_PTRS, config.u64);
+			cvmx_cmd_queue_shutdown(CVMX_CMD_QUEUE_PKO(queue));
+		}
+	} else {
+		/* Legacy PKO: repoint each queue at the illegal PID before
+		 * shutting its command queue down.
+		 */
+		union cvmx_pko_mem_queue_ptrs config;
+
+		for (queue = 0; queue < CVMX_PKO_MAX_OUTPUT_QUEUES; queue++) {
+			union cvmx_pko_reg_queue_ptrs1 config1;
+
+			config.u64 = 0;
+			config.s.tail = 1;
+			config.s.index = 0;
+			config.s.port = CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID;
+			config.s.queue = queue & 0x7f;
+			config.s.qos_mask = 0;
+			config.s.buf_ptr = 0;
+
+			/* PTRS1 holds the high bit of the queue id */
+			config1.u64 = 0;
+			config1.s.qid7 = queue >> 7;
+			csr_wr(CVMX_PKO_REG_QUEUE_PTRS1, config1.u64);
+			csr_wr(CVMX_PKO_MEM_QUEUE_PTRS, config.u64);
+			cvmx_cmd_queue_shutdown(CVMX_CMD_QUEUE_PKO(queue));
+		}
+	}
+
+	__cvmx_pko_reset();
+	cvmx_pko_queue_free_all();
+}
+
+/**
+ * Configure a output port and the associated queues for use.
+ *
+ * @param port Port to configure.
+ * @param base_queue First queue number to associate with this port.
+ * @param num_queues Number of queues to associate with this port
+ * @param priority Array of priority levels for each queue. Values are
+ * allowed to be 0-8. A value of 8 get 8 times the traffic
+ * of a value of 1. A value of 0 indicates that no rounds
+ * will be participated in. These priorities can be changed
+ * on the fly while the pko is enabled. A priority of 9
+ * indicates that static priority should be used. If static
+ * priority is used all queues with static priority must be
+ * contiguous starting at the base_queue, and lower numbered
+ * queues have higher priority than higher numbered queues.
+ * There must be num_queues elements in the array.
+ */
+cvmx_pko_return_value_t cvmx_pko_config_port(int port, int base_queue,
+ int num_queues,
+ const u8 priority[])
+{
+ cvmx_pko_return_value_t result_code;
+ int queue;
+ union cvmx_pko_mem_queue_ptrs config;
+ union cvmx_pko_reg_queue_ptrs1 config1;
+ int static_priority_base = -1;
+ int static_priority_end = -1;
+ int outputbuffer_pool = (int)cvmx_fpa_get_pko_pool();
+ u64 outputbuffer_pool_size = cvmx_fpa_get_pko_pool_block_size();
+
+ /* This function is not used for CN68XX */
+ if (OCTEON_IS_MODEL(OCTEON_CN68XX))
+ return cvmx_pko2_config_port(port, base_queue, num_queues,
+ priority);
+
+ if (debug)
+ debug("%s: port=%d queue=%d-%d pri %#x %#x %#x %#x\n", __func__,
+ port, base_queue, (base_queue + num_queues - 1),
+ priority[0], priority[1], priority[2], priority[3]);
+
+ /* The need to handle ILLEGAL_PID port argument
+ * is obsolete now, the code here can be simplified.
+ */
+
+ if (port >= CVMX_PKO_NUM_OUTPUT_PORTS &&
+ port != CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID) {
+ debug("ERROR: %s: Invalid port %llu\n", __func__,
+ (unsigned long long)port);
+ return CVMX_PKO_INVALID_PORT;
+ }
+
+ if (base_queue + num_queues > CVMX_PKO_MAX_OUTPUT_QUEUES) {
+ debug("ERROR: %s: Invalid queue range port = %lld base=%llu numques=%lld\n",
+ __func__, (unsigned long long)port,
+ (unsigned long long)base_queue,
+ (unsigned long long)num_queues);
+ return CVMX_PKO_INVALID_QUEUE;
+ }
+
+ if (port != CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID) {
+ /*
+ * Validate the static queue priority setup and set
+ * static_priority_base and static_priority_end
+ * accordingly.
+ */
+ for (queue = 0; queue < num_queues; queue++) {
+ /* Find first queue of static priority */
+ int p_queue = queue % 16;
+
+ if (static_priority_base == -1 &&
+ priority[p_queue] == CVMX_PKO_QUEUE_STATIC_PRIORITY)
+ static_priority_base = queue;
+ /* Find last queue of static priority */
+ if (static_priority_base != -1 &&
+ static_priority_end == -1 &&
+ priority[p_queue] !=
+ CVMX_PKO_QUEUE_STATIC_PRIORITY &&
+ queue)
+ static_priority_end = queue - 1;
+ else if (static_priority_base != -1 &&
+ static_priority_end == -1 &&
+ queue == num_queues - 1)
+ /* all queues're static priority */
+ static_priority_end = queue;
+
+ /*
+ * Check to make sure all static priority
+ * queues are contiguous. Also catches some
+ * cases of static priorites not starting at
+ * queue 0.
+ */
+ if (static_priority_end != -1 &&
+ (int)queue > static_priority_end &&
+ priority[p_queue] ==
+ CVMX_PKO_QUEUE_STATIC_PRIORITY) {
+ debug("ERROR: %s: Static priority queues aren't contiguous or don't start at base queue. q: %d, eq: %d\n",
+ __func__, (int)queue, static_priority_end);
+ return CVMX_PKO_INVALID_PRIORITY;
+ }
+ }
+ if (static_priority_base > 0) {
+ debug("ERROR: %s: Static priority queues don't start at base queue. sq: %d\n",
+ __func__, static_priority_base);
+ return CVMX_PKO_INVALID_PRIORITY;
+ }
+ }
+
+ /*
+ * At this point, static_priority_base and static_priority_end
+ * are either both -1, or are valid start/end queue numbers
+ */
+
+ result_code = CVMX_PKO_SUCCESS;
+
+ for (queue = 0; queue < num_queues; queue++) {
+ u64 *buf_ptr = NULL;
+ int p_queue = queue % 16;
+
+ config1.u64 = 0;
+ config1.s.idx3 = queue >> 3;
+ config1.s.qid7 = (base_queue + queue) >> 7;
+
+ config.u64 = 0;
+ config.s.tail = queue == (num_queues - 1);
+ config.s.index = queue;
+ config.s.port = port;
+ config.s.queue = base_queue + queue;
+
+ config.s.static_p = static_priority_base >= 0;
+ config.s.static_q = (int)queue <= static_priority_end;
+ config.s.s_tail = (int)queue == static_priority_end;
+ /*
+ * Convert the priority into an enable bit field. Try
+ * to space the bits out evenly so the packet don't
+ * get grouped up.
+ */
+ switch ((int)priority[p_queue]) {
+ case 0:
+ config.s.qos_mask = 0x00;
+ break;
+ case 1:
+ config.s.qos_mask = 0x01;
+ break;
+ case 2:
+ config.s.qos_mask = 0x11;
+ break;
+ case 3:
+ config.s.qos_mask = 0x49;
+ break;
+ case 4:
+ config.s.qos_mask = 0x55;
+ break;
+ case 5:
+ config.s.qos_mask = 0x57;
+ break;
+ case 6:
+ config.s.qos_mask = 0x77;
+ break;
+ case 7:
+ config.s.qos_mask = 0x7f;
+ break;
+ case 8:
+ config.s.qos_mask = 0xff;
+ break;
+ case CVMX_PKO_QUEUE_STATIC_PRIORITY:
+ config.s.qos_mask = 0xff;
+ break;
+ default:
+ debug("ERROR: %s: Invalid priority %llu\n", __func__,
+ (unsigned long long)priority[p_queue]);
+ config.s.qos_mask = 0xff;
+ result_code = CVMX_PKO_INVALID_PRIORITY;
+ break;
+ }
+
+ if (port != CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID) {
+ cvmx_cmd_queue_result_t cmd_res;
+
+ cmd_res = cvmx_cmd_queue_initialize(
+ CVMX_CMD_QUEUE_PKO(base_queue + queue),
+ CVMX_PKO_MAX_QUEUE_DEPTH, outputbuffer_pool,
+ outputbuffer_pool_size -
+ CVMX_PKO_COMMAND_BUFFER_SIZE_ADJUST *
+ 8);
+ if (cmd_res != CVMX_CMD_QUEUE_SUCCESS) {
+ switch (cmd_res) {
+ case CVMX_CMD_QUEUE_NO_MEMORY:
+ debug("ERROR: %s: Unable to allocate output buffer\n",
+ __func__);
+ return CVMX_PKO_NO_MEMORY;
+ case CVMX_CMD_QUEUE_ALREADY_SETUP:
+ debug("ERROR: %s: Port already setup. port=%d\n",
+ __func__, (int)port);
+ return CVMX_PKO_PORT_ALREADY_SETUP;
+ case CVMX_CMD_QUEUE_INVALID_PARAM:
+ default:
+ debug("ERROR: %s: Command queue initialization failed.\n",
+ __func__);
+ return CVMX_PKO_CMD_QUEUE_INIT_ERROR;
+ }
+ }
+
+ buf_ptr = (u64 *)cvmx_cmd_queue_buffer(
+ CVMX_CMD_QUEUE_PKO(base_queue + queue));
+ config.s.buf_ptr = cvmx_ptr_to_phys(buf_ptr);
+ } else {
+ config.s.buf_ptr = 0;
+ }
+
+ CVMX_SYNCWS;
+
+ csr_wr(CVMX_PKO_REG_QUEUE_PTRS1, config1.u64);
+ csr_wr(CVMX_PKO_MEM_QUEUE_PTRS, config.u64);
+ }
+
+ return result_code;
+}
+
+/*
+ * Configure queues for an internal PKO2 port.
+ * @INTERNAL
+ *
+ * @param ipd_port   IPD port number whose queues are configured
+ * @param base_queue first PKO queue assigned to this port
+ * @param num_queues number of consecutive queues assigned
+ * @param priority   per-queue priority table (indexed modulo 16 during
+ *                   validation, modulo 8 in the configuration loop below)
+ * @return 0 (error detection is incomplete, see final comment)
+ *
+ * @note this is the PKO2 equivalent to cvmx_pko_config_port()
+ */
+static cvmx_pko_return_value_t cvmx_pko2_config_port(short ipd_port,
+						     int base_queue,
+						     int num_queues,
+						     const u8 priority[])
+{
+	int queue, pko_port;
+	int static_priority_base;
+	int static_priority_end;
+	union cvmx_pko_mem_iqueue_ptrs config;
+	u64 *buf_ptr = NULL;
+	/* Command buffers come from the dedicated PKO FPA pool */
+	int outputbuffer_pool = (int)cvmx_fpa_get_pko_pool();
+	u64 outputbuffer_pool_size = cvmx_fpa_get_pko_pool_block_size();
+
+	/* Map the IPD port to its base internal PKO port */
+	pko_port = cvmx_helper_cfg_ipd2pko_port_base(ipd_port);
+
+	if (debug)
+		debug("%s: ipd_port %d pko_iport %d qbase %d qnum %d\n",
+		      __func__, ipd_port, pko_port, base_queue, num_queues);
+
+	static_priority_base = -1;
+	static_priority_end = -1;
+
+	/*
+	 * Static queue priority validation: find the contiguous range
+	 * [static_priority_base, static_priority_end] of queues marked
+	 * CVMX_PKO_QUEUE_STATIC_PRIORITY.  Failures below only emit
+	 * debug messages; they do not abort configuration.
+	 */
+	for (queue = 0; queue < num_queues; queue++) {
+		/*
+		 * NOTE(review): the priority table wraps every 16 queues
+		 * here but every 8 (queue % 8) in the configuration loop
+		 * below -- inherited from the SDK sources, confirm intended.
+		 */
+		int p_queue = queue % 16;
+
+		/* First queue of static priority */
+		if (static_priority_base == -1 &&
+		    priority[p_queue] == CVMX_PKO_QUEUE_STATIC_PRIORITY)
+			static_priority_base = queue;
+
+		/* Last queue of static priority */
+		if (static_priority_base != -1 && static_priority_end == -1 &&
+		    priority[p_queue] != CVMX_PKO_QUEUE_STATIC_PRIORITY &&
+		    queue)
+			static_priority_end = queue - 1;
+		else if (static_priority_base != -1 &&
+			 static_priority_end == -1 && queue == num_queues - 1)
+			static_priority_end =
+				queue; /* all queues are static priority */
+
+		/*
+		 * Check to make sure all static priority queues are contiguous.
+		 * Also catches some cases of static priorities not starting
+		 * from queue 0.
+		 */
+		if (static_priority_end != -1 &&
+		    (int)queue > static_priority_end &&
+		    priority[p_queue] == CVMX_PKO_QUEUE_STATIC_PRIORITY) {
+			debug("ERROR: %s: Static priority queues aren't contiguous or don't start at base queue. q: %d, eq: %d\n",
+			      __func__, (int)queue, static_priority_end);
+		}
+		/*
+		 * NOTE(review): this check sits inside the loop, whereas
+		 * the PKO1 variant performs it once after the loop; it is
+		 * merely repeated per queue, with the same net effect.
+		 */
+		if (static_priority_base > 0) {
+			debug("ERROR: %s: Static priority queues don't start at base queue. sq: %d\n",
+			      __func__, static_priority_base);
+		}
+	}
+
+	/*
+	 * main loop to set the fields of CVMX_PKO_MEM_IQUEUE_PTRS for
+	 * each queue
+	 */
+	for (queue = 0; queue < num_queues; queue++) {
+		/* See NOTE(review) above: modulo 8 here vs 16 in validation */
+		int p_queue = queue % 8;
+
+		config.u64 = 0;
+		config.s.index = queue;
+		config.s.qid = base_queue + queue;
+		config.s.ipid = pko_port;
+		config.s.tail = (queue == (num_queues - 1));
+		config.s.s_tail = (queue == static_priority_end);
+		config.s.static_p = (static_priority_base >= 0);
+		config.s.static_q = (queue <= static_priority_end);
+
+		/*
+		 * Convert the priority into an enable bit field.
+		 * Try to space the bits out evenly so the packets
+		 * don't get grouped up.
+		 */
+		switch ((int)priority[p_queue]) {
+		case 0:
+			config.s.qos_mask = 0x00;
+			break;
+		case 1:
+			config.s.qos_mask = 0x01;
+			break;
+		case 2:
+			config.s.qos_mask = 0x11;
+			break;
+		case 3:
+			config.s.qos_mask = 0x49;
+			break;
+		case 4:
+			config.s.qos_mask = 0x55;
+			break;
+		case 5:
+			config.s.qos_mask = 0x57;
+			break;
+		case 6:
+			config.s.qos_mask = 0x77;
+			break;
+		case 7:
+			config.s.qos_mask = 0x7f;
+			break;
+		case 8:
+			config.s.qos_mask = 0xff;
+			break;
+		case CVMX_PKO_QUEUE_STATIC_PRIORITY:
+			config.s.qos_mask = 0xff;
+			break;
+		default:
+			debug("ERROR: %s: Invalid priority %llu\n", __func__,
+			      (unsigned long long)priority[p_queue]);
+			config.s.qos_mask = 0xff;
+			break;
+		}
+
+		/*
+		 * Initialize the command queue backing this PKO queue
+		 */
+		{
+			cvmx_cmd_queue_result_t cmd_res;
+
+			cmd_res = cvmx_cmd_queue_initialize(
+				CVMX_CMD_QUEUE_PKO(base_queue + queue),
+				CVMX_PKO_MAX_QUEUE_DEPTH, outputbuffer_pool,
+				(outputbuffer_pool_size -
+				 CVMX_PKO_COMMAND_BUFFER_SIZE_ADJUST * 8));
+
+			if (cmd_res != CVMX_CMD_QUEUE_SUCCESS) {
+				switch (cmd_res) {
+				case CVMX_CMD_QUEUE_NO_MEMORY:
+					debug("ERROR: %s: Unable to allocate output buffer\n",
+					      __func__);
+					break;
+				case CVMX_CMD_QUEUE_ALREADY_SETUP:
+					debug("ERROR: %s: Port already setup\n",
+					      __func__);
+					break;
+				case CVMX_CMD_QUEUE_INVALID_PARAM:
+				default:
+					debug("ERROR: %s: Command queue initialization failed.",
+					      __func__);
+					break;
+				}
+				debug(" pko_port%d base_queue%d num_queues%d queue%d.\n",
+				      pko_port, base_queue, num_queues, queue);
+			}
+
+			buf_ptr = (u64 *)cvmx_cmd_queue_buffer(
+				CVMX_CMD_QUEUE_PKO(base_queue + queue));
+			/* IQUEUE_PTRS stores the buffer address >> 7 */
+			config.s.buf_ptr = cvmx_ptr_to_phys(buf_ptr) >> 7;
+		}
+
+		/* Make buffer writes visible before the CSR store */
+		CVMX_SYNCWS;
+		csr_wr(CVMX_PKO_MEM_IQUEUE_PTRS, config.u64);
+	}
+
+	/* Error detection is desirable here */
+	return 0;
+}
+
+/**
+ * Rate limit a PKO port to a max packets/sec. This function is only
+ * supported on CN51XX and higher, excluding CN58XX.
+ *
+ * @param port Port to rate limit
+ * @param packets_s Maximum packets/sec, must be greater than zero
+ * @param burst Maximum number of packets to burst in a row before rate
+ * limiting cuts in.
+ *
+ * @return Zero on success, negative on failure
+ */
+int cvmx_pko_rate_limit_packets(int port, int packets_s, int burst)
+{
+	union cvmx_pko_mem_port_rate0 pko_mem_port_rate0;
+	union cvmx_pko_mem_port_rate1 pko_mem_port_rate1;
+
+	/* Guard against division by zero / nonsensical rates */
+	if (packets_s <= 0)
+		return -1;
+
+	pko_mem_port_rate0.u64 = 0;
+	pko_mem_port_rate0.s.pid = port;
+	/* Tokens per packet, derived from the bus clock */
+	pko_mem_port_rate0.s.rate_pkt = gd->bus_clk / packets_s / 16;
+	/* No cost per word since we are limited by packets/sec, not bits/sec */
+	pko_mem_port_rate0.s.rate_word = 0;
+
+	pko_mem_port_rate1.u64 = 0;
+	pko_mem_port_rate1.s.pid = port;
+	/* Burst allowance scaled by the per-packet token cost */
+	pko_mem_port_rate1.s.rate_lim =
+		((u64)pko_mem_port_rate0.s.rate_pkt * burst) >> 8;
+
+	csr_wr(CVMX_PKO_MEM_PORT_RATE0, pko_mem_port_rate0.u64);
+	csr_wr(CVMX_PKO_MEM_PORT_RATE1, pko_mem_port_rate1.u64);
+	return 0;
+}
+
+/**
+ * Rate limit a PKO port to a max bits/sec. This function is only
+ * supported on CN51XX and higher, excluding CN58XX.
+ *
+ * @param port Port to rate limit
+ * @param bits_s PKO rate limit in bits/sec, must be greater than zero
+ * @param burst Maximum number of bits to burst before rate
+ * limiting cuts in.
+ *
+ * @return Zero on success, negative on failure
+ */
+int cvmx_pko_rate_limit_bits(int port, u64 bits_s, int burst)
+{
+	union cvmx_pko_mem_port_rate0 pko_mem_port_rate0;
+	union cvmx_pko_mem_port_rate1 pko_mem_port_rate1;
+	u64 clock_rate = gd->bus_clk;
+	u64 tokens_per_bit;
+
+	/* Guard against division by zero */
+	if (bits_s == 0)
+		return -1;
+
+	tokens_per_bit = clock_rate * 16 / bits_s;
+
+	pko_mem_port_rate0.u64 = 0;
+	pko_mem_port_rate0.s.pid = port;
+	/*
+	 * Each packet has a 12 bytes of interframe gap, an 8 byte
+	 * preamble, and a 4 byte CRC. These are not included in the
+	 * per word count. Multiply by 8 to convert to bits and divide
+	 * by 256 for limit granularity.
+	 */
+	pko_mem_port_rate0.s.rate_pkt = (12 + 8 + 4) * 8 * tokens_per_bit / 256;
+	/* Each 8 byte word has 64bits */
+	pko_mem_port_rate0.s.rate_word = 64 * tokens_per_bit;
+
+	pko_mem_port_rate1.u64 = 0;
+	pko_mem_port_rate1.s.pid = port;
+	pko_mem_port_rate1.s.rate_lim = tokens_per_bit * burst / 256;
+
+	csr_wr(CVMX_PKO_MEM_PORT_RATE0, pko_mem_port_rate0.u64);
+	csr_wr(CVMX_PKO_MEM_PORT_RATE1, pko_mem_port_rate1.u64);
+	return 0;
+}
+
+/**
+ * Get the status counters for a port.
+ *
+ * @param ipd_port Port number (ipd_port) to get statistics for.
+ * @param clear Set to 1 to clear the counters after they are read
+ * @param status Where to put the results.
+ *
+ * Note:
+ * - Only the doorbell for the base queue of the ipd_port is
+ * collected.
+ * - Retrieving the stats involves writing the index through
+ * CVMX_PKO_REG_READ_IDX and reading the stat CSRs, in that
+ * order. It is not MP-safe and caller should guarantee
+ * atomicity.
+ */
+void cvmx_pko_get_port_status(u64 ipd_port, u64 clear,
+			      cvmx_pko_port_status_t *status)
+{
+	cvmx_pko_reg_read_idx_t pko_reg_read_idx;
+	cvmx_pko_mem_count0_t pko_mem_count0;
+	cvmx_pko_mem_count1_t pko_mem_count1;
+	int pko_port, port_base, port_limit;
+
+	/* PKO3 (CN78XX-style) devices have a separate stats path */
+	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
+		int xipd = cvmx_helper_node_to_ipd_port(cvmx_get_node_num(),
+							ipd_port);
+		cvmx_pko3_get_legacy_port_stats(xipd, clear, status);
+		return;
+	} else if (octeon_has_feature(OCTEON_FEATURE_PKND)) {
+		/* PKO2: one IPD port maps to a range of internal PKO ports */
+		int interface = cvmx_helper_get_interface_num(ipd_port);
+		int index = cvmx_helper_get_interface_index_num(ipd_port);
+
+		port_base = cvmx_helper_get_pko_port(interface, index);
+		if (port_base == -1)
+			debug("Warning: Invalid port_base\n");
+		port_limit = port_base +
+			     cvmx_pko_get_num_pko_ports(interface, index);
+	} else {
+		/* PKO1: IPD port number equals the PKO port number */
+		port_base = ipd_port;
+		port_limit = port_base + 1;
+	}
+
+	/*
+	 * Accumulate status->packets and status->octets over every
+	 * internal PKO port backing this IPD port.
+	 */
+	status->packets = 0;
+	status->octets = 0;
+	pko_reg_read_idx.u64 = 0;
+
+	for (pko_port = port_base; pko_port < port_limit; pko_port++) {
+		/*
+		 * In theory, one doesn't need to write the index csr every
+		 * time as one can set pko_reg_read_idx.s.inc to increment
+		 * the index automatically. Need to find out exactly how XXX.
+		 */
+		pko_reg_read_idx.s.index = pko_port;
+		csr_wr(CVMX_PKO_REG_READ_IDX, pko_reg_read_idx.u64);
+
+		pko_mem_count0.u64 = csr_rd(CVMX_PKO_MEM_COUNT0);
+		status->packets += pko_mem_count0.s.count;
+		if (clear) {
+			/*
+			 * NOTE(review): the port number is written into the
+			 * count field when clearing; inherited from the SDK
+			 * sources -- verify against the HRM that MEM_COUNT0
+			 * is cleared via an indexed write.
+			 */
+			pko_mem_count0.s.count = pko_port;
+			csr_wr(CVMX_PKO_MEM_COUNT0, pko_mem_count0.u64);
+		}
+
+		pko_mem_count1.u64 = csr_rd(CVMX_PKO_MEM_COUNT1);
+		status->octets += pko_mem_count1.s.count;
+		if (clear) {
+			pko_mem_count1.s.count = pko_port;
+			csr_wr(CVMX_PKO_MEM_COUNT1, pko_mem_count1.u64);
+		}
+	}
+
+	/*
+	 * status->doorbell: read from the debug CSR of the base queue only
+	 */
+	cvmx_pko_mem_debug8_t debug8;
+
+	pko_reg_read_idx.s.index = cvmx_pko_get_base_queue(ipd_port);
+	csr_wr(CVMX_PKO_REG_READ_IDX, pko_reg_read_idx.u64);
+	debug8.u64 = csr_rd(CVMX_PKO_MEM_DEBUG8);
+	/* The doorbell field layout differs on CN68XX */
+	if (OCTEON_IS_MODEL(OCTEON_CN68XX))
+		status->doorbell = debug8.cn68xx.doorbell;
+	else
+		status->doorbell = debug8.cn58xx.doorbell;
+}
+
+/*
+ * Obtain the number of PKO commands pending in a queue
+ *
+ * @param queue is the queue identifier to be queried
+ * @return the number of commands pending transmission or -1 on error
+ */
+int cvmx_pko_queue_pend_count(cvmx_cmd_queue_id_t queue)
+{
+	/*
+	 * PKO3 hardware is queried directly; legacy PKO relies on the
+	 * software command-queue bookkeeping.
+	 */
+	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE))
+		return cvmx_pko3_dq_query(cvmx_get_node_num(), queue);
+
+	return cvmx_cmd_queue_length(CVMX_CMD_QUEUE_PKO(queue));
+}
--
2.35.1
^ permalink raw reply related [flat|nested] 52+ messages in thread* [PATCH 35/52] mips: octeon: Add cvmx-pko3.c
2022-03-30 10:06 [PATCH 00/52] mips: octeon: Add ethernet support Stefan Roese
` (29 preceding siblings ...)
2022-03-30 10:07 ` [PATCH 34/52] mips: octeon: Add cvmx-pko.c Stefan Roese
@ 2022-03-30 10:07 ` Stefan Roese
2022-03-30 10:07 ` [PATCH 36/52] mips: octeon: Add cvmx-pko3-queue.c Stefan Roese
` (18 subsequent siblings)
49 siblings, 0 replies; 52+ messages in thread
From: Stefan Roese @ 2022-03-30 10:07 UTC (permalink / raw)
To: u-boot; +Cc: awilliams, cchavva
From: Aaron Williams <awilliams@marvell.com>
Import cvmx-pko3.c from 2013 U-Boot. It will be used by the later
added drivers to support networking on the MIPS Octeon II / III
platforms.
Signed-off-by: Aaron Williams <awilliams@marvell.com>
Signed-off-by: Stefan Roese <sr@denx.de>
---
arch/mips/mach-octeon/cvmx-pko3.c | 2143 +++++++++++++++++++++++++++++
1 file changed, 2143 insertions(+)
create mode 100644 arch/mips/mach-octeon/cvmx-pko3.c
diff --git a/arch/mips/mach-octeon/cvmx-pko3.c b/arch/mips/mach-octeon/cvmx-pko3.c
new file mode 100644
index 000000000000..dc180ea83ac3
--- /dev/null
+++ b/arch/mips/mach-octeon/cvmx-pko3.c
@@ -0,0 +1,2143 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018-2022 Marvell International Ltd.
+ */
+
+#include <errno.h>
+#include <log.h>
+#include <time.h>
+#include <linux/delay.h>
+
+#include <mach/cvmx-regs.h>
+#include <mach/cvmx-csr.h>
+#include <mach/cvmx-bootmem.h>
+#include <mach/octeon-model.h>
+#include <mach/cvmx-fuse.h>
+#include <mach/octeon-feature.h>
+#include <mach/cvmx-qlm.h>
+#include <mach/octeon_qlm.h>
+#include <mach/cvmx-pcie.h>
+#include <mach/cvmx-coremask.h>
+
+#include <mach/cvmx-agl-defs.h>
+#include <mach/cvmx-bgxx-defs.h>
+#include <mach/cvmx-ciu-defs.h>
+#include <mach/cvmx-gmxx-defs.h>
+#include <mach/cvmx-gserx-defs.h>
+#include <mach/cvmx-ilk-defs.h>
+#include <mach/cvmx-ipd-defs.h>
+#include <mach/cvmx-pcsx-defs.h>
+#include <mach/cvmx-pcsxx-defs.h>
+#include <mach/cvmx-pki-defs.h>
+#include <mach/cvmx-pko-defs.h>
+#include <mach/cvmx-xcv-defs.h>
+
+#include <mach/cvmx-hwpko.h>
+#include <mach/cvmx-ilk.h>
+#include <mach/cvmx-pki.h>
+#include <mach/cvmx-pko3.h>
+#include <mach/cvmx-pko3-queue.h>
+#include <mach/cvmx-pko3-resources.h>
+
+#include <mach/cvmx-helper.h>
+#include <mach/cvmx-helper-board.h>
+#include <mach/cvmx-helper-cfg.h>
+
+#include <mach/cvmx-helper-bgx.h>
+#include <mach/cvmx-helper-cfg.h>
+#include <mach/cvmx-helper-util.h>
+#include <mach/cvmx-helper-pki.h>
+
+static const int debug;
+static const bool __native_le;
+
+#define CVMX_DUMP_REGX(reg) \
+ if (debug) \
+ debug("%s=%#llx\n", #reg, (long long)csr_rd_node(node, reg))
+
+static int cvmx_pko_setup_macs(int node);
+
+/*
+ * PKO descriptor queue operation error string
+ *
+ * @param dqstatus is the enumeration returned from hardware,
+ * PKO_QUERY_RTN_S[DQSTATUS].
+ *
+ * @return static constant string error description
+ */
+const char *pko_dqstatus_error(pko_query_dqstatus_t dqstatus)
+{
+	/* String literals are immutable: keep the pointer const-correct */
+	const char *str = "PKO Undefined error";
+
+	switch (dqstatus) {
+	case PKO_DQSTATUS_PASS:
+		str = "No error";
+		break;
+	case PKO_DQSTATUS_BADSTATE:
+		str = "PKO queue not ready";
+		break;
+	case PKO_DQSTATUS_NOFPABUF:
+		str = "PKO failed to allocate buffer from FPA";
+		break;
+	case PKO_DQSTATUS_NOPKOBUF:
+		str = "PKO out of buffers";
+		break;
+	case PKO_DQSTATUS_FAILRTNPTR:
+		str = "PKO failed to return buffer to FPA";
+		break;
+	case PKO_DQSTATUS_ALREADY:
+		str = "PKO queue already opened";
+		break;
+	case PKO_DQSTATUS_NOTCREATED:
+		str = "PKO queue has not been created";
+		break;
+	case PKO_DQSTATUS_NOTEMPTY:
+		str = "PKO queue is not empty";
+		break;
+	case PKO_DQSTATUS_SENDPKTDROP:
+		str = "Illegal PKO command construct";
+		break;
+	}
+	return str;
+}
+
+/*
+ * PKO global initialization for 78XX.
+ *
+ * @param node is the node on which PKO block is initialized.
+ * @param aura AURA number used for PKO internal command buffers;
+ *        the node number is carried in bits above bit 10 (see the
+ *        pko_aura.s.node assignment below).
+ * @return 0 on success, -1 on failure/timeout.
+ */
+int cvmx_pko3_hw_init_global(int node, uint16_t aura)
+{
+	cvmx_pko_dpfi_flush_t pko_flush;
+	cvmx_pko_dpfi_fpa_aura_t pko_aura;
+	cvmx_pko_dpfi_ena_t dpfi_enable;
+	cvmx_pko_ptf_iobp_cfg_t ptf_iobp_cfg;
+	cvmx_pko_pdm_cfg_t pko_pdm_cfg;
+	cvmx_pko_enable_t pko_enable;
+	cvmx_pko_dpfi_status_t dpfi_status;
+	cvmx_pko_status_t pko_status;
+	cvmx_pko_shaper_cfg_t shaper_cfg;
+	u64 cycles;
+	const unsigned int timeout = 100; /* 100 milliseconds */
+
+	/* The AURA's upper bits encode its node; warn on mismatch */
+	if (node != (aura >> 10))
+		cvmx_printf("WARNING: AURA vs PKO node mismatch\n");
+
+	/* Nothing to do if PKO has already been brought up */
+	pko_enable.u64 = csr_rd_node(node, CVMX_PKO_ENABLE);
+	if (pko_enable.s.enable) {
+		cvmx_printf("WARNING: %s: PKO already enabled on node %u\n",
+			    __func__, node);
+		return 0;
+	}
+	/* Enable color awareness. */
+	shaper_cfg.u64 = csr_rd_node(node, CVMX_PKO_SHAPER_CFG);
+	shaper_cfg.s.color_aware = 1;
+	csr_wr_node(node, CVMX_PKO_SHAPER_CFG, shaper_cfg.u64);
+
+	/* Clear FLUSH command to be sure */
+	pko_flush.u64 = 0;
+	pko_flush.s.flush_en = 0;
+	csr_wr_node(node, CVMX_PKO_DPFI_FLUSH, pko_flush.u64);
+
+	/* set the aura number in pko, use aura node from parameter */
+	pko_aura.u64 = 0;
+	pko_aura.s.node = aura >> 10;
+	pko_aura.s.laura = aura;
+	csr_wr_node(node, CVMX_PKO_DPFI_FPA_AURA, pko_aura.u64);
+
+	CVMX_DUMP_REGX(CVMX_PKO_DPFI_FPA_AURA);
+
+	/* Enable the DPFI (buffer pointer manager) */
+	dpfi_enable.u64 = 0;
+	dpfi_enable.s.enable = 1;
+	csr_wr_node(node, CVMX_PKO_DPFI_ENA, dpfi_enable.u64);
+
+	/* Prepare timeout */
+	cycles = get_timer(0);
+
+	/* Poll until PKO_RDY is set (DPFI initialization complete) */
+	do {
+		pko_status.u64 = csr_rd_node(node, CVMX_PKO_STATUS);
+		if (get_timer(cycles) > timeout)
+			break;
+	} while (!pko_status.s.pko_rdy);
+
+	if (!pko_status.s.pko_rdy) {
+		dpfi_status.u64 = csr_rd_node(node, CVMX_PKO_DPFI_STATUS);
+		cvmx_printf("ERROR: %s: PKO DFPI failed, PKO_STATUS=%#llx DPFI_STATUS=%#llx\n",
+			    __func__, (unsigned long long)pko_status.u64,
+			    (unsigned long long)dpfi_status.u64);
+		return -1;
+	}
+
+	/* Set max outstanding requests in IOBP for any FIFO.*/
+	ptf_iobp_cfg.u64 = csr_rd_node(node, CVMX_PKO_PTF_IOBP_CFG);
+	if (OCTEON_IS_MODEL(OCTEON_CN78XX))
+		ptf_iobp_cfg.s.max_read_size = 0x10; /* Recommended by HRM.*/
+	else
+		/* Reduce the value from recommended 0x10 to avoid
+		 * getting "underflow" condition in the BGX TX FIFO.
+		 */
+		ptf_iobp_cfg.s.max_read_size = 3;
+	csr_wr_node(node, CVMX_PKO_PTF_IOBP_CFG, ptf_iobp_cfg.u64);
+
+	/* Set minimum packet size per Ethernet standard */
+	pko_pdm_cfg.u64 = 0;
+	pko_pdm_cfg.s.pko_pad_minlen = 0x3c; /* 60 bytes before FCS */
+	csr_wr_node(node, CVMX_PKO_PDM_CFG, pko_pdm_cfg.u64);
+
+	/* Initialize MACs and FIFOs */
+	cvmx_pko_setup_macs(node);
+
+	/* enable PKO, although interfaces and queues are not up yet */
+	pko_enable.u64 = 0;
+	pko_enable.s.enable = 1;
+	csr_wr_node(node, CVMX_PKO_ENABLE, pko_enable.u64);
+
+	/* PKO_RDY set indicates successful initialization */
+	pko_status.u64 = csr_rd_node(node, CVMX_PKO_STATUS);
+	if (pko_status.s.pko_rdy)
+		return 0;
+
+	cvmx_printf("ERROR: %s: failed, PKO_STATUS=%#llx\n", __func__,
+		    (unsigned long long)pko_status.u64);
+	return -1;
+}
+
+/**
+ * Shutdown the entire PKO
+ *
+ * Waits for in-flight packets to drain, disables the PKO block,
+ * parks all L1 queues and MACs on the model-specific NULL MAC/FIFO,
+ * resets the FIFO groups and flushes cached buffer pointers back
+ * to the FPA.
+ *
+ * @param node node whose PKO block is shut down
+ * @return 0 on success, -1 if the pointer flush timed out
+ */
+int cvmx_pko3_hw_disable(int node)
+{
+	cvmx_pko_dpfi_flush_t pko_flush;
+	cvmx_pko_dpfi_status_t dpfi_status;
+	cvmx_pko_dpfi_ena_t dpfi_enable;
+	cvmx_pko_enable_t pko_enable;
+	cvmx_pko_status_t pko_status;
+	u64 cycles;
+	const unsigned int timeout = 10; /* 10 milliseconds */
+	unsigned int mac_num, fifo, i;
+	unsigned int null_mac_num, null_fifo_num, fifo_grp_count, pq_count;
+
+	/* Suppress unused-variable warning; pko_status is never read here */
+	(void)pko_status;
+
+	/*
+	 * Wait until there are no in-flight packets.
+	 * NOTE(review): 'i' is initialized but never used below, and the
+	 * mac_num-- retry never gives up: a MAC with permanently stuck
+	 * in-flight packets would loop forever -- confirm acceptable.
+	 */
+	for (i = mac_num = 0; mac_num < __cvmx_pko3_num_macs(); mac_num++) {
+		cvmx_pko_ptfx_status_t ptf_status;
+
+		ptf_status.u64 =
+			csr_rd_node(node, CVMX_PKO_PTFX_STATUS(mac_num));
+		if (debug)
+			debug("%s: MAC %u in-flight %u total %u\n", __func__,
+			      mac_num, ptf_status.s.in_flight_cnt,
+			      ptf_status.s.total_in_flight_cnt);
+		/* 0x1f marks an unassigned FIFO/MAC: nothing to wait for */
+		if (ptf_status.s.mac_num == 0x1f)
+			continue;
+		if (ptf_status.s.in_flight_cnt != 0) {
+			cvmx_printf("WARNING: %s: MAC %d in-flight %d\n",
+				    __func__, mac_num,
+				    ptf_status.s.in_flight_cnt);
+			/* Retry the same MAC after a short delay */
+			mac_num--;
+			udelay(1000);
+		}
+	}
+
+	/* disable PKO - all packets should be out by now */
+	pko_enable.u64 = 0;
+	pko_enable.s.enable = 0;
+	csr_wr_node(node, CVMX_PKO_ENABLE, pko_enable.u64);
+
+	/* Assign NULL MAC# for L1/SQ disabled state (model-specific) */
+	if (OCTEON_IS_MODEL(OCTEON_CN73XX)) {
+		null_mac_num = 0x0f;
+		null_fifo_num = 0x1f;
+		fifo_grp_count = 4;
+		pq_count = 16;
+	} else if (OCTEON_IS_MODEL(OCTEON_CNF75XX)) {
+		null_mac_num = 0x0a;
+		null_fifo_num = 0x1f;
+		fifo_grp_count = 4;
+		pq_count = 16;
+	} else {
+		null_mac_num = 0x1c;
+		null_fifo_num = 0x1f;
+		fifo_grp_count = 8;
+		pq_count = 32;
+	}
+
+	/* Reset L1_PQ: point every port queue at the NULL MAC */
+	for (i = 0; i < pq_count; i++) {
+		cvmx_pko_l1_sqx_topology_t pko_l1_topology;
+		cvmx_pko_l1_sqx_shape_t pko_l1_shape;
+		cvmx_pko_l1_sqx_link_t pko_l1_link;
+
+		pko_l1_topology.u64 = 0;
+		pko_l1_topology.s.link = null_mac_num;
+		csr_wr_node(node, CVMX_PKO_L1_SQX_TOPOLOGY(i),
+			    pko_l1_topology.u64);
+
+		pko_l1_shape.u64 = 0;
+		pko_l1_shape.s.link = null_mac_num;
+		csr_wr_node(node, CVMX_PKO_L1_SQX_SHAPE(i), pko_l1_shape.u64);
+
+		pko_l1_link.u64 = 0;
+		pko_l1_link.s.link = null_mac_num;
+		csr_wr_node(node, CVMX_PKO_L1_SQX_LINK(i), pko_l1_link.u64);
+	}
+
+	/* Reset all MAC configurations: detach from their FIFOs */
+	for (mac_num = 0; mac_num < __cvmx_pko3_num_macs(); mac_num++) {
+		cvmx_pko_macx_cfg_t pko_mac_cfg;
+
+		pko_mac_cfg.u64 = 0;
+		pko_mac_cfg.s.fifo_num = null_fifo_num;
+		csr_wr_node(node, CVMX_PKO_MACX_CFG(mac_num), pko_mac_cfg.u64);
+	}
+
+	/* Reset all FIFO groups */
+	for (fifo = 0; fifo < fifo_grp_count; fifo++) {
+		cvmx_pko_ptgfx_cfg_t pko_ptgfx_cfg;
+
+		pko_ptgfx_cfg.u64 = csr_rd_node(node, CVMX_PKO_PTGFX_CFG(fifo));
+		/* Simulator asserts if an unused group is reset */
+		if (pko_ptgfx_cfg.u64 == 0)
+			continue;
+		pko_ptgfx_cfg.u64 = 0;
+		pko_ptgfx_cfg.s.reset = 1;
+		csr_wr_node(node, CVMX_PKO_PTGFX_CFG(fifo), pko_ptgfx_cfg.u64);
+	}
+
+	/* Set FLUSH_EN to return cached pointers to FPA */
+	pko_flush.u64 = 0;
+	pko_flush.s.flush_en = 1;
+	csr_wr_node(node, CVMX_PKO_DPFI_FLUSH, pko_flush.u64);
+
+	/* Prepare timeout */
+	cycles = get_timer(0);
+
+	/* Wait until all pointers have been returned */
+	do {
+		dpfi_status.u64 = csr_rd_node(node, CVMX_PKO_DPFI_STATUS);
+		if (get_timer(cycles) > timeout)
+			break;
+	} while (!dpfi_status.s.cache_flushed);
+
+	/* disable PKO buffer manager, should return all buffers to FPA */
+	dpfi_enable.u64 = 0;
+	dpfi_enable.s.enable = 0;
+	csr_wr_node(node, CVMX_PKO_DPFI_ENA, dpfi_enable.u64);
+
+	CVMX_DUMP_REGX(CVMX_PKO_DPFI_ENA);
+	CVMX_DUMP_REGX(CVMX_PKO_DPFI_STATUS);
+	CVMX_DUMP_REGX(CVMX_PKO_STATUS);
+
+	/* Clear the FLUSH_EN bit, as we are done */
+	pko_flush.u64 = 0;
+	csr_wr_node(node, CVMX_PKO_DPFI_FLUSH, pko_flush.u64);
+	CVMX_DUMP_REGX(CVMX_PKO_DPFI_FLUSH);
+
+	if (dpfi_status.s.cache_flushed == 0) {
+		cvmx_printf("%s: ERROR: timeout waiting for PKO3 ptr flush\n",
+			    __func__);
+		return -1;
+	}
+
+	return 0;
+}
+
+/*
+ * Configure Channel credit level in PKO.
+ *
+ * @param node is to specify the node to which this configuration is applied.
+ * @param level specifies the level at which pko channel queues will be
+ *        configured; only L2 and L3 queue levels are valid.
+ * @return returns 0 if successful and -1 on failure.
+ */
+int cvmx_pko3_channel_credit_level(int node, enum cvmx_pko3_level_e level)
+{
+	union cvmx_pko_channel_level channel_level;
+
+	channel_level.u64 = 0;
+
+	switch (level) {
+	case CVMX_PKO_L2_QUEUES:
+		channel_level.s.cc_level = 0;
+		break;
+	case CVMX_PKO_L3_QUEUES:
+		channel_level.s.cc_level = 1;
+		break;
+	default:
+		/* Channel credits are only supported at L2/L3 */
+		return -1;
+	}
+
+	csr_wr_node(node, CVMX_PKO_CHANNEL_LEVEL, channel_level.u64);
+
+	return 0;
+}
+
+/** Open configured descriptor queues before queueing packets into them.
+ *
+ * @param node is to specify the node to which this configuration is applied.
+ * @param dq is the descriptor queue number to be opened.
+ * @return returns 0 on success or -1 on failure.
+ */
+int cvmx_pko_dq_open(int node, int dq)
+{
+	cvmx_pko3_dq_params_t *params;
+	cvmx_pko_query_rtn_t rtn;
+	pko_query_dqstatus_t dq_state;
+
+	if (debug)
+		debug("%s: DEBUG: dq %u\n", __func__, dq);
+
+	/* Make sure the per-DQ software parameter table exists */
+	__cvmx_pko3_dq_param_setup(node);
+
+	rtn = __cvmx_pko3_do_dma(node, dq, NULL, 0, CVMX_PKO_DQ_OPEN);
+	dq_state = rtn.s.dqstatus;
+
+	/* Opening an already-open queue is not an error */
+	if (dq_state == PKO_DQSTATUS_ALREADY)
+		return 0;
+	if (dq_state != PKO_DQSTATUS_PASS) {
+		cvmx_printf("%s: ERROR: Failed to open dq :%u: %s\n", __func__,
+			    dq, pko_dqstatus_error(dq_state));
+		return -1;
+	}
+
+	/* Setup the descriptor queue software parameters */
+	params = cvmx_pko3_dq_parameters(node, dq);
+	if (params) {
+		params->depth = rtn.s.depth;
+		if (params->limit == 0)
+			params->limit = 1024; /* last-resort default */
+	}
+
+	return 0;
+}
+
+/**
+ * Close a descriptor queue
+ *
+ * @param node is to specify the node to which this configuration is applied.
+ * @param dq is the descriptor queue number to be closed.
+ * @return returns 0 on success or -1 on failure.
+ *
+ * This should be called before changing the DQ parent link, topology,
+ * or when shutting down the PKO.
+ */
+int cvmx_pko3_dq_close(int node, int dq)
+{
+	pko_query_dqstatus_t dq_state;
+	cvmx_pko_query_rtn_t rtn;
+
+	if (debug)
+		debug("%s: DEBUG: dq %u\n", __func__, dq);
+
+	rtn = __cvmx_pko3_do_dma(node, dq, NULL, 0, CVMX_PKO_DQ_CLOSE);
+	dq_state = rtn.s.dqstatus;
+
+	/* A queue that was never created is trivially closed */
+	if (dq_state == PKO_DQSTATUS_NOTCREATED)
+		return 0;
+
+	if (dq_state != PKO_DQSTATUS_PASS) {
+		cvmx_printf("WARNING: %s: Failed to close dq :%u: %s\n",
+			    __func__, dq, pko_dqstatus_error(dq_state));
+		debug("DEBUG: %s: dq %u depth %u\n", __func__, dq,
+		      (unsigned int)rtn.s.depth);
+	}
+
+	return 0;
+}
+
+/**
+ * Drain a descriptor queue
+ *
+ * Before closing a DQ, this call will drain all pending traffic
+ * on the DQ to the NULL MAC, which will circumvent any traffic
+ * shaping and flow control to quickly reclaim all packet buffers.
+ *
+ * @param node is to specify the node to which this configuration is applied.
+ * @param dq is the descriptor queue number to be drained.
+ */
+void cvmx_pko3_dq_drain(int node, int dq)
+{
+	cvmx_pko_dqx_sw_xoff_t xoff_ctl;
+
+	/* Redirect pending traffic to the NULL link and start draining */
+	xoff_ctl.u64 = 0;
+	xoff_ctl.s.drain_null_link = 1;
+	xoff_ctl.s.drain = 1;
+	xoff_ctl.s.xoff = 0;
+	csr_wr_node(node, CVMX_PKO_DQX_SW_XOFF(dq), xoff_ctl.u64);
+
+	/* Give the hardware time to flush the queue */
+	udelay(100);
+
+	/* Restore normal queue operation */
+	xoff_ctl.u64 = 0;
+	csr_wr_node(node, CVMX_PKO_DQX_SW_XOFF(dq), xoff_ctl.u64);
+}
+
+/**
+ * Query a descriptor queue
+ *
+ * @param node is to specify the node to which this configuration is applied.
+ * @param dq is the descriptor queue number to be queried.
+ * @return returns the descriptor queue depth on success or -1 on failure.
+ *
+ * This should be called before changing the DQ parent link, topology,
+ * or when shutting down the PKO.
+ */
+int cvmx_pko3_dq_query(int node, int dq)
+{
+	pko_query_dqstatus_t dq_state;
+	cvmx_pko_query_rtn_t rtn;
+
+	rtn = __cvmx_pko3_do_dma(node, dq, NULL, 0, CVMX_PKO_DQ_QUERY);
+	dq_state = rtn.s.dqstatus;
+
+	if (dq_state != PKO_DQSTATUS_PASS) {
+		cvmx_printf("%s: ERROR: Failed to query dq :%u: %s\n", __func__,
+			    dq, pko_dqstatus_error(dq_state));
+		return -1;
+	}
+
+	/* Temp: debug for HW */
+	if (rtn.s.depth > 0)
+		debug("%s: DEBUG: dq %u depth %u\n", __func__, dq,
+		      (unsigned int)rtn.s.depth);
+
+	return rtn.s.depth;
+}
+
+/*
+ * PKO initialization of MACs and FIFOs
+ *
+ * All MACs are configured and assigned a specific FIFO,
+ * and each FIFO is configured with size for a best utilization
+ * of available FIFO resources.
+ *
+ * @param node is to specify which node's pko block for this setup.
+ * @return returns 0 if successful and -1 on failure.
+ *
+ * Note: This function contains model-specific code.
+ */
+static int cvmx_pko_setup_macs(int node)
+{
+ unsigned int interface;
+ unsigned int port, num_ports;
+ unsigned int mac_num, fifo, pri, cnt;
+ cvmx_helper_interface_mode_t mode;
+ const unsigned int num_interfaces =
+ cvmx_helper_get_number_of_interfaces();
+ u8 fifo_group_cfg[8];
+ u8 fifo_group_spd[8];
+ unsigned int fifo_count = 0;
+ unsigned int max_fifos = 0, fifo_groups = 0;
+ struct {
+ u8 fifo_cnt;
+ u8 fifo_id;
+ u8 pri;
+ u8 spd;
+ u8 mac_fifo_cnt;
+ } cvmx_pko3_mac_table[32];
+
+ if (OCTEON_IS_MODEL(OCTEON_CN78XX)) {
+ max_fifos = 28; /* exclusive of NULL FIFO */
+ fifo_groups = 8; /* inclusive of NULL PTGF */
+ }
+ if (OCTEON_IS_MODEL(OCTEON_CN73XX) || OCTEON_IS_MODEL(OCTEON_CNF75XX)) {
+ max_fifos = 16;
+ fifo_groups = 5;
+ }
+
+ /* Initialize FIFO allocation table */
+ memset(&fifo_group_cfg, 0, sizeof(fifo_group_cfg));
+ memset(&fifo_group_spd, 0, sizeof(fifo_group_spd));
+ memset(cvmx_pko3_mac_table, 0, sizeof(cvmx_pko3_mac_table));
+
+ /* Initialize all MACs as disabled */
+ for (mac_num = 0; mac_num < __cvmx_pko3_num_macs(); mac_num++) {
+ cvmx_pko3_mac_table[mac_num].pri = 0;
+ cvmx_pko3_mac_table[mac_num].fifo_cnt = 0;
+ cvmx_pko3_mac_table[mac_num].fifo_id = 0x1f;
+ }
+
+ for (interface = 0; interface < num_interfaces; interface++) {
+ int xiface =
+ cvmx_helper_node_interface_to_xiface(node, interface);
+ /* Interface type for ALL interfaces */
+ mode = cvmx_helper_interface_get_mode(xiface);
+ num_ports = cvmx_helper_interface_enumerate(xiface);
+
+ if (mode == CVMX_HELPER_INTERFACE_MODE_DISABLED)
+ continue;
+ /*
+ * Non-BGX interfaces:
+ * Each of these interfaces has a single MAC really.
+ */
+ if (mode == CVMX_HELPER_INTERFACE_MODE_ILK ||
+ mode == CVMX_HELPER_INTERFACE_MODE_NPI ||
+ mode == CVMX_HELPER_INTERFACE_MODE_LOOP)
+ num_ports = 1;
+
+ for (port = 0; port < num_ports; port++) {
+ int i;
+
+ /* Get the per-port mode for BGX-interfaces */
+ if (interface < CVMX_HELPER_MAX_GMX)
+ mode = cvmx_helper_bgx_get_mode(xiface, port);
+ /* In MIXED mode, LMACs can run different protocols */
+
+ /* convert interface/port to mac number */
+ i = __cvmx_pko3_get_mac_num(xiface, port);
+ if (i < 0 || i >= (int)__cvmx_pko3_num_macs()) {
+ cvmx_printf("%s: ERROR: interface %d:%u port %d has no MAC %d/%d\n",
+ __func__, node, interface, port, i,
+ __cvmx_pko3_num_macs());
+ continue;
+ }
+
+ if (mode == CVMX_HELPER_INTERFACE_MODE_RXAUI) {
+ unsigned int bgx_fifo_size =
+ __cvmx_helper_bgx_fifo_size(xiface,
+ port);
+
+ cvmx_pko3_mac_table[i].mac_fifo_cnt =
+ bgx_fifo_size /
+ (CVMX_BGX_TX_FIFO_SIZE / 4);
+ cvmx_pko3_mac_table[i].pri = 2;
+ cvmx_pko3_mac_table[i].spd = 10;
+ cvmx_pko3_mac_table[i].fifo_cnt = 2;
+ } else if (mode == CVMX_HELPER_INTERFACE_MODE_XLAUI) {
+ unsigned int bgx_fifo_size =
+ __cvmx_helper_bgx_fifo_size(xiface,
+ port);
+
+ cvmx_pko3_mac_table[i].mac_fifo_cnt =
+ bgx_fifo_size /
+ (CVMX_BGX_TX_FIFO_SIZE / 4);
+ cvmx_pko3_mac_table[i].pri = 4;
+ cvmx_pko3_mac_table[i].spd = 40;
+ cvmx_pko3_mac_table[i].fifo_cnt = 4;
+ } else if (mode == CVMX_HELPER_INTERFACE_MODE_XAUI) {
+ unsigned int bgx_fifo_size =
+ __cvmx_helper_bgx_fifo_size(xiface,
+ port);
+
+ cvmx_pko3_mac_table[i].mac_fifo_cnt =
+ bgx_fifo_size /
+ (CVMX_BGX_TX_FIFO_SIZE / 4);
+ cvmx_pko3_mac_table[i].pri = 3;
+ cvmx_pko3_mac_table[i].fifo_cnt = 4;
+ /* DXAUI at 20G, or XAU at 10G */
+ cvmx_pko3_mac_table[i].spd = 20;
+ } else if (mode == CVMX_HELPER_INTERFACE_MODE_XFI) {
+ unsigned int bgx_fifo_size =
+ __cvmx_helper_bgx_fifo_size(xiface,
+ port);
+
+ cvmx_pko3_mac_table[i].mac_fifo_cnt =
+ bgx_fifo_size /
+ (CVMX_BGX_TX_FIFO_SIZE / 4);
+ cvmx_pko3_mac_table[i].pri = 3;
+ cvmx_pko3_mac_table[i].fifo_cnt = 4;
+ cvmx_pko3_mac_table[i].spd = 10;
+ } else if (mode == CVMX_HELPER_INTERFACE_MODE_LOOP) {
+ cvmx_pko3_mac_table[i].fifo_cnt = 1;
+ cvmx_pko3_mac_table[i].pri = 1;
+ cvmx_pko3_mac_table[i].spd = 1;
+ cvmx_pko3_mac_table[i].mac_fifo_cnt = 1;
+ } else if (mode == CVMX_HELPER_INTERFACE_MODE_ILK ||
+ mode == CVMX_HELPER_INTERFACE_MODE_SRIO) {
+ cvmx_pko3_mac_table[i].fifo_cnt = 4;
+ cvmx_pko3_mac_table[i].pri = 3;
+ /* ILK/SRIO: speed depends on lane count */
+ cvmx_pko3_mac_table[i].spd = 40;
+ cvmx_pko3_mac_table[i].mac_fifo_cnt = 4;
+ } else if (mode == CVMX_HELPER_INTERFACE_MODE_NPI) {
+ cvmx_pko3_mac_table[i].fifo_cnt = 4;
+ cvmx_pko3_mac_table[i].pri = 2;
+ /* Actual speed depends on PCIe lanes/mode */
+ cvmx_pko3_mac_table[i].spd = 50;
+ /* SLI Tx FIFO size to be revisitted */
+ cvmx_pko3_mac_table[i].mac_fifo_cnt = 1;
+ } else {
+ /* Other BGX interface modes: SGMII/RGMII */
+ unsigned int bgx_fifo_size =
+ __cvmx_helper_bgx_fifo_size(xiface,
+ port);
+
+ cvmx_pko3_mac_table[i].mac_fifo_cnt =
+ bgx_fifo_size /
+ (CVMX_BGX_TX_FIFO_SIZE / 4);
+ cvmx_pko3_mac_table[i].fifo_cnt = 1;
+ cvmx_pko3_mac_table[i].pri = 1;
+ cvmx_pko3_mac_table[i].spd = 1;
+ }
+
+ if (debug)
+ debug("%s: intf %d:%u port %u %s mac %02u cnt %u macfifo %uk spd %u\n",
+ __func__, node, interface, port,
+ cvmx_helper_interface_mode_to_string(mode),
+ i, cvmx_pko3_mac_table[i].fifo_cnt,
+ cvmx_pko3_mac_table[i].mac_fifo_cnt * 8,
+ cvmx_pko3_mac_table[i].spd);
+
+ } /* for port */
+ } /* for interface */
+
+ /* Count the number of requested FIFOs */
+ for (fifo_count = mac_num = 0; mac_num < __cvmx_pko3_num_macs();
+ mac_num++)
+ fifo_count += cvmx_pko3_mac_table[mac_num].fifo_cnt;
+
+ if (debug)
+ debug("%s: initially requested FIFO count %u\n", __func__,
+ fifo_count);
+
+ /* Heuristically trim FIFO count to fit in available number */
+ pri = 1;
+ cnt = 4;
+ while (fifo_count > max_fifos) {
+ for (mac_num = 0; mac_num < __cvmx_pko3_num_macs(); mac_num++) {
+ if (cvmx_pko3_mac_table[mac_num].fifo_cnt == cnt &&
+ cvmx_pko3_mac_table[mac_num].pri <= pri) {
+ cvmx_pko3_mac_table[mac_num].fifo_cnt >>= 1;
+ fifo_count -=
+ cvmx_pko3_mac_table[mac_num].fifo_cnt;
+ }
+ if (fifo_count <= max_fifos)
+ break;
+ }
+ if (pri >= 4) {
+ pri = 1;
+ cnt >>= 1;
+ } else {
+ pri++;
+ }
+ if (cnt == 0)
+ break;
+ }
+
+ if (debug)
+ debug("%s: adjusted FIFO count %u\n", __func__, fifo_count);
+
+ /* Special case for NULL Virtual FIFO */
+ fifo_group_cfg[fifo_groups - 1] = 0;
+ /* there is no MAC connected to NULL FIFO */
+
+ /* Configure MAC units, and attach a FIFO to each */
+ for (fifo = 0, cnt = 4; cnt > 0; cnt >>= 1) {
+ unsigned int g;
+
+ for (mac_num = 0; mac_num < __cvmx_pko3_num_macs(); mac_num++) {
+ if (cvmx_pko3_mac_table[mac_num].fifo_cnt < cnt ||
+ cvmx_pko3_mac_table[mac_num].fifo_id != 0x1f)
+ continue;
+
+ /* Attach FIFO to MAC */
+ cvmx_pko3_mac_table[mac_num].fifo_id = fifo;
+ g = fifo >> 2;
+ /* Sum speed for FIFO group */
+ fifo_group_spd[g] += cvmx_pko3_mac_table[mac_num].spd;
+
+ if (cnt == 4)
+ fifo_group_cfg[g] = 4; /* 10k,0,0,0 */
+ else if (cnt == 2 && (fifo & 0x3) == 0)
+ fifo_group_cfg[g] = 3; /* 5k,0,5k,0 */
+ else if (cnt == 2 && fifo_group_cfg[g] == 3)
+ /* no change */;
+ else if (cnt == 1 && (fifo & 0x2) &&
+ fifo_group_cfg[g] == 3)
+ fifo_group_cfg[g] = 1; /* 5k,0,2.5k 2.5k*/
+ else if (cnt == 1 && (fifo & 0x3) == 0x3)
+ /* no change */;
+ else if (cnt == 1)
+ fifo_group_cfg[g] = 0; /* 2.5k x 4 */
+ else
+ cvmx_printf("ERROR: %s: internal error\n",
+ __func__);
+
+ fifo += cnt;
+ }
+ }
+
+ /* Check if there was no error in FIFO allocation */
+ if (fifo > max_fifos) {
+ cvmx_printf("ERROR: %s: Internal error FIFO %u\n", __func__,
+ fifo);
+ return -1;
+ }
+
+ if (debug)
+ debug("%s: used %u of FIFOs\n", __func__, fifo);
+
+ /* Now configure all FIFO groups */
+ for (fifo = 0; fifo < fifo_groups; fifo++) {
+ cvmx_pko_ptgfx_cfg_t pko_ptgfx_cfg;
+
+ pko_ptgfx_cfg.u64 = csr_rd_node(node, CVMX_PKO_PTGFX_CFG(fifo));
+ if (pko_ptgfx_cfg.s.size != fifo_group_cfg[fifo])
+ pko_ptgfx_cfg.s.reset = 1;
+ pko_ptgfx_cfg.s.size = fifo_group_cfg[fifo];
+ if (fifo_group_spd[fifo] >= 40)
+ if (pko_ptgfx_cfg.s.size >= 3)
+ pko_ptgfx_cfg.s.rate = 3; /* 50 Gbps */
+ else
+ pko_ptgfx_cfg.s.rate = 2; /* 25 Gbps */
+ else if (fifo_group_spd[fifo] >= 20)
+ pko_ptgfx_cfg.s.rate = 2; /* 25 Gbps */
+ else if (fifo_group_spd[fifo] >= 10)
+ pko_ptgfx_cfg.s.rate = 1; /* 12.5 Gbps */
+ else
+ pko_ptgfx_cfg.s.rate = 0; /* 6.25 Gbps */
+
+ if (debug)
+ debug("%s: FIFO %#x-%#x size=%u speed=%d rate=%d\n",
+ __func__, fifo * 4, fifo * 4 + 3,
+ pko_ptgfx_cfg.s.size, fifo_group_spd[fifo],
+ pko_ptgfx_cfg.s.rate);
+
+ csr_wr_node(node, CVMX_PKO_PTGFX_CFG(fifo), pko_ptgfx_cfg.u64);
+ pko_ptgfx_cfg.s.reset = 0;
+ csr_wr_node(node, CVMX_PKO_PTGFX_CFG(fifo), pko_ptgfx_cfg.u64);
+ }
+
+ /* Configure all MACs assigned FIFO number */
+ for (mac_num = 0; mac_num < __cvmx_pko3_num_macs(); mac_num++) {
+ cvmx_pko_macx_cfg_t pko_mac_cfg;
+
+ if (debug)
+ debug("%s: mac#%02u: fifo=%#x cnt=%u speed=%d\n",
+ __func__, mac_num,
+ cvmx_pko3_mac_table[mac_num].fifo_id,
+ cvmx_pko3_mac_table[mac_num].fifo_cnt,
+ cvmx_pko3_mac_table[mac_num].spd);
+
+ pko_mac_cfg.u64 = csr_rd_node(node, CVMX_PKO_MACX_CFG(mac_num));
+ pko_mac_cfg.s.fifo_num = cvmx_pko3_mac_table[mac_num].fifo_id;
+ csr_wr_node(node, CVMX_PKO_MACX_CFG(mac_num), pko_mac_cfg.u64);
+ }
+
+ /* Setup PKO MCI0/MCI1/SKID credits */
+ for (mac_num = 0; mac_num < __cvmx_pko3_num_macs(); mac_num++) {
+ cvmx_pko_mci0_max_credx_t pko_mci0_max_cred;
+ cvmx_pko_mci1_max_credx_t pko_mci1_max_cred;
+ cvmx_pko_macx_cfg_t pko_mac_cfg;
+ unsigned int fifo_credit, mac_credit, skid_credit;
+ unsigned int pko_fifo_cnt, fifo_size;
+ unsigned int mac_fifo_cnt;
+ unsigned int tmp;
+ int saved_fifo_num;
+
+ pko_fifo_cnt = cvmx_pko3_mac_table[mac_num].fifo_cnt;
+ mac_fifo_cnt = cvmx_pko3_mac_table[mac_num].mac_fifo_cnt;
+
+ /* Skip unused MACs */
+ if (pko_fifo_cnt == 0)
+ continue;
+
+ /* Check for sanity */
+ if (pko_fifo_cnt > 4)
+ pko_fifo_cnt = 1;
+
+ fifo_size = (2 * 1024) + (1024 / 2); /* 2.5KiB */
+ fifo_credit = pko_fifo_cnt * fifo_size;
+
+ if (mac_num == 0) {
+ /* loopback */
+ mac_credit = 4096; /* From HRM Sec 13.0 */
+ skid_credit = 0;
+ } else if (mac_num == 1) {
+ /* DPI */
+ mac_credit = 2 * 1024;
+ skid_credit = 0;
+ } else if (octeon_has_feature(OCTEON_FEATURE_ILK) &&
+ (mac_num & 0xfe) == 2) {
+ /* ILK0, ILK1: MAC 2,3 */
+ mac_credit = 4 * 1024; /* 4KB fifo */
+ skid_credit = 0;
+ } else if (octeon_has_feature(OCTEON_FEATURE_SRIO) &&
+ (mac_num >= 6) && (mac_num <= 9)) {
+ /* SRIO0, SRIO1: MAC 6..9 */
+ mac_credit = 1024 / 2;
+ skid_credit = 0;
+ } else {
+ /* BGX */
+ mac_credit = mac_fifo_cnt * 8 * 1024;
+ skid_credit = mac_fifo_cnt * 256;
+ }
+
+ if (debug)
+ debug("%s: mac %u pko_fifo_credit=%u mac_credit=%u\n",
+ __func__, mac_num, fifo_credit, mac_credit);
+
+ tmp = (fifo_credit + mac_credit) / 16;
+ pko_mci0_max_cred.u64 = 0;
+ pko_mci0_max_cred.s.max_cred_lim = tmp;
+
+ /* Check for overflow */
+ if (pko_mci0_max_cred.s.max_cred_lim != tmp) {
+ cvmx_printf("WARNING: %s: MCI0 credit overflow\n",
+ __func__);
+ pko_mci0_max_cred.s.max_cred_lim = 0xfff;
+ }
+
+ /* Pass 2 PKO hardware does not use the MCI0 credits */
+ if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X))
+ csr_wr_node(node, CVMX_PKO_MCI0_MAX_CREDX(mac_num),
+ pko_mci0_max_cred.u64);
+
+ /* The original CSR formula is the correct one after all */
+ tmp = (mac_credit) / 16;
+ pko_mci1_max_cred.u64 = 0;
+ pko_mci1_max_cred.s.max_cred_lim = tmp;
+
+ /* Check for overflow */
+ if (pko_mci1_max_cred.s.max_cred_lim != tmp) {
+ cvmx_printf("WARNING: %s: MCI1 credit overflow\n",
+ __func__);
+ pko_mci1_max_cred.s.max_cred_lim = 0xfff;
+ }
+
+ csr_wr_node(node, CVMX_PKO_MCI1_MAX_CREDX(mac_num),
+ pko_mci1_max_cred.u64);
+
+ tmp = (skid_credit / 256) >> 1; /* valid 0,1,2 */
+ pko_mac_cfg.u64 = csr_rd_node(node, CVMX_PKO_MACX_CFG(mac_num));
+
+ /* The PKO_MACX_CFG bits cannot be changed unless FIFO_MUM=0x1f (unused fifo) */
+ saved_fifo_num = pko_mac_cfg.s.fifo_num;
+ pko_mac_cfg.s.fifo_num = 0x1f;
+ pko_mac_cfg.s.skid_max_cnt = tmp;
+ csr_wr_node(node, CVMX_PKO_MACX_CFG(mac_num), pko_mac_cfg.u64);
+
+ pko_mac_cfg.u64 = csr_rd_node(node, CVMX_PKO_MACX_CFG(mac_num));
+ pko_mac_cfg.s.fifo_num = saved_fifo_num;
+ csr_wr_node(node, CVMX_PKO_MACX_CFG(mac_num), pko_mac_cfg.u64);
+
+ if (debug) {
+ pko_mci0_max_cred.u64 =
+ csr_rd_node(node, CVMX_PKO_MCI0_MAX_CREDX(mac_num));
+ pko_mci1_max_cred.u64 =
+ csr_rd_node(node, CVMX_PKO_MCI1_MAX_CREDX(mac_num));
+ pko_mac_cfg.u64 =
+ csr_rd_node(node, CVMX_PKO_MACX_CFG(mac_num));
+ debug("%s: mac %u PKO_MCI0_MAX_CREDX=%u PKO_MCI1_MAX_CREDX=%u PKO_MACX_CFG[SKID_MAX_CNT]=%u\n",
+ __func__, mac_num,
+ pko_mci0_max_cred.s.max_cred_lim,
+ pko_mci1_max_cred.s.max_cred_lim,
+ pko_mac_cfg.s.skid_max_cnt);
+ }
+ } /* for mac_num */
+
+ return 0;
+}
+
+/**
+ * @INTERNAL
+ * Backward compatibility for collecting statistics from PKO3
+ *
+ * Sums the PKO_DQX_PACKETS/PKO_DQX_BYTES hardware counters over every
+ * descriptor queue mapped to the given legacy port.
+ *
+ * @param ipd_port legacy IPD-style port number to collect statistics for
+ * @param clear    when non-zero, zero each hardware counter after reading
+ * @param stats    output; the doorbell field is always 0 since PKO3
+ *                 has no doorbell counter
+ *
+ * NOTE:
+ * The good stats are in BGX block.
+ */
+void cvmx_pko3_get_legacy_port_stats(u16 ipd_port, unsigned int clear,
+				     cvmx_pko_port_status_t *stats)
+{
+	unsigned int dq, dq_base, dq_num;
+	unsigned int node = cvmx_get_node_num();
+
+	/* Range of descriptor queues assigned to this port */
+	dq_base = cvmx_pko3_get_queue_base(ipd_port);
+	dq_num = cvmx_pko3_get_queue_num(ipd_port);
+
+	stats->packets = 0;
+	stats->octets = 0;
+	stats->doorbell = 0; /* NOTE: PKO3 does not have a doorbell */
+
+	for (dq = dq_base; dq < (dq_base + dq_num); dq++) {
+		cvmx_pko_dqx_packets_t pkts;
+		cvmx_pko_dqx_bytes_t byts;
+
+		/* NOTE: clearing of these counters is non-atomic */
+		pkts.u64 = csr_rd_node(node, CVMX_PKO_DQX_PACKETS(dq));
+		if (clear)
+			csr_wr_node(node, CVMX_PKO_DQX_PACKETS(dq), 0ull);
+
+		byts.u64 = csr_rd_node(node, CVMX_PKO_DQX_BYTES(dq));
+		if (clear)
+			csr_wr_node(node, CVMX_PKO_DQX_BYTES(dq), 0ull);
+
+		stats->packets += pkts.s.count;
+		stats->octets += byts.s.count;
+	} /* for dq */
+}
+
+/** Set MAC options
+ *
+ * The options supported are the parameters below:
+ *
+ * @param xiface The physical interface number
+ * @param index The physical sub-interface port
+ * @param fcs_enable Enable FCS generation
+ * @param pad_enable Enable padding to minimum packet size
+ * @param fcs_sop_off Number of bytes at start of packet to exclude from FCS
+ *
+ * The typical use for `fcs_sop_off` is when the interface is configured
+ * to use a header such as HighGig to precede every Ethernet packet,
+ * such a header usually does not partake in the CRC32 computation stream,
+ * and its size must be set with this parameter.
+ *
+ * @return Returns 0 on success, -1 if interface/port is invalid.
+ */
+int cvmx_pko3_interface_options(int xiface, int index, bool fcs_enable,
+				bool pad_enable, unsigned int fcs_sop_off)
+{
+	int mac_num;
+	cvmx_pko_macx_cfg_t pko_mac_cfg;
+	unsigned int fifo_num;
+	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
+
+	if (debug)
+		debug("%s: intf %u:%u/%u fcs=%d pad=%d\n", __func__, xi.node,
+		      xi.interface, index, fcs_enable, pad_enable);
+
+	/* Map interface/port to the PKO MAC number */
+	mac_num = __cvmx_pko3_get_mac_num(xiface, index);
+	if (mac_num < 0) {
+		cvmx_printf("ERROR: %s: invalid interface %u:%u/%u\n", __func__,
+			    xi.node, xi.interface, index);
+		return -1;
+	}
+
+	pko_mac_cfg.u64 = csr_rd_node(xi.node, CVMX_PKO_MACX_CFG(mac_num));
+
+	/* If MAC is not assigned, return an error */
+	if (pko_mac_cfg.s.fifo_num == 0x1f) {
+		cvmx_printf("ERROR: %s: unused interface %u:%u/%u\n", __func__,
+			    xi.node, xi.interface, index);
+		return -1;
+	}
+
+	/* Nothing to do when both options already hold the requested values */
+	if (pko_mac_cfg.s.min_pad_ena == pad_enable &&
+	    pko_mac_cfg.s.fcs_ena == fcs_enable) {
+		if (debug)
+			debug("%s: mac %#x unchanged\n", __func__, mac_num);
+		return 0;
+	}
+
+	/* WORKAROUND: Pass1 won't allow changing any bits unless FIFO_NUM=0x1f */
+	fifo_num = pko_mac_cfg.s.fifo_num;
+	pko_mac_cfg.s.fifo_num = 0x1f;
+
+	pko_mac_cfg.s.min_pad_ena = pad_enable;
+	pko_mac_cfg.s.fcs_ena = fcs_enable;
+	pko_mac_cfg.s.fcs_sop_off = fcs_sop_off;
+
+	/* First write unlocks the register (FIFO_NUM=0x1f), second restores it */
+	csr_wr_node(xi.node, CVMX_PKO_MACX_CFG(mac_num), pko_mac_cfg.u64);
+
+	pko_mac_cfg.s.fifo_num = fifo_num;
+	csr_wr_node(xi.node, CVMX_PKO_MACX_CFG(mac_num), pko_mac_cfg.u64);
+
+	if (debug)
+		debug("%s: PKO_MAC[%u]CFG=%#llx\n", __func__, mac_num,
+		      (unsigned long long)csr_rd_node(xi.node, CVMX_PKO_MACX_CFG(mac_num)));
+
+	return 0;
+}
+
+/** Set Descriptor Queue options
+ *
+ * The `min_pad` parameter must be in agreement with the interface-level
+ * padding option for all descriptor queues assigned to that particular
+ * interface/port.
+ *
+ * @param node on which to operate
+ * @param dq descriptor queue to set; masked to the valid 10-bit range
+ * @param min_pad minimum padding to set for dq
+ */
+void cvmx_pko3_dq_options(unsigned int node, unsigned int dq, bool min_pad)
+{
+	cvmx_pko_pdm_dqx_minpad_t reg;
+
+	/* DQ numbers are 10 bits wide; strip any extra bits from the caller */
+	dq &= (1 << 10) - 1;
+	reg.u64 = csr_rd_node(node, CVMX_PKO_PDM_DQX_MINPAD(dq));
+	reg.s.minpad = min_pad;
+	csr_wr_node(node, CVMX_PKO_PDM_DQX_MINPAD(dq), reg.u64);
+}
+
+/**
+ * Get number of PKO internal buffers available
+ *
+ * This function may be used to throttle output processing
+ * when the PKO runs out of internal buffers, to avoid discarding
+ * of packets or returning error results from transmission function.
+ *
+ * Returns negative numbers on error, a positive number to indicate the
+ * number of buffers available, or 0 when no more buffers are available.
+ *
+ * @INTERNAL
+ */
+int cvmx_pko3_internal_buffer_count(unsigned int node)
+{
+	cvmx_pko_dpfi_fpa_aura_t pko_aura;
+	unsigned int laura, pool;
+	long long avail1, avail2;
+
+	/* get the aura number in pko, use aura node from parameter */
+	pko_aura.u64 = csr_rd_node(node, CVMX_PKO_DPFI_FPA_AURA);
+	laura = pko_aura.s.laura;
+
+	/* from here on, node is the AURA node */
+	node = pko_aura.s.node;
+
+	/* get the POOL number for this AURA */
+	pool = csr_rd_node(node, CVMX_FPA_AURAX_POOL(laura));
+
+	/* Buffers available in the backing pool */
+	avail1 = csr_rd_node(node, CVMX_FPA_POOLX_AVAILABLE(pool));
+
+	/* Remaining allocation headroom of the AURA itself */
+	avail2 = csr_rd_node(node, CVMX_FPA_AURAX_CNT_LIMIT(laura)) -
+		 csr_rd_node(node, CVMX_FPA_AURAX_CNT(laura));
+
+	/*
+	 * The effective count is the smaller of the two.
+	 * NOTE(review): 64-bit value is returned as int — assumed to fit.
+	 */
+	if (avail1 < avail2)
+		return avail1;
+
+	return avail2;
+}
+
+/**
+ * @INTERNAL
+ *
+ * Get actual PKO FIFO buffer size for a given port
+ *
+ * Since the FIFOs are allocated dynamically based on supply/demand
+ * heuristics, it may be useful in some instances to know the
+ * actual FIFO size allocated to any specific port at run time.
+ *
+ * @param xiface global interface number
+ * @param index port index on interface
+ * @return Returns the per-port FIFO size in bytes, or 0 if the port
+ * has not been configured, or a negative number if the interface and
+ * index numbers are not valid.
+ */
+int cvmx_pko3_port_fifo_size(unsigned int xiface, unsigned int index)
+{
+	unsigned int node;
+	unsigned int mac_num;
+	unsigned int fifo_grp, fifo_off;
+	cvmx_pko_macx_cfg_t pko_mac_cfg;
+	cvmx_pko_ptgfx_cfg_t pko_ptgfx_cfg;
+	cvmx_xiface_t xi = cvmx_helper_xiface_to_node_interface(xiface);
+	int ret;
+
+	node = xi.node;
+	ret = __cvmx_pko3_get_mac_num(xiface, index);
+	if (ret < 0)
+		return ret;
+
+	if (debug)
+		debug("%s: iface=%u:%u/%u mac %d\n", __func__, xi.node,
+		      xi.interface, index, ret);
+
+	mac_num = ret;
+
+	/* Check for the special value on unused MACs */
+	if (mac_num == 0x1f)
+		return 0;
+
+	pko_mac_cfg.u64 = csr_rd_node(node, CVMX_PKO_MACX_CFG(mac_num));
+
+	/* FIFOs come in groups of 4: find the group and the slot within it */
+	fifo_grp = pko_mac_cfg.s.fifo_num >> 2;
+	fifo_off = pko_mac_cfg.s.fifo_num & 0x3;
+	pko_ptgfx_cfg.u64 = csr_rd_node(node, CVMX_PKO_PTGFX_CFG(fifo_grp));
+
+	ret = (2 << 10) + (1 << 9); /* set 2.5KBytes base FIFO size */
+
+	/* The group SIZE field encodes how 10 KB is split among 4 slots */
+	switch (pko_ptgfx_cfg.s.size) {
+	case 0:
+		/* 2.5k, 2.5k, 2.5k, 2.5k */
+		break;
+	case 1:
+		/* 5.0k, 0.0k, 2.5k, 2.5k */
+		if (fifo_off == 1)
+			ret = 0;
+		if (fifo_off == 0)
+			ret *= 2;
+		break;
+	case 2:
+		/* 2.5k, 2.5k, 5.0k, 0.0k */
+		if (fifo_off == 3)
+			ret = 0;
+		if (fifo_off == 2)
+			ret *= 2;
+		break;
+	case 3:
+		/* 5k, 0, 5k, 0 */
+		if ((fifo_off & 1) != 0)
+			ret = 0;
+		ret *= 2;
+		break;
+	case 4:
+		/* 10k, 0, 0, 0 */
+		if (fifo_off != 0)
+			ret = 0;
+		ret *= 4;
+		break;
+	default:
+		ret = -1;
+	}
+	return ret;
+}
+
+/**
+ * @INTERNAL
+ *
+ * Stop an interface port transmission and wait until its FIFO is empty.
+ *
+ * @param xiface global interface number
+ * @param index port index on the interface
+ * @return 0 on success (or when the port has no MAC assigned), a negative
+ * value when the interface/port is invalid, or -1 when no L1 port queue
+ * is linked to the port's MAC.
+ */
+int cvmx_pko3_port_xoff(unsigned int xiface, unsigned int index)
+{
+	cvmx_pko_l1_sqx_topology_t pko_l1_topology;
+	cvmx_pko_l1_sqx_sw_xoff_t pko_l1_xoff;
+	cvmx_pko_ptfx_status_t pko_ptfx_status;
+	cvmx_pko_macx_cfg_t pko_mac_cfg;
+	cvmx_pko_mci1_cred_cntx_t cred_cnt;
+	unsigned int node, pq, num_pq, mac_num, fifo_num;
+	int ret;
+	cvmx_xiface_t xi = cvmx_helper_xiface_to_node_interface(xiface);
+
+	node = xi.node;
+	ret = __cvmx_pko3_get_mac_num(xiface, index);
+
+	if (debug)
+		debug("%s: iface=%u:%u/%u mac %d\n", __func__, xi.node,
+		      xi.interface, index, ret);
+
+	if (ret < 0)
+		return ret;
+
+	mac_num = ret;
+
+	/* 0x1f marks an unused MAC: nothing to stop */
+	if (mac_num == 0x1f)
+		return 0;
+
+	pko_mac_cfg.u64 = csr_rd_node(node, CVMX_PKO_MACX_CFG(mac_num));
+	fifo_num = pko_mac_cfg.s.fifo_num;
+
+	/* Verify the FIFO number is correct */
+	pko_ptfx_status.u64 = csr_rd_node(node, CVMX_PKO_PTFX_STATUS(fifo_num));
+
+	if (debug)
+		debug("%s: mac %d fifo %d, fifo mac %d\n", __func__, mac_num,
+		      fifo_num, pko_ptfx_status.s.mac_num);
+
+	cvmx_warn_if(pko_ptfx_status.s.mac_num != mac_num,
+		     "PKO3 FIFO number does not match MAC\n");
+
+	num_pq = cvmx_pko3_num_level_queues(CVMX_PKO_PORT_QUEUES);
+	/* Find the L1/PQ connected to the MAC for this interface */
+	for (pq = 0; pq < num_pq; pq++) {
+		pko_l1_topology.u64 =
+			csr_rd_node(node, CVMX_PKO_L1_SQX_TOPOLOGY(pq));
+		if (pko_l1_topology.s.link == mac_num)
+			break;
+	}
+
+	if (debug)
+		debug("%s: L1_PQ%u LINK %d MAC_NUM %d\n", __func__, pq,
+		      pko_l1_topology.s.link, mac_num);
+
+	/* No L1 queue feeds this MAC */
+	if (pq >= num_pq)
+		return -1;
+
+	if (debug) {
+		pko_ptfx_status.u64 =
+			csr_rd_node(node, CVMX_PKO_PTFX_STATUS(fifo_num));
+		ret = pko_ptfx_status.s.in_flight_cnt;
+		debug("%s: FIFO %d in-flight %d packets\n", __func__, fifo_num,
+		      ret);
+	}
+
+	/* Turn the XOFF bit on */
+	pko_l1_xoff.u64 = csr_rd_node(node, CVMX_PKO_L1_SQX_SW_XOFF(pq));
+	pko_l1_xoff.s.xoff = 1;
+	csr_wr_node(node, CVMX_PKO_L1_SQX_SW_XOFF(pq), pko_l1_xoff.u64);
+
+	/* Bounded poll: up to 2^22 iterations */
+	ret = 1 << 22;
+	/* Wait for PKO TX FIFO to drain */
+	do {
+		CVMX_SYNC;
+		pko_ptfx_status.u64 =
+			csr_rd_node(node, CVMX_PKO_PTFX_STATUS(fifo_num));
+	} while (pko_ptfx_status.s.in_flight_cnt != 0 && ret--);
+
+	if (pko_ptfx_status.s.in_flight_cnt != 0)
+		cvmx_warn("%s: FIFO %d failed to drain\n", __func__, fifo_num);
+
+	if (debug)
+		debug("%s: FIFO %d drained in %d cycles\n", __func__, fifo_num,
+		      (1 << 22) - ret);
+
+	/* Wait for MAC TX FIFO to drain. */
+	/* NOTE(review): no timeout here — hangs if credits never return */
+	do {
+		cred_cnt.u64 =
+			csr_rd_node(node, CVMX_PKO_MCI1_CRED_CNTX(mac_num));
+	} while (cred_cnt.s.cred_cnt != 0);
+
+	return 0;
+}
+
+/**
+ * @INTERNAL
+ *
+ * Resume transmission on an interface port.
+ *
+ * @param xiface global interface number
+ * @param index port index on the interface
+ * @return the previous XOFF state (0 or 1) on success (0 also when the
+ * port has no MAC assigned), a negative value when the interface/port
+ * is invalid, or -1 when no L1 port queue is linked to the port's MAC.
+ */
+int cvmx_pko3_port_xon(unsigned int xiface, unsigned int index)
+{
+	cvmx_pko_l1_sqx_topology_t pko_l1_topology;
+	cvmx_pko_l1_sqx_sw_xoff_t pko_l1_xoff;
+	unsigned int node, pq, num_pq, mac_num;
+	int ret;
+	cvmx_xiface_t xi = cvmx_helper_xiface_to_node_interface(xiface);
+
+	num_pq = cvmx_pko3_num_level_queues(CVMX_PKO_PORT_QUEUES);
+	node = xi.node;
+	ret = __cvmx_pko3_get_mac_num(xiface, index);
+
+	if (debug)
+		debug("%s: iface=%u:%u/%u mac %d\n", __func__, xi.node,
+		      xi.interface, index, ret);
+
+	if (ret < 0)
+		return ret;
+
+	mac_num = ret;
+
+	/* 0x1f marks an unused MAC: nothing to resume */
+	if (mac_num == 0x1f)
+		return 0;
+
+	/* Find the L1/PQ connected to the MAC for this interface */
+	for (pq = 0; pq < num_pq; pq++) {
+		pko_l1_topology.u64 =
+			csr_rd_node(node, CVMX_PKO_L1_SQX_TOPOLOGY(pq));
+		if (pko_l1_topology.s.link == mac_num)
+			break;
+	}
+
+	if (debug)
+		debug("%s: L1_PQ%u LINK %d MAC_NUM %d\n", __func__, pq,
+		      pko_l1_topology.s.link, mac_num);
+
+	/* No L1 queue feeds this MAC */
+	if (pq >= num_pq)
+		return -1;
+
+	/* Turn the XOFF bit off, remembering its previous state */
+	pko_l1_xoff.u64 = csr_rd_node(node, CVMX_PKO_L1_SQX_SW_XOFF(pq));
+	ret = pko_l1_xoff.s.xoff;
+	pko_l1_xoff.s.xoff = 0;
+	csr_wr_node(node, CVMX_PKO_L1_SQX_SW_XOFF(pq), pko_l1_xoff.u64);
+
+	return ret;
+}
+
+/******************************************************************************
+ * New PKO3 API - Experimental
+ ******************************************************************************/
+
+/**
+ * Initialize packet descriptor
+ *
+ * Descriptor storage is provided by the caller,
+ * use this function to initialize the descriptor to a known
+ * empty state.
+ *
+ * @param pdesc Packet Descriptor.
+ *
+ * Do not use this function when creating a descriptor from a
+ * Work Queue Entry.
+ *
+ * The default setting of the 'free_bufs' attribute is 'false'.
+ */
+void cvmx_pko3_pdesc_init(cvmx_pko3_pdesc_t *pdesc)
+{
+	cvmx_pko_send_aura_t *ext_s;
+
+	memset(pdesc, 0, sizeof(*pdesc));
+
+	/* Start with HDR_S and HDR_EXT_S in first two words, all 0's */
+	pdesc->num_words = 2;
+
+	/* word[0] holds HDR_S, word[1] holds EXT_S */
+	pdesc->hdr_s = (void *)&pdesc->word[0];
+	ext_s = (void *)&pdesc->word[1];
+	ext_s->s.subdc4 = CVMX_PKO_SENDSUBDC_EXT;
+
+	/* -1 means "no aura referenced yet" */
+	pdesc->last_aura = -1;
+	pdesc->jb_aura = -1;
+
+	/* Empty packets, can not decode header offsets (yet) */
+	pdesc->hdr_offsets = 1;
+}
+
+/**
+ * @INTERNAL
+ *
+ * Add arbitrary subcommand to a packet descriptor.
+ *
+ * This function will also allocate a jump buffer when
+ * the primary LMTDMA buffer is exhausted.
+ * The jump buffer is allocated from the internal PKO3 aura
+ * on the node where this function is running.
+ *
+ * @param pdesc Packet Descriptor to extend.
+ * @param subdc 64-bit encoded subcommand word to append.
+ * @return total number of command words on success, or a negative
+ * errno value (-E2BIG, -EINVAL) on failure.
+ */
+static int cvmx_pko3_pdesc_subdc_add(cvmx_pko3_pdesc_t *pdesc, uint64_t subdc)
+{
+	cvmx_pko_send_hdr_t *hdr_s;
+	cvmx_pko_send_aura_t *ext_s;
+	cvmx_pko_buf_ptr_t *jump_s;
+	/* 4 KiB jump buffer holds 512 command words */
+	const unsigned int jump_buf_size = 4 * 1024 / sizeof(uint64_t);
+	unsigned int i;
+
+	/* Simple handling while fitting the command buffer */
+	if (cvmx_likely(pdesc->num_words < 15 && !pdesc->jump_buf)) {
+		pdesc->word[pdesc->num_words] = subdc;
+		pdesc->num_words++;
+		return pdesc->num_words;
+	}
+
+	/* SEND_JUMP_S missing on Pass1 */
+	if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X)) {
+		cvmx_printf("%s: ERROR: too many segments\n", __func__);
+		return -E2BIG;
+	}
+
+	hdr_s = (void *)&pdesc->word[0];
+	ext_s = (void *)&pdesc->word[1];
+
+	/* Allocate jump buffer */
+	if (cvmx_unlikely(!pdesc->jump_buf)) {
+		u16 pko_gaura;
+		cvmx_fpa3_gaura_t aura;
+		unsigned int fpa_node = cvmx_get_node_num();
+
+		/* Allocate jump buffer from PKO internal FPA AURA, size=4KiB */
+		pko_gaura = __cvmx_pko3_aura_get(fpa_node);
+		aura = __cvmx_fpa3_gaura(pko_gaura >> 10, pko_gaura & 0x3ff);
+
+		pdesc->jump_buf = cvmx_fpa3_alloc(aura);
+		if (!pdesc->jump_buf)
+			return -EINVAL;
+
+		/* Save the JB aura for later */
+		pdesc->jb_aura = pko_gaura;
+
+		/* Move most of the command to the jump buffer */
+		memcpy(pdesc->jump_buf, &pdesc->word[2],
+		       (pdesc->num_words - 2) * sizeof(uint64_t));
+		jump_s = (void *)&pdesc->word[2];
+		jump_s->u64 = 0;
+		jump_s->s.addr = cvmx_ptr_to_phys(pdesc->jump_buf);
+		jump_s->s.i = !hdr_s->s.df; /* F= ~DF */
+		jump_s->s.size = pdesc->num_words - 2;
+		jump_s->s.subdc3 = CVMX_PKO_SENDSUBDC_JUMP;
+
+		/* Now the LMTDMA buffer has only HDR_S, EXT_S, JUMP_S */
+		pdesc->num_words = 3;
+	}
+
+	/* Add the new subcommand to the jump buffer */
+	jump_s = (void *)&pdesc->word[2];
+	i = jump_s->s.size;
+
+	/* Avoid overrunning jump buffer */
+	if (i >= (jump_buf_size - 2)) {
+		cvmx_printf("%s: ERROR: too many segments\n", __func__);
+		return -E2BIG;
+	}
+
+	pdesc->jump_buf[i] = subdc;
+	jump_s->s.size++;
+
+	/* ext_s is unused in this path; reference it to silence warnings */
+	(void)ext_s;
+
+	return (i + pdesc->num_words);
+}
+
+/**
+ * Send a packet in a descriptor to an output port via an output queue.
+ *
+ * A call to this function must follow all other functions that
+ * create a packet descriptor from WQE, or after initializing an
+ * empty descriptor and filling it with one or more data fragments.
+ * After this function is called, the content of the packet descriptor
+ * can no longer be used, and are undefined.
+ *
+ * @param pdesc Packet Descriptor.
+ * @param dq Descriptor Queue associated with the desired output port
+ * @param tag Flow Tag pointer for packet ordering or NULL
+ * @return Returns 0 on success, -1 on error.
+ *
+ */
+int cvmx_pko3_pdesc_transmit(cvmx_pko3_pdesc_t *pdesc, uint16_t dq,
+			     uint32_t *tag)
+{
+	cvmx_pko_query_rtn_t pko_status;
+	cvmx_pko_send_aura_t aura_s;
+	u8 port_node;
+	int rc;
+
+	/* Add last AURA_S for jump_buf, if present */
+	if (cvmx_unlikely(pdesc->jump_buf) &&
+	    pdesc->last_aura != pdesc->jb_aura) {
+		/* The last AURA_S subdc refers to the jump_buf itself */
+		/* Clear reserved bits first: the union is on the stack */
+		aura_s.u64 = 0;
+		aura_s.s.aura = pdesc->jb_aura;
+		aura_s.s.offset = 0;
+		aura_s.s.alg = AURAALG_NOP;
+		aura_s.s.subdc4 = CVMX_PKO_SENDSUBDC_AURA;
+		pdesc->last_aura = pdesc->jb_aura;
+
+		rc = cvmx_pko3_pdesc_subdc_add(pdesc, aura_s.u64);
+		if (rc < 0)
+			return -1;
+	}
+
+	/* SEND_WORK_S must be the very last subdc */
+	if (cvmx_unlikely(pdesc->send_work_s != 0ULL)) {
+		rc = cvmx_pko3_pdesc_subdc_add(pdesc, pdesc->send_work_s);
+		if (rc < 0)
+			return -1;
+		pdesc->send_work_s = 0ULL;
+	}
+
+	/* Derive destination node from dq */
+	port_node = dq >> 10;
+	dq &= (1 << 10) - 1;
+
+	/* To preserve packet order, go atomic with DQ-specific tag */
+	if (tag)
+		cvmx_pow_tag_sw(*tag ^ dq, CVMX_POW_TAG_TYPE_ATOMIC);
+
+	/* Send the PKO3 command into the Descriptor Queue */
+	pko_status = __cvmx_pko3_do_dma(port_node, dq, pdesc->word,
+					pdesc->num_words, CVMX_PKO_DQ_SEND);
+
+	/* Map PKO3 result codes to legacy return values */
+	if (pko_status.s.dqstatus == PKO_DQSTATUS_PASS)
+		return 0;
+
+	return -1;
+}
+
+/**
+ * Append a SEND_FREE_S subcommand to release a buffer on completion
+ *
+ * The buffer at 'addr' is returned to the FPA aura 'gaura' when the
+ * packet transmission completes. If 'gaura' differs from the aura last
+ * referenced in this descriptor, an AURA_S subcommand is inserted first
+ * to switch the active aura.
+ *
+ * @param pdesc Packet Descriptor.
+ * @param addr Physical address of the buffer to free.
+ * @param gaura Global FPA aura the buffer was allocated from.
+ * @return subcommand count from cvmx_pko3_pdesc_subdc_add() on success,
+ * -1 or a negative errno on failure.
+ */
+int cvmx_pko3_pdesc_append_free(cvmx_pko3_pdesc_t *pdesc, uint64_t addr,
+				unsigned int gaura)
+{
+	cvmx_pko_send_hdr_t *hdr_s;
+	cvmx_pko_send_free_t free_s;
+	cvmx_pko_send_aura_t aura_s;
+
+	hdr_s = (void *)&pdesc->word[0];
+
+	if (pdesc->last_aura == -1) {
+		/* First aura referenced: record it directly in HDR_S */
+		hdr_s->s.aura = gaura;
+		pdesc->last_aura = hdr_s->s.aura;
+	} else if (pdesc->last_aura != (short)gaura) {
+		/* Clear reserved bits before building the AURA_S subdc */
+		aura_s.u64 = 0;
+		aura_s.s.aura = gaura;
+		aura_s.s.offset = 0;
+		aura_s.s.alg = AURAALG_NOP;
+		aura_s.s.subdc4 = CVMX_PKO_SENDSUBDC_AURA;
+		pdesc->last_aura = gaura;
+		if (cvmx_pko3_pdesc_subdc_add(pdesc, aura_s.u64) < 0)
+			return -1;
+	}
+
+	free_s.u64 = 0;
+	free_s.s.subdc4 = CVMX_PKO_SENDSUBDC_FREE;
+	free_s.s.addr = addr;
+
+	return cvmx_pko3_pdesc_subdc_add(pdesc, free_s.u64);
+}
+
+/**
+ * Append a packet segment to a packet descriptor
+ *
+ * After a packet descriptor is initialized, one or more
+ * packet data segments can be added to the packet,
+ * in the order in which they should be transmitted.
+ *
+ * The size of the resulting packet will be equal to the
+ * sum of the segments appended by this function.
+ * Every segment may be contained in a buffer that belongs
+ * to a different FPA 'aura', and may be automatically
+ * released back to that aura, if required.
+ *
+ * @param pdesc Packet Descriptor.
+ * @param p_data Address of the segment first byte (virtual).
+ * @param data_bytes Size of the data segment (in bytes).
+ * @param gaura A global FPA 'aura' where the packet buffer was allocated from.
+ *
+ * The 'gaura' parameter contains the node number where the buffer pool
+ * is located, and has only a meaning if the 'free_buf' argument is 'true'.
+ * The buffer being added will be automatically freed upon transmission
+ * along with all other buffers in this descriptor, or not, depending
+ * on the descriptor 'free_bufs' attribute that is set during
+ * descriptor creation, or changed subsequently with a call to
+ * 'cvmx_pko3_pdesc_set_free()'.
+ *
+ * @return Returns 0 on success, -1 on error.
+ */
+int cvmx_pko3_pdesc_buf_append(cvmx_pko3_pdesc_t *pdesc, void *p_data,
+			       unsigned int data_bytes, unsigned int gaura)
+{
+	cvmx_pko_send_hdr_t *hdr_s;
+	cvmx_pko_buf_ptr_t gather_s;
+	cvmx_pko_send_aura_t aura_s;
+	int rc;
+
+	/* Data subcommands must all precede any SEND_MEM_S subcommand */
+	if (pdesc->mem_s_ix > 0) {
+		cvmx_printf("ERROR: %s: subcommand restriction violated\n",
+			    __func__);
+		return -1;
+	}
+
+	hdr_s = (void *)&pdesc->word[0];
+
+	if (gaura != (unsigned int)-1) {
+		if (pdesc->last_aura == -1) {
+			unsigned int buf_sz = 128;
+
+			/* First mbuf, calculate headroom */
+			cvmx_fpa3_gaura_t aura;
+
+			aura = __cvmx_fpa3_gaura(gaura >> 10, gaura & 0x3ff);
+			buf_sz = cvmx_fpa3_get_aura_buf_size(aura);
+			pdesc->headroom = (unsigned long)p_data & (buf_sz - 1);
+			hdr_s->s.aura = gaura;
+			pdesc->last_aura = hdr_s->s.aura;
+		} else if (pdesc->last_aura != (short)gaura) {
+			/* Clear reserved bits before building AURA_S subdc */
+			aura_s.u64 = 0;
+			aura_s.s.aura = gaura;
+			aura_s.s.offset = 0;
+			aura_s.s.alg = AURAALG_NOP;
+			aura_s.s.subdc4 = CVMX_PKO_SENDSUBDC_AURA;
+			pdesc->last_aura = gaura;
+
+			rc = cvmx_pko3_pdesc_subdc_add(pdesc, aura_s.u64);
+			if (rc < 0)
+				return -1;
+		}
+	}
+
+	/* Build the GATHER_S subcommand for this segment */
+	gather_s.u64 = 0;
+	gather_s.s.addr = cvmx_ptr_to_phys(p_data);
+	gather_s.s.size = data_bytes;
+	hdr_s->s.total += data_bytes;
+	gather_s.s.i = 0; /* follow HDR_S[DF] setting */
+	gather_s.s.subdc3 = CVMX_PKO_SENDSUBDC_GATHER;
+
+	rc = cvmx_pko3_pdesc_subdc_add(pdesc, gather_s.u64);
+	if (rc < 0)
+		return -1;
+
+	return rc;
+}
+
+/**
+ * Add a Work Entry for packet transmission notification
+ *
+ * Add a subcommand to notify of packet transmission completion
+ * via a Work Queue entry over the SSO.
+ * The Work Queue entry may be a 'software' event, or the content
+ * of a packet.
+ *
+ * @param pdesc Packet Descriptor, memory provided by caller.
+ * @param wqe Work Queue Entry in a model-native format.
+ * @param node The OCI node of the SSO where the WQE will be delivered.
+ * @param group The SSO group where the WQE is delivered.
+ * @param tt The SSO Tag Type for the WQE. If tt is not NULL, tag should be a
+ * valid tag value.
+ * @param tag Valid tag value to assign to WQE
+ *
+ * @return Returns 0 on success, -1 on error.
+ *
+ * Restrictions:
+ * There can be only one such notification per packet descriptor,
+ * but this function may be called at any time after the descriptor
+ * is first created from WQE or initialized, and before
+ * starting transmission.
+ *
+ */
+int cvmx_pko3_pdesc_notify_wqe(cvmx_pko3_pdesc_t *pdesc, cvmx_wqe_78xx_t *wqe,
+			       u8 node, u8 group, uint8_t tt, uint32_t tag)
+{
+	cvmx_pko_send_work_t work_s;
+
+	/*
+	 * There can be only one SEND_WORK_S entry in the command
+	 * and it must be the very last subcommand
+	 */
+	if (pdesc->send_work_s != 0) {
+		cvmx_printf("ERROR: %s: Only one SEND_WORK_S is allowed\n",
+			    __func__);
+		return -1;
+	}
+
+	/* Build the SEND_WORK_S subcommand */
+	work_s.u64 = 0;
+	work_s.s.subdc4 = CVMX_PKO_SENDSUBDC_WORK;
+	work_s.s.addr = cvmx_ptr_to_phys(wqe);
+	work_s.s.grp = (group & 0xff) | (node << 8);
+	work_s.s.tt = tt;
+
+	/* Pre-fill the WQE word1 with the tag/group the SSO will use */
+	wqe->word1.rsvd_0 = 0;
+	wqe->word1.rsvd_1 = 0;
+	wqe->word1.tag = tag;
+	wqe->word1.tag_type = tt;
+	wqe->word1.grp = work_s.s.grp;
+
+	/* Store in descriptor for now, apply just before LMTDMA-ing */
+	pdesc->send_work_s = work_s.u64;
+
+	return 0;
+}
+
+/**
+ * Request atomic memory decrement at transmission completion
+ *
+ * Each packet descriptor may contain several decrement notification
+ * requests, but these requests must only be made after all of the
+ * packet data segments have been added, and before packet transmission
+ * commences.
+ *
+ * Only decrement of a 64-bit memory location is supported.
+ *
+ * @param pdesc Packet Descriptor.
+ * @param p_counter A pointer to an atomic 64-bit memory location.
+ *
+ * @return Returns 0 on success, -1 on failure.
+ */
+int cvmx_pko3_pdesc_notify_decrement(cvmx_pko3_pdesc_t *pdesc,
+				     volatile uint64_t *p_counter)
+{
+	int rc;
+	/* 64-bit decrement is the only supported operation */
+	cvmx_pko_send_mem_t mem_s = {
+		.s = { .subdc4 = CVMX_PKO_SENDSUBDC_MEM,
+		       .dsz = MEMDSZ_B64,
+		       .alg = MEMALG_SUB,
+		       .offset = 1,
+#ifdef _NOT_IN_SIM_
+		       /* Enforce MEM before SSO submission if both present */
+		       .wmem = 1
+#endif
+		}
+	};
+
+	mem_s.s.addr = cvmx_ptr_to_phys(CASTPTR(void, p_counter));
+
+	rc = cvmx_pko3_pdesc_subdc_add(pdesc, mem_s.u64);
+
+	/*
+	 * SEND_MEM_S must be after all LINK_S/FATHER_S/IMM_S
+	 * subcommands, set the index to prevent further data
+	 * subcommands.
+	 */
+	if (rc > 0)
+		pdesc->mem_s_ix = rc;
+
+	return rc;
+}
+
+/**
+ * Request atomic memory clear at transmission completion
+ *
+ * Each packet descriptor may contain several notification
+ * requests, but these requests must only be made after all of the
+ * packet data segments have been added, and before packet transmission
+ * commences.
+ *
+ * Clearing of a single byte is requested by this function.
+ *
+ * @param pdesc Packet Descriptor.
+ * @param p_mem A pointer to a byte location.
+ *
+ * @return Returns 0 on success, -1 on failure.
+ */
int cvmx_pko3_pdesc_notify_memclr(cvmx_pko3_pdesc_t *pdesc,
				  volatile uint8_t *p_mem)
{
	int rc;
	/* Single-byte clear: an 8-bit SET (with offset 0) is the only
	 * operation used here
	 */
	cvmx_pko_send_mem_t mem_s = { .s = {
					      .subdc4 = CVMX_PKO_SENDSUBDC_MEM,
					      .dsz = MEMDSZ_B8,
					      .alg = MEMALG_SET,
					      .offset = 0,
				      } };

	/* Target the caller-supplied byte location */
	mem_s.s.addr = cvmx_ptr_to_phys(CASTPTR(void, p_mem));

	rc = cvmx_pko3_pdesc_subdc_add(pdesc, mem_s.u64);

	/*
	 * SEND_MEM_S must be after all LINK_S/FATHER_S/IMM_S
	 * subcommands, set the index to prevent further data
	 * subcommands.
	 */
	if (rc > 0)
		pdesc->mem_s_ix = rc;

	return rc;
}
+
+/**
+ * @INTERNAL
+ *
+ * Decode packet header and calculate protocol header offsets
+ *
+ * The protocol information and layer offset is derived
+ * from the results of decoding done by the PKI,
+ * and the appropriate PKO fields are filled.
+ *
+ * The function assumes the headers have not been modified
+ * since converted from WQE, and does not (yet) implement
+ * software-based decoding to handle modified or originated
+ * packets correctly.
+ *
+ * @note
+ * Need to add simple accessors to read the decoded protocol fields.
+ */
static int cvmx_pko3_pdesc_hdr_offsets(cvmx_pko3_pdesc_t *pdesc)
{
	cvmx_pko_send_hdr_t *hdr_s;

	/* Already decoded once for this descriptor */
	if (pdesc->hdr_offsets)
		return 0;

	/* Without PKI WORD4 there are no layer pointers to copy */
	if (!pdesc->pki_word4_present)
		return -EINVAL;

	hdr_s = (void *)&pdesc->word[0];
	pdesc->hdr_s = hdr_s;

	/* Match IPv4/IPv6 protocols with/without options */
	/* NOTE(review): mask 0x1c differs from the 0x1e mask used in
	 * cvmx_pko3_pdesc_hdr_push() for the same LTYPE test — confirm
	 * which grouping of LTYPE values is intended
	 */
	if ((pdesc->pki_word2.lc_hdr_type & 0x1c) == CVMX_PKI_LTYPE_E_IP4) {
		hdr_s->s.l3ptr = pdesc->pki_word4.ptr_layer_c;

		/* Match TCP/UDP/SCTP group */
		if ((pdesc->pki_word2.lf_hdr_type & 0x18) ==
		    CVMX_PKI_LTYPE_E_TCP)
			hdr_s->s.l4ptr = pdesc->pki_word4.ptr_layer_f;

		/* Select the L4 checksum algorithm from the exact LTYPE */
		if (pdesc->pki_word2.lf_hdr_type == CVMX_PKI_LTYPE_E_UDP)
			pdesc->ckl4_alg = CKL4ALG_UDP;
		if (pdesc->pki_word2.lf_hdr_type == CVMX_PKI_LTYPE_E_TCP)
			pdesc->ckl4_alg = CKL4ALG_TCP;
		if (pdesc->pki_word2.lf_hdr_type == CVMX_PKI_LTYPE_E_SCTP)
			pdesc->ckl4_alg = CKL4ALG_SCTP;
	}
	/* May need to add logic for ARP, IPfrag packets here */

	pdesc->hdr_offsets = 1; /* make sure its done once */
	return 0;
}
+
/*
 * @INTERNAL
 *
 * Copy a byte region where both the source and the destination hold
 * data in reverse endianness with respect to the native byte order.
 */
static void memcpy_swap(void *dst, const void *src, unsigned int bytes)
{
	/* XOR with 7 maps an offset to its 64-bit byte-swapped position */
	const unsigned int flip = 0x7;
	unsigned char *to = dst;
	const unsigned char *from = src;
	unsigned int n;

	for (n = 0; n < bytes; n++)
		to[n ^ flip] = from[n ^ flip];
}
+
/*
 * @INTERNAL
 *
 * Swizzling copy: source bytes are in reverse endianness, destination
 * receives them in native byte order.
 */
static void memcpy_from_swap(void *dst, const void *src, unsigned int bytes)
{
	/* XOR with 7 maps an offset to its 64-bit byte-swapped position */
	const unsigned int flip = 0x7;
	unsigned char *to = dst;
	const unsigned char *from = src;
	unsigned int n;

	for (n = 0; n < bytes; n++)
		to[n] = from[n ^ flip];
}
+
/*
 * @INTERNAL
 *
 * Swizzling copy: source bytes are in native byte order, destination
 * receives them in reverse endianness.
 */
static void memcpy_to_swap(void *dst, const void *src, unsigned int bytes)
{
	/* XOR with 7 maps an offset to its 64-bit byte-swapped position */
	const unsigned int flip = 0x7;
	unsigned char *to = dst;
	const unsigned char *from = src;
	unsigned int n;

	for (n = 0; n < bytes; n++)
		to[n ^ flip] = from[n];
}
+
+/**
+ * Prepend a data segment to the packet descriptor
+ *
+ * Useful for pushing additional headers
+ *
+ * The initial implementation is confined by the size of the
+ * "headroom" in the first packet buffer attached to the descriptor.
+ * Future version may prepend additional buffers when this head room
+ * is insufficient, but currently will return -1 when headroom is
+ * insufficient.
+ *
+ * On success, the function returns the remaining headroom in the buffer.
+ *
+ */
int cvmx_pko3_pdesc_hdr_push(cvmx_pko3_pdesc_t *pdesc, const void *p_data,
			     u8 data_bytes, uint8_t layer)
{
	cvmx_pko_send_hdr_t *hdr_s;
	cvmx_pko_buf_ptr_t *gather_s;
	short headroom;
	void *p; /* old data location */
	void *q; /* new data location */
	bool endian_swap;

	headroom = pdesc->headroom;

	/* Refuse if the prepended header does not fit in the headroom */
	if ((short)data_bytes > headroom)
		return -ENOSPC;

	hdr_s = (void *)&pdesc->word[0];
	endian_swap = (hdr_s->s.le != __native_le);

	/* Get GATHER_S/LINK_S subcommand location */
	if (cvmx_likely(!pdesc->jump_buf))
		/* Without JB, first data buf is in 3rd command word */
		gather_s = (void *)&pdesc->word[2];
	else
		/* With JB, its first word is the first buffer */
		gather_s = (void *)pdesc->jump_buf;

	/* Verify the subcommand is of the expected type */
	if (cvmx_unlikely(gather_s->s.subdc3 != CVMX_PKO_SENDSUBDC_LINK &&
			  gather_s->s.subdc3 != CVMX_PKO_SENDSUBDC_GATHER))
		return -EINVAL;

	/* adjust address and size values */
	p = cvmx_phys_to_ptr(gather_s->s.addr);
	q = p - data_bytes;
	gather_s->s.addr -= data_bytes;
	gather_s->s.size += data_bytes;
	hdr_s->s.total += data_bytes;
	headroom -= data_bytes;

	/* Move link pointer if the descriptor is SEND_LINK_S */
	if (gather_s->s.subdc3 == CVMX_PKO_SENDSUBDC_LINK) {
		if (cvmx_likely(!endian_swap))
			memcpy(q - 8, p - 8, 8);
		else
			memcpy_swap(q - 8, p - 8, 8);
	}

	/* Copy the new header into the freshly opened space */
	if (cvmx_likely(!endian_swap))
		memcpy(q, p_data, data_bytes);
	else
		memcpy_to_swap(q, p_data, data_bytes);

	pdesc->headroom = headroom;

	/* Adjust higher level protocol header offset */
	cvmx_pko3_pdesc_hdr_offsets(pdesc);
	/* Layers at or above the inserted one shift further into the packet */
	if (layer <= 4)
		pdesc->hdr_s->s.l4ptr += data_bytes;

	if (layer <= 3)
		pdesc->hdr_s->s.l3ptr += data_bytes;

	if (layer >= 3) {
		/* Set CKL3 only for IPv4 */
		/* NOTE(review): mask 0x1e differs from the 0x1c mask in
		 * cvmx_pko3_pdesc_hdr_offsets() — confirm intended grouping
		 */
		if ((pdesc->pki_word2.lc_hdr_type & 0x1e) ==
		    CVMX_PKI_LTYPE_E_IP4)
			hdr_s->s.ckl3 = 1;
		hdr_s->s.ckl4 = pdesc->ckl4_alg;
	}

	return headroom;
}
+
+/**
+ * Remove some bytes from start of packet
+ *
+ * Useful for popping a header from a packet.
+ * It only needs to find the first segment, and adjust its address,
+ * as well as segment and total sizes.
+ *
+ * Returns new packet size, or a negative error code if the trimmed
+ * size exceeds what the packet or its first data segment can supply.
+ */
int cvmx_pko3_pdesc_hdr_pop(cvmx_pko3_pdesc_t *pdesc, void *hdr_buf,
			    unsigned int num_bytes)
{
	cvmx_pko_send_hdr_t *hdr_s;
	cvmx_pko_buf_ptr_t *gather_s;
	short headroom;
	void *p;
	void *q;
	bool endian_swap;

	headroom = pdesc->headroom;

	hdr_s = (void *)&pdesc->word[0];
	endian_swap = (hdr_s->s.le != __native_le);

	/* Cannot pop more than the whole packet */
	if (hdr_s->s.total < num_bytes)
		return -ENOSPC;

	/* Get GATHER_S/LINK_S subcommand location */
	if (cvmx_likely(!pdesc->jump_buf))
		/* Without JB, first data buf is in 3rd command word */
		gather_s = (void *)&pdesc->word[2];
	else
		/* With JB, its first word is the first buffer */
		gather_s = (void *)pdesc->jump_buf;

	/* Verify the subcommand is of the expected type */
	if (cvmx_unlikely(gather_s->s.subdc3 != CVMX_PKO_SENDSUBDC_LINK &&
			  gather_s->s.subdc3 != CVMX_PKO_SENDSUBDC_GATHER))
		return -EINVAL;

	/* Can't trim more than the content of the first buffer */
	if (gather_s->s.size < num_bytes)
		return -ENOMEM;

	/* adjust address and size values */
	p = cvmx_phys_to_ptr(gather_s->s.addr);
	q = p + num_bytes;
	gather_s->s.addr += num_bytes;
	gather_s->s.size -= num_bytes;
	hdr_s->s.total -= num_bytes;
	headroom += num_bytes;

	if (hdr_buf) {
		/* Retrieve popped header to user buffer */
		if (cvmx_likely(!endian_swap))
			memcpy(hdr_buf, p, num_bytes);
		else
			memcpy_from_swap(hdr_buf, p, num_bytes);
	}

	/* Move link pointer if the descriptor is SEND_LINK_S */
	if (gather_s->s.subdc3 == CVMX_PKO_SENDSUBDC_LINK) {
		if (cvmx_likely(!endian_swap))
			memcpy(q - 8, p - 8, 8);
		else
			memcpy_swap(q - 8, p - 8, 8);
	}

	pdesc->headroom = headroom;

	/* Adjust higher level protocol header offset */
	cvmx_pko3_pdesc_hdr_offsets(pdesc);
	if (num_bytes < pdesc->hdr_s->s.l3ptr) {
		/* Pop ends before L3: both layer offsets simply shift down */
		pdesc->hdr_s->s.l3ptr -= num_bytes;
		pdesc->hdr_s->s.l4ptr -= num_bytes;
	} else if (num_bytes < pdesc->hdr_s->s.l4ptr) {
		/* L3 header consumed by the pop, L4 still present */
		pdesc->hdr_s->s.l3ptr = 0;
		pdesc->hdr_s->s.l4ptr = 0 + pdesc->hdr_s->s.l4ptr - num_bytes;
	} else {
		/* Both L3 and L4 gone: disable L4 checksum generation */
		pdesc->hdr_s->s.l3ptr = 0;
		pdesc->hdr_s->s.l4ptr = 0;
		hdr_s->s.ckl4 = CKL4ALG_NONE;
	}

	return hdr_s->s.total;
}
+
/**
 * Peek into some header field of a packet
 *
 * Will return a number of bytes of packet header data at an arbitrary offset
 * which must reside within the first packet data buffer.
 *
 * @param pdesc Packet Descriptor.
 * @param hdr_buf Destination buffer for the peeked bytes (must not be NULL).
 * @param num_bytes Number of bytes requested.
 * @param offset Byte offset into the packet at which to start reading.
 *
 * @return Number of bytes actually copied (possibly truncated to the end
 * of the first buffer), or a negative error code.
 */
int cvmx_pko3_pdesc_hdr_peek(cvmx_pko3_pdesc_t *pdesc, void *hdr_buf,
			     unsigned int num_bytes, unsigned int offset)
{
	cvmx_pko_send_hdr_t *hdr_s;
	cvmx_pko_buf_ptr_t *gather_s;
	void *p;
	bool endian_swap;

	hdr_s = (void *)&pdesc->word[0];
	endian_swap = (hdr_s->s.le != __native_le);

	/* The requested window must lie within the packet */
	if (hdr_s->s.total < (num_bytes + offset))
		return -ENOSPC;

	/* Get GATHER_S/LINK_S subcommand location */
	if (cvmx_likely(!pdesc->jump_buf))
		/* Without JB, first data buf is in 3rd command word */
		gather_s = (void *)&pdesc->word[2];
	else
		/* With JB, its first word is the first buffer */
		gather_s = (void *)pdesc->jump_buf;

	/* Verify the subcommand is of the expected type */
	if (cvmx_unlikely(gather_s->s.subdc3 != CVMX_PKO_SENDSUBDC_LINK &&
			  gather_s->s.subdc3 != CVMX_PKO_SENDSUBDC_GATHER))
		return -EINVAL;

	/* Can't peek more than the content of the first buffer */
	if (gather_s->s.size <= offset)
		return -ENOMEM;
	if ((gather_s->s.size - offset) < num_bytes)
		num_bytes = gather_s->s.size - offset;

	/* adjust address */
	p = cvmx_phys_to_ptr(gather_s->s.addr) + offset;

	/* NOTE(review): this NULL check could run before any other work */
	if (!hdr_buf)
		return -EINVAL;

	/* Copy requested bytes */
	if (cvmx_likely(!endian_swap))
		memcpy(hdr_buf, p, num_bytes);
	else
		memcpy_from_swap(hdr_buf, p, num_bytes);

	return num_bytes;
}
+
+/**
+ * Set the packet descriptor automatic-free attribute
+ *
+ * Override the 'free_bufs' attribute that was set during
+ * packet descriptor creation, or by an earlier call to
+ * this function.
+ * Setting the 'buf_free' attribute to 'true' will cause
+ * the PKO3 to free all buffers associated with this packet
+ * descriptor to be released upon transmission complete.
+ * Setting this attribute to 'false' allows e.g. using the
+ * same descriptor to transmit a packet out of several ports
+ * with a minimum overhead.
+ */
void cvmx_pko3_pdesc_set_free(cvmx_pko3_pdesc_t *pdesc, bool free_bufs)
{
	cvmx_pko_send_hdr_t *hdr_s;
	cvmx_pko_buf_ptr_t *jump_s;

	hdr_s = (void *)&pdesc->word[0];
	/* DF ("don't free") is the inverse of the requested attribute */
	hdr_s->s.df = !free_bufs;

	if (cvmx_likely(!pdesc->jump_buf))
		return;
	/* NOTE(review): the I bit is set on the JUMP_S subcommand in
	 * word[2] (which points at jump_buf), not on the buffers inside
	 * the jump buffer — presumably this controls freeing of the jump
	 * buffer itself; confirm against the PKO3 JUMP_S definition
	 */
	jump_s = (void *)&pdesc->word[2];
	jump_s->s.i = free_bufs; /* F=free */
}
--
2.35.1
^ permalink raw reply related [flat|nested] 52+ messages in thread* [PATCH 36/52] mips: octeon: Add cvmx-pko3-queue.c
2022-03-30 10:06 [PATCH 00/52] mips: octeon: Add ethernet support Stefan Roese
` (30 preceding siblings ...)
2022-03-30 10:07 ` [PATCH 35/52] mips: octeon: Add cvmx-pko3.c Stefan Roese
@ 2022-03-30 10:07 ` Stefan Roese
2022-03-30 10:07 ` [PATCH 37/52] mips: octeon: Add cvmx-pko3-compat.c Stefan Roese
` (17 subsequent siblings)
49 siblings, 0 replies; 52+ messages in thread
From: Stefan Roese @ 2022-03-30 10:07 UTC (permalink / raw)
To: u-boot; +Cc: awilliams, cchavva
From: Aaron Williams <awilliams@marvell.com>
Import cvmx-pko3-queue.c from 2013 U-Boot. It will be used by the later
added drivers to support networking on the MIPS Octeon II / III
platforms.
Signed-off-by: Aaron Williams <awilliams@marvell.com>
Signed-off-by: Stefan Roese <sr@denx.de>
---
arch/mips/mach-octeon/cvmx-pko3-queue.c | 1331 +++++++++++++++++++++++
1 file changed, 1331 insertions(+)
create mode 100644 arch/mips/mach-octeon/cvmx-pko3-queue.c
diff --git a/arch/mips/mach-octeon/cvmx-pko3-queue.c b/arch/mips/mach-octeon/cvmx-pko3-queue.c
new file mode 100644
index 000000000000..d1b48d4cb8de
--- /dev/null
+++ b/arch/mips/mach-octeon/cvmx-pko3-queue.c
@@ -0,0 +1,1331 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018-2022 Marvell International Ltd.
+ */
+
+#include <errno.h>
+#include <log.h>
+#include <time.h>
+#include <linux/delay.h>
+
+#include <mach/cvmx-regs.h>
+#include <mach/cvmx-csr.h>
+#include <mach/cvmx-bootmem.h>
+#include <mach/octeon-model.h>
+#include <mach/cvmx-fuse.h>
+#include <mach/octeon-feature.h>
+#include <mach/cvmx-qlm.h>
+#include <mach/octeon_qlm.h>
+#include <mach/cvmx-pcie.h>
+#include <mach/cvmx-coremask.h>
+
+#include <mach/cvmx-agl-defs.h>
+#include <mach/cvmx-bgxx-defs.h>
+#include <mach/cvmx-ciu-defs.h>
+#include <mach/cvmx-gmxx-defs.h>
+#include <mach/cvmx-gserx-defs.h>
+#include <mach/cvmx-ilk-defs.h>
+#include <mach/cvmx-ipd-defs.h>
+#include <mach/cvmx-pcsx-defs.h>
+#include <mach/cvmx-pcsxx-defs.h>
+#include <mach/cvmx-pki-defs.h>
+#include <mach/cvmx-pko-defs.h>
+#include <mach/cvmx-xcv-defs.h>
+
+#include <mach/cvmx-hwpko.h>
+#include <mach/cvmx-ilk.h>
+#include <mach/cvmx-pki.h>
+#include <mach/cvmx-pko3.h>
+#include <mach/cvmx-pko3-queue.h>
+#include <mach/cvmx-pko3-resources.h>
+
+#include <mach/cvmx-helper.h>
+#include <mach/cvmx-helper-board.h>
+#include <mach/cvmx-helper-cfg.h>
+
+#include <mach/cvmx-helper-bgx.h>
+#include <mach/cvmx-helper-cfg.h>
+#include <mach/cvmx-helper-util.h>
+#include <mach/cvmx-helper-pki.h>
+
+/* Smallest Round-Robin quantum to use +1 */
+#define CVMX_PKO3_RR_QUANTUM_MIN 0x10
+
+static int debug; /* 1 for basic, 2 for detailed trace */
+
/* Per-IPD-port range of Descriptor Queues, packed into 16 bits */
struct cvmx_pko3_dq {
	unsigned dq_count : 6; /* Number of descriptor queues */
	unsigned dq_base : 10; /* Descriptor queue start number */
/*
 * XOR swizzle applied to the IPD port number when indexing the table.
 * NOTE(review): this #define sits inside the struct braces; the
 * preprocessor makes it file-wide anyway, but the placement looks
 * accidental — consider moving it outside the struct.
 */
#define CVMX_PKO3_SWIZZLE_IPD 0x0
};
+
+/*
+ * @INTERNAL
+ * Descriptor Queue to IPD port mapping table.
+ *
+ * This pointer is per-core, contains the virtual address
+ * of a global named block which has 2^12 entries per each
+ * possible node.
+ */
+struct cvmx_pko3_dq *__cvmx_pko3_dq_table;
+
+int cvmx_pko3_get_queue_base(int ipd_port)
+{
+ struct cvmx_pko3_dq *dq_table;
+ int ret = -1;
+ unsigned int i;
+ struct cvmx_xport xp = cvmx_helper_ipd_port_to_xport(ipd_port);
+
+ /* get per-node table */
+ if (cvmx_unlikely(!__cvmx_pko3_dq_table))
+ __cvmx_pko3_dq_table_setup();
+
+ i = CVMX_PKO3_SWIZZLE_IPD ^ xp.port;
+
+ /* get per-node table */
+ dq_table = __cvmx_pko3_dq_table + CVMX_PKO3_IPD_NUM_MAX * xp.node;
+
+ if (cvmx_likely(dq_table[i].dq_count > 0))
+ ret = xp.node << 10 | dq_table[i].dq_base;
+ else if (debug)
+ cvmx_printf("ERROR: %s: no queues for ipd_port=%#x\n", __func__,
+ ipd_port);
+
+ return ret;
+}
+
/**
 * Return the number of Descriptor Queues assigned to an IPD port,
 * or -1 when the port has no registered queues.
 */
int cvmx_pko3_get_queue_num(int ipd_port)
{
	struct cvmx_pko3_dq *dq_table;
	int ret = -1;
	unsigned int i;
	struct cvmx_xport xp = cvmx_helper_ipd_port_to_xport(ipd_port);

	/* get per-node table */
	if (cvmx_unlikely(!__cvmx_pko3_dq_table))
		__cvmx_pko3_dq_table_setup();

	i = CVMX_PKO3_SWIZZLE_IPD ^ xp.port;

	/* get per-node table */
	dq_table = __cvmx_pko3_dq_table + CVMX_PKO3_IPD_NUM_MAX * xp.node;

	if (cvmx_likely(dq_table[i].dq_count > 0))
		ret = dq_table[i].dq_count;
	else if (debug)
		/* NOTE(review): the sibling cvmx_pko3_get_queue_base() uses
		 * cvmx_printf() for the same condition — confirm which
		 * output path is intended
		 */
		debug("ERROR: %s: no queues for ipd_port=%#x\n", __func__,
		      ipd_port);

	return ret;
}
+
+/**
+ * Get L1/Port Queue number assigned to interface port.
+ *
+ * @param xiface is interface number.
+ * @param index is port index.
+ */
+int cvmx_pko3_get_port_queue(int xiface, int index)
+{
+ int queue;
+ cvmx_pko_l1_sqx_topology_t qtop;
+ int mac = __cvmx_pko3_get_mac_num(xiface, index);
+ int nqueues = cvmx_pko3_num_level_queues(CVMX_PKO_PORT_QUEUES);
+ cvmx_xiface_t xi = cvmx_helper_xiface_to_node_interface(xiface);
+
+ for (queue = 0; queue < nqueues; queue++) {
+ qtop.u64 =
+ csr_rd_node(xi.node, CVMX_PKO_L1_SQX_TOPOLOGY(queue));
+ if (qtop.s.link == mac)
+ break;
+ }
+ if (queue >= nqueues)
+ return -1;
+ return queue;
+}
+
+/**
+ * @INTERNAL
+ *
+ * Initialize port/dq table contents
+ */
+static void __cvmx_pko3_dq_table_init(void *ptr)
+{
+ unsigned int size = sizeof(struct cvmx_pko3_dq) *
+ CVMX_PKO3_IPD_NUM_MAX * CVMX_MAX_NODES;
+
+ memset(ptr, 0, size);
+}
+
/**
 * @INTERNAL
 *
 * Find or allocate global port/dq map table
 * which is a named table, contains entries for
 * all possible OCI nodes.
 *
 * The table global pointer is stored in core-local variable
 * so that every core will call this function once, on first use.
 *
 * @return 0 on success, -1 when the named bootmem block could not be
 * found or allocated.
 */
int __cvmx_pko3_dq_table_setup(void)
{
	void *ptr;

	/* Allocate once system-wide; later callers find the same block */
	ptr = cvmx_bootmem_alloc_named_range_once(
		/* size */
		sizeof(struct cvmx_pko3_dq) * CVMX_PKO3_IPD_NUM_MAX *
			CVMX_MAX_NODES,
		/* min_addr, max_addr, align */
		0ull, 0ull, sizeof(struct cvmx_pko3_dq),
		/* name */
		"cvmx_pko3_global_dq_table", __cvmx_pko3_dq_table_init);

	if (debug)
		debug("%s: dq_table_ptr=%p\n", __func__, ptr);

	if (!ptr)
		return -1;

	__cvmx_pko3_dq_table = ptr;
	return 0;
}
+
+/*
+ * @INTERNAL
+ * Register a range of Descriptor Queues with an interface port
+ *
+ * This function populates the DQ-to-IPD translation table
+ * used by the application to retrieve the DQ range (typically ordered
+ * by priority) for a given IPD-port, which is either a physical port,
+ * or a channel on a channelized interface (i.e. ILK).
+ *
+ * @param xiface is the physical interface number
+ * @param index is either a physical port on an interface
+ * or a channel of an ILK interface
+ * @param dq_base is the first Descriptor Queue number in a consecutive range
+ * @param dq_count is the number of consecutive Descriptor Queues leading
+ * the same channel or port.
+ *
+ * Only a consecutive range of Descriptor Queues can be associated with any
+ * given channel/port, and usually they are ordered from most to least
+ * in terms of scheduling priority.
+ *
+ * Note: this function only populates the node-local translation table.
+ * NOTE: This function would be cleaner if it had a single ipd_port argument
+ *
+ * @returns 0 on success, -1 on failure.
+ */
int __cvmx_pko3_ipd_dq_register(int xiface, int index, unsigned int dq_base,
				unsigned int dq_count)
{
	struct cvmx_pko3_dq *dq_table;
	int ipd_port;
	unsigned int i;
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
	struct cvmx_xport xp;

	/* Resolve the IPD port: NULL interfaces map to the NULL port */
	if (__cvmx_helper_xiface_is_null(xiface)) {
		ipd_port = cvmx_helper_node_to_ipd_port(xi.node,
							CVMX_PKO3_IPD_PORT_NULL);
	} else {
		int p;

		p = cvmx_helper_get_ipd_port(xiface, index);
		if (p < 0) {
			cvmx_printf("ERROR: %s: xiface %#x has no IPD port\n",
				    __func__, xiface);
			return -1;
		}
		ipd_port = p;
	}

	xp = cvmx_helper_ipd_port_to_xport(ipd_port);

	i = CVMX_PKO3_SWIZZLE_IPD ^ xp.port;

	/* get per-node table */
	if (!__cvmx_pko3_dq_table)
		__cvmx_pko3_dq_table_setup();

	dq_table = __cvmx_pko3_dq_table + CVMX_PKO3_IPD_NUM_MAX * xi.node;

	if (debug)
		debug("%s: ipd_port=%#x ix=%#x dq %u cnt %u\n", __func__,
		      ipd_port, i, dq_base, dq_count);

	/* Check the IPD port has not already been configured */
	if (dq_table[i].dq_count > 0) {
		cvmx_printf("%s: ERROR: IPD %#x already registered\n", __func__,
			    ipd_port);
		return -1;
	}

	/* Store DQ# range in the queue lookup table */
	dq_table[i].dq_base = dq_base;
	dq_table[i].dq_count = dq_count;

	return 0;
}
+
/**
 * @INTERNAL
 *
 * Unregister DQs associated with CHAN_E (IPD port)
 *
 * @param xiface is the physical interface number
 * @param index is either a physical port on an interface
 * or a channel of an ILK interface
 * @return 0 on success, -1 on failure.
 *
 * NOTE: This function would be cleaner if it had a single ipd_port argument
 */
int __cvmx_pko3_ipd_dq_unregister(int xiface, int index)
{
	struct cvmx_pko3_dq *dq_table;
	int ipd_port;
	unsigned int i;
	struct cvmx_xport xp;
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);

	/* Resolve the IPD port: NULL interfaces map to the NULL port */
	if (__cvmx_helper_xiface_is_null(xiface)) {
		ipd_port = cvmx_helper_node_to_ipd_port(xi.node,
							CVMX_PKO3_IPD_PORT_NULL);
	} else {
		int p;

		p = cvmx_helper_get_ipd_port(xiface, index);
		if (p < 0) {
			cvmx_printf("ERROR: %s: xiface %#x has no IPD port\n",
				    __func__, xiface);
			return -1;
		}
		ipd_port = p;
	}

	xp = cvmx_helper_ipd_port_to_xport(ipd_port);

	i = CVMX_PKO3_SWIZZLE_IPD ^ xp.port;

	/* get per-node table */
	if (!__cvmx_pko3_dq_table)
		__cvmx_pko3_dq_table_setup();

	/* get per-node table */
	dq_table = __cvmx_pko3_dq_table + CVMX_PKO3_IPD_NUM_MAX * xi.node;

	/* A zero count means the port was never (or no longer) registered */
	if (dq_table[i].dq_count == 0) {
		cvmx_printf("WARNING: %s:ipd=%#x already released\n", __func__,
			    ipd_port);
		return -1;
	}

	if (debug)
		debug("%s:ipd_port=%#x release dq %u cnt %u\n", __func__,
		      ipd_port, dq_table[i].dq_base, dq_table[i].dq_count);

	dq_table[i].dq_count = 0;

	return 0;
}
+
+/*
+ * @INTERNAL
+ * Convert normal CHAN_E (i.e. IPD port) value to compressed channel form
+ * that is used to populate PKO_LUT.
+ *
+ * Note: This code may be model specific.
+ */
static int cvmx_pko3_chan_2_xchan(uint16_t ipd_port)
{
	u16 xchan;
	u8 off;
	static const u8 *xchan_base;
	/* Per-model tables: entry N gives (compressed base >> 4) for IPD
	 * range N<<8; 0xff means invalid, 0xee means LUT not used
	 */
	static const u8 xchan_base_cn78xx[16] = {
		/* IPD 0x000 */ 0x3c0 >> 4, /* LBK */
		/* IPD 0x100 */ 0x380 >> 4, /* DPI */
		/* IPD 0x200 */ 0xfff >> 4, /* not used */
		/* IPD 0x300 */ 0xfff >> 4, /* not used */
		/* IPD 0x400 */ 0x000 >> 4, /* ILK0 */
		/* IPD 0x500 */ 0x100 >> 4, /* ILK1 */
		/* IPD 0x600 */ 0xfff >> 4, /* not used */
		/* IPD 0x700 */ 0xfff >> 4, /* not used */
		/* IPD 0x800 */ 0x200 >> 4, /* BGX0 */
		/* IPD 0x900 */ 0x240 >> 4, /* BGX1 */
		/* IPD 0xa00 */ 0x280 >> 4, /* BGX2 */
		/* IPD 0xb00 */ 0x2c0 >> 4, /* BGX3 */
		/* IPD 0xc00 */ 0x300 >> 4, /* BGX4 */
		/* IPD 0xd00 */ 0x340 >> 4, /* BGX5 */
		/* IPD 0xe00 */ 0xfff >> 4, /* not used */
		/* IPD 0xf00 */ 0xfff >> 4 /* not used */
	};
	static const u8 xchan_base_cn73xx[16] = {
		/* IPD 0x000 */ 0x0c0 >> 4, /* LBK */
		/* IPD 0x100 */ 0x100 >> 4, /* DPI */
		/* IPD 0x200 */ 0xfff >> 4, /* not used */
		/* IPD 0x300 */ 0xfff >> 4, /* not used */
		/* IPD 0x400 */ 0xfff >> 4, /* not used */
		/* IPD 0x500 */ 0xfff >> 4, /* not used */
		/* IPD 0x600 */ 0xfff >> 4, /* not used */
		/* IPD 0x700 */ 0xfff >> 4, /* not used */
		/* IPD 0x800 */ 0x000 >> 4, /* BGX0 */
		/* IPD 0x900 */ 0x040 >> 4, /* BGX1 */
		/* IPD 0xa00 */ 0x080 >> 4, /* BGX2 */
		/* IPD 0xb00 */ 0xfff >> 4, /* not used */
		/* IPD 0xc00 */ 0xfff >> 4, /* not used */
		/* IPD 0xd00 */ 0xfff >> 4, /* not used */
		/* IPD 0xe00 */ 0xfff >> 4, /* not used */
		/* IPD 0xf00 */ 0xfff >> 4 /* not used */
	};
	static const u8 xchan_base_cn75xx[16] = {
		/* IPD 0x000 */ 0x040 >> 4, /* LBK */
		/* IPD 0x100 */ 0x080 >> 4, /* DPI */
		/* IPD 0x200 */ 0xeee >> 4, /* SRIO0 noop */
		/* IPD 0x300 */ 0xfff >> 4, /* not used */
		/* IPD 0x400 */ 0xfff >> 4, /* not used */
		/* IPD 0x500 */ 0xfff >> 4, /* not used */
		/* IPD 0x600 */ 0xfff >> 4, /* not used */
		/* IPD 0x700 */ 0xfff >> 4, /* not used */
		/* IPD 0x800 */ 0x000 >> 4, /* BGX0 */
		/* IPD 0x900 */ 0xfff >> 4, /* not used */
		/* IPD 0xa00 */ 0xfff >> 4, /* not used */
		/* IPD 0xb00 */ 0xfff >> 4, /* not used */
		/* IPD 0xc00 */ 0xfff >> 4, /* not used */
		/* IPD 0xd00 */ 0xfff >> 4, /* not used */
		/* IPD 0xe00 */ 0xfff >> 4, /* not used */
		/* IPD 0xf00 */ 0xfff >> 4 /* not used */
	};

	/* Select the table matching the running silicon model */
	if (OCTEON_IS_MODEL(OCTEON_CN73XX))
		xchan_base = xchan_base_cn73xx;
	if (OCTEON_IS_MODEL(OCTEON_CNF75XX))
		xchan_base = xchan_base_cn75xx;
	if (OCTEON_IS_MODEL(OCTEON_CN78XX))
		xchan_base = xchan_base_cn78xx;

	if (!xchan_base)
		return -1;

	xchan = ipd_port >> 8;

	/* ILKx, DPI has 8 bits logical channels, others just 6 */
	if (((xchan & 0xfe) == 0x04) || xchan == 0x01)
		off = ipd_port & 0xff;
	else
		off = ipd_port & 0x3f;

	xchan = xchan_base[xchan & 0xf];

	/* 0xff/0xee are the sentinel values stored as 0xfff>>4 / 0xeee>>4 */
	if (xchan == 0xff)
		return -1; /* Invalid IPD_PORT */
	else if (xchan == 0xee)
		return -2; /* LUT not used */
	else
		return (xchan << 4) | off;
}
+
/*
 * Map channel number in PKO
 *
 * @param node is to specify the node to which this configuration is applied.
 * @param pq_num specifies the Port Queue (i.e. L1) queue number.
 * @param l2_l3_q_num specifies L2/L3 queue number.
 * @param channel specifies the channel number to map to the queue.
 *
 * The channel assignment applies to L2 or L3 Shaper Queues depending
 * on the setting of channel credit level.
 *
 * @return returns none.
 */
void cvmx_pko3_map_channel(unsigned int node, unsigned int pq_num,
			   unsigned int l2_l3_q_num, uint16_t channel)
{
	union cvmx_pko_l3_l2_sqx_channel sqx_channel;
	cvmx_pko_lutx_t lutx;
	int xchan;

	/* Record the channel in the L2/L3 queue's channel register */
	sqx_channel.u64 =
		csr_rd_node(node, CVMX_PKO_L3_L2_SQX_CHANNEL(l2_l3_q_num));

	sqx_channel.s.cc_channel = channel;

	csr_wr_node(node, CVMX_PKO_L3_L2_SQX_CHANNEL(l2_l3_q_num),
		    sqx_channel.u64);

	/* Convert CHAN_E into compressed channel */
	xchan = cvmx_pko3_chan_2_xchan(channel);

	if (debug)
		debug("%s: ipd_port=%#x xchan=%#x\n", __func__, channel, xchan);

	if (xchan < 0) {
		/* -2 ("LUT not used") is silently accepted; only -1
		 * (unrecognized channel) is reported
		 */
		if (xchan == -1)
			cvmx_printf("%s: ERROR: channel %#x not recognized\n",
				    __func__, channel);
		return;
	}

	/* Program the LUT entry that routes this channel to its queues */
	lutx.u64 = 0;
	lutx.s.valid = 1;
	lutx.s.pq_idx = pq_num;
	lutx.s.queue_number = l2_l3_q_num;

	csr_wr_node(node, CVMX_PKO_LUTX(xchan), lutx.u64);

	if (debug)
		debug("%s: channel %#x (compressed=%#x) mapped L2/L3 SQ=%u, PQ=%u\n",
		      __func__, channel, xchan, l2_l3_q_num, pq_num);
}
+
+/*
+ * @INTERNAL
+ * This function configures port queue scheduling and topology parameters
+ * in hardware.
+ *
+ * @param node is to specify the node to which this configuration is applied.
+ * @param port_queue is the port queue number to be configured.
+ * @param mac_num is the mac number of the mac that will be tied to this port_queue.
+ */
+static void cvmx_pko_configure_port_queue(int node, int port_queue, int mac_num)
+{
+ cvmx_pko_l1_sqx_topology_t pko_l1_topology;
+ cvmx_pko_l1_sqx_shape_t pko_l1_shape;
+ cvmx_pko_l1_sqx_link_t pko_l1_link;
+
+ pko_l1_topology.u64 = 0;
+ pko_l1_topology.s.link = mac_num;
+ csr_wr_node(node, CVMX_PKO_L1_SQX_TOPOLOGY(port_queue),
+ pko_l1_topology.u64);
+
+ pko_l1_shape.u64 = 0;
+ pko_l1_shape.s.link = mac_num;
+ csr_wr_node(node, CVMX_PKO_L1_SQX_SHAPE(port_queue), pko_l1_shape.u64);
+
+ pko_l1_link.u64 = 0;
+ pko_l1_link.s.link = mac_num;
+ csr_wr_node(node, CVMX_PKO_L1_SQX_LINK(port_queue), pko_l1_link.u64);
+}
+
/*
 * @INTERNAL
 * This function configures level 2 queues scheduling and topology parameters
 * in hardware.
 *
 * @param node is to specify the node to which this configuration is applied.
 * @param queue is the level2 queue number to be configured.
 * @param parent_queue is the parent queue at next level for this l2 queue.
 * @param prio is this queue's priority in parent's scheduler.
 * @param rr_quantum is this queue's round robin quantum value.
 * @param child_base is the first child queue number in the static priority children.
 * @param child_rr_prio is the round robin children priority.
 */
static void cvmx_pko_configure_l2_queue(int node, int queue, int parent_queue,
					int prio, int rr_quantum,
					int child_base, int child_rr_prio)
{
	cvmx_pko_l2_sqx_schedule_t pko_sq_sched;
	cvmx_pko_l2_sqx_topology_t pko_child_topology;
	cvmx_pko_l1_sqx_topology_t pko_parent_topology;

	/* parent topology configuration */
	pko_parent_topology.u64 =
		csr_rd_node(node, CVMX_PKO_L1_SQX_TOPOLOGY(parent_queue));
	pko_parent_topology.s.prio_anchor = child_base;
	pko_parent_topology.s.rr_prio = child_rr_prio;
	csr_wr_node(node, CVMX_PKO_L1_SQX_TOPOLOGY(parent_queue),
		    pko_parent_topology.u64);

	if (debug > 1)
		debug("CVMX_PKO_L1_SQX_TOPOLOGY(%u): PRIO_ANCHOR=%u PARENT=%u\n",
		      parent_queue, pko_parent_topology.s.prio_anchor,
		      pko_parent_topology.s.link);

	/* scheduler configuration for this sq in the parent queue */
	pko_sq_sched.u64 = 0;
	pko_sq_sched.s.prio = prio;
	pko_sq_sched.s.rr_quantum = rr_quantum;
	csr_wr_node(node, CVMX_PKO_L2_SQX_SCHEDULE(queue), pko_sq_sched.u64);

	/* child topology configuration */
	pko_child_topology.u64 = 0;
	pko_child_topology.s.parent = parent_queue;
	csr_wr_node(node, CVMX_PKO_L2_SQX_TOPOLOGY(queue),
		    pko_child_topology.u64);
}
+
/*
 * @INTERNAL
 * This function configures level 3 queues scheduling and topology parameters
 * in hardware.
 *
 * @param node is to specify the node to which this configuration is applied.
 * @param queue is the level3 queue number to be configured.
 * @param parent_queue is the parent queue at next level for this l3 queue.
 * @param prio is this queue's priority in parent's scheduler.
 * @param rr_quantum is this queue's round robin quantum value.
 * @param child_base is the first child queue number in the static priority children.
 * @param child_rr_prio is the round robin children priority.
 */
static void cvmx_pko_configure_l3_queue(int node, int queue, int parent_queue,
					int prio, int rr_quantum,
					int child_base, int child_rr_prio)
{
	cvmx_pko_l3_sqx_schedule_t pko_sq_sched;
	cvmx_pko_l3_sqx_topology_t pko_child_topology;
	cvmx_pko_l2_sqx_topology_t pko_parent_topology;

	/* parent topology configuration */
	pko_parent_topology.u64 =
		csr_rd_node(node, CVMX_PKO_L2_SQX_TOPOLOGY(parent_queue));
	pko_parent_topology.s.prio_anchor = child_base;
	pko_parent_topology.s.rr_prio = child_rr_prio;
	csr_wr_node(node, CVMX_PKO_L2_SQX_TOPOLOGY(parent_queue),
		    pko_parent_topology.u64);

	if (debug > 1)
		debug("CVMX_PKO_L2_SQX_TOPOLOGY(%u): PRIO_ANCHOR=%u PARENT=%u\n",
		      parent_queue, pko_parent_topology.s.prio_anchor,
		      pko_parent_topology.s.parent);

	/* scheduler configuration for this sq in the parent queue */
	pko_sq_sched.u64 = 0;
	pko_sq_sched.s.prio = prio;
	pko_sq_sched.s.rr_quantum = rr_quantum;
	csr_wr_node(node, CVMX_PKO_L3_SQX_SCHEDULE(queue), pko_sq_sched.u64);

	/* child topology configuration */
	pko_child_topology.u64 = 0;
	pko_child_topology.s.parent = parent_queue;
	csr_wr_node(node, CVMX_PKO_L3_SQX_TOPOLOGY(queue),
		    pko_child_topology.u64);
}
+
/*
 * @INTERNAL
 * This function configures level 4 queues scheduling and topology parameters
 * in hardware.
 *
 * @param node is to specify the node to which this configuration is applied.
 * @param queue is the level4 queue number to be configured.
 * @param parent_queue is the parent queue at next level for this l4 queue.
 * @param prio is this queue's priority in parent's scheduler.
 * @param rr_quantum is this queue's round robin quantum value.
 * @param child_base is the first child queue number in the static priority children.
 * @param child_rr_prio is the round robin children priority.
 */
static void cvmx_pko_configure_l4_queue(int node, int queue, int parent_queue,
					int prio, int rr_quantum,
					int child_base, int child_rr_prio)
{
	cvmx_pko_l4_sqx_schedule_t pko_sq_sched;
	cvmx_pko_l4_sqx_topology_t pko_child_topology;
	cvmx_pko_l3_sqx_topology_t pko_parent_topology;

	/* parent topology configuration */
	pko_parent_topology.u64 =
		csr_rd_node(node, CVMX_PKO_L3_SQX_TOPOLOGY(parent_queue));
	pko_parent_topology.s.prio_anchor = child_base;
	pko_parent_topology.s.rr_prio = child_rr_prio;
	csr_wr_node(node, CVMX_PKO_L3_SQX_TOPOLOGY(parent_queue),
		    pko_parent_topology.u64);

	if (debug > 1)
		debug("CVMX_PKO_L3_SQX_TOPOLOGY(%u): PRIO_ANCHOR=%u PARENT=%u\n",
		      parent_queue, pko_parent_topology.s.prio_anchor,
		      pko_parent_topology.s.parent);

	/* scheduler configuration for this sq in the parent queue */
	pko_sq_sched.u64 = 0;
	pko_sq_sched.s.prio = prio;
	pko_sq_sched.s.rr_quantum = rr_quantum;
	csr_wr_node(node, CVMX_PKO_L4_SQX_SCHEDULE(queue), pko_sq_sched.u64);

	/* topology configuration */
	pko_child_topology.u64 = 0;
	pko_child_topology.s.parent = parent_queue;
	csr_wr_node(node, CVMX_PKO_L4_SQX_TOPOLOGY(queue),
		    pko_child_topology.u64);
}
+
+/*
+ * @INTERNAL
+ * This function configures level 5 queues scheduling and topology parameters
+ * in hardware.
+ *
+ * @param node is to specify the node to which this configuration is applied.
+ * @param queue is the level5 queue number to be configured.
+ * @param parent_queue is the parent queue at next level for this l5 queue.
+ * @param prio is this queue's priority in parent's scheduler.
+ * @param rr_quantum is this queue's round robin quantum value.
+ * @param child_base is the first child queue number in the static priority children.
+ * @param child_rr_prio is the round robin children priority.
+ */
+static void cvmx_pko_configure_l5_queue(int node, int queue, int parent_queue,
+					int prio, int rr_quantum,
+					int child_base, int child_rr_prio)
+{
+	cvmx_pko_l5_sqx_schedule_t pko_sq_sched;
+	cvmx_pko_l4_sqx_topology_t pko_parent_topology;
+	cvmx_pko_l5_sqx_topology_t pko_child_topology;
+
+	/* parent topology configuration: anchor and RR priority of children */
+	pko_parent_topology.u64 =
+		csr_rd_node(node, CVMX_PKO_L4_SQX_TOPOLOGY(parent_queue));
+	pko_parent_topology.s.prio_anchor = child_base;
+	pko_parent_topology.s.rr_prio = child_rr_prio;
+	csr_wr_node(node, CVMX_PKO_L4_SQX_TOPOLOGY(parent_queue),
+		    pko_parent_topology.u64);
+
+	if (debug > 1)
+		debug("CVMX_PKO_L4_SQX_TOPOLOGY(%u): PRIO_ANCHOR=%u PARENT=%u\n",
+		      parent_queue, pko_parent_topology.s.prio_anchor,
+		      pko_parent_topology.s.parent);
+
+	/* scheduler configuration for this sq in the parent queue */
+	pko_sq_sched.u64 = 0;
+	pko_sq_sched.s.prio = prio;
+	pko_sq_sched.s.rr_quantum = rr_quantum;
+	csr_wr_node(node, CVMX_PKO_L5_SQX_SCHEDULE(queue), pko_sq_sched.u64);
+
+	/* topology configuration: link this queue to its parent */
+	pko_child_topology.u64 = 0;
+	pko_child_topology.s.parent = parent_queue;
+	csr_wr_node(node, CVMX_PKO_L5_SQX_TOPOLOGY(queue),
+		    pko_child_topology.u64);
+}
+
+/*
+ * @INTERNAL
+ * This function configures descriptor queues scheduling and topology parameters
+ * in hardware.
+ *
+ * @param node is to specify the node to which this configuration is applied.
+ * @param dq is the descriptor queue number to be configured.
+ * @param parent_queue is the parent queue at next level for this dq.
+ * @param prio is this queue's priority in parent's scheduler.
+ * @param rr_quantum is this queue's round robin quantum value.
+ * @param child_base is the first child queue number in the static priority children.
+ * @param child_rr_prio is the round robin children priority.
+ */
+static void cvmx_pko_configure_dq(int node, int dq, int parent_queue, int prio,
+				  int rr_quantum, int child_base,
+				  int child_rr_prio)
+{
+	cvmx_pko_dqx_schedule_t pko_dq_sched;
+	cvmx_pko_dqx_topology_t pko_dq_topology;
+	cvmx_pko_l5_sqx_topology_t pko_parent_topology;
+	cvmx_pko_dqx_wm_ctl_t pko_dq_wm_ctl;
+	unsigned long long parent_topology_reg;
+	char lvl;
+
+	if (debug)
+		debug("%s: dq %u parent %u child_base %u\n", __func__, dq,
+		      parent_queue, child_base);
+
+	/*
+	 * The DQ's parent is the deepest implemented SQ level:
+	 * L5 on the full hierarchy, L3 on the shallow one.
+	 */
+	if (__cvmx_pko3_sq_lvl_max() == CVMX_PKO_L5_QUEUES) {
+		parent_topology_reg = CVMX_PKO_L5_SQX_TOPOLOGY(parent_queue);
+		lvl = 5;
+	} else if (__cvmx_pko3_sq_lvl_max() == CVMX_PKO_L3_QUEUES) {
+		parent_topology_reg = CVMX_PKO_L3_SQX_TOPOLOGY(parent_queue);
+		lvl = 3;
+	} else {
+		/* unsupported hierarchy depth: nothing to configure */
+		return;
+	}
+
+	if (debug)
+		debug("%s: parent_topology_reg=%#llx\n", __func__,
+		      parent_topology_reg);
+
+	/* parent topology configuration: anchor and RR priority of children */
+	pko_parent_topology.u64 = csr_rd_node(node, parent_topology_reg);
+	pko_parent_topology.s.prio_anchor = child_base;
+	pko_parent_topology.s.rr_prio = child_rr_prio;
+	csr_wr_node(node, parent_topology_reg, pko_parent_topology.u64);
+
+	if (debug > 1)
+		debug("CVMX_PKO_L%d_SQX_TOPOLOGY(%u): PRIO_ANCHOR=%u PARENT=%u\n",
+		      lvl, parent_queue, pko_parent_topology.s.prio_anchor,
+		      pko_parent_topology.s.parent);
+
+	/* scheduler configuration for this dq in the parent queue */
+	pko_dq_sched.u64 = 0;
+	pko_dq_sched.s.prio = prio;
+	pko_dq_sched.s.rr_quantum = rr_quantum;
+	csr_wr_node(node, CVMX_PKO_DQX_SCHEDULE(dq), pko_dq_sched.u64);
+
+	/* topology configuration: link this dq to its parent */
+	pko_dq_topology.u64 = 0;
+	pko_dq_topology.s.parent = parent_queue;
+	csr_wr_node(node, CVMX_PKO_DQX_TOPOLOGY(dq), pko_dq_topology.u64);
+
+	/* configure for counting packets, not bytes at this level */
+	pko_dq_wm_ctl.u64 = 0;
+	pko_dq_wm_ctl.s.kind = 1;
+	pko_dq_wm_ctl.s.enable = 0;
+	csr_wr_node(node, CVMX_PKO_DQX_WM_CTL(dq), pko_dq_wm_ctl.u64);
+
+	if (debug > 1) {
+		/* read back what was just written, for diagnostics only */
+		pko_dq_sched.u64 = csr_rd_node(node, CVMX_PKO_DQX_SCHEDULE(dq));
+		pko_dq_topology.u64 =
+			csr_rd_node(node, CVMX_PKO_DQX_TOPOLOGY(dq));
+		debug("CVMX_PKO_DQX_TOPOLOGY(%u)PARENT=%u CVMX_PKO_DQX_SCHEDULE(%u) PRIO=%u Q=%u\n",
+		      dq, pko_dq_topology.s.parent, dq, pko_dq_sched.s.prio,
+		      pko_dq_sched.s.rr_quantum);
+	}
+}
+
+/*
+ * @INTERNAL
+ * The following structure selects the Scheduling Queue configuration
+ * routine for each of the supported levels.
+ * The initial content of the table will be setup in accordance
+ * to the specific SoC model and its implemented resources
+ */
+struct pko3_cfg_tab_s {
+	/* per-level entry: function pointer to configure the given level
+	 * (indexed by child level, last=DQ) and the level of its parent
+	 */
+	struct {
+		u8 parent_level;
+		void (*cfg_sq_func)(int node, int queue, int parent_queue,
+				    int prio, int rr_quantum, int child_base,
+				    int child_rr_prio);
+		/* XXX array deliberately oversized, for debugging */
+	} lvl[256];
+};
+
+/* CN78XX: full scheduling hierarchy, PQ -> L2 -> L3 -> L4 -> L5 -> DQ */
+static const struct pko3_cfg_tab_s pko3_cn78xx_cfg = {
+	{ [CVMX_PKO_L2_QUEUES] = { CVMX_PKO_PORT_QUEUES,
+				   cvmx_pko_configure_l2_queue },
+	  [CVMX_PKO_L3_QUEUES] = { CVMX_PKO_L2_QUEUES,
+				   cvmx_pko_configure_l3_queue },
+	  [CVMX_PKO_L4_QUEUES] = { CVMX_PKO_L3_QUEUES,
+				   cvmx_pko_configure_l4_queue },
+	  [CVMX_PKO_L5_QUEUES] = { CVMX_PKO_L4_QUEUES,
+				   cvmx_pko_configure_l5_queue },
+	  [CVMX_PKO_DESCR_QUEUES] = { CVMX_PKO_L5_QUEUES,
+				      cvmx_pko_configure_dq } }
+};
+
+/* CN73XX/CNF75XX: shallow scheduling hierarchy, PQ -> L2 -> L3 -> DQ */
+static const struct pko3_cfg_tab_s pko3_cn73xx_cfg = {
+	{ [CVMX_PKO_L2_QUEUES] = { CVMX_PKO_PORT_QUEUES,
+				   cvmx_pko_configure_l2_queue },
+	  [CVMX_PKO_L3_QUEUES] = { CVMX_PKO_L2_QUEUES,
+				   cvmx_pko_configure_l3_queue },
+	  [CVMX_PKO_DESCR_QUEUES] = { CVMX_PKO_L3_QUEUES,
+				      cvmx_pko_configure_dq } }
+};
+
+/*
+ * Configure Port Queue and its children Scheduler Queue
+ *
+ * Port Queues (a.k.a L1) are assigned 1-to-1 to MACs.
+ * L2 Scheduler Queues are used for specifying channels, and thus there
+ * could be multiple L2 SQs attached to a single L1 PQ, either in a
+ * fair round-robin scheduling, or with static and/or round-robin priorities.
+ *
+ * @param node on which to operate
+ * @param mac_num is the LMAC number that is associated with the Port Queue,
+ * @param pq_num is the number of the L1 PQ attached to the MAC
+ *
+ * @returns 0 on success, -1 on failure.
+ */
+int cvmx_pko3_pq_config(unsigned int node, unsigned int mac_num,
+			unsigned int pq_num)
+{
+	char b1[10];	/* scratch for queue-name string, used in debug only */
+
+	if (debug)
+		debug("%s: MAC%u -> %s\n", __func__, mac_num,
+		      __cvmx_pko3_sq_str(b1, CVMX_PKO_PORT_QUEUES, pq_num));
+
+	cvmx_pko_configure_port_queue(node, pq_num, mac_num);
+
+	return 0;
+}
+
+/*
+ * Configure L3 through L5 Scheduler Queues and Descriptor Queues
+ *
+ * The Scheduler Queues in Levels 3 to 5 and Descriptor Queues are
+ * configured one-to-one or many-to-one to a single parent Scheduler
+ * Queues. The level of the parent SQ is specified in an argument,
+ * as well as the number of children to attach to the specific parent.
+ * The children can have fair round-robin or priority-based scheduling
+ * when multiple children are assigned a single parent.
+ *
+ * @param node on which to operate
+ * @param child_level is the level of the child queue
+ * @param parent_queue is the number of the parent Scheduler Queue
+ * @param child_base is the number of the first child SQ or DQ to assign
+ * @param child_count is the number of consecutive children to assign
+ * @param stat_prio_count is the priority setting for the children L2 SQs
+ *
+ * If <stat_prio_count> is -1, the Ln children will have equal Round-Robin
+ * relationship with each other. If <stat_prio_count> is 0, all Ln children
+ * will be arranged in Weighted-Round-Robin, with the first having the most
+ * precedence. If <stat_prio_count> is between 1 and 8, it indicates how
+ * many children will have static priority settings (with the first having
+ * the most precedence), with the remaining Ln children having WRR scheduling.
+ *
+ * @returns 0 on success, -1 on failure.
+ *
+ * Note: this function supports the configuration of node-local unit.
+ */
+int cvmx_pko3_sq_config_children(unsigned int node,
+				 enum cvmx_pko3_level_e child_level,
+				 unsigned int parent_queue,
+				 unsigned int child_base,
+				 unsigned int child_count, int stat_prio_count)
+{
+	enum cvmx_pko3_level_e parent_level;
+	unsigned int num_elem = 0;
+	unsigned int rr_quantum, rr_count;
+	unsigned int child, prio, rr_prio;
+	const struct pko3_cfg_tab_s *cfg_tbl = NULL;
+	char b1[10], b2[10];	/* scratch for queue-name strings, debug only */
+
+	/* Select the per-model level dispatch table */
+	if (OCTEON_IS_MODEL(OCTEON_CN78XX)) {
+		num_elem = NUM_ELEMENTS(pko3_cn78xx_cfg.lvl);
+		cfg_tbl = &pko3_cn78xx_cfg;
+	}
+	if (OCTEON_IS_MODEL(OCTEON_CN73XX) || OCTEON_IS_MODEL(OCTEON_CNF75XX)) {
+		num_elem = NUM_ELEMENTS(pko3_cn73xx_cfg.lvl);
+		cfg_tbl = &pko3_cn73xx_cfg;
+	}
+
+	if (!cfg_tbl || child_level >= num_elem) {
+		cvmx_printf("ERROR: %s: model or level %#x invalid\n", __func__,
+			    child_level);
+		return -1;
+	}
+
+	parent_level = cfg_tbl->lvl[child_level].parent_level;
+
+	/* A level with no configurator or no parent is not implemented
+	 * on this model
+	 */
+	if (!cfg_tbl->lvl[child_level].cfg_sq_func ||
+	    cfg_tbl->lvl[child_level].parent_level == 0) {
+		cvmx_printf("ERROR: %s: queue level %#x invalid\n", __func__,
+			    child_level);
+		return -1;
+	}
+
+	/* First static priority is 0 - top precedence */
+	prio = 0;
+
+	if (stat_prio_count > (signed int)child_count)
+		stat_prio_count = child_count;
+
+	/* Valid PRIO field is 0..9, limit maximum static priorities */
+	if (stat_prio_count > 9)
+		stat_prio_count = 9;
+
+	/* Special case of a single child */
+	if (child_count == 1) {
+		rr_count = 0;
+		rr_prio = 0xF;
+		/* Special case for Fair-RR */
+	} else if (stat_prio_count < 0) {
+		rr_count = child_count;
+		rr_prio = 0;
+	} else {
+		rr_count = child_count - stat_prio_count;
+		rr_prio = stat_prio_count;
+	}
+
+	/* Compute highest RR_QUANTUM */
+	if (stat_prio_count > 0)
+		rr_quantum = CVMX_PKO3_RR_QUANTUM_MIN * rr_count;
+	else
+		rr_quantum = CVMX_PKO3_RR_QUANTUM_MIN;
+
+	if (debug)
+		debug("%s: Parent %s child_base %u rr_pri %u\n", __func__,
+		      __cvmx_pko3_sq_str(b1, parent_level, parent_queue),
+		      child_base, rr_prio);
+
+	/* Parent is configured with child */
+
+	for (child = child_base; child < (child_base + child_count); child++) {
+		if (debug)
+			debug("%s: Child %s of %s prio %u rr_quantum %#x\n",
+			      __func__,
+			      __cvmx_pko3_sq_str(b1, child_level, child),
+			      __cvmx_pko3_sq_str(b2, parent_level,
+						 parent_queue),
+			      prio, rr_quantum);
+
+		cfg_tbl->lvl[child_level].cfg_sq_func(node, child, parent_queue,
+						      prio, rr_quantum,
+						      child_base, rr_prio);
+
+		/* Static-priority children get increasing PRIO values;
+		 * the remaining WRR children get decreasing RR_QUANTUM
+		 */
+		if (prio < rr_prio)
+			prio++;
+		else if (stat_prio_count > 0)
+			rr_quantum -= CVMX_PKO3_RR_QUANTUM_MIN;
+	} /* for child */
+
+	return 0;
+}
+
+/**
+ * Convert bitrate and burst size to SQx_xIR register values
+ *
+ * @INTERNAL
+ *
+ * Common function to convert bit-rate (ie kilo-bits-per-sec)
+ * and maximum burst (in bytes) values to PKO shaper register
+ * format, that is a short-float type, with divisor.
+ *
+ * @param tclk is the time-wheel clock for the specific shaper
+ * @param reg is a pointer to a register structure
+ * @param rate_kbips is the requested bit rate in kilobits/sec
+ * @param burst_bytes is the size of maximum burst in bytes
+ *
+ * @return A negative number means the transfer rate could
+ * not be set within acceptable tolerance, and the actual
+ * error in PPM is the negative of the returned value.
+ * A positive value indicates that the bit rate was set
+ * within acceptable tolerance, but the burst rate had an
+ * error, which is returned in PPM.
+ * A value of 0 means both measures were set within tolerance.
+ *
+ * Note that the burst error could be as a result of this function
+ * enforcing the minimum MTU as the minimum burst size allowed.
+ *
+ */
+static int cvmx_pko3_shaper_rate_compute(unsigned long tclk,
+					 cvmx_pko_l1_sqx_cir_t *reg,
+					 unsigned long rate_kbips,
+					 unsigned int burst_bytes)
+{
+	const unsigned int max_exp = 12; /* maximum exponent */
+	const unsigned int tock_bytes_exp = 0; /* tock rate in bytes */
+	long long burst_v, rate_v;
+	unsigned long long rate_tocks, burst_tocks;
+	unsigned long min_burst;
+	unsigned int div_exp, mant, exp;
+	unsigned long long tmp, fmax;
+
+	if (debug)
+		debug("%s: tclk=%lu, rate=%lu kbps, burst=%u bytes\n", __func__,
+		      tclk, rate_kbips, burst_bytes);
+
+	/* Convert API args into tocks: PSE native units */
+	tmp = (1 << (3 + tock_bytes_exp)) - 1;	/* round up bits-to-tocks */
+	tmp += rate_kbips;
+	rate_tocks = (1000ULL * tmp) >> (3 + tock_bytes_exp);
+	tmp = (1 << tock_bytes_exp) - 1;	/* round up bytes-to-tocks */
+	burst_tocks = (burst_bytes + tmp) >> tock_bytes_exp;
+
+	/* Compute largest short-float that fits in register fields */
+	fmax = CVMX_SHOFT_TO_U64((1 << CVMX_SHOFT_MANT_BITS) - 1, max_exp);
+
+	/* Find the biggest divider that has the short float fit */
+	for (div_exp = 0; div_exp <= max_exp; div_exp++) {
+		tmp = ((rate_tocks << div_exp) + (tclk / 2)) / tclk;
+		if (tmp > fmax) {
+			if (div_exp > 0)
+				div_exp--;
+			break;
+		}
+	}
+	/* Make sure divider, rate are within valid range */
+	if (div_exp > max_exp) {
+		/* Minimum reached */
+		div_exp = max_exp;
+	} else if (div_exp == 0) {
+		/* Maximum reached */
+		if ((rate_tocks / tclk) > fmax)
+			rate_tocks = fmax * tclk;
+	}
+	/* Store common divider */
+	reg->s.rate_divider_exponent = div_exp;
+
+	/* Burst register is the maximum accumulated credit count
+	 * in bytes, which must not be less than the MTU, and
+	 * should not be less than RATE/Tclk
+	 */
+
+	/* Find the minimum burst size needed for rate (burst ~ 4x rate) */
+	min_burst = (rate_tocks << (div_exp + 4)) / tclk;
+
+	/* Apply the minimum */
+	if (burst_tocks < min_burst)
+		burst_tocks = min_burst;
+
+	/* Calculate the rate short float (scaled by 2^8 for precision) */
+	tmp = (rate_tocks << (div_exp + 8)) / tclk;
+	CVMX_SHOFT_FROM_U64(tmp, mant, exp);
+	reg->s.rate_mantissa = mant;
+	reg->s.rate_exponent = exp - 8;
+
+	/* Calculate the BURST short float */
+	tmp = (burst_tocks << 8);
+	CVMX_SHOFT_FROM_U64(tmp, mant, exp);
+	reg->s.burst_mantissa = mant;
+	reg->s.burst_exponent = exp - 8 - 1;
+
+	if (debug)
+		debug("%s: RATE=%llu BURST=%llu DIV_EXP=%d\n", __func__,
+		      CVMX_SHOFT_TO_U64(reg->s.rate_mantissa,
+					reg->s.rate_exponent),
+		      CVMX_SHOFT_TO_U64(reg->s.burst_mantissa,
+					(reg->s.burst_exponent + 1)),
+		      div_exp);
+
+	/* Validate the resulting rate */
+	rate_v = CVMX_SHOFT_TO_U64(reg->s.rate_mantissa, reg->s.rate_exponent) *
+		 tclk;
+	/* Convert to kbips for comparing with argument */
+	rate_v = (rate_v << (3 + tock_bytes_exp)) / 1000ULL;
+	/* Finally apply divider for best accuracy */
+	rate_v >>= div_exp;
+
+	burst_v = CVMX_SHOFT_TO_U64(reg->s.burst_mantissa,
+				    (reg->s.burst_exponent + 1));
+	/* Convert in additional bytes as in argument */
+	burst_v = burst_v << (tock_bytes_exp);
+
+	if (debug)
+		debug("%s: result rate=%'llu kbips burst=%llu bytes\n",
+		      __func__, rate_v, burst_v);
+
+	/* Compute error in parts-per-million */
+	rate_v = abs(rate_v - rate_kbips);
+	burst_v = abs(burst_v - burst_bytes);
+
+	if (debug)
+		debug("%s: diff rate=%llu burst=%llu ppm\n", __func__, rate_v,
+		      burst_v);
+
+	rate_v = (rate_v * 1000000ULL) / rate_kbips;
+	if (burst_bytes > 0)
+		burst_v = (burst_v * 1000000ULL) / burst_bytes;
+	else
+		burst_v = 0;
+
+	if (debug)
+		debug("%s: error rate=%llu burst=%llu ppm\n", __func__, rate_v,
+		      burst_v);
+
+	/* Allow 1% error for CIR/PIR, and 5% for BURST */
+	if (rate_v > 10000)
+		return -rate_v;
+	if (burst_v > 50000)
+		return burst_v;
+
+	return 0;
+}
+
+/**
+ * Configure per-port CIR rate limit parameters
+ *
+ * This function configures rate limit at the L1/PQ level,
+ * i.e. for an entire MAC or physical port.
+ *
+ * @param node The OCI node where the target port is located
+ * @param pq_num The L1/PQ queue number for this setting
+ * @param rate_kbips The desired throughput in kilo-bits-per-second
+ * @param burst_bytes The size of a burst in bytes, at least MTU
+ * @param adj_bytes Per-packet length adjustment for shaping calculations
+ *
+ * @return Returns zero if both settings applied within allowed tolerance,
+ * otherwise the error is returned in parts-per-million.
+ * 'rate_kbips' error is a negative number, otherwise 'burst_bytes' error
+ * is returned as a positive integer.
+ */
+int cvmx_pko3_port_cir_set(unsigned int node, unsigned int pq_num,
+			   unsigned long rate_kbips, unsigned int burst_bytes,
+			   int adj_bytes)
+{
+	unsigned long tclk;
+	cvmx_pko_l1_sqx_cir_t sqx_cir;
+	cvmx_pko_l1_sqx_shape_t shape;
+	int rc;
+
+	if (debug)
+		debug("%s: pq=%u rate=%lu kbps, burst=%u bytes\n", __func__,
+		      pq_num, rate_kbips, burst_bytes);
+
+	sqx_cir.u64 = 0;
+
+	/* When rate == 0, disable the shaper */
+	if (rate_kbips == 0ULL) {
+		sqx_cir.s.enable = 0;
+		csr_wr_node(node, CVMX_PKO_L1_SQX_CIR(pq_num), sqx_cir.u64);
+		return 0;
+	}
+	/* Compute time-wheel frequency */
+	tclk = cvmx_pko3_pq_tw_clock_rate_node(node);
+
+	/* Compute shaper values directly into sqx_cir */
+	rc = cvmx_pko3_shaper_rate_compute(tclk, &sqx_cir, rate_kbips,
+					   burst_bytes);
+
+	/* Apply new settings. The rate fields were already filled in by
+	 * cvmx_pko3_shaper_rate_compute(); only the burst exponent gets
+	 * the additional -1 bias, same as in the DQ CIR/PIR variants.
+	 */
+	sqx_cir.s.enable = 1;
+	sqx_cir.s.burst_exponent = sqx_cir.s.burst_exponent - 1;
+	csr_wr_node(node, CVMX_PKO_L1_SQX_CIR(pq_num), sqx_cir.u64);
+
+	/* Apply the per-packet length adjustment for shaping */
+	shape.u64 = csr_rd_node(node, CVMX_PKO_L1_SQX_SHAPE(pq_num));
+	shape.s.adjust = adj_bytes;
+	csr_wr_node(node, CVMX_PKO_L1_SQX_SHAPE(pq_num), shape.u64);
+
+	return rc;
+}
+
+/**
+ * Configure per-queue CIR rate limit parameters
+ *
+ * This function configures rate limit at the descriptor queue level.
+ *
+ * @param node The OCI node where the target port is located
+ * @param dq_num The descriptor queue number for this setting
+ * @param rate_kbips The desired throughput in kilo-bits-per-second
+ * @param burst_bytes The size of a burst in bytes, at least MTU
+ *
+ * @return Returns zero if both settings applied within allowed tolerance,
+ * otherwise the error is returned in parts-per-million.
+ * 'rate_kbips' error is a negative number, otherwise 'burst_bytes' error
+ * is returned as a positive integer.
+ */
+int cvmx_pko3_dq_cir_set(unsigned int node, unsigned int dq_num,
+			 unsigned long rate_kbips, unsigned int burst_bytes)
+{
+	unsigned long tclk;
+	cvmx_pko_l1_sqx_cir_t sqx_cir;
+	cvmx_pko_dqx_cir_t dqx_cir;
+	int rc;
+
+	/* mask off node bits, keep only node-local queue number */
+	dq_num &= cvmx_pko3_num_level_queues(CVMX_PKO_DESCR_QUEUES) - 1;
+	if (debug)
+		debug("%s: dq=%u rate=%lu kbps, burst=%u bytes\n", __func__,
+		      dq_num, rate_kbips, burst_bytes);
+
+	dqx_cir.u64 = 0;
+
+	/* When rate == 0, disable the shaper */
+	if (rate_kbips == 0ULL) {
+		dqx_cir.s.enable = 0;
+		csr_wr_node(node, CVMX_PKO_DQX_CIR(dq_num), dqx_cir.u64);
+		return 0;
+	}
+	/* Compute time-wheel frequency */
+	tclk = cvmx_pko3_dq_tw_clock_rate_node(node);
+
+	/* Compute shaper values (into the L1-layout scratch register) */
+	rc = cvmx_pko3_shaper_rate_compute(tclk, &sqx_cir, rate_kbips,
+					   burst_bytes);
+
+	/* Copy the computed fields into the DQ-level CIR register */
+	dqx_cir.s.enable = 1;
+	dqx_cir.s.rate_divider_exponent = sqx_cir.s.rate_divider_exponent;
+	dqx_cir.s.rate_mantissa = sqx_cir.s.rate_mantissa;
+	dqx_cir.s.rate_exponent = sqx_cir.s.rate_exponent;
+	dqx_cir.s.burst_mantissa = sqx_cir.s.burst_mantissa;
+	dqx_cir.s.burst_exponent = sqx_cir.s.burst_exponent - 1;
+	csr_wr_node(node, CVMX_PKO_DQX_CIR(dq_num), dqx_cir.u64);
+
+	return rc;
+}
+
+/**
+ * Configure per-queue PIR rate limit parameters
+ *
+ * This function configures rate limit at the descriptor queue level.
+ *
+ * @param node The OCI node where the target port is located
+ * @param dq_num The descriptor queue number for this setting
+ * @param rate_kbips The desired throughput in kilo-bits-per-second
+ * @param burst_bytes The size of a burst in bytes, at least MTU
+ *
+ * @return Returns zero if both settings applied within allowed tolerance,
+ * otherwise the error is returned in parts-per-million.
+ * 'rate_kbips' error is a negative number, otherwise 'burst_bytes' error
+ * is returned as a positive integer.
+ */
+int cvmx_pko3_dq_pir_set(unsigned int node, unsigned int dq_num,
+			 unsigned long rate_kbips, unsigned int burst_bytes)
+{
+	unsigned long tclk;
+	cvmx_pko_l1_sqx_cir_t sqx_cir;
+	cvmx_pko_dqx_pir_t dqx_pir;
+	int rc;
+
+	/* mask off node bits, keep only node-local queue number */
+	dq_num &= cvmx_pko3_num_level_queues(CVMX_PKO_DESCR_QUEUES) - 1;
+	if (debug)
+		debug("%s: dq=%u rate=%lu kbps, burst=%u bytes\n", __func__,
+		      dq_num, rate_kbips, burst_bytes);
+
+	dqx_pir.u64 = 0;
+
+	/* When rate == 0, disable the shaper */
+	if (rate_kbips == 0ULL) {
+		dqx_pir.s.enable = 0;
+		csr_wr_node(node, CVMX_PKO_DQX_PIR(dq_num), dqx_pir.u64);
+		return 0;
+	}
+	/* Compute time-wheel frequency */
+	tclk = cvmx_pko3_dq_tw_clock_rate_node(node);
+
+	/* Compute shaper values (into the L1-layout scratch register) */
+	rc = cvmx_pko3_shaper_rate_compute(tclk, &sqx_cir, rate_kbips,
+					   burst_bytes);
+
+	/* Copy the computed fields into the DQ-level PIR register */
+	dqx_pir.s.enable = 1;
+	dqx_pir.s.rate_divider_exponent = sqx_cir.s.rate_divider_exponent;
+	dqx_pir.s.rate_mantissa = sqx_cir.s.rate_mantissa;
+	dqx_pir.s.rate_exponent = sqx_cir.s.rate_exponent;
+	dqx_pir.s.burst_mantissa = sqx_cir.s.burst_mantissa;
+	dqx_pir.s.burst_exponent = sqx_cir.s.burst_exponent - 1;
+	csr_wr_node(node, CVMX_PKO_DQX_PIR(dq_num), dqx_pir.u64);
+
+	return rc;
+}
+
+/**
+ * Configure per-queue treatment of excess traffic
+ *
+ * The default and most sensible behavior is to stall the packets
+ * colored Red (i.e. exceeding the PIR rate in full 3-color mode).
+ * There is also the option to discard excess traffic, which is
+ * the desired action for some applications that do not rely on
+ * back-pressure flow control.
+ * The shaper may be programmed to pass the RED packets onwards,
+ * which may be useful if the color is translated to a change
+ * in packet priority on egress.
+ *
+ * @param node The OCI node where the target port is located
+ * @param dq_num The descriptor queue number for this setting
+ * @param red_act The action required for all packets in excess of PIR
+ * @param len_adjust A 2's complement 8 bit value to add/subtract from
+ * packet length for the purpose of shaping calculations, e.g.
+ * a value of -14 will subtract the length of the Ethernet header
+ * and hence only account IP packet size.
+ *
+ * @return N/A
+ */
+void cvmx_pko3_dq_red(unsigned int node, unsigned int dq_num,
+		      red_action_t red_act, int8_t len_adjust)
+{
+	cvmx_pko_dqx_shape_t dqx_shape;
+
+	/* mask off node bits, keep only node-local queue number */
+	dq_num &= cvmx_pko3_num_level_queues(CVMX_PKO_DESCR_QUEUES) - 1;
+	dqx_shape.u64 = 0;
+
+	/* CN78XX Pass1.X: negative adjustment is clamped to zero */
+	if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X)) {
+		if (len_adjust < 0)
+			len_adjust = 0;
+	}
+	dqx_shape.s.adjust = len_adjust;
+
+	/* map requested action to the RED_ALGO register encoding */
+	switch (red_act) {
+	default:
+	case CVMX_PKO3_SHAPE_RED_STALL:
+		dqx_shape.s.red_algo = 0x0;
+		break;
+	case CVMX_PKO3_SHAPE_RED_DISCARD:
+		dqx_shape.s.red_algo = 0x3;
+		break;
+	case CVMX_PKO3_SHAPE_RED_PASS:
+		dqx_shape.s.red_algo = 0x1;
+		break;
+	}
+	csr_wr_node(node, CVMX_PKO_DQX_SHAPE(dq_num), dqx_shape.u64);
+}
--
2.35.1
^ permalink raw reply related [flat|nested] 52+ messages in thread* [PATCH 37/52] mips: octeon: Add cvmx-pko3-compat.c
2022-03-30 10:06 [PATCH 00/52] mips: octeon: Add ethernet support Stefan Roese
` (31 preceding siblings ...)
2022-03-30 10:07 ` [PATCH 36/52] mips: octeon: Add cvmx-pko3-queue.c Stefan Roese
@ 2022-03-30 10:07 ` Stefan Roese
2022-03-30 10:07 ` [PATCH 38/52] mips: octeon: Add cvmx-pko3-resources.c Stefan Roese
` (16 subsequent siblings)
49 siblings, 0 replies; 52+ messages in thread
From: Stefan Roese @ 2022-03-30 10:07 UTC (permalink / raw)
To: u-boot; +Cc: awilliams, cchavva
From: Aaron Williams <awilliams@marvell.com>
Import cvmx-pko3-compat.c from 2013 U-Boot. It will be used by the later
added drivers to support networking on the MIPS Octeon II / III
platforms.
Signed-off-by: Aaron Williams <awilliams@marvell.com>
Signed-off-by: Stefan Roese <sr@denx.de>
---
arch/mips/mach-octeon/cvmx-pko3-compat.c | 656 +++++++++++++++++++++++
1 file changed, 656 insertions(+)
create mode 100644 arch/mips/mach-octeon/cvmx-pko3-compat.c
diff --git a/arch/mips/mach-octeon/cvmx-pko3-compat.c b/arch/mips/mach-octeon/cvmx-pko3-compat.c
new file mode 100644
index 000000000000..3e142322dbfb
--- /dev/null
+++ b/arch/mips/mach-octeon/cvmx-pko3-compat.c
@@ -0,0 +1,656 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018-2022 Marvell International Ltd.
+ *
+ */
+
+#include <errno.h>
+#include <log.h>
+#include <time.h>
+#include <linux/delay.h>
+
+#include <mach/cvmx-regs.h>
+#include <mach/cvmx-csr.h>
+#include <mach/cvmx-bootmem.h>
+#include <mach/octeon-model.h>
+#include <mach/cvmx-fuse.h>
+#include <mach/octeon-feature.h>
+#include <mach/cvmx-qlm.h>
+#include <mach/octeon_qlm.h>
+#include <mach/cvmx-pcie.h>
+#include <mach/cvmx-coremask.h>
+
+#include <mach/cvmx-agl-defs.h>
+#include <mach/cvmx-bgxx-defs.h>
+#include <mach/cvmx-ciu-defs.h>
+#include <mach/cvmx-gmxx-defs.h>
+#include <mach/cvmx-gserx-defs.h>
+#include <mach/cvmx-ilk-defs.h>
+#include <mach/cvmx-ipd-defs.h>
+#include <mach/cvmx-pcsx-defs.h>
+#include <mach/cvmx-pcsxx-defs.h>
+#include <mach/cvmx-pki-defs.h>
+#include <mach/cvmx-pko-defs.h>
+#include <mach/cvmx-xcv-defs.h>
+
+#include <mach/cvmx-scratch.h>
+#include <mach/cvmx-hwfau.h>
+#include <mach/cvmx-fau.h>
+
+#include <mach/cvmx-hwpko.h>
+#include <mach/cvmx-ilk.h>
+#include <mach/cvmx-pki.h>
+#include <mach/cvmx-pko3.h>
+#include <mach/cvmx-pko3-queue.h>
+#include <mach/cvmx-pko3-resources.h>
+
+#include <mach/cvmx-helper.h>
+#include <mach/cvmx-helper-board.h>
+#include <mach/cvmx-helper-cfg.h>
+
+#include <mach/cvmx-helper-bgx.h>
+#include <mach/cvmx-helper-cfg.h>
+#include <mach/cvmx-helper-util.h>
+#include <mach/cvmx-helper-pki.h>
+
+/* #undef CVMX_ENABLE_PARAMETER_CHECKING */
+/* #define CVMX_ENABLE_PARAMETER_CHECKING 1 */
+/* #define __PKO3_NATIVE_PTR */
+
+/* Build a node-annotated physical address: the node number occupies the
+ * bits above bit 40, and only the low 40 bits of 'addr' are kept.
+ */
+static inline u64 cvmx_pko3_legacy_paddr(unsigned int node, u64 addr)
+{
+	const u64 addr_mask = (1ull << 40) - 1;
+
+	return ((u64)node << 40) | (addr & addr_mask);
+}
+
+#if CVMX_ENABLE_PARAMETER_CHECKING
+/**
+ * @INTERNAL
+ *
+ * Verify the integrity of a legacy buffer link pointer,
+ *
+ * Note that the IPD/PIP/PKO hardware would sometimes
+ * round-up the buf_ptr->size field of the last buffer in a chain to the next
+ * cache line size, so the sum of buf_ptr->size
+ * fields for a packet may exceed total_bytes by up to 127 bytes.
+ *
+ * @returns 0 on success, a negative number on error.
+ */
+static int cvmx_pko3_legacy_bufptr_validate(cvmx_buf_ptr_t buf_ptr,
+					    unsigned int gather,
+					    unsigned int buffers,
+					    unsigned int total_bytes)
+{
+	unsigned int node = cvmx_get_node_num();
+	unsigned int segs = 0, bytes = 0;
+	/* Node-annotated physical addresses carry the node number at
+	 * bit 40, so 64 bits are needed to avoid truncation
+	 */
+	u64 phys_addr;
+	cvmx_buf_ptr_t ptr;
+	int delta;
+
+	if (buffers == 0) {
+		return -1;
+	} else if (buffers == 1) {
+		/* Single buffer: size must match total_bytes (+ padding) */
+		delta = buf_ptr.s.size - total_bytes;
+		if (delta < 0 || delta > 127)
+			return -2;
+	} else if (gather) {
+		cvmx_buf_ptr_t *vptr;
+		/* Validate gather list */
+		if (buf_ptr.s.size < buffers)
+			return -3;
+		phys_addr = cvmx_pko3_legacy_paddr(node, buf_ptr.s.addr);
+		vptr = cvmx_phys_to_ptr(phys_addr);
+		for (segs = 0; segs < buffers; segs++)
+			bytes += vptr[segs].s.size;
+		delta = bytes - total_bytes;
+		if (delta < 0 || delta > 127)
+			return -4;
+	} else {
+		void *vptr;
+		/* Validate linked buffers: the next-link pointer is stored
+		 * one 64-bit word before each buffer's data address
+		 */
+		ptr = buf_ptr;
+		for (segs = 0; segs < buffers; segs++) {
+			bytes += ptr.s.size;
+			phys_addr = cvmx_pko3_legacy_paddr(node, ptr.s.addr);
+			vptr = cvmx_phys_to_ptr(phys_addr);
+			memcpy(&ptr, vptr - sizeof(u64), sizeof(u64));
+		}
+		delta = bytes - total_bytes;
+		if (delta < 0 || delta > 127)
+			return -5;
+	}
+	return 0;
+}
+#endif /* CVMX_ENABLE_PARAMETER_CHECKING */
+
+/*
+ * @INTERNAL
+ *
+ * Implementation note:
+ * When the packet is sure to not need a jump_buf,
+ * it will be written directly into cvmseg.
+ * When the packet might not fit into cvmseg with all
+ * of its descriptors, a jump_buf is allocated a priori,
+ * and only header is first placed into cvmseg, all other
+ * descriptors are placed into jump_buf, and finally
+ * the PKO_SEND_JUMP_S is written to cvmseg.
+ * This is because if there are no EXT or TSO descriptors,
+ * then HDR must be first, and JMP second and that is all
+ * that should go into cvmseg.
+ */
+struct __cvmx_pko3_legacy_desc {
+	u64 *cmd_words;		/* CVMSEG command buffer */
+	u64 *jump_buf_base_ptr;	/* jump buffer, NULL when not in use */
+	unsigned short word_count;	/* words used in cmd_words */
+	short last_pool;	/* most recently used pool, -1 = none yet */
+	u8 port_node;		/* destination node (from extended DQ number) */
+	u8 aura_node;		/* node of the legacy aura/pool */
+	u8 jump_buf_size;	/* words used in jump_buf */
+};
+
+/**
+ * @INTERNAL
+ *
+ * Add a subdescriptor into a command buffer,
+ * and handle command-buffer overflow by allocating a JUMP_s buffer
+ * from PKO3 internal AURA.
+ *
+ * @returns the running word count on success, negative errno on overflow.
+ */
+static int __cvmx_pko3_cmd_subdc_add(struct __cvmx_pko3_legacy_desc *desc,
+				     u64 subdc)
+{
+	/* SEND_JUMP_S missing on Pass1.X */
+	if (desc->word_count >= 15) {
+		printf("%s: ERROR: too many segments\n", __func__);
+		return -EBADF;
+	}
+
+	/* No jump buffer: append directly into the CVMSEG command words */
+	if (cvmx_likely(!desc->jump_buf_base_ptr)) {
+		desc->cmd_words[desc->word_count++] = subdc;
+		return desc->word_count;
+	}
+
+	/* Jump buffer in use: append there, respecting its capacity */
+	if (cvmx_unlikely(desc->jump_buf_size >= 255))
+		return -ENOMEM;
+
+	desc->jump_buf_base_ptr[desc->jump_buf_size] = subdc;
+	desc->jump_buf_size++;
+
+	return desc->word_count + desc->jump_buf_size;
+}
+
+/**
+ * @INTERNAL
+ *
+ * Finalize command buffer
+ *
+ * Appends a SEND_AURA_S to the jump buffer, then links the jump buffer
+ * into the CVMSEG command with a SEND_JUMP_S word.
+ *
+ * @returns: number of command words in command buffer and jump buffer
+ * or negative number on error.
+ */
+
+static int __cvmx_pko3_cmd_done(struct __cvmx_pko3_legacy_desc *desc)
+{
+	short pko_aura;
+	cvmx_pko_buf_ptr_t jump_s;
+	cvmx_pko_send_aura_t aura_s;
+
+	/* no jump buffer, nothing to do */
+	if (!desc->jump_buf_base_ptr)
+		return desc->word_count;
+
+	/* account for the SEND_JUMP_S word added below */
+	desc->word_count++;
+
+	/* With a jump buffer, CVMSEG must hold exactly HDR + JUMP = 2 words */
+	if (desc->word_count != 2) {
+		printf("ERROR: %s: internal error, word_count=%d\n", __func__,
+		       desc->word_count);
+		return -EINVAL;
+	}
+
+	/* Add SEND_AURA_S at the end of jump_buf */
+	pko_aura = __cvmx_pko3_aura_get(desc->port_node);
+
+	aura_s.u64 = 0;
+	aura_s.s.aura = pko_aura;
+	aura_s.s.offset = 0;
+	aura_s.s.alg = AURAALG_NOP;
+	aura_s.s.subdc4 = CVMX_PKO_SENDSUBDC_AURA;
+
+	desc->jump_buf_base_ptr[desc->jump_buf_size++] = aura_s.u64;
+
+	/* Add SEND_JUMPS to point to jump_buf */
+	jump_s.u64 = 0;
+	jump_s.s.subdc3 = CVMX_PKO_SENDSUBDC_JUMP;
+	jump_s.s.addr = cvmx_ptr_to_phys(desc->jump_buf_base_ptr);
+	jump_s.s.i = 1; /* i=1: free this buffer when done */
+	jump_s.s.size = desc->jump_buf_size;
+	desc->cmd_words[1] = jump_s.u64;
+
+	return desc->word_count + desc->jump_buf_size;
+}
+
+/**
+ * @INTERNAL
+ *
+ * Handle buffer pools for PKO legacy transmit operation
+ *
+ * Tracks the most recently used pool in the descriptor; the first pool
+ * is stored directly in the SEND_HDR_S word, subsequent pool changes
+ * emit a SEND_AURA_S subdescriptor.
+ *
+ * @returns 0 (or the subdescriptor count) on success, negative on error.
+ */
+static inline int cvmx_pko3_legacy_pool(struct __cvmx_pko3_legacy_desc *desc,
+					int pool)
+{
+	cvmx_pko_send_aura_t aura_s;
+	unsigned int aura;
+
+	/* Nothing to do when the pool is unchanged */
+	if (cvmx_unlikely(desc->last_pool == pool))
+		return 0;
+
+	/* Create AURA from legacy pool (assume LAURA == POOL) */
+	aura = desc->aura_node << 10;	/* NODE above LAURA=AURA[0..9] */
+	aura |= pool;
+
+	if (cvmx_likely(desc->last_pool < 0)) {
+		cvmx_pko_send_hdr_t *hdr_s;
+
+		/* First pool for this packet: store it in SEND_HDR_S */
+		hdr_s = (void *)&desc->cmd_words[0];
+		hdr_s->s.aura = aura;
+		desc->last_pool = pool;
+		return 0;
+	}
+
+	/* Pool change mid-packet: emit a SEND_AURA_S subdescriptor */
+	aura_s.u64 = 0;
+	aura_s.s.subdc4 = CVMX_PKO_SENDSUBDC_AURA;
+	aura_s.s.offset = 0;
+	aura_s.s.alg = AURAALG_NOP;
+	aura_s.s.aura = aura;
+	desc->last_pool = pool;
+	return __cvmx_pko3_cmd_subdc_add(desc, aura_s.u64);
+}
+
+/**
+ * @INTERNAL
+ *
+ * Backward compatibility for packet transmission using legacy PKO command.
+ *
+ * NOTE: Only supports output on node-local ports.
+ *
+ * TBD: Could embed destination node in extended DQ number.
+ *
+ * @param dq descriptor queue number; destination node in bits above bit 9
+ * @param pko_command legacy PKO command word 0
+ * @param packet legacy buffer pointer (single, linked or gather list)
+ * @param addr physical address used when pko_command.s.rsp is set:
+ *             an SSO WQE (wqp=1) or a byte-clear location (wqp=0)
+ * @param tag_sw request an SSO tag switch when the command is submitted
+ * @return CVMX_PKO_SUCCESS on success, or a legacy PKO error code
+ */
+cvmx_pko_return_value_t
+cvmx_pko3_legacy_xmit(unsigned int dq, cvmx_pko_command_word0_t pko_command,
+		      cvmx_buf_ptr_t packet, u64 addr, bool tag_sw)
+{
+	cvmx_pko_query_rtn_t pko_status;
+	cvmx_pko_send_hdr_t *hdr_s;
+	struct __cvmx_pko3_legacy_desc desc;
+	u8 *data_ptr;
+	unsigned int node, seg_cnt;
+	int res;
+	cvmx_buf_ptr_pki_t bptr;
+
+	seg_cnt = pko_command.s.segs;
+	desc.cmd_words = cvmx_pko3_cvmseg_addr();
+
+	/* Allocate from local aura, assume all old-pools are local */
+	node = cvmx_get_node_num();
+	desc.aura_node = node;
+
+	/* Derive destination node from dq */
+	desc.port_node = dq >> 10;
+	dq &= (1 << 10) - 1;
+
+	desc.word_count = 1;
+	desc.last_pool = -1;
+
+	/* For small packets, write descriptors directly to CVMSEG
+	 * but for longer packets use jump_buf
+	 */
+	if (seg_cnt < 7 || OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X)) {
+		desc.jump_buf_size = 0;
+		desc.jump_buf_base_ptr = NULL;
+	} else {
+		unsigned int pko_aura = __cvmx_pko3_aura_get(desc.port_node);
+
+		cvmx_fpa3_gaura_t aura =
+			__cvmx_fpa3_gaura(pko_aura >> 10, pko_aura & 0x3ff);
+
+		/* Allocate from internal AURA, size is 4KiB */
+		desc.jump_buf_base_ptr = cvmx_fpa3_alloc(aura);
+
+		if (!desc.jump_buf_base_ptr)
+			return -ENOMEM;
+		desc.jump_buf_size = 0;
+	}
+
+	/* Native buffer-pointer for error checking */
+	bptr.u64 = packet.u64;
+
+#if CVMX_ENABLE_PARAMETER_CHECKING
+	if (seg_cnt == 1 && bptr.size == pko_command.s.total_bytes) {
+		/*
+		 * Special case for native buffer pointer:
+		 * This is the only case where the native pointer-style can be
+		 * automatically identified, that is when an entire packet
+		 * fits into a single buffer by the PKI.
+		 * The use of the native buffers with this function
+		 * should be avoided.
+		 */
+		debug("%s: WARNING: Native buffer-pointer\n", __func__);
+	} else {
+		/* The buffer ptr is assumed to be received in legacy format */
+		res = cvmx_pko3_legacy_bufptr_validate(
+			packet, pko_command.s.gather, pko_command.s.segs,
+			pko_command.s.total_bytes);
+		if (res < 0) {
+			debug("%s: ERROR: Not a valid packet pointer <%d>\n",
+			      __func__, res);
+			return CVMX_PKO_CMD_QUEUE_INIT_ERROR;
+		}
+	}
+#endif /* CVMX_ENABLE_PARAMETER_CHECKING */
+
+	/* Squash warnings */
+	(void)bptr;
+
+	/*** Translate legacy PKO fields into PKO3 PKO_SEND_HDR_S ***/
+
+	/* PKO_SEND_HDR_S is always the first word in the command */
+	hdr_s = (void *)&desc.cmd_words[0];
+	hdr_s->u64 = 0;
+
+	/* Copy total packet size */
+	hdr_s->s.total = pko_command.s.total_bytes;
+
+	/* Endianness */
+	hdr_s->s.le = pko_command.s.le;
+
+	/* N2 is the same meaning */
+	if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X))
+		hdr_s->s.n2 = 0; /* L2 allocate everything */
+	else
+		hdr_s->s.n2 = pko_command.s.n2;
+
+	/* DF bit has the same meaning */
+	hdr_s->s.df = pko_command.s.dontfree;
+
+	/* II bit has the same meaning */
+	hdr_s->s.ii = pko_command.s.ignore_i;
+
+	/* non-zero IP header offset requires L3/L4 checksum calculation */
+	if (cvmx_unlikely(pko_command.s.ipoffp1 > 0)) {
+		u8 ipoff, ip0, l4_proto = 0;
+
+		/* Get data pointer for header inspection below */
+		if (pko_command.s.gather) {
+			cvmx_buf_ptr_t *p_ptr;
+			cvmx_buf_ptr_t blk;
+
+			p_ptr = cvmx_phys_to_ptr(
+				cvmx_pko3_legacy_paddr(node, packet.s.addr));
+			blk = p_ptr[0];
+			data_ptr = cvmx_phys_to_ptr(
+				cvmx_pko3_legacy_paddr(node, blk.s.addr));
+		} else {
+			data_ptr = cvmx_phys_to_ptr(
+				cvmx_pko3_legacy_paddr(node, packet.s.addr));
+		}
+
+		/* Get IP header offset */
+		ipoff = pko_command.s.ipoffp1 - 1;
+
+		/* Parse IP header, version, L4 protocol */
+		hdr_s->s.l3ptr = ipoff;
+		ip0 = data_ptr[ipoff];
+
+		/* IPv4 header length, checksum offload */
+		if ((ip0 >> 4) == 4) {
+			hdr_s->s.l4ptr = hdr_s->s.l3ptr + ((ip0 & 0xf) << 2);
+			l4_proto = data_ptr[ipoff + 9];
+			hdr_s->s.ckl3 = 1; /* Only valid for IPv4 */
+		}
+		/* IPv6 header length is fixed, no checksum */
+		/* NOTE(review): extension headers are not walked, so l4ptr
+		 * and l4_proto are only correct without them - TODO confirm
+		 */
+		if ((ip0 >> 4) == 6) {
+			hdr_s->s.l4ptr = hdr_s->s.l3ptr + 40;
+			l4_proto = data_ptr[ipoff + 6];
+		}
+		/* Set L4 checksum algo based on L4 protocol */
+		if (l4_proto == 6)
+			hdr_s->s.ckl4 = /* TCP */ 2;
+		else if (l4_proto == 17)
+			hdr_s->s.ckl4 = /* UDP */ 1;
+		else if (l4_proto == 132)
+			hdr_s->s.ckl4 = /* SCTP */ 3;
+		else
+			hdr_s->s.ckl4 = /* Unknown */ 0;
+	}
+
+	if (pko_command.s.gather) {
+		/* Process legacy gather list */
+		cvmx_pko_buf_ptr_t gather_s;
+		cvmx_buf_ptr_t *p_ptr;
+		cvmx_buf_ptr_t blk;
+		unsigned int i;
+
+		/* Get gather list pointer */
+		p_ptr = cvmx_phys_to_ptr(
+			cvmx_pko3_legacy_paddr(node, packet.s.addr));
+		blk = p_ptr[0];
+		/* setup data_ptr */
+		data_ptr = cvmx_phys_to_ptr(
+			cvmx_pko3_legacy_paddr(node, blk.s.addr));
+
+		for (i = 0; i < seg_cnt; i++) {
+			if (cvmx_unlikely(cvmx_pko3_legacy_pool(
+						  &desc, blk.s.pool) < 0))
+				return CVMX_PKO_NO_MEMORY;
+
+			/* Insert PKO_SEND_GATHER_S for the current buffer */
+			gather_s.u64 = 0;
+			gather_s.s.subdc3 = CVMX_PKO_SENDSUBDC_GATHER;
+			gather_s.s.size = blk.s.size;
+			gather_s.s.i = blk.s.i;
+			gather_s.s.addr =
+				cvmx_pko3_legacy_paddr(node, blk.s.addr);
+
+			res = __cvmx_pko3_cmd_subdc_add(&desc, gather_s.u64);
+			if (res < 0)
+				return CVMX_PKO_NO_MEMORY;
+
+			/* get next bufptr */
+			blk = p_ptr[i + 1];
+		} /* for i */
+
+		/* Free original gather-list buffer */
+		/* NOTE(review): the `packet.s.i == pko_command.s.dontfree`
+		 * clause compares the legacy I bit with the DF bit - looks
+		 * intentional but verify against legacy PKO free semantics
+		 */
+		if ((pko_command.s.ignore_i && !pko_command.s.dontfree) ||
+		    packet.s.i == pko_command.s.dontfree)
+			cvmx_fpa_free_nosync(p_ptr, packet.s.pool,
+					     (i - 1) / 16 + 1);
+	} else {
+		/* Process legacy linked buffer list */
+		cvmx_pko_buf_ptr_t gather_s;
+		cvmx_buf_ptr_t blk;
+		void *vptr;
+
+		data_ptr = cvmx_phys_to_ptr(
+			cvmx_pko3_legacy_paddr(node, packet.s.addr));
+		blk = packet;
+
+		/*
+		 * Legacy linked-buffers converted into flat gather list
+		 * so that the AURA can optionally be changed to reflect
+		 * the POOL number in the legacy pointers
+		 */
+		do {
+			/* Insert PKO_SEND_AURA_S if pool changes */
+			if (cvmx_unlikely(cvmx_pko3_legacy_pool(
+						  &desc, blk.s.pool) < 0))
+				return CVMX_PKO_NO_MEMORY;
+
+			/* Insert PKO_SEND_GATHER_S for the current buffer */
+			gather_s.u64 = 0;
+			gather_s.s.subdc3 = CVMX_PKO_SENDSUBDC_GATHER;
+			gather_s.s.size = blk.s.size;
+			gather_s.s.i = blk.s.i;
+			gather_s.s.addr =
+				cvmx_pko3_legacy_paddr(node, blk.s.addr);
+
+			res = __cvmx_pko3_cmd_subdc_add(&desc, gather_s.u64);
+			if (res < 0)
+				return CVMX_PKO_NO_MEMORY;
+
+			/* Get the next buffer pointer (stored just before
+			 * the data in the legacy linked-buffer layout)
+			 */
+			vptr = cvmx_phys_to_ptr(
+				cvmx_pko3_legacy_paddr(node, blk.s.addr));
+			memcpy(&blk, vptr - sizeof(blk), sizeof(blk));
+
+			/* Decrement segment count */
+			seg_cnt--;
+
+		} while (seg_cnt > 0);
+	}
+
+	/* This field indicates the presence of 3rd legacy command word */
+	/* NOTE: legacy 3rd word may contain CN78XX native phys addr already */
+	if (cvmx_unlikely(pko_command.s.rsp)) {
+		/* PTP bit in word3 is not supported -
+		 * can not be distinguished from larger phys_addr[42..41]
+		 */
+		if (pko_command.s.wqp) {
+			/* <addr> is an SSO WQE */
+			cvmx_wqe_word1_t *wqe_p;
+			cvmx_pko_send_work_t work_s;
+
+			work_s.u64 = 0;
+			work_s.s.subdc4 = CVMX_PKO_SENDSUBDC_WORK;
+			work_s.s.addr = addr;
+			/* Assume WQE is legacy format too */
+			wqe_p = cvmx_phys_to_ptr(addr + sizeof(u64));
+			work_s.s.grp = wqe_p->cn38xx.grp;
+			work_s.s.tt = wqe_p->tag_type;
+
+			res = __cvmx_pko3_cmd_subdc_add(&desc, work_s.u64);
+		} else {
+			cvmx_pko_send_mem_t mem_s;
+			/* MEMALG_SET broken on Pass1 */
+			if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_0)) {
+				debug("%s: ERROR: PKO byte-clear not supported\n",
+				      __func__);
+			}
+			/* <addr> is a physical address of byte clear */
+			mem_s.u64 = 0;
+			mem_s.s.subdc4 = CVMX_PKO_SENDSUBDC_MEM;
+			mem_s.s.addr = addr;
+			mem_s.s.dsz = MEMDSZ_B8;
+			mem_s.s.alg = MEMALG_SET;
+			mem_s.s.offset = 0;
+
+			res = __cvmx_pko3_cmd_subdc_add(&desc, mem_s.u64);
+		}
+		if (res < 0)
+			return CVMX_PKO_NO_MEMORY;
+	}
+
+	/* FAU counter binding reg0 */
+	if (pko_command.s.reg0) {
+		cvmx_pko_send_mem_t mem_s;
+
+		debug("%s: Legacy FAU commands: reg0=%#x sz0=%#x\n", __func__,
+		      pko_command.s.reg0, pko_command.s.size0);
+		mem_s.u64 = 0;
+		mem_s.s.subdc4 = CVMX_PKO_SENDSUBDC_MEM;
+		mem_s.s.addr = cvmx_ptr_to_phys(
+			CASTPTR(void, __cvmx_fau_sw_addr(pko_command.s.reg0)));
+		if (cvmx_likely(pko_command.s.size0 == CVMX_FAU_OP_SIZE_64))
+			mem_s.s.dsz = MEMDSZ_B64;
+		else if (pko_command.s.size0 == CVMX_FAU_OP_SIZE_32)
+			mem_s.s.dsz = MEMDSZ_B32;
+		else if (pko_command.s.size0 == CVMX_FAU_OP_SIZE_16)
+			mem_s.s.dsz = MEMDSZ_B16;
+		else
+			mem_s.s.dsz = MEMDSZ_B8;
+
+		if (mem_s.s.dsz == MEMDSZ_B16 || mem_s.s.dsz == MEMDSZ_B8)
+			debug("%s: ERROR: 8/16 bit decrement unsupported",
+			      __func__);
+
+		mem_s.s.offset = pko_command.s.subone0;
+		if (mem_s.s.offset)
+			mem_s.s.alg = MEMALG_SUB;
+		else
+			mem_s.s.alg = MEMALG_SUBLEN;
+
+		res = __cvmx_pko3_cmd_subdc_add(&desc, mem_s.u64);
+		if (res < 0)
+			return CVMX_PKO_NO_MEMORY;
+	}
+
+	/* FAU counter binding reg1 */
+	if (cvmx_unlikely(pko_command.s.reg1)) {
+		cvmx_pko_send_mem_t mem_s;
+
+		debug("%s: Legacy FAU commands: reg1=%#x sz1=%#x\n", __func__,
+		      pko_command.s.reg1, pko_command.s.size1);
+		mem_s.u64 = 0;
+		mem_s.s.subdc4 = CVMX_PKO_SENDSUBDC_MEM;
+		mem_s.s.addr = cvmx_ptr_to_phys(
+			CASTPTR(void, __cvmx_fau_sw_addr(pko_command.s.reg1)));
+		if (cvmx_likely(pko_command.s.size1 == CVMX_FAU_OP_SIZE_64))
+			mem_s.s.dsz = MEMDSZ_B64;
+		else if (pko_command.s.size1 == CVMX_FAU_OP_SIZE_32)
+			mem_s.s.dsz = MEMDSZ_B32;
+		else if (pko_command.s.size1 == CVMX_FAU_OP_SIZE_16)
+			mem_s.s.dsz = MEMDSZ_B16;
+		else
+			mem_s.s.dsz = MEMDSZ_B8;
+
+		if (mem_s.s.dsz == MEMDSZ_B16 || mem_s.s.dsz == MEMDSZ_B8)
+			printf("%s: ERROR: 8/16 bit decrement unsupported",
+			       __func__);
+
+		mem_s.s.offset = pko_command.s.subone1;
+		if (mem_s.s.offset)
+			mem_s.s.alg = MEMALG_SUB;
+		else
+			mem_s.s.alg = MEMALG_SUBLEN;
+
+		res = __cvmx_pko3_cmd_subdc_add(&desc, mem_s.u64);
+		if (res < 0)
+			return CVMX_PKO_NO_MEMORY;
+	}
+
+	/* These PKO_HDR_S fields are not used: */
+	/* hdr_s->s.ds does not have legacy equivalent, remains 0 */
+	/* hdr_s->s.format has no legacy equivalent, remains 0 */
+
+	/*** Finalize command buffer ***/
+	res = __cvmx_pko3_cmd_done(&desc);
+	if (res < 0)
+		return CVMX_PKO_NO_MEMORY;
+
+	/*** Send the PKO3 command into the Descriptor Queue ***/
+	pko_status =
+		__cvmx_pko3_lmtdma(desc.port_node, dq, desc.word_count, tag_sw);
+
+	/*** Map PKO3 result codes to legacy return values ***/
+	if (cvmx_likely(pko_status.s.dqstatus == PKO_DQSTATUS_PASS))
+		return CVMX_PKO_SUCCESS;
+
+	debug("%s: ERROR: failed to enqueue: %s\n", __func__,
+	      pko_dqstatus_error(pko_status.s.dqstatus));
+
+	if (pko_status.s.dqstatus == PKO_DQSTATUS_ALREADY)
+		return CVMX_PKO_PORT_ALREADY_SETUP;
+	if (pko_status.s.dqstatus == PKO_DQSTATUS_NOFPABUF ||
+	    pko_status.s.dqstatus == PKO_DQSTATUS_NOPKOBUF)
+		return CVMX_PKO_NO_MEMORY;
+	if (pko_status.s.dqstatus == PKO_DQSTATUS_NOTCREATED)
+		return CVMX_PKO_INVALID_QUEUE;
+	if (pko_status.s.dqstatus == PKO_DQSTATUS_BADSTATE)
+		return CVMX_PKO_CMD_QUEUE_INIT_ERROR;
+	if (pko_status.s.dqstatus == PKO_DQSTATUS_SENDPKTDROP)
+		return CVMX_PKO_INVALID_PORT;
+
+	/* Any unrecognized status is reported as an invalid port */
+	return CVMX_PKO_INVALID_PORT;
+}
--
2.35.1
^ permalink raw reply related [flat|nested] 52+ messages in thread* [PATCH 38/52] mips: octeon: Add cvmx-pko3-resources.c
2022-03-30 10:06 [PATCH 00/52] mips: octeon: Add ethernet support Stefan Roese
` (32 preceding siblings ...)
2022-03-30 10:07 ` [PATCH 37/52] mips: octeon: Add cvmx-pko3-compat.c Stefan Roese
@ 2022-03-30 10:07 ` Stefan Roese
2022-03-30 10:07 ` [PATCH 39/52] mips: octeon: Add cvmx-pko-internal-ports-range.c Stefan Roese
` (15 subsequent siblings)
49 siblings, 0 replies; 52+ messages in thread
From: Stefan Roese @ 2022-03-30 10:07 UTC (permalink / raw)
To: u-boot; +Cc: awilliams, cchavva
From: Aaron Williams <awilliams@marvell.com>
Import cvmx-pko3-resources.c from 2013 U-Boot. It will be used by the later
added drivers to support networking on the MIPS Octeon II / III
platforms.
Signed-off-by: Aaron Williams <awilliams@marvell.com>
Signed-off-by: Stefan Roese <sr@denx.de>
---
arch/mips/mach-octeon/cvmx-pko3-resources.c | 229 ++++++++++++++++++++
1 file changed, 229 insertions(+)
create mode 100644 arch/mips/mach-octeon/cvmx-pko3-resources.c
diff --git a/arch/mips/mach-octeon/cvmx-pko3-resources.c b/arch/mips/mach-octeon/cvmx-pko3-resources.c
new file mode 100644
index 000000000000..3d17d84832cf
--- /dev/null
+++ b/arch/mips/mach-octeon/cvmx-pko3-resources.c
@@ -0,0 +1,229 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018-2022 Marvell International Ltd.
+ *
+ * PKO resources.
+ */
+
+#include <errno.h>
+#include <log.h>
+#include <time.h>
+#include <linux/delay.h>
+
+#include <mach/cvmx-regs.h>
+#include <mach/cvmx-csr.h>
+#include <mach/cvmx-bootmem.h>
+#include <mach/octeon-model.h>
+#include <mach/cvmx-fuse.h>
+#include <mach/octeon-feature.h>
+#include <mach/cvmx-qlm.h>
+#include <mach/octeon_qlm.h>
+#include <mach/cvmx-pcie.h>
+#include <mach/cvmx-coremask.h>
+#include <mach/cvmx-range.h>
+#include <mach/cvmx-global-resources.h>
+
+#include <mach/cvmx-agl-defs.h>
+#include <mach/cvmx-bgxx-defs.h>
+#include <mach/cvmx-ciu-defs.h>
+#include <mach/cvmx-gmxx-defs.h>
+#include <mach/cvmx-gserx-defs.h>
+#include <mach/cvmx-ilk-defs.h>
+#include <mach/cvmx-ipd-defs.h>
+#include <mach/cvmx-pcsx-defs.h>
+#include <mach/cvmx-pcsxx-defs.h>
+#include <mach/cvmx-pki-defs.h>
+#include <mach/cvmx-pko-defs.h>
+#include <mach/cvmx-xcv-defs.h>
+
+#include <mach/cvmx-hwpko.h>
+#include <mach/cvmx-ilk.h>
+#include <mach/cvmx-pki.h>
+#include <mach/cvmx-pko3.h>
+#include <mach/cvmx-pko3-queue.h>
+#include <mach/cvmx-pko3-resources.h>
+
+#include <mach/cvmx-helper.h>
+#include <mach/cvmx-helper-board.h>
+#include <mach/cvmx-helper-cfg.h>
+
+#include <mach/cvmx-helper-bgx.h>
+#include <mach/cvmx-helper-cfg.h>
+#include <mach/cvmx-helper-util.h>
+#include <mach/cvmx-helper-pki.h>
+
+/*
+ * Global-resource tags identifying the per-node PKO3 queue ranges.
+ * The node number is embedded in the tag as an ASCII digit ((x) + '0'),
+ * giving each node its own independent allocation range.
+ */
+#define CVMX_GR_TAG_PKO_PORT_QUEUES(x)                                         \
+	cvmx_get_gr_tag('c', 'v', 'm', '_', 'p', 'k', 'o', 'p', 'o', 'q', '_', \
+			((x) + '0'), '.', '.', '.', '.')
+#define CVMX_GR_TAG_PKO_L2_QUEUES(x)                                           \
+	cvmx_get_gr_tag('c', 'v', 'm', '_', 'p', 'k', 'o', 'l', '2', 'q', '_', \
+			((x) + '0'), '.', '.', '.', '.')
+#define CVMX_GR_TAG_PKO_L3_QUEUES(x)                                           \
+	cvmx_get_gr_tag('c', 'v', 'm', '_', 'p', 'k', 'o', 'l', '3', 'q', '_', \
+			((x) + '0'), '.', '.', '.', '.')
+#define CVMX_GR_TAG_PKO_L4_QUEUES(x)                                           \
+	cvmx_get_gr_tag('c', 'v', 'm', '_', 'p', 'k', 'o', 'l', '4', 'q', '_', \
+			((x) + '0'), '.', '.', '.', '.')
+#define CVMX_GR_TAG_PKO_L5_QUEUES(x)                                           \
+	cvmx_get_gr_tag('c', 'v', 'm', '_', 'p', 'k', 'o', 'l', '5', 'q', '_', \
+			((x) + '0'), '.', '.', '.', '.')
+#define CVMX_GR_TAG_PKO_DESCR_QUEUES(x)                                        \
+	cvmx_get_gr_tag('c', 'v', 'm', '_', 'p', 'k', 'o', 'd', 'e', 'q', '_', \
+			((x) + '0'), '.', '.', '.', '.')
+#define CVMX_GR_TAG_PKO_PORT_INDEX(x)                                          \
+	cvmx_get_gr_tag('c', 'v', 'm', '_', 'p', 'k', 'o', 'p', 'i', 'd', '_', \
+			((x) + '0'), '.', '.', '.', '.')
+
+/*
+ * @INTERNAL
+ * Per-DQ parameters, current and maximum queue depth counters
+ */
+cvmx_pko3_dq_params_t *__cvmx_pko3_dq_params[CVMX_MAX_NODES];
+
+/* Queue counts per PKO3 hierarchy level for CN78XX, indexed by level */
+static const short cvmx_pko_num_queues_78XX[256] = {
+	[CVMX_PKO_PORT_QUEUES] = 32, [CVMX_PKO_L2_QUEUES] = 512,
+	[CVMX_PKO_L3_QUEUES] = 512, [CVMX_PKO_L4_QUEUES] = 1024,
+	[CVMX_PKO_L5_QUEUES] = 1024, [CVMX_PKO_DESCR_QUEUES] = 1024
+};
+
+/* Queue counts for CN73XX/CNF75XX; these models have no L4/L5 levels */
+static const short cvmx_pko_num_queues_73XX[256] = {
+	[CVMX_PKO_PORT_QUEUES] = 16, [CVMX_PKO_L2_QUEUES] = 256,
+	[CVMX_PKO_L3_QUEUES] = 256, [CVMX_PKO_L4_QUEUES] = 0,
+	[CVMX_PKO_L5_QUEUES] = 0, [CVMX_PKO_DESCR_QUEUES] = 256
+};
+
+/**
+ * Return the number of queues available at a PKO3 hierarchy level
+ *
+ * @param level queue level (port, L2..L5 scheduler or descriptor queues)
+ * @return the number of queues, or -1 if the level is invalid for the
+ *	   current model.
+ */
+int cvmx_pko3_num_level_queues(enum cvmx_pko3_level_e level)
+{
+	unsigned int nq = 0, ne = 0;
+
+	if (OCTEON_IS_MODEL(OCTEON_CN78XX)) {
+		ne = NUM_ELEMENTS(cvmx_pko_num_queues_78XX);
+		/* Bounds-check before indexing to avoid an OOB table read */
+		if ((unsigned int)level < ne)
+			nq = cvmx_pko_num_queues_78XX[level];
+	}
+	if (OCTEON_IS_MODEL(OCTEON_CN73XX) || OCTEON_IS_MODEL(OCTEON_CNF75XX)) {
+		ne = NUM_ELEMENTS(cvmx_pko_num_queues_73XX);
+		if ((unsigned int)level < ne)
+			nq = cvmx_pko_num_queues_73XX[level];
+	}
+
+	/* nq stays 0 for unknown models, out-of-range or zero-sized levels */
+	if (nq == 0 || level >= ne) {
+		printf("ERROR: %s: queue level %#x invalid\n", __func__, level);
+		return -1;
+	}
+
+	return nq;
+}
+
+/**
+ * @INTERNAL
+ * Map a (node, queue level) pair to its global-resource tag.
+ *
+ * @param node OCI node number
+ * @param queue_level PKO3 queue hierarchy level
+ * @return the resource tag, or CVMX_GR_TAG_INVALID for a bad level.
+ */
+static inline struct global_resource_tag
+__cvmx_pko_get_queues_resource_tag(int node, enum cvmx_pko3_level_e queue_level)
+{
+	/*
+	 * cvmx_pko3_num_level_queues() returns -1 (never 0) for an
+	 * invalid level, so test for <= 0 - the original `== 0` test
+	 * could never trigger.
+	 */
+	if (cvmx_pko3_num_level_queues(queue_level) <= 0) {
+		printf("ERROR: %s: queue level %#x invalid\n", __func__,
+		       queue_level);
+		return CVMX_GR_TAG_INVALID;
+	}
+
+	switch (queue_level) {
+	case CVMX_PKO_PORT_QUEUES:
+		return CVMX_GR_TAG_PKO_PORT_QUEUES(node);
+	case CVMX_PKO_L2_QUEUES:
+		return CVMX_GR_TAG_PKO_L2_QUEUES(node);
+	case CVMX_PKO_L3_QUEUES:
+		return CVMX_GR_TAG_PKO_L3_QUEUES(node);
+	case CVMX_PKO_L4_QUEUES:
+		return CVMX_GR_TAG_PKO_L4_QUEUES(node);
+	case CVMX_PKO_L5_QUEUES:
+		return CVMX_GR_TAG_PKO_L5_QUEUES(node);
+	case CVMX_PKO_DESCR_QUEUES:
+		return CVMX_GR_TAG_PKO_DESCR_QUEUES(node);
+	default:
+		printf("ERROR: %s: queue level %#x invalid\n", __func__,
+		       queue_level);
+		return CVMX_GR_TAG_INVALID;
+	}
+}
+
+/**
+ * Allocate or reserve a pko resource - called by wrapper functions
+ *
+ * Ensures the backing global-resource range exists, then either
+ * reserves a specific base queue or allocates any free range.
+ *
+ * @param tag processed global resource tag
+ * @param base_queue if non-negative, the specific queue to reserve
+ * @param owner to be recorded for the resource
+ * @param num_queues to allocate
+ * @param max_num_queues total size of the global resource range
+ * @return allocated/reserved base on success, -1 on failure
+ */
+int cvmx_pko_alloc_global_resource(struct global_resource_tag tag,
+				   int base_queue, int owner, int num_queues,
+				   int max_num_queues)
+{
+	bool reserve = (base_queue >= 0);
+	int rv;
+
+	/* Create the range first; this is a no-op if it already exists */
+	if (cvmx_create_global_resource_range(tag, max_num_queues)) {
+		debug("ERROR: Failed to create PKO3 resource: %lx-%lx\n",
+		      (unsigned long)tag.hi, (unsigned long)tag.lo);
+		return -1;
+	}
+
+	if (reserve)
+		rv = cvmx_reserve_global_resource_range(tag, owner, base_queue,
+							num_queues);
+	else
+		rv = cvmx_allocate_global_resource_range(tag, owner,
+							 num_queues, 1);
+
+	if (rv < 0) {
+		debug("ERROR: Failed to %s PKO3 tag %lx:%lx, %i %i %i %i.\n",
+		      (reserve ? "reserve" : "allocate"),
+		      (unsigned long)tag.hi, (unsigned long)tag.lo, base_queue,
+		      owner, num_queues, max_num_queues);
+		return -1;
+	}
+
+	return rv;
+}
+
+/**
+ * Allocate or reserve PKO queues - wrapper for cvmx_pko_alloc_global_resource
+ *
+ * @param node on which to allocate/reserve PKO queues
+ * @param level of PKO queue
+ * @param owner of reserved/allocated resources
+ * @param base_queue to start reservation/allocation, -1 for "any"
+ * @param num_queues number of queues to be allocated
+ * @return 0 on success, -1 on failure
+ */
+int cvmx_pko_alloc_queues(int node, int level, int owner, int base_queue,
+			  int num_queues)
+{
+	/* Resolve the per-node, per-level resource tag and range size */
+	struct global_resource_tag rtag =
+		__cvmx_pko_get_queues_resource_tag(node, level);
+	int queue_limit = cvmx_pko3_num_level_queues(level);
+
+	return cvmx_pko_alloc_global_resource(rtag, base_queue, owner,
+					      num_queues, queue_limit);
+}
+
+/**
+ * Free an allocated/reserved PKO queues for a certain level and owner
+ *
+ * @param node on which the PKO queues were allocated/reserved
+ * @param level of PKO queue
+ * @param owner of reserved/allocated resources
+ * @return 0 on success, -1 on failure
+ */
+int cvmx_pko_free_queues(int node, int level, int owner)
+{
+	/* All queues for this (tag, owner) pair are released at once */
+	struct global_resource_tag rtag =
+		__cvmx_pko_get_queues_resource_tag(node, level);
+
+	return cvmx_free_global_resource_range_with_owner(rtag, owner);
+}
+
+/**
+ * @INTERNAL
+ *
+ * Initialize the pointer to the descriptor queue parameter table.
+ * The table is one named block per node, and may be shared between
+ * applications.
+ *
+ * NOTE: this is a stub in the U-Boot port - no parameter table is
+ * actually set up; it always reports success.
+ */
+int __cvmx_pko3_dq_param_setup(unsigned int node)
+{
+	return 0;
+}
--
2.35.1
^ permalink raw reply related [flat|nested] 52+ messages in thread* [PATCH 39/52] mips: octeon: Add cvmx-pko-internal-ports-range.c
2022-03-30 10:06 [PATCH 00/52] mips: octeon: Add ethernet support Stefan Roese
` (33 preceding siblings ...)
2022-03-30 10:07 ` [PATCH 38/52] mips: octeon: Add cvmx-pko3-resources.c Stefan Roese
@ 2022-03-30 10:07 ` Stefan Roese
2022-03-30 10:07 ` [PATCH 40/52] mips: octeon: Add cvmx-qlm-tables.c Stefan Roese
` (14 subsequent siblings)
49 siblings, 0 replies; 52+ messages in thread
From: Stefan Roese @ 2022-03-30 10:07 UTC (permalink / raw)
To: u-boot; +Cc: awilliams, cchavva
From: Aaron Williams <awilliams@marvell.com>
Import cvmx-pko-internal-ports-range.c from 2013 U-Boot. It will be used
by the later added drivers to support networking on the MIPS Octeon II / III
platforms.
Signed-off-by: Aaron Williams <awilliams@marvell.com>
Signed-off-by: Stefan Roese <sr@denx.de>
---
.../cvmx-pko-internal-ports-range.c | 164 ++++++++++++++++++
1 file changed, 164 insertions(+)
create mode 100644 arch/mips/mach-octeon/cvmx-pko-internal-ports-range.c
diff --git a/arch/mips/mach-octeon/cvmx-pko-internal-ports-range.c b/arch/mips/mach-octeon/cvmx-pko-internal-ports-range.c
new file mode 100644
index 000000000000..259453eacd5c
--- /dev/null
+++ b/arch/mips/mach-octeon/cvmx-pko-internal-ports-range.c
@@ -0,0 +1,164 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018-2022 Marvell International Ltd.
+ */
+
+#include <errno.h>
+#include <log.h>
+#include <time.h>
+#include <linux/delay.h>
+
+#include <mach/cvmx-regs.h>
+#include <mach/cvmx-csr.h>
+#include <mach/cvmx-bootmem.h>
+#include <mach/octeon-model.h>
+#include <mach/cvmx-fuse.h>
+#include <mach/octeon-feature.h>
+#include <mach/cvmx-qlm.h>
+#include <mach/octeon_qlm.h>
+#include <mach/cvmx-pcie.h>
+#include <mach/cvmx-coremask.h>
+#include <mach/cvmx-range.h>
+#include <mach/cvmx-global-resources.h>
+
+#include <mach/cvmx-agl-defs.h>
+#include <mach/cvmx-bgxx-defs.h>
+#include <mach/cvmx-ciu-defs.h>
+#include <mach/cvmx-gmxx-defs.h>
+#include <mach/cvmx-gserx-defs.h>
+#include <mach/cvmx-ilk-defs.h>
+#include <mach/cvmx-ipd-defs.h>
+#include <mach/cvmx-pcsx-defs.h>
+#include <mach/cvmx-pcsxx-defs.h>
+#include <mach/cvmx-pki-defs.h>
+#include <mach/cvmx-pko-defs.h>
+#include <mach/cvmx-xcv-defs.h>
+
+#include <mach/cvmx-hwpko.h>
+#include <mach/cvmx-ilk.h>
+#include <mach/cvmx-ipd.h>
+#include <mach/cvmx-pki.h>
+#include <mach/cvmx-pko3.h>
+#include <mach/cvmx-pko3-queue.h>
+#include <mach/cvmx-pko3-resources.h>
+
+#include <mach/cvmx-helper.h>
+#include <mach/cvmx-helper-board.h>
+#include <mach/cvmx-helper-cfg.h>
+
+#include <mach/cvmx-helper-bgx.h>
+#include <mach/cvmx-helper-cfg.h>
+#include <mach/cvmx-helper-util.h>
+#include <mach/cvmx-helper-pki.h>
+
+/*
+ * Owner key for internal-port allocations: packs the (interface, port)
+ * pair into a single u64 via the union.
+ */
+union interface_port {
+	struct {
+		int port;
+		int interface;
+	} s;
+	u64 u64;
+};
+
+/* Non-zero enables extra allocation debug output */
+static int dbg;
+
+/* Guards the one-time creation of the global internal-port range */
+static int port_range_init;
+
+/**
+ * Create the global resource range backing PKO internal ports.
+ *
+ * Safe to call repeatedly; the range is created only once.
+ *
+ * @return 0 on success, non-zero if the range could not be created.
+ */
+int __cvmx_pko_internal_ports_range_init(void)
+{
+	int rv;
+
+	if (port_range_init)
+		return 0;
+
+	rv = cvmx_create_global_resource_range(CVMX_GR_TAG_PKO_IPORTS,
+					       CVMX_HELPER_CFG_MAX_PKO_QUEUES);
+	if (rv != 0) {
+		/*
+		 * Do not latch the init flag on failure, so a later call
+		 * can retry creation (previously the flag was set before
+		 * the attempt, masking the failure on subsequent calls).
+		 */
+		debug("ERROR : Failed to initialize pko internal port range\n");
+		return rv;
+	}
+
+	port_range_init = 1;
+	return 0;
+}
+
+/**
+ * Allocate a contiguous range of PKO internal ports for a physical port
+ * and record the base/count in the per-port configuration table.
+ *
+ * @param xiface global interface number (node + interface)
+ * @param port index of the port within the interface
+ * @param count number of internal ports to allocate
+ * @return 0 on success, -1 on failure
+ */
+int cvmx_pko_internal_ports_alloc(int xiface, int port, u64 count)
+{
+	union interface_port inf_port;
+	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
+	int base;
+
+	/* Propagate range-creation failures instead of ignoring them */
+	if (__cvmx_pko_internal_ports_range_init() != 0)
+		return -1;
+
+	inf_port.u64 = 0; /* ensure all 64 owner-key bits are defined */
+	inf_port.s.interface = xi.interface;
+	inf_port.s.port = port;
+	base = cvmx_allocate_global_resource_range(CVMX_GR_TAG_PKO_IPORTS,
+						   inf_port.u64, count, 1);
+	if (dbg)
+		debug("internal port alloc : port=%02d base=%02d count=%02d\n",
+		      (int)port, base, (int)count);
+	if (base == -1)
+		return base;
+	cvmx_cfg_port[xi.node][xi.interface][port].ccpp_pko_port_base = base;
+	cvmx_cfg_port[xi.node][xi.interface][port].ccpp_pko_num_ports = count;
+	return 0;
+}
+
+/*
+ * Free the internal ports allocated to a physical port and mark its
+ * configuration entries invalid again.
+ *
+ * @param xiface global interface number (node + interface)
+ * @param port the port for which the internal ports are freed
+ *
+ * @return 0 on success
+ *	   -1 on failure
+ */
+int cvmx_pko_internal_ports_free(int xiface, int port)
+{
+	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
+	int rv;
+
+	__cvmx_pko_internal_ports_range_init();
+
+	rv = cvmx_free_global_resource_range_with_base(
+		CVMX_GR_TAG_PKO_IPORTS,
+		cvmx_cfg_port[xi.node][xi.interface][port].ccpp_pko_port_base,
+		cvmx_cfg_port[xi.node][xi.interface][port].ccpp_pko_num_ports);
+	if (rv != 0)
+		return rv;
+
+	/* Reset the per-port config entries to "unallocated" */
+	cvmx_cfg_port[xi.node][xi.interface][port].ccpp_pko_port_base =
+		CVMX_HELPER_CFG_INVALID_VALUE;
+	cvmx_cfg_port[xi.node][xi.interface][port].ccpp_pko_num_ports =
+		CVMX_HELPER_CFG_INVALID_VALUE;
+
+	return 0;
+}
+
+/*
+ * Free every internal-port range previously recorded in the per-port
+ * configuration table.
+ *
+ * NOTE(review): only node 0 of cvmx_cfg_port is scanned, and the bare
+ * interface number is passed where an xiface is expected - appears to
+ * assume a single-node system; confirm for multi-node configurations.
+ */
+void cvmx_pko_internal_ports_range_free_all(void)
+{
+	int interface, port;
+
+	__cvmx_pko_internal_ports_range_init();
+	for (interface = 0; interface < CVMX_HELPER_MAX_IFACE; interface++)
+		for (port = 0; port < CVMX_HELPER_CFG_MAX_PORT_PER_IFACE;
+		     port++) {
+			if (cvmx_cfg_port[0][interface][port]
+				    .ccpp_pko_port_base !=
+			    CVMX_HELPER_CFG_INVALID_VALUE)
+				cvmx_pko_internal_ports_free(interface, port);
+		}
+	//cvmx_range_show(pko_internal_ports_range);
+}
+
+/*
+ * Debug aid: print the global internal-port resource range and the
+ * base/count recorded for each allocated port.
+ *
+ * NOTE(review): only node 0 of cvmx_cfg_port is scanned - confirm for
+ * multi-node configurations.
+ */
+void cvmx_pko_internal_ports_range_show(void)
+{
+	int interface, port;
+
+	__cvmx_pko_internal_ports_range_init();
+	cvmx_show_global_resource_range(CVMX_GR_TAG_PKO_IPORTS);
+	for (interface = 0; interface < CVMX_HELPER_MAX_IFACE; interface++)
+		for (port = 0; port < CVMX_HELPER_CFG_MAX_PORT_PER_IFACE;
+		     port++) {
+			if (cvmx_cfg_port[0][interface][port]
+				    .ccpp_pko_port_base !=
+			    CVMX_HELPER_CFG_INVALID_VALUE)
+				debug("interface=%d port=%d port_base=%d port_cnt=%d\n",
+				      interface, port,
+				      (int)cvmx_cfg_port[0][interface][port]
+					      .ccpp_pko_port_base,
+				      (int)cvmx_cfg_port[0][interface][port]
+					      .ccpp_pko_num_ports);
+		}
+}
--
2.35.1
^ permalink raw reply related [flat|nested] 52+ messages in thread* [PATCH 40/52] mips: octeon: Add cvmx-qlm-tables.c
2022-03-30 10:06 [PATCH 00/52] mips: octeon: Add ethernet support Stefan Roese
` (34 preceding siblings ...)
2022-03-30 10:07 ` [PATCH 39/52] mips: octeon: Add cvmx-pko-internal-ports-range.c Stefan Roese
@ 2022-03-30 10:07 ` Stefan Roese
2022-03-30 10:07 ` [PATCH 41/52] mips: octeon: Add cvmx-range.c Stefan Roese
` (13 subsequent siblings)
49 siblings, 0 replies; 52+ messages in thread
From: Stefan Roese @ 2022-03-30 10:07 UTC (permalink / raw)
To: u-boot; +Cc: awilliams, cchavva
From: Aaron Williams <awilliams@marvell.com>
Import cvmx-qlm-tables.c from 2013 U-Boot. It will be used by the later
added drivers to support networking on the MIPS Octeon II / III
platforms.
Signed-off-by: Aaron Williams <awilliams@marvell.com>
Signed-off-by: Stefan Roese <sr@denx.de>
---
arch/mips/mach-octeon/cvmx-qlm-tables.c | 292 ++++++++++++++++++++++++
1 file changed, 292 insertions(+)
create mode 100644 arch/mips/mach-octeon/cvmx-qlm-tables.c
diff --git a/arch/mips/mach-octeon/cvmx-qlm-tables.c b/arch/mips/mach-octeon/cvmx-qlm-tables.c
new file mode 100644
index 000000000000..ca2289225d37
--- /dev/null
+++ b/arch/mips/mach-octeon/cvmx-qlm-tables.c
@@ -0,0 +1,292 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018-2022 Marvell International Ltd.
+ */
+
+#include <mach/cvmx-regs.h>
+#include <mach/octeon-model.h>
+#include <mach/cvmx-fuse.h>
+#include <mach/octeon-feature.h>
+#include <mach/cvmx-qlm.h>
+#include <mach/octeon_qlm.h>
+
+/*
+ * JTAG shift-chain field map for the CN63XX QLM SerDes.  Each entry
+ * names a field and gives its high/low bit positions within the chain
+ * (fields here span bits 299..0); the table is NULL-terminated.
+ */
+const __cvmx_qlm_jtag_field_t __cvmx_qlm_jtag_field_cn63xx[] = {
+ { "prbs_err_cnt", 299, 252 }, // prbs_err_cnt[47..0]
+ { "prbs_lock", 251, 251 }, // prbs_lock
+ { "jtg_prbs_rst_n", 250, 250 }, // jtg_prbs_rst_n
+ { "jtg_run_prbs31", 249, 249 }, // jtg_run_prbs31
+ { "jtg_run_prbs7", 248, 248 }, // jtg_run_prbs7
+ { "Unused1", 247, 245 }, // 0
+ { "cfg_pwrup_set", 244, 244 }, // cfg_pwrup_set
+ { "cfg_pwrup_clr", 243, 243 }, // cfg_pwrup_clr
+ { "cfg_rst_n_set", 242, 242 }, // cfg_rst_n_set
+ { "cfg_rst_n_clr", 241, 241 }, // cfg_rst_n_clr
+ { "cfg_tx_idle_set", 240, 240 }, // cfg_tx_idle_set
+ { "cfg_tx_idle_clr", 239, 239 }, // cfg_tx_idle_clr
+ { "cfg_tx_byp", 238, 238 }, // cfg_tx_byp
+ { "cfg_tx_byp_inv", 237, 237 }, // cfg_tx_byp_inv
+ { "cfg_tx_byp_val", 236, 227 }, // cfg_tx_byp_val[9..0]
+ { "cfg_loopback", 226, 226 }, // cfg_loopback
+ { "shlpbck", 225, 224 }, // shlpbck[1..0]
+ { "sl_enable", 223, 223 }, // sl_enable
+ { "sl_posedge_sample", 222, 222 }, // sl_posedge_sample
+ { "trimen", 221, 220 }, // trimen[1..0]
+ { "serdes_tx_byp", 219, 219 }, // serdes_tx_byp
+ { "serdes_pll_byp", 218, 218 }, // serdes_pll_byp
+ { "lowf_byp", 217, 217 }, // lowf_byp
+ { "spdsel_byp", 216, 216 }, // spdsel_byp
+ { "div4_byp", 215, 215 }, // div4_byp
+ { "clkf_byp", 214, 208 }, // clkf_byp[6..0]
+ { "Unused2", 207, 206 }, // 0
+ { "biasdrv_hs_ls_byp", 205, 201 }, // biasdrv_hs_ls_byp[4..0]
+ { "tcoeff_hf_ls_byp", 200, 197 }, // tcoeff_hf_ls_byp[3..0]
+ { "biasdrv_hf_byp", 196, 192 }, // biasdrv_hf_byp[4..0]
+ { "tcoeff_hf_byp", 191, 188 }, // tcoeff_hf_byp[3..0]
+ { "Unused3", 187, 186 }, // 0
+ { "biasdrv_lf_ls_byp", 185, 181 }, // biasdrv_lf_ls_byp[4..0]
+ { "tcoeff_lf_ls_byp", 180, 177 }, // tcoeff_lf_ls_byp[3..0]
+ { "biasdrv_lf_byp", 176, 172 }, // biasdrv_lf_byp[4..0]
+ { "tcoeff_lf_byp", 171, 168 }, // tcoeff_lf_byp[3..0]
+ { "Unused4", 167, 167 }, // 0
+ { "interpbw", 166, 162 }, // interpbw[4..0]
+ { "pll_cpb", 161, 159 }, // pll_cpb[2..0]
+ { "pll_cps", 158, 156 }, // pll_cps[2..0]
+ { "pll_diffamp", 155, 152 }, // pll_diffamp[3..0]
+ { "Unused5", 151, 150 }, // 0
+ { "cfg_rx_idle_set", 149, 149 }, // cfg_rx_idle_set
+ { "cfg_rx_idle_clr", 148, 148 }, // cfg_rx_idle_clr
+ { "cfg_rx_idle_thr", 147, 144 }, // cfg_rx_idle_thr[3..0]
+ { "cfg_com_thr", 143, 140 }, // cfg_com_thr[3..0]
+ { "cfg_rx_offset", 139, 136 }, // cfg_rx_offset[3..0]
+ { "cfg_skp_max", 135, 132 }, // cfg_skp_max[3..0]
+ { "cfg_skp_min", 131, 128 }, // cfg_skp_min[3..0]
+ { "cfg_fast_pwrup", 127, 127 }, // cfg_fast_pwrup
+ { "Unused6", 126, 100 }, // 0
+ { "detected_n", 99, 99 }, // detected_n
+ { "detected_p", 98, 98 }, // detected_p
+ { "dbg_res_rx", 97, 94 }, // dbg_res_rx[3..0]
+ { "dbg_res_tx", 93, 90 }, // dbg_res_tx[3..0]
+ { "cfg_tx_pol_set", 89, 89 }, // cfg_tx_pol_set
+ { "cfg_tx_pol_clr", 88, 88 }, // cfg_tx_pol_clr
+ { "cfg_rx_pol_set", 87, 87 }, // cfg_rx_pol_set
+ { "cfg_rx_pol_clr", 86, 86 }, // cfg_rx_pol_clr
+ { "cfg_rxd_set", 85, 85 }, // cfg_rxd_set
+ { "cfg_rxd_clr", 84, 84 }, // cfg_rxd_clr
+ { "cfg_rxd_wait", 83, 80 }, // cfg_rxd_wait[3..0]
+ { "cfg_cdr_limit", 79, 79 }, // cfg_cdr_limit
+ { "cfg_cdr_rotate", 78, 78 }, // cfg_cdr_rotate
+ { "cfg_cdr_bw_ctl", 77, 76 }, // cfg_cdr_bw_ctl[1..0]
+ { "cfg_cdr_trunc", 75, 74 }, // cfg_cdr_trunc[1..0]
+ { "cfg_cdr_rqoffs", 73, 64 }, // cfg_cdr_rqoffs[9..0]
+ { "cfg_cdr_inc2", 63, 58 }, // cfg_cdr_inc2[5..0]
+ { "cfg_cdr_inc1", 57, 52 }, // cfg_cdr_inc1[5..0]
+ { "fusopt_voter_sync", 51, 51 }, // fusopt_voter_sync
+ { "rndt", 50, 50 }, // rndt
+ { "hcya", 49, 49 }, // hcya
+ { "hyst", 48, 48 }, // hyst
+ { "idle_dac", 47, 45 }, // idle_dac[2..0]
+ { "bg_ref_sel", 44, 44 }, // bg_ref_sel
+ { "ic50dac", 43, 39 }, // ic50dac[4..0]
+ { "ir50dac", 38, 34 }, // ir50dac[4..0]
+ { "tx_rout_comp_bypass", 33, 33 }, // tx_rout_comp_bypass
+ { "tx_rout_comp_value", 32, 29 }, // tx_rout_comp_value[3..0]
+ { "tx_res_offset", 28, 25 }, // tx_res_offset[3..0]
+ { "rx_rout_comp_bypass", 24, 24 }, // rx_rout_comp_bypass
+ { "rx_rout_comp_value", 23, 20 }, // rx_rout_comp_value[3..0]
+ { "rx_res_offset", 19, 16 }, // rx_res_offset[3..0]
+ { "rx_cap_gen2", 15, 12 }, // rx_cap_gen2[3..0]
+ { "rx_eq_gen2", 11, 8 }, // rx_eq_gen2[3..0]
+ { "rx_cap_gen1", 7, 4 }, // rx_cap_gen1[3..0]
+ { "rx_eq_gen1", 3, 0 }, // rx_eq_gen1[3..0]
+ { NULL, -1, -1 }
+};
+
+/*
+ * JTAG shift-chain field map for the CN66XX QLM SerDes (fields span
+ * bits 303..0, NULL-terminated).  Relative to CN63XX this chain adds
+ * per-direction PRBS resets and a jtg_prbs_mode field that supersedes
+ * the legacy jtg_run_prbs31/jtg_run_prbs7 bits.
+ */
+const __cvmx_qlm_jtag_field_t __cvmx_qlm_jtag_field_cn66xx[] = {
+ { "prbs_err_cnt", 303, 256 }, // prbs_err_cnt[47..0]
+ { "prbs_lock", 255, 255 }, // prbs_lock
+ { "jtg_prbs_rx_rst_n", 254, 254 }, // jtg_prbs_rx_rst_n
+ { "jtg_prbs_tx_rst_n", 253, 253 }, // jtg_prbs_tx_rst_n
+ { "jtg_prbs_mode", 252, 251 }, // jtg_prbs_mode[252:251]
+ { "jtg_prbs_rst_n", 250, 250 }, // jtg_prbs_rst_n
+ { "jtg_run_prbs31", 249,
+ 249 }, // jtg_run_prbs31 - Use jtg_prbs_mode instead
+ { "jtg_run_prbs7", 248,
+ 248 }, // jtg_run_prbs7 - Use jtg_prbs_mode instead
+ { "Unused1", 247, 246 }, // 0
+ { "div5_byp", 245, 245 }, // div5_byp
+ { "cfg_pwrup_set", 244, 244 }, // cfg_pwrup_set
+ { "cfg_pwrup_clr", 243, 243 }, // cfg_pwrup_clr
+ { "cfg_rst_n_set", 242, 242 }, // cfg_rst_n_set
+ { "cfg_rst_n_clr", 241, 241 }, // cfg_rst_n_clr
+ { "cfg_tx_idle_set", 240, 240 }, // cfg_tx_idle_set
+ { "cfg_tx_idle_clr", 239, 239 }, // cfg_tx_idle_clr
+ { "cfg_tx_byp", 238, 238 }, // cfg_tx_byp
+ { "cfg_tx_byp_inv", 237, 237 }, // cfg_tx_byp_inv
+ { "cfg_tx_byp_val", 236, 227 }, // cfg_tx_byp_val[9..0]
+ { "cfg_loopback", 226, 226 }, // cfg_loopback
+ { "shlpbck", 225, 224 }, // shlpbck[1..0]
+ { "sl_enable", 223, 223 }, // sl_enable
+ { "sl_posedge_sample", 222, 222 }, // sl_posedge_sample
+ { "trimen", 221, 220 }, // trimen[1..0]
+ { "serdes_tx_byp", 219, 219 }, // serdes_tx_byp
+ { "serdes_pll_byp", 218, 218 }, // serdes_pll_byp
+ { "lowf_byp", 217, 217 }, // lowf_byp
+ { "spdsel_byp", 216, 216 }, // spdsel_byp
+ { "div4_byp", 215, 215 }, // div4_byp
+ { "clkf_byp", 214, 208 }, // clkf_byp[6..0]
+ { "biasdrv_hs_ls_byp", 207, 203 }, // biasdrv_hs_ls_byp[4..0]
+ { "tcoeff_hf_ls_byp", 202, 198 }, // tcoeff_hf_ls_byp[4..0]
+ { "biasdrv_hf_byp", 197, 193 }, // biasdrv_hf_byp[4..0]
+ { "tcoeff_hf_byp", 192, 188 }, // tcoeff_hf_byp[4..0]
+ { "biasdrv_lf_ls_byp", 187, 183 }, // biasdrv_lf_ls_byp[4..0]
+ { "tcoeff_lf_ls_byp", 182, 178 }, // tcoeff_lf_ls_byp[4..0]
+ { "biasdrv_lf_byp", 177, 173 }, // biasdrv_lf_byp[4..0]
+ { "tcoeff_lf_byp", 172, 168 }, // tcoeff_lf_byp[4..0]
+ { "Unused4", 167, 167 }, // 0
+ { "interpbw", 166, 162 }, // interpbw[4..0]
+ { "pll_cpb", 161, 159 }, // pll_cpb[2..0]
+ { "pll_cps", 158, 156 }, // pll_cps[2..0]
+ { "pll_diffamp", 155, 152 }, // pll_diffamp[3..0]
+ { "cfg_err_thr", 151, 150 }, // cfg_err_thr
+ { "cfg_rx_idle_set", 149, 149 }, // cfg_rx_idle_set
+ { "cfg_rx_idle_clr", 148, 148 }, // cfg_rx_idle_clr
+ { "cfg_rx_idle_thr", 147, 144 }, // cfg_rx_idle_thr[3..0]
+ { "cfg_com_thr", 143, 140 }, // cfg_com_thr[3..0]
+ { "cfg_rx_offset", 139, 136 }, // cfg_rx_offset[3..0]
+ { "cfg_skp_max", 135, 132 }, // cfg_skp_max[3..0]
+ { "cfg_skp_min", 131, 128 }, // cfg_skp_min[3..0]
+ { "cfg_fast_pwrup", 127, 127 }, // cfg_fast_pwrup
+ { "Unused6", 126, 101 }, // 0
+ { "cfg_indep_dis", 100, 100 }, // cfg_indep_dis
+ { "detected_n", 99, 99 }, // detected_n
+ { "detected_p", 98, 98 }, // detected_p
+ { "dbg_res_rx", 97, 94 }, // dbg_res_rx[3..0]
+ { "dbg_res_tx", 93, 90 }, // dbg_res_tx[3..0]
+ { "cfg_tx_pol_set", 89, 89 }, // cfg_tx_pol_set
+ { "cfg_tx_pol_clr", 88, 88 }, // cfg_tx_pol_clr
+ { "cfg_rx_pol_set", 87, 87 }, // cfg_rx_pol_set
+ { "cfg_rx_pol_clr", 86, 86 }, // cfg_rx_pol_clr
+ { "cfg_rxd_set", 85, 85 }, // cfg_rxd_set
+ { "cfg_rxd_clr", 84, 84 }, // cfg_rxd_clr
+ { "cfg_rxd_wait", 83, 80 }, // cfg_rxd_wait[3..0]
+ { "cfg_cdr_limit", 79, 79 }, // cfg_cdr_limit
+ { "cfg_cdr_rotate", 78, 78 }, // cfg_cdr_rotate
+ { "cfg_cdr_bw_ctl", 77, 76 }, // cfg_cdr_bw_ctl[1..0]
+ { "cfg_cdr_trunc", 75, 74 }, // cfg_cdr_trunc[1..0]
+ { "cfg_cdr_rqoffs", 73, 64 }, // cfg_cdr_rqoffs[9..0]
+ { "cfg_cdr_inc2", 63, 58 }, // cfg_cdr_inc2[5..0]
+ { "cfg_cdr_inc1", 57, 52 }, // cfg_cdr_inc1[5..0]
+ { "fusopt_voter_sync", 51, 51 }, // fusopt_voter_sync
+ { "rndt", 50, 50 }, // rndt
+ { "hcya", 49, 49 }, // hcya
+ { "hyst", 48, 48 }, // hyst
+ { "idle_dac", 47, 45 }, // idle_dac[2..0]
+ { "bg_ref_sel", 44, 44 }, // bg_ref_sel
+ { "ic50dac", 43, 39 }, // ic50dac[4..0]
+ { "ir50dac", 38, 34 }, // ir50dac[4..0]
+ { "tx_rout_comp_bypass", 33, 33 }, // tx_rout_comp_bypass
+ { "tx_rout_comp_value", 32, 29 }, // tx_rout_comp_value[3..0]
+ { "tx_res_offset", 28, 25 }, // tx_res_offset[3..0]
+ { "rx_rout_comp_bypass", 24, 24 }, // rx_rout_comp_bypass
+ { "rx_rout_comp_value", 23, 20 }, // rx_rout_comp_value[3..0]
+ { "rx_res_offset", 19, 16 }, // rx_res_offset[3..0]
+ { "rx_cap_gen2", 15, 12 }, // rx_cap_gen2[3..0]
+ { "rx_eq_gen2", 11, 8 }, // rx_eq_gen2[3..0]
+ { "rx_cap_gen1", 7, 4 }, // rx_cap_gen1[3..0]
+ { "rx_eq_gen1", 3, 0 }, // rx_eq_gen1[3..0]
+ { NULL, -1, -1 }
+};
+
+/*
+ * JTAG shift-chain field map for the CN68XX QLM SerDes (fields span
+ * bits 303..0, NULL-terminated).  Layout matches CN66XX except that
+ * div5_byp, cfg_indep_dis and the wider Unused spans differ.
+ */
+const __cvmx_qlm_jtag_field_t __cvmx_qlm_jtag_field_cn68xx[] = {
+ { "prbs_err_cnt", 303, 256 }, // prbs_err_cnt[47..0]
+ { "prbs_lock", 255, 255 }, // prbs_lock
+ { "jtg_prbs_rx_rst_n", 254, 254 }, // jtg_prbs_rx_rst_n
+ { "jtg_prbs_tx_rst_n", 253, 253 }, // jtg_prbs_tx_rst_n
+ { "jtg_prbs_mode", 252, 251 }, // jtg_prbs_mode[252:251]
+ { "jtg_prbs_rst_n", 250, 250 }, // jtg_prbs_rst_n
+ { "jtg_run_prbs31", 249,
+ 249 }, // jtg_run_prbs31 - Use jtg_prbs_mode instead
+ { "jtg_run_prbs7", 248,
+ 248 }, // jtg_run_prbs7 - Use jtg_prbs_mode instead
+ { "Unused1", 247, 245 }, // 0
+ { "cfg_pwrup_set", 244, 244 }, // cfg_pwrup_set
+ { "cfg_pwrup_clr", 243, 243 }, // cfg_pwrup_clr
+ { "cfg_rst_n_set", 242, 242 }, // cfg_rst_n_set
+ { "cfg_rst_n_clr", 241, 241 }, // cfg_rst_n_clr
+ { "cfg_tx_idle_set", 240, 240 }, // cfg_tx_idle_set
+ { "cfg_tx_idle_clr", 239, 239 }, // cfg_tx_idle_clr
+ { "cfg_tx_byp", 238, 238 }, // cfg_tx_byp
+ { "cfg_tx_byp_inv", 237, 237 }, // cfg_tx_byp_inv
+ { "cfg_tx_byp_val", 236, 227 }, // cfg_tx_byp_val[9..0]
+ { "cfg_loopback", 226, 226 }, // cfg_loopback
+ { "shlpbck", 225, 224 }, // shlpbck[1..0]
+ { "sl_enable", 223, 223 }, // sl_enable
+ { "sl_posedge_sample", 222, 222 }, // sl_posedge_sample
+ { "trimen", 221, 220 }, // trimen[1..0]
+ { "serdes_tx_byp", 219, 219 }, // serdes_tx_byp
+ { "serdes_pll_byp", 218, 218 }, // serdes_pll_byp
+ { "lowf_byp", 217, 217 }, // lowf_byp
+ { "spdsel_byp", 216, 216 }, // spdsel_byp
+ { "div4_byp", 215, 215 }, // div4_byp
+ { "clkf_byp", 214, 208 }, // clkf_byp[6..0]
+ { "biasdrv_hs_ls_byp", 207, 203 }, // biasdrv_hs_ls_byp[4..0]
+ { "tcoeff_hf_ls_byp", 202, 198 }, // tcoeff_hf_ls_byp[4..0]
+ { "biasdrv_hf_byp", 197, 193 }, // biasdrv_hf_byp[4..0]
+ { "tcoeff_hf_byp", 192, 188 }, // tcoeff_hf_byp[4..0]
+ { "biasdrv_lf_ls_byp", 187, 183 }, // biasdrv_lf_ls_byp[4..0]
+ { "tcoeff_lf_ls_byp", 182, 178 }, // tcoeff_lf_ls_byp[4..0]
+ { "biasdrv_lf_byp", 177, 173 }, // biasdrv_lf_byp[4..0]
+ { "tcoeff_lf_byp", 172, 168 }, // tcoeff_lf_byp[4..0]
+ { "Unused4", 167, 167 }, // 0
+ { "interpbw", 166, 162 }, // interpbw[4..0]
+ { "pll_cpb", 161, 159 }, // pll_cpb[2..0]
+ { "pll_cps", 158, 156 }, // pll_cps[2..0]
+ { "pll_diffamp", 155, 152 }, // pll_diffamp[3..0]
+ { "cfg_err_thr", 151, 150 }, // cfg_err_thr
+ { "cfg_rx_idle_set", 149, 149 }, // cfg_rx_idle_set
+ { "cfg_rx_idle_clr", 148, 148 }, // cfg_rx_idle_clr
+ { "cfg_rx_idle_thr", 147, 144 }, // cfg_rx_idle_thr[3..0]
+ { "cfg_com_thr", 143, 140 }, // cfg_com_thr[3..0]
+ { "cfg_rx_offset", 139, 136 }, // cfg_rx_offset[3..0]
+ { "cfg_skp_max", 135, 132 }, // cfg_skp_max[3..0]
+ { "cfg_skp_min", 131, 128 }, // cfg_skp_min[3..0]
+ { "cfg_fast_pwrup", 127, 127 }, // cfg_fast_pwrup
+ { "Unused6", 126, 100 }, // 0
+ { "detected_n", 99, 99 }, // detected_n
+ { "detected_p", 98, 98 }, // detected_p
+ { "dbg_res_rx", 97, 94 }, // dbg_res_rx[3..0]
+ { "dbg_res_tx", 93, 90 }, // dbg_res_tx[3..0]
+ { "cfg_tx_pol_set", 89, 89 }, // cfg_tx_pol_set
+ { "cfg_tx_pol_clr", 88, 88 }, // cfg_tx_pol_clr
+ { "cfg_rx_pol_set", 87, 87 }, // cfg_rx_pol_set
+ { "cfg_rx_pol_clr", 86, 86 }, // cfg_rx_pol_clr
+ { "cfg_rxd_set", 85, 85 }, // cfg_rxd_set
+ { "cfg_rxd_clr", 84, 84 }, // cfg_rxd_clr
+ { "cfg_rxd_wait", 83, 80 }, // cfg_rxd_wait[3..0]
+ { "cfg_cdr_limit", 79, 79 }, // cfg_cdr_limit
+ { "cfg_cdr_rotate", 78, 78 }, // cfg_cdr_rotate
+ { "cfg_cdr_bw_ctl", 77, 76 }, // cfg_cdr_bw_ctl[1..0]
+ { "cfg_cdr_trunc", 75, 74 }, // cfg_cdr_trunc[1..0]
+ { "cfg_cdr_rqoffs", 73, 64 }, // cfg_cdr_rqoffs[9..0]
+ { "cfg_cdr_inc2", 63, 58 }, // cfg_cdr_inc2[5..0]
+ { "cfg_cdr_inc1", 57, 52 }, // cfg_cdr_inc1[5..0]
+ { "fusopt_voter_sync", 51, 51 }, // fusopt_voter_sync
+ { "rndt", 50, 50 }, // rndt
+ { "hcya", 49, 49 }, // hcya
+ { "hyst", 48, 48 }, // hyst
+ { "idle_dac", 47, 45 }, // idle_dac[2..0]
+ { "bg_ref_sel", 44, 44 }, // bg_ref_sel
+ { "ic50dac", 43, 39 }, // ic50dac[4..0]
+ { "ir50dac", 38, 34 }, // ir50dac[4..0]
+ { "tx_rout_comp_bypass", 33, 33 }, // tx_rout_comp_bypass
+ { "tx_rout_comp_value", 32, 29 }, // tx_rout_comp_value[3..0]
+ { "tx_res_offset", 28, 25 }, // tx_res_offset[3..0]
+ { "rx_rout_comp_bypass", 24, 24 }, // rx_rout_comp_bypass
+ { "rx_rout_comp_value", 23, 20 }, // rx_rout_comp_value[3..0]
+ { "rx_res_offset", 19, 16 }, // rx_res_offset[3..0]
+ { "rx_cap_gen2", 15, 12 }, // rx_cap_gen2[3..0]
+ { "rx_eq_gen2", 11, 8 }, // rx_eq_gen2[3..0]
+ { "rx_cap_gen1", 7, 4 }, // rx_cap_gen1[3..0]
+ { "rx_eq_gen1", 3, 0 }, // rx_eq_gen1[3..0]
+ { NULL, -1, -1 }
+};
--
2.35.1
^ permalink raw reply related [flat|nested] 52+ messages in thread* [PATCH 41/52] mips: octeon: Add cvmx-range.c
2022-03-30 10:06 [PATCH 00/52] mips: octeon: Add ethernet support Stefan Roese
` (35 preceding siblings ...)
2022-03-30 10:07 ` [PATCH 40/52] mips: octeon: Add cvmx-qlm-tables.c Stefan Roese
@ 2022-03-30 10:07 ` Stefan Roese
2022-03-30 10:07 ` [PATCH 42/52] mips: octeon: Misc changes to existing C files for upcoming eth support Stefan Roese
` (12 subsequent siblings)
49 siblings, 0 replies; 52+ messages in thread
From: Stefan Roese @ 2022-03-30 10:07 UTC (permalink / raw)
To: u-boot; +Cc: awilliams, cchavva
From: Aaron Williams <awilliams@marvell.com>
Import cvmx-range.c from 2013 U-Boot. It will be used by the later
added drivers to support networking on the MIPS Octeon II / III
platforms.
Signed-off-by: Aaron Williams <awilliams@marvell.com>
Signed-off-by: Stefan Roese <sr@denx.de>
---
arch/mips/mach-octeon/cvmx-range.c | 344 +++++++++++++++++++++++++++++
1 file changed, 344 insertions(+)
create mode 100644 arch/mips/mach-octeon/cvmx-range.c
diff --git a/arch/mips/mach-octeon/cvmx-range.c b/arch/mips/mach-octeon/cvmx-range.c
new file mode 100644
index 000000000000..33dd95e7ab1a
--- /dev/null
+++ b/arch/mips/mach-octeon/cvmx-range.c
@@ -0,0 +1,344 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018-2022 Marvell International Ltd.
+ */
+
+#include <log.h>
+#include <time.h>
+#include <linux/delay.h>
+
+#include <mach/cvmx-regs.h>
+#include <mach/cvmx-csr.h>
+#include <mach/cvmx-bootmem.h>
+#include <mach/octeon-model.h>
+#include <mach/cvmx-fuse.h>
+#include <mach/octeon-feature.h>
+#include <mach/cvmx-qlm.h>
+#include <mach/octeon_qlm.h>
+#include <mach/cvmx-pcie.h>
+#include <mach/cvmx-coremask.h>
+
+#include <mach/cvmx-global-resources.h>
+
+#include <mach/cvmx-pki.h>
+#include <mach/cvmx-helper.h>
+#include <mach/cvmx-helper-board.h>
+#include <mach/cvmx-helper-cfg.h>
+
+#include <mach/cvmx-range.h>
+
+/* Sentinel owner value marking a range element as unallocated */
+#define CVMX_RANGE_AVAILABLE ((u64)-88)
+/*
+ * A range lives in memory as one u64 element count followed by one u64
+ * owner word per element; bit 63 is OR-ed in to form the address used
+ * by the cvmx_read64/write64 accessors.
+ */
+#define addr_of_element(base, index) \
+ (1ull << 63 | ((base) + sizeof(u64) + (index) * sizeof(u64)))
+#define addr_of_size(base) (1ull << 63 | (base))
+
+/*
+ * Compile-time trace switch.  Note: debug(...) with parentheses still
+ * expands the function-like debug() macro from <log.h>; the bare
+ * identifier "debug" refers to this flag.
+ */
+static const int debug;
+
+/*
+ * Return the number of bytes needed to hold a range with @nelements
+ * entries: one u64 owner word per element plus the leading size word.
+ */
+int cvmx_range_memory_size(int nelements)
+{
+ int words = nelements + 1; /* element owners + leading size word */
+
+ return words * sizeof(u64);
+}
+
+/*
+ * Initialise the range at @range_addr: store the element count and
+ * mark all @size elements as available.
+ *
+ * Return: 0 on success
+ */
+int cvmx_range_init(u64 range_addr, int size)
+{
+ u64 lsize = size;
+ u64 i;
+
+ cvmx_write64_uint64(addr_of_size(range_addr), lsize);
+ for (i = 0; i < lsize; i++) {
+ cvmx_write64_uint64(addr_of_element(range_addr, i),
+ CVMX_RANGE_AVAILABLE);
+ }
+ return 0;
+}
+
+/*
+ * Find the first available element at or after @index, stepping by
+ * @align.  @index is first rounded up to an @align boundary; the
+ * "i < size" loop guard keeps a rounded-up index that falls past the
+ * end of the range from being read.
+ *
+ * Return: element index, or -1 if nothing is available
+ */
+static int64_t cvmx_range_find_next_available(u64 range_addr, u64 index,
+ int align)
+{
+ u64 size = cvmx_read64_uint64(addr_of_size(range_addr));
+ u64 i;
+
+ while ((index % align) != 0)
+ index++;
+
+ for (i = index; i < size; i += align) {
+ u64 r_owner = cvmx_read64_uint64(addr_of_element(range_addr, i));
+
+ if (debug)
+ debug("%s: index=%d owner=%llx\n", __func__, (int)i,
+ (unsigned long long)r_owner);
+ if (r_owner == CVMX_RANGE_AVAILABLE)
+ return i;
+ }
+ return -1;
+}
+
+/*
+ * Find an available element by scanning backwards from @index (or from
+ * the top of the range when @index is 0), stepping down by @align.
+ *
+ * Return: element index, or -1 if nothing is available
+ */
+static int64_t cvmx_range_find_last_available(u64 range_addr, u64 index,
+ u64 align)
+{
+ u64 size = cvmx_read64_uint64(addr_of_size(range_addr));
+ u64 i;
+
+ if (index == 0)
+ index = size - 1;
+
+ /*
+ * Round DOWN to an alignment boundary.  Rounding up (as the
+ * forward search does) would push a start index near the top of
+ * the range past its end and read an element that does not exist.
+ */
+ index -= index % align;
+
+ /* NOTE(review): elements below index `align` are never examined
+ * by this loop; confirm that is intentional for callers. */
+ for (i = index; i > align; i -= align) {
+ u64 r_owner = cvmx_read64_uint64(addr_of_element(range_addr, i));
+
+ if (debug)
+ debug("%s: index=%d owner=%llx\n", __func__, (int)i,
+ (unsigned long long)r_owner);
+ if (r_owner == CVMX_RANGE_AVAILABLE)
+ return i;
+ }
+ return -1;
+}
+
+/*
+ * Allocate @cnt contiguous elements for @owner, searching upward from
+ * the bottom of the range (or downward when @reverse is set), with the
+ * run start aligned to @align.
+ *
+ * Return: base index of the allocated run, or -1 on failure
+ */
+int cvmx_range_alloc_ordered(u64 range_addr, u64 owner, u64 cnt,
+ int align, int reverse)
+{
+ u64 i = 0, size;
+ s64 first_available;
+
+ if (debug)
+ debug("%s: range_addr=%llx owner=%llx cnt=%d\n", __func__,
+ (unsigned long long)range_addr,
+ (unsigned long long)owner, (int)cnt);
+
+ size = cvmx_read64_uint64(addr_of_size(range_addr));
+ while (i < size) {
+ u64 available_cnt = 0;
+
+ if (reverse)
+ first_available = cvmx_range_find_last_available(range_addr, i, align);
+ else
+ first_available = cvmx_range_find_next_available(range_addr, i, align);
+ if (first_available == -1)
+ return -1;
+ i = first_available;
+
+ if (debug)
+ debug("%s: first_available=%d\n", __func__, (int)first_available);
+ while ((available_cnt != cnt) && (i < size)) {
+ u64 r_owner = cvmx_read64_uint64(addr_of_element(range_addr, i));
+
+ /*
+ * Stop counting at the first owned element: the run
+ * must be contiguous.  Without this break, a
+ * fragmented region could reach the requested count
+ * and the claim loop below would then overwrite
+ * elements that belong to another owner.
+ */
+ if (r_owner != CVMX_RANGE_AVAILABLE)
+ break;
+ available_cnt++;
+ i++;
+ }
+ if (available_cnt == cnt) {
+ u64 j;
+
+ if (debug)
+ debug("%s: first_available=%d available=%d\n",
+ __func__,
+ (int)first_available, (int)available_cnt);
+
+ /* Claim the whole contiguous run for @owner */
+ for (j = first_available; j < first_available + cnt;
+ j++) {
+ u64 a = addr_of_element(range_addr, j);
+
+ cvmx_write64_uint64(a, owner);
+ }
+ return first_available;
+ }
+ }
+
+ if (debug) {
+ debug("ERROR: %s: failed to allocate range cnt=%d\n",
+ __func__, (int)cnt);
+ cvmx_range_show(range_addr);
+ }
+
+ return -1;
+}
+
+/*
+ * Convenience wrapper for a forward (lowest-index-first) contiguous
+ * allocation.  See cvmx_range_alloc_ordered().
+ */
+int cvmx_range_alloc(u64 range_addr, u64 owner, u64 cnt, int align)
+{
+ return cvmx_range_alloc_ordered(range_addr, owner, cnt, align, 0);
+}
+
+/*
+ * Allocate @cnt elements for @owner without requiring them to be
+ * contiguous; the chosen indices are written to @elements.  The
+ * operation is all-or-nothing: if fewer than @cnt elements are free,
+ * nothing is claimed.  (The function name keeps the historical
+ * "contiguos" spelling; renaming it would break existing callers.)
+ *
+ * Return: 0 on success, -1 if not enough free elements
+ */
+int cvmx_range_alloc_non_contiguos(u64 range_addr, u64 owner,
+ u64 cnt, int elements[])
+{
+ u64 i = 0, size;
+ u64 element_index = 0;
+
+ size = cvmx_read64_uint64(addr_of_size(range_addr));
+ for (i = 0; i < size; i++) {
+ u64 r_owner = cvmx_read64_uint64(addr_of_element(range_addr, i));
+
+ if (debug)
+ debug("%s: index=%d owner=%llx\n", __func__, (int)i,
+ (unsigned long long)r_owner);
+ if (r_owner == CVMX_RANGE_AVAILABLE)
+ elements[element_index++] = (int)i;
+
+ if (element_index == cnt)
+ break;
+ }
+ if (element_index != cnt) {
+ if (debug)
+ debug("%s: failed to allocate non contiguous cnt=%d available=%d\n",
+ __func__, (int)cnt, (int)element_index);
+ return -1;
+ }
+ /* Enough free slots found; claim them all for @owner */
+ for (i = 0; i < cnt; i++) {
+ u64 a = addr_of_element(range_addr, elements[i]);
+
+ cvmx_write64_uint64(a, owner);
+ }
+ return 0;
+}
+
+/*
+ * Reserve the explicit run [@base, @base + @cnt) for @owner.  Fails
+ * without claiming anything if the run extends past the end of the
+ * range or any element in it is already owned.
+ *
+ * Return: @base on success, -1 on failure.  NOTE(review): the u64
+ * @base is returned through an int, so bases above INT_MAX would be
+ * truncated -- confirm ranges stay small enough for this to be safe.
+ */
+int cvmx_range_reserve(u64 range_addr, u64 owner, u64 base,
+ u64 cnt)
+{
+ u64 i, size, r_owner;
+ u64 up = base + cnt;
+
+ size = cvmx_read64_uint64(addr_of_size(range_addr));
+ if (up > size) {
+ debug("ERROR: %s: invalid base or cnt. range_addr=0x%llx, owner=0x%llx, size=%d base+cnt=%d\n",
+ __func__, (unsigned long long)range_addr,
+ (unsigned long long)owner,
+ (int)size, (int)up);
+ return -1;
+ }
+ /* First pass: verify every element in the run is free */
+ for (i = base; i < up; i++) {
+ r_owner = cvmx_read64_uint64(addr_of_element(range_addr, i));
+ if (debug)
+ debug("%s: %d: %llx\n",
+ __func__, (int)i, (unsigned long long)r_owner);
+ if (r_owner != CVMX_RANGE_AVAILABLE) {
+ if (debug) {
+ debug("%s: resource already reserved base+cnt=%d %llu %llu %llx %llx %llx\n",
+ __func__, (int)i, (unsigned long long)cnt,
+ (unsigned long long)base,
+ (unsigned long long)r_owner,
+ (unsigned long long)range_addr,
+ (unsigned long long)owner);
+ }
+ return -1;
+ }
+ }
+ /* Second pass: claim the whole run */
+ for (i = base; i < up; i++)
+ cvmx_write64_uint64(addr_of_element(range_addr, i), owner);
+ return base;
+}
+
+/*
+ * Release every element currently held by @owner.
+ *
+ * Return: 0 if at least one element was freed, -1 if @owner held none
+ */
+int cvmx_range_free_with_owner(u64 range_addr, u64 owner)
+{
+ u64 nelem = cvmx_read64_uint64(addr_of_size(range_addr));
+ int rc = -1;
+ u64 idx;
+
+ for (idx = 0; idx < nelem; idx++) {
+ u64 a = addr_of_element(range_addr, idx);
+
+ if (cvmx_read64_uint64(a) != owner)
+ continue;
+
+ cvmx_write64_uint64(a, CVMX_RANGE_AVAILABLE);
+ rc = 0;
+ }
+ return rc;
+}
+
+/*
+ * Check that every index listed in @bases (length @count) is a valid
+ * element of the range and is currently allocated.
+ *
+ * Return: 1 if all are allocated, 0 otherwise
+ */
+int __cvmx_range_is_allocated(u64 range_addr, int bases[], int count)
+{
+ u64 i, cnt, size;
+ u64 r_owner;
+
+ cnt = count;
+ size = cvmx_read64_uint64(addr_of_size(range_addr));
+ for (i = 0; i < cnt; i++) {
+ u64 base = bases[i];
+
+ if (base >= size) {
+ debug("ERROR: %s: invalid base or cnt size=%d base=%d\n",
+ __func__, (int)size, (int)base);
+ return 0;
+ }
+ r_owner = cvmx_read64_uint64(addr_of_element(range_addr, base));
+ if (r_owner == CVMX_RANGE_AVAILABLE) {
+ if (debug) {
+ debug("%s: i=%d:base=%d is available\n",
+ __func__, (int)i, (int)base);
+ }
+ return 0;
+ }
+ }
+ return 1;
+}
+
+/*
+ * Free the (not necessarily contiguous) elements listed in @bases,
+ * but only if all of them are currently allocated -- otherwise nothing
+ * is changed.  (The function name keeps the historical "mutiple"
+ * spelling; renaming it would break existing callers.)
+ *
+ * Return: 0 on success, -1 if any listed element was not allocated
+ */
+int cvmx_range_free_mutiple(u64 range_addr, int bases[], int count)
+{
+ u64 i, cnt;
+
+ cnt = count;
+ if (__cvmx_range_is_allocated(range_addr, bases, count) != 1)
+ return -1;
+ for (i = 0; i < cnt; i++) {
+ u64 base = bases[i];
+
+ cvmx_write64_uint64(addr_of_element(range_addr, base),
+ CVMX_RANGE_AVAILABLE);
+ }
+ return 0;
+}
+
+/*
+ * Free the contiguous run [@base, @base + @cnt).  The elements are
+ * marked available unconditionally; current ownership is not checked.
+ *
+ * Return: 0 on success, -1 if the run extends past the end of range
+ */
+int cvmx_range_free_with_base(u64 range_addr, int base, int cnt)
+{
+ u64 i, size;
+ u64 up = base + cnt;
+
+ size = cvmx_read64_uint64(addr_of_size(range_addr));
+ if (up > size) {
+ debug("ERROR: %s: invalid base or cnt size=%d base+cnt=%d\n",
+ __func__, (int)size, (int)up);
+ return -1;
+ }
+ for (i = base; i < up; i++) {
+ cvmx_write64_uint64(addr_of_element(range_addr, i),
+ CVMX_RANGE_AVAILABLE);
+ }
+ return 0;
+}
+
+/*
+ * Read the owner word of element @base.
+ *
+ * Return: the owner value (CVMX_RANGE_AVAILABLE if free), or 0 when
+ * @base is out of range.  Note 0 is also a representable owner value,
+ * so callers cannot distinguish that owner from the error case.
+ */
+u64 cvmx_range_get_owner(u64 range_addr, u64 base)
+{
+ u64 size = cvmx_read64_uint64(addr_of_size(range_addr));
+
+ if (base >= size) {
+ debug("ERROR: %s: invalid base or cnt size=%d base=%d\n",
+ __func__, (int)size, (int)base);
+ return 0;
+ }
+ return cvmx_read64_uint64(addr_of_element(range_addr, base));
+}
+
+/*
+ * Dump the range contents via debug(), run-length style: one line per
+ * contiguous run of elements that share the same owner, printing the
+ * run's starting index and owner value.
+ */
+void cvmx_range_show(u64 range_addr)
+{
+ u64 pval, val, size, pindex, i;
+
+ size = cvmx_read64_uint64(addr_of_size(range_addr));
+ pval = cvmx_read64_uint64(addr_of_element(range_addr, 0));
+ pindex = 0;
+
+ debug("index=%d: owner %llx\n", (int)pindex, CAST_ULL(pval));
+
+ for (i = 1; i < size; i++) {
+ val = cvmx_read64_uint64(addr_of_element(range_addr, i));
+ if (val != pval) {
+ /* Owner changed: report the run that just ended */
+ debug("index=%d: owner %llx\n", (int)pindex,
+ CAST_ULL(pval));
+ pindex = i;
+ pval = val;
+ }
+ }
+ debug("index=%d: owner %llx\n", (int)pindex, CAST_ULL(pval));
+}
--
2.35.1
^ permalink raw reply related [flat|nested] 52+ messages in thread* [PATCH 42/52] mips: octeon: Misc changes to existing C files for upcoming eth support
2022-03-30 10:06 [PATCH 00/52] mips: octeon: Add ethernet support Stefan Roese
` (36 preceding siblings ...)
2022-03-30 10:07 ` [PATCH 41/52] mips: octeon: Add cvmx-range.c Stefan Roese
@ 2022-03-30 10:07 ` Stefan Roese
2022-03-30 10:07 ` [PATCH 43/52] mips: octeon: Makefile: Enable building of the newly added C files Stefan Roese
` (11 subsequent siblings)
49 siblings, 0 replies; 52+ messages in thread
From: Stefan Roese @ 2022-03-30 10:07 UTC (permalink / raw)
To: u-boot; +Cc: awilliams, cchavva
This patch includes misc changes to already present Octeon MIPS C
files, which are necessary for the upcoming ethernet support.
The changes are mostly:
- DM GPIO & I2C infrastructure
- Coding style cleanup while reworking of the code
Signed-off-by: Stefan Roese <sr@denx.de>
---
arch/mips/mach-octeon/cvmx-bootmem.c | 3 +-
arch/mips/mach-octeon/cvmx-helper-cfg.c | 67 +--
arch/mips/mach-octeon/cvmx-helper-fdt.c | 645 +++++++++---------------
arch/mips/mach-octeon/cvmx-helper.c | 45 +-
4 files changed, 279 insertions(+), 481 deletions(-)
diff --git a/arch/mips/mach-octeon/cvmx-bootmem.c b/arch/mips/mach-octeon/cvmx-bootmem.c
index 9bd644d68bd8..52e58b4c1761 100644
--- a/arch/mips/mach-octeon/cvmx-bootmem.c
+++ b/arch/mips/mach-octeon/cvmx-bootmem.c
@@ -1189,7 +1189,7 @@ s64 cvmx_bootmem_phy_mem_list_init(u64 mem_size,
if (mem_size > OCTEON_DDR1_SIZE) {
__cvmx_bootmem_phy_free(OCTEON_DDR1_BASE, OCTEON_DDR1_SIZE, 0);
__cvmx_bootmem_phy_free(OCTEON_DDR2_BASE,
- mem_size - OCTEON_DDR1_SIZE, 0);
+ mem_size - OCTEON_DDR2_BASE, 0);
} else {
__cvmx_bootmem_phy_free(OCTEON_DDR1_BASE, mem_size, 0);
}
@@ -1349,7 +1349,6 @@ s64 cvmx_bootmem_phy_mem_list_init_multi(u8 node_mask,
addr += sizeof(struct cvmx_bootmem_named_block_desc);
}
- // test-only: DEBUG ifdef???
cvmx_bootmem_phy_list_print();
return 1;
diff --git a/arch/mips/mach-octeon/cvmx-helper-cfg.c b/arch/mips/mach-octeon/cvmx-helper-cfg.c
index 494108f0cdb7..c0b17c3914c3 100644
--- a/arch/mips/mach-octeon/cvmx-helper-cfg.c
+++ b/arch/mips/mach-octeon/cvmx-helper-cfg.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (C) 2020 Marvell International Ltd.
+ * Copyright (C) 2020-2022 Marvell International Ltd.
*
* Helper Functions for the Configuration Framework
*/
@@ -100,7 +100,6 @@ static u64 cvmx_cfg_opts[CVMX_HELPER_CFG_OPT_MAX] = {
static int cvmx_cfg_max_pko_engines; /* # of PKO DMA engines allocated */
static int cvmx_pko_queue_alloc(u64 port, int count);
static void cvmx_init_port_cfg(void);
-static const int dbg;
int __cvmx_helper_cfg_pknd(int xiface, int index)
{
@@ -980,8 +979,6 @@ int __cvmx_helper_init_port_valid(void)
rc = __cvmx_helper_parse_bgx_dt(fdt_addr);
if (!rc)
rc = __cvmx_fdt_parse_vsc7224(fdt_addr);
- if (!rc)
- rc = __cvmx_fdt_parse_avsp5410(fdt_addr);
if (!rc && octeon_has_feature(OCTEON_FEATURE_BGX_XCV))
rc = __cvmx_helper_parse_bgx_rgmii_dt(fdt_addr);
@@ -1036,7 +1033,6 @@ cvmx_import_config_t cvmx_import_app_config;
int __cvmx_helper_init_port_config_data_local(void)
{
int rv = 0;
- int dbg = 0;
if (!port_cfg_data_initialized)
cvmx_init_port_cfg();
@@ -1061,10 +1057,12 @@ int __cvmx_helper_init_port_config_data_local(void)
}
}
}
- if (dbg) {
- cvmx_helper_cfg_show_cfg();
- cvmx_pko_queue_show();
- }
+
+#ifdef DEBUG
+ cvmx_helper_cfg_show_cfg();
+ cvmx_pko_queue_show();
+#endif
+
return rv;
}
@@ -1077,9 +1075,8 @@ int cvmx_pko_alloc_iport_and_queues(int interface, int port, int port_cnt, int q
{
int rv, p, port_start, cnt;
- if (dbg)
- debug("%s: intf %d/%d pcnt %d qcnt %d\n", __func__, interface, port, port_cnt,
- queue_cnt);
+ debug("%s: intf %d/%d pcnt %d qcnt %d\n", __func__, interface, port, port_cnt,
+ queue_cnt);
if (!port_cfg_data_initialized)
cvmx_init_port_cfg();
@@ -1122,6 +1119,7 @@ static void cvmx_init_port_cfg(void)
struct cvmx_srio_port_param *sr;
pcfg = &cvmx_cfg_port[node][i][j];
+
memset(pcfg, 0, sizeof(*pcfg));
pcfg->port_fdt_node = CVMX_HELPER_CFG_INVALID_VALUE;
@@ -1188,8 +1186,7 @@ int __cvmx_helper_init_port_config_data(int node)
int pknd = 0, bpid = 0;
const int use_static_config = 1;
- if (dbg)
- printf("%s:\n", __func__);
+ debug("%s:\n", __func__);
if (!port_cfg_data_initialized)
cvmx_init_port_cfg();
@@ -1295,10 +1292,11 @@ int __cvmx_helper_init_port_config_data(int node)
__cvmx_helper_cfg_init_ipd2pko_cache();
}
- if (dbg) {
- cvmx_helper_cfg_show_cfg();
- cvmx_pko_queue_show();
- }
+#ifdef DEBUG
+ cvmx_helper_cfg_show_cfg();
+ cvmx_pko_queue_show();
+#endif
+
return rv;
}
@@ -1336,39 +1334,6 @@ int cvmx_helper_get_port_fdt_node_offset(int xiface, int index)
return cvmx_cfg_port[xi.node][xi.interface][index].port_fdt_node;
}
-/**
- * Search for a port based on its FDT node offset
- *
- * @param of_offset Node offset of port to search for
- * @param[out] xiface xinterface of match
- * @param[out] index port index of match
- *
- * Return: 0 if found, -1 if not found
- */
-int cvmx_helper_cfg_get_xiface_index_by_fdt_node_offset(int of_offset, int *xiface, int *index)
-{
- int iface;
- int i;
- int node;
- struct cvmx_cfg_port_param *pcfg = NULL;
- *xiface = -1;
- *index = -1;
-
- for (node = 0; node < CVMX_MAX_NODES; node++) {
- for (iface = 0; iface < CVMX_HELPER_MAX_IFACE; iface++) {
- for (i = 0; i < CVMX_HELPER_CFG_MAX_PORT_PER_IFACE; i++) {
- pcfg = &cvmx_cfg_port[node][iface][i];
- if (pcfg->valid && pcfg->port_fdt_node == of_offset) {
- *xiface = cvmx_helper_node_interface_to_xiface(node, iface);
- *index = i;
- return 0;
- }
- }
- }
- }
- return -1;
-}
-
/**
* @INTERNAL
* Store the FDT node offset in the device tree of a phy
diff --git a/arch/mips/mach-octeon/cvmx-helper-fdt.c b/arch/mips/mach-octeon/cvmx-helper-fdt.c
index 3177dfb6e5ae..187c7fe65ac5 100644
--- a/arch/mips/mach-octeon/cvmx-helper-fdt.c
+++ b/arch/mips/mach-octeon/cvmx-helper-fdt.c
@@ -1,14 +1,17 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (C) 2020 Marvell International Ltd.
+ * Copyright (C) 2020-2022 Marvell International Ltd.
*
* FDT Helper functions similar to those provided to U-Boot.
*/
+#include <dm.h>
+#include <i2c.h>
#include <log.h>
#include <malloc.h>
#include <net.h>
#include <linux/delay.h>
+#include <asm-generic/gpio.h>
#include <mach/cvmx-regs.h>
#include <mach/cvmx-csr.h>
@@ -19,35 +22,6 @@
#include <mach/cvmx-helper-board.h>
#include <mach/cvmx-helper-cfg.h>
#include <mach/cvmx-helper-fdt.h>
-#include <mach/cvmx-helper-gpio.h>
-
-/** Structure used to get type of GPIO from device tree */
-struct gpio_compat {
- char *compatible; /** Compatible string */
- enum cvmx_gpio_type type; /** Type */
- int8_t size; /** (max) Number of pins */
-};
-
-#define GPIO_REG_PCA953X_IN 0
-#define GPIO_REG_PCA953X_OUT 1
-#define GPIO_REG_PCA953X_INVERT 2
-#define GPIO_REG_PCA953X_DIR 3
-
-#define GPIO_REG_PCA957X_IN 0
-#define GPIO_REG_PCA957X_INVERT 1
-#define GPIO_REG_PCA957X_CFG 4
-#define GPIO_REG_PCA957X_OUT 5
-
-enum cvmx_i2c_mux_type { I2C_MUX, I2C_SWITCH };
-
-/** Structure used to get type of GPIO from device tree */
-struct mux_compat {
- char *compatible; /** Compatible string */
- enum cvmx_i2c_bus_type type; /** Mux chip type */
- enum cvmx_i2c_mux_type mux_type; /** Type of mux */
- u8 enable; /** Enable bit for mux */
- u8 size; /** (max) Number of channels */
-};
/**
* Local allocator to handle both SE and U-Boot that also zeroes out memory
@@ -110,6 +84,28 @@ int cvmx_fdt_lookup_phandles(const void *fdt_addr, int node,
return 0;
}
+int cvmx_ofnode_lookup_phandles(ofnode node, const char *prop_name, int *lenp,
+ ofnode *nodes)
+{
+ const u32 *phandles;
+ int count;
+ int i;
+
+ phandles = ofnode_get_property(node, prop_name, &count);
+ if (!phandles || count < 0)
+ return -FDT_ERR_NOTFOUND;
+
+ count /= 4;
+ if (count > *lenp)
+ count = *lenp;
+
+ for (i = 0; i < count; i++)
+ nodes[i] = ofnode_get_by_phandle(fdt32_to_cpu(phandles[i]));
+
+ *lenp = count;
+ return 0;
+}
+
/**
* Given a FDT node return the CPU node number
*
@@ -271,10 +267,10 @@ int cvmx_sfp_set_ipd_port(struct cvmx_fdt_sfp_info *sfp, int ipd_port)
*
* Return: 0 for success, -1 on error
*/
-static int cvmx_fdt_parse_vsc7224_channels(const void *fdt_addr, int of_offset,
+static int cvmx_fdt_parse_vsc7224_channels(ofnode node,
struct cvmx_vsc7224 *vsc7224)
{
- int parent_offset = of_offset;
+ struct ofnode_phandle_args phandle;
int err = 0;
int reg;
int num_chan = 0;
@@ -289,35 +285,33 @@ static int cvmx_fdt_parse_vsc7224_channels(const void *fdt_addr, int of_offset,
bool is_tx;
bool is_qsfp;
const char *mac_str;
+ ofnode node_chan;
- debug("%s(%p, %d, %s)\n", __func__, fdt_addr, of_offset, vsc7224->name);
- do {
- /* Walk through all channels */
- of_offset = fdt_node_offset_by_compatible(fdt_addr, of_offset,
- "vitesse,vsc7224-channel");
- if (of_offset == -FDT_ERR_NOTFOUND) {
- break;
- } else if (of_offset < 0) {
- debug("%s: Failed finding compatible channel\n",
- __func__);
- err = -1;
+ debug("%s(%x, %s)\n", __func__, ofnode_to_offset(node), vsc7224->name);
+ ofnode_for_each_compatible_node(node_chan, "vitesse,vsc7224-channel") {
+ if (!ofnode_valid(node_chan)) {
+ debug("%s: Error parsing FDT node %s\n",
+ __func__, ofnode_get_name(node));
break;
}
- if (fdt_parent_offset(fdt_addr, of_offset) != parent_offset)
+
+ if (ofnode_to_offset(ofnode_get_parent(node_chan)) !=
+ ofnode_to_offset(node))
break;
- reg = cvmx_fdt_get_int(fdt_addr, of_offset, "reg", -1);
+
+ reg = ofnode_get_addr(node_chan);
if (reg < 0 || reg > 3) {
debug("%s: channel reg is either not present or out of range\n",
__func__);
err = -1;
break;
}
- is_tx = cvmx_fdt_get_bool(fdt_addr, of_offset, "direction-tx");
+ is_tx = ofnode_read_bool(node_chan, "direction-tx");
debug("%s(%s): Adding %cx channel %d\n",
__func__, vsc7224->name, is_tx ? 't' : 'r',
reg);
- tap_values = (const uint32_t *)fdt_getprop(fdt_addr, of_offset, "taps", &len);
+ tap_values = ofnode_get_property(node_chan, "taps", &len);
if (!tap_values) {
debug("%s: Error: no taps defined for vsc7224 channel %d\n",
__func__, reg);
@@ -351,11 +345,12 @@ static int cvmx_fdt_parse_vsc7224_channels(const void *fdt_addr, int of_offset,
vsc7224->channel[reg] = channel;
channel->num_taps = num_taps;
channel->lane = reg;
- channel->of_offset = of_offset;
+ channel->of_offset = ofnode_to_offset(node_chan);
channel->is_tx = is_tx;
- channel->pretap_disable = cvmx_fdt_get_bool(fdt_addr, of_offset, "pretap-disable");
- channel->posttap_disable =
- cvmx_fdt_get_bool(fdt_addr, of_offset, "posttap-disable");
+ channel->pretap_disable = ofnode_read_bool(node_chan,
+ "pretap-disable");
+ channel->posttap_disable = ofnode_read_bool(node_chan,
+ "posttap-disable");
channel->vsc7224 = vsc7224;
/* Read all the tap values */
for (i = 0; i < num_taps; i++) {
@@ -371,9 +366,9 @@ static int cvmx_fdt_parse_vsc7224_channels(const void *fdt_addr, int of_offset,
channel->ipd_port = -1;
mac_str = "sfp-mac";
- if (fdt_getprop(fdt_addr, of_offset, mac_str, NULL)) {
+ if (ofnode_get_property(node_chan, mac_str, NULL)) {
is_qsfp = false;
- } else if (fdt_getprop(fdt_addr, of_offset, "qsfp-mac", NULL)) {
+ } else if (ofnode_get_property(node_chan, "qsfp-mac", NULL)) {
is_qsfp = true;
mac_str = "qsfp-mac";
} else {
@@ -381,52 +376,59 @@ static int cvmx_fdt_parse_vsc7224_channels(const void *fdt_addr, int of_offset,
vsc7224->name, reg);
return -1;
}
- of_mac = cvmx_fdt_lookup_phandle(fdt_addr, of_offset, mac_str);
- if (of_mac < 0) {
+
+ err = ofnode_parse_phandle_with_args(node_chan, mac_str, NULL,
+ 0, 0, &phandle);
+ if (err) {
debug("%s: Error %d with MAC %s phandle for %s\n", __func__, of_mac,
mac_str, vsc7224->name);
return -1;
}
- debug("%s: Found mac at offset %d\n", __func__, of_mac);
- err = cvmx_helper_cfg_get_xiface_index_by_fdt_node_offset(of_mac, &xiface, &index);
- if (!err) {
- channel->xiface = xiface;
- channel->index = index;
- channel->ipd_port = cvmx_helper_get_ipd_port(xiface, index);
-
- debug("%s: Found MAC, xiface: 0x%x, index: %d, ipd port: %d\n", __func__,
- xiface, index, channel->ipd_port);
- if (channel->ipd_port >= 0) {
- cvmx_helper_cfg_set_vsc7224_chan_info(xiface, index, channel);
- debug("%s: Storing config channel for xiface 0x%x, index %d\n",
- __func__, xiface, index);
- }
- sfp_info = cvmx_helper_cfg_get_sfp_info(xiface, index);
- if (!sfp_info) {
- debug("%s: Warning: no (Q)SFP+ slot found for xinterface 0x%x, index %d for channel %d\n",
- __func__, xiface, index, channel->lane);
- continue;
- }
+ debug("%s: Found mac at %s\n", __func__,
+ ofnode_get_name(phandle.node));
+
+ xiface = (ofnode_get_addr(ofnode_get_parent(phandle.node))
+ >> 24) & 0x0f;
+ index = ofnode_get_addr(phandle.node);
+ channel->xiface = xiface;
+ channel->index = index;
+ channel->ipd_port = cvmx_helper_get_ipd_port(xiface, index);
+
+ debug("%s: Found MAC, xiface: 0x%x, index: %d, ipd port: %d\n", __func__,
+ xiface, index, channel->ipd_port);
+ if (channel->ipd_port >= 0) {
+ cvmx_helper_cfg_set_vsc7224_chan_info(xiface, index, channel);
+ debug("%s: Storing config channel for xiface 0x%x, index %d\n",
+ __func__, xiface, index);
+ }
+ sfp_info = cvmx_helper_cfg_get_sfp_info(xiface, index);
+ if (!sfp_info) {
+ debug("%s: Warning: no (Q)SFP+ slot found for xinterface 0x%x, index %d for channel %d\n",
+ __func__, xiface, index, channel->lane);
+ continue;
+ }
- /* Link it */
- channel->next = sfp_info->vsc7224_chan;
- if (sfp_info->vsc7224_chan)
- sfp_info->vsc7224_chan->prev = channel;
- sfp_info->vsc7224_chan = channel;
- sfp_info->is_vsc7224 = true;
- debug("%s: Registering VSC7224 %s channel %d with SFP %s\n", __func__,
- vsc7224->name, channel->lane, sfp_info->name);
- if (!sfp_info->mod_abs_changed) {
- debug("%s: Registering cvmx_sfp_vsc7224_mod_abs_changed at %p for xinterface 0x%x, index %d\n",
- __func__, &cvmx_sfp_vsc7224_mod_abs_changed, xiface, index);
- cvmx_sfp_register_mod_abs_changed(
- sfp_info,
- &cvmx_sfp_vsc7224_mod_abs_changed,
- NULL);
- }
+ /* Link it */
+ channel->next = sfp_info->vsc7224_chan;
+ if (sfp_info->vsc7224_chan)
+ sfp_info->vsc7224_chan->prev = channel;
+ sfp_info->vsc7224_chan = channel;
+ sfp_info->is_vsc7224 = true;
+ debug("%s: Registering VSC7224 %s channel %d with SFP %s\n", __func__,
+ vsc7224->name, channel->lane, sfp_info->name);
+ if (!sfp_info->mod_abs_changed) {
+ debug("%s: Registering cvmx_sfp_vsc7224_mod_abs_changed at %p for xinterface 0x%x, index %d\n",
+ __func__, &cvmx_sfp_vsc7224_mod_abs_changed, xiface, index);
+ cvmx_sfp_register_mod_abs_changed(
+ sfp_info,
+ &cvmx_sfp_vsc7224_mod_abs_changed,
+ NULL);
}
- } while (!err && num_chan < 4);
+
+ if (num_chan >= 4)
+ break;
+ }
return err;
}
@@ -441,12 +443,17 @@ static int cvmx_fdt_parse_vsc7224_channels(const void *fdt_addr, int of_offset,
*/
int __cvmx_fdt_parse_vsc7224(const void *fdt_addr)
{
- int of_offset = -1;
struct cvmx_vsc7224 *vsc7224 = NULL;
- struct cvmx_fdt_gpio_info *gpio_info = NULL;
+ ofnode node;
int err = 0;
- int of_parent;
static bool parsed;
+ const int *init_array;
+ struct udevice *dev;
+ u16 value;
+ int reg;
+ int len;
+ int ret;
+ int i;
debug("%s(%p)\n", __func__, fdt_addr);
@@ -454,30 +461,23 @@ int __cvmx_fdt_parse_vsc7224(const void *fdt_addr)
debug("%s: Already parsed\n", __func__);
return 0;
}
- do {
- of_offset = fdt_node_offset_by_compatible(fdt_addr, of_offset,
- "vitesse,vsc7224");
- debug("%s: of_offset: %d\n", __func__, of_offset);
- if (of_offset == -FDT_ERR_NOTFOUND) {
- break;
- } else if (of_offset < 0) {
- err = -1;
- debug("%s: Error %d parsing FDT\n",
- __func__, of_offset);
+
+ ofnode_for_each_compatible_node(node, "vitesse,vsc7224") {
+ if (!ofnode_valid(node)) {
+ debug("%s: Error parsing FDT node %s\n",
+ __func__, ofnode_get_name(node));
break;
}
vsc7224 = __cvmx_fdt_alloc(sizeof(*vsc7224));
-
if (!vsc7224) {
debug("%s: Out of memory!\n", __func__);
return -1;
}
- vsc7224->of_offset = of_offset;
- vsc7224->i2c_addr = cvmx_fdt_get_int(fdt_addr, of_offset,
- "reg", -1);
- of_parent = fdt_parent_offset(fdt_addr, of_offset);
- vsc7224->i2c_bus = cvmx_fdt_get_i2c_bus(fdt_addr, of_parent);
+
+ vsc7224->of_offset = ofnode_to_offset(node);
+ vsc7224->i2c_addr = ofnode_get_addr(node);
+ vsc7224->i2c_bus = cvmx_ofnode_get_i2c_bus(ofnode_get_parent(node));
if (vsc7224->i2c_addr < 0) {
debug("%s: Error: reg field missing\n", __func__);
err = -1;
@@ -488,168 +488,90 @@ int __cvmx_fdt_parse_vsc7224(const void *fdt_addr)
err = -1;
break;
}
- vsc7224->name = fdt_get_name(fdt_addr, of_offset, NULL);
+ vsc7224->name = ofnode_get_name(node);
debug("%s: Adding %s\n", __func__, vsc7224->name);
- if (fdt_getprop(fdt_addr, of_offset, "reset", NULL)) {
- gpio_info = cvmx_fdt_gpio_get_info_phandle(fdt_addr, of_offset, "reset");
- vsc7224->reset_gpio = gpio_info;
- }
- if (fdt_getprop(fdt_addr, of_offset, "los", NULL)) {
- gpio_info = cvmx_fdt_gpio_get_info_phandle(fdt_addr, of_offset, "los");
- vsc7224->los_gpio = gpio_info;
- }
- debug("%s: Parsing channels\n", __func__);
- err = cvmx_fdt_parse_vsc7224_channels(fdt_addr, of_offset, vsc7224);
+
+ err = gpio_request_by_name_nodev(node, "reset", 0,
+ &vsc7224->reset_gpio,
+ GPIOD_IS_OUT);
if (err) {
- debug("%s: Error parsing VSC7224 channels\n", __func__);
- break;
+ printf("%s: reset GPIO not found in DT!\n", __func__);
+ return -ENODEV;
}
- } while (of_offset > 0);
- if (err) {
- debug("%s(): Error\n", __func__);
- if (vsc7224) {
- if (vsc7224->reset_gpio)
- __cvmx_fdt_free(vsc7224->reset_gpio, sizeof(*vsc7224->reset_gpio));
- if (vsc7224->los_gpio)
- __cvmx_fdt_free(vsc7224->los_gpio, sizeof(*vsc7224->los_gpio));
- if (vsc7224->i2c_bus)
- cvmx_fdt_free_i2c_bus(vsc7224->i2c_bus);
- __cvmx_fdt_free(vsc7224, sizeof(*vsc7224));
+ err = gpio_request_by_name_nodev(node, "los", 0,
+ &vsc7224->los_gpio,
+ GPIOD_IS_IN);
+ if (err) {
+ printf("%s: los GPIO not found in DT!\n", __func__);
+ return -ENODEV;
}
- }
- if (!err)
- parsed = true;
-
- return err;
-}
-/**
- * @INTERNAL
- * Parses all instances of the Avago AVSP5410 gearbox phy
- *
- * @param[in] fdt_addr Address of flat device tree
- *
- * Return: 0 for success, error otherwise
- */
-int __cvmx_fdt_parse_avsp5410(const void *fdt_addr)
-{
- int of_offset = -1;
- struct cvmx_avsp5410 *avsp5410 = NULL;
- struct cvmx_fdt_sfp_info *sfp_info;
- int err = 0;
- int of_parent;
- static bool parsed;
- int of_mac;
- int xiface, index;
- bool is_qsfp;
- const char *mac_str;
-
- debug("%s(%p)\n", __func__, fdt_addr);
-
- if (parsed) {
- debug("%s: Already parsed\n", __func__);
- return 0;
- }
-
- do {
- of_offset = fdt_node_offset_by_compatible(fdt_addr, of_offset,
- "avago,avsp-5410");
- debug("%s: of_offset: %d\n", __func__, of_offset);
- if (of_offset == -FDT_ERR_NOTFOUND) {
- break;
- } else if (of_offset < 0) {
- err = -1;
- debug("%s: Error %d parsing FDT\n", __func__, of_offset);
- break;
+ /*
+ * This code was taken from the NIC23 board specific code
+ * but should be better placed here in the common code
+ */
+ debug("%s: Putting device in reset\n", __func__);
+ dm_gpio_set_value(&vsc7224->reset_gpio, 1);
+ mdelay(10);
+ debug("%s: Taking device out of reset\n", __func__);
+ dm_gpio_set_value(&vsc7224->reset_gpio, 0);
+ mdelay(50);
+
+ init_array = ofnode_get_property(node, "vitesse,reg-init",
+ &len);
+ if (!init_array) {
+ debug("%s: No initialization array\n", __func__);
+ continue;
}
-
- avsp5410 = __cvmx_fdt_alloc(sizeof(*avsp5410));
-
- if (!avsp5410) {
- debug("%s: Out of memory!\n", __func__);
+ if ((len % 8) != 0) {
+ printf("%s: Error: register init string should be an array of reg number followed by value\n",
+ __func__);
return -1;
}
- avsp5410->of_offset = of_offset;
- avsp5410->i2c_addr = cvmx_fdt_get_int(fdt_addr, of_offset,
- "reg", -1);
- of_parent = fdt_parent_offset(fdt_addr, of_offset);
- avsp5410->i2c_bus = cvmx_fdt_get_i2c_bus(fdt_addr, of_parent);
- if (avsp5410->i2c_addr < 0) {
- debug("%s: Error: reg field missing\n", __func__);
- err = -1;
- break;
- }
- if (!avsp5410->i2c_bus) {
- debug("%s: Error getting i2c bus\n", __func__);
- err = -1;
- break;
- }
- avsp5410->name = fdt_get_name(fdt_addr, of_offset, NULL);
- debug("%s: Adding %s\n", __func__, avsp5410->name);
- /* Now find out which interface it's mapped to */
- avsp5410->ipd_port = -1;
-
- mac_str = "sfp-mac";
- if (fdt_getprop(fdt_addr, of_offset, mac_str, NULL)) {
- is_qsfp = false;
- } else if (fdt_getprop(fdt_addr, of_offset, "qsfp-mac", NULL)) {
- is_qsfp = true;
- mac_str = "qsfp-mac";
- } else {
- debug("%s: Error: MAC not found for %s\n", __func__, avsp5410->name);
- return -1;
- }
- of_mac = cvmx_fdt_lookup_phandle(fdt_addr, of_offset, mac_str);
- if (of_mac < 0) {
- debug("%s: Error %d with MAC %s phandle for %s\n", __func__, of_mac,
- mac_str, avsp5410->name);
+ ret = i2c_get_chip(vsc7224->i2c_bus->i2c_bus,
+ vsc7224->i2c_addr, 1, &dev);
+ if (ret) {
+ debug("Cannot find I2C device: %d\n", ret);
return -1;
}
- debug("%s: Found mac at offset %d\n", __func__, of_mac);
- err = cvmx_helper_cfg_get_xiface_index_by_fdt_node_offset(of_mac, &xiface, &index);
- if (!err) {
- avsp5410->xiface = xiface;
- avsp5410->index = index;
- avsp5410->ipd_port = cvmx_helper_get_ipd_port(xiface, index);
-
- debug("%s: Found MAC, xiface: 0x%x, index: %d, ipd port: %d\n", __func__,
- xiface, index, avsp5410->ipd_port);
- if (avsp5410->ipd_port >= 0) {
- cvmx_helper_cfg_set_avsp5410_info(xiface, index, avsp5410);
- debug("%s: Storing config phy for xiface 0x%x, index %d\n",
- __func__, xiface, index);
- }
- sfp_info = cvmx_helper_cfg_get_sfp_info(xiface, index);
- if (!sfp_info) {
- debug("%s: Warning: no (Q)SFP+ slot found for xinterface 0x%x, index %d\n",
- __func__, xiface, index);
- continue;
+ for (i = 0; i < len / sizeof(int); i += 2) {
+ u8 buffer[2];
+
+ reg = fdt32_to_cpu(init_array[i]);
+ value = fdt32_to_cpu(init_array[i + 1]);
+ buffer[0] = value >> 8;
+ buffer[1] = value & 0xff;
+ ret = dm_i2c_write(dev, reg, buffer, 2);
+ if (ret) {
+ debug("Cannot write I2C device: %d\n", ret);
+ return -1;
}
- sfp_info->is_avsp5410 = true;
- sfp_info->avsp5410 = avsp5410;
- debug("%s: Registering AVSP5410 %s with SFP %s\n", __func__, avsp5410->name,
- sfp_info->name);
- if (!sfp_info->mod_abs_changed) {
- debug("%s: Registering cvmx_sfp_avsp5410_mod_abs_changed at %p for xinterface 0x%x, index %d\n",
- __func__, &cvmx_sfp_avsp5410_mod_abs_changed, xiface, index);
- cvmx_sfp_register_mod_abs_changed(
- sfp_info,
- &cvmx_sfp_avsp5410_mod_abs_changed,
- NULL);
- }
+ debug(" Wrote 0x%02x <= 0x%02x%02x\n", reg,
+ buffer[0], buffer[1]);
+ }
+
+ debug("%s: Parsing channels\n", __func__);
+ err = cvmx_fdt_parse_vsc7224_channels(node, vsc7224);
+ if (err) {
+ debug("%s: Error parsing VSC7224 channels\n", __func__);
+ break;
}
- } while (of_offset > 0);
+ }
if (err) {
debug("%s(): Error\n", __func__);
- if (avsp5410) {
- if (avsp5410->i2c_bus)
- cvmx_fdt_free_i2c_bus(avsp5410->i2c_bus);
- __cvmx_fdt_free(avsp5410, sizeof(*avsp5410));
+ if (vsc7224) {
+ dm_gpio_free(vsc7224->reset_gpio.dev,
+ &vsc7224->reset_gpio);
+ dm_gpio_free(vsc7224->los_gpio.dev,
+ &vsc7224->los_gpio);
+ if (vsc7224->i2c_bus)
+ cvmx_fdt_free_i2c_bus(vsc7224->i2c_bus);
+ __cvmx_fdt_free(vsc7224, sizeof(*vsc7224));
}
}
if (!err)
@@ -658,146 +580,6 @@ int __cvmx_fdt_parse_avsp5410(const void *fdt_addr)
return err;
}
-/**
- * Parse QSFP GPIOs for SFP
- *
- * @param[in] fdt_addr Pointer to flat device tree
- * @param of_offset Offset of QSFP node
- * @param[out] sfp_info Pointer to sfp info to fill in
- *
- * Return: 0 for success
- */
-static int cvmx_parse_qsfp(const void *fdt_addr, int of_offset, struct cvmx_fdt_sfp_info *sfp_info)
-{
- sfp_info->select = cvmx_fdt_gpio_get_info_phandle(fdt_addr, of_offset, "select");
- sfp_info->mod_abs = cvmx_fdt_gpio_get_info_phandle(fdt_addr, of_offset, "mod_prs");
- sfp_info->reset = cvmx_fdt_gpio_get_info_phandle(fdt_addr, of_offset, "reset");
- sfp_info->interrupt = cvmx_fdt_gpio_get_info_phandle(fdt_addr, of_offset, "interrupt");
- sfp_info->lp_mode = cvmx_fdt_gpio_get_info_phandle(fdt_addr, of_offset, "lp_mode");
- return 0;
-}
-
-/**
- * Parse SFP GPIOs for SFP
- *
- * @param[in] fdt_addr Pointer to flat device tree
- * @param of_offset Offset of SFP node
- * @param[out] sfp_info Pointer to sfp info to fill in
- *
- * Return: 0 for success
- */
-static int cvmx_parse_sfp(const void *fdt_addr, int of_offset, struct cvmx_fdt_sfp_info *sfp_info)
-{
- sfp_info->mod_abs = cvmx_fdt_gpio_get_info_phandle(fdt_addr, of_offset, "mod_abs");
- sfp_info->rx_los = cvmx_fdt_gpio_get_info_phandle(fdt_addr, of_offset, "rx_los");
- sfp_info->tx_disable = cvmx_fdt_gpio_get_info_phandle(fdt_addr, of_offset, "tx_disable");
- sfp_info->tx_error = cvmx_fdt_gpio_get_info_phandle(fdt_addr, of_offset, "tx_error");
- return 0;
-}
-
-/**
- * Parse SFP/QSFP EEPROM and diag
- *
- * @param[in] fdt_addr Pointer to flat device tree
- * @param of_offset Offset of SFP node
- * @param[out] sfp_info Pointer to sfp info to fill in
- *
- * Return: 0 for success, -1 on error
- */
-static int cvmx_parse_sfp_eeprom(const void *fdt_addr, int of_offset,
- struct cvmx_fdt_sfp_info *sfp_info)
-{
- int of_eeprom;
- int of_diag;
-
- debug("%s(%p, %d, %s)\n", __func__, fdt_addr, of_offset, sfp_info->name);
- of_eeprom = cvmx_fdt_lookup_phandle(fdt_addr, of_offset, "eeprom");
- if (of_eeprom < 0) {
- debug("%s: Missing \"eeprom\" from device tree for %s\n", __func__, sfp_info->name);
- return -1;
- }
-
- sfp_info->i2c_bus = cvmx_fdt_get_i2c_bus(fdt_addr, fdt_parent_offset(fdt_addr, of_eeprom));
- sfp_info->i2c_eeprom_addr = cvmx_fdt_get_int(fdt_addr, of_eeprom, "reg", 0x50);
-
- debug("%s(%p, %d, %s, %d)\n", __func__, fdt_addr, of_offset, sfp_info->name,
- sfp_info->i2c_eeprom_addr);
-
- if (!sfp_info->i2c_bus) {
- debug("%s: Error: could not determine i2c bus for eeprom for %s\n", __func__,
- sfp_info->name);
- return -1;
- }
- of_diag = cvmx_fdt_lookup_phandle(fdt_addr, of_offset, "diag");
- if (of_diag >= 0)
- sfp_info->i2c_diag_addr = cvmx_fdt_get_int(fdt_addr, of_diag, "reg", 0x51);
- else
- sfp_info->i2c_diag_addr = 0x51;
- return 0;
-}
-
-/**
- * Parse SFP information from device tree
- *
- * @param[in] fdt_addr Address of flat device tree
- *
- * Return: pointer to sfp info or NULL if error
- */
-struct cvmx_fdt_sfp_info *cvmx_helper_fdt_parse_sfp_info(const void *fdt_addr, int of_offset)
-{
- struct cvmx_fdt_sfp_info *sfp_info = NULL;
- int err = -1;
- bool is_qsfp;
-
- if (!fdt_node_check_compatible(fdt_addr, of_offset, "ethernet,sfp-slot")) {
- is_qsfp = false;
- } else if (!fdt_node_check_compatible(fdt_addr, of_offset, "ethernet,qsfp-slot")) {
- is_qsfp = true;
- } else {
- debug("%s: Error: incompatible sfp/qsfp slot, compatible=%s\n", __func__,
- (char *)fdt_getprop(fdt_addr, of_offset, "compatible", NULL));
- goto error_exit;
- }
-
- debug("%s: %ssfp module found at offset %d\n", __func__, is_qsfp ? "q" : "", of_offset);
- sfp_info = __cvmx_fdt_alloc(sizeof(*sfp_info));
- if (!sfp_info) {
- debug("%s: Error: out of memory\n", __func__);
- goto error_exit;
- }
- sfp_info->name = fdt_get_name(fdt_addr, of_offset, NULL);
- sfp_info->of_offset = of_offset;
- sfp_info->is_qsfp = is_qsfp;
- sfp_info->last_mod_abs = -1;
- sfp_info->last_rx_los = -1;
-
- if (is_qsfp)
- err = cvmx_parse_qsfp(fdt_addr, of_offset, sfp_info);
- else
- err = cvmx_parse_sfp(fdt_addr, of_offset, sfp_info);
- if (err) {
- debug("%s: Error in %s parsing %ssfp GPIO info\n", __func__, sfp_info->name,
- is_qsfp ? "q" : "");
- goto error_exit;
- }
- debug("%s: Parsing %ssfp module eeprom\n", __func__, is_qsfp ? "q" : "");
- err = cvmx_parse_sfp_eeprom(fdt_addr, of_offset, sfp_info);
- if (err) {
- debug("%s: Error parsing eeprom info for %s\n", __func__, sfp_info->name);
- goto error_exit;
- }
-
- /* Register default check for mod_abs changed */
- if (!err)
- cvmx_sfp_register_check_mod_abs(sfp_info, cvmx_sfp_check_mod_abs, NULL);
-
-error_exit:
- /* Note: we don't free any data structures on error since it gets
- * rather complicated with i2c buses and whatnot.
- */
- return err ? NULL : sfp_info;
-}
-
/**
* @INTERNAL
* Parse a slice of the Inphi/Cortina CS4343 in the device tree
@@ -968,3 +750,78 @@ int cvmx_fdt_parse_cs4343(const void *fdt_addr, int of_offset, struct cvmx_phy_i
return err < 0 ? -1 : 0;
}
+
+/**
+ * Given the parent offset of an i2c device build up a list describing the bus
+ * which can contain i2c muxes and switches.
+ *
+ * @param[in] node ofnode of the parent node of a GPIO device in
+ * the device tree.
+ *
+ * @return pointer to list of i2c devices starting from the root which
+ * can include i2c muxes and switches or NULL if error. Note that
+ * all entries are allocated on the heap.
+ *
+ * @see cvmx_fdt_free_i2c_bus()
+ */
+struct cvmx_fdt_i2c_bus_info *cvmx_ofnode_get_i2c_bus(ofnode node)
+{
+ struct cvmx_fdt_i2c_bus_info *businfo = NULL;
+ struct udevice *bus;
+ int ret;
+
+ businfo = __cvmx_fdt_alloc(sizeof(*businfo));
+ if (!businfo) {
+ debug("Out of memory\n");
+ return NULL;
+ }
+
+ debug("%s: Found node %s\n", __func__, ofnode_get_name(node));
+ businfo->of_offset = ofnode_to_offset(node);
+
+ /*
+ * Get I2C bus and probe it automatically - needed for later use
+ */
+ ret = device_get_global_by_ofnode(node, &bus);
+ if (!bus || ret) {
+ printf("Cannot find a I2C bus\n");
+ return NULL;
+ }
+
+ businfo->i2c_bus = bus;
+
+ return businfo;
+}
+
+/**
+ * Return the Octeon bus number for a bus descriptor
+ *
+ * @param[in] bus bus descriptor
+ *
+ * @return Octeon twsi bus number or -1 on error
+ */
+int cvmx_fdt_i2c_get_root_bus(const struct cvmx_fdt_i2c_bus_info *bus)
+{
+ if (bus->type != CVMX_I2C_BUS_OCTEON)
+ return -1;
+ return bus->channel;
+}
+
+/**
+ * Frees all entries for an i2c bus descriptor
+ *
+ * @param bus bus to free
+ *
+ * @return 0
+ */
+int cvmx_fdt_free_i2c_bus(struct cvmx_fdt_i2c_bus_info *bus)
+{
+ struct cvmx_fdt_i2c_bus_info *last;
+
+ while (bus) {
+ last = bus;
+ bus = bus->child;
+ __cvmx_fdt_free(last, sizeof(*last));
+ }
+ return 0;
+}
diff --git a/arch/mips/mach-octeon/cvmx-helper.c b/arch/mips/mach-octeon/cvmx-helper.c
index d0620d6cda0f..c7851717c05a 100644
--- a/arch/mips/mach-octeon/cvmx-helper.c
+++ b/arch/mips/mach-octeon/cvmx-helper.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (C) 2020 Marvell International Ltd.
+ * Copyright (C) 2020-2022 Marvell International Ltd.
*
* Helper functions for common, but complicated tasks.
*/
@@ -302,20 +302,6 @@ static const struct iface_ops iface_ops_npi = {
.enable = __cvmx_helper_npi_enable,
};
-/**
- * @INTERNAL
- * This structure specifies the interface methods used by interfaces
- * configured as srio.
- */
-static const struct iface_ops iface_ops_srio = {
- .mode = CVMX_HELPER_INTERFACE_MODE_SRIO,
- .enumerate = __cvmx_helper_srio_probe,
- .probe = __cvmx_helper_srio_probe,
- .enable = __cvmx_helper_srio_enable,
- .link_get = __cvmx_helper_srio_link_get,
- .link_set = __cvmx_helper_srio_link_set,
-};
-
/**
* @INTERNAL
* This structure specifies the interface methods used by interfaces
@@ -607,7 +593,7 @@ int __cvmx_helper_early_ports_on_interface(int interface)
* chip and configuration, this can be 1-16. A value of 0
* specifies that the interface doesn't exist or isn't usable.
*
- * @param xiface xiface to get the port count for
+ * @param xiface to get the port count for
*
* Return: Number of ports on interface. Can be Zero.
*/
@@ -919,15 +905,9 @@ static cvmx_helper_interface_mode_t __cvmx_get_mode_cnf75xx(int xiface)
break;
}
} else if ((interface < 3) && OCTEON_IS_MODEL(OCTEON_CNF75XX)) {
- cvmx_sriox_status_reg_t sriox_status_reg;
- int srio_port = interface - 1;
-
- sriox_status_reg.u64 = csr_rd(CVMX_SRIOX_STATUS_REG(srio_port));
-
- if (sriox_status_reg.s.srio)
- iface_ops[interface] = &iface_ops_srio;
- else
- iface_ops[interface] = &iface_ops_dis;
+ /* SRIO is disabled for now */
+ printf("SRIO disabled for now!\n");
+ iface_ops[interface] = &iface_ops_dis;
} else if (interface == 3) { /* DPI */
iface_ops[interface] = &iface_ops_npi;
} else if (interface == 4) { /* LOOP */
@@ -1046,7 +1026,6 @@ static cvmx_helper_interface_mode_t __cvmx_get_mode_octeon2(int interface)
(OCTEON_IS_MODEL(OCTEON_CN66XX) && interface >= 4 &&
interface <= 7)) {
/* Only present in CN63XX & CN66XX Octeon model */
- union cvmx_sriox_status_reg sriox_status_reg;
/* cn66xx pass1.0 has only 2 SRIO interfaces. */
if ((interface == 5 || interface == 7) &&
@@ -1059,12 +1038,9 @@ static cvmx_helper_interface_mode_t __cvmx_get_mode_octeon2(int interface)
*/
iface_ops[interface] = &iface_ops_dis;
} else {
- sriox_status_reg.u64 =
- csr_rd(CVMX_SRIOX_STATUS_REG(interface - 4));
- if (sriox_status_reg.s.srio)
- iface_ops[interface] = &iface_ops_srio;
- else
- iface_ops[interface] = &iface_ops_dis;
+ /* SRIO is disabled for now */
+ printf("SRIO disabled for now!\n");
+ iface_ops[interface] = &iface_ops_dis;
}
} else if (OCTEON_IS_MODEL(OCTEON_CN66XX)) {
union cvmx_mio_qlmx_cfg mio_qlm_cfg;
@@ -1467,7 +1443,7 @@ int __cvmx_helper_packet_hardware_enable(int xiface)
if (iface_node_ops[xi.node][xi.interface]->enable)
result = iface_node_ops[xi.node][xi.interface]->enable(xiface);
- result |= __cvmx_helper_board_hardware_enable(xiface);
+
return result;
}
@@ -1609,7 +1585,8 @@ int cvmx_helper_initialize_packet_io_node(unsigned int node)
/* Skip invalid/disabled interfaces */
if (cvmx_helper_ports_on_interface(xiface) <= 0)
continue;
- printf("Node %d Interface %d has %d ports (%s)\n", node, interface,
+ debug("Node %d Interface %d has %d ports (%s)\n",
+ node, interface,
cvmx_helper_ports_on_interface(xiface),
cvmx_helper_interface_mode_to_string(
cvmx_helper_interface_get_mode(xiface)));
--
2.35.1
^ permalink raw reply related [flat|nested] 52+ messages in thread* [PATCH 43/52] mips: octeon: Makefile: Enable building of the newly added C files
2022-03-30 10:06 [PATCH 00/52] mips: octeon: Add ethernet support Stefan Roese
` (37 preceding siblings ...)
2022-03-30 10:07 ` [PATCH 42/52] mips: octeon: Misc changes to existing C files for upcoming eth support Stefan Roese
@ 2022-03-30 10:07 ` Stefan Roese
2022-03-30 10:07 ` [PATCH 44/52] mips: octeon: cpu.c: Move bootmem init to arch_early_init_r() Stefan Roese
` (10 subsequent siblings)
49 siblings, 0 replies; 52+ messages in thread
From: Stefan Roese @ 2022-03-30 10:07 UTC (permalink / raw)
To: u-boot; +Cc: awilliams, cchavva
This patch adds the newly added C files to the Makefile to enable
compilation. This is done in a separate step so as not to introduce build
breakage while adding the individual files with potentially missing
externals.
Signed-off-by: Stefan Roese <sr@denx.de>
---
arch/mips/mach-octeon/Makefile | 35 +++++++++++++++++++++++++++++++++-
1 file changed, 34 insertions(+), 1 deletion(-)
diff --git a/arch/mips/mach-octeon/Makefile b/arch/mips/mach-octeon/Makefile
index 40ddab27eacd..6aa7b367416f 100644
--- a/arch/mips/mach-octeon/Makefile
+++ b/arch/mips/mach-octeon/Makefile
@@ -12,13 +12,46 @@ obj-y += cvmx-coremask.o
obj-y += cvmx-bootmem.o
obj-y += bootoctlinux.o
-# QLM related code
+# Misc Octeon C files, mostly for QLM & ethernet support
+obj-y += cvmx-agl.o
+obj-y += cvmx-fpa.o
+obj-y += cvmx-fpa-resource.o
+obj-y += cvmx-fau-compat.o
+obj-y += cvmx-global-resources.o
+obj-y += cvmx-cmd-queue.o
+obj-y += cvmx-helper-agl.o
+obj-y += cvmx-helper-bgx.o
+obj-y += cvmx-helper-board.o
obj-y += cvmx-helper-cfg.o
obj-y += cvmx-helper-fdt.o
+obj-y += cvmx-helper-fpa.o
+obj-y += cvmx-helper-ilk.o
+obj-y += cvmx-helper-ipd.o
obj-y += cvmx-helper-jtag.o
+obj-y += cvmx-helper-loop.o
+obj-y += cvmx-helper-npi.o
+obj-y += cvmx-helper-pki.o
+obj-y += cvmx-helper-pko.o
+obj-y += cvmx-helper-pko3.o
+obj-y += cvmx-helper-rgmii.o
+obj-y += cvmx-helper-sfp.o
+obj-y += cvmx-helper-sgmii.o
obj-y += cvmx-helper-util.o
+obj-y += cvmx-helper-xaui.o
obj-y += cvmx-helper.o
+obj-y += cvmx-ilk.o
+obj-y += cvmx-ipd.o
obj-y += cvmx-pcie.o
+obj-y += cvmx-pki.o
+obj-y += cvmx-pki-resources.o
+obj-y += cvmx-pko.o
+obj-y += cvmx-pko-internal-ports-range.o
+obj-y += cvmx-pko3.o
+obj-y += cvmx-pko3-compat.o
+obj-y += cvmx-pko3-resources.o
+obj-y += cvmx-pko3-queue.o
+obj-y += cvmx-range.o
obj-y += cvmx-qlm.o
+obj-y += cvmx-qlm-tables.o
obj-y += octeon_fdt.o
obj-y += octeon_qlm.o
--
2.35.1
^ permalink raw reply related [flat|nested] 52+ messages in thread* [PATCH 44/52] mips: octeon: cpu.c: Move bootmem init to arch_early_init_r()
2022-03-30 10:06 [PATCH 00/52] mips: octeon: Add ethernet support Stefan Roese
` (38 preceding siblings ...)
2022-03-30 10:07 ` [PATCH 43/52] mips: octeon: Makefile: Enable building of the newly added C files Stefan Roese
@ 2022-03-30 10:07 ` Stefan Roese
2022-03-30 10:07 ` [PATCH 45/52] mips: octeon: cpu.c: Implement configure_lmtdma_window() Stefan Roese
` (9 subsequent siblings)
49 siblings, 0 replies; 52+ messages in thread
From: Stefan Roese @ 2022-03-30 10:07 UTC (permalink / raw)
To: u-boot; +Cc: awilliams, cchavva
Call octeon_bootmem_init() earlier in the boot process, so that this
bootmem infrastructure is already initialized when e.g. the
networking support gets probed.
Signed-off-by: Stefan Roese <sr@denx.de>
---
arch/mips/Kconfig | 1 +
arch/mips/mach-octeon/cpu.c | 13 ++++++++++++-
2 files changed, 13 insertions(+), 1 deletion(-)
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 28234aa0bb6c..34376511daee 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -101,6 +101,7 @@ config ARCH_JZ47XX
config ARCH_OCTEON
bool "Support Marvell Octeon CN7xxx platforms"
+ select ARCH_EARLY_INIT_R
select CPU_CAVIUM_OCTEON
select DISPLAY_CPUINFO
select DMA_ADDR_T_64BIT
diff --git a/arch/mips/mach-octeon/cpu.c b/arch/mips/mach-octeon/cpu.c
index 6cfcc3eae04e..fffd9dfb8580 100644
--- a/arch/mips/mach-octeon/cpu.c
+++ b/arch/mips/mach-octeon/cpu.c
@@ -393,14 +393,25 @@ static int init_bootcmd_console(void)
return ret;
}
-int arch_misc_init(void)
+int arch_early_init_r(void)
{
int ret;
+ /*
+ * Needs to be called pretty early, so that e.g. networking etc
+ * can access the bootmem infrastructure
+ */
ret = octeon_bootmem_init();
if (ret)
return ret;
+ return 0;
+}
+
+int arch_misc_init(void)
+{
+ int ret;
+
ret = octeon_configure_load_memory();
if (ret)
return ret;
--
2.35.1
^ permalink raw reply related [flat|nested] 52+ messages in thread* [PATCH 45/52] mips: octeon: cpu.c: Implement configure_lmtdma_window()
2022-03-30 10:06 [PATCH 00/52] mips: octeon: Add ethernet support Stefan Roese
` (39 preceding siblings ...)
2022-03-30 10:07 ` [PATCH 44/52] mips: octeon: cpu.c: Move bootmem init to arch_early_init_r() Stefan Roese
@ 2022-03-30 10:07 ` Stefan Roese
2022-03-30 10:07 ` [PATCH 46/52] mips: octeon: octeon_common.h: Move init SP because of increased image size Stefan Roese
` (8 subsequent siblings)
49 siblings, 0 replies; 52+ messages in thread
From: Stefan Roese @ 2022-03-30 10:07 UTC (permalink / raw)
To: u-boot; +Cc: awilliams, cchavva
Import configure_lmtdma_window from Marvell 2013 U-Boot as it's needed
for network functionality.
Signed-off-by: Stefan Roese <sr@denx.de>
---
arch/mips/mach-octeon/cpu.c | 34 +++++++++++++++++++++++++++++++++-
1 file changed, 33 insertions(+), 1 deletion(-)
diff --git a/arch/mips/mach-octeon/cpu.c b/arch/mips/mach-octeon/cpu.c
index fffd9dfb8580..1bdc6cd72903 100644
--- a/arch/mips/mach-octeon/cpu.c
+++ b/arch/mips/mach-octeon/cpu.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0+
/*
- * Copyright (C) 2020 Marvell International Ltd.
+ * Copyright (C) 2020-2022 Marvell International Ltd.
*/
#include <dm.h>
@@ -17,6 +17,8 @@
#include <mach/cvmx-bootmem.h>
#include <mach/cvmx-regs.h>
#include <mach/cvmx-sata-defs.h>
+#include <mach/octeon-model.h>
+#include <mach/octeon-feature.h>
DECLARE_GLOBAL_DATA_PTR;
@@ -393,6 +395,33 @@ static int init_bootcmd_console(void)
return ret;
}
+static void configure_lmtdma_window(void)
+{
+ u64 tmp;
+ u64 addr;
+ u64 end_addr;
+
+ CVMX_MF_CVM_MEM_CTL(tmp);
+ tmp &= ~0x1ffull;
+ tmp |= 0x104ull;
+
+ /* enable LMTDMA */
+ tmp |= (1ull << 51);
+ /* configure scratch line 2 for LMT */
+ /* TODO: reserve this scratch line, so that others will not use it */
+ /* TODO: store LMTLINE in global var */
+ tmp |= (CVMX_PKO_LMTLINE << 45);
+ /* clear LMTLINE in scratch */
+ addr = CVMX_PKO_LMTLINE * CVMX_CACHE_LINE_SIZE;
+ end_addr = addr + CVMX_CACHE_LINE_SIZE;
+
+ while (addr < end_addr) {
+ *CASTPTR(volatile u64, addr + CVMX_SCRATCH_BASE) = (u64)0;
+ addr += 8;
+ }
+ CVMX_MT_CVM_MEM_CTL(tmp);
+}
+
int arch_early_init_r(void)
{
int ret;
@@ -405,6 +434,9 @@ int arch_early_init_r(void)
if (ret)
return ret;
+ if (octeon_has_feature(OCTEON_FEATURE_PKO3))
+ configure_lmtdma_window();
+
return 0;
}
--
2.35.1
^ permalink raw reply related [flat|nested] 52+ messages in thread* [PATCH 46/52] mips: octeon: octeon_common.h: Move init SP because of increased image size
2022-03-30 10:06 [PATCH 00/52] mips: octeon: Add ethernet support Stefan Roese
` (40 preceding siblings ...)
2022-03-30 10:07 ` [PATCH 45/52] mips: octeon: cpu.c: Implement configure_lmtdma_window() Stefan Roese
@ 2022-03-30 10:07 ` Stefan Roese
2022-03-30 10:07 ` [PATCH 47/52] mips: octeon: mrvl, cn73xx.dtsi: Add ethernet (BGX) and SMI DT nodes Stefan Roese
` (7 subsequent siblings)
49 siblings, 0 replies; 52+ messages in thread
From: Stefan Roese @ 2022-03-30 10:07 UTC (permalink / raw)
To: u-boot; +Cc: awilliams, cchavva
This patch moves CONFIG_SYS_INIT_SP_OFFSET to a higher address so that
it does not interfere with larger U-Boot images. This was noticed, while
adding network support to the EBB7304 board.
Signed-off-by: Stefan Roese <sr@denx.de>
---
include/configs/octeon_common.h | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/include/configs/octeon_common.h b/include/configs/octeon_common.h
index 23bb4f676f8e..884745d514a3 100644
--- a/include/configs/octeon_common.h
+++ b/include/configs/octeon_common.h
@@ -8,7 +8,7 @@
#define __OCTEON_COMMON_H__
#if defined(CONFIG_RAM_OCTEON)
-#define CONFIG_SYS_INIT_SP_OFFSET 0x20100000
+#define CONFIG_SYS_INIT_SP_OFFSET 0x20180000
#else
/* No DDR init -> run in L2 cache with limited resources */
#define CONFIG_SYS_INIT_SP_OFFSET 0x00180000
--
2.35.1
^ permalink raw reply related [flat|nested] 52+ messages in thread* [PATCH 47/52] mips: octeon: mrvl, cn73xx.dtsi: Add ethernet (BGX) and SMI DT nodes
2022-03-30 10:06 [PATCH 00/52] mips: octeon: Add ethernet support Stefan Roese
` (41 preceding siblings ...)
2022-03-30 10:07 ` [PATCH 46/52] mips: octeon: octeon_common.h: Move init SP because of increased image size Stefan Roese
@ 2022-03-30 10:07 ` Stefan Roese
2022-03-30 10:07 ` [PATCH 48/52] mips: octeon: mrvl, octeon-ebb7304.dts: Add ethernet DT support Stefan Roese
` (6 subsequent siblings)
49 siblings, 0 replies; 52+ messages in thread
From: Stefan Roese @ 2022-03-30 10:07 UTC (permalink / raw)
To: u-boot; +Cc: awilliams, cchavva
Add the Octeon ethernet (BGX) and SMI DT node to the dtsi file.
Signed-off-by: Stefan Roese <sr@denx.de>
---
arch/mips/dts/mrvl,cn73xx.dtsi | 35 ++++++++++++++++++++++++++++++++++
1 file changed, 35 insertions(+)
diff --git a/arch/mips/dts/mrvl,cn73xx.dtsi b/arch/mips/dts/mrvl,cn73xx.dtsi
index 2a17f7a6a63e..77f3548a326a 100644
--- a/arch/mips/dts/mrvl,cn73xx.dtsi
+++ b/arch/mips/dts/mrvl,cn73xx.dtsi
@@ -267,5 +267,40 @@
interrupts = <0x6c010 4>;
};
};
+
+ /* SMI1 */
+ smi1: mdio@1180000003880 {
+ compatible = "cavium,octeon-3860-mdio";
+ reg = <0x11800 0x00003880 0x0 0x40>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ /* BGX 0 */
+ bgx0: ethernet-mac-nexus@11800e0000000 {
+ compatible = "cavium,octeon-7890-bgx";
+ reg = <0x11800 0xe0000000 0x0 0x1000000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ /* BGX 1 */
+ bgx1: ethernet-mac-nexus@11800e1000000 {
+ compatible = "cavium,octeon-7890-bgx";
+ reg = <0x11800 0xe1000000 0x0 0x1000000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ /* BGX 2*/
+ bgx2: ethernet-mac-nexus@11800e2000000 {
+ compatible = "cavium,octeon-7890-bgx";
+ reg = <0x11800 0xe2000000 0x0 0x1000000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
};
};
--
2.35.1
^ permalink raw reply related [flat|nested] 52+ messages in thread* [PATCH 48/52] mips: octeon: mrvl, octeon-ebb7304.dts: Add ethernet DT support
2022-03-30 10:06 [PATCH 00/52] mips: octeon: Add ethernet support Stefan Roese
` (42 preceding siblings ...)
2022-03-30 10:07 ` [PATCH 47/52] mips: octeon: mrvl, cn73xx.dtsi: Add ethernet (BGX) and SMI DT nodes Stefan Roese
@ 2022-03-30 10:07 ` Stefan Roese
2022-03-30 10:07 ` [PATCH 49/52] mips: octeon: mrvl, octeon-nic23.dts: " Stefan Roese
` (5 subsequent siblings)
49 siblings, 0 replies; 52+ messages in thread
From: Stefan Roese @ 2022-03-30 10:07 UTC (permalink / raw)
To: u-boot; +Cc: awilliams, cchavva
Add the Octeon ethernet (BGX), SMI and PHY DT nodes to the EBB7304 dts
file to enable ethernet support on this board.
Signed-off-by: Stefan Roese <sr@denx.de>
---
arch/mips/dts/mrvl,octeon-ebb7304.dts | 45 +++++++++++++++++++++++++++
1 file changed, 45 insertions(+)
diff --git a/arch/mips/dts/mrvl,octeon-ebb7304.dts b/arch/mips/dts/mrvl,octeon-ebb7304.dts
index fda559d8629d..08247eb4e0ee 100644
--- a/arch/mips/dts/mrvl,octeon-ebb7304.dts
+++ b/arch/mips/dts/mrvl,octeon-ebb7304.dts
@@ -201,3 +201,48 @@
cd-gpios = <&gpio 25 1>; /* active low */
};
};
+
+/* SMI_1 -- Available on rev 2 and later boards */
+&smi1 {
+ /**
+ * The phy names are broken down as follows:
+ * (m)phyxxyzzs
+ * where:
+ * xx = 01 for SGMII, 10 for DXAUI, 20 for RXAUI
+ * and 40 for XFI/LXAUI
+ * y = QLM/DLM number
+ * zz = PHY address (decimal)
+ * s = sub-phy number in the case of the Cortina
+ * PHY
+ * a mphy is a nexus phy that contains one or more
+ * sub-phys, for example the Cortina CS4223.
+ */
+
+ /* QLM 2 */
+ phy01208: ethernet-phy@01208 {
+ reg = <8>;
+ compatible = "marvell,88e1240", "ethernet-phy-ieee802.3-c22";
+
+ marvell,reg-init = <3 0x10 0 0x8665>,
+ <3 0x11 0 0x00aa>,
+ <3 0x12 0 0x4105>,
+ <3 0x13 0 0x8a08>;
+
+ interrupt-parent = <&gpio>;
+ interrupts = <12 8>; /* Pin 12, active low */
+ };
+};
+
+/* BGX 0 */
+&bgx0 {
+ status = "okay";
+ phy-handle = <&phy01208>; /* put phy-handle in BGX node and MAC node */
+
+ /* SerDes 0, may differ from PCS Lane/LMAC */
+ eth0: ethernet-mac@D {
+ compatible = "cavium,octeon-7890-bgx-port";
+ reg = <0>;
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ phy-handle = <&phy01208>;
+ };
+};
--
2.35.1
^ permalink raw reply related [flat|nested] 52+ messages in thread* [PATCH 49/52] mips: octeon: mrvl, octeon-nic23.dts: Add ethernet DT support
2022-03-30 10:06 [PATCH 00/52] mips: octeon: Add ethernet support Stefan Roese
` (43 preceding siblings ...)
2022-03-30 10:07 ` [PATCH 48/52] mips: octeon: mrvl, octeon-ebb7304.dts: Add ethernet DT support Stefan Roese
@ 2022-03-30 10:07 ` Stefan Roese
2022-03-30 10:07 ` [PATCH 50/52] net: Add ethernet support for MIPS Octeon Stefan Roese
` (4 subsequent siblings)
49 siblings, 0 replies; 52+ messages in thread
From: Stefan Roese @ 2022-03-30 10:07 UTC (permalink / raw)
To: u-boot; +Cc: awilliams, cchavva
Add the Octeon ethernet (BGX) and SFP DT nodes to the NIC23 dts file to
enable ethernet support on this board.
Signed-off-by: Stefan Roese <sr@denx.de>
---
arch/mips/dts/mrvl,octeon-nic23.dts | 238 ++++++++++++++++++++++++++++
1 file changed, 238 insertions(+)
diff --git a/arch/mips/dts/mrvl,octeon-nic23.dts b/arch/mips/dts/mrvl,octeon-nic23.dts
index 72ef56d834e4..dfbd51c92468 100644
--- a/arch/mips/dts/mrvl,octeon-nic23.dts
+++ b/arch/mips/dts/mrvl,octeon-nic23.dts
@@ -118,11 +118,208 @@
&i2c0 {
u-boot,dm-pre-reloc; /* Needed early for DDR SPD EEPROM */
clock-frequency = <100000>;
+
+ sfp0eeprom: eeprom@50 {
+ compatible = "atmel,24c01";
+ reg = <0x50>;
+ };
+
+ sfp0alerts: eeprom@51 {
+ compatible = "atmel,24c01";
+ reg = <0x51>;
+ };
};
&i2c1 {
u-boot,dm-pre-reloc; /* Needed early for DDR SPD EEPROM */
clock-frequency = <100000>;
+
+ vitesse@10 {
+ compatible = "vitesse,vsc7224";
+ reg = <0x10>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ /* Note that reset is active high with this device */
+ reset = <&gpio 7 0>;
+
+ /* LoS pin can be pulled low when there is a loss of signal */
+ los = <&gpio 6 0>;
+
+ vitesse,reg-init =
+ /* Clear all masks */
+ /* Page select FSYNC0 (0x30) */
+ <0x7f 0x0030>,
+ /* Set FSYNC0 for 10.3125Gbps */
+ <0x80 0x2841>, /* See Table 3. */
+ <0x81 0x0008>,
+ <0x82 0xc000>,
+ <0x83 0x0010>,
+ <0x84 0x1d00>,
+
+ /* All channels Rx settings set equally */
+ <0x7f 0x0050>,
+ /* Shrink EQ_BUFF */
+ <0x82 0x0014>,
+ /* Set EQVGA_ADAP = 1 (enable EQVGA circuitry),
+ * USE_UNIT_GAIN = 1 (EQVGA is in unity gain),
+ * USE_LPF = 0 (VGA adapt not using LPF),
+ * USE_EQVGA = 1 */
+ <0x89 0x7f13>,
+ /* Select min DFE Delay (DFE_DELAY) */
+ <0x90 0x5785>,
+ /* Set DFE 1-3 limit (DXMAX) = 32dec,
+ * AP Max limit = 127 decimal
+ */
+ <0x92 0x207f>,
+ /* Set AP Min limit = 32 decimal */
+ <0x93 0x2000>,
+ /* Set DFE Averaging to the slowest (DFE_AVG) */
+ <0x94 0x0031>,
+ /* Set Inductor Bypass OD_IND_BYP = 0 & fastest Rise/Fall */
+ <0x9c 0x0000>,
+ /* Setting DFE Boost = none. Must set for
+ * rev C (if DFE in adapt mode)
+ */
+ <0xaa 0x0888>,
+ /* Setting EQ Min = 8 & Max limit = 72 dec.
+ * Must set for rev C, otherwise EQ is 0
+ * (if EQ is in adaptive mode)
+ */
+ <0xa8 0x2408>,
+ /* Setting EQVGA = 96, when in EQVGA manual mode */
+ <0xa9 0x0060>,
+ /* Setting SW_BFOCM, bits 15:14 to 01 */
+ <0x87 0x4021>,
+ /* Turn off adaptive input equalization
+ * and VGA adaptive algorithm control.
+ */
+ <0x89 0x7313>,
+ /* Turn on adaptive input equalization
+ * and VGA adaptive algorithm control.
+ */
+ <0x89 0x7f13>;
+
+ vitesse-channel@0 {
+ compatible = "vitesse,vsc7224-channel";
+ reg = <0>;
+ direction-tx;
+ sfp-mac = <&eth0>;
+
+ /* TAP settings. The format of this is as
+ * follows:
+ * - cable length in meters, 0 = active or
+ * optical module
+ * - maintap value
+ * - pretap value
+ * - posttap value
+ *
+ * For the cable length, the value will apply
+ * for that cable length and greater until the
+ * next largest cable length specified. These
+ * values must be ordered first by channel mask
+ * then by cable length. These are typically
+ * set for the transmit channels, not the
+ * receive channels.
+ */
+ taps = <0 0x0013 0x000f 0x0000>,
+ <1 0x001f 0x000f 0x0004>,
+ <3 0x0014 0x000b 0x0004>,
+ <5 0x0014 0x0009 0x0006>,
+ <7 0x0014 0x000f 0x0000>,
+ <10 0x0012 0x000b 0x0013>;
+ };
+
+ vitesse-channel@1 {
+ compatible = "vitesse,vsc7224-channel";
+ reg = <1>;
+ /* Ignore mod_abs and module */
+ direction-rx;
+ sfp-mac = <&eth0>;
+
+ /* Disable pre-tap */
+ pretap-disable;
+
+ /* Disable post-tap */
+ posttap-disable;
+
+ /* Taps has the following fields:
+ * - cable length (ignored for rx)
+ * - main tap value
+ * - pre tap value
+ * - post tap value
+ *
+ * NOTE: if taps are disabled then they
+ * are not programmed.
+ */
+ taps = <0 0x0a 0x0b 0x10>;
+ };
+
+ vitesse-channel@2 {
+ compatible = "vitesse,vsc7224-channel";
+ reg = <2>;
+ direction-tx;
+ sfp-mac = <&eth1>;
+
+ /* TAP settings. The format of this is as
+ * follows:
+ * - cable length in meters, 0 = active or
+ * optical module
+ * - maintap value
+ * - pretap value
+ * - posttap value
+ *
+ * For the cable length, the value will apply
+ * for that cable length and greater until the
+ * next largest cable length specified. These
+ * values must be ordered first by channel mask
+ * then by cable length. These are typically
+ * set for the transmit channels, not the
+ * receive channels.
+ */
+ taps = <0 0x0013 0x000f 0x0000>,
+ <1 0x001f 0x000f 0x0004>,
+ <3 0x0014 0x000b 0x0004>,
+ <5 0x0014 0x0009 0x0006>,
+ <7 0x0014 0x000f 0x0000>,
+ <10 0x0012 0x000b 0x0013>;
+ };
+
+ vitesse-channel@3 {
+ compatible = "vitesse,vsc7224-channel";
+ reg = <3>;
+ /* Ignore mod_abs and module */
+ direction-rx;
+ sfp-mac = <&eth1>;
+
+ /* Disable pre-tap */
+ pretap-disable;
+
+ /* Disable post-tap */
+ posttap-disable;
+
+ /* Taps has the following fields:
+ * - cable length (ignored for rx)
+ * - main tap value
+ * - pre tap value
+ * - post tap value
+ *
+ * NOTE: if taps are disabled then they
+ * are not programmed.
+ */
+ taps = <0 0x0a 0x0b 0x10>;
+ };
+ };
+
+ sfp1eeprom: eeprom@50 {
+ compatible = "atmel,24c01";
+ reg = <0x50>;
+ };
+
+ sfp1alerts: eeprom@51 {
+ compatible = "atmel,24c01";
+ reg = <0x51>;
+ };
};
&mmc {
@@ -151,6 +348,26 @@
compatible = "marvell,pci-bootcmd";
status = "okay";
};
+
+ sfp0: sfp-slot@0 {
+ compatible = "ethernet,sfp-slot";
+ tx_disable = <&gpio 16 0>;
+ mod_abs = <&gpio 17 0>;
+ tx_error = <&gpio 19 0>;
+ rx_los = <&gpio 18 0>;
+ eeprom = <&sfp0eeprom>;
+ diag = <&sfp0alerts>;
+ };
+
+ sfp1: sfp-slot@1 {
+ compatible = "ethernet,sfp-slot";
+ tx_disable = <&gpio 21 0>;
+ mod_abs = <&gpio 22 0>;
+ tx_error = <&gpio 24 0>;
+ rx_los = <&gpio 23 0>;
+ eeprom = <&sfp1eeprom>;
+ diag = <&sfp1alerts>;
+ };
};
&spi {
@@ -160,3 +377,24 @@
reg = <0>;
};
};
+
+/* BGX 2 */
+&bgx2 {
+ status = "okay";
+
+ /* SerDes 0, may differ from PCS Lane/LMAC */
+ eth0: ethernet-mac@0 {
+ compatible = "cavium,octeon-7890-bgx-port";
+ reg = <0>;
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ sfp-slot = <&sfp0>;
+ };
+
+ /* SerDes 1, may differ from PCS Lane/LMAC */
+ eth1: ethernet-mac@1 {
+ compatible = "cavium,octeon-7890-bgx-port";
+ reg = <1>;
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ sfp-slot = <&sfp1>;
+ };
+};
--
2.35.1
^ permalink raw reply related [flat|nested] 52+ messages in thread* [PATCH 50/52] net: Add ethernet support for MIPS Octeon
2022-03-30 10:06 [PATCH 00/52] mips: octeon: Add ethernet support Stefan Roese
` (44 preceding siblings ...)
2022-03-30 10:07 ` [PATCH 49/52] mips: octeon: mrvl, octeon-nic23.dts: " Stefan Roese
@ 2022-03-30 10:07 ` Stefan Roese
2022-03-30 10:07 ` [PATCH 51/52] mips: octeon: ebb7304: Enable ethernet support Stefan Roese
` (3 subsequent siblings)
49 siblings, 0 replies; 52+ messages in thread
From: Stefan Roese @ 2022-03-30 10:07 UTC (permalink / raw)
To: u-boot; +Cc: awilliams, cchavva, Ramon Fried, Joe Hershberger
This patch adds the ethernet & MDIO driver for the MIPS Octeon II / III
SoC platform. Please note that these drivers are based on the 2013
U-Boot version from Marvell and make use of the platform supported
helper functions for the ethernet functionality, including stuff like
SFP handling.
Signed-off-by: Stefan Roese <sr@denx.de>
Cc: Ramon Fried <rfried.dev@gmail.com>
Cc: Joe Hershberger <joe.hershberger@ni.com>
---
drivers/net/Kconfig | 7 +
drivers/net/Makefile | 1 +
drivers/net/octeon/Makefile | 6 +
drivers/net/octeon/octeon_eth.c | 1060 ++++++++++++++++++++++++++++++
drivers/net/octeon/octeon_mdio.c | 226 +++++++
5 files changed, 1300 insertions(+)
create mode 100644 drivers/net/octeon/Makefile
create mode 100644 drivers/net/octeon/octeon_eth.c
create mode 100644 drivers/net/octeon/octeon_mdio.c
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 71e0cbafb412..76cf61f4c862 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -485,6 +485,13 @@ config MT7628_ETH
The MediaTek MT7628 ethernet interface is used on MT7628 and
MT7688 based boards.
+config NET_OCTEON
+ bool "MIPS Octeon ethernet support"
+ depends on ARCH_OCTEON
+ help
+ You must select Y to enable network device support for
+ MIPS Octeon SoCs. If unsure, say n
+
config NET_OCTEONTX
bool "OcteonTX Ethernet support"
depends on ARCH_OCTEONTX
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index a6d0c23f02d3..63144b27bf02 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -72,6 +72,7 @@ obj-$(CONFIG_MVNETA) += mvneta.o
obj-$(CONFIG_MVPP2) += mvpp2.o
obj-$(CONFIG_NATSEMI) += natsemi.o
obj-$(CONFIG_NETCONSOLE) += netconsole.o
+obj-$(CONFIG_NET_OCTEON) += octeon/
obj-$(CONFIG_NET_OCTEONTX) += octeontx/
obj-$(CONFIG_NET_OCTEONTX2) += octeontx2/
obj-$(CONFIG_NS8382X) += ns8382x.o
diff --git a/drivers/net/octeon/Makefile b/drivers/net/octeon/Makefile
new file mode 100644
index 000000000000..c573411a6293
--- /dev/null
+++ b/drivers/net/octeon/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Copyright (C) 2018-2022 Marvell International Ltd.
+#
+
+obj-$(CONFIG_NET_OCTEON) += octeon_eth.o octeon_mdio.o
diff --git a/drivers/net/octeon/octeon_eth.c b/drivers/net/octeon/octeon_eth.c
new file mode 100644
index 000000000000..fbb1afc08abc
--- /dev/null
+++ b/drivers/net/octeon/octeon_eth.c
@@ -0,0 +1,1060 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018-2022 Marvell International Ltd.
+ */
+
+#include <dm.h>
+#include <dm/device_compat.h>
+#include <env.h>
+#include <net.h>
+#include <netdev.h>
+#include <malloc.h>
+#include <miiphy.h>
+#include <misc.h>
+#include <asm/io.h>
+#include <linux/delay.h>
+
+#include <mach/cvmx-regs.h>
+#include <mach/cvmx-csr.h>
+#include <mach/cvmx-bootmem.h>
+#include <mach/octeon-model.h>
+#include <mach/cvmx-fuse.h>
+#include <mach/octeon-feature.h>
+#include <mach/octeon_fdt.h>
+#include <mach/cvmx-qlm.h>
+#include <mach/octeon_eth.h>
+#include <mach/octeon_qlm.h>
+#include <mach/cvmx-pcie.h>
+#include <mach/cvmx-coremask.h>
+
+#include <mach/cvmx-agl-defs.h>
+#include <mach/cvmx-asxx-defs.h>
+#include <mach/cvmx-bgxx-defs.h>
+#include <mach/cvmx-dbg-defs.h>
+#include <mach/cvmx-gmxx-defs.h>
+#include <mach/cvmx-gserx-defs.h>
+#include <mach/cvmx-ipd-defs.h>
+#include <mach/cvmx-l2c-defs.h>
+#include <mach/cvmx-npi-defs.h>
+#include <mach/cvmx-pcsx-defs.h>
+#include <mach/cvmx-pexp-defs.h>
+#include <mach/cvmx-pki-defs.h>
+#include <mach/cvmx-pko-defs.h>
+#include <mach/cvmx-smix-defs.h>
+#include <mach/cvmx-sriox-defs.h>
+#include <mach/cvmx-xcv-defs.h>
+#include <mach/cvmx-pcsxx-defs.h>
+
+#include <mach/cvmx-helper.h>
+#include <mach/cvmx-helper-board.h>
+#include <mach/cvmx-helper-fdt.h>
+#include <mach/cvmx-helper-bgx.h>
+#include <mach/cvmx-helper-cfg.h>
+
+#include <mach/cvmx-hwpko.h>
+#include <mach/cvmx-pko.h>
+#include <mach/cvmx-pki.h>
+#include <mach/cvmx-config.h>
+#include <mach/cvmx-mdio.h>
+
+/** Maximum receive packet size (hardware default is 1536) */
+#define CONFIG_OCTEON_NETWORK_MRU 1536
+
+#define OCTEON_BOOTLOADER_NAMED_BLOCK_TMP_PREFIX "__tmp"
+
+/**
+ * Enables RX packet debugging if octeon_debug_rx_packets is set in the
+ * environment.
+ */
+#define DEBUG_RX_PACKET
+
+/**
+ * Enables TX packet debugging if octeon_debug_tx_packets is set in the
+ * environment.
+ */
+#define DEBUG_TX_PACKET
+
+/* Global flag indicating common hw has been set up */
+static int octeon_global_hw_inited;
+
+#if defined(DEBUG_RX_PACKET) || defined(DEBUG_TX_PACKET)
+static int packet_rx_debug;
+static int packet_tx_debug;
+#endif
+
+/* Make sure that we have enough buffers to keep prefetching blocks happy.
+ * Absolute minimum is probably about 200.
+ */
+#define NUM_PACKET_BUFFERS 1000
+
+#define PKO_SHUTDOWN_TIMEOUT_VAL 100
+
+/* Define the offsets from the base CSR */
+#define GMX_PRT_CFG 0x10
+
+#define GMX_RX_FRM_MAX 0x30
+#define GMX_RX_JABBER 0x38
+
+#define GMX_RX_ADR_CTL 0x100
+#define GMX_RX_ADR_CAM_EN 0x108
+#define GMX_RX_ADR_CAM0 0x180
+#define GMX_RX_ADR_CAM1 0x188
+#define GMX_RX_ADR_CAM2 0x190
+#define GMX_RX_ADR_CAM3 0x198
+#define GMX_RX_ADR_CAM4 0x1a0
+#define GMX_RX_ADR_CAM5 0x1a8
+#define GMX_TX_OVR_BP 0x4c8
+
+/**
+ * Set the hardware MAC address for a device
+ *
+ * @param interface interface of port to set
+ * @param index index of port to set MAC address for
+ * @param addr Address structure to change it too.
+ * @return Zero on success
+ */
+static int cvm_oct_set_mac_address(struct udevice *dev)
+{
+ struct octeon_eth_info *priv = dev_get_priv(dev);
+ struct eth_pdata *pdata = dev_get_plat(dev);
+ cvmx_gmxx_prtx_cfg_t gmx_cfg;
+ cvmx_helper_interface_mode_t mode;
+ cvmx_gmxx_rxx_adr_ctl_t control;
+ u8 *ptr = (uint8_t *)pdata->enetaddr;
+ int interface = priv->interface;
+ int index = priv->index;
+ u64 mac = 0;
+ u64 gmx_reg;
+ int xipd_port;
+ int i;
+
+ for (i = 0; i < 6; i++)
+ mac = (mac << 8) | (u64)(ptr[i]);
+
+ debug("%s(%s (%pM))\n", __func__, dev->name, ptr);
+ mode = cvmx_helper_interface_get_mode(interface);
+
+ /* It's rather expensive to change the MAC address for BGX so we only
+ * do this if it has changed or not been set previously.
+ */
+ if (octeon_has_feature(OCTEON_FEATURE_BGX)) {
+ xipd_port = cvmx_helper_get_ipd_port(interface, index);
+ if (priv->last_bgx_mac != mac || !priv->bgx_mac_set) {
+ cvmx_helper_bgx_set_mac(xipd_port, 1, 2, mac);
+ priv->last_bgx_mac = mac;
+ priv->bgx_mac_set = 1;
+ }
+ return 0;
+ }
+
+ if (mode == CVMX_HELPER_INTERFACE_MODE_AGL) {
+ gmx_reg = CVMX_AGL_GMX_RXX_INT_REG(0);
+ } else {
+ gmx_reg = CVMX_GMXX_RXX_INT_REG(index, interface);
+ csr_wr(CVMX_GMXX_SMACX(index, interface), mac);
+ }
+
+ /* Disable interface */
+ gmx_cfg.u64 = csr_rd(gmx_reg + GMX_PRT_CFG);
+ csr_wr(gmx_reg + GMX_PRT_CFG, gmx_cfg.u64 & ~1ull);
+ debug("%s: gmx reg: 0x%llx\n", __func__, gmx_reg);
+
+ csr_wr(gmx_reg + GMX_RX_ADR_CAM0, ptr[0]);
+ csr_wr(gmx_reg + GMX_RX_ADR_CAM1, ptr[1]);
+ csr_wr(gmx_reg + GMX_RX_ADR_CAM2, ptr[2]);
+ csr_wr(gmx_reg + GMX_RX_ADR_CAM3, ptr[3]);
+ csr_wr(gmx_reg + GMX_RX_ADR_CAM4, ptr[4]);
+ csr_wr(gmx_reg + GMX_RX_ADR_CAM5, ptr[5]);
+
+ control.u64 = 0;
+ control.s.bcst = 1; /* Allow broadcast MAC addresses */
+ control.s.mcst = 1; /* Force reject multicast packets */
+ control.s.cam_mode = 1; /* Filter packets based on the CAM */
+
+ csr_wr(gmx_reg + GMX_RX_ADR_CTL, control.u64);
+
+ csr_wr(gmx_reg + GMX_RX_ADR_CAM_EN, 1);
+
+ /* Return interface to previous enable state */
+ csr_wr(gmx_reg + GMX_PRT_CFG, gmx_cfg.u64);
+
+ return 0;
+}
+
+static void cvm_oct_fill_hw_memory(u64 pool, u64 size, u64 elements)
+{
+ static int alloc_count;
+ char tmp_name[64];
+ int ret;
+
+ debug("%s: pool: 0x%llx, size: 0x%llx, count: 0x%llx\n",
+ __func__, pool, size, elements);
+ sprintf(tmp_name, "%s_fpa_alloc_%d",
+ OCTEON_BOOTLOADER_NAMED_BLOCK_TMP_PREFIX, alloc_count++);
+ ret = cvmx_fpa_setup_pool(pool, tmp_name, NULL, size, elements);
+}
+
+/**
+ * Configure common hardware for all interfaces
+ */
+static void cvm_oct_configure_common_hw(void)
+{
+ int mru = env_get_ulong("octeon_mru", 0, CONFIG_OCTEON_NETWORK_MRU);
+ int packet_pool_size = CVMX_FPA_PACKET_POOL_SIZE;
+
+ if (mru > packet_pool_size)
+ packet_pool_size = (mru + CVMX_CACHE_LINE_SIZE - 1) &
+ ~(CVMX_CACHE_LINE_SIZE - 1);
+
+ /* Setup the FPA */
+ cvmx_fpa_enable();
+
+ cvm_oct_fill_hw_memory(CVMX_FPA_WQE_POOL, CVMX_FPA_WQE_POOL_SIZE,
+ NUM_PACKET_BUFFERS);
+#if CVMX_FPA_OUTPUT_BUFFER_POOL != CVMX_FPA_PACKET_POOL
+ if (!octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
+ cvm_oct_fill_hw_memory(CVMX_FPA_OUTPUT_BUFFER_POOL,
+ CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, 128);
+ }
+#endif
+ cvm_oct_fill_hw_memory(CVMX_FPA_PACKET_POOL, packet_pool_size,
+ NUM_PACKET_BUFFERS);
+
+ cvmx_helper_initialize_packet_io_global();
+ cvmx_helper_initialize_packet_io_local();
+
+ /* The MRU defaults to 1536 bytes by the hardware. Setting
+ * CONFIG_OCTEON_NETWORK_MRU allows this to be overridden.
+ */
+ if (octeon_has_feature(OCTEON_FEATURE_PKI)) {
+ struct cvmx_pki_global_config gbl_cfg;
+ int i;
+
+ cvmx_pki_read_global_config(0, &gbl_cfg);
+ for (i = 0; i < CVMX_PKI_NUM_FRAME_CHECK; i++)
+ gbl_cfg.frm_len[i].maxlen = mru;
+ cvmx_pki_write_global_config(0, &gbl_cfg);
+ }
+
+ /* Set POW get work timeout to maximum value */
+ if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE) ||
+ octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE))
+ csr_wr(CVMX_SSO_NW_TIM, 0x3ff);
+ else
+ csr_wr(CVMX_POW_NW_TIM, 0x3ff);
+}
+
+/**
+ * Enables Ethernet devices to allow packets to be transmitted and received.
+ * For example, this is activated when the DHCP command is issued.
+ *
+ * @param dev Ethernet device to initialize
+ * @param bis board data structure, not used.
+ *
+ * @return 1 for success
+ */
+int octeon_eth_init(struct udevice *dev)
+{
+ struct octeon_eth_info *priv = dev_get_priv(dev);
+
+ debug("%s(), dev_ptr: %p, dev: %s, port: %d\n", __func__, dev,
+ dev->name, priv->port);
+
+ if (priv->initted_flag) {
+ debug("%s already initialized\n", dev->name);
+ return 1;
+ }
+
+ if (!octeon_global_hw_inited) {
+ debug("Initializing common hardware\n");
+ cvm_oct_configure_common_hw();
+ }
+
+ /* Ignore backpressure on RGMII ports */
+ if (!octeon_has_feature(OCTEON_FEATURE_BGX))
+ csr_wr(priv->gmx_base + GMX_TX_OVR_BP, 0xf << 8 | 0xf);
+
+ debug("%s: Setting MAC address\n", __func__);
+ cvm_oct_set_mac_address(dev);
+
+ if (!octeon_global_hw_inited) {
+ debug("Enabling packet input\n");
+ cvmx_helper_ipd_and_packet_input_enable();
+ octeon_global_hw_inited = 1;
+
+ /* Connect, configure and start the PHY, if the device is
+ * connected to one. If not, then it's most likely an SFP
+ * enabled port, which does not have such PHY setup here.
+ */
+ if (priv->mdio_dev) {
+ priv->phy_dev = dm_eth_phy_connect(dev);
+ phy_config(priv->phy_dev);
+ phy_startup(priv->phy_dev);
+ }
+ }
+ priv->enabled = 0;
+ priv->initted_flag = 1;
+
+ debug("%s exiting successfully\n", __func__);
+ return 1;
+}
+
+/**
+ * Initializes the specified interface and port
+ *
+ * @param interface interface to initialize
+ * @param index port index on interface
+ * @param port ipd port number
+ * @param if_mode interface mode
+ *
+ * @return 0 for success, -1 if out of memory, 1 if port is invalid
+ */
+static int octeon_eth_initialize(struct udevice *dev, int interface,
+ int index, int port,
+ cvmx_helper_interface_mode_t if_mode)
+{
+ struct octeon_eth_info *oct_eth_info = dev_get_priv(dev);
+ int eth;
+
+ eth = cvmx_helper_get_port_fdt_node_offset(interface, index);
+ if (eth <= 0) {
+ debug("ERROR: No fdt node for interface %d, index %d\n",
+ interface, index);
+ return 1;
+ }
+
+ oct_eth_info->is_c45 = (if_mode == CVMX_HELPER_INTERFACE_MODE_XAUI) ||
+ (if_mode == CVMX_HELPER_INTERFACE_MODE_RXAUI) ||
+ (if_mode == CVMX_HELPER_INTERFACE_MODE_XFI) ||
+ (if_mode == CVMX_HELPER_INTERFACE_MODE_XLAUI) ||
+ (if_mode == CVMX_HELPER_INTERFACE_MODE_10G_KR) ||
+ (if_mode == CVMX_HELPER_INTERFACE_MODE_40G_KR4);
+ oct_eth_info->port = port;
+ oct_eth_info->index = index;
+ oct_eth_info->interface = interface;
+ oct_eth_info->initted_flag = 0;
+ /* This is guaranteed to force the link state to be printed out */
+ oct_eth_info->link_state = 0xffffffffffffffffULL;
+ debug("Setting up port: %d, int: %d, index: %d, device: octeth%d\n",
+ oct_eth_info->port, oct_eth_info->interface, oct_eth_info->index,
+ dev_seq(dev));
+ if (if_mode == CVMX_HELPER_INTERFACE_MODE_AGL) {
+ oct_eth_info->gmx_base = CVMX_AGL_GMX_RXX_INT_REG(0);
+ } else {
+ if (!octeon_has_feature(OCTEON_FEATURE_BGX))
+ oct_eth_info->gmx_base =
+ CVMX_GMXX_RXX_INT_REG(index, interface);
+ }
+
+ return 0;
+}
+
+/**
+ * @INTERNAL
+ * Converts a BGX address to the node, interface and port number
+ *
+ * @param bgx_addr Address of CSR register
+ *
+ * @return node, interface and port number, will be -1 for invalid address.
+ */
+static struct cvmx_xiface __cvmx_bgx_reg_addr_to_xiface(u64 bgx_addr)
+{
+ struct cvmx_xiface xi = { -1, -1 };
+
+ xi.node = cvmx_csr_addr_to_node(bgx_addr);
+ bgx_addr = cvmx_csr_addr_strip_node(bgx_addr);
+ if ((bgx_addr & 0xFFFFFFFFF0000000) != 0x00011800E0000000) {
+ debug("%s: Invalid BGX address 0x%llx\n", __func__,
+ (unsigned long long)bgx_addr);
+ xi.node = -1;
+ return xi;
+ }
+ xi.interface = (bgx_addr >> 24) & 0x0F;
+
+ return xi;
+}
+
+static int octeon_nic_probe(struct udevice *dev)
+{
+ struct octeon_eth_info *info = dev_get_priv(dev);
+ struct ofnode_phandle_args phandle;
+ struct cvmx_xiface xi;
+ ofnode node, mdio_node;
+ int ipd_port;
+ int intf;
+ int ret;
+
+ /* The empty stub is to keep cvmx_user_app_init() happy. */
+ cvmx_npi_max_pknds = 1;
+ __cvmx_helper_init_port_valid();
+
+ xi = __cvmx_bgx_reg_addr_to_xiface(dev_read_addr(dev));
+ intf = xi.interface;
+ debug("%s: Found BGX node %d, interface %d\n", __func__, xi.node, intf);
+
+ ipd_port = cvmx_helper_get_ipd_port(intf, xi.node);
+ ret = octeon_eth_initialize(dev, intf, xi.node, ipd_port,
+ cvmx_helper_interface_get_mode(intf));
+
+ /* Move to subnode, as this includes the "phy-handle" */
+ node = dev_read_first_subnode(dev);
+
+ /* Check if an SFP module is connected, then no MDIO is probed */
+ ret = ofnode_parse_phandle_with_args(node, "sfp-slot", NULL, 0, 0,
+ &phandle);
+ if (!ret) {
+ dev_dbg(dev, "sfp-slot found, not probing for MDIO\n");
+ return 0;
+ }
+
+ /* Continue with MDIO probing */
+ ret = ofnode_parse_phandle_with_args(node, "phy-handle", NULL, 0, 0,
+ &phandle);
+ if (ret) {
+ dev_err(dev, "phy-handle not found in subnode\n");
+ return -ENODEV;
+ }
+
+ /* Get MDIO node */
+ mdio_node = ofnode_get_parent(phandle.node);
+ ret = uclass_get_device_by_ofnode(UCLASS_MDIO, mdio_node,
+ &info->mdio_dev);
+ if (ret) {
+ dev_err(dev, "mdio_dev not found\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+/**
+ * Sets the hardware MAC address of the Ethernet device
+ *
+ * @param dev - Ethernet device
+ *
+ * @return 0 for success
+ */
+int octeon_eth_write_hwaddr(struct udevice *dev)
+{
+ struct octeon_eth_info *oct_eth_info = dev_get_priv(dev);
+ struct eth_pdata *pdata = dev_get_plat(dev);
+
+ /* Skip if the interface isn't yet enabled */
+ if (!oct_eth_info->enabled) {
+ debug("%s: Interface not enabled, not setting MAC address\n",
+ __func__);
+ return 0;
+ }
+ debug("%s: Setting %s address to %02x:%02x:%02x:%02x:%02x:%02x\n",
+ __func__, dev->name, pdata->enetaddr[0], pdata->enetaddr[1],
+ pdata->enetaddr[2], pdata->enetaddr[3], pdata->enetaddr[4],
+ pdata->enetaddr[5]);
+ return cvm_oct_set_mac_address(dev);
+}
+
+/**
+ * Enables and disables the XCV RGMII interface
+ *
+ * @param interface Interface number
+ * @param index Port index (should be 0 for RGMII)
+ * @param enable True to enable it, false to disable it
+ */
+static void octeon_bgx_xcv_rgmii_enable(int interface, int index, bool enable)
+{
+ union cvmx_xcv_reset xcv_reset;
+
+ debug("%s(%d, %d, %sable)\n", __func__, interface, index,
+ enable ? "en" : "dis");
+ xcv_reset.u64 = csr_rd(CVMX_XCV_RESET);
+ xcv_reset.s.rx_pkt_rst_n = enable ? 1 : 0;
+ csr_wr(CVMX_XCV_RESET, xcv_reset.u64);
+}
+
+/**
+ * Enables a SGMII interface
+ *
+ * @param dev - Ethernet device to initialize
+ */
+void octeon_eth_sgmii_enable(struct udevice *dev)
+{
+ struct octeon_eth_info *oct_eth_info;
+ cvmx_gmxx_prtx_cfg_t gmx_cfg;
+ int index, interface;
+ cvmx_helper_interface_mode_t if_mode;
+
+ oct_eth_info = dev_get_priv(dev);
+ interface = oct_eth_info->interface;
+ index = oct_eth_info->index;
+
+ debug("%s(%s) (%d.%d)\n", __func__, dev->name, interface, index);
+ if (octeon_has_feature(OCTEON_FEATURE_BGX)) {
+ cvmx_bgxx_cmrx_config_t cmr_config;
+
+ cmr_config.u64 =
+ csr_rd(CVMX_BGXX_CMRX_CONFIG(index, interface));
+ cmr_config.s.enable = 1;
+ cmr_config.s.data_pkt_tx_en = 1;
+ cmr_config.s.data_pkt_rx_en = 1;
+ csr_wr(CVMX_BGXX_CMRX_CONFIG(index, interface), cmr_config.u64);
+ mdelay(100);
+ if (cvmx_helper_bgx_is_rgmii(interface, index))
+ octeon_bgx_xcv_rgmii_enable(interface, index, true);
+ } else {
+ if_mode = cvmx_helper_interface_get_mode(interface);
+ /* Normal operating mode. */
+
+ if (if_mode == CVMX_HELPER_INTERFACE_MODE_SGMII ||
+ if_mode == CVMX_HELPER_INTERFACE_MODE_QSGMII) {
+ cvmx_pcsx_miscx_ctl_reg_t pcsx_miscx_ctl_reg;
+
+ debug(" if mode: (Q)SGMII\n");
+ pcsx_miscx_ctl_reg.u64 = csr_rd(CVMX_PCSX_MISCX_CTL_REG(index, interface));
+ pcsx_miscx_ctl_reg.s.gmxeno = 0;
+ csr_wr(CVMX_PCSX_MISCX_CTL_REG(index, interface),
+ pcsx_miscx_ctl_reg.u64);
+ } else if (if_mode != CVMX_HELPER_INTERFACE_MODE_AGL) {
+ cvmx_pcsxx_misc_ctl_reg_t pcsxx_misc_ctl_reg;
+
+ debug(" if mode: AGM\n");
+ pcsxx_misc_ctl_reg.u64 =
+ csr_rd(CVMX_PCSXX_MISC_CTL_REG(interface));
+ pcsxx_misc_ctl_reg.s.gmxeno = 0;
+ csr_wr(CVMX_PCSXX_MISC_CTL_REG(interface),
+ pcsxx_misc_ctl_reg.u64);
+ }
+
+ gmx_cfg.u64 = csr_rd(oct_eth_info->gmx_base + GMX_PRT_CFG);
+ gmx_cfg.s.en = 1;
+ csr_wr(oct_eth_info->gmx_base + GMX_PRT_CFG, gmx_cfg.u64);
+ gmx_cfg.u64 = csr_rd(oct_eth_info->gmx_base + GMX_PRT_CFG);
+ }
+}
+
+/**
+ * Enables an Ethernet interface
+ *
+ * @param dev - Ethernet device to enable
+ */
+void octeon_eth_enable(struct udevice *dev)
+{
+ struct octeon_eth_info *oct_eth_info;
+ u64 tmp;
+ int interface;
+ cvmx_helper_interface_mode_t if_mode;
+
+ oct_eth_info = dev_get_priv(dev);
+ interface = oct_eth_info->interface;
+ if_mode = cvmx_helper_interface_get_mode(interface);
+
+ switch (if_mode) {
+ case CVMX_HELPER_INTERFACE_MODE_RGMII:
+ case CVMX_HELPER_INTERFACE_MODE_GMII:
+ debug(" rgmii/gmii mode\n");
+ tmp = csr_rd(CVMX_ASXX_RX_PRT_EN(interface));
+ tmp |= (1ull << (oct_eth_info->port & 0x3));
+ csr_wr(CVMX_ASXX_RX_PRT_EN(interface), tmp);
+ tmp = csr_rd(CVMX_ASXX_TX_PRT_EN(interface));
+ tmp |= (1ull << (oct_eth_info->port & 0x3));
+ csr_wr(CVMX_ASXX_TX_PRT_EN(interface), tmp);
+ octeon_eth_write_hwaddr(dev);
+ break;
+
+ case CVMX_HELPER_INTERFACE_MODE_SGMII:
+ case CVMX_HELPER_INTERFACE_MODE_XAUI:
+ case CVMX_HELPER_INTERFACE_MODE_RXAUI:
+ case CVMX_HELPER_INTERFACE_MODE_XLAUI:
+ case CVMX_HELPER_INTERFACE_MODE_XFI:
+ case CVMX_HELPER_INTERFACE_MODE_10G_KR:
+ case CVMX_HELPER_INTERFACE_MODE_40G_KR4:
+ case CVMX_HELPER_INTERFACE_MODE_MIXED:
+ case CVMX_HELPER_INTERFACE_MODE_AGL:
+ debug(" SGMII/XAUI/etc.\n");
+ octeon_eth_sgmii_enable(dev);
+ octeon_eth_write_hwaddr(dev);
+ break;
+
+ default:
+ break;
+ }
+}
+
+void octeon_phy_port_check(struct udevice *dev)
+{
+ struct octeon_eth_info *oct_eth_info = dev_get_priv(dev);
+ struct phy_device *phydev = oct_eth_info->phydev;
+
+ if (oct_eth_info->phy_port_check)
+ oct_eth_info->phy_port_check(phydev);
+}
+
+/**
+ * Configure the RGMII port for the negotiated speed
+ *
+ * @param dev Linux device for the RGMII port
+ */
+static void cvm_oct_configure_rgmii_speed(struct udevice *dev)
+{
+ struct octeon_eth_info *priv = dev_get_priv(dev);
+ int port = priv->port;
+ cvmx_helper_link_info_t link_state = cvmx_helper_link_get(port);
+
+ /* If the port is down, some PHYs need us to check modules, etc. */
+ if (!link_state.s.link_up)
+ octeon_phy_port_check(dev);
+
+ if (link_state.u64 != priv->link_state) {
+ cvmx_helper_interface_mode_t mode;
+
+ octeon_phy_port_check(dev);
+
+ debug("%s(%s): Link state changed\n", __func__, dev->name);
+ printf("%s: ", dev->name);
+ if (!link_state.s.link_up) {
+ puts("Down ");
+ } else {
+ printf("Up %d Mbps ", link_state.s.speed);
+ if (link_state.s.full_duplex)
+ puts("Full duplex ");
+ else
+ puts("Half duplex ");
+ }
+ mode = cvmx_helper_interface_get_mode(priv->interface);
+ printf("(port %2d) (%s)\n", port,
+ cvmx_helper_interface_mode_to_string(mode));
+ debug("%s: Setting link state\n", __func__);
+ cvmx_helper_link_set(priv->port, link_state);
+ priv->link_state = link_state.u64;
+ }
+}
+
+#if defined(DEBUG_TX_PACKET) || defined(DEBUG_RX_PACKET)
+static void print_mac(const char *label, const uint8_t *mac_addr)
+{
+ printf("%s: %02x:%02x:%02x:%02x:%02x:%02x", label, mac_addr[0],
+ mac_addr[1], mac_addr[2], mac_addr[3], mac_addr[4], mac_addr[5]);
+}
+
+static void print_ip(const void *packet)
+{
+ u8 *p = (uint8_t *)packet;
+ u16 length;
+ u8 hdr_len;
+
+ puts("IP Header:\n");
+ if ((p[0] & 0xF0) != 0x40) {
+ printf("Invalid IP version %d\n", *p >> 4);
+ return;
+ }
+ hdr_len = *p & 0x0F;
+ if (hdr_len < 5)
+ printf("Invalid IP header length %d\n", hdr_len);
+ printf(" Version: 4, Header length: %d\n", hdr_len);
+ length = (p[2] << 8) | p[3];
+ printf(" TOS: 0x%02x, length: %d\n", p[1], length);
+ printf(" ID: %d, %s%s%s fragment offset: %d\n", (p[4] << 8) | p[5],
+ p[6] & 0x80 ? "congested, " : "", p[6] & 0x40 ? "DF, " : "",
+ p[6] & 0x20 ? "MF, " : "", ((p[6] & 0x1F) << 8) | p[7]);
+ printf(" TTL: %d, Protocol: %d, Header Checksum: 0x%x\n", p[8], p[9],
+ (p[10] << 8) | p[11]);
+ printf(" Source IP: %d.%d.%d.%d\n Destination IP: %d.%d.%d.%d\n",
+ p[12], p[13], p[14], p[15], p[16], p[17], p[18], p[19]);
+ if (p[9] == 17 || p[9] == 6)
+ printf(" Source port: %u, Destination port: %u\n",
+ (p[20] << 8) | p[21], (p[22] << 8) | p[23]);
+ puts("\n");
+}
+
+/**
+ * Prints out a packet for debugging purposes
+ *
+ * @param[in] packet - pointer to packet data
+ * @param length - length of packet in bytes
+ */
+static void print_packet(const void *packet, int length)
+{
+ int i, j;
+ const unsigned char *up = packet;
+ u16 type = (up[12] << 8 | up[13]);
+ int start = 14;
+
+ print_mac("DMAC", &up[0]);
+ puts(" ");
+ print_mac("SMAC", &up[6]);
+ printf(" TYPE: %04x\n", type);
+
+ if (type == 0x0800)
+ print_ip(&up[start]);
+
+ for (i = start; (i + 16) < length; i += 16) {
+ printf("%04x ", i);
+ for (j = 0; j < 16; ++j)
+ printf("%02x ", up[i + j]);
+
+ printf(" ");
+ for (j = 0; j < 16; ++j)
+ printf("%c",
+ ((up[i + j] >= ' ') && (up[i + j] <= '~')) ?
+ up[i + j] :
+ '.');
+ printf("\n");
+ }
+ printf("%04x ", i);
+ for (j = 0; i + j < length; ++j)
+ printf("%02x ", up[i + j]);
+
+ for (; j < 16; ++j)
+ printf(" ");
+
+ printf(" ");
+ for (j = 0; i + j < length; ++j)
+ printf("%c", ((up[i + j] >= ' ') && (up[i + j] <= '~')) ?
+ up[i + j] :
+ '.');
+
+ printf("\n");
+}
+#endif
+
+/**
+ * String representation of error codes.
+ */
+static const char * const rx_error_codes[] = {
+ "OK",
+ "partial",
+ "jabber",
+ "overrun",
+ "oversize",
+ "alignment",
+ "fragment",
+ "fcs",
+ "undersize",
+ "extend",
+ "length mismatch",
+ "rgmii rx",
+ "skip error",
+ "nibble error (studder)",
+ "(undefined)",
+ "(undefined)",
+ "SPI 4.2 FCS",
+ "skip",
+ "L2 malformed",
+};
+
+/**
+ * Called to receive a packet
+ *
+ * @param dev - device to receive on
+ *
+ * @return - length of packet
+ *
+ * This function polls for a received packet. The packet data is
+ * returned to the caller via *packetp for further processing.
+ */
+static int nic_recv(struct udevice *dev, int flags, uchar **packetp)
+{
+ cvmx_wqe_t *work = cvmx_pow_work_request_sync(CVMX_POW_WAIT);
+ struct octeon_eth_info *oct_eth_info = dev_get_priv(dev);
+ cvmx_buf_ptr_t buf_ptr;
+ void *packet_data;
+ int length;
+ int error_code;
+
+ if (!oct_eth_info->enabled) {
+ oct_eth_info->enabled = 1;
+ debug("%s: Enabling interface %s\n", __func__, dev->name);
+ octeon_eth_enable(dev);
+ }
+
+ if (!work) {
+ /*
+ * Sometimes the link is not up yet. Return here in this
+ * case, this function will be called again later.
+ */
+ return 0;
+ }
+
+ error_code = cvmx_wqe_get_rcv_err(work);
+ if (error_code) {
+ /* Work has error, so drop */
+ cvmx_helper_free_packet_data(work);
+ cvmx_wqe_free(work);
+ if (error_code < ARRAY_SIZE(rx_error_codes) &&
+ !octeon_has_feature(OCTEON_FEATURE_BGX))
+ printf("Receive error (code %d: %s), dropping\n",
+ error_code, rx_error_codes[error_code]);
+ else
+ printf("Receive error (code %d (unknown), dropping\n",
+ error_code);
+ return 0;
+ }
+ if (cvmx_wqe_get_bufs(work) != 1) {
+ /* can only support single-buffer packets */
+ printf("Abnormal packet received in %u bufs, dropping\n",
+ cvmx_wqe_get_bufs(work));
+ length = cvmx_wqe_get_len(work);
+ buf_ptr = cvmx_wqe_get_packet_ptr(work);
+ packet_data = cvmx_phys_to_ptr(buf_ptr.s.addr);
+ print_packet(packet_data, length);
+ cvmx_helper_free_packet_data(work);
+ cvmx_wqe_free(work);
+ return 0;
+ }
+
+ buf_ptr = cvmx_wqe_get_packet_ptr(work);
+ packet_data = cvmx_phys_to_ptr(buf_ptr.s.addr);
+ length = cvmx_wqe_get_len(work);
+
+ oct_eth_info->packets_received++;
+ debug("############# got work: %p, len: %d, packet_ptr: %p\n", work,
+ length, packet_data);
+#if defined(DEBUG_RX_PACKET)
+ if (packet_rx_debug) {
+ printf("\nRX packet: interface: %d, index: %d\n",
+ oct_eth_info->interface, oct_eth_info->index);
+ print_packet(packet_data, length);
+ }
+#endif
+ *packetp = (uchar *)packet_data;
+
+ /* Save work for free_pkt() */
+ oct_eth_info->work = work;
+
+ /* WQE and packet data are freed later in nic_free_pkt() */
+ return length;
+}
+
+static int nic_free_pkt(struct udevice *dev, uchar *pkt, int pkt_len)
+{
+ struct octeon_eth_info *oct_eth_info = dev_get_priv(dev);
+ cvmx_wqe_t *work = oct_eth_info->work;
+
+ if (!work)
+ return 0;
+
+ cvmx_helper_free_packet_data(work);
+ cvmx_wqe_free(work);
+ oct_eth_info->work = NULL;
+
+ return 0;
+}
+
+/**
+ * Packet transmit
+ *
+ * @param skb Packet to send
+ * @param dev Device info structure
+ * @return Always returns zero
+ */
+static int cvm_oct_xmit(struct udevice *dev, void *packet, int len)
+{
+ struct octeon_eth_info *priv = dev_get_priv(dev);
+ int queue = cvmx_pko_get_base_queue(priv->port);
+ cvmx_pko_command_word0_t pko_command;
+ cvmx_buf_ptr_t hw_buffer;
+ int rv;
+
+ debug("%s: addr: %p, len: %d\n", __func__, packet, len);
+
+ hw_buffer.u64 = 0;
+ hw_buffer.s.addr = cvmx_ptr_to_phys(packet);
+ hw_buffer.s.pool = CVMX_FPA_PACKET_POOL;
+ hw_buffer.s.size = len;
+ hw_buffer.s.back = 0;
+
+ /* Build the PKO command */
+ pko_command.u64 = 0;
+ pko_command.s.subone0 = 1;
+ pko_command.s.dontfree = 0;
+ pko_command.s.segs = 1;
+ pko_command.s.total_bytes = len;
+ /* Send the packet to the output queue */
+
+ debug("%s: port: %d, queue: %d\n", __func__, priv->port, queue);
+ cvmx_pko_send_packet_prepare(priv->port, queue, 0);
+ rv = cvmx_pko_send_packet_finish(priv->port, queue, pko_command,
+ hw_buffer, 0);
+ if (rv)
+ printf("Failed to send the packet rv=%d\n", rv);
+
+ return 0;
+}
+
+static int nic_xmit(struct udevice *dev, void *pkt, int pkt_len)
+{
+ struct octeon_eth_info *oct_eth_info = dev_get_priv(dev);
+ void *fpa_buf = cvmx_fpa_alloc(CVMX_FPA_PACKET_POOL);
+
+ if (!oct_eth_info->enabled) {
+ oct_eth_info->enabled = 1;
+ octeon_eth_enable(dev);
+ }
+
+ /* We need to copy this to an FPA buffer, then give that to TX */
+
+ if (oct_eth_info->packets_sent == 0 &&
+ !octeon_has_feature(OCTEON_FEATURE_BGX))
+ cvm_oct_configure_rgmii_speed(dev);
+
+ if (!fpa_buf) {
+ printf("ERROR allocating buffer for packet!\n");
+ return -1;
+ }
+
+ memcpy(fpa_buf, pkt, pkt_len);
+#ifdef DEBUG_TX_PACKET
+ if (packet_tx_debug) {
+ printf("\nTX packet: interface: %d, index: %d\n",
+ oct_eth_info->interface, oct_eth_info->index);
+ print_packet(pkt, pkt_len);
+ }
+#endif
+ cvm_oct_xmit(dev, fpa_buf, pkt_len);
+ oct_eth_info->packets_sent++;
+
+ return 0;
+}
+
+int nic_open(struct udevice *dev)
+{
+ octeon_eth_init(dev);
+
+ return 0;
+}
+
+static void octeon_eth_halt_bgx(struct udevice *dev,
+ cvmx_helper_interface_mode_t mode)
+{
+ union cvmx_bgxx_cmrx_config cmr_config;
+ union cvmx_bgxx_cmr_rx_adrx_cam cmr_cam;
+ struct octeon_eth_info *oct_eth_info = dev_get_priv(dev);
+ int index = oct_eth_info->index;
+ int xiface = oct_eth_info->interface;
+ struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
+
+ debug("%s(%s(%d.%d), %d)\n", __func__, dev->name, xiface, index, mode);
+
+ /* For RGMII we need to properly shut down the XCV interface */
+ if (cvmx_helper_bgx_is_rgmii(xiface, index)) {
+ debug(" Shut down XCV RGMII\n");
+ octeon_bgx_xcv_rgmii_enable(xi.interface, index, false);
+ } else {
+ cmr_config.u64 = csr_rd_node(xi.node,
+ CVMX_BGXX_CMRX_CONFIG(index, xi.interface));
+ cmr_config.s.data_pkt_tx_en = 0;
+ cmr_config.s.data_pkt_rx_en = 0;
+ csr_wr_node(xi.node, CVMX_BGXX_CMRX_CONFIG(index, xi.interface),
+ cmr_config.u64);
+
+ cmr_cam.u64 = csr_rd_node(xi.node,
+ CVMX_BGXX_CMR_RX_ADRX_CAM(index * 8, xi.interface));
+ cmr_cam.s.en = 0;
+ csr_wr_node(xi.node,
+ CVMX_BGXX_CMR_RX_ADRX_CAM(index * 8, xi.interface),
+ cmr_cam.u64);
+ oct_eth_info->last_bgx_mac = 0;
+ oct_eth_info->bgx_mac_set = 0;
+ }
+}
+
+/**
+ * Halts the specified Ethernet interface preventing it from receiving any more
+ * packets.
+ *
+ * @param dev - Ethernet device to shut down.
+ */
+void octeon_eth_halt(struct udevice *dev)
+{
+ struct octeon_eth_info *oct_eth_info = dev_get_priv(dev);
+ int index = oct_eth_info->index;
+ int interface = oct_eth_info->interface;
+ cvmx_helper_interface_mode_t mode;
+ union cvmx_gmxx_rxx_adr_ctl adr_ctl;
+ cvmx_gmxx_prtx_cfg_t gmx_cfg;
+ u64 tmp;
+
+ debug("%s(%s): Halting\n", __func__, dev->name);
+
+ oct_eth_info->enabled = 0;
+
+ mode = cvmx_helper_interface_get_mode(oct_eth_info->interface);
+ if (octeon_has_feature(OCTEON_FEATURE_BGX)) {
+ octeon_eth_halt_bgx(dev, mode);
+ return;
+ }
+
+ /* Stop SCC */
+ /* Disable reception on this port at the GMX block */
+ switch (mode) {
+ case CVMX_HELPER_INTERFACE_MODE_RGMII:
+ case CVMX_HELPER_INTERFACE_MODE_GMII:
+ debug(" RGMII/GMII\n");
+ tmp = csr_rd(CVMX_ASXX_RX_PRT_EN(oct_eth_info->interface));
+ tmp &= ~(1ull << index);
+ /* Disable the RGMII RX ports */
+ csr_wr(CVMX_ASXX_RX_PRT_EN(oct_eth_info->interface), tmp);
+ tmp = csr_rd(CVMX_ASXX_TX_PRT_EN(oct_eth_info->interface));
+ tmp &= ~(1ull << index);
+ /* Disable the RGMII TX ports */
+ csr_wr(CVMX_ASXX_TX_PRT_EN(oct_eth_info->interface), tmp);
+ /* No break! */
+ case CVMX_HELPER_INTERFACE_MODE_SGMII:
+ case CVMX_HELPER_INTERFACE_MODE_QSGMII:
+ case CVMX_HELPER_INTERFACE_MODE_XAUI:
+ case CVMX_HELPER_INTERFACE_MODE_RXAUI:
+ case CVMX_HELPER_INTERFACE_MODE_XLAUI:
+ case CVMX_HELPER_INTERFACE_MODE_XFI:
+ case CVMX_HELPER_INTERFACE_MODE_10G_KR:
+ case CVMX_HELPER_INTERFACE_MODE_40G_KR4:
+ case CVMX_HELPER_INTERFACE_MODE_MIXED:
+ case CVMX_HELPER_INTERFACE_MODE_AGL:
+ /* Disable MAC filtering */
+ gmx_cfg.u64 = csr_rd(oct_eth_info->gmx_base + GMX_PRT_CFG);
+ csr_wr(oct_eth_info->gmx_base + GMX_PRT_CFG,
+ gmx_cfg.u64 & ~1ull);
+ adr_ctl.u64 = 0;
+ adr_ctl.s.bcst = 1; /* Reject broadcast */
+ csr_wr(oct_eth_info->gmx_base + GMX_RX_ADR_CTL, adr_ctl.u64);
+ csr_wr(oct_eth_info->gmx_base + GMX_RX_ADR_CAM_EN, 0);
+ csr_wr(oct_eth_info->gmx_base + GMX_PRT_CFG, gmx_cfg.u64);
+ break;
+ default:
+ printf("%s: Unknown mode %d for interface 0x%x:%d\n", __func__,
+ mode, interface, index);
+ break;
+ }
+}
+
+void nic_stop(struct udevice *dev)
+{
+ octeon_eth_halt(dev);
+}
+
+int nic_write_hwaddr(struct udevice *dev)
+{
+ cvm_oct_set_mac_address(dev);
+
+ return 0;
+}
+
+static const struct eth_ops octeon_nic_ops = {
+ .start = nic_open,
+ .stop = nic_stop,
+ .send = nic_xmit,
+ .recv = nic_recv,
+ .free_pkt = nic_free_pkt,
+ .write_hwaddr = nic_write_hwaddr,
+};
+
+static const struct udevice_id octeon_nic_ids[] = {
+ { .compatible = "cavium,octeon-7890-bgx" },
+ {}
+};
+
+U_BOOT_DRIVER(octeon_nic) = {
+ .name = "octeon_nic",
+ .id = UCLASS_ETH,
+ .probe = octeon_nic_probe,
+ .of_match = octeon_nic_ids,
+ .ops = &octeon_nic_ops,
+ .priv_auto = sizeof(struct octeon_eth_info),
+};
diff --git a/drivers/net/octeon/octeon_mdio.c b/drivers/net/octeon/octeon_mdio.c
new file mode 100644
index 000000000000..34ee80901fb6
--- /dev/null
+++ b/drivers/net/octeon/octeon_mdio.c
@@ -0,0 +1,226 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018-2022 Marvell International Ltd.
+ */
+
+#include <dm.h>
+#include <fdt_support.h>
+#include <log.h>
+#include <miiphy.h>
+#include <net.h>
+#include <linux/delay.h>
+
+#include <mach/cvmx-regs.h>
+#include <mach/cvmx-csr.h>
+#include <mach/octeon-model.h>
+#include <mach/octeon-feature.h>
+#include <mach/cvmx-smix-defs.h>
+#include <mach/cvmx-config.h>
+#include <mach/cvmx-helper.h>
+#include <mach/cvmx-helper-board.h>
+#include <mach/cvmx-mdio.h>
+
+#define CVMX_SMI_DRV_CTL 0x0001180000001828ull
+#define DEFAULT_MDIO_SPEED 2500000 /** 2.5 MHz default speed */
+
+/**
+ * cvmx_smi_drv_ctl
+ *
+ * Enables the SMI interface.
+ *
+ */
+union cvmx_smi_drv_ctl {
+ u64 u64;
+ struct cvmx_smi_drv_ctl_s {
+ u64 reserved_14_63 : 50;
+ u64 pctl : 6;
+ u64 reserved_6_7 : 2;
+ u64 nctl : 6;
+ } s;
+};
+
+struct octeon_mdiobus {
+ struct mii_dev *mii_dev;
+ /**
+ * The local bus is in the lower 8 bits, followed by the remote bus in
+ * the top 8 bits. Bit 16 will be set if the bus is non-local.
+ */
+ u32 bus_id;
+
+ int node; /** Node number */
+ int speed; /** Bus speed, normally 2.5 MHz */
+ int fdt_node; /** Node in FDT */
+ bool local; /** true if local MDIO bus */
+};
+
+static int octeon_mdio_read(struct udevice *mdio_dev, int phy_addr,
+ int dev_addr, int reg_addr)
+{
+ struct octeon_mdiobus *p = dev_get_priv(mdio_dev);
+ struct mii_dev *dev = p->mii_dev;
+ int value;
+
+ debug("%s(0x%p(%s): bus_id=%d phy_addr=%d, 0x%x, 0x%x) - ", __func__,
+ dev, dev->name, p->bus_id, phy_addr, dev_addr, reg_addr);
+ if (IS_ENABLED(CONFIG_PHYLIB_10G) && dev_addr != MDIO_DEVAD_NONE) {
+ debug("clause 45 mode\n");
+ value = cvmx_mdio_45_read(p->bus_id & 0xff, phy_addr, dev_addr,
+ reg_addr);
+ } else {
+ value = cvmx_mdio_read(p->bus_id & 0xff, phy_addr, reg_addr);
+ }
+
+ debug("Return value: 0x%x\n", value);
+ return value;
+}
+
+static int octeon_mdio_write(struct udevice *mdio_dev, int phy_addr,
+ int dev_addr, int reg_addr, u16 value)
+{
+ struct octeon_mdiobus *p = dev_get_priv(mdio_dev);
+ struct mii_dev *dev = p->mii_dev;
+
+ debug("%s(0x%p(%s): bus_id=%d phy_addr=%d, 0x%x, 0x%x, 0x%x)\n",
+ __func__, dev, dev->name, p->bus_id, phy_addr, dev_addr, reg_addr,
+ value);
+
+ if (IS_ENABLED(CONFIG_PHYLIB_10G) && dev_addr != MDIO_DEVAD_NONE) {
+ debug("clause 45 mode\n");
+ return cvmx_mdio_45_write(p->bus_id & 0xff, phy_addr, dev_addr,
+ reg_addr, value);
+ }
+
+ return cvmx_mdio_write(p->bus_id & 0xff, phy_addr, reg_addr, value);
+}
+
+/**
+ * Converts a MDIO register address to a bus number
+ *
+ * @param reg_addr MDIO base register address
+ *
+ * @return MDIO bus number or -1 if invalid address
+ */
+int octeon_mdio_reg_addr_to_bus(u64 reg_addr)
+{
+ int bus_base;
+ int bus;
+
+ /* Adjust the bus number based on the node number */
+ bus_base = cvmx_csr_addr_to_node(reg_addr) * 4;
+ reg_addr = cvmx_csr_addr_strip_node(reg_addr);
+
+ switch (reg_addr) {
+ case 0x1180000001800:
+ case 0x1180000003800: /* 68XX/78XX address */
+ bus = 0;
+ break;
+ case 0x1180000001900:
+ case 0x1180000003880:
+ bus = 1;
+ break;
+ case 0x1180000003900:
+ bus = 2;
+ break;
+ case 0x1180000003980:
+ bus = 3;
+ break;
+ default:
+ printf("%s: Unknown register address 0x%llx\n", __func__,
+ reg_addr);
+ return -1;
+ }
+ bus += bus_base;
+ debug("%s: address 0x%llx is bus %d\n", __func__, reg_addr, bus);
+ return bus;
+}
+
+static int octeon_mdio_probe(struct udevice *dev)
+{
+ struct octeon_mdiobus *p = dev_get_priv(dev);
+ union cvmx_smi_drv_ctl drv_ctl;
+ cvmx_smix_clk_t smi_clk;
+ u64 mdio_addr;
+ int bus;
+ u64 sclock;
+ u32 sample_dly;
+ u64 denom;
+
+ mdio_addr = dev_read_addr(dev);
+ debug("%s: Translated address: 0x%llx\n", __func__, mdio_addr);
+ bus = octeon_mdio_reg_addr_to_bus(mdio_addr);
+ p->bus_id = bus;
+ debug("%s: bus: %d\n", __func__, bus);
+
+ drv_ctl.u64 = csr_rd(CVMX_SMI_DRV_CTL);
+ drv_ctl.s.pctl = dev_read_u32_default(dev, "cavium,pctl-drive-strength",
+ drv_ctl.s.pctl);
+ drv_ctl.s.nctl = dev_read_u32_default(dev, "cavium,nctl-drive-strength",
+ drv_ctl.s.nctl);
+ debug("%s: Set MDIO PCTL drive strength to 0x%x and NCTL drive strength to 0x%x\n",
+ __func__, drv_ctl.s.pctl, drv_ctl.s.nctl);
+ csr_wr(CVMX_SMI_DRV_CTL, drv_ctl.u64);
+
+ /* Set the bus speed, default is 2.5MHz */
+ p->speed = dev_read_u32_default(dev, "cavium,max-speed",
+ DEFAULT_MDIO_SPEED);
+ sclock = gd->bus_clk;
+ smi_clk.u64 = csr_rd(CVMX_SMIX_CLK(bus & 3));
+ smi_clk.s.phase = sclock / (p->speed * 2);
+
+ /* Allow sample delay to be specified */
+ sample_dly = dev_read_u32_default(dev, "cavium,sample-delay", 0);
+ /* Only change the sample delay if it is set, otherwise use
+ * the default value of 2.
+ */
+ if (sample_dly) {
+ u32 sample;
+
+ denom = (sclock * 1000ULL) / sample_dly;
+ debug("%s: sclock: %llu, sample_dly: %u ps, denom: %llu\n",
+ __func__, sclock, sample_dly, denom);
+ sample = (sclock + denom - 1) / denom;
+ debug("%s: sample: %u\n", __func__, smi_clk.s.sample);
+ if (sample < 2) {
+ printf("%s: warning: cavium,sample-delay %u ps is too small in device tree for %s\n",
+ __func__, sample_dly, dev->name);
+ sample = 2;
+ }
+ if (sample > (2 * smi_clk.s.phase - 3)) {
+ printf("%s: warning: cavium,sample-delay %u ps is too large in device tree for %s\n",
+ __func__, sample_dly, dev->name);
+ sample = 2 * smi_clk.s.phase - 3;
+ }
+ smi_clk.s.sample = sample & 0xf;
+ smi_clk.s.sample_hi = (sample >> 4) & 0xf;
+ debug("%s(%s): sample delay: %u ps (%d clocks)\n", __func__,
+ dev->name, sample_dly, smi_clk.s.sample);
+ }
+ csr_wr(CVMX_SMIX_CLK(bus & 3), smi_clk.u64);
+
+ debug("mdio clock phase: %d clocks\n", smi_clk.s.phase);
+ csr_wr(CVMX_SMIX_CLK(bus & 3), smi_clk.u64);
+ debug("Enabling SMI interface %s\n", dev->name);
+ csr_wr(CVMX_SMIX_EN(bus & 3), 1);
+
+ /* Muxed MDIO bus support removed for now! */
+ return 0;
+}
+
+static const struct mdio_ops octeon_mdio_ops = {
+ .read = octeon_mdio_read,
+ .write = octeon_mdio_write,
+};
+
+static const struct udevice_id octeon_mdio_ids[] = {
+ { .compatible = "cavium,octeon-3860-mdio" },
+ {}
+};
+
+U_BOOT_DRIVER(octeon_mdio) = {
+ .name = "octeon_mdio",
+ .id = UCLASS_MDIO,
+ .of_match = octeon_mdio_ids,
+ .probe = octeon_mdio_probe,
+ .ops = &octeon_mdio_ops,
+ .priv_auto = sizeof(struct octeon_mdiobus),
+};
--
2.35.1
^ permalink raw reply related [flat|nested] 52+ messages in thread* [PATCH 51/52] mips: octeon: ebb7304: Enable ethernet support
2022-03-30 10:06 [PATCH 00/52] mips: octeon: Add ethernet support Stefan Roese
` (45 preceding siblings ...)
2022-03-30 10:07 ` [PATCH 50/52] net: Add ethernet support for MIPS Octeon Stefan Roese
@ 2022-03-30 10:07 ` Stefan Roese
2022-03-30 10:07 ` [PATCH 52/52] mips: octeon: nic23: " Stefan Roese
` (2 subsequent siblings)
49 siblings, 0 replies; 52+ messages in thread
From: Stefan Roese @ 2022-03-30 10:07 UTC (permalink / raw)
To: u-boot; +Cc: awilliams, cchavva
This patch enables the Kconfig symbols needed for full ethernet support
on the EBB7304. Also the PHY autonegotiation timeout is increased, as
the default 5 seconds are sometimes a bit short. With this, ethernet can
be used on this board. Here an example of a tftp load:
=> tftp ffffffff81000000 big
ethernet-mac-nexus@11800e0000000 Waiting for PHY auto negotiation to complete....... done
Using ethernet-mac-nexus@11800e0000000 device
TFTP from server 192.168.1.5; our IP address is 192.168.1.243
Filename 'big'.
Load address: 0xffffffff81000000
Loading: ################################################## 10 MiB
13.2 MiB/s
done
Bytes transferred = 10485760 (a00000 hex)
Signed-off-by: Stefan Roese <sr@denx.de>
---
configs/octeon_ebb7304_defconfig | 7 +++++++
include/configs/octeon_ebb7304.h | 2 ++
2 files changed, 9 insertions(+)
diff --git a/configs/octeon_ebb7304_defconfig b/configs/octeon_ebb7304_defconfig
index 9824f8b97ae5..3d9bbaaab4df 100644
--- a/configs/octeon_ebb7304_defconfig
+++ b/configs/octeon_ebb7304_defconfig
@@ -25,6 +25,7 @@ CONFIG_CMD_PART=y
CONFIG_CMD_PCI=y
CONFIG_CMD_USB=y
CONFIG_CMD_DHCP=y
+CONFIG_CMD_MII=y
CONFIG_CMD_PING=y
CONFIG_CMD_RTC=y
CONFIG_CMD_TIME=y
@@ -36,6 +37,7 @@ CONFIG_EFI_PARTITION=y
CONFIG_PARTITION_TYPE_GUID=y
CONFIG_ENV_IS_IN_FLASH=y
CONFIG_ENV_ADDR=0x800000001FBFE000
+CONFIG_TFTP_TSIZE=y
CONFIG_CLK=y
# CONFIG_INPUT is not set
CONFIG_MISC=y
@@ -53,7 +55,12 @@ CONFIG_DM_SPI_FLASH=y
CONFIG_SPI_FLASH_ATMEL=y
CONFIG_SPI_FLASH_SPANSION=y
CONFIG_SPI_FLASH_STMICRO=y
+CONFIG_PHYLIB=y
+CONFIG_PHY_MARVELL=y
+CONFIG_DM_MDIO=y
+CONFIG_DM_ETH_PHY=y
CONFIG_E1000=y
+CONFIG_NET_OCTEON=y
CONFIG_PCI=y
CONFIG_PCIE_OCTEON=y
CONFIG_DM_REGULATOR=y
diff --git a/include/configs/octeon_ebb7304.h b/include/configs/octeon_ebb7304.h
index 358db69a05b3..8c6c57bd546a 100644
--- a/include/configs/octeon_ebb7304.h
+++ b/include/configs/octeon_ebb7304.h
@@ -16,4 +16,6 @@
#define CONFIG_SYS_FLASH_CFI_WIDTH FLASH_CFI_8BIT
#define CONFIG_SYS_FLASH_EMPTY_INFO /* flinfo indicates empty blocks */
+#define PHY_ANEG_TIMEOUT 8000 /* PHY needs a longer aneg time */
+
#endif /* __CONFIG_H__ */
--
2.35.1
^ permalink raw reply related [flat|nested] 52+ messages in thread* [PATCH 52/52] mips: octeon: nic23: Enable ethernet support
2022-03-30 10:06 [PATCH 00/52] mips: octeon: Add ethernet support Stefan Roese
` (46 preceding siblings ...)
2022-03-30 10:07 ` [PATCH 51/52] mips: octeon: ebb7304: Enable ethernet support Stefan Roese
@ 2022-03-30 10:07 ` Stefan Roese
2022-03-30 10:30 ` [PATCH 00/52] mips: octeon: Add " Stefan Roese
2022-03-30 23:56 ` Daniel Schwierzeck
49 siblings, 0 replies; 52+ messages in thread
From: Stefan Roese @ 2022-03-30 10:07 UTC (permalink / raw)
To: u-boot; +Cc: awilliams, cchavva
This patch enables the Kconfig symbols needed for full ethernet support
on the NIC23. Additionally board specific setup is done, mostly GPIOs
related to SFP / GPIO configuration. With this, ethernet can be used on
this board. Here an example of a tftp load:
=> tftp ffffffff81000000 big
Using ethernet-mac-nexus@11800e2000000 device
TFTP from server 192.168.1.5; our IP address is 192.168.1.247
Filename 'big'.
Load address: 0xffffffff81000000
Loading: ################################################## 10 MiB
9.7 MiB/s
done
Bytes transferred = 10485760 (a00000 hex)
Signed-off-by: Stefan Roese <sr@denx.de>
---
board/Marvell/octeon_nic23/board.c | 87 +++++++++++++++++++++++++++++-
configs/octeon_nic23_defconfig | 10 +++-
2 files changed, 95 insertions(+), 2 deletions(-)
diff --git a/board/Marvell/octeon_nic23/board.c b/board/Marvell/octeon_nic23/board.c
index 9f5eb2e2a182..3e2c54444397 100644
--- a/board/Marvell/octeon_nic23/board.c
+++ b/board/Marvell/octeon_nic23/board.c
@@ -1,10 +1,11 @@
// SPDX-License-Identifier: GPL-2.0+
/*
- * Copyright (C) 2021 Stefan Roese <sr@denx.de>
+ * Copyright (C) 2021-2022 Stefan Roese <sr@denx.de>
*/
#include <dm.h>
#include <ram.h>
+#include <asm/gpio.h>
#include <mach/octeon_ddr.h>
#include <mach/cvmx-qlm.h>
@@ -84,6 +85,52 @@ int board_fix_fdt(void *fdt)
return rc;
}
+int board_early_init_f(void)
+{
+ struct gpio_desc gpio = {};
+ ofnode node;
+
+ /* Initial GPIO configuration */
+
+ /* GPIO 7: Vitesse reset */
+ node = ofnode_by_compatible(ofnode_null(), "vitesse,vsc7224");
+ if (ofnode_valid(node)) {
+ gpio_request_by_name_nodev(node, "los", 0, &gpio, GPIOD_IS_IN);
+ dm_gpio_free(gpio.dev, &gpio);
+ gpio_request_by_name_nodev(node, "reset", 0, &gpio,
+ GPIOD_IS_OUT);
+ if (dm_gpio_is_valid(&gpio)) {
+ /* Vitesse reset */
+ debug("%s: Setting GPIO 7 to 1\n", __func__);
+ dm_gpio_set_value(&gpio, 1);
+ }
+ dm_gpio_free(gpio.dev, &gpio);
+ }
+
+ /* SFP+ transmitters */
+ ofnode_for_each_compatible_node(node, "ethernet,sfp-slot") {
+ gpio_request_by_name_nodev(node, "tx_disable", 0,
+ &gpio, GPIOD_IS_OUT);
+ if (dm_gpio_is_valid(&gpio)) {
+ debug("%s: Setting GPIO %d to 1\n", __func__,
+ gpio.offset);
+ dm_gpio_set_value(&gpio, 1);
+ }
+ dm_gpio_free(gpio.dev, &gpio);
+ gpio_request_by_name_nodev(node, "mod_abs", 0, &gpio,
+ GPIOD_IS_IN);
+ dm_gpio_free(gpio.dev, &gpio);
+ gpio_request_by_name_nodev(node, "tx_error", 0, &gpio,
+ GPIOD_IS_IN);
+ dm_gpio_free(gpio.dev, &gpio);
+ gpio_request_by_name_nodev(node, "rx_los", 0, &gpio,
+ GPIOD_IS_IN);
+ dm_gpio_free(gpio.dev, &gpio);
+ }
+
+ return 0;
+}
+
void board_configure_qlms(void)
{
octeon_configure_qlm(4, 3000, CVMX_QLM_MODE_SATA_2X1, 0, 0, 0, 0);
@@ -100,7 +147,45 @@ void board_configure_qlms(void)
int board_late_init(void)
{
+ struct gpio_desc gpio = {};
+ ofnode node;
+
+ /* Turn on SFP+ transmitters */
+ ofnode_for_each_compatible_node(node, "ethernet,sfp-slot") {
+ gpio_request_by_name_nodev(node, "tx_disable", 0,
+ &gpio, GPIOD_IS_OUT);
+ if (dm_gpio_is_valid(&gpio)) {
+ debug("%s: Setting GPIO %d to 0\n", __func__,
+ gpio.offset);
+ dm_gpio_set_value(&gpio, 0);
+ }
+ dm_gpio_free(gpio.dev, &gpio);
+ }
+
board_configure_qlms();
return 0;
}
+
+int last_stage_init(void)
+{
+ struct gpio_desc gpio = {};
+ ofnode node;
+
+ node = ofnode_by_compatible(ofnode_null(), "vitesse,vsc7224");
+ if (!ofnode_valid(node)) {
+ printf("Vitesse SPF DT node not found!");
+ return 0;
+ }
+
+ gpio_request_by_name_nodev(node, "reset", 0, &gpio, GPIOD_IS_OUT);
+ if (dm_gpio_is_valid(&gpio)) {
+ /* Take Vitesse retimer out of reset */
+ debug("%s: Setting GPIO 7 to 0\n", __func__);
+ dm_gpio_set_value(&gpio, 0);
+ mdelay(50);
+ }
+ dm_gpio_free(gpio.dev, &gpio);
+
+ return 0;
+}
diff --git a/configs/octeon_nic23_defconfig b/configs/octeon_nic23_defconfig
index 5427a9970a71..d44c650cef3c 100644
--- a/configs/octeon_nic23_defconfig
+++ b/configs/octeon_nic23_defconfig
@@ -12,6 +12,7 @@ CONFIG_ARCH_OCTEON=y
CONFIG_TARGET_OCTEON_NIC23=y
# CONFIG_MIPS_CACHE_SETUP is not set
# CONFIG_MIPS_CACHE_DISABLE is not set
+CONFIG_MIPS_RELOCATION_TABLE_SIZE=0xc000
CONFIG_DEBUG_UART=y
CONFIG_AHCI=y
CONFIG_OF_BOARD_FIXUP=y
@@ -19,7 +20,9 @@ CONFIG_SYS_LOAD_ADDR=0xffffffff80100000
CONFIG_SYS_CONSOLE_ENV_OVERWRITE=y
# CONFIG_SYS_DEVICE_NULLDEV is not set
CONFIG_ARCH_MISC_INIT=y
+CONFIG_BOARD_EARLY_INIT_F=y
CONFIG_BOARD_LATE_INIT=y
+CONFIG_LAST_STAGE_INIT=y
CONFIG_HUSH_PARSER=y
# CONFIG_CMD_FLASH is not set
CONFIG_CMD_GPIO=y
@@ -37,6 +40,7 @@ CONFIG_CMD_FS_GENERIC=y
CONFIG_EFI_PARTITION=y
CONFIG_ENV_IS_IN_SPI_FLASH=y
CONFIG_ENV_ADDR=0xe000
+CONFIG_TFTP_TSIZE=y
CONFIG_SATA=y
CONFIG_AHCI_MVEBU=y
CONFIG_CLK=y
@@ -50,7 +54,11 @@ CONFIG_DM_SPI_FLASH=y
CONFIG_SPI_FLASH_ATMEL=y
CONFIG_SPI_FLASH_SPANSION=y
CONFIG_SPI_FLASH_STMICRO=y
-# CONFIG_NETDEVICES is not set
+CONFIG_PHYLIB=y
+CONFIG_PHYLIB_10G=y
+CONFIG_DM_MDIO=y
+CONFIG_DM_ETH_PHY=y
+CONFIG_NET_OCTEON=y
CONFIG_PCI=y
CONFIG_DM_REGULATOR=y
CONFIG_DM_REGULATOR_FIXED=y
--
2.35.1
^ permalink raw reply related [flat|nested] 52+ messages in thread* Re: [PATCH 00/52] mips: octeon: Add ethernet support
2022-03-30 10:06 [PATCH 00/52] mips: octeon: Add ethernet support Stefan Roese
` (47 preceding siblings ...)
2022-03-30 10:07 ` [PATCH 52/52] mips: octeon: nic23: " Stefan Roese
@ 2022-03-30 10:30 ` Stefan Roese
2022-03-30 23:56 ` Daniel Schwierzeck
49 siblings, 0 replies; 52+ messages in thread
From: Stefan Roese @ 2022-03-30 10:30 UTC (permalink / raw)
To: u-boot; +Cc: awilliams, cchavva, Daniel Schwierzeck
Add Daniel to Cc (sorry, I forgot you Daniel).
Thanks,
Stefan
On 3/30/22 12:06, Stefan Roese wrote:
> This patchset adds the networking files and drivers including device
> helper headers and C files. Please excuse the massive amount of files
> in this patch series. Also the sometimes huge files (mostly headers
> with register definitions) that I needed to include.
>
> The infrastructure code with all the headers is ported mostly without
> any intended functional changes from the 2013 Cavium / Marvell U-Boot
> version. It has undergone many hours of extensive code cleanup and
> reformatting. Some of it done by using tools (checkpatch, Lindent, clang
> format etc) and also some of it done manually, as I couldn't find some
> tools that could do the needed work in a reliable and functional way.
> The result is that checkpatch now only throws a "few" warnings that are
> left. Some of those can't be removed without an even more extensive
> cleanup / rewrite of the code, like the addition of typedefs.
>
> The added header, helper and infrastructure files in the first part of
> the patch-series (patches 1-43) are the foundation, that is used by the
> main Octeon U-Boot ethernet driver (patch 50/52). Patches 47-49 add the
> DT nodes and properties to the corresponding dtsi / dts files. Patches
> 51 & 52 finally enable the ethernet support on both MIPS Octeon boards,
> EBB7304 & NIC23.
>
> All this is tested on the 2 Cavium / Marvell MIPS Octeon boards:
> EBB7304 & NIC23
>
> This patchset including the small Marvell PHY patches is available in
> this gitlab branch:
>
> https://source.denx.de/u-boot/custodians/u-boot-marvell/-/tree/mips-octeon-ethernet-v1-2022-03-30
>
> Thanks,
> Stefan
>
> Aaron Williams (40):
> mips: octeon: Add misc cvmx-* header files
> mips: octeon: Add cvmx-ilk-defs.h header file
> mips: octeon: Add cvmx-iob-defs.h header file
> mips: octeon: Add cvmx-lbk-defs.h header file
> mips: octeon: Add cvmx-npei-defs.h header file
> mips: octeon: Add cvmx-pcsxx-defs.h header file
> mips: octeon: Add cvmx-xcv-defs.h header file
> mips: octeon: Add cvmx-helper-agl.c
> mips: octeon: Add cvmx-helper-bgx.c
> mips: octeon: Add cvmx-helper-board.c
> mips: octeon: Add cvmx-helper-fpa.c
> mips: octeon: Add cvmx-helper-igl.c
> mips: octeon: Add cvmx-helper-ipd.c
> mips: octeon: Add cvmx-helper-loop.c
> mips: octeon: Add cvmx-helper-npi.c
> mips: octeon: Add cvmx-helper-pki.c
> mips: octeon: Add cvmx-helper-pko.c
> mips: octeon: Add cvmx-helper-pko3.c
> mips: octeon: Add cvmx-helper-rgmii.c
> mips: octeon: Add cvmx-helper-sgmii.c
> mips: octeon: Add cvmx-helper-sfp.c
> mips: octeon: Add cvmx-helper-xaui.c
> mips: octeon: Add cvmx-agl.c
> mips: octeon: Add cvmx-cmd-queue.c
> mips: octeon: Add cvmx-fau-compat.c
> mips: octeon: Add cvmx-fpa.c
> mips: octeon: Add cvmx-fpa-resource.c
> mips: octeon: Add cvmx-global-resource.c
> mips: octeon: Add cvmx-ilk.c
> mips: octeon: Add cvmx-ipd.c
> mips: octeon: Add cvmx-pki.c
> mips: octeon: Add cvmx-pki-resources.c
> mips: octeon: Add cvmx-pko.c
> mips: octeon: Add cvmx-pko3.c
> mips: octeon: Add cvmx-pko3-queue.c
> mips: octeon: Add cvmx-pko3-compat.c
> mips: octeon: Add cvmx-pko3-resources.c
> mips: octeon: Add cvmx-pko-internal-ports-range.c
> mips: octeon: Add cvmx-qlm-tables.c
> mips: octeon: Add cvmx-range.c
>
> Stefan Roese (12):
> mips: octeon: Misc changes to existing headers for upcoming eth
> support
> mips: octeon: Misc changes to existing C files for upcoming eth
> support
> mips: octeon: Makefile: Enable building of the newly added C files
> mips: octeon: cpu.c: Move bootmem init to arch_early_init_r()
> mips: octeon: cpu.c: Implement configure_lmtdma_window()
> mips: octeon: octeon_common.h: Move init SP because of increased image
> size
> mips: octeon: mrvl,cn73xx.dtsi: Add ethernet (BGX) and SMI DT nodes
> mips: octeon: mrvl,octeon-ebb7304.dts: Add ethernet DT support
> mips: octeon: mrvl,octeon-nic23.dts: Add ethernet DT support
> net: Add ethernet support for MIPS Octeon
> mips: octeon: ebb7304: Enable ethernet support
> mips: octeon: nic23: Enable ethernet support
>
> arch/mips/Kconfig | 1 +
> arch/mips/dts/mrvl,cn73xx.dtsi | 35 +
> arch/mips/dts/mrvl,octeon-ebb7304.dts | 45 +
> arch/mips/dts/mrvl,octeon-nic23.dts | 238 ++
> arch/mips/mach-octeon/Makefile | 35 +-
> arch/mips/mach-octeon/cpu.c | 47 +-
> arch/mips/mach-octeon/cvmx-agl.c | 216 +
> arch/mips/mach-octeon/cvmx-bootmem.c | 3 +-
> arch/mips/mach-octeon/cvmx-cmd-queue.c | 449 +++
> arch/mips/mach-octeon/cvmx-fau-compat.c | 53 +
> arch/mips/mach-octeon/cvmx-fpa-resource.c | 305 ++
> arch/mips/mach-octeon/cvmx-fpa.c | 1672 ++++++++
> arch/mips/mach-octeon/cvmx-global-resources.c | 639 +++
> arch/mips/mach-octeon/cvmx-helper-agl.c | 231 ++
> arch/mips/mach-octeon/cvmx-helper-bgx.c | 3215 +++++++++++++++
> arch/mips/mach-octeon/cvmx-helper-board.c | 2030 ++++++++++
> arch/mips/mach-octeon/cvmx-helper-cfg.c | 67 +-
> arch/mips/mach-octeon/cvmx-helper-fdt.c | 645 ++-
> arch/mips/mach-octeon/cvmx-helper-fpa.c | 329 ++
> arch/mips/mach-octeon/cvmx-helper-ilk.c | 926 +++++
> arch/mips/mach-octeon/cvmx-helper-ipd.c | 313 ++
> arch/mips/mach-octeon/cvmx-helper-loop.c | 178 +
> arch/mips/mach-octeon/cvmx-helper-npi.c | 146 +
> arch/mips/mach-octeon/cvmx-helper-pki.c | 2156 ++++++++++
> arch/mips/mach-octeon/cvmx-helper-pko.c | 312 ++
> arch/mips/mach-octeon/cvmx-helper-pko3.c | 1252 ++++++
> arch/mips/mach-octeon/cvmx-helper-rgmii.c | 431 ++
> arch/mips/mach-octeon/cvmx-helper-sfp.c | 1877 +++++++++
> arch/mips/mach-octeon/cvmx-helper-sgmii.c | 781 ++++
> arch/mips/mach-octeon/cvmx-helper-xaui.c | 587 +++
> arch/mips/mach-octeon/cvmx-helper.c | 45 +-
> arch/mips/mach-octeon/cvmx-ilk.c | 1618 ++++++++
> arch/mips/mach-octeon/cvmx-ipd.c | 690 ++++
> arch/mips/mach-octeon/cvmx-pki-resources.c | 519 +++
> arch/mips/mach-octeon/cvmx-pki.c | 1619 ++++++++
> .../cvmx-pko-internal-ports-range.c | 164 +
> arch/mips/mach-octeon/cvmx-pko.c | 1110 ++++++
> arch/mips/mach-octeon/cvmx-pko3-compat.c | 656 +++
> arch/mips/mach-octeon/cvmx-pko3-queue.c | 1331 ++++++
> arch/mips/mach-octeon/cvmx-pko3-resources.c | 229 ++
> arch/mips/mach-octeon/cvmx-pko3.c | 2143 ++++++++++
> arch/mips/mach-octeon/cvmx-qlm-tables.c | 292 ++
> arch/mips/mach-octeon/cvmx-range.c | 344 ++
> arch/mips/mach-octeon/include/mach/cvmx-agl.h | 45 +
> .../mach-octeon/include/mach/cvmx-bootmem.h | 3 +-
> .../mach-octeon/include/mach/cvmx-config.h | 128 +
> arch/mips/mach-octeon/include/mach/cvmx-fau.h | 581 +++
> arch/mips/mach-octeon/include/mach/cvmx-fpa.h | 3 +-
> .../mips/mach-octeon/include/mach/cvmx-fpa3.h | 37 -
> .../include/mach/cvmx-helper-board.h | 6 +-
> .../include/mach/cvmx-helper-fdt.h | 40 +-
> .../include/mach/cvmx-helper-pko.h | 2 +-
> .../mach-octeon/include/mach/cvmx-helper.h | 20 +
> .../mach-octeon/include/mach/cvmx-ilk-defs.h | 2269 +++++++++++
> .../mach-octeon/include/mach/cvmx-iob-defs.h | 1328 ++++++
> .../mach-octeon/include/mach/cvmx-lbk-defs.h | 157 +
> .../mips/mach-octeon/include/mach/cvmx-mdio.h | 516 +++
> .../mach-octeon/include/mach/cvmx-npei-defs.h | 3550 +++++++++++++++++
> .../include/mach/cvmx-pcsxx-defs.h | 787 ++++
> .../include/mach/cvmx-pki-cluster.h | 343 ++
> arch/mips/mach-octeon/include/mach/cvmx-pko.h | 213 +
> .../include/mach/cvmx-pko3-resources.h | 36 +
> .../mips/mach-octeon/include/mach/cvmx-pko3.h | 1052 +++++
> .../mach-octeon/include/mach/cvmx-range.h | 23 +
> .../mips/mach-octeon/include/mach/cvmx-regs.h | 100 +-
> .../mach-octeon/include/mach/cvmx-xcv-defs.h | 226 ++
> .../mach-octeon/include/mach/octeon_eth.h | 54 +-
> board/Marvell/octeon_nic23/board.c | 87 +-
> configs/octeon_ebb7304_defconfig | 7 +
> configs/octeon_nic23_defconfig | 10 +-
> drivers/net/Kconfig | 7 +
> drivers/net/Makefile | 1 +
> drivers/net/octeon/Makefile | 6 +
> drivers/net/octeon/octeon_eth.c | 1060 +++++
> drivers/net/octeon/octeon_mdio.c | 226 ++
> include/configs/octeon_common.h | 2 +-
> include/configs/octeon_ebb7304.h | 2 +
> 77 files changed, 42315 insertions(+), 586 deletions(-)
> create mode 100644 arch/mips/mach-octeon/cvmx-agl.c
> create mode 100644 arch/mips/mach-octeon/cvmx-cmd-queue.c
> create mode 100644 arch/mips/mach-octeon/cvmx-fau-compat.c
> create mode 100644 arch/mips/mach-octeon/cvmx-fpa-resource.c
> create mode 100644 arch/mips/mach-octeon/cvmx-fpa.c
> create mode 100644 arch/mips/mach-octeon/cvmx-global-resources.c
> create mode 100644 arch/mips/mach-octeon/cvmx-helper-agl.c
> create mode 100644 arch/mips/mach-octeon/cvmx-helper-bgx.c
> create mode 100644 arch/mips/mach-octeon/cvmx-helper-board.c
> create mode 100644 arch/mips/mach-octeon/cvmx-helper-fpa.c
> create mode 100644 arch/mips/mach-octeon/cvmx-helper-ilk.c
> create mode 100644 arch/mips/mach-octeon/cvmx-helper-ipd.c
> create mode 100644 arch/mips/mach-octeon/cvmx-helper-loop.c
> create mode 100644 arch/mips/mach-octeon/cvmx-helper-npi.c
> create mode 100644 arch/mips/mach-octeon/cvmx-helper-pki.c
> create mode 100644 arch/mips/mach-octeon/cvmx-helper-pko.c
> create mode 100644 arch/mips/mach-octeon/cvmx-helper-pko3.c
> create mode 100644 arch/mips/mach-octeon/cvmx-helper-rgmii.c
> create mode 100644 arch/mips/mach-octeon/cvmx-helper-sfp.c
> create mode 100644 arch/mips/mach-octeon/cvmx-helper-sgmii.c
> create mode 100644 arch/mips/mach-octeon/cvmx-helper-xaui.c
> create mode 100644 arch/mips/mach-octeon/cvmx-ilk.c
> create mode 100644 arch/mips/mach-octeon/cvmx-ipd.c
> create mode 100644 arch/mips/mach-octeon/cvmx-pki-resources.c
> create mode 100644 arch/mips/mach-octeon/cvmx-pki.c
> create mode 100644 arch/mips/mach-octeon/cvmx-pko-internal-ports-range.c
> create mode 100644 arch/mips/mach-octeon/cvmx-pko.c
> create mode 100644 arch/mips/mach-octeon/cvmx-pko3-compat.c
> create mode 100644 arch/mips/mach-octeon/cvmx-pko3-queue.c
> create mode 100644 arch/mips/mach-octeon/cvmx-pko3-resources.c
> create mode 100644 arch/mips/mach-octeon/cvmx-pko3.c
> create mode 100644 arch/mips/mach-octeon/cvmx-qlm-tables.c
> create mode 100644 arch/mips/mach-octeon/cvmx-range.c
> create mode 100644 arch/mips/mach-octeon/include/mach/cvmx-agl.h
> create mode 100644 arch/mips/mach-octeon/include/mach/cvmx-config.h
> create mode 100644 arch/mips/mach-octeon/include/mach/cvmx-fau.h
> create mode 100644 arch/mips/mach-octeon/include/mach/cvmx-ilk-defs.h
> create mode 100644 arch/mips/mach-octeon/include/mach/cvmx-iob-defs.h
> create mode 100644 arch/mips/mach-octeon/include/mach/cvmx-lbk-defs.h
> create mode 100644 arch/mips/mach-octeon/include/mach/cvmx-mdio.h
> create mode 100644 arch/mips/mach-octeon/include/mach/cvmx-npei-defs.h
> create mode 100644 arch/mips/mach-octeon/include/mach/cvmx-pcsxx-defs.h
> create mode 100644 arch/mips/mach-octeon/include/mach/cvmx-pki-cluster.h
> create mode 100644 arch/mips/mach-octeon/include/mach/cvmx-pko.h
> create mode 100644 arch/mips/mach-octeon/include/mach/cvmx-pko3-resources.h
> create mode 100644 arch/mips/mach-octeon/include/mach/cvmx-pko3.h
> create mode 100644 arch/mips/mach-octeon/include/mach/cvmx-range.h
> create mode 100644 arch/mips/mach-octeon/include/mach/cvmx-xcv-defs.h
> create mode 100644 drivers/net/octeon/Makefile
> create mode 100644 drivers/net/octeon/octeon_eth.c
> create mode 100644 drivers/net/octeon/octeon_mdio.c
>
^ permalink raw reply [flat|nested] 52+ messages in thread* Re: [PATCH 00/52] mips: octeon: Add ethernet support
2022-03-30 10:06 [PATCH 00/52] mips: octeon: Add ethernet support Stefan Roese
` (48 preceding siblings ...)
2022-03-30 10:30 ` [PATCH 00/52] mips: octeon: Add " Stefan Roese
@ 2022-03-30 23:56 ` Daniel Schwierzeck
2022-03-31 5:27 ` Stefan Roese
49 siblings, 1 reply; 52+ messages in thread
From: Daniel Schwierzeck @ 2022-03-30 23:56 UTC (permalink / raw)
To: Stefan Roese, u-boot; +Cc: awilliams, cchavva
Am Mittwoch, dem 30.03.2022 um 12:06 +0200 schrieb Stefan Roese:
> This patchset adds the networking files and drivers including device
> helper headers and C files. Please excuse the massive amount of files
> in this patch series. Also the sometimes huge files (mostly headers
> with register definitions) that I needed to include.
>
> The infrastructure code with all the headers is ported mostly without
> any intended functional changes from the 2013 Cavium / Marvell U-Boot
> version. It has undergone many hours of extensive code cleanup and
> reformatting. Some of it done by using tools (checkpatch, Lindent,
> clang
> format etc) and also some of it done manually, as I couldn't find
> some
> tools that could do the needed work in a reliable and functional way.
> The result is that checkpatch now only throws a "few" warnings that
> are
> left. Some of those can't be removed without an even more extensive
> cleanup / rewrite of the code, like the addition of typedefs.
>
> The added header, helper and infrastructure files in the first part
> of
> the patch-series (patches 1-43) are the foundation, that is used by
> the
> main Octeon U-Boot ethernet driver (patch 50/52). Patches 47-49 add
> the
> DT nodes and properties to the corresponding dtsi / dts files.
> Patches
> 51 & 52 finally enable the ethernet support on both MIPS Octeon boards,
> EBB7304 & NIC23.
>
> All this is tested on the 2 Cavium / Marvell MIPS Octeon boards:
> EBB7304 & NIC23
>
> This patchset including the small Marvell PHY patches is available in
> this gitlab branch:
>
> https://source.denx.de/u-boot/custodians/u-boot-marvell/-/tree/mips-octeon-ethernet-v1-2022-03-30
>
> Thanks,
> Stefan
>
> Aaron Williams (40):
> mips: octeon: Add misc cvmx-* header files
> mips: octeon: Add cvmx-ilk-defs.h header file
> mips: octeon: Add cvmx-iob-defs.h header file
> mips: octeon: Add cvmx-lbk-defs.h header file
> mips: octeon: Add cvmx-npei-defs.h header file
> mips: octeon: Add cvmx-pcsxx-defs.h header file
> mips: octeon: Add cvmx-xcv-defs.h header file
> mips: octeon: Add cvmx-helper-agl.c
> mips: octeon: Add cvmx-helper-bgx.c
> mips: octeon: Add cvmx-helper-board.c
> mips: octeon: Add cvmx-helper-fpa.c
> mips: octeon: Add cvmx-helper-igl.c
> mips: octeon: Add cvmx-helper-ipd.c
> mips: octeon: Add cvmx-helper-loop.c
> mips: octeon: Add cvmx-helper-npi.c
> mips: octeon: Add cvmx-helper-pki.c
> mips: octeon: Add cvmx-helper-pko.c
> mips: octeon: Add cvmx-helper-pko3.c
> mips: octeon: Add cvmx-helper-rgmii.c
> mips: octeon: Add cvmx-helper-sgmii.c
> mips: octeon: Add cvmx-helper-sfp.c
> mips: octeon: Add cvmx-helper-xaui.c
> mips: octeon: Add cvmx-agl.c
> mips: octeon: Add cvmx-cmd-queue.c
> mips: octeon: Add cvmx-fau-compat.c
> mips: octeon: Add cvmx-fpa.c
> mips: octeon: Add cvmx-fpa-resource.c
> mips: octeon: Add cvmx-global-resource.c
> mips: octeon: Add cvmx-ilk.c
> mips: octeon: Add cvmx-ipd.c
> mips: octeon: Add cvmx-pki.c
> mips: octeon: Add cvmx-pki-resources.c
> mips: octeon: Add cvmx-pko.c
> mips: octeon: Add cvmx-pko3.c
> mips: octeon: Add cvmx-pko3-queue.c
> mips: octeon: Add cvmx-pko3-compat.c
> mips: octeon: Add cvmx-pko3-resources.c
> mips: octeon: Add cvmx-pko-internal-ports-range.c
> mips: octeon: Add cvmx-qlm-tables.c
> mips: octeon: Add cvmx-range.c
are those 10 millions helper functions really used by the ethernet
driver? Do you really need features like SFP modules in U-Boot?
Maybe it helps to have a look at u-boot.map to see which functions are
unused and are discarded by the linker. Those functions could be
actually removed to reduce the LoC count ;)
>
> Stefan Roese (12):
> mips: octeon: Misc changes to existing headers for upcoming eth
> support
> mips: octeon: Misc changes to existing C files for upcoming eth
> support
> mips: octeon: Makefile: Enable building of the newly added C files
> mips: octeon: cpu.c: Move bootmem init to arch_early_init_r()
> mips: octeon: cpu.c: Implement configure_lmtdma_window()
> mips: octeon: octeon_common.h: Move init SP because of increased
> image
> size
> mips: octeon: mrvl,cn73xx.dtsi: Add ethernet (BGX) and SMI DT nodes
> mips: octeon: mrvl,octeon-ebb7304.dts: Add ethernet DT support
> mips: octeon: mrvl,octeon-nic23.dts: Add ethernet DT support
> net: Add ethernet support for MIPS Octeon
> mips: octeon: ebb7304: Enable ethernet support
> mips: octeon: nic23: Enable ethernet support
>
> arch/mips/Kconfig | 1 +
> arch/mips/dts/mrvl,cn73xx.dtsi | 35 +
> arch/mips/dts/mrvl,octeon-ebb7304.dts | 45 +
> arch/mips/dts/mrvl,octeon-nic23.dts | 238 ++
> arch/mips/mach-octeon/Makefile | 35 +-
> arch/mips/mach-octeon/cpu.c | 47 +-
> arch/mips/mach-octeon/cvmx-agl.c | 216 +
> arch/mips/mach-octeon/cvmx-bootmem.c | 3 +-
> arch/mips/mach-octeon/cvmx-cmd-queue.c | 449 +++
> arch/mips/mach-octeon/cvmx-fau-compat.c | 53 +
> arch/mips/mach-octeon/cvmx-fpa-resource.c | 305 ++
> arch/mips/mach-octeon/cvmx-fpa.c | 1672 ++++++++
> arch/mips/mach-octeon/cvmx-global-resources.c | 639 +++
> arch/mips/mach-octeon/cvmx-helper-agl.c | 231 ++
> arch/mips/mach-octeon/cvmx-helper-bgx.c | 3215 +++++++++++++++
> arch/mips/mach-octeon/cvmx-helper-board.c | 2030 ++++++++++
> arch/mips/mach-octeon/cvmx-helper-cfg.c | 67 +-
> arch/mips/mach-octeon/cvmx-helper-fdt.c | 645 ++-
> arch/mips/mach-octeon/cvmx-helper-fpa.c | 329 ++
> arch/mips/mach-octeon/cvmx-helper-ilk.c | 926 +++++
> arch/mips/mach-octeon/cvmx-helper-ipd.c | 313 ++
> arch/mips/mach-octeon/cvmx-helper-loop.c | 178 +
> arch/mips/mach-octeon/cvmx-helper-npi.c | 146 +
> arch/mips/mach-octeon/cvmx-helper-pki.c | 2156 ++++++++++
> arch/mips/mach-octeon/cvmx-helper-pko.c | 312 ++
> arch/mips/mach-octeon/cvmx-helper-pko3.c | 1252 ++++++
> arch/mips/mach-octeon/cvmx-helper-rgmii.c | 431 ++
> arch/mips/mach-octeon/cvmx-helper-sfp.c | 1877 +++++++++
> arch/mips/mach-octeon/cvmx-helper-sgmii.c | 781 ++++
> arch/mips/mach-octeon/cvmx-helper-xaui.c | 587 +++
> arch/mips/mach-octeon/cvmx-helper.c | 45 +-
> arch/mips/mach-octeon/cvmx-ilk.c | 1618 ++++++++
> arch/mips/mach-octeon/cvmx-ipd.c | 690 ++++
> arch/mips/mach-octeon/cvmx-pki-resources.c | 519 +++
> arch/mips/mach-octeon/cvmx-pki.c | 1619 ++++++++
> .../cvmx-pko-internal-ports-range.c | 164 +
> arch/mips/mach-octeon/cvmx-pko.c | 1110 ++++++
> arch/mips/mach-octeon/cvmx-pko3-compat.c | 656 +++
> arch/mips/mach-octeon/cvmx-pko3-queue.c | 1331 ++++++
> arch/mips/mach-octeon/cvmx-pko3-resources.c | 229 ++
> arch/mips/mach-octeon/cvmx-pko3.c | 2143 ++++++++++
> arch/mips/mach-octeon/cvmx-qlm-tables.c | 292 ++
> arch/mips/mach-octeon/cvmx-range.c | 344 ++
> arch/mips/mach-octeon/include/mach/cvmx-agl.h | 45 +
> .../mach-octeon/include/mach/cvmx-bootmem.h | 3 +-
> .../mach-octeon/include/mach/cvmx-config.h | 128 +
> arch/mips/mach-octeon/include/mach/cvmx-fau.h | 581 +++
> arch/mips/mach-octeon/include/mach/cvmx-fpa.h | 3 +-
> .../mips/mach-octeon/include/mach/cvmx-fpa3.h | 37 -
> .../include/mach/cvmx-helper-board.h | 6 +-
> .../include/mach/cvmx-helper-fdt.h | 40 +-
> .../include/mach/cvmx-helper-pko.h | 2 +-
> .../mach-octeon/include/mach/cvmx-helper.h | 20 +
> .../mach-octeon/include/mach/cvmx-ilk-defs.h | 2269 +++++++++++
> .../mach-octeon/include/mach/cvmx-iob-defs.h | 1328 ++++++
> .../mach-octeon/include/mach/cvmx-lbk-defs.h | 157 +
> .../mips/mach-octeon/include/mach/cvmx-mdio.h | 516 +++
> .../mach-octeon/include/mach/cvmx-npei-defs.h | 3550
> +++++++++++++++++
> .../include/mach/cvmx-pcsxx-defs.h | 787 ++++
> .../include/mach/cvmx-pki-cluster.h | 343 ++
> arch/mips/mach-octeon/include/mach/cvmx-pko.h | 213 +
> .../include/mach/cvmx-pko3-resources.h | 36 +
> .../mips/mach-octeon/include/mach/cvmx-pko3.h | 1052 +++++
> .../mach-octeon/include/mach/cvmx-range.h | 23 +
> .../mips/mach-octeon/include/mach/cvmx-regs.h | 100 +-
> .../mach-octeon/include/mach/cvmx-xcv-defs.h | 226 ++
> .../mach-octeon/include/mach/octeon_eth.h | 54 +-
> board/Marvell/octeon_nic23/board.c | 87 +-
> configs/octeon_ebb7304_defconfig | 7 +
> configs/octeon_nic23_defconfig | 10 +-
> drivers/net/Kconfig | 7 +
> drivers/net/Makefile | 1 +
> drivers/net/octeon/Makefile | 6 +
> drivers/net/octeon/octeon_eth.c | 1060 +++++
> drivers/net/octeon/octeon_mdio.c | 226 ++
> include/configs/octeon_common.h | 2 +-
> include/configs/octeon_ebb7304.h | 2 +
> 77 files changed, 42315 insertions(+), 586 deletions(-)
> create mode 100644 arch/mips/mach-octeon/cvmx-agl.c
> create mode 100644 arch/mips/mach-octeon/cvmx-cmd-queue.c
> create mode 100644 arch/mips/mach-octeon/cvmx-fau-compat.c
> create mode 100644 arch/mips/mach-octeon/cvmx-fpa-resource.c
> create mode 100644 arch/mips/mach-octeon/cvmx-fpa.c
> create mode 100644 arch/mips/mach-octeon/cvmx-global-resources.c
> create mode 100644 arch/mips/mach-octeon/cvmx-helper-agl.c
> create mode 100644 arch/mips/mach-octeon/cvmx-helper-bgx.c
> create mode 100644 arch/mips/mach-octeon/cvmx-helper-board.c
> create mode 100644 arch/mips/mach-octeon/cvmx-helper-fpa.c
> create mode 100644 arch/mips/mach-octeon/cvmx-helper-ilk.c
> create mode 100644 arch/mips/mach-octeon/cvmx-helper-ipd.c
> create mode 100644 arch/mips/mach-octeon/cvmx-helper-loop.c
> create mode 100644 arch/mips/mach-octeon/cvmx-helper-npi.c
> create mode 100644 arch/mips/mach-octeon/cvmx-helper-pki.c
> create mode 100644 arch/mips/mach-octeon/cvmx-helper-pko.c
> create mode 100644 arch/mips/mach-octeon/cvmx-helper-pko3.c
> create mode 100644 arch/mips/mach-octeon/cvmx-helper-rgmii.c
> create mode 100644 arch/mips/mach-octeon/cvmx-helper-sfp.c
> create mode 100644 arch/mips/mach-octeon/cvmx-helper-sgmii.c
> create mode 100644 arch/mips/mach-octeon/cvmx-helper-xaui.c
> create mode 100644 arch/mips/mach-octeon/cvmx-ilk.c
> create mode 100644 arch/mips/mach-octeon/cvmx-ipd.c
> create mode 100644 arch/mips/mach-octeon/cvmx-pki-resources.c
> create mode 100644 arch/mips/mach-octeon/cvmx-pki.c
> create mode 100644 arch/mips/mach-octeon/cvmx-pko-internal-ports-
> range.c
> create mode 100644 arch/mips/mach-octeon/cvmx-pko.c
> create mode 100644 arch/mips/mach-octeon/cvmx-pko3-compat.c
> create mode 100644 arch/mips/mach-octeon/cvmx-pko3-queue.c
> create mode 100644 arch/mips/mach-octeon/cvmx-pko3-resources.c
> create mode 100644 arch/mips/mach-octeon/cvmx-pko3.c
> create mode 100644 arch/mips/mach-octeon/cvmx-qlm-tables.c
> create mode 100644 arch/mips/mach-octeon/cvmx-range.c
> create mode 100644 arch/mips/mach-octeon/include/mach/cvmx-agl.h
> create mode 100644 arch/mips/mach-octeon/include/mach/cvmx-config.h
> create mode 100644 arch/mips/mach-octeon/include/mach/cvmx-fau.h
> create mode 100644 arch/mips/mach-octeon/include/mach/cvmx-ilk-
> defs.h
> create mode 100644 arch/mips/mach-octeon/include/mach/cvmx-iob-
> defs.h
> create mode 100644 arch/mips/mach-octeon/include/mach/cvmx-lbk-
> defs.h
> create mode 100644 arch/mips/mach-octeon/include/mach/cvmx-mdio.h
> create mode 100644 arch/mips/mach-octeon/include/mach/cvmx-npei-
> defs.h
> create mode 100644 arch/mips/mach-octeon/include/mach/cvmx-pcsxx-
> defs.h
> create mode 100644 arch/mips/mach-octeon/include/mach/cvmx-pki-
> cluster.h
> create mode 100644 arch/mips/mach-octeon/include/mach/cvmx-pko.h
> create mode 100644 arch/mips/mach-octeon/include/mach/cvmx-pko3-
> resources.h
> create mode 100644 arch/mips/mach-octeon/include/mach/cvmx-pko3.h
> create mode 100644 arch/mips/mach-octeon/include/mach/cvmx-range.h
> create mode 100644 arch/mips/mach-octeon/include/mach/cvmx-xcv-
> defs.h
> create mode 100644 drivers/net/octeon/Makefile
> create mode 100644 drivers/net/octeon/octeon_eth.c
> create mode 100644 drivers/net/octeon/octeon_mdio.c
>
--
- Daniel
^ permalink raw reply [flat|nested] 52+ messages in thread* Re: [PATCH 00/52] mips: octeon: Add ethernet support
2022-03-30 23:56 ` Daniel Schwierzeck
@ 2022-03-31 5:27 ` Stefan Roese
0 siblings, 0 replies; 52+ messages in thread
From: Stefan Roese @ 2022-03-31 5:27 UTC (permalink / raw)
To: Daniel Schwierzeck, u-boot; +Cc: awilliams, cchavva
Hi Daniel,
On 3/31/22 01:56, Daniel Schwierzeck wrote:
> Am Mittwoch, dem 30.03.2022 um 12:06 +0200 schrieb Stefan Roese:
>> This patchset adds the networking files and drivers including device
>> helper headers and C files. Please excuse the massive amount of files
>> in this patch series. Also the sometimes huge files (mostly headers
>> with register definitions) that I needed to include.
>>
>> The infrastructure code with all the headers is ported mostly without
>> any intended functional changes from the 2013 Cavium / Marvell U-Boot
>> version. It has undergone many hours of extensive code cleanup and
>> reformatting. Some of it done by using tools (checkpatch, Lindent,
>> clang
>> format etc) and also some of it done manually, as I couldn't find
>> some
>> tools that could do the needed work in a reliable and functional way.
>> The result is that checkpatch now only throws a "few" warnings that
>> are
>> left. Some of those can't be removed without an even more extensive
>> cleanup / rewrite of the code, like the addition of typedefs.
>>
>> The added header, helper and infrastructure files in the first part
>> of
>> the patch-series (patches 1-43) are the foundation, that is used by
>> the
>> main Octeon U-Boot ethernet driver (patch 50/52). Patches 47-49 add
>> the
>> DT nodes and properties to the corresponding dtsi / dts files.
>> Patches
>> 51 & 52 finally enable the ethernet support on both MIPS Octeon boards,
>> EBB7304 & NIC23.
>>
>> All this is tested on the 2 Cavium / Marvell MIPS Octeon boards:
>> EBB7304 & NIC23
>>
>> This patchset including the small Marvell PHY patches is available in
>> this gitlab branch:
>>
>> https://source.denx.de/u-boot/custodians/u-boot-marvell/-/tree/mips-octeon-ethernet-v1-2022-03-30
>>
>> Thanks,
>> Stefan
>>
>> Aaron Williams (40):
>> mips: octeon: Add misc cvmx-* header files
>> mips: octeon: Add cvmx-ilk-defs.h header file
>> mips: octeon: Add cvmx-iob-defs.h header file
>> mips: octeon: Add cvmx-lbk-defs.h header file
>> mips: octeon: Add cvmx-npei-defs.h header file
>> mips: octeon: Add cvmx-pcsxx-defs.h header file
>> mips: octeon: Add cvmx-xcv-defs.h header file
>> mips: octeon: Add cvmx-helper-agl.c
>> mips: octeon: Add cvmx-helper-bgx.c
>> mips: octeon: Add cvmx-helper-board.c
>> mips: octeon: Add cvmx-helper-fpa.c
>> mips: octeon: Add cvmx-helper-igl.c
>> mips: octeon: Add cvmx-helper-ipd.c
>> mips: octeon: Add cvmx-helper-loop.c
>> mips: octeon: Add cvmx-helper-npi.c
>> mips: octeon: Add cvmx-helper-pki.c
>> mips: octeon: Add cvmx-helper-pko.c
>> mips: octeon: Add cvmx-helper-pko3.c
>> mips: octeon: Add cvmx-helper-rgmii.c
>> mips: octeon: Add cvmx-helper-sgmii.c
>> mips: octeon: Add cvmx-helper-sfp.c
>> mips: octeon: Add cvmx-helper-xaui.c
>> mips: octeon: Add cvmx-agl.c
>> mips: octeon: Add cvmx-cmd-queue.c
>> mips: octeon: Add cvmx-fau-compat.c
>> mips: octeon: Add cvmx-fpa.c
>> mips: octeon: Add cvmx-fpa-resource.c
>> mips: octeon: Add cvmx-global-resource.c
>> mips: octeon: Add cvmx-ilk.c
>> mips: octeon: Add cvmx-ipd.c
>> mips: octeon: Add cvmx-pki.c
>> mips: octeon: Add cvmx-pki-resources.c
>> mips: octeon: Add cvmx-pko.c
>> mips: octeon: Add cvmx-pko3.c
>> mips: octeon: Add cvmx-pko3-queue.c
>> mips: octeon: Add cvmx-pko3-compat.c
>> mips: octeon: Add cvmx-pko3-resources.c
>> mips: octeon: Add cvmx-pko-internal-ports-range.c
>> mips: octeon: Add cvmx-qlm-tables.c
>> mips: octeon: Add cvmx-range.c
>
> are those 10 millions helper functions really used by the ethernet
> driver? Do you really need features like SFP modules in U-Boot?
It's very hard to add the network support for those SoCs with their
quite complex devices and interfaces without using this proven code.
I agree, that this is not really appealing. And at least the NIC23
only supports network via SFP modules, so there is no alternative
interface here.
> Maybe it helps to have a look at u-boot.map to see which functions are
> unused and are discarded by the linker. Those functions could be
> actually removed to reduce the LoC count ;)
Good idea, thanks. I was looking for something like this, but never got
the idea to actually look at the u-boot.map file to detect the unused
functions, so that I can remove them. I'll work on this to get the LoC
down a bit. ;)
Thanks,
Stefan
>>
>> Stefan Roese (12):
>> mips: octeon: Misc changes to existing headers for upcoming eth
>> support
>> mips: octeon: Misc changes to existing C files for upcoming eth
>> support
>> mips: octeon: Makefile: Enable building of the newly added C files
>> mips: octeon: cpu.c: Move bootmem init to arch_early_init_r()
>> mips: octeon: cpu.c: Implement configure_lmtdma_window()
>> mips: octeon: octeon_common.h: Move init SP because of increased
>> image
>> size
>> mips: octeon: mrvl,cn73xx.dtsi: Add ethernet (BGX) and SMI DT nodes
>> mips: octeon: mrvl,octeon-ebb7304.dts: Add ethernet DT support
>> mips: octeon: mrvl,octeon-nic23.dts: Add ethernet DT support
>> net: Add ethernet support for MIPS Octeon
>> mips: octeon: ebb7304: Enable ethernet support
>> mips: octeon: nic23: Enable ethernet support
>>
>> arch/mips/Kconfig | 1 +
>> arch/mips/dts/mrvl,cn73xx.dtsi | 35 +
>> arch/mips/dts/mrvl,octeon-ebb7304.dts | 45 +
>> arch/mips/dts/mrvl,octeon-nic23.dts | 238 ++
>> arch/mips/mach-octeon/Makefile | 35 +-
>> arch/mips/mach-octeon/cpu.c | 47 +-
>> arch/mips/mach-octeon/cvmx-agl.c | 216 +
>> arch/mips/mach-octeon/cvmx-bootmem.c | 3 +-
>> arch/mips/mach-octeon/cvmx-cmd-queue.c | 449 +++
>> arch/mips/mach-octeon/cvmx-fau-compat.c | 53 +
>> arch/mips/mach-octeon/cvmx-fpa-resource.c | 305 ++
>> arch/mips/mach-octeon/cvmx-fpa.c | 1672 ++++++++
>> arch/mips/mach-octeon/cvmx-global-resources.c | 639 +++
>> arch/mips/mach-octeon/cvmx-helper-agl.c | 231 ++
>> arch/mips/mach-octeon/cvmx-helper-bgx.c | 3215 +++++++++++++++
>> arch/mips/mach-octeon/cvmx-helper-board.c | 2030 ++++++++++
>> arch/mips/mach-octeon/cvmx-helper-cfg.c | 67 +-
>> arch/mips/mach-octeon/cvmx-helper-fdt.c | 645 ++-
>> arch/mips/mach-octeon/cvmx-helper-fpa.c | 329 ++
>> arch/mips/mach-octeon/cvmx-helper-ilk.c | 926 +++++
>> arch/mips/mach-octeon/cvmx-helper-ipd.c | 313 ++
>> arch/mips/mach-octeon/cvmx-helper-loop.c | 178 +
>> arch/mips/mach-octeon/cvmx-helper-npi.c | 146 +
>> arch/mips/mach-octeon/cvmx-helper-pki.c | 2156 ++++++++++
>> arch/mips/mach-octeon/cvmx-helper-pko.c | 312 ++
>> arch/mips/mach-octeon/cvmx-helper-pko3.c | 1252 ++++++
>> arch/mips/mach-octeon/cvmx-helper-rgmii.c | 431 ++
>> arch/mips/mach-octeon/cvmx-helper-sfp.c | 1877 +++++++++
>> arch/mips/mach-octeon/cvmx-helper-sgmii.c | 781 ++++
>> arch/mips/mach-octeon/cvmx-helper-xaui.c | 587 +++
>> arch/mips/mach-octeon/cvmx-helper.c | 45 +-
>> arch/mips/mach-octeon/cvmx-ilk.c | 1618 ++++++++
>> arch/mips/mach-octeon/cvmx-ipd.c | 690 ++++
>> arch/mips/mach-octeon/cvmx-pki-resources.c | 519 +++
>> arch/mips/mach-octeon/cvmx-pki.c | 1619 ++++++++
>> .../cvmx-pko-internal-ports-range.c | 164 +
>> arch/mips/mach-octeon/cvmx-pko.c | 1110 ++++++
>> arch/mips/mach-octeon/cvmx-pko3-compat.c | 656 +++
>> arch/mips/mach-octeon/cvmx-pko3-queue.c | 1331 ++++++
>> arch/mips/mach-octeon/cvmx-pko3-resources.c | 229 ++
>> arch/mips/mach-octeon/cvmx-pko3.c | 2143 ++++++++++
>> arch/mips/mach-octeon/cvmx-qlm-tables.c | 292 ++
>> arch/mips/mach-octeon/cvmx-range.c | 344 ++
>> arch/mips/mach-octeon/include/mach/cvmx-agl.h | 45 +
>> .../mach-octeon/include/mach/cvmx-bootmem.h | 3 +-
>> .../mach-octeon/include/mach/cvmx-config.h | 128 +
>> arch/mips/mach-octeon/include/mach/cvmx-fau.h | 581 +++
>> arch/mips/mach-octeon/include/mach/cvmx-fpa.h | 3 +-
>> .../mips/mach-octeon/include/mach/cvmx-fpa3.h | 37 -
>> .../include/mach/cvmx-helper-board.h | 6 +-
>> .../include/mach/cvmx-helper-fdt.h | 40 +-
>> .../include/mach/cvmx-helper-pko.h | 2 +-
>> .../mach-octeon/include/mach/cvmx-helper.h | 20 +
>> .../mach-octeon/include/mach/cvmx-ilk-defs.h | 2269 +++++++++++
>> .../mach-octeon/include/mach/cvmx-iob-defs.h | 1328 ++++++
>> .../mach-octeon/include/mach/cvmx-lbk-defs.h | 157 +
>> .../mips/mach-octeon/include/mach/cvmx-mdio.h | 516 +++
>> .../mach-octeon/include/mach/cvmx-npei-defs.h | 3550
>> +++++++++++++++++
>> .../include/mach/cvmx-pcsxx-defs.h | 787 ++++
>> .../include/mach/cvmx-pki-cluster.h | 343 ++
>> arch/mips/mach-octeon/include/mach/cvmx-pko.h | 213 +
>> .../include/mach/cvmx-pko3-resources.h | 36 +
>> .../mips/mach-octeon/include/mach/cvmx-pko3.h | 1052 +++++
>> .../mach-octeon/include/mach/cvmx-range.h | 23 +
>> .../mips/mach-octeon/include/mach/cvmx-regs.h | 100 +-
>> .../mach-octeon/include/mach/cvmx-xcv-defs.h | 226 ++
>> .../mach-octeon/include/mach/octeon_eth.h | 54 +-
>> board/Marvell/octeon_nic23/board.c | 87 +-
>> configs/octeon_ebb7304_defconfig | 7 +
>> configs/octeon_nic23_defconfig | 10 +-
>> drivers/net/Kconfig | 7 +
>> drivers/net/Makefile | 1 +
>> drivers/net/octeon/Makefile | 6 +
>> drivers/net/octeon/octeon_eth.c | 1060 +++++
>> drivers/net/octeon/octeon_mdio.c | 226 ++
>> include/configs/octeon_common.h | 2 +-
>> include/configs/octeon_ebb7304.h | 2 +
>> 77 files changed, 42315 insertions(+), 586 deletions(-)
>> create mode 100644 arch/mips/mach-octeon/cvmx-agl.c
>> create mode 100644 arch/mips/mach-octeon/cvmx-cmd-queue.c
>> create mode 100644 arch/mips/mach-octeon/cvmx-fau-compat.c
>> create mode 100644 arch/mips/mach-octeon/cvmx-fpa-resource.c
>> create mode 100644 arch/mips/mach-octeon/cvmx-fpa.c
>> create mode 100644 arch/mips/mach-octeon/cvmx-global-resources.c
>> create mode 100644 arch/mips/mach-octeon/cvmx-helper-agl.c
>> create mode 100644 arch/mips/mach-octeon/cvmx-helper-bgx.c
>> create mode 100644 arch/mips/mach-octeon/cvmx-helper-board.c
>> create mode 100644 arch/mips/mach-octeon/cvmx-helper-fpa.c
>> create mode 100644 arch/mips/mach-octeon/cvmx-helper-ilk.c
>> create mode 100644 arch/mips/mach-octeon/cvmx-helper-ipd.c
>> create mode 100644 arch/mips/mach-octeon/cvmx-helper-loop.c
>> create mode 100644 arch/mips/mach-octeon/cvmx-helper-npi.c
>> create mode 100644 arch/mips/mach-octeon/cvmx-helper-pki.c
>> create mode 100644 arch/mips/mach-octeon/cvmx-helper-pko.c
>> create mode 100644 arch/mips/mach-octeon/cvmx-helper-pko3.c
>> create mode 100644 arch/mips/mach-octeon/cvmx-helper-rgmii.c
>> create mode 100644 arch/mips/mach-octeon/cvmx-helper-sfp.c
>> create mode 100644 arch/mips/mach-octeon/cvmx-helper-sgmii.c
>> create mode 100644 arch/mips/mach-octeon/cvmx-helper-xaui.c
>> create mode 100644 arch/mips/mach-octeon/cvmx-ilk.c
>> create mode 100644 arch/mips/mach-octeon/cvmx-ipd.c
>> create mode 100644 arch/mips/mach-octeon/cvmx-pki-resources.c
>> create mode 100644 arch/mips/mach-octeon/cvmx-pki.c
>> create mode 100644 arch/mips/mach-octeon/cvmx-pko-internal-ports-
>> range.c
>> create mode 100644 arch/mips/mach-octeon/cvmx-pko.c
>> create mode 100644 arch/mips/mach-octeon/cvmx-pko3-compat.c
>> create mode 100644 arch/mips/mach-octeon/cvmx-pko3-queue.c
>> create mode 100644 arch/mips/mach-octeon/cvmx-pko3-resources.c
>> create mode 100644 arch/mips/mach-octeon/cvmx-pko3.c
>> create mode 100644 arch/mips/mach-octeon/cvmx-qlm-tables.c
>> create mode 100644 arch/mips/mach-octeon/cvmx-range.c
>> create mode 100644 arch/mips/mach-octeon/include/mach/cvmx-agl.h
>> create mode 100644 arch/mips/mach-octeon/include/mach/cvmx-config.h
>> create mode 100644 arch/mips/mach-octeon/include/mach/cvmx-fau.h
>> create mode 100644 arch/mips/mach-octeon/include/mach/cvmx-ilk-
>> defs.h
>> create mode 100644 arch/mips/mach-octeon/include/mach/cvmx-iob-
>> defs.h
>> create mode 100644 arch/mips/mach-octeon/include/mach/cvmx-lbk-
>> defs.h
>> create mode 100644 arch/mips/mach-octeon/include/mach/cvmx-mdio.h
>> create mode 100644 arch/mips/mach-octeon/include/mach/cvmx-npei-
>> defs.h
>> create mode 100644 arch/mips/mach-octeon/include/mach/cvmx-pcsxx-
>> defs.h
>> create mode 100644 arch/mips/mach-octeon/include/mach/cvmx-pki-
>> cluster.h
>> create mode 100644 arch/mips/mach-octeon/include/mach/cvmx-pko.h
>> create mode 100644 arch/mips/mach-octeon/include/mach/cvmx-pko3-
>> resources.h
>> create mode 100644 arch/mips/mach-octeon/include/mach/cvmx-pko3.h
>> create mode 100644 arch/mips/mach-octeon/include/mach/cvmx-range.h
>> create mode 100644 arch/mips/mach-octeon/include/mach/cvmx-xcv-
>> defs.h
>> create mode 100644 drivers/net/octeon/Makefile
>> create mode 100644 drivers/net/octeon/octeon_eth.c
>> create mode 100644 drivers/net/octeon/octeon_mdio.c
>>
Viele Grüße,
Stefan Roese
--
DENX Software Engineering GmbH, Managing Director: Wolfgang Denk
HRB 165235 Munich, Office: Kirchenstr.5, D-82194 Groebenzell, Germany
Phone: (+49)-8142-66989-51 Fax: (+49)-8142-66989-80 Email: sr@denx.de
^ permalink raw reply [flat|nested] 52+ messages in thread