* [PATCH net-next 1/4] net: rnpgbe: Add interrupt handling
2026-03-25 9:12 [PATCH net-next 0/4] net: rnpgbe: Add TX/RX and link status support Dong Yibo
@ 2026-03-25 9:12 ` Dong Yibo
2026-03-25 9:12 ` [PATCH net-next 2/4] net: rnpgbe: Add basic TX packet transmission support Dong Yibo
` (2 subsequent siblings)
3 siblings, 0 replies; 6+ messages in thread
From: Dong Yibo @ 2026-03-25 9:12 UTC (permalink / raw)
To: andrew+netdev, davem, edumazet, kuba, pabeni, danishanwar
Cc: linux-kernel, netdev, dong100
Add comprehensive interrupt handling for the RNPGBE driver:
- Implement MSI-X/legacy interrupt configuration and management
- Create library functions for interrupt registration and cleanup
This infrastructure enables proper interrupt handling for the RNPGBE driver.
Signed-off-by: Dong Yibo <dong100@mucse.com>
---
drivers/net/ethernet/mucse/rnpgbe/Makefile | 3 +-
drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h | 44 ++
.../net/ethernet/mucse/rnpgbe/rnpgbe_chip.c | 4 +
drivers/net/ethernet/mucse/rnpgbe/rnpgbe_hw.h | 4 +
.../net/ethernet/mucse/rnpgbe/rnpgbe_lib.c | 603 ++++++++++++++++++
.../net/ethernet/mucse/rnpgbe/rnpgbe_lib.h | 33 +
.../net/ethernet/mucse/rnpgbe/rnpgbe_main.c | 43 +-
7 files changed, 732 insertions(+), 2 deletions(-)
create mode 100644 drivers/net/ethernet/mucse/rnpgbe/rnpgbe_lib.c
create mode 100644 drivers/net/ethernet/mucse/rnpgbe/rnpgbe_lib.h
diff --git a/drivers/net/ethernet/mucse/rnpgbe/Makefile b/drivers/net/ethernet/mucse/rnpgbe/Makefile
index de8bcb7772ab..17574cad392a 100644
--- a/drivers/net/ethernet/mucse/rnpgbe/Makefile
+++ b/drivers/net/ethernet/mucse/rnpgbe/Makefile
@@ -8,4 +8,5 @@ obj-$(CONFIG_MGBE) += rnpgbe.o
rnpgbe-objs := rnpgbe_main.o\
rnpgbe_chip.o\
rnpgbe_mbx.o\
- rnpgbe_mbx_fw.o
+ rnpgbe_mbx_fw.o\
+ rnpgbe_lib.o
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h
index 5b024f9f7e17..47cfaa6739f7 100644
--- a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h
@@ -6,6 +6,10 @@
#include <linux/types.h>
#include <linux/mutex.h>
+#include <linux/netdevice.h>
+#include <linux/if.h>
+
+#include "rnpgbe_hw.h"
enum rnpgbe_boards {
board_n500,
@@ -35,21 +39,61 @@ enum {
struct mucse_hw {
void __iomem *hw_addr;
+ void __iomem *ring_msix_base;
struct pci_dev *pdev;
struct mucse_mbx_info mbx;
int port;
u8 pfvfnum;
};
+struct mucse_ring {
+ struct mucse_ring *next;
+ struct mucse_q_vector *q_vector;
+ void __iomem *ring_addr;
+ void __iomem *irq_mask;
+ void __iomem *trig;
+ u8 queue_index;
+ /* hw ring idx */
+ u8 rnpgbe_queue_idx;
+} ____cacheline_internodealigned_in_smp;
+
+struct mucse_ring_container {
+ struct mucse_ring *ring;
+ u16 count;
+};
+
+struct mucse_q_vector {
+ struct mucse *mucse;
+ int v_idx;
+ struct mucse_ring_container rx, tx;
+ struct napi_struct napi;
+ char name[IFNAMSIZ + 18];
+ /* for dynamic allocation of rings associated with this q_vector */
+ struct mucse_ring ring[0] ____cacheline_internodealigned_in_smp;
+};
+
struct mucse_stats {
u64 tx_dropped;
};
+#define MAX_Q_VECTORS 8
+
struct mucse {
struct net_device *netdev;
struct pci_dev *pdev;
struct mucse_hw hw;
struct mucse_stats stats;
+#define M_FLAG_LEGACY_EN BIT(0)
+#define M_FLAG_MSI_EN BIT(1)
+#define M_FLAG_MSIX_SINGLE_EN BIT(2)
+#define M_FLAG_MSIX_EN BIT(3)
+ u32 flags;
+ struct mucse_ring *tx_ring[RNPGBE_MAX_QUEUES] ____cacheline_aligned_in_smp;
+ struct mucse_ring *rx_ring[RNPGBE_MAX_QUEUES] ____cacheline_aligned_in_smp;
+ struct mucse_q_vector *q_vector[MAX_Q_VECTORS];
+ int num_tx_queues;
+ int num_q_vectors;
+ int num_rx_queues;
};
int rnpgbe_get_permanent_mac(struct mucse_hw *hw, u8 *perm_addr);
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_chip.c b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_chip.c
index ebc7b3750157..921cc325a991 100644
--- a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_chip.c
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_chip.c
@@ -89,6 +89,8 @@ static void rnpgbe_init_n500(struct mucse_hw *hw)
{
struct mucse_mbx_info *mbx = &hw->mbx;
+ hw->ring_msix_base = hw->hw_addr + MUCSE_N500_RING_MSIX_BASE;
+
mbx->fwpf_ctrl_base = MUCSE_N500_FWPF_CTRL_BASE;
mbx->fwpf_shm_base = MUCSE_N500_FWPF_SHM_BASE;
}
@@ -104,6 +106,8 @@ static void rnpgbe_init_n210(struct mucse_hw *hw)
{
struct mucse_mbx_info *mbx = &hw->mbx;
+ hw->ring_msix_base = hw->hw_addr + MUCSE_N210_RING_MSIX_BASE;
+
mbx->fwpf_ctrl_base = MUCSE_N210_FWPF_CTRL_BASE;
mbx->fwpf_shm_base = MUCSE_N210_FWPF_SHM_BASE;
}
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_hw.h b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_hw.h
index e77e6bc3d3e3..bc2c27fa6e71 100644
--- a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_hw.h
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_hw.h
@@ -6,12 +6,16 @@
#define MUCSE_N500_FWPF_CTRL_BASE 0x28b00
#define MUCSE_N500_FWPF_SHM_BASE 0x2d000
+#define MUCSE_N500_RING_MSIX_BASE 0x28700
#define MUCSE_GBE_PFFW_MBX_CTRL_OFFSET 0x5500
#define MUCSE_GBE_FWPF_MBX_MASK_OFFSET 0x5700
#define MUCSE_N210_FWPF_CTRL_BASE 0x29400
#define MUCSE_N210_FWPF_SHM_BASE 0x2d900
+#define MUCSE_N210_RING_MSIX_BASE 0x29000
#define RNPGBE_DMA_AXI_EN 0x0010
+#define RNPGBE_LEGACY_TIME 0xd000
+#define RNPGBE_LEGACY_ENABLE 0xd004
#define RNPGBE_MAX_QUEUES 8
#endif /* _RNPGBE_HW_H */
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_lib.c b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_lib.c
new file mode 100644
index 000000000000..00943deff940
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_lib.c
@@ -0,0 +1,603 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2020 - 2025 Mucse Corporation. */
+
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+
+#include "rnpgbe_lib.h"
+#include "rnpgbe.h"
+
+/**
+ * rnpgbe_msix_other - Other irq handler
+ * @irq: irq num
+ * @data: private data
+ *
+ * @return: IRQ_HANDLED
+ **/
+static irqreturn_t rnpgbe_msix_other(int irq, void *data)
+{
+ return IRQ_HANDLED;
+}
+
+static void rnpgbe_irq_disable_queues(struct mucse_q_vector *q_vector)
+{
+ struct mucse_ring *ring;
+
+ /* tx/rx use one register, different bit */
+ mucse_for_each_ring(ring, q_vector->tx) {
+ writel(INT_VALID, ring->trig);
+ writel((RX_INT_MASK | TX_INT_MASK), ring->irq_mask);
+ }
+}
+
+static void rnpgbe_irq_enable_queues(struct mucse_q_vector *q_vector)
+{
+ struct mucse_ring *ring;
+
+ /* tx/rx use one register, different bit */
+ mucse_for_each_ring(ring, q_vector->tx) {
+ writel(0, ring->irq_mask);
+ writel(INT_VALID | TX_INT_MASK | RX_INT_MASK, ring->trig);
+ }
+}
+
+/**
+ * rnpgbe_poll - NAPI Rx polling callback
+ * @napi: structure for representing this polling device
+ * @budget: how many packets driver is allowed to clean
+ *
+ * @return: work done in this call
+ * This function is used for legacy and MSI, NAPI mode
+ **/
+static int rnpgbe_poll(struct napi_struct *napi, int budget)
+{
+ struct mucse_q_vector *q_vector =
+ container_of(napi, struct mucse_q_vector, napi);
+
+ rnpgbe_irq_enable_queues(q_vector);
+
+ return 0;
+}
+
+/**
+ * register_mbx_irq - Register mbx routine
+ * @mucse: pointer to private structure
+ *
+ * @return: 0 on success, negative on failure
+ **/
+int register_mbx_irq(struct mucse *mucse)
+{
+ struct net_device *netdev = mucse->netdev;
+ struct pci_dev *pdev = mucse->pdev;
+ int err = 0;
+
+ if (mucse->flags & M_FLAG_MSIX_EN) {
+ err = request_irq(pci_irq_vector(pdev, 0),
+ rnpgbe_msix_other, 0, netdev->name,
+ mucse);
+ }
+
+ return err;
+}
+
+/**
+ * remove_mbx_irq - Remove mbx routine
+ * @mucse: pointer to private structure
+ **/
+void remove_mbx_irq(struct mucse *mucse)
+{
+ struct pci_dev *pdev = mucse->pdev;
+
+ if (mucse->flags & M_FLAG_MSIX_EN)
+ free_irq(pci_irq_vector(pdev, 0), mucse);
+}
+
+/**
+ * rnpgbe_set_num_queues - Determine queues for device, feature dependent
+ * @mucse: pointer to private structure
+ *
+ * Determine tx/rx queue nums
+ **/
+static void rnpgbe_set_num_queues(struct mucse *mucse)
+{
+ /* start from 1 queue */
+ mucse->num_tx_queues = 1;
+ mucse->num_rx_queues = 1;
+}
+
+/**
+ * rnpgbe_set_interrupt_capability - Set MSI-X or MSI if supported
+ * @mucse: pointer to private structure
+ *
+ * Attempt to configure the interrupts using the best available
+ * capabilities of the hardware.
+ *
+ * @return: 0 on success, negative on failure
+ **/
+static int rnpgbe_set_interrupt_capability(struct mucse *mucse)
+{
+ int v_budget;
+
+ v_budget = min_t(int, mucse->num_tx_queues, mucse->num_rx_queues);
+ v_budget = min_t(int, v_budget, num_online_cpus());
+ /* add one vector for mbx */
+ v_budget += 1;
+ v_budget = pci_alloc_irq_vectors(mucse->pdev, 1, v_budget,
+ PCI_IRQ_ALL_TYPES);
+ if (v_budget < 0)
+ return v_budget;
+
+ if (mucse->pdev->msix_enabled) {
+ /* q_vector not include mbx */
+ if (v_budget > 1) {
+ mucse->flags |= M_FLAG_MSIX_EN;
+ mucse->num_q_vectors = v_budget - 1;
+ } else {
+ mucse->flags |= M_FLAG_MSIX_SINGLE_EN;
+ mucse->num_q_vectors = 1;
+ }
+ } else {
+ /* msi/legacy use only 1 irq */
+ mucse->num_q_vectors = 1;
+
+ if (mucse->pdev->msi_enabled)
+ mucse->flags |= M_FLAG_MSI_EN;
+ else
+ mucse->flags |= M_FLAG_LEGACY_EN;
+ }
+
+ return 0;
+}
+
+/**
+ * mucse_add_ring - Add ring to ring container
+ * @ring: ring to be added
+ * @head: ring container
+ **/
+static void mucse_add_ring(struct mucse_ring *ring,
+ struct mucse_ring_container *head)
+{
+ ring->next = head->ring;
+ head->ring = ring;
+ head->count++;
+}
+
+/**
+ * rnpgbe_alloc_q_vector - Allocate memory for a single interrupt vector
+ * @mucse: pointer to private structure
+ * @eth_queue_idx: queue_index idx for this q_vector
+ * @v_idx: index of vector used for this q_vector
+ * @r_idx: ring index to start from
+ * @r_count: number of rings (per type) to allocate
+ * @step: ring step
+ *
+ * @return: 0 on success. If allocation fails we return -ENOMEM.
+ **/
+static int rnpgbe_alloc_q_vector(struct mucse *mucse,
+ int eth_queue_idx, int v_idx, int r_idx,
+ int r_count, int step)
+{
+ int rxr_idx = r_idx, txr_idx = r_idx;
+ struct mucse_hw *hw = &mucse->hw;
+ struct mucse_q_vector *q_vector;
+ int txr_count, rxr_count, idx;
+ struct mucse_ring *ring;
+ int ring_count, size;
+
+ txr_count = r_count;
+ rxr_count = r_count;
+ ring_count = txr_count + rxr_count;
+ size = sizeof(struct mucse_q_vector) +
+ (sizeof(struct mucse_ring) * ring_count);
+
+ q_vector = kzalloc(size, GFP_KERNEL);
+ if (!q_vector)
+ return -ENOMEM;
+
+ netif_napi_add(mucse->netdev, &q_vector->napi, rnpgbe_poll);
+ /* tie q_vector and mucse together */
+ mucse->q_vector[v_idx] = q_vector;
+ q_vector->mucse = mucse;
+ q_vector->v_idx = v_idx;
+ /* if mbx use separate irq, we should add 1 */
+ if (mucse->flags & M_FLAG_MSIX_EN)
+ q_vector->v_idx++;
+
+ ring = q_vector->ring;
+
+ for (idx = 0; idx < txr_count; idx++) {
+ mucse_add_ring(ring, &q_vector->tx);
+ ring->queue_index = eth_queue_idx + idx;
+ ring->rnpgbe_queue_idx = txr_idx;
+ ring->ring_addr = hw->hw_addr + RING_OFFSET(txr_idx);
+ ring->irq_mask = ring->ring_addr + RNPGBE_DMA_INT_MASK;
+ ring->trig = ring->ring_addr + RNPGBE_DMA_INT_TRIG;
+ mucse->tx_ring[ring->queue_index] = ring;
+ txr_idx += step;
+ ring++;
+ }
+
+ for (idx = 0; idx < rxr_count; idx++) {
+ mucse_add_ring(ring, &q_vector->rx);
+ ring->queue_index = eth_queue_idx + idx;
+ ring->rnpgbe_queue_idx = rxr_idx;
+ ring->ring_addr = hw->hw_addr + RING_OFFSET(rxr_idx);
+ ring->irq_mask = ring->ring_addr + RNPGBE_DMA_INT_MASK;
+ ring->trig = ring->ring_addr + RNPGBE_DMA_INT_TRIG;
+ mucse->rx_ring[ring->queue_index] = ring;
+ rxr_idx += step;
+ ring++;
+ }
+
+ return 0;
+}
+
+/**
+ * rnpgbe_free_q_vector - Free memory allocated for specific interrupt vector
+ * @mucse: pointer to private structure
+ * @v_idx: index of vector to be freed
+ *
+ * This function frees the memory allocated to the q_vector. In addition if
+ * NAPI is enabled it will delete any references to the NAPI struct prior
+ * to freeing the q_vector.
+ **/
+static void rnpgbe_free_q_vector(struct mucse *mucse, int v_idx)
+{
+ struct mucse_q_vector *q_vector = mucse->q_vector[v_idx];
+ struct mucse_ring *ring;
+
+ mucse_for_each_ring(ring, q_vector->tx)
+ mucse->tx_ring[ring->queue_index] = NULL;
+ mucse_for_each_ring(ring, q_vector->rx)
+ mucse->rx_ring[ring->queue_index] = NULL;
+ mucse->q_vector[v_idx] = NULL;
+ netif_napi_del(&q_vector->napi);
+ kfree(q_vector);
+}
+
+/**
+ * rnpgbe_alloc_q_vectors - Allocate memory for interrupt vectors
+ * @mucse: pointer to private structure
+ *
+ * @return: 0 on success. If allocation fails we return -ENOMEM.
+ **/
+static int rnpgbe_alloc_q_vectors(struct mucse *mucse)
+{
+ int err, ring_cnt, v_remaing = mucse->num_q_vectors;
+ int r_remaing = min_t(int, mucse->num_tx_queues,
+ mucse->num_rx_queues);
+ int q_vector_nums = 0;
+ int eth_queue_idx = 0;
+ int ring_step = 1;
+ int ring_idx = 0;
+ int v_idx = 0;
+
+ for (; r_remaing > 0 && v_remaing > 0; v_remaing--) {
+ ring_cnt = DIV_ROUND_UP(r_remaing, v_remaing);
+ err = rnpgbe_alloc_q_vector(mucse, eth_queue_idx,
+ v_idx, ring_idx, ring_cnt,
+ ring_step);
+ if (err)
+ goto err_free_q_vector;
+ ring_idx += ring_step * ring_cnt;
+ eth_queue_idx += ring_cnt;
+ r_remaing -= ring_cnt;
+ q_vector_nums++;
+ v_idx++;
+ }
+ /* Record the number of q_vectors actually used */
+ mucse->num_q_vectors = q_vector_nums;
+
+ return 0;
+
+err_free_q_vector:
+ mucse->num_tx_queues = 0;
+ mucse->num_rx_queues = 0;
+ mucse->num_q_vectors = 0;
+
+ while (v_idx--)
+ rnpgbe_free_q_vector(mucse, v_idx);
+
+ return err;
+}
+
+/**
+ * rnpgbe_reset_interrupt_capability - Reset irq capability setup
+ * @mucse: pointer to private structure
+ **/
+static void rnpgbe_reset_interrupt_capability(struct mucse *mucse)
+{
+ pci_free_irq_vectors(mucse->pdev);
+ mucse->flags &= ~(M_FLAG_MSIX_EN |
+ M_FLAG_MSIX_SINGLE_EN |
+ M_FLAG_MSI_EN |
+ M_FLAG_LEGACY_EN);
+}
+
+/**
+ * rnpgbe_init_interrupt_scheme - Determine proper interrupt scheme
+ * @mucse: pointer to private structure
+ *
+ * We determine which interrupt scheme to use based on...
+ * - Hardware queue count
+ * - cpu numbers
+ * - irq mode (msi/legacy force 1)
+ *
+ * @return: 0 on success, negative on failure
+ **/
+int rnpgbe_init_interrupt_scheme(struct mucse *mucse)
+{
+ int err;
+
+ rnpgbe_set_num_queues(mucse);
+
+ err = rnpgbe_set_interrupt_capability(mucse);
+ if (err)
+ return err;
+
+ err = rnpgbe_alloc_q_vectors(mucse);
+ if (err) {
+ rnpgbe_reset_interrupt_capability(mucse);
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * rnpgbe_free_q_vectors - Free memory allocated for interrupt vectors
+ * @mucse: pointer to private structure
+ *
+ * This function frees the memory allocated to the q_vectors. In addition if
+ * NAPI is enabled it will delete any references to the NAPI struct prior
+ * to freeing the q_vector.
+ **/
+static void rnpgbe_free_q_vectors(struct mucse *mucse)
+{
+ int v_idx = mucse->num_q_vectors;
+
+ mucse->num_rx_queues = 0;
+ mucse->num_tx_queues = 0;
+ mucse->num_q_vectors = 0;
+
+ while (v_idx--)
+ rnpgbe_free_q_vector(mucse, v_idx);
+}
+
+/**
+ * rnpgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings
+ * @mucse: pointer to private structure
+ *
+ * Clear interrupt specific resources and reset the structure
+ **/
+void rnpgbe_clear_interrupt_scheme(struct mucse *mucse)
+{
+ mucse->num_tx_queues = 0;
+ mucse->num_rx_queues = 0;
+ rnpgbe_free_q_vectors(mucse);
+ rnpgbe_reset_interrupt_capability(mucse);
+}
+
+/**
+ * rnpgbe_msix_clean_rings - Msix irq handler for ring irq
+ * @irq: irq num
+ * @data: private data
+ *
+ * rnpgbe_msix_clean_rings handles the irq from a ring and starts napi
+ * @return: IRQ_HANDLED
+ **/
+static irqreturn_t rnpgbe_msix_clean_rings(int irq, void *data)
+{
+ struct mucse_q_vector *q_vector = (struct mucse_q_vector *)data;
+
+ rnpgbe_irq_disable_queues(q_vector);
+ if (q_vector->rx.ring || q_vector->tx.ring)
+ napi_schedule_irqoff(&q_vector->napi);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * rnpgbe_intr - Msi/Legacy irq handler
+ * @irq: irq num
+ * @data: private data
+ * @return: IRQ_HANDLED
+ **/
+static irqreturn_t rnpgbe_intr(int irq, void *data)
+{
+ struct mucse *mucse = (struct mucse *)data;
+ struct mucse_q_vector *q_vector;
+
+ q_vector = mucse->q_vector[0];
+ rnpgbe_irq_disable_queues(q_vector);
+ if (q_vector->rx.ring || q_vector->tx.ring)
+ napi_schedule_irqoff(&q_vector->napi);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * rnpgbe_request_irq - Initialize interrupts
+ * @mucse: pointer to private structure
+ *
+ * Attempts to configure interrupts using the best available
+ * capabilities of the hardware and kernel.
+ *
+ * @return: 0 on success, negative value on failure
+ **/
+int rnpgbe_request_irq(struct mucse *mucse)
+{
+ struct net_device *netdev = mucse->netdev;
+ struct pci_dev *pdev = mucse->pdev;
+ struct mucse_hw *hw = &mucse->hw;
+ struct mucse_q_vector *q_vector;
+ int err, i;
+
+ if (mucse->flags & M_FLAG_MSIX_EN) {
+ for (i = 0; i < mucse->num_q_vectors; i++) {
+ q_vector = mucse->q_vector[i];
+
+ snprintf(q_vector->name, sizeof(q_vector->name) - 1,
+ "%s-%s-%d", netdev->name, "TxRx", i);
+
+ err = request_irq(pci_irq_vector(pdev, i + 1),
+ rnpgbe_msix_clean_rings, 0,
+ q_vector->name,
+ q_vector);
+ if (err) {
+ dev_err(&pdev->dev, "MSI-X req err %d: %d\n",
+ i + 1, err);
+ goto err_free_irqs;
+ }
+ }
+ } else {
+ /* Generic irq handler for all single-vector modes */
+ err = request_irq(pci_irq_vector(pdev, 0),
+ rnpgbe_intr, 0, netdev->name,
+ mucse);
+ if (err)
+ return err;
+ }
+
+ if (mucse->flags & M_FLAG_LEGACY_EN) {
+ mucse_hw_wr32(hw, RNPGBE_LEGACY_ENABLE, 1);
+ mucse_hw_wr32(hw, RNPGBE_LEGACY_TIME, 0x200);
+ } else {
+ mucse_hw_wr32(hw, RNPGBE_LEGACY_ENABLE, 0);
+ }
+
+ return 0;
+err_free_irqs:
+ while (i >= 0) {
+ i--;
+ q_vector = mucse->q_vector[i];
+ synchronize_irq(pci_irq_vector(pdev, i + 1));
+ free_irq(pci_irq_vector(pdev, i + 1), q_vector);
+ }
+
+ return err;
+}
+
+/**
+ * rnpgbe_free_irq - Free interrupts
+ * @mucse: pointer to private structure
+ *
+ * Attempts to free interrupts according to the initialized type.
+ **/
+void rnpgbe_free_irq(struct mucse *mucse)
+{
+ struct pci_dev *pdev = mucse->pdev;
+ struct mucse_q_vector *q_vector;
+
+ if (mucse->flags & M_FLAG_MSIX_EN) {
+ for (int i = 0; i < mucse->num_q_vectors; i++) {
+ q_vector = mucse->q_vector[i];
+ if (!q_vector)
+ continue;
+
+ free_irq(pci_irq_vector(pdev, i + 1), q_vector);
+ }
+ } else {
+ free_irq(pci_irq_vector(pdev, 0), mucse);
+ }
+}
+
+/**
+ * rnpgbe_set_ring_vector - Set the ring_vector registers,
+ * mapping interrupt causes to vectors
+ * @mucse: pointer to private structure
+ * @queue: queue to map the corresponding interrupt to
+ * @msix_vector: the vector num to map to the corresponding queue
+ *
+ */
+static void rnpgbe_set_ring_vector(struct mucse *mucse,
+ u8 queue, u8 msix_vector)
+{
+ struct mucse_hw *hw = &mucse->hw;
+ u32 data;
+
+ data = hw->pfvfnum << 24;
+ data |= (msix_vector << 8);
+ data |= msix_vector;
+ writel(data, hw->ring_msix_base + RING_VECTOR(queue));
+}
+
+/**
+ * rnpgbe_configure_msix - Configure MSI-X hardware
+ * @mucse: pointer to private structure
+ *
+ * rnpgbe_configure_msix sets up the hardware to properly generate MSI-X
+ * interrupts.
+ **/
+static void rnpgbe_configure_msix(struct mucse *mucse)
+{
+ struct mucse_q_vector *q_vector;
+
+ if (!(mucse->flags & (M_FLAG_MSIX_EN | M_FLAG_MSIX_SINGLE_EN)))
+ return;
+
+ for (int i = 0; i < mucse->num_q_vectors; i++) {
+ struct mucse_ring *ring;
+
+ q_vector = mucse->q_vector[i];
+ /* tx/rx use one register, different bit */
+ mucse_for_each_ring(ring, q_vector->tx) {
+ rnpgbe_set_ring_vector(mucse, ring->rnpgbe_queue_idx,
+ q_vector->v_idx);
+ }
+ }
+}
+
+static void rnpgbe_irq_enable(struct mucse *mucse)
+{
+ for (int i = 0; i < mucse->num_q_vectors; i++)
+ rnpgbe_irq_enable_queues(mucse->q_vector[i]);
+}
+
+/**
+ * rnpgbe_irq_disable - Mask off interrupt generation on the NIC
+ * @mucse: board private structure
+ **/
+void rnpgbe_irq_disable(struct mucse *mucse)
+{
+ struct pci_dev *pdev = mucse->pdev;
+
+ if (mucse->flags & M_FLAG_MSIX_EN) {
+ for (int i = 0; i < mucse->num_q_vectors; i++) {
+ rnpgbe_irq_disable_queues(mucse->q_vector[i]);
+ synchronize_irq(pci_irq_vector(pdev, i + 1));
+ }
+ } else {
+ rnpgbe_irq_disable_queues(mucse->q_vector[0]);
+ synchronize_irq(pci_irq_vector(pdev, 0));
+ }
+}
+
+static void rnpgbe_napi_enable_all(struct mucse *mucse)
+{
+ for (int i = 0; i < mucse->num_q_vectors; i++)
+ napi_enable(&mucse->q_vector[i]->napi);
+}
+
+static void rnpgbe_napi_disable_all(struct mucse *mucse)
+{
+ for (int i = 0; i < mucse->num_q_vectors; i++)
+ napi_disable(&mucse->q_vector[i]->napi);
+}
+
+void rnpgbe_down(struct mucse *mucse)
+{
+ rnpgbe_irq_disable(mucse);
+ rnpgbe_napi_disable_all(mucse);
+}
+
+/**
+ * rnpgbe_up_complete - Final step for port up
+ * @mucse: pointer to private structure
+ **/
+void rnpgbe_up_complete(struct mucse *mucse)
+{
+ rnpgbe_configure_msix(mucse);
+ rnpgbe_napi_enable_all(mucse);
+ rnpgbe_irq_enable(mucse);
+}
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_lib.h b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_lib.h
new file mode 100644
index 000000000000..8e8234209840
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_lib.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2020 - 2025 Mucse Corporation. */
+
+#ifndef _RNPGBE_LIB_H
+#define _RNPGBE_LIB_H
+
+struct mucse;
+
+#define RING_OFFSET(n) (0x1000 + 0x100 * (n))
+#define RNPGBE_DMA_INT_MASK 0x24
+#define TX_INT_MASK BIT(1)
+#define RX_INT_MASK BIT(0)
+#define INT_VALID (BIT(16) | BIT(17))
+#define RNPGBE_DMA_INT_TRIG 0x2c
+/* | 31:24 | .... | 15:8 | 7:0 | */
+/* | pfvfnum | | tx vector | rx vector | */
+#define RING_VECTOR(n) (0x04 * (n))
+
+#define mucse_for_each_ring(pos, head)\
+ for (typeof((head).ring) __pos = (head).ring;\
+ __pos ? ({ pos = __pos; 1; }) : 0;\
+ __pos = __pos->next)
+
+int rnpgbe_init_interrupt_scheme(struct mucse *mucse);
+void rnpgbe_clear_interrupt_scheme(struct mucse *mucse);
+int register_mbx_irq(struct mucse *mucse);
+void remove_mbx_irq(struct mucse *mucse);
+int rnpgbe_request_irq(struct mucse *mucse);
+void rnpgbe_free_irq(struct mucse *mucse);
+void rnpgbe_irq_disable(struct mucse *mucse);
+void rnpgbe_down(struct mucse *mucse);
+void rnpgbe_up_complete(struct mucse *mucse);
+#endif
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_main.c b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_main.c
index 316f941629d4..343c53d872a5 100644
--- a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_main.c
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_main.c
@@ -7,6 +7,7 @@
#include "rnpgbe.h"
#include "rnpgbe_hw.h"
+#include "rnpgbe_lib.h"
#include "rnpgbe_mbx_fw.h"
static const char rnpgbe_driver_name[] = "rnpgbe";
@@ -36,7 +37,24 @@ static struct pci_device_id rnpgbe_pci_tbl[] = {
**/
static int rnpgbe_open(struct net_device *netdev)
{
+ struct mucse *mucse = netdev_priv(netdev);
+ int err;
+
+ err = rnpgbe_request_irq(mucse);
+ if (err)
+ return err;
+
+ err = netif_set_real_num_queues(netdev, mucse->num_tx_queues,
+ mucse->num_rx_queues);
+ if (err)
+ goto err_free_irqs;
+
+ rnpgbe_up_complete(mucse);
+
return 0;
+err_free_irqs:
+ rnpgbe_free_irq(mucse);
+ return err;
}
/**
@@ -50,6 +68,11 @@ static int rnpgbe_open(struct net_device *netdev)
**/
static int rnpgbe_close(struct net_device *netdev)
{
+ struct mucse *mucse = netdev_priv(netdev);
+
+ rnpgbe_down(mucse);
+ rnpgbe_free_irq(mucse);
+
return 0;
}
@@ -166,11 +189,27 @@ static int rnpgbe_add_adapter(struct pci_dev *pdev,
goto err_powerdown;
}
+ err = rnpgbe_init_interrupt_scheme(mucse);
+ if (err) {
+ dev_err(&pdev->dev, "init interrupt failed %d\n", err);
+ goto err_powerdown;
+ }
+
+ err = register_mbx_irq(mucse);
+ if (err) {
+ dev_err(&pdev->dev, "register mbx irq failed %d\n", err);
+ goto err_clear_interrupt;
+ }
+
err = register_netdev(netdev);
if (err)
- goto err_powerdown;
+ goto err_free_mbx_irq;
return 0;
+err_free_mbx_irq:
+ remove_mbx_irq(mucse);
+err_clear_interrupt:
+ rnpgbe_clear_interrupt_scheme(mucse);
err_powerdown:
/* notify powerdown only powerup ok */
if (!err_notify) {
@@ -256,6 +295,8 @@ static void rnpgbe_rm_adapter(struct pci_dev *pdev)
err = rnpgbe_send_notify(hw, false, mucse_fw_powerup);
if (err)
dev_warn(&pdev->dev, "Send powerdown to hw failed %d\n", err);
+ remove_mbx_irq(mucse);
+ rnpgbe_clear_interrupt_scheme(mucse);
free_netdev(netdev);
}
--
2.25.1
^ permalink raw reply related [flat|nested] 6+ messages in thread* [PATCH net-next 2/4] net: rnpgbe: Add basic TX packet transmission support
2026-03-25 9:12 [PATCH net-next 0/4] net: rnpgbe: Add TX/RX and link status support Dong Yibo
2026-03-25 9:12 ` [PATCH net-next 1/4] net: rnpgbe: Add interrupt handling Dong Yibo
@ 2026-03-25 9:12 ` Dong Yibo
2026-03-25 9:12 ` [PATCH net-next 3/4] net: rnpgbe: Add RX packet reception support Dong Yibo
2026-03-25 9:12 ` [PATCH net-next 4/4] net: rnpgbe: Add link status handling support Dong Yibo
3 siblings, 0 replies; 6+ messages in thread
From: Dong Yibo @ 2026-03-25 9:12 UTC (permalink / raw)
To: andrew+netdev, davem, edumazet, kuba, pabeni, danishanwar
Cc: linux-kernel, netdev, dong100
Implement basic transmit path for the RNPGBE driver:
- Add TX descriptor structure (rnpgbe_tx_desc) and TX buffer management
- Implement rnpgbe_xmit_frame_ring() for packet transmission
- Add TX ring resource allocation and cleanup functions
- Implement TX completion handling via rnpgbe_clean_tx_irq()
- Implement statistics collection for TX packets/bytes
This enables basic packet transmission functionality for the RNPGBE driver.
Signed-off-by: Dong Yibo <dong100@mucse.com>
---
drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h | 74 +++
.../net/ethernet/mucse/rnpgbe/rnpgbe_chip.c | 4 +
drivers/net/ethernet/mucse/rnpgbe/rnpgbe_hw.h | 3 +
.../net/ethernet/mucse/rnpgbe/rnpgbe_lib.c | 553 ++++++++++++++++++
.../net/ethernet/mucse/rnpgbe/rnpgbe_lib.h | 26 +
.../net/ethernet/mucse/rnpgbe/rnpgbe_main.c | 33 +-
6 files changed, 689 insertions(+), 4 deletions(-)
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h
index 47cfaa6739f7..7d28ef3bdd86 100644
--- a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h
@@ -43,20 +43,83 @@ struct mucse_hw {
struct pci_dev *pdev;
struct mucse_mbx_info mbx;
int port;
+ u16 cycles_per_us;
u8 pfvfnum;
};
+struct rnpgbe_tx_desc {
+ __le64 pkt_addr; /* Packet buffer address */
+ union {
+ __le64 vlan_cmd_bsz;
+ struct {
+ __le32 blen_mac_ip_len;
+ __le32 vlan_cmd; /* vlan & cmd status */
+ };
+ };
+#define M_TXD_CMD_RS 0x040000 /* Report Status */
+#define M_TXD_STAT_DD 0x020000 /* Descriptor Done */
+#define M_TXD_CMD_EOP 0x010000 /* End of Packet */
+};
+
+#define M_TX_DESC(R, i) (&(((struct rnpgbe_tx_desc *)((R)->desc))[i]))
+
+struct mucse_tx_buffer {
+ struct rnpgbe_tx_desc *next_to_watch;
+ struct sk_buff *skb;
+ unsigned int bytecount;
+ unsigned short gso_segs;
+ DEFINE_DMA_UNMAP_ADDR(dma);
+ DEFINE_DMA_UNMAP_LEN(len);
+};
+
+struct mucse_queue_stats {
+ u64 packets;
+ u64 bytes;
+};
+
struct mucse_ring {
struct mucse_ring *next;
struct mucse_q_vector *q_vector;
+ struct net_device *netdev;
+ struct device *dev;
+ void *desc;
+ struct mucse_tx_buffer *tx_buffer_info;
void __iomem *ring_addr;
+ void __iomem *tail;
void __iomem *irq_mask;
void __iomem *trig;
u8 queue_index;
/* hw ring idx */
u8 rnpgbe_queue_idx;
+ u8 pfvfnum;
+ u16 count;
+ u16 next_to_use;
+ u16 next_to_clean;
+ dma_addr_t dma;
+ unsigned int size;
+ struct mucse_queue_stats stats;
+ struct u64_stats_sync syncp;
} ____cacheline_internodealigned_in_smp;
+static inline u16 mucse_desc_unused(struct mucse_ring *ring)
+{
+ u16 ntc = ring->next_to_clean;
+ u16 ntu = ring->next_to_use;
+
+ return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1;
+}
+
+static inline __le64 build_ctob(u32 vlan_cmd, u32 mac_ip_len, u32 size)
+{
+ return cpu_to_le64(((u64)vlan_cmd << 32) | ((u64)mac_ip_len << 16) |
+ ((u64)size));
+}
+
+static inline struct netdev_queue *txring_txq(const struct mucse_ring *ring)
+{
+ return netdev_get_tx_queue(ring->netdev, ring->queue_index);
+}
+
struct mucse_ring_container {
struct mucse_ring *ring;
u16 count;
@@ -78,6 +141,9 @@ struct mucse_stats {
#define MAX_Q_VECTORS 8
+#define M_DEFAULT_TXD 512
+#define M_DEFAULT_TX_WORK 256
+
struct mucse {
struct net_device *netdev;
struct pci_dev *pdev;
@@ -91,6 +157,8 @@ struct mucse {
struct mucse_ring *tx_ring[RNPGBE_MAX_QUEUES] ____cacheline_aligned_in_smp;
struct mucse_ring *rx_ring[RNPGBE_MAX_QUEUES] ____cacheline_aligned_in_smp;
struct mucse_q_vector *q_vector[MAX_Q_VECTORS];
+ int tx_ring_item_count;
+ int tx_work_limit;
int num_tx_queues;
int num_q_vectors;
int num_rx_queues;
@@ -112,4 +180,10 @@ int rnpgbe_init_hw(struct mucse_hw *hw, int board_type);
#define mucse_hw_wr32(hw, reg, val) \
writel((val), (hw)->hw_addr + (reg))
+#define mucse_hw_rd32(hw, reg) \
+ readl((hw)->hw_addr + (reg))
+#define mucse_ring_wr32(ring, reg, val) \
+ writel((val), (ring)->ring_addr + (reg))
+#define mucse_ring_rd32(ring, reg) \
+ readl((ring)->ring_addr + (reg))
#endif /* _RNPGBE_H */
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_chip.c b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_chip.c
index 921cc325a991..291e77d573fe 100644
--- a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_chip.c
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_chip.c
@@ -93,6 +93,8 @@ static void rnpgbe_init_n500(struct mucse_hw *hw)
mbx->fwpf_ctrl_base = MUCSE_N500_FWPF_CTRL_BASE;
mbx->fwpf_shm_base = MUCSE_N500_FWPF_SHM_BASE;
+
+ hw->cycles_per_us = M_DEFAULT_N500_MHZ;
}
/**
@@ -110,6 +112,8 @@ static void rnpgbe_init_n210(struct mucse_hw *hw)
mbx->fwpf_ctrl_base = MUCSE_N210_FWPF_CTRL_BASE;
mbx->fwpf_shm_base = MUCSE_N210_FWPF_SHM_BASE;
+
+ hw->cycles_per_us = M_DEFAULT_N210_MHZ;
}
/**
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_hw.h b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_hw.h
index bc2c27fa6e71..f060c39e9690 100644
--- a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_hw.h
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_hw.h
@@ -7,12 +7,15 @@
#define MUCSE_N500_FWPF_CTRL_BASE 0x28b00
#define MUCSE_N500_FWPF_SHM_BASE 0x2d000
#define MUCSE_N500_RING_MSIX_BASE 0x28700
+#define M_DEFAULT_N500_MHZ 125
#define MUCSE_GBE_PFFW_MBX_CTRL_OFFSET 0x5500
#define MUCSE_GBE_FWPF_MBX_MASK_OFFSET 0x5700
#define MUCSE_N210_FWPF_CTRL_BASE 0x29400
#define MUCSE_N210_FWPF_SHM_BASE 0x2d900
#define MUCSE_N210_RING_MSIX_BASE 0x29000
+#define M_DEFAULT_N210_MHZ 62
+#define TX_AXI_RW_EN 0xc
#define RNPGBE_DMA_AXI_EN 0x0010
#define RNPGBE_LEGACY_TIME 0xd000
#define RNPGBE_LEGACY_ENABLE 0xd004
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_lib.c b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_lib.c
index 00943deff940..9153e38fdd15 100644
--- a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_lib.c
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_lib.c
@@ -3,6 +3,7 @@
#include <linux/pci.h>
#include <linux/netdevice.h>
+#include <linux/vmalloc.h>
#include "rnpgbe_lib.h"
#include "rnpgbe.h"
@@ -41,6 +42,111 @@ static void rnpgbe_irq_enable_queues(struct mucse_q_vector *q_vector)
}
}
+/**
+ * rnpgbe_clean_tx_irq - Reclaim resources after transmit completes
+ * @q_vector: structure containing interrupt and ring information
+ * @tx_ring: tx ring to clean
+ * @napi_budget: Used to determine if we are in netpoll
+ *
+ * @return: true if no TX packets were cleaned
+ **/
+static bool rnpgbe_clean_tx_irq(struct mucse_q_vector *q_vector,
+ struct mucse_ring *tx_ring,
+ int napi_budget)
+{
+ int budget = q_vector->mucse->tx_work_limit;
+ u64 total_bytes = 0, total_packets = 0;
+ struct mucse_tx_buffer *tx_buffer;
+ struct rnpgbe_tx_desc *tx_desc;
+ int i = tx_ring->next_to_clean;
+
+ tx_buffer = &tx_ring->tx_buffer_info[i];
+ tx_desc = M_TX_DESC(tx_ring, i);
+ i -= tx_ring->count;
+
+ do {
+ struct rnpgbe_tx_desc *eop_desc = tx_buffer->next_to_watch;
+
+ /* if next_to_watch is not set then there is no work pending */
+ if (!eop_desc)
+ break;
+
+ /* prevent any other reads prior to eop_desc */
+ rmb();
+
+ /* if eop DD is not set pending work has not been completed */
+ if (!(eop_desc->vlan_cmd & cpu_to_le32(M_TXD_STAT_DD)))
+ break;
+ /* clear next_to_watch to prevent false hangs */
+ tx_buffer->next_to_watch = NULL;
+ total_bytes += tx_buffer->bytecount;
+ total_packets += tx_buffer->gso_segs;
+ napi_consume_skb(tx_buffer->skb, napi_budget);
+ dma_unmap_single(tx_ring->dev, dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len), DMA_TO_DEVICE);
+ tx_buffer->skb = NULL;
+ dma_unmap_len_set(tx_buffer, len, 0);
+
+ /* unmap remaining buffers */
+ while (tx_desc != eop_desc) {
+ tx_buffer++;
+ tx_desc++;
+ i++;
+ if (unlikely(!i)) {
+ i -= tx_ring->count;
+ tx_buffer = tx_ring->tx_buffer_info;
+ tx_desc = M_TX_DESC(tx_ring, 0);
+ }
+
+ /* unmap any remaining paged data */
+ if (dma_unmap_len(tx_buffer, len)) {
+ dma_unmap_page(tx_ring->dev,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
+ dma_unmap_len_set(tx_buffer, len, 0);
+ }
+ }
+
+ /* move us one more past the eop_desc for start of next pkt */
+ tx_buffer++;
+ tx_desc++;
+ i++;
+ if (unlikely(!i)) {
+ i -= tx_ring->count;
+ tx_buffer = tx_ring->tx_buffer_info;
+ tx_desc = M_TX_DESC(tx_ring, 0);
+ }
+
+ prefetch(tx_desc);
+ budget--;
+ } while (likely(budget > 0));
+ netdev_tx_completed_queue(txring_txq(tx_ring), total_packets,
+ total_bytes);
+ i += tx_ring->count;
+ tx_ring->next_to_clean = i;
+ u64_stats_update_begin(&tx_ring->syncp);
+ tx_ring->stats.bytes += total_bytes;
+ tx_ring->stats.packets += total_packets;
+ u64_stats_update_end(&tx_ring->syncp);
+
+#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
+ if (likely(netif_carrier_ok(tx_ring->netdev) &&
+ (mucse_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
+ /* Make sure that anybody stopping the queue after this
+ * sees the new next_to_clean.
+ */
+ smp_mb();
+ if (__netif_subqueue_stopped(tx_ring->netdev,
+ tx_ring->queue_index)) {
+ netif_wake_subqueue(tx_ring->netdev,
+ tx_ring->queue_index);
+ }
+ }
+
+ return total_bytes == 0;
+}
+
/**
* rnpgbe_poll - NAPI Rx polling callback
* @napi: structure for representing this polling device
@@ -53,6 +159,16 @@ static int rnpgbe_poll(struct napi_struct *napi, int budget)
{
struct mucse_q_vector *q_vector =
container_of(napi, struct mucse_q_vector, napi);
+ bool clean_complete = true;
+ struct mucse_ring *ring;
+
+ mucse_for_each_ring(ring, q_vector->tx) {
+ if (!rnpgbe_clean_tx_irq(q_vector, ring, budget))
+ clean_complete = false;
+ }
+
+ if (!clean_complete)
+ return budget;
rnpgbe_irq_enable_queues(q_vector);
@@ -206,12 +322,16 @@ static int rnpgbe_alloc_q_vector(struct mucse *mucse,
ring = q_vector->ring;
for (idx = 0; idx < txr_count; idx++) {
+ ring->dev = &mucse->pdev->dev;
mucse_add_ring(ring, &q_vector->tx);
+ ring->count = mucse->tx_ring_item_count;
+ ring->netdev = mucse->netdev;
ring->queue_index = eth_queue_idx + idx;
ring->rnpgbe_queue_idx = txr_idx;
ring->ring_addr = hw->hw_addr + RING_OFFSET(txr_idx);
ring->irq_mask = ring->ring_addr + RNPGBE_DMA_INT_MASK;
ring->trig = ring->ring_addr + RNPGBE_DMA_INT_TRIG;
+ ring->pfvfnum = hw->pfvfnum;
mucse->tx_ring[ring->queue_index] = ring;
txr_idx += step;
ring++;
@@ -585,9 +705,85 @@ static void rnpgbe_napi_disable_all(struct mucse *mucse)
napi_disable(&mucse->q_vector[i]->napi);
}
+/**
+ * rnpgbe_clean_tx_ring - Free Tx Buffers
+ * @tx_ring: ring to be cleaned
+ **/
+static void rnpgbe_clean_tx_ring(struct mucse_ring *tx_ring)
+{
+ u16 i = tx_ring->next_to_clean;
+ struct mucse_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
+ unsigned long size;
+
+ /* ring already cleared, nothing to do */
+ if (!tx_ring->tx_buffer_info)
+ return;
+
+ while (i != tx_ring->next_to_use) {
+ struct rnpgbe_tx_desc *eop_desc, *tx_desc;
+
+ dev_kfree_skb_any(tx_buffer->skb);
+ /* unmap skb header data */
+ if (dma_unmap_len(tx_buffer, len)) {
+ dma_unmap_single(tx_ring->dev, dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len), DMA_TO_DEVICE);
+ }
+ eop_desc = tx_buffer->next_to_watch;
+ tx_desc = M_TX_DESC(tx_ring, i);
+ /* unmap remaining buffers */
+ while (tx_desc != eop_desc) {
+ tx_buffer++;
+ tx_desc++;
+ i++;
+ if (unlikely(i == tx_ring->count)) {
+ i = 0;
+ tx_buffer = tx_ring->tx_buffer_info;
+ tx_desc = M_TX_DESC(tx_ring, 0);
+ }
+
+ /* unmap any remaining paged data */
+ if (dma_unmap_len(tx_buffer, len))
+ dma_unmap_page(tx_ring->dev,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
+ }
+ /* move us one more past the eop_desc for start of next pkt */
+ tx_buffer++;
+ i++;
+ if (unlikely(i == tx_ring->count)) {
+ i = 0;
+ tx_buffer = tx_ring->tx_buffer_info;
+ }
+ }
+
+ netdev_tx_reset_queue(txring_txq(tx_ring));
+ size = sizeof(struct mucse_tx_buffer) * tx_ring->count;
+ memset(tx_ring->tx_buffer_info, 0, size);
+ /* Zero out the descriptor ring */
+ memset(tx_ring->desc, 0, tx_ring->size);
+ tx_ring->next_to_use = 0;
+ tx_ring->next_to_clean = 0;
+}
+
+/**
+ * rnpgbe_clean_all_tx_rings - Free Tx Buffers for all queues
+ * @mucse: board private structure
+ **/
+static void rnpgbe_clean_all_tx_rings(struct mucse *mucse)
+{
+ for (int i = 0; i < mucse->num_tx_queues; i++)
+ rnpgbe_clean_tx_ring(mucse->tx_ring[i]);
+}
+
void rnpgbe_down(struct mucse *mucse)
{
+ struct net_device *netdev = mucse->netdev;
+
+ netif_tx_stop_all_queues(netdev);
+ rnpgbe_clean_all_tx_rings(mucse);
rnpgbe_irq_disable(mucse);
+ netif_tx_disable(netdev);
rnpgbe_napi_disable_all(mucse);
}
@@ -597,7 +793,364 @@ void rnpgbe_down(struct mucse *mucse)
**/
void rnpgbe_up_complete(struct mucse *mucse)
{
+ struct net_device *netdev = mucse->netdev;
+
rnpgbe_configure_msix(mucse);
rnpgbe_napi_enable_all(mucse);
rnpgbe_irq_enable(mucse);
+ netif_tx_start_all_queues(netdev);
+}
+
+/**
+ * rnpgbe_free_tx_resources - Free Tx Resources per Queue
+ * @tx_ring: tx descriptor ring for a specific queue
+ *
+ * Free all transmit software resources
+ **/
+static void rnpgbe_free_tx_resources(struct mucse_ring *tx_ring)
+{
+ rnpgbe_clean_tx_ring(tx_ring);
+ vfree(tx_ring->tx_buffer_info);
+ tx_ring->tx_buffer_info = NULL;
+ /* if not set, then don't free */
+ if (!tx_ring->desc)
+ return;
+
+ dma_free_coherent(tx_ring->dev, tx_ring->size, tx_ring->desc,
+ tx_ring->dma);
+ tx_ring->desc = NULL;
+}
+
+/**
+ * rnpgbe_setup_tx_resources - allocate Tx resources (Descriptors)
+ * @tx_ring: tx descriptor ring (for a specific queue) to setup
+ * @mucse: pointer to private structure
+ *
+ * @return: 0 on success, negative on failure
+ **/
+static int rnpgbe_setup_tx_resources(struct mucse_ring *tx_ring,
+ struct mucse *mucse)
+{
+ struct device *dev = tx_ring->dev;
+ int size;
+
+ size = sizeof(struct mucse_tx_buffer) * tx_ring->count;
+
+ tx_ring->tx_buffer_info = vzalloc(size);
+ if (!tx_ring->tx_buffer_info)
+ goto err_return;
+ /* round up to nearest 4K */
+ tx_ring->size = tx_ring->count * sizeof(struct rnpgbe_tx_desc);
+ tx_ring->size = ALIGN(tx_ring->size, 4096);
+ tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, &tx_ring->dma,
+ GFP_KERNEL);
+ if (!tx_ring->desc)
+ goto err_free_buffer;
+
+ tx_ring->next_to_use = 0;
+ tx_ring->next_to_clean = 0;
+
+ return 0;
+
+err_free_buffer:
+ vfree(tx_ring->tx_buffer_info);
+err_return:
+ tx_ring->tx_buffer_info = NULL;
+ return -ENOMEM;
+}
+
+/**
+ * rnpgbe_configure_tx_ring - Configure Tx ring after Reset
+ * @mucse: pointer to private structure
+ * @ring: structure containing ring specific data
+ *
+ * Configure the Tx descriptor ring after a reset.
+ **/
+static void rnpgbe_configure_tx_ring(struct mucse *mucse,
+ struct mucse_ring *ring)
+{
+ struct mucse_hw *hw = &mucse->hw;
+
+ mucse_ring_wr32(ring, RNPGBE_TX_START, 0);
+ mucse_ring_wr32(ring, RNPGBE_TX_BASE_ADDR_LO, (u32)ring->dma);
+ mucse_ring_wr32(ring, RNPGBE_TX_BASE_ADDR_HI,
+ (u32)(((u64)ring->dma) >> 32) | (hw->pfvfnum << 24));
+ mucse_ring_wr32(ring, RNPGBE_TX_LEN, ring->count);
+ ring->next_to_clean = mucse_ring_rd32(ring, RNPGBE_TX_HEAD);
+ ring->next_to_use = ring->next_to_clean;
+ ring->tail = ring->ring_addr + RNPGBE_TX_TAIL;
+ writel(ring->next_to_use, ring->tail);
+ mucse_ring_wr32(ring, RNPGBE_TX_FETCH_CTRL, M_DEFAULT_TX_FETCH);
+ mucse_ring_wr32(ring, RNPGBE_TX_INT_TIMER,
+ M_DEFAULT_INT_TIMER * hw->cycles_per_us);
+ mucse_ring_wr32(ring, RNPGBE_TX_INT_PKTCNT, M_DEFAULT_INT_PKTCNT);
+ /* Ensure all config is written before enabling queue */
+ wmb();
+ mucse_ring_wr32(ring, RNPGBE_TX_START, 1);
+}
+
+/**
+ * rnpgbe_configure_tx - Configure Transmit Unit after Reset
+ * @mucse: pointer to private structure
+ *
+ * Configure the Tx DMA after a reset.
+ **/
+void rnpgbe_configure_tx(struct mucse *mucse)
+{
+ struct mucse_hw *hw = &mucse->hw;
+ u32 i, dma_axi_ctl;
+
+ dma_axi_ctl = mucse_hw_rd32(hw, RNPGBE_DMA_AXI_EN);
+ dma_axi_ctl |= TX_AXI_RW_EN;
+ mucse_hw_wr32(hw, RNPGBE_DMA_AXI_EN, dma_axi_ctl);
+ /* Setup the HW Tx Head and Tail descriptor pointers */
+ for (i = 0; i < mucse->num_tx_queues; i++)
+ rnpgbe_configure_tx_ring(mucse, mucse->tx_ring[i]);
+}
+
+/**
+ * rnpgbe_setup_all_tx_resources - allocate all queues Tx resources
+ * @mucse: pointer to private structure
+ *
+ * Allocate memory for tx_ring.
+ *
+ * @return: 0 on success, negative on failure
+ **/
+int rnpgbe_setup_all_tx_resources(struct mucse *mucse)
+{
+ int i, err = 0;
+
+ for (i = 0; i < mucse->num_tx_queues; i++) {
+ err = rnpgbe_setup_tx_resources(mucse->tx_ring[i], mucse);
+ if (!err)
+ continue;
+
+ goto err_free_res;
+ }
+
+ return 0;
+err_free_res:
+ while (i--)
+ rnpgbe_free_tx_resources(mucse->tx_ring[i]);
+ return err;
+}
+
+/**
+ * rnpgbe_free_all_tx_resources - Free Tx Resources for All Queues
+ * @mucse: pointer to private structure
+ *
+ * Free all transmit software resources
+ **/
+void rnpgbe_free_all_tx_resources(struct mucse *mucse)
+{
+ for (int i = 0; i < (mucse->num_tx_queues); i++)
+ rnpgbe_free_tx_resources(mucse->tx_ring[i]);
+}
+
+static int rnpgbe_tx_map(struct mucse_ring *tx_ring,
+ struct mucse_tx_buffer *first, u32 mac_ip_len,
+ u32 tx_flags)
+{
+ /* hw needs this in the high 8 bytes of the desc */
+ u64 fun_id = ((u64)(tx_ring->pfvfnum) << (56));
+ struct mucse_tx_buffer *tx_buffer;
+ struct sk_buff *skb = first->skb;
+ struct rnpgbe_tx_desc *tx_desc;
+ u16 i = tx_ring->next_to_use;
+ unsigned int data_len, size;
+ skb_frag_t *frag;
+ dma_addr_t dma;
+
+ tx_desc = M_TX_DESC(tx_ring, i);
+ size = skb_headlen(skb);
+ data_len = skb->data_len;
+ dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
+ tx_buffer = first;
+
+ dma_unmap_len_set(tx_buffer, len, 0);
+ dma_unmap_addr_set(tx_buffer, dma, 0);
+
+ for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
+ if (dma_mapping_error(tx_ring->dev, dma))
+ goto err_unmap;
+
+ /* record length, and DMA address */
+ dma_unmap_len_set(tx_buffer, len, size);
+ dma_unmap_addr_set(tx_buffer, dma, dma);
+
+ tx_desc->pkt_addr = cpu_to_le64(dma | fun_id);
+
+ while (unlikely(size > M_MAX_DATA_PER_TXD)) {
+ tx_desc->vlan_cmd_bsz = build_ctob(tx_flags,
+ mac_ip_len,
+ M_MAX_DATA_PER_TXD);
+ i++;
+ tx_desc++;
+ if (i == tx_ring->count) {
+ tx_desc = M_TX_DESC(tx_ring, 0);
+ i = 0;
+ }
+ dma += M_MAX_DATA_PER_TXD;
+ size -= M_MAX_DATA_PER_TXD;
+ tx_desc->pkt_addr = cpu_to_le64(dma | fun_id);
+ }
+
+ if (likely(!data_len))
+ break;
+ tx_desc->vlan_cmd_bsz = build_ctob(tx_flags, mac_ip_len, size);
+ i++;
+ tx_desc++;
+ if (i == tx_ring->count) {
+ tx_desc = M_TX_DESC(tx_ring, 0);
+ i = 0;
+ }
+
+ size = skb_frag_size(frag);
+ data_len -= size;
+ dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
+ DMA_TO_DEVICE);
+ tx_buffer = &tx_ring->tx_buffer_info[i];
+ }
+
+ /* write last descriptor with RS and EOP bits */
+ tx_desc->vlan_cmd_bsz = build_ctob(tx_flags | M_TXD_CMD_EOP | M_TXD_CMD_RS,
+ mac_ip_len, size);
+
+ /*
+ * Force memory writes to complete before letting h/w know there
+ * are new descriptors to fetch. (Only applicable for weak-ordered
+ * memory model archs, such as IA-64).
+ *
+ * We also need this memory barrier to make certain all of the
+ * status bits have been updated before next_to_watch is written.
+ */
+ wmb();
+ /* set next_to_watch value indicating a packet is present */
+ first->next_to_watch = tx_desc;
+ i++;
+ if (i == tx_ring->count)
+ i = 0;
+ tx_ring->next_to_use = i;
+ skb_tx_timestamp(skb);
+ netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
+ /* notify HW of packet */
+ writel(i, tx_ring->tail);
+
+ return 0;
+err_unmap:
+ for (;;) {
+ tx_buffer = &tx_ring->tx_buffer_info[i];
+ if (dma_unmap_len(tx_buffer, len)) {
+ if (tx_buffer == first) {
+ dma_unmap_single(tx_ring->dev,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
+ } else {
+ dma_unmap_page(tx_ring->dev,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
+ }
+ }
+ dma_unmap_len_set(tx_buffer, len, 0);
+ dma_unmap_addr_set(tx_buffer, dma, 0);
+ if (tx_buffer == first)
+ break;
+ if (i == 0)
+ i += tx_ring->count;
+ i--;
+ }
+ dev_kfree_skb_any(first->skb);
+ first->skb = NULL;
+ tx_ring->next_to_use = i;
+
+ return -ENOMEM;
+}
+
+static int rnpgbe_maybe_stop_tx(struct mucse_ring *tx_ring, u16 size)
+{
+ if (likely(mucse_desc_unused(tx_ring) >= size))
+ return 0;
+
+ netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
+ /* Herbert's original patch had:
+ * smp_mb__after_netif_stop_queue();
+ * but since that doesn't exist yet, just open code it.
+ */
+ smp_mb();
+
+ /* We need to check again in a case another CPU has just
+ * made room available.
+ */
+ if (likely(mucse_desc_unused(tx_ring) < size))
+ return -EBUSY;
+
+ /* A reprieve! - use start_queue because it doesn't call schedule */
+ netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
+
+ return 0;
+}
+
+netdev_tx_t rnpgbe_xmit_frame_ring(struct sk_buff *skb,
+ struct mucse_ring *tx_ring)
+{
+ u16 count = TXD_USE_COUNT(skb_headlen(skb));
+ /* hw requires this to be non-zero */
+ u32 mac_ip_len = M_DEFAULT_MAC_IP_LEN;
+ struct mucse_tx_buffer *first;
+ u32 tx_flags = 0;
+ unsigned short f;
+
+ for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
+ skb_frag_t *frag_temp = &skb_shinfo(skb)->frags[f];
+
+ count += TXD_USE_COUNT(skb_frag_size(frag_temp));
+ }
+
+ if (rnpgbe_maybe_stop_tx(tx_ring, count + 3))
+ return NETDEV_TX_BUSY;
+
+ /* record the location of the first descriptor for this packet */
+ first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
+ first->skb = skb;
+ first->bytecount = skb->len;
+ first->gso_segs = 1;
+
+ if (rnpgbe_tx_map(tx_ring, first, mac_ip_len, tx_flags))
+ goto out;
+
+ rnpgbe_maybe_stop_tx(tx_ring, DESC_NEEDED);
+out:
+ return NETDEV_TX_OK;
+}
+
+/**
+ * rnpgbe_get_stats64 - Get stats for this netdev
+ * @netdev: network interface device structure
+ * @stats: stats data
+ **/
+void rnpgbe_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct mucse *mucse = netdev_priv(netdev);
+ int i;
+
+ rcu_read_lock();
+ for (i = 0; i < mucse->num_tx_queues; i++) {
+ struct mucse_ring *ring = READ_ONCE(mucse->tx_ring[i]);
+ u64 bytes, packets;
+ unsigned int start;
+
+ if (ring) {
+ do {
+ start = u64_stats_fetch_begin(&ring->syncp);
+ packets = ring->stats.packets;
+ bytes = ring->stats.bytes;
+ } while (u64_stats_fetch_retry(&ring->syncp, start));
+ stats->tx_packets += packets;
+ stats->tx_bytes += bytes;
+ }
+ }
+ rcu_read_unlock();
}
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_lib.h b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_lib.h
index 8e8234209840..2c2796764c2d 100644
--- a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_lib.h
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_lib.h
@@ -5,17 +5,36 @@
#define _RNPGBE_LIB_H
struct mucse;
+struct mucse_ring;
#define RING_OFFSET(n) (0x1000 + 0x100 * (n))
+#define RNPGBE_TX_START 0x18
#define RNPGBE_DMA_INT_MASK 0x24
#define TX_INT_MASK BIT(1)
#define RX_INT_MASK BIT(0)
#define INT_VALID (BIT(16) | BIT(17))
+#define RNPGBE_TX_BASE_ADDR_HI 0x60
+#define RNPGBE_TX_BASE_ADDR_LO 0x64
+#define RNPGBE_TX_LEN 0x68
+#define RNPGBE_TX_HEAD 0x6c
+#define RNPGBE_TX_TAIL 0x70
+#define M_DEFAULT_TX_FETCH 0x80008
+#define RNPGBE_TX_FETCH_CTRL 0x74
+#define M_DEFAULT_INT_TIMER 100
+#define RNPGBE_TX_INT_TIMER 0x78
+#define M_DEFAULT_INT_PKTCNT 48
+#define RNPGBE_TX_INT_PKTCNT 0x7c
#define RNPGBE_DMA_INT_TRIG 0x2c
/* | 31:24 | .... | 15:8 | 7:0 | */
/* | pfvfnum | | tx vector | rx vector | */
#define RING_VECTOR(n) (0x04 * (n))
+#define M_MAX_TXD_PWR 12
+#define M_MAX_DATA_PER_TXD (0x1 << M_MAX_TXD_PWR)
+#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), M_MAX_DATA_PER_TXD)
+#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
+/* hw requires this to be non-zero */
+#define M_DEFAULT_MAC_IP_LEN 20
#define mucse_for_each_ring(pos, head)\
for (typeof((head).ring) __pos = (head).ring;\
__pos ? ({ pos = __pos; 1; }) : 0;\
@@ -30,4 +49,11 @@ void rnpgbe_free_irq(struct mucse *mucse);
void rnpgbe_irq_disable(struct mucse *mucse);
void rnpgbe_down(struct mucse *mucse);
void rnpgbe_up_complete(struct mucse *mucse);
+void rnpgbe_configure_tx(struct mucse *mucse);
+int rnpgbe_setup_all_tx_resources(struct mucse *mucse);
+void rnpgbe_free_all_tx_resources(struct mucse *mucse);
+netdev_tx_t rnpgbe_xmit_frame_ring(struct sk_buff *skb,
+ struct mucse_ring *tx_ring);
+void rnpgbe_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats);
#endif
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_main.c b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_main.c
index 343c53d872a5..6c9ff8a6a0bf 100644
--- a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_main.c
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_main.c
@@ -26,6 +26,17 @@ static struct pci_device_id rnpgbe_pci_tbl[] = {
{0, },
};
+/**
+ * rnpgbe_configure - Configure info to hw
+ * @mucse: pointer to private structure
+ *
+ * rnpgbe_configure configure mac, tx, rx regs to hw
+ **/
+static void rnpgbe_configure(struct mucse *mucse)
+{
+ rnpgbe_configure_tx(mucse);
+}
+
/**
* rnpgbe_open - Called when a network interface is made active
* @netdev: network interface device structure
@@ -49,6 +60,11 @@ static int rnpgbe_open(struct net_device *netdev)
if (err)
goto err_free_irqs;
+ err = rnpgbe_setup_all_tx_resources(mucse);
+ if (err)
+ goto err_free_irqs;
+
+ rnpgbe_configure(mucse);
rnpgbe_up_complete(mucse);
return 0;
@@ -72,6 +88,7 @@ static int rnpgbe_close(struct net_device *netdev)
rnpgbe_down(mucse);
rnpgbe_free_irq(mucse);
+ rnpgbe_free_all_tx_resources(mucse);
return 0;
}
@@ -81,25 +98,32 @@ static int rnpgbe_close(struct net_device *netdev)
* @skb: skb structure to be sent
* @netdev: network interface device structure
*
- * Return: NETDEV_TX_OK
+ * Return: NETDEV_TX_OK or NETDEV_TX_BUSY when insufficient descriptors
**/
static netdev_tx_t rnpgbe_xmit_frame(struct sk_buff *skb,
struct net_device *netdev)
{
struct mucse *mucse = netdev_priv(netdev);
+ struct mucse_ring *tx_ring;
- dev_kfree_skb_any(skb);
- mucse->stats.tx_dropped++;
+ tx_ring = mucse->tx_ring[skb_get_queue_mapping(skb)];
- return NETDEV_TX_OK;
+ return rnpgbe_xmit_frame_ring(skb, tx_ring);
}
static const struct net_device_ops rnpgbe_netdev_ops = {
.ndo_open = rnpgbe_open,
.ndo_stop = rnpgbe_close,
.ndo_start_xmit = rnpgbe_xmit_frame,
+ .ndo_get_stats64 = rnpgbe_get_stats64,
};
+static void rnpgbe_sw_init(struct mucse *mucse)
+{
+ mucse->tx_ring_item_count = M_DEFAULT_TXD;
+ mucse->tx_work_limit = M_DEFAULT_TX_WORK;
+}
+
/**
* rnpgbe_add_adapter - Add netdev for this pci_dev
* @pdev: PCI device information structure
@@ -172,6 +196,7 @@ static int rnpgbe_add_adapter(struct pci_dev *pdev,
}
netdev->netdev_ops = &rnpgbe_netdev_ops;
+ rnpgbe_sw_init(mucse);
err = rnpgbe_reset_hw(hw);
if (err) {
dev_err(&pdev->dev, "Hw reset failed %d\n", err);
--
2.25.1
^ permalink raw reply related [flat|nested] 6+ messages in thread* [PATCH net-next 3/4] net: rnpgbe: Add RX packet reception support
2026-03-25 9:12 [PATCH net-next 0/4] net: rnpgbe: Add TX/RX and link status support Dong Yibo
2026-03-25 9:12 ` [PATCH net-next 1/4] net: rnpgbe: Add interrupt handling Dong Yibo
2026-03-25 9:12 ` [PATCH net-next 2/4] net: rnpgbe: Add basic TX packet transmission support Dong Yibo
@ 2026-03-25 9:12 ` Dong Yibo
2026-03-25 9:12 ` [PATCH net-next 4/4] net: rnpgbe: Add link status handling support Dong Yibo
3 siblings, 0 replies; 6+ messages in thread
From: Dong Yibo @ 2026-03-25 9:12 UTC (permalink / raw)
To: andrew+netdev, davem, edumazet, kuba, pabeni, danishanwar
Cc: linux-kernel, netdev, dong100
Add basic RX packet reception infrastructure to the rnpgbe driver:
- Add RX descriptor structure (union rnpgbe_rx_desc) with write-back
format for hardware status
- Add RX buffer management using page_pool for efficient page recycling
- Implement NAPI poll callback (rnpgbe_poll) for RX processing
- Add RX ring setup and cleanup functions
- Implement packet building from page buffer
- Add RX statistics tracking
Signed-off-by: Dong Yibo <dong100@mucse.com>
---
drivers/net/ethernet/mucse/Kconfig | 1 +
drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h | 49 +-
drivers/net/ethernet/mucse/rnpgbe/rnpgbe_hw.h | 1 +
.../net/ethernet/mucse/rnpgbe/rnpgbe_lib.c | 622 +++++++++++++++++-
.../net/ethernet/mucse/rnpgbe/rnpgbe_lib.h | 32 +-
.../net/ethernet/mucse/rnpgbe/rnpgbe_main.c | 9 +
6 files changed, 710 insertions(+), 4 deletions(-)
diff --git a/drivers/net/ethernet/mucse/Kconfig b/drivers/net/ethernet/mucse/Kconfig
index 0b3e853d625f..be0fdf268484 100644
--- a/drivers/net/ethernet/mucse/Kconfig
+++ b/drivers/net/ethernet/mucse/Kconfig
@@ -19,6 +19,7 @@ if NET_VENDOR_MUCSE
config MGBE
tristate "Mucse(R) 1GbE PCI Express adapters support"
depends on PCI
+ select PAGE_POOL
help
This driver supports Mucse(R) 1GbE PCI Express family of
adapters.
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h
index 7d28ef3bdd86..13838e370165 100644
--- a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h
@@ -61,7 +61,32 @@ struct rnpgbe_tx_desc {
#define M_TXD_CMD_EOP 0x010000 /* End of Packet */
};
+union rnpgbe_rx_desc {
+ struct {
+ __le64 pkt_addr; /* Packet buffer address */
+ __le64 resv_cmd; /* cmd status */
+ };
+ struct {
+ __le32 rss_hash; /* RSS HASH */
+ __le16 mark; /* mark info */
+ __le16 rev1;
+ __le16 len; /* Packet length */
+ __le16 padding_len;
+ __le16 vlan; /* VLAN tag */
+ __le16 cmd; /* cmd status */
+#define M_RXD_STAT_DD BIT(1) /* Descriptor Done */
+#define M_RXD_STAT_EOP BIT(0) /* End of Packet */
+ } wb;
+};
+
#define M_TX_DESC(R, i) (&(((struct rnpgbe_tx_desc *)((R)->desc))[i]))
+#define M_RX_DESC(R, i) (&(((union rnpgbe_rx_desc *)((R)->desc))[i]))
+
+static inline __le16 rnpgbe_test_staterr(union rnpgbe_rx_desc *rx_desc,
+ const u16 stat_err_bits)
+{
+ return rx_desc->wb.cmd & cpu_to_le16(stat_err_bits);
+}
struct mucse_tx_buffer {
struct rnpgbe_tx_desc *next_to_watch;
@@ -77,13 +102,24 @@ struct mucse_queue_stats {
u64 bytes;
};
+struct mucse_rx_buffer {
+ struct sk_buff *skb;
+ dma_addr_t dma;
+ struct page *page;
+ u32 page_offset;
+};
+
struct mucse_ring {
struct mucse_ring *next;
struct mucse_q_vector *q_vector;
struct net_device *netdev;
struct device *dev;
+ struct page_pool *page_pool;
void *desc;
- struct mucse_tx_buffer *tx_buffer_info;
+ union {
+ struct mucse_tx_buffer *tx_buffer_info;
+ struct mucse_rx_buffer *rx_buffer_info;
+ };
void __iomem *ring_addr;
void __iomem *tail;
void __iomem *irq_mask;
@@ -109,6 +145,15 @@ static inline u16 mucse_desc_unused(struct mucse_ring *ring)
return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1;
}
+static inline u16 mucse_desc_unused_rx(struct mucse_ring *ring)
+{
+ u16 ntc = ring->next_to_clean;
+ u16 ntu = ring->next_to_use;
+
+ /* reserve 16 descs: 16 descs * 16 bytes = 256, the TLP max-payload size */
+ return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 16;
+}
+
static inline __le64 build_ctob(u32 vlan_cmd, u32 mac_ip_len, u32 size)
{
return cpu_to_le64(((u64)vlan_cmd << 32) | ((u64)mac_ip_len << 16) |
@@ -142,6 +187,7 @@ struct mucse_stats {
#define MAX_Q_VECTORS 8
#define M_DEFAULT_TXD 512
+#define M_DEFAULT_RXD 512
#define M_DEFAULT_TX_WORK 256
struct mucse {
@@ -161,6 +207,7 @@ struct mucse {
int tx_work_limit;
int num_tx_queues;
int num_q_vectors;
+ int rx_ring_item_count;
int num_rx_queues;
};
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_hw.h b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_hw.h
index f060c39e9690..ce092edf920a 100644
--- a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_hw.h
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_hw.h
@@ -16,6 +16,7 @@
#define M_DEFAULT_N210_MHZ 62
#define TX_AXI_RW_EN 0xc
+#define RX_AXI_RW_EN 0x03
#define RNPGBE_DMA_AXI_EN 0x0010
#define RNPGBE_LEGACY_TIME 0xd000
#define RNPGBE_LEGACY_ENABLE 0xd004
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_lib.c b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_lib.c
index 9153e38fdd15..3dbb697a0667 100644
--- a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_lib.c
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_lib.c
@@ -3,7 +3,9 @@
#include <linux/pci.h>
#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
#include <linux/vmalloc.h>
+#include <net/page_pool/helpers.h>
#include "rnpgbe_lib.h"
#include "rnpgbe.h"
@@ -147,6 +149,353 @@ static bool rnpgbe_clean_tx_irq(struct mucse_q_vector *q_vector,
return total_bytes == 0;
}
+static bool mucse_alloc_mapped_page(struct mucse_ring *rx_ring,
+ struct mucse_rx_buffer *bi)
+{
+ struct page *page = bi->page;
+ dma_addr_t dma;
+
+ if (page)
+ return true;
+
+ page = page_pool_dev_alloc_pages(rx_ring->page_pool);
+ if (unlikely(!page))
+ return false;
+ dma = page_pool_get_dma_addr(page);
+
+ bi->dma = dma;
+ bi->page = page;
+ bi->page_offset = RNPGBE_SKB_PAD;
+
+ return true;
+}
+
+static void mucse_update_rx_tail(struct mucse_ring *rx_ring,
+ u32 val)
+{
+ rx_ring->next_to_use = val;
+ /*
+ * Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64).
+ */
+ wmb();
+ writel(val, rx_ring->tail);
+}
+
+/**
+ * rnpgbe_alloc_rx_buffers - Replace used receive buffers
+ * @rx_ring: ring to place buffers on
+ * @cleaned_count: number of buffers to replace
+ * @return: true if alloc failed
+ **/
+static bool rnpgbe_alloc_rx_buffers(struct mucse_ring *rx_ring,
+ u16 cleaned_count)
+{
+ u64 fun_id = ((u64)(rx_ring->pfvfnum) << 56);
+ union rnpgbe_rx_desc *rx_desc;
+ u16 i = rx_ring->next_to_use;
+ struct mucse_rx_buffer *bi;
+ bool err = false;
+ u16 bufsz;
+ /* nothing to do */
+ if (!cleaned_count)
+ return err;
+
+ rx_desc = M_RX_DESC(rx_ring, i);
+ bi = &rx_ring->rx_buffer_info[i];
+ i -= rx_ring->count;
+ bufsz = mucse_rx_bufsz(rx_ring);
+
+ do {
+ if (!mucse_alloc_mapped_page(rx_ring, bi)) {
+ err = true;
+ break;
+ }
+
+ dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
+ bi->page_offset, bufsz,
+ DMA_FROM_DEVICE);
+ rx_desc->pkt_addr = cpu_to_le64(bi->dma + bi->page_offset +
+ fun_id);
+
+ /* clean dd */
+ rx_desc->resv_cmd = 0;
+ rx_desc++;
+ bi++;
+ i++;
+ if (unlikely(!i)) {
+ rx_desc = M_RX_DESC(rx_ring, 0);
+ bi = rx_ring->rx_buffer_info;
+ i -= rx_ring->count;
+ }
+ cleaned_count--;
+ } while (cleaned_count);
+
+ i += rx_ring->count;
+
+ if (rx_ring->next_to_use != i)
+ mucse_update_rx_tail(rx_ring, i);
+
+ return err;
+}
+
+/**
+ * rnpgbe_get_buffer - Get the rx_buffer to be used
+ * @rx_ring: pointer to rx ring
+ * @skb: pointer skb for this packet
+ * @size: data size in this desc
+ * @return: rx_buffer.
+ **/
+static struct mucse_rx_buffer *rnpgbe_get_buffer(struct mucse_ring *rx_ring,
+ struct sk_buff **skb,
+ const unsigned int size)
+{
+ struct mucse_rx_buffer *rx_buffer;
+
+ rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
+ *skb = rx_buffer->skb;
+ prefetchw(page_address(rx_buffer->page) + rx_buffer->page_offset);
+ /* we are reusing so sync this buffer for CPU use */
+ dma_sync_single_range_for_cpu(rx_ring->dev, rx_buffer->dma,
+ rx_buffer->page_offset, size,
+ DMA_FROM_DEVICE);
+
+ return rx_buffer;
+}
+
+/**
+ * rnpgbe_add_rx_frag - Add no-linear data to the skb
+ * @rx_buffer: pointer to rx_buffer
+ * @skb: pointer skb for this packet
+ * @size: data size in this desc
+ **/
+static void rnpgbe_add_rx_frag(struct mucse_rx_buffer *rx_buffer,
+ struct sk_buff *skb,
+ unsigned int size)
+{
+ unsigned int truesize = SKB_DATA_ALIGN(RNPGBE_SKB_PAD + size);
+
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
+ rx_buffer->page_offset, size, truesize);
+}
+
+/**
+ * rnpgbe_build_skb - Try to build a sbk based on rx_buffer
+ * @rx_buffer: pointer to rx_buffer
+ * @size: data size in this desc
+ * @return: skb for this rx_buffer
+ **/
+static struct sk_buff *rnpgbe_build_skb(struct mucse_rx_buffer *rx_buffer,
+ unsigned int size)
+{
+ unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
+ SKB_DATA_ALIGN(size + RNPGBE_SKB_PAD);
+ void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
+ struct sk_buff *skb;
+
+ net_prefetch(va);
+ /* build an skb around the page buffer */
+ skb = build_skb(va - RNPGBE_SKB_PAD, truesize);
+ if (unlikely(!skb))
+ return NULL;
+ /* update pointers within the skb to store the data */
+ skb_reserve(skb, RNPGBE_SKB_PAD);
+ __skb_put(skb, size);
+ skb_mark_for_recycle(skb);
+
+ return skb;
+}
+
+/**
+ * rnpgbe_pull_tail - Pull header to linear portion of buffer
+ * @skb: current socket buffer containing buffer in progress
+ **/
+static void rnpgbe_pull_tail(struct sk_buff *skb)
+{
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
+ unsigned int pull_len;
+ unsigned char *va;
+
+ va = skb_frag_address(frag);
+ pull_len = eth_get_headlen(skb->dev, va, M_RX_HDR_SIZE);
+ /* align pull length to size of long to optimize memcpy performance */
+ skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
+ /* update all of the pointers */
+ skb_frag_size_sub(frag, pull_len);
+ skb_frag_off_add(frag, pull_len);
+ skb->data_len -= pull_len;
+ skb->tail += pull_len;
+}
+
+/**
+ * rnpgbe_is_non_eop - Process handling of non-EOP buffers
+ * @rx_ring: rx ring being processed
+ * @rx_desc: rx descriptor for current buffer
+ * @skb: current socket buffer containing buffer in progress
+ *
+ * This function updates next to clean. If the buffer is an EOP buffer
+ * this function exits returning false, otherwise it will place the
+ * sk_buff in the next buffer to be chained and return true indicating
+ * that this is in fact a non-EOP buffer.
+ *
+ * @return: true if this buffer is not the end of the packet
+ **/
+static bool rnpgbe_is_non_eop(struct mucse_ring *rx_ring,
+ union rnpgbe_rx_desc *rx_desc,
+ struct sk_buff *skb)
+{
+ u32 ntc = rx_ring->next_to_clean + 1;
+
+ /* fetch, update, and store next to clean */
+ ntc = (ntc < rx_ring->count) ? ntc : 0;
+ rx_ring->next_to_clean = ntc;
+ prefetch(M_RX_DESC(rx_ring, ntc));
+ /* if we are the last buffer then there is nothing else to do */
+ if (likely(rnpgbe_test_staterr(rx_desc, M_RXD_STAT_EOP)))
+ return false;
+ /* place skb in next buffer to be received */
+ rx_ring->rx_buffer_info[ntc].skb = skb;
+ /* we should clean it since we used all info in it */
+ rx_desc->wb.cmd = 0;
+
+ return true;
+}
+
+/**
+ * rnpgbe_cleanup_headers - Correct corrupted or empty headers
+ * @skb: current socket buffer containing buffer in progress
+ * @return: true if an error was encountered and skb was freed.
+ **/
+static bool rnpgbe_cleanup_headers(struct sk_buff *skb)
+{
+ if (IS_ERR(skb))
+ return true;
+ /* place header in linear portion of buffer */
+ if (!skb_headlen(skb))
+ rnpgbe_pull_tail(skb);
+ /* if eth_skb_pad returns an error the skb was freed */
+ if (eth_skb_pad(skb))
+ return true;
+
+ return false;
+}
+
+/**
+ * rnpgbe_process_skb_fields - Setup skb header fields from desc
+ * @rx_ring: structure containing ring specific data
+ * @skb: skb currently being received and modified
+ *
+ * rnpgbe_process_skb_fields checks the ring, descriptor information
+ * in order to setup the hash, chksum, vlan, protocol, and other
+ * fields within the skb.
+ **/
+static void rnpgbe_process_skb_fields(struct mucse_ring *rx_ring,
+ struct sk_buff *skb)
+{
+ struct net_device *dev = rx_ring->netdev;
+
+ skb_record_rx_queue(skb, rx_ring->queue_index);
+ skb->protocol = eth_type_trans(skb, dev);
+}
+
+/**
+ * rnpgbe_clean_rx_irq - Clean completed descriptors from Rx ring
+ * @q_vector: structure containing interrupt and ring information
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @budget: total limit on number of packets to process
+ *
+ * rnpgbe_clean_rx_irq checks the DD bit in each descriptor and handles
+ * the descriptor when DD is set, which means hw has written the data back
+ *
+ * @return: amount of work completed.
+ **/
+static int rnpgbe_clean_rx_irq(struct mucse_q_vector *q_vector,
+ struct mucse_ring *rx_ring,
+ int budget)
+{
+ unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+ u16 cleaned_count = mucse_desc_unused_rx(rx_ring);
+ bool fail_alloc = false;
+
+ while (likely(total_rx_packets < budget)) {
+ struct mucse_rx_buffer *rx_buffer;
+ union rnpgbe_rx_desc *rx_desc;
+ struct sk_buff *skb;
+ unsigned int size;
+
+ if (cleaned_count >= M_RX_BUFFER_WRITE) {
+ if (rnpgbe_alloc_rx_buffers(rx_ring, cleaned_count)) {
+ fail_alloc = true;
+ cleaned_count = mucse_desc_unused_rx(rx_ring);
+ } else {
+ cleaned_count = 0;
+ }
+ }
+ rx_desc = M_RX_DESC(rx_ring, rx_ring->next_to_clean);
+
+ if (!rnpgbe_test_staterr(rx_desc, M_RXD_STAT_DD))
+ break;
+
+ /* This memory barrier is needed to keep us from reading
+ * any other fields out of the rx_desc until we know the
+ * descriptor has been written back
+ */
+ dma_rmb();
+ size = le16_to_cpu(rx_desc->wb.len);
+ if (!size)
+ break;
+
+ rx_buffer = rnpgbe_get_buffer(rx_ring, &skb, size);
+
+ if (skb)
+ rnpgbe_add_rx_frag(rx_buffer, skb, size);
+ else
+ skb = rnpgbe_build_skb(rx_buffer, size);
+ /* exit if we failed to retrieve a buffer */
+ if (!skb) {
+ dma_sync_single_range_for_device(rx_ring->dev,
+ rx_buffer->dma,
+ rx_buffer->page_offset,
+ size,
+ DMA_FROM_DEVICE);
+ page_pool_recycle_direct(rx_ring->page_pool,
+ rx_buffer->page);
+ break;
+ }
+
+ rx_buffer->page = NULL;
+ rx_buffer->skb = NULL;
+ cleaned_count++;
+
+ if (rnpgbe_is_non_eop(rx_ring, rx_desc, skb))
+ continue;
+
+ /* verify the packet layout is correct */
+ if (rnpgbe_cleanup_headers(skb)) {
+ /* we should clean it since we used all info in it */
+ rx_desc->wb.cmd = 0;
+ continue;
+ }
+
+ /* probably a little skewed due to removing CRC */
+ total_rx_bytes += skb->len;
+ rnpgbe_process_skb_fields(rx_ring, skb);
+ rx_desc->wb.cmd = 0;
+ napi_gro_receive(&q_vector->napi, skb);
+ /* update budget accounting */
+ total_rx_packets++;
+ }
+
+ u64_stats_update_begin(&rx_ring->syncp);
+ rx_ring->stats.packets += total_rx_packets;
+ rx_ring->stats.bytes += total_rx_bytes;
+ u64_stats_update_end(&rx_ring->syncp);
+ /* keep polling if alloc mem failed */
+ return fail_alloc ? budget : total_rx_packets;
+}
+
/**
* rnpgbe_poll - NAPI Rx polling callback
* @napi: structure for representing this polling device
@@ -159,6 +508,7 @@ static int rnpgbe_poll(struct napi_struct *napi, int budget)
{
struct mucse_q_vector *q_vector =
container_of(napi, struct mucse_q_vector, napi);
+ int per_ring_budget, work_done = 0;
bool clean_complete = true;
struct mucse_ring *ring;
@@ -167,12 +517,27 @@ static int rnpgbe_poll(struct napi_struct *napi, int budget)
clean_complete = false;
}
+ if (q_vector->rx.count > 1)
+ per_ring_budget = max(budget / q_vector->rx.count, 1);
+ else
+ per_ring_budget = budget;
+
+ mucse_for_each_ring(ring, q_vector->rx) {
+ int cleaned = 0;
+
+ cleaned = rnpgbe_clean_rx_irq(q_vector, ring, per_ring_budget);
+ work_done += cleaned;
+ if (cleaned >= per_ring_budget)
+ clean_complete = false;
+ }
+
if (!clean_complete)
return budget;
- rnpgbe_irq_enable_queues(q_vector);
+ if (likely(napi_complete_done(napi, work_done)))
+ rnpgbe_irq_enable_queues(q_vector);
- return 0;
+ return min(work_done, budget - 1);
}
/**
@@ -338,12 +703,16 @@ static int rnpgbe_alloc_q_vector(struct mucse *mucse,
}
for (idx = 0; idx < rxr_count; idx++) {
+ ring->dev = &mucse->pdev->dev;
mucse_add_ring(ring, &q_vector->rx);
+ ring->count = mucse->rx_ring_item_count;
+ ring->netdev = mucse->netdev;
ring->queue_index = eth_queue_idx + idx;
ring->rnpgbe_queue_idx = rxr_idx;
ring->ring_addr = hw->hw_addr + RING_OFFSET(rxr_idx);
ring->irq_mask = ring->ring_addr + RNPGBE_DMA_INT_MASK;
ring->trig = ring->ring_addr + RNPGBE_DMA_INT_TRIG;
+ ring->pfvfnum = hw->pfvfnum;
mucse->rx_ring[ring->queue_index] = ring;
rxr_idx += step;
ring++;
@@ -776,6 +1145,16 @@ static void rnpgbe_clean_all_tx_rings(struct mucse *mucse)
rnpgbe_clean_tx_ring(mucse->tx_ring[i]);
}
+/**
+ * rnpgbe_clean_all_rx_rings - Free Rx Buffers for all queues
+ * @mucse: board private structure
+ **/
+static void rnpgbe_clean_all_rx_rings(struct mucse *mucse)
+{
+ for (int i = 0; i < mucse->num_rx_queues; i++)
+ rnpgbe_clean_rx_ring(mucse->rx_ring[i]);
+}
+
void rnpgbe_down(struct mucse *mucse)
{
struct net_device *netdev = mucse->netdev;
@@ -785,6 +1164,7 @@ void rnpgbe_down(struct mucse *mucse)
rnpgbe_irq_disable(mucse);
netif_tx_disable(netdev);
rnpgbe_napi_disable_all(mucse);
+ rnpgbe_clean_all_rx_rings(mucse);
}
/**
@@ -799,6 +1179,8 @@ void rnpgbe_up_complete(struct mucse *mucse)
rnpgbe_napi_enable_all(mucse);
rnpgbe_irq_enable(mucse);
netif_tx_start_all_queues(netdev);
+ for (int i = 0; i < mucse->num_rx_queues; i++)
+ mucse_ring_wr32(mucse->rx_ring[i], RNPGBE_RX_START, 1);
}
/**
@@ -1152,5 +1534,241 @@ void rnpgbe_get_stats64(struct net_device *netdev,
stats->tx_bytes += bytes;
}
}
+
+ for (i = 0; i < mucse->num_rx_queues; i++) {
+ struct mucse_ring *ring = READ_ONCE(mucse->rx_ring[i]);
+ u64 bytes, packets;
+ unsigned int start;
+
+ if (ring) {
+ do {
+ start = u64_stats_fetch_begin(&ring->syncp);
+ packets = ring->stats.packets;
+ bytes = ring->stats.bytes;
+ } while (u64_stats_fetch_retry(&ring->syncp, start));
+ stats->rx_packets += packets;
+ stats->rx_bytes += bytes;
+ }
+ }
rcu_read_unlock();
}
+
+static int mucse_alloc_page_pool(struct mucse_ring *rx_ring)
+{
+ int ret = 0;
+
+ struct page_pool_params pp_params = {
+ .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
+ .order = 0,
+ .pool_size = rx_ring->count,
+ .nid = dev_to_node(rx_ring->dev),
+ .dev = rx_ring->dev,
+ .dma_dir = DMA_FROM_DEVICE,
+ .offset = 0,
+ .max_len = PAGE_SIZE,
+ };
+
+ rx_ring->page_pool = page_pool_create(&pp_params);
+ if (IS_ERR(rx_ring->page_pool)) {
+ ret = PTR_ERR(rx_ring->page_pool);
+ rx_ring->page_pool = NULL;
+ }
+
+ return ret;
+}
+
+/**
+ * rnpgbe_setup_rx_resources - allocate Rx resources (Descriptors)
+ * @rx_ring: rx descriptor ring (for a specific queue) to setup
+ * @mucse: pointer to private structure
+ *
+ * Return: 0 on success, negative on failure
+ **/
+static int rnpgbe_setup_rx_resources(struct mucse_ring *rx_ring,
+ struct mucse *mucse)
+{
+ struct device *dev = rx_ring->dev;
+ int size;
+
+ size = sizeof(struct mucse_rx_buffer) * rx_ring->count;
+
+ rx_ring->rx_buffer_info = vzalloc(size);
+
+ if (!rx_ring->rx_buffer_info)
+ goto err_return;
+ /* Round up to nearest 4K */
+ rx_ring->size = rx_ring->count * sizeof(union rnpgbe_rx_desc);
+ rx_ring->size = ALIGN(rx_ring->size, 4096);
+ rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, &rx_ring->dma,
+ GFP_KERNEL);
+ if (!rx_ring->desc)
+ goto err_free_buffer;
+
+ rx_ring->next_to_clean = 0;
+ rx_ring->next_to_use = 0;
+
+ if (mucse_alloc_page_pool(rx_ring))
+ goto err_free_desc;
+
+ return 0;
+err_free_desc:
+ dma_free_coherent(dev, rx_ring->size, rx_ring->desc,
+ rx_ring->dma);
+ rx_ring->desc = NULL;
+err_free_buffer:
+ vfree(rx_ring->rx_buffer_info);
+err_return:
+ rx_ring->rx_buffer_info = NULL;
+ return -ENOMEM;
+}
+
+/**
+ * rnpgbe_clean_rx_ring - Free Rx Buffers per Queue
+ * @rx_ring: ring to free buffers from
+ **/
+void rnpgbe_clean_rx_ring(struct mucse_ring *rx_ring)
+{
+ u16 i = rx_ring->next_to_clean;
+ struct mucse_rx_buffer *rx_buffer = &rx_ring->rx_buffer_info[i];
+
+ /* Free all the Rx ring sk_buffs */
+ while (i != rx_ring->next_to_use) {
+ if (rx_buffer->skb) {
+ struct sk_buff *skb = rx_buffer->skb;
+
+ dev_kfree_skb(skb);
+ rx_buffer->skb = NULL;
+ }
+ dma_sync_single_range_for_cpu(rx_ring->dev, rx_buffer->dma,
+ rx_buffer->page_offset,
+ mucse_rx_bufsz(rx_ring),
+ DMA_FROM_DEVICE);
+ if (rx_buffer->page) {
+ page_pool_put_full_page(rx_ring->page_pool,
+ rx_buffer->page, false);
+ rx_buffer->page = NULL;
+ }
+ i++;
+ rx_buffer++;
+ if (i == rx_ring->count) {
+ i = 0;
+ rx_buffer = rx_ring->rx_buffer_info;
+ }
+ }
+
+ rx_ring->next_to_clean = 0;
+ rx_ring->next_to_use = 0;
+}
+
+/**
+ * rnpgbe_free_rx_resources - Free Rx Resources
+ * @rx_ring: ring to clean the resources from
+ *
+ * Free all receive software resources
+ **/
+static void rnpgbe_free_rx_resources(struct mucse_ring *rx_ring)
+{
+ rnpgbe_clean_rx_ring(rx_ring);
+ vfree(rx_ring->rx_buffer_info);
+ rx_ring->rx_buffer_info = NULL;
+ /* if not set, then don't free */
+ if (!rx_ring->desc)
+ return;
+
+ dma_free_coherent(rx_ring->dev, rx_ring->size, rx_ring->desc,
+ rx_ring->dma);
+ rx_ring->desc = NULL;
+ if (rx_ring->page_pool) {
+ page_pool_destroy(rx_ring->page_pool);
+ rx_ring->page_pool = NULL;
+ }
+}
+
+/**
+ * rnpgbe_setup_all_rx_resources - allocate all queues Rx resources
+ * @mucse: pointer to private structure
+ *
+ * Return: 0 on success, negative on failure
+ **/
+int rnpgbe_setup_all_rx_resources(struct mucse *mucse)
+{
+ int i, err = 0;
+
+ for (i = 0; i < mucse->num_rx_queues; i++) {
+ err = rnpgbe_setup_rx_resources(mucse->rx_ring[i], mucse);
+ if (!err)
+ continue;
+
+ goto err_setup_rx;
+ }
+
+ return 0;
+err_setup_rx:
+ while (i--)
+ rnpgbe_free_rx_resources(mucse->rx_ring[i]);
+ return err;
+}
+
+/**
+ * rnpgbe_free_all_rx_resources - Free Rx Resources for All Queues
+ * @mucse: pointer to private structure
+ *
+ * Free all receive software resources
+ **/
+void rnpgbe_free_all_rx_resources(struct mucse *mucse)
+{
+ for (int i = 0; i < (mucse->num_rx_queues); i++) {
+ if (mucse->rx_ring[i]->desc)
+ rnpgbe_free_rx_resources(mucse->rx_ring[i]);
+ }
+}
+
+/**
+ * rnpgbe_configure_rx_ring - Configure Rx ring info to hw
+ * @mucse: pointer to private structure
+ * @ring: structure containing ring specific data
+ *
+ * Configure the Rx descriptor ring after a reset.
+ **/
+static void rnpgbe_configure_rx_ring(struct mucse *mucse,
+ struct mucse_ring *ring)
+{
+ struct mucse_hw *hw = &mucse->hw;
+
+ /* disable queue to avoid issues while updating state */
+ mucse_ring_wr32(ring, RNPGBE_RX_START, 0);
+ /* set descriptor registers */
+ mucse_ring_wr32(ring, RNPGBE_RX_BASE_ADDR_LO, (u32)ring->dma);
+ mucse_ring_wr32(ring, RNPGBE_RX_BASE_ADDR_HI,
+ (u32)((u64)ring->dma >> 32) | (hw->pfvfnum << 24));
+ mucse_ring_wr32(ring, RNPGBE_RX_LEN, ring->count);
+ ring->tail = ring->ring_addr + RNPGBE_RX_TAIL;
+ ring->next_to_clean = mucse_ring_rd32(ring, RNPGBE_RX_HEAD);
+ ring->next_to_use = ring->next_to_clean;
+ mucse_ring_wr32(ring, RNPGBE_RX_SG_LEN, M_DEFAULT_SG);
+ mucse_ring_wr32(ring, RNPGBE_RX_FETCH, M_DEFAULT_RX_FETCH);
+ mucse_ring_wr32(ring, RNPGBE_RX_TIMEOUT_TH, 0);
+ mucse_ring_wr32(ring, RNPGBE_RX_INT_TIMER,
+ M_DEFAULT_INT_TIMER_R * hw->cycles_per_us);
+ mucse_ring_wr32(ring, RNPGBE_RX_INT_PKTCNT, M_DEFAULT_RX_INT_PKTCNT);
+ rnpgbe_alloc_rx_buffers(ring, mucse_desc_unused_rx(ring));
+}
+
+/**
+ * rnpgbe_configure_rx - Configure Receive Unit after Reset
+ * @mucse: pointer to private structure
+ *
+ * Configure the Rx unit after a reset.
+ **/
+void rnpgbe_configure_rx(struct mucse *mucse)
+{
+ struct mucse_hw *hw = &mucse->hw;
+ u32 dma_axi_ctl;
+
+ for (int i = 0; i < mucse->num_rx_queues; i++)
+ rnpgbe_configure_rx_ring(mucse, mucse->rx_ring[i]);
+
+ dma_axi_ctl = mucse_hw_rd32(hw, RNPGBE_DMA_AXI_EN);
+ dma_axi_ctl |= RX_AXI_RW_EN;
+ mucse_hw_wr32(hw, RNPGBE_DMA_AXI_EN, dma_axi_ctl);
+}
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_lib.h b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_lib.h
index 2c2796764c2d..29520ad716ca 100644
--- a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_lib.h
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_lib.h
@@ -8,11 +8,27 @@ struct mucse;
struct mucse_ring;
#define RING_OFFSET(n) (0x1000 + 0x100 * (n))
+#define RNPGBE_RX_START 0x10
#define RNPGBE_TX_START 0x18
#define RNPGBE_DMA_INT_MASK 0x24
#define TX_INT_MASK BIT(1)
#define RX_INT_MASK BIT(0)
#define INT_VALID (BIT(16) | BIT(17))
+#define RNPGBE_RX_BASE_ADDR_HI 0x30
+#define RNPGBE_RX_BASE_ADDR_LO 0x34
+#define RNPGBE_RX_LEN 0x38
+#define RNPGBE_RX_HEAD 0x3c
+#define RNPGBE_RX_TAIL 0x40
+#define M_DEFAULT_RX_FETCH 0x100020
+#define RNPGBE_RX_FETCH 0x44
+#define M_DEFAULT_INT_TIMER_R 30
+#define RNPGBE_RX_INT_TIMER 0x48
+#define M_DEFAULT_RX_INT_PKTCNT 64
+#define RNPGBE_RX_INT_PKTCNT 0x4c
+#define RNPGBE_RX_ARB_DEF_LVL 0x50
+#define RNPGBE_RX_TIMEOUT_TH 0x54
+#define M_DEFAULT_SG 96 /* unit 16b, 1536 bytes */
+#define RNPGBE_RX_SG_LEN 0x58
#define RNPGBE_TX_BASE_ADDR_HI 0x60
#define RNPGBE_TX_BASE_ADDR_LO 0x64
#define RNPGBE_TX_LEN 0x68
@@ -33,13 +49,23 @@ struct mucse_ring;
#define M_MAX_DATA_PER_TXD (0x1 << M_MAX_TXD_PWR)
#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), M_MAX_DATA_PER_TXD)
#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
+#define RNPGBE_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
+#define M_RXBUFFER_1536 1536
+#define M_RX_BUFFER_WRITE 16
+#define M_RX_HDR_SIZE 256
+
+static inline unsigned int mucse_rx_bufsz(struct mucse_ring *ring)
+{
+ /* 1536 is enough for mtu 1500 packets */
+ return (M_RXBUFFER_1536 - NET_IP_ALIGN);
+}
+
/* hw require this not zero */
#define M_DEFAULT_MAC_IP_LEN 20
#define mucse_for_each_ring(pos, head)\
for (typeof((head).ring) __pos = (head).ring;\
__pos ? ({ pos = __pos; 1; }) : 0;\
__pos = __pos->next)
-
int rnpgbe_init_interrupt_scheme(struct mucse *mucse);
void rnpgbe_clear_interrupt_scheme(struct mucse *mucse);
int register_mbx_irq(struct mucse *mucse);
@@ -50,10 +76,14 @@ void rnpgbe_irq_disable(struct mucse *mucse);
void rnpgbe_down(struct mucse *mucse);
void rnpgbe_up_complete(struct mucse *mucse);
void rnpgbe_configure_tx(struct mucse *mucse);
+void rnpgbe_configure_rx(struct mucse *mucse);
int rnpgbe_setup_all_tx_resources(struct mucse *mucse);
void rnpgbe_free_all_tx_resources(struct mucse *mucse);
netdev_tx_t rnpgbe_xmit_frame_ring(struct sk_buff *skb,
struct mucse_ring *tx_ring);
void rnpgbe_get_stats64(struct net_device *netdev,
struct rtnl_link_stats64 *stats);
+void rnpgbe_clean_rx_ring(struct mucse_ring *rx_ring);
+int rnpgbe_setup_all_rx_resources(struct mucse *mucse);
+void rnpgbe_free_all_rx_resources(struct mucse *mucse);
#endif
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_main.c b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_main.c
index 6c9ff8a6a0bf..413eefae65dd 100644
--- a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_main.c
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_main.c
@@ -35,6 +35,7 @@ static struct pci_device_id rnpgbe_pci_tbl[] = {
static void rnpgbe_configure(struct mucse *mucse)
{
rnpgbe_configure_tx(mucse);
+ rnpgbe_configure_rx(mucse);
}
/**
@@ -63,11 +64,17 @@ static int rnpgbe_open(struct net_device *netdev)
err = rnpgbe_setup_all_tx_resources(mucse);
if (err)
goto err_free_irqs;
+ err = rnpgbe_setup_all_rx_resources(mucse);
+ if (err)
+ goto err_free_tx;
+
rnpgbe_configure(mucse);
rnpgbe_up_complete(mucse);
return 0;
+err_free_tx:
+ rnpgbe_free_all_tx_resources(mucse);
err_free_irqs:
rnpgbe_free_irq(mucse);
return err;
@@ -89,6 +96,7 @@ static int rnpgbe_close(struct net_device *netdev)
rnpgbe_down(mucse);
rnpgbe_free_irq(mucse);
rnpgbe_free_all_tx_resources(mucse);
+ rnpgbe_free_all_rx_resources(mucse);
return 0;
}
@@ -121,6 +129,7 @@ static const struct net_device_ops rnpgbe_netdev_ops = {
static void rnpgbe_sw_init(struct mucse *mucse)
{
mucse->tx_ring_item_count = M_DEFAULT_TXD;
+ mucse->rx_ring_item_count = M_DEFAULT_RXD;
mucse->tx_work_limit = M_DEFAULT_TX_WORK;
}
--
2.25.1
^ permalink raw reply related [flat|nested] 6+ messages in thread* [PATCH net-next 4/4] net: rnpgbe: Add link status handling support
2026-03-25 9:12 [PATCH net-next 0/4] net: rnpgbe: Add TX/RX and link status support Dong Yibo
` (2 preceding siblings ...)
2026-03-25 9:12 ` [PATCH net-next 3/4] net: rnpgbe: Add RX packet reception support Dong Yibo
@ 2026-03-25 9:12 ` Dong Yibo
2026-03-26 4:46 ` kernel test robot
3 siblings, 1 reply; 6+ messages in thread
From: Dong Yibo @ 2026-03-25 9:12 UTC (permalink / raw)
To: andrew+netdev, davem, edumazet, kuba, pabeni, danishanwar
Cc: linux-kernel, netdev, dong100
Add link status management infrastructure to the rnpgbe driver:
- Add link status related data structures (speed, duplex, link state)
- Implement firmware link event handling via mailbox
- Add service task for periodic link status monitoring
- Implement carrier status management (netif_carrier_on/off)
- Add port up/down notification to firmware
This enables the driver to properly track and report link status changes.
Signed-off-by: Dong Yibo <dong100@mucse.com>
---
drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h | 18 +-
.../net/ethernet/mucse/rnpgbe/rnpgbe_chip.c | 31 +++-
drivers/net/ethernet/mucse/rnpgbe/rnpgbe_hw.h | 12 ++
.../net/ethernet/mucse/rnpgbe/rnpgbe_lib.c | 143 +++++++++++++++
.../net/ethernet/mucse/rnpgbe/rnpgbe_lib.h | 1 +
.../net/ethernet/mucse/rnpgbe/rnpgbe_main.c | 18 ++
.../net/ethernet/mucse/rnpgbe/rnpgbe_mbx.c | 20 +++
.../net/ethernet/mucse/rnpgbe/rnpgbe_mbx.h | 1 +
.../net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.c | 165 ++++++++++++++++++
.../net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.h | 47 +++++
10 files changed, 452 insertions(+), 4 deletions(-)
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h
index 13838e370165..9d80b28c4ae9 100644
--- a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h
@@ -30,11 +30,10 @@ struct mucse_mbx_info {
u32 fwpf_ctrl_base;
};
-/* Enum for firmware notification modes,
- * more modes (e.g., portup, link_report) will be added in future
- **/
enum {
mucse_fw_powerup,
+ mucse_fw_portup,
+ mucse_fw_link_report_en,
};
struct mucse_hw {
@@ -43,8 +42,11 @@ struct mucse_hw {
struct pci_dev *pdev;
struct mucse_mbx_info mbx;
int port;
+ int speed;
+ bool link;
u16 cycles_per_us;
u8 pfvfnum;
+ u8 duplex;
};
struct rnpgbe_tx_desc {
@@ -190,6 +192,10 @@ struct mucse_stats {
#define M_DEFAULT_RXD 512
#define M_DEFAULT_TX_WORK 256
+enum mucse_state_t {
+ __MUCSE_DOWN,
+};
+
struct mucse {
struct net_device *netdev;
struct pci_dev *pdev;
@@ -199,6 +205,7 @@ struct mucse {
#define M_FLAG_MSI_EN BIT(1)
#define M_FLAG_MSIX_SINGLE_EN BIT(2)
#define M_FLAG_MSIX_EN BIT(3)
+#define M_FLAG_NEED_LINK_UPDATE BIT(4)
u32 flags;
struct mucse_ring *tx_ring[RNPGBE_MAX_QUEUES] ____cacheline_aligned_in_smp;
struct mucse_ring *rx_ring[RNPGBE_MAX_QUEUES] ____cacheline_aligned_in_smp;
@@ -209,6 +216,10 @@ struct mucse {
int num_q_vectors;
int rx_ring_item_count;
int num_rx_queues;
+ unsigned long state;
+ struct delayed_work serv_task;
+ struct workqueue_struct *serv_wq;
+ spinlock_t link_lock; /* spinlock for link update */
};
int rnpgbe_get_permanent_mac(struct mucse_hw *hw, u8 *perm_addr);
@@ -217,6 +228,7 @@ int rnpgbe_send_notify(struct mucse_hw *hw,
bool enable,
int mode);
int rnpgbe_init_hw(struct mucse_hw *hw, int board_type);
+void rnpgbe_set_rx(struct mucse_hw *hw, bool enable);
/* Device IDs */
#define PCI_VENDOR_ID_MUCSE 0x8848
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_chip.c b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_chip.c
index 291e77d573fe..902c8a801ba3 100644
--- a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_chip.c
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_chip.c
@@ -66,11 +66,17 @@ int rnpgbe_send_notify(struct mucse_hw *hw,
int mode)
{
int err;
- /* Keep switch struct to support more modes in the future */
+
switch (mode) {
case mucse_fw_powerup:
err = mucse_mbx_powerup(hw, enable);
break;
+ case mucse_fw_portup:
+ err = mucse_mbx_phyup(hw, enable);
+ break;
+ case mucse_fw_link_report_en:
+ err = mucse_mbx_link_report(hw, enable);
+ break;
default:
err = -EINVAL;
}
@@ -149,3 +155,26 @@ int rnpgbe_init_hw(struct mucse_hw *hw, int board_type)
return 0;
}
+
+/**
+ * rnpgbe_set_rx - Setup rx state
+ * @hw: hw information structure
+ * @enable: set rx on or off
+ *
+ * rnpgbe_set_rx enables or disables the MAC receiver
+ **/
+void rnpgbe_set_rx(struct mucse_hw *hw, bool enable)
+{
+ u32 value = mucse_hw_rd32(hw, GMAC_CONTROL);
+
+ if (enable)
+ value |= GMAC_CONTROL_RE;
+ else
+ value &= ~GMAC_CONTROL_RE;
+
+ mucse_hw_wr32(hw, GMAC_CONTROL, value);
+
+ value = mucse_hw_rd32(hw, GMAC_FRAME_FILTER);
+ mucse_hw_wr32(hw, GMAC_FRAME_FILTER, value | BIT(0));
+}
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_hw.h b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_hw.h
index ce092edf920a..b17573c57638 100644
--- a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_hw.h
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_hw.h
@@ -17,9 +17,21 @@
#define TX_AXI_RW_EN 0xc
#define RX_AXI_RW_EN 0x03
+/* mask all valid info */
+#define M_ST_MASK 0x0f000f11
+/* 31:28 set 0xa to valid it is a driver set info */
+#define M_DEFAULT_ST 0xa0000000
+/* driver setup this by own info */
+/*bit: 27:24 | 11:8 | 4 | 0 */
+/*fun: pause | speed | duplex | up/down */
+#define RNPGBE_LINK_ST 0x000c
#define RNPGBE_DMA_AXI_EN 0x0010
#define RNPGBE_LEGACY_TIME 0xd000
#define RNPGBE_LEGACY_ENABLE 0xd004
+#define MUCSE_GMAC_OFF(_n) (0x20000 + (_n))
+#define GMAC_CONTROL_RE 0x00000004
+#define GMAC_CONTROL MUCSE_GMAC_OFF(0)
+#define GMAC_FRAME_FILTER MUCSE_GMAC_OFF(0x4)
#define RNPGBE_MAX_QUEUES 8
#endif /* _RNPGBE_HW_H */
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_lib.c b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_lib.c
index 3dbb697a0667..ccd69224944d 100644
--- a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_lib.c
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_lib.c
@@ -9,6 +9,7 @@
#include "rnpgbe_lib.h"
#include "rnpgbe.h"
+#include "rnpgbe_mbx_fw.h"
/**
* rnpgbe_msix_other - Other irq handler
@@ -19,6 +20,10 @@
**/
static irqreturn_t rnpgbe_msix_other(int irq, void *data)
{
+ struct mucse *mucse = (struct mucse *)data;
+
+ mucse_fw_irq_handler(&mucse->hw);
+
return IRQ_HANDLED;
}
@@ -897,6 +902,8 @@ static irqreturn_t rnpgbe_intr(int irq, void *data)
struct mucse *mucse = (struct mucse *)data;
struct mucse_q_vector *q_vector;
+ mucse_fw_irq_handler(&mucse->hw);
+
q_vector = mucse->q_vector[0];
rnpgbe_irq_disable_queues(q_vector);
if (q_vector->rx.ring || q_vector->tx.ring)
@@ -1158,7 +1165,23 @@ static void rnpgbe_clean_all_rx_rings(struct mucse *mucse)
void rnpgbe_down(struct mucse *mucse)
{
struct net_device *netdev = mucse->netdev;
+ struct mucse_hw *hw = &mucse->hw;
+ int err;
+
+ set_bit(__MUCSE_DOWN, &mucse->state);
+
+ err = rnpgbe_send_notify(hw, false, mucse_fw_link_report_en);
+ if (err) {
+ dev_warn(&hw->pdev->dev, "Send link report to hw failed %d\n", err);
+ dev_warn(&hw->pdev->dev, "Fw will still report link event\n");
+ }
+ err = rnpgbe_send_notify(hw, false, mucse_fw_portup);
+ if (err) {
+ dev_warn(&hw->pdev->dev, "Send port down to hw failed %d\n", err);
+ dev_warn(&hw->pdev->dev, "Port is not truly down\n");
+ }
+ netif_carrier_off(netdev);
netif_tx_stop_all_queues(netdev);
rnpgbe_clean_all_tx_rings(mucse);
rnpgbe_irq_disable(mucse);
@@ -1174,6 +1197,8 @@ void rnpgbe_down(struct mucse *mucse)
void rnpgbe_up_complete(struct mucse *mucse)
{
struct net_device *netdev = mucse->netdev;
+ struct mucse_hw *hw = &mucse->hw;
+ int err;
rnpgbe_configure_msix(mucse);
rnpgbe_napi_enable_all(mucse);
@@ -1181,6 +1206,20 @@ void rnpgbe_up_complete(struct mucse *mucse)
netif_tx_start_all_queues(netdev);
for (int i = 0; i < mucse->num_rx_queues; i++)
mucse_ring_wr32(mucse->rx_ring[i], RNPGBE_RX_START, 1);
+
+ err = rnpgbe_send_notify(hw, true, mucse_fw_portup);
+ if (err) {
+ dev_warn(&hw->pdev->dev, "Send portup to hw failed %d\n", err);
+ dev_warn(&hw->pdev->dev, "Port is not truly up\n");
+ }
+
+ err = rnpgbe_send_notify(hw, true, mucse_fw_link_report_en);
+ if (err) {
+ dev_warn(&hw->pdev->dev, "Send link report to hw failed %d\n", err);
+ dev_warn(&hw->pdev->dev, "Fw will not report link event\n");
+ }
+ clear_bit(__MUCSE_DOWN, &mucse->state);
+ queue_delayed_work(mucse->serv_wq, &mucse->serv_task, msecs_to_jiffies(500));
}
/**
@@ -1772,3 +1811,107 @@ void rnpgbe_configure_rx(struct mucse *mucse)
dma_axi_ctl |= RX_AXI_RW_EN;
mucse_hw_wr32(hw, RNPGBE_DMA_AXI_EN, dma_axi_ctl);
}
+
+/**
+ * rnpgbe_watchdog_update_link - Update the link status
+ * @mucse: pointer to the device private structure
+ **/
+static void rnpgbe_watchdog_update_link(struct mucse *mucse)
+{
+ struct net_device *netdev = mucse->netdev;
+ struct mucse_hw *hw = &mucse->hw;
+ unsigned long flags;
+ bool link;
+ int speed;
+ u8 duplex;
+
+ if (!(mucse->flags & M_FLAG_NEED_LINK_UPDATE))
+ return;
+
+ spin_lock_irqsave(&mucse->link_lock, flags);
+
+ link = hw->link;
+ speed = hw->speed;
+ duplex = hw->duplex;
+
+ mucse->flags &= ~M_FLAG_NEED_LINK_UPDATE;
+ spin_unlock_irqrestore(&mucse->link_lock, flags);
+
+ if (link) {
+ netdev_info(netdev, "NIC Link is Up %d Mbps, %s Duplex\n",
+ speed,
+ duplex ? "Full" : "Half");
+ }
+}
+
+/**
+ * rnpgbe_watchdog_link_is_up - Update netif_carrier status and
+ * wake TX queues when link comes up
+ * @mucse: pointer to the device private structure
+ **/
+static void rnpgbe_watchdog_link_is_up(struct mucse *mucse)
+{
+ struct net_device *netdev = mucse->netdev;
+ struct mucse_hw *hw = &mucse->hw;
+
+ /* Only continue if link was previously down */
+ if (netif_carrier_ok(netdev))
+ return;
+ rnpgbe_set_rx(hw, true);
+ netif_carrier_on(netdev);
+ netif_tx_wake_all_queues(netdev);
+}
+
+/**
+ * rnpgbe_watchdog_link_is_down - Update netif_carrier status and
+ * print link down message
+ * @mucse: pointer to the private structure
+ **/
+static void rnpgbe_watchdog_link_is_down(struct mucse *mucse)
+{
+ struct net_device *netdev = mucse->netdev;
+ struct mucse_hw *hw = &mucse->hw;
+
+ /* Only continue if link was up previously */
+ if (!netif_carrier_ok(netdev))
+ return;
+ netdev_info(netdev, "NIC Link is Down\n");
+ rnpgbe_set_rx(hw, false);
+ netif_carrier_off(netdev);
+ netif_tx_stop_all_queues(netdev);
+}
+
+/**
+ * rnpgbe_watchdog_subtask - Check and bring link up
+ * @mucse: pointer to the device private structure
+ **/
+static void rnpgbe_watchdog_subtask(struct mucse *mucse)
+{
+ struct mucse_hw *hw = &mucse->hw;
+ /* if interface is down do nothing */
+ if (test_bit(__MUCSE_DOWN, &mucse->state))
+ return;
+
+ rnpgbe_watchdog_update_link(mucse);
+ if (hw->link)
+ rnpgbe_watchdog_link_is_up(mucse);
+ else
+ rnpgbe_watchdog_link_is_down(mucse);
+}
+
+/**
+ * rnpgbe_service_task - Manages and runs subtasks
+ * @work: pointer to work_struct containing our data
+ **/
+void rnpgbe_service_task(struct work_struct *work)
+{
+ struct mucse *mucse = container_of(work, struct mucse, serv_task.work);
+
+ if (test_bit(__MUCSE_DOWN, &mucse->state))
+ return;
+
+ rnpgbe_watchdog_subtask(mucse);
+
+ queue_delayed_work(mucse->serv_wq, &mucse->serv_task,
+ msecs_to_jiffies(500));
+}
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_lib.h b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_lib.h
index 29520ad716ca..74b4b0ab0b89 100644
--- a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_lib.h
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_lib.h
@@ -86,4 +86,5 @@ void rnpgbe_get_stats64(struct net_device *netdev,
void rnpgbe_clean_rx_ring(struct mucse_ring *rx_ring);
int rnpgbe_setup_all_rx_resources(struct mucse *mucse);
void rnpgbe_free_all_rx_resources(struct mucse *mucse);
+void rnpgbe_service_task(struct work_struct *work);
#endif
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_main.c b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_main.c
index 413eefae65dd..6b009c1be270 100644
--- a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_main.c
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_main.c
@@ -52,6 +52,7 @@ static int rnpgbe_open(struct net_device *netdev)
struct mucse *mucse = netdev_priv(netdev);
int err;
+ netif_carrier_off(netdev);
err = rnpgbe_request_irq(mucse);
if (err)
return err;
@@ -147,6 +148,7 @@ static void rnpgbe_sw_init(struct mucse *mucse)
static int rnpgbe_add_adapter(struct pci_dev *pdev,
int board_type)
{
+ struct device *dev = &pdev->dev;
struct net_device *netdev;
u8 perm_addr[ETH_ALEN];
void __iomem *hw_addr;
@@ -181,6 +183,16 @@ static int rnpgbe_add_adapter(struct pci_dev *pdev,
dev_err(&pdev->dev, "Init hw err %d\n", err);
goto err_free_net;
}
+
+ mucse->serv_wq = alloc_workqueue("%s-%s-service",
+ WQ_UNBOUND | WQ_MEM_RECLAIM, 0,
+ dev_driver_string(dev),
+ dev_name(dev));
+ if (!mucse->serv_wq) {
+ dev_err(dev, "Failed to allocate service workqueue\n");
+ err = -ENOMEM;
+ goto err_free_net;
+ }
/* Step 1: Send power-up notification to firmware (no response expected)
* This informs firmware to initialize hardware power state, but
* firmware only acknowledges receipt without returning data. Must be
@@ -223,6 +235,9 @@ static int rnpgbe_add_adapter(struct pci_dev *pdev,
goto err_powerdown;
}
+ INIT_DELAYED_WORK(&mucse->serv_task, rnpgbe_service_task);
+ spin_lock_init(&mucse->link_lock);
+
err = rnpgbe_init_interrupt_scheme(mucse);
if (err) {
dev_err(&pdev->dev, "init interrupt failed %d\n", err);
@@ -245,6 +260,7 @@ static int rnpgbe_add_adapter(struct pci_dev *pdev,
err_clear_interrupt:
rnpgbe_clear_interrupt_scheme(mucse);
err_powerdown:
+ destroy_workqueue(mucse->serv_wq);
/* notify powerdown only powerup ok */
if (!err_notify) {
err_notify = rnpgbe_send_notify(hw, false, mucse_fw_powerup);
@@ -324,6 +340,7 @@ static void rnpgbe_rm_adapter(struct pci_dev *pdev)
if (!mucse)
return;
+ cancel_delayed_work_sync(&mucse->serv_task);
netdev = mucse->netdev;
unregister_netdev(netdev);
err = rnpgbe_send_notify(hw, false, mucse_fw_powerup);
@@ -331,6 +348,7 @@ static void rnpgbe_rm_adapter(struct pci_dev *pdev)
dev_warn(&pdev->dev, "Send powerdown to hw failed %d\n", err);
remove_mbx_irq(mucse);
rnpgbe_clear_interrupt_scheme(mucse);
+ destroy_workqueue(mucse->serv_wq);
free_netdev(netdev);
}
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx.c b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx.c
index de5e29230b3c..1d4e2ae78154 100644
--- a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx.c
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx.c
@@ -247,6 +247,26 @@ int mucse_poll_and_read_mbx(struct mucse_hw *hw, u32 *msg, u16 size)
return mucse_read_mbx_pf(hw, msg, size);
}
+/**
+ * mucse_check_and_read_mbx - check if there is notification and receive message
+ * @hw: pointer to the HW structure
+ * @msg: the message buffer
+ * @size: length of buffer
+ *
+ * Return: 0 if it successfully received a message notification and
+ * copied it into the receive buffer, negative errno on failure
+ **/
+int mucse_check_and_read_mbx(struct mucse_hw *hw, u32 *msg, u16 size)
+{
+ int err;
+
+ err = mucse_check_for_msg_pf(hw);
+ if (err)
+ return err;
+
+ return mucse_read_mbx_pf(hw, msg, size);
+}
+
/**
* mucse_mbx_get_fwack - Read fw ack from reg
* @mbx: pointer to the MBX structure
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx.h b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx.h
index e6fcc8d1d3ca..cba54a07a7fa 100644
--- a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx.h
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx.h
@@ -17,4 +17,5 @@
int mucse_write_and_wait_ack_mbx(struct mucse_hw *hw, u32 *msg, u16 size);
void mucse_init_mbx_params_pf(struct mucse_hw *hw);
int mucse_poll_and_read_mbx(struct mucse_hw *hw, u32 *msg, u16 size);
+int mucse_check_and_read_mbx(struct mucse_hw *hw, u32 *msg, u16 size);
#endif /* _RNPGBE_MBX_H */
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.c b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.c
index 8c8bd5e8e1db..09e2505ab8cd 100644
--- a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.c
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.c
@@ -3,6 +3,7 @@
#include <linux/if_ether.h>
#include <linux/bitfield.h>
+#include <linux/ethtool.h>
#include "rnpgbe.h"
#include "rnpgbe_mbx.h"
@@ -189,3 +190,167 @@ int mucse_mbx_get_macaddr(struct mucse_hw *hw, int pfvfnum,
return 0;
}
+
+/**
+ * mucse_mbx_phyup - Notify fw to bring the phy up or down
+ * @hw: pointer to the HW structure
+ * @is_phyup: true for up, false for down
+ *
+ * mucse_mbx_phyup notifies fw to change the phy status
+ *
+ * Return: 0 on success, negative errno on failure
+ **/
+int mucse_mbx_phyup(struct mucse_hw *hw, bool is_phyup)
+{
+ struct mbx_fw_cmd_req req = {
+ .datalen = cpu_to_le16(sizeof(req.phy_status) +
+ MUCSE_MBX_REQ_HDR_LEN),
+ .opcode = cpu_to_le16(SET_PHY_UP),
+ .phy_status = {
+ .port = cpu_to_le32(hw->port),
+ .status = cpu_to_le32(is_phyup ? 1 : 0),
+ },
+ };
+ int len, err;
+
+ len = le16_to_cpu(req.datalen);
+ mutex_lock(&hw->mbx.lock);
+ err = mucse_write_and_wait_ack_mbx(hw, (u32 *)&req, len);
+ mutex_unlock(&hw->mbx.lock);
+
+ return err;
+}
+
+/**
+ * mucse_mbx_link_report - Notify fw whether to report link change events
+ * @hw: pointer to the HW structure
+ * @is_report: true for report, false for no
+ *
+ * mucse_mbx_link_report notifies fw to change the event report state
+ *
+ * Return: 0 on success, negative errno on failure
+ **/
+int mucse_mbx_link_report(struct mucse_hw *hw, bool is_report)
+{
+ struct mbx_fw_cmd_req req = {
+ .datalen = cpu_to_le16(sizeof(req.report_status) +
+ MUCSE_MBX_REQ_HDR_LEN),
+ .opcode = cpu_to_le16(LINK_REPORT_EN),
+ .report_status = {
+ .port_mask = cpu_to_le16(hw->port),
+ .status = cpu_to_le16(is_report ? 1 : 0),
+ },
+ };
+ int len, err;
+
+ len = le16_to_cpu(req.datalen);
+ mutex_lock(&hw->mbx.lock);
+ err = mucse_write_and_wait_ack_mbx(hw, (u32 *)&req, len);
+ mutex_unlock(&hw->mbx.lock);
+
+ return err;
+}
+
+/**
+ * mucse_update_link_status_reg - update driver speed inf to reg
+ * @hw: pointer to the HW structure
+ * @req: pointer to req data
+ *
+ * mucse_update_link_status_reg update reg according to driver info,
+ * fw will send irq if status is differ with reg
+ *
+ **/
+static void mucse_update_link_status_reg(struct mucse_hw *hw,
+ struct mbx_fw_cmd_req *req)
+{
+ u32 value;
+
+ value = mucse_hw_rd32(hw, RNPGBE_LINK_ST);
+ value &= ~M_ST_MASK;
+ value |= M_DEFAULT_ST;
+
+ if (le16_to_cpu(req->link_stat.port_status)) {
+ value |= BIT(0);
+ switch (hw->speed) {
+ case 10:
+ value |= (mucse_speed_10 << 8);
+ break;
+ case 100:
+ value |= (mucse_speed_100 << 8);
+ break;
+ case 1000:
+ value |= (mucse_speed_1000 << 8);
+ break;
+ default:
+ /* invalid speed do nothing */
+ break;
+ }
+
+ value |= (hw->duplex << 4);
+ value |= (req->link_stat.st[0].s_host.pause << 24);
+ } else {
+ value &= ~BIT(0);
+ }
+
+ if (req->link_stat.st[0].s_host.lldp_status)
+ value |= BIT(6);
+ else
+ value &= ~BIT(6);
+
+ mucse_hw_wr32(hw, RNPGBE_LINK_ST, value);
+}
+
+/**
+ * mucse_mbx_fw_req_handler - Handle fw req
+ * @hw: pointer to the HW structure
+ * @req: pointer to req data
+ *
+ * mucse_mbx_fw_req_handler handles a fw request, such as a link event req.
+ **/
+static void mucse_mbx_fw_req_handler(struct mucse_hw *hw,
+ struct mbx_fw_cmd_req *req)
+{
+ struct mucse *mucse = container_of(hw, struct mucse, hw);
+ u32 magic = le32_to_cpu(req->link_stat.port_magic);
+ unsigned long flags;
+
+ if (le16_to_cpu(req->opcode) == LINK_CHANGE_EVT) {
+ spin_lock_irqsave(&mucse->link_lock, flags);
+
+ if (le16_to_cpu(req->link_stat.port_status))
+ hw->link = true;
+ else
+ hw->link = false;
+
+ if (magic == ST_VALID_MAGIC) {
+ hw->speed = le16_to_cpu(req->link_stat.st[0].speed);
+ hw->duplex = req->link_stat.st[0].duplex;
+ } else {
+ hw->speed = 0;
+ hw->duplex = 0;
+ }
+ /* update regs to notify link info is received from fw */
+ mucse_update_link_status_reg(hw, req);
+ mucse->flags |= M_FLAG_NEED_LINK_UPDATE;
+ spin_unlock_irqrestore(&mucse->link_lock, flags);
+ }
+}
+
+/**
+ * mucse_fw_irq_handler - Try to handle a req from hw
+ * @hw: pointer to the HW structure
+ **/
+void mucse_fw_irq_handler(struct mucse_hw *hw)
+{
+ struct mbx_fw_cmd_req req = {};
+
+ /* try to check and read fw req */
+ if (mucse_check_and_read_mbx(hw, (u32 *)&req, sizeof(req)))
+ return;
+
+ /* handle it if is a req from fw */
+ if (!(le16_to_cpu(req.flags) & FLAGS_REPLY))
+ mucse_mbx_fw_req_handler(hw, &req);
+}
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.h b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.h
index fb24fc12b613..b0f6ae8f90d9 100644
--- a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.h
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.h
@@ -14,6 +14,9 @@ enum MUCSE_FW_CMD {
GET_HW_INFO = 0x0601,
GET_MAC_ADDRESS = 0x0602,
RESET_HW = 0x0603,
+ LINK_CHANGE_EVT = 0x0608,
+ LINK_REPORT_EN = 0x0613,
+ SET_PHY_UP = 0x0800,
POWER_UP = 0x0803,
};
@@ -36,6 +39,25 @@ struct mucse_hw_info {
__le32 ext_info;
} __packed;
+/* Per-PHY link status as reported by firmware in a link event.
+ * This is a wire format shared with firmware — keep the layout in
+ * sync with the firmware definition.
+ * NOTE(review): bit-field allocation order within a storage unit is
+ * compiler- and endian-dependent; this assumes the firmware and host
+ * agree (little-endian) — confirm, or consider explicit masks on the
+ * __le16 status word instead.
+ */
+struct st_status {
+ u8 phyid;
+ u8 duplex : 1;
+ u8 autoneg : 1;
+ u8 fec : 1;
+ __le16 speed;
+ union {
+ __le16 status;
+ struct {
+ u16 pause : 4;
+ u16 local_eee : 3;
+ u16 partner_eee : 3;
+ u16 tp_mdx : 2;
+ u16 lldp_status : 1;
+ u16 revs : 3;
+ } s_host;
+ };
+} __packed;
+
struct mbx_fw_cmd_req {
__le16 flags;
__le16 opcode;
@@ -55,10 +77,26 @@ struct mbx_fw_cmd_req {
__le32 port_mask;
__le32 pfvf_num;
} get_mac_addr;
+ struct {
+ __le32 port;
+ __le32 status;
+ } phy_status;
+ struct {
+ __le16 status;
+ __le16 port_mask;
+ } report_status;
+ struct {
+ __le16 changed_lanes;
+ __le16 port_status;
+ __le32 port_magic;
+#define ST_VALID_MAGIC 0xa4a6a8a9
+ struct st_status st[4];
+ } link_stat;
};
} __packed;
struct mbx_fw_cmd_reply {
+#define FLAGS_REPLY BIT(0)
__le16 flags;
__le16 opcode;
__le16 error_code;
@@ -80,9 +118,18 @@ struct mbx_fw_cmd_reply {
};
} __packed;
+/* Link speed selectors; by name these correspond to 10/100/1000 Mbps */
+enum mucse_speed {
+ mucse_speed_10,
+ mucse_speed_100,
+ mucse_speed_1000,
+};
+
int mucse_mbx_sync_fw(struct mucse_hw *hw);
int mucse_mbx_powerup(struct mucse_hw *hw, bool is_powerup);
int mucse_mbx_reset_hw(struct mucse_hw *hw);
int mucse_mbx_get_macaddr(struct mucse_hw *hw, int pfvfnum,
u8 *mac_addr, int port);
+int mucse_mbx_phyup(struct mucse_hw *hw, bool is_phyup);
+int mucse_mbx_link_report(struct mucse_hw *hw, bool is_report);
+void mucse_fw_irq_handler(struct mucse_hw *hw);
#endif /* _RNPGBE_MBX_FW_H */
--
2.25.1
^ permalink raw reply related [flat|nested] 6+ messages in thread* Re: [PATCH net-next 4/4] net: rnpgbe: Add link status handling support
2026-03-25 9:12 ` [PATCH net-next 4/4] net: rnpgbe: Add link status handling support Dong Yibo
@ 2026-03-26 4:46 ` kernel test robot
0 siblings, 0 replies; 6+ messages in thread
From: kernel test robot @ 2026-03-26 4:46 UTC (permalink / raw)
To: Dong Yibo, andrew+netdev, davem, edumazet, kuba, pabeni,
danishanwar
Cc: oe-kbuild-all, linux-kernel, netdev, dong100
Hi Dong,
kernel test robot noticed the following build warnings:
[auto build test WARNING on net-next/main]
url: https://github.com/intel-lab-lkp/linux/commits/Dong-Yibo/net-rnpgbe-Add-interrupt-handling/20260326-025137
base: net-next/main
patch link: https://lore.kernel.org/r/20260325091204.94015-5-dong100%40mucse.com
patch subject: [PATCH net-next 4/4] net: rnpgbe: Add link status handling support
config: x86_64-randconfig-002-20260326 (https://download.01.org/0day-ci/archive/20260326/202603261216.IjF1gt9b-lkp@intel.com/config)
compiler: gcc-14 (Debian 14.2.0-19) 14.2.0
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20260326/202603261216.IjF1gt9b-lkp@intel.com/reproduce)
If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202603261216.IjF1gt9b-lkp@intel.com/
All warnings (new ones prefixed by >>):
>> Warning: drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.c:233 function parameter 'is_report' not described in 'mucse_mbx_link_report'
>> Warning: drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.c:313 function parameter 'hw' not described in 'mucse_mbx_fw_req_handler'
>> Warning: drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.c:233 function parameter 'is_report' not described in 'mucse_mbx_link_report'
>> Warning: drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.c:313 function parameter 'hw' not described in 'mucse_mbx_fw_req_handler'
--
>> Warning: drivers/net/ethernet/mucse/rnpgbe/rnpgbe_lib.c:1888 expecting prototype for mucse_watchdog_subtask(). Prototype was for rnpgbe_watchdog_subtask() instead
--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki
^ permalink raw reply [flat|nested] 6+ messages in thread