From: Bhargava Marreddy <bhargava.marreddy@broadcom.com>
To: davem@davemloft.net, edumazet@google.com, kuba@kernel.org,
pabeni@redhat.com, andrew+netdev@lunn.ch, horms@kernel.org
Cc: netdev@vger.kernel.org, linux-kernel@vger.kernel.org,
michael.chan@broadcom.com, pavan.chebbi@broadcom.com,
vsrama-krishna.nemani@broadcom.com,
Bhargava Marreddy <bhargava.marreddy@broadcom.com>,
Vikas Gupta <vikas.gupta@broadcom.com>,
Rajashekar Hudumula <rajashekar.hudumula@broadcom.com>
Subject: [v2, net-next 2/9] bng_en: Add initial support for CP and NQ rings
Date: Mon, 18 Aug 2025 19:47:09 +0000
Message-ID: <20250818194716.15229-3-bhargava.marreddy@broadcom.com>
In-Reply-To: <20250818194716.15229-1-bhargava.marreddy@broadcom.com>
Allocate the completion (CP) and notification queue (NQ) ring data
structures and add support for associating CP rings with their parent
NQ rings. Also associate each NQ ring with a NAPI instance and an
MSI-X vector.
Signed-off-by: Bhargava Marreddy <bhargava.marreddy@broadcom.com>
Reviewed-by: Vikas Gupta <vikas.gupta@broadcom.com>
Reviewed-by: Rajashekar Hudumula <rajashekar.hudumula@broadcom.com>
---
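A quick standalone sketch of the NQ -> CP ring fan-out that
bnge_alloc_nq_tree() implements below, for reviewers who want to see
the mapping in isolation. The ring counts here are made-up example
values; the rules themselves (one RX CP ring for the first rx_nr_rings
NQs, plus one TX CP ring per TC, with the shared-channel flag deciding
which NQs carry TX) mirror the patch:

/* Models the cp_count computation from bnge_alloc_nq_tree().
 * Build with: cc -std=c99 -o nq_fanout nq_fanout.c
 */
#include <stdio.h>
#include <stdbool.h>

int main(void)
{
	int rx_nr_rings = 4, tx_nr_rings = 4, nq_nr_rings = 4;
	int tcs = 1;         /* this patch hard-codes a single TC */
	bool shared = true;  /* BNGE_EN_SHARED_CHNL: NQ serves RX and TX */

	for (int i = 0; i < nq_nr_rings; i++) {
		int cp_count = 0;

		if (i < rx_nr_rings)
			cp_count++;       /* RX completion ring */
		if ((shared && i < tx_nr_rings) ||
		    (!shared && i >= rx_nr_rings))
			cp_count += tcs;  /* one TX completion ring per TC */

		printf("NQ %d -> %d CP ring(s)\n", i, cp_count);
	}
	return 0;
}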
drivers/net/ethernet/broadcom/bnge/bnge.h | 1 +
.../net/ethernet/broadcom/bnge/bnge_netdev.c | 434 ++++++++++++++++++
.../net/ethernet/broadcom/bnge/bnge_netdev.h | 12 +
.../net/ethernet/broadcom/bnge/bnge_resc.c | 2 +-
4 files changed, 448 insertions(+), 1 deletion(-)
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge.h b/drivers/net/ethernet/broadcom/bnge/bnge.h
index 03e55b931f7..c536c0cc66e 100644
--- a/drivers/net/ethernet/broadcom/bnge/bnge.h
+++ b/drivers/net/ethernet/broadcom/bnge/bnge.h
@@ -215,5 +215,6 @@ static inline bool bnge_is_agg_reqd(struct bnge_dev *bd)
}
bool bnge_aux_registered(struct bnge_dev *bd);
+u16 bnge_aux_get_msix(struct bnge_dev *bd);
#endif /* _BNGE_H_ */
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_netdev.c b/drivers/net/ethernet/broadcom/bnge/bnge_netdev.c
index 9fbb1c385fe..d8976faabd3 100644
--- a/drivers/net/ethernet/broadcom/bnge/bnge_netdev.c
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_netdev.c
@@ -27,6 +27,225 @@
#define BNGE_RING_TO_TC(bd, tx) \
((tx) / (bd)->tx_nr_rings_per_tc)
+#define BNGE_TC_TO_RING_BASE(bd, tc) \
+ ((tc) * (bd)->tx_nr_rings_per_tc)
+
+static void bnge_free_nq_desc_arr(struct bnge_nq_ring_info *nqr)
+{
+ struct bnge_ring_struct *ring = &nqr->ring_struct;
+
+ kfree(nqr->desc_ring);
+ nqr->desc_ring = NULL;
+ ring->ring_mem.pg_arr = NULL;
+ kfree(nqr->desc_mapping);
+ nqr->desc_mapping = NULL;
+ ring->ring_mem.dma_arr = NULL;
+}
+
+static void bnge_free_cp_desc_arr(struct bnge_cp_ring_info *cpr)
+{
+ struct bnge_ring_struct *ring = &cpr->ring_struct;
+
+ kfree(cpr->desc_ring);
+ cpr->desc_ring = NULL;
+ ring->ring_mem.pg_arr = NULL;
+ kfree(cpr->desc_mapping);
+ cpr->desc_mapping = NULL;
+ ring->ring_mem.dma_arr = NULL;
+}
+
+static int bnge_alloc_nq_desc_arr(struct bnge_nq_ring_info *nqr, int n)
+{
+ nqr->desc_ring = kcalloc(n, sizeof(*nqr->desc_ring), GFP_KERNEL);
+ if (!nqr->desc_ring)
+ return -ENOMEM;
+
+ nqr->desc_mapping = kcalloc(n, sizeof(*nqr->desc_mapping), GFP_KERNEL);
+ if (!nqr->desc_mapping)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int bnge_alloc_cp_desc_arr(struct bnge_cp_ring_info *cpr, int n)
+{
+ cpr->desc_ring = kcalloc(n, sizeof(*cpr->desc_ring), GFP_KERNEL);
+ if (!cpr->desc_ring)
+ return -ENOMEM;
+
+ cpr->desc_mapping = kcalloc(n, sizeof(*cpr->desc_mapping), GFP_KERNEL);
+ if (!cpr->desc_mapping)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void bnge_free_nq_arrays(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ int i;
+
+ for (i = 0; i < bd->nq_nr_rings; i++) {
+ struct bnge_napi *bnapi = bn->bnapi[i];
+
+ bnge_free_nq_desc_arr(&bnapi->nq_ring);
+ }
+}
+
+static int bnge_alloc_nq_arrays(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ int i;
+
+ for (i = 0; i < bd->nq_nr_rings; i++) {
+ struct bnge_napi *bnapi = bn->bnapi[i];
+ int rc;
+
+ rc = bnge_alloc_nq_desc_arr(&bnapi->nq_ring, bn->cp_nr_pages);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
+
+static void bnge_free_nq_tree(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ int i;
+
+ if (!bn->bnapi)
+ return;
+
+ for (i = 0; i < bd->nq_nr_rings; i++) {
+ struct bnge_napi *bnapi = bn->bnapi[i];
+ struct bnge_nq_ring_info *nqr;
+ struct bnge_ring_struct *ring;
+ int j;
+
+ nqr = &bnapi->nq_ring;
+ ring = &nqr->ring_struct;
+
+ bnge_free_ring(bd, &ring->ring_mem);
+
+ if (!nqr->cp_ring_arr)
+ continue;
+
+ for (j = 0; j < nqr->cp_ring_count; j++) {
+ struct bnge_cp_ring_info *cpr = &nqr->cp_ring_arr[j];
+
+ ring = &cpr->ring_struct;
+ bnge_free_ring(bd, &ring->ring_mem);
+ bnge_free_cp_desc_arr(cpr);
+ }
+ kfree(nqr->cp_ring_arr);
+ nqr->cp_ring_arr = NULL;
+ nqr->cp_ring_count = 0;
+ }
+}
+
+static int alloc_one_cp_ring(struct bnge_net *bn,
+ struct bnge_cp_ring_info *cpr)
+{
+ struct bnge_ring_mem_info *rmem;
+ struct bnge_ring_struct *ring;
+ struct bnge_dev *bd = bn->bd;
+ int rc;
+
+ rc = bnge_alloc_cp_desc_arr(cpr, bn->cp_nr_pages);
+ if (rc) {
+ bnge_free_cp_desc_arr(cpr);
+ return -ENOMEM;
+ }
+ ring = &cpr->ring_struct;
+ rmem = &ring->ring_mem;
+ rmem->nr_pages = bn->cp_nr_pages;
+ rmem->page_size = HW_CMPD_RING_SIZE;
+ rmem->pg_arr = (void **)cpr->desc_ring;
+ rmem->dma_arr = cpr->desc_mapping;
+ rmem->flags = BNGE_RMEM_RING_PTE_FLAG;
+ rc = bnge_alloc_ring(bd, rmem);
+ if (rc) {
+ bnge_free_ring(bd, rmem);
+ bnge_free_cp_desc_arr(cpr);
+ }
+
+ return rc;
+}
+
+static int bnge_alloc_nq_tree(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ int i, j, rc, ulp_msix;
+ int tcs = 1;
+
+ ulp_msix = bnge_aux_get_msix(bd);
+ for (i = 0, j = 0; i < bd->nq_nr_rings; i++) {
+ bool sh = !!(bd->flags & BNGE_EN_SHARED_CHNL);
+ struct bnge_napi *bnapi = bn->bnapi[i];
+ struct bnge_nq_ring_info *nqr;
+ struct bnge_cp_ring_info *cpr;
+ struct bnge_ring_struct *ring;
+ int cp_count = 0, k;
+ int rx = 0, tx = 0;
+
+ if (!bnapi)
+ continue;
+
+ nqr = &bnapi->nq_ring;
+ nqr->bnapi = bnapi;
+ ring = &nqr->ring_struct;
+
+ rc = bnge_alloc_ring(bd, &ring->ring_mem);
+ if (rc)
+ return rc;
+
+ ring->map_idx = ulp_msix + i;
+
+ if (i < bd->rx_nr_rings) {
+ cp_count++;
+ rx = 1;
+ }
+
+ if ((sh && i < bd->tx_nr_rings) ||
+ (!sh && i >= bd->rx_nr_rings)) {
+ cp_count += tcs;
+ tx = 1;
+ }
+
+ nqr->cp_ring_arr = kcalloc(cp_count, sizeof(*cpr),
+ GFP_KERNEL);
+ if (!nqr->cp_ring_arr)
+ return -ENOMEM;
+
+ nqr->cp_ring_count = cp_count;
+
+ for (k = 0; k < cp_count; k++) {
+ cpr = &nqr->cp_ring_arr[k];
+ rc = alloc_one_cp_ring(bn, cpr);
+ if (rc)
+ return rc;
+
+ cpr->bnapi = bnapi;
+ cpr->cp_idx = k;
+ if (!k && rx) {
+ bn->rx_ring[i].rx_cpr = cpr;
+ cpr->cp_ring_type = BNGE_NQ_HDL_TYPE_RX;
+ } else {
+ int n, tc = k - rx;
+
+ n = BNGE_TC_TO_RING_BASE(bd, tc) + j;
+ bn->tx_ring[n].tx_cpr = cpr;
+ cpr->cp_ring_type = BNGE_NQ_HDL_TYPE_TX;
+ }
+ }
+ if (tx)
+ j++;
+ }
+
+ return 0;
+}
+
static bool bnge_separate_head_pool(struct bnge_rx_ring_info *rxr)
{
return rxr->need_head_pool || PAGE_SIZE > BNGE_RX_PAGE_SIZE;
@@ -209,10 +428,19 @@ static int bnge_alloc_tx_rings(struct bnge_net *bn)
return 0;
}
+static void bnge_free_ring_grps(struct bnge_net *bn)
+{
+ kfree(bn->grp_info);
+ bn->grp_info = NULL;
+}
+
static void bnge_free_core(struct bnge_net *bn)
{
bnge_free_tx_rings(bn);
bnge_free_rx_rings(bn);
+ bnge_free_nq_tree(bn);
+ bnge_free_nq_arrays(bn);
+ bnge_free_ring_grps(bn);
kfree(bn->tx_ring_map);
bn->tx_ring_map = NULL;
kfree(bn->tx_ring);
@@ -300,6 +528,10 @@ static int bnge_alloc_core(struct bnge_net *bn)
txr->bnapi = bnapi2;
}
+ rc = bnge_alloc_nq_arrays(bn);
+ if (rc)
+ goto err_free_core;
+
bnge_init_ring_struct(bn);
rc = bnge_alloc_rx_rings(bn);
@@ -310,13 +542,198 @@ static int bnge_alloc_core(struct bnge_net *bn)
if (rc)
goto err_free_core;
+ rc = bnge_alloc_nq_tree(bn);
+ if (rc)
+ goto err_free_core;
+
return 0;
err_free_core:
bnge_free_core(bn);
+
+ return rc;
+}
+
+static int bnge_cp_num_to_irq_num(struct bnge_net *bn, int n)
+{
+ struct bnge_napi *bnapi = bn->bnapi[n];
+ struct bnge_nq_ring_info *nqr;
+
+ nqr = &bnapi->nq_ring;
+
+ return nqr->ring_struct.map_idx;
+}
+
+static irqreturn_t bnge_msix(int irq, void *dev_instance)
+{
+ /* NAPI scheduling to be added in a future patch */
+ return IRQ_HANDLED;
+}
+
+static void bnge_setup_msix(struct bnge_net *bn)
+{
+ struct net_device *dev = bn->netdev;
+ struct bnge_dev *bd = bn->bd;
+ int len, i;
+
+ len = sizeof(bd->irq_tbl[0].name);
+ for (i = 0; i < bd->nq_nr_rings; i++) {
+ int map_idx = bnge_cp_num_to_irq_num(bn, i);
+ char *attr;
+
+ if (bd->flags & BNGE_EN_SHARED_CHNL)
+ attr = "TxRx";
+ else if (i < bd->rx_nr_rings)
+ attr = "rx";
+ else
+ attr = "tx";
+
+ snprintf(bd->irq_tbl[map_idx].name, len, "%s-%s-%d", dev->name,
+ attr, i);
+ bd->irq_tbl[map_idx].handler = bnge_msix;
+ }
+}
+
+static int bnge_set_real_num_queues(struct bnge_net *bn)
+{
+ struct net_device *dev = bn->netdev;
+ struct bnge_dev *bd = bn->bd;
+ int rc;
+
+ rc = netif_set_real_num_tx_queues(dev, bd->tx_nr_rings);
+ if (rc)
+ return rc;
+
+ return netif_set_real_num_rx_queues(dev, bd->rx_nr_rings);
+}
+
+static int bnge_setup_interrupts(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+
+ if (!bd->irq_tbl) {
+ if (bnge_alloc_irqs(bd))
+ return -ENODEV;
+ }
+
+ bnge_setup_msix(bn);
+
+ return bnge_set_real_num_queues(bn);
+}
+
+static int bnge_request_irq(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ int i, rc;
+
+ rc = bnge_setup_interrupts(bn);
+ if (rc) {
+ netdev_err(bn->netdev, "bnge_setup_interrupts err: %d\n", rc);
+ return rc;
+ }
+ for (i = 0; i < bd->nq_nr_rings; i++) {
+ int map_idx = bnge_cp_num_to_irq_num(bn, i);
+ struct bnge_irq *irq = &bd->irq_tbl[map_idx];
+
+ rc = request_irq(irq->vector, irq->handler, 0, irq->name,
+ bn->bnapi[i]);
+ if (rc)
+ break;
+
+ netif_napi_set_irq_locked(&bn->bnapi[i]->napi, irq->vector);
+ irq->requested = 1;
+
+ if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) {
+ int numa_node = dev_to_node(&bd->pdev->dev);
+
+ irq->have_cpumask = 1;
+ cpumask_set_cpu(cpumask_local_spread(i, numa_node),
+ irq->cpu_mask);
+ rc = irq_set_affinity_hint(irq->vector, irq->cpu_mask);
+ if (rc) {
+ netdev_warn(bn->netdev,
+ "Set affinity failed, IRQ = %d\n",
+ irq->vector);
+ break;
+ }
+ }
+ }
+
return rc;
}
+static int bnge_napi_poll(struct napi_struct *napi, int budget)
+{
+ int work_done = 0;
+
+ /* defer NAPI implementation to next patch series */
+ napi_complete_done(napi, work_done);
+
+ return work_done;
+}
+
+static void bnge_init_napi(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ struct bnge_napi *bnapi;
+ int i;
+
+ for (i = 0; i < bd->nq_nr_rings; i++) {
+ bnapi = bn->bnapi[i];
+ netif_napi_add_config(bn->netdev, &bnapi->napi, bnge_napi_poll,
+ bnapi->index);
+ }
+}
+
+static void bnge_del_napi(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ int i;
+
+ if (!bn->bnapi)
+ return;
+
+ for (i = 0; i < bd->rx_nr_rings; i++)
+ netif_queue_set_napi(bn->netdev, i, NETDEV_QUEUE_TYPE_RX, NULL);
+ for (i = 0; i < bd->tx_nr_rings; i++)
+ netif_queue_set_napi(bn->netdev, i, NETDEV_QUEUE_TYPE_TX, NULL);
+
+ for (i = 0; i < bd->nq_nr_rings; i++) {
+ struct bnge_napi *bnapi = bn->bnapi[i];
+
+ __netif_napi_del(&bnapi->napi);
+ }
+
+ /* Wait for RCU grace period after removing NAPI instances */
+ synchronize_net();
+}
+
+static void bnge_free_irq(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ struct bnge_irq *irq;
+ int i;
+
+ if (!bd->irq_tbl || !bn->bnapi)
+ return;
+
+ for (i = 0; i < bd->nq_nr_rings; i++) {
+ int map_idx = bnge_cp_num_to_irq_num(bn, i);
+
+ irq = &bd->irq_tbl[map_idx];
+ if (irq->requested) {
+ if (irq->have_cpumask) {
+ irq_set_affinity_hint(irq->vector, NULL);
+ free_cpumask_var(irq->cpu_mask);
+ irq->have_cpumask = 0;
+ }
+ free_irq(irq->vector, bn->bnapi[i]);
+ }
+
+ irq->requested = 0;
+ }
+}
+
static int bnge_open_core(struct bnge_net *bn)
{
struct bnge_dev *bd = bn->bd;
@@ -336,8 +753,22 @@ static int bnge_open_core(struct bnge_net *bn)
return rc;
}
+ bnge_init_napi(bn);
+ rc = bnge_request_irq(bn);
+ if (rc) {
+ netdev_err(bn->netdev, "bnge_request_irq err: %d\n", rc);
+ goto err_del_napi;
+ }
+
set_bit(BNGE_STATE_OPEN, &bd->state);
+
return 0;
+
+err_del_napi:
+ bnge_del_napi(bn);
+ bnge_free_core(bn);
+
+ return rc;
}
static netdev_tx_t bnge_start_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -364,6 +795,9 @@ static void bnge_close_core(struct bnge_net *bn)
struct bnge_dev *bd = bn->bd;
clear_bit(BNGE_STATE_OPEN, &bd->state);
+ bnge_free_irq(bn);
+ bnge_del_napi(bn);
+
bnge_free_core(bn);
}
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_netdev.h b/drivers/net/ethernet/broadcom/bnge/bnge_netdev.h
index b78b5e6d3ed..665a20380de 100644
--- a/drivers/net/ethernet/broadcom/bnge/bnge_netdev.h
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_netdev.h
@@ -132,6 +132,9 @@ enum {
#define BNGE_NET_EN_TPA (BNGE_NET_EN_GRO | BNGE_NET_EN_LRO)
+#define BNGE_NQ_HDL_TYPE_RX 0x00
+#define BNGE_NQ_HDL_TYPE_TX 0x01
+
struct bnge_net {
struct bnge_dev *bd;
struct net_device *netdev;
@@ -171,6 +174,10 @@ struct bnge_net {
u16 *tx_ring_map;
enum dma_data_direction rx_dir;
+
+ /* grp_info indexed by napi/nq index */
+ struct bnge_ring_grp_info *grp_info;
+ int total_irqs;
};
#define BNGE_DEFAULT_RX_RING_SIZE 511
@@ -222,6 +229,8 @@ struct bnge_cp_ring_info {
dma_addr_t *desc_mapping;
struct tx_cmp **desc_ring;
struct bnge_ring_struct ring_struct;
+ u8 cp_ring_type;
+ u8 cp_idx;
};
struct bnge_nq_ring_info {
@@ -229,6 +238,9 @@ struct bnge_nq_ring_info {
dma_addr_t *desc_mapping;
struct nqe_cn **desc_ring;
struct bnge_ring_struct ring_struct;
+
+ int cp_ring_count;
+ struct bnge_cp_ring_info *cp_ring_arr;
};
struct bnge_rx_ring_info {
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_resc.c b/drivers/net/ethernet/broadcom/bnge/bnge_resc.c
index c79a3607a1b..5597af1b3b7 100644
--- a/drivers/net/ethernet/broadcom/bnge/bnge_resc.c
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_resc.c
@@ -46,7 +46,7 @@ static int bnge_aux_get_dflt_msix(struct bnge_dev *bd)
return min_t(int, roce_msix, num_online_cpus() + 1);
}
-static u16 bnge_aux_get_msix(struct bnge_dev *bd)
+u16 bnge_aux_get_msix(struct bnge_dev *bd)
{
if (bnge_is_roce_en(bd))
return bd->aux_num_msix;
--
2.47.3