From: Michael Chan <michael.chan@broadcom.com>
To: davem@davemloft.net
Cc: netdev@vger.kernel.org, edumazet@google.com, kuba@kernel.org,
pabeni@redhat.com, andrew+netdev@lunn.ch,
pavan.chebbi@broadcom.com, andrew.gospodarek@broadcom.com,
Ajit Khaparde <ajit.khaparde@broadcom.com>
Subject: [PATCH net-next 05/15] bnxt_en: Allocate and free MPC software structures
Date: Mon, 4 May 2026 16:58:26 -0700 [thread overview]
Message-ID: <20260504235836.3019499-6-michael.chan@broadcom.com> (raw)
In-Reply-To: <20260504235836.3019499-1-michael.chan@broadcom.com>
Each MPC consists of a special TX ring and a completion ring. Use
existing structs bnxt_tx_ring_info and bnxt_cp_ring_info as control
structures. The two MPC channels to TCE and RCE share the same MSI-X
vector and will therefore use a shared completion ring.
Reviewed-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
Reviewed-by: Pavan Chebbi <pavan.chebbi@broadcom.com>
Signed-off-by: Michael Chan <michael.chan@broadcom.com>
---
drivers/net/ethernet/broadcom/bnxt/bnxt.c | 35 ++-
drivers/net/ethernet/broadcom/bnxt/bnxt.h | 12 +-
drivers/net/ethernet/broadcom/bnxt/bnxt_mpc.c | 204 ++++++++++++++++++
drivers/net/ethernet/broadcom/bnxt/bnxt_mpc.h | 46 ++++
4 files changed, 290 insertions(+), 7 deletions(-)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index bcc0c94c8e48..54f7672eeba0 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -3679,7 +3679,7 @@ static size_t __bnxt_copy_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem,
return total_len;
}
-static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
+void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
{
struct pci_dev *pdev = bp->pdev;
int i;
@@ -3712,7 +3712,7 @@ static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
}
}
-static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
+int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
{
struct pci_dev *pdev = bp->pdev;
u64 valid_bit = 0;
@@ -4316,6 +4316,8 @@ static int bnxt_alloc_cp_rings(struct bnxt *bp)
(!sh && i >= bp->rx_nr_rings)) {
cp_count += tcs;
tx = 1;
+ if (bnxt_napi_has_mpc(bp, i))
+ cp_count++;
}
cpr->cp_ring_arr = kzalloc_objs(*cpr, cp_count);
@@ -4337,6 +4339,11 @@ static int bnxt_alloc_cp_rings(struct bnxt *bp)
} else {
int n, tc = k - rx;
+ /* MPC rings are at the highest k indices */
+ if (tc >= tcs) {
+ bnxt_set_mpc_cp_ring(bp, i, cpr2);
+ continue;
+ }
n = BNXT_TC_TO_RING_BASE(bp, tc) + j;
bp->tx_ring[n].tx_cpr = cpr2;
cpr2->cp_ring_type = BNXT_NQ_HDL_TYPE_TX;
@@ -4469,6 +4476,7 @@ static void bnxt_init_ring_struct(struct bnxt *bp)
rmem->vmem = (void **)&txr->tx_buf_ring;
}
}
+ bnxt_init_mpc_ring_struct(bp);
}
static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
@@ -5545,6 +5553,7 @@ static void bnxt_init_l2_fltr_tbl(struct bnxt *bp)
static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
{
bnxt_free_vnic_attributes(bp);
+ bnxt_free_mpc_rings(bp);
bnxt_free_tx_rings(bp);
bnxt_free_rx_rings(bp);
bnxt_free_cp_rings(bp);
@@ -5558,6 +5567,7 @@ static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
bnxt_free_port_stats(bp);
bnxt_free_ring_grps(bp);
bnxt_free_vnics(bp);
+ bnxt_free_mpcs(bp);
kfree(bp->tx_ring_map);
bp->tx_ring_map = NULL;
kfree(bp->tx_ring);
@@ -5667,6 +5677,10 @@ static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
txr->tx_cpr = &bnapi2->cp_ring;
}
+ rc = bnxt_alloc_mpcs(bp);
+ if (rc)
+ goto alloc_mem_err;
+
rc = bnxt_alloc_stats(bp);
if (rc)
goto alloc_mem_err;
@@ -5695,6 +5709,10 @@ static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
if (rc)
goto alloc_mem_err;
+ rc = bnxt_alloc_mpc_rings(bp);
+ if (rc)
+ goto alloc_mem_err;
+
rc = bnxt_alloc_cp_rings(bp);
if (rc)
goto alloc_mem_err;
@@ -7249,10 +7267,15 @@ static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
req->cmpl_ring_id = cpu_to_le16(bnxt_cp_ring_for_tx(bp, txr));
req->length = cpu_to_le32(bp->tx_ring_mask + 1);
req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
- req->queue_id = cpu_to_le16(ring->queue_id);
- if (bp->flags & BNXT_FLAG_TX_COAL_CMPL)
- req->cmpl_coal_cnt =
- RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_64;
+ if (ring->queue_id == BNXT_MPC_QUEUE_ID) {
+ req->mpc_chnls_type = ring->mpc_chnl_type;
+ req->enables |= cpu_to_le32(RING_ALLOC_REQ_ENABLES_MPC_CHNLS_TYPE);
+ } else {
+ req->queue_id = cpu_to_le16(ring->queue_id);
+ if (bp->flags & BNXT_FLAG_TX_COAL_CMPL)
+ req->cmpl_coal_cnt =
+ RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_64;
+ }
if ((bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP) && bp->ptp_cfg)
flags |= RING_ALLOC_REQ_FLAGS_TX_PKT_TS_CMPL_ENABLE;
req->flags = cpu_to_le16(flags);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 6c8ed3cb7dfd..253c0739f680 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -686,6 +686,7 @@ struct nqe_cn {
#define BNXT_NQ_HDL_TYPE_SHIFT 24
#define BNXT_NQ_HDL_TYPE_RX 0x00
#define BNXT_NQ_HDL_TYPE_TX 0x01
+#define BNXT_NQ_HDL_TYPE_MP 0x02
#define BNXT_NQ_HDL_IDX(hdl) ((hdl) & BNXT_NQ_HDL_IDX_MASK)
#define BNXT_NQ_HDL_TYPE(hdl) (((hdl) & BNXT_NQ_HDL_TYPE_MASK) >> \
@@ -950,6 +951,8 @@ struct bnxt_ring_struct {
};
u32 handle;
u8 queue_id;
+#define BNXT_MPC_QUEUE_ID 0xff
+ u8 mpc_chnl_type;
};
struct tx_push_bd {
@@ -990,12 +993,16 @@ struct bnxt_tx_ring_info {
u16 tx_cons;
u16 tx_hw_cons;
u16 txq_index;
+ /* index for tx_ring[] or tx_mpc_ring[] in struct bnxt_napi */
u8 tx_napi_idx;
u8 kick_pending;
struct bnxt_db_info tx_db;
struct tx_bd *tx_desc_ring[MAX_TX_PAGES];
- struct bnxt_sw_tx_bd *tx_buf_ring;
+ union {
+ struct bnxt_sw_tx_bd *tx_buf_ring;
+ struct bnxt_sw_mpc_tx_bd *tx_mpc_buf_ring;
+ };
dma_addr_t tx_desc_mapping[MAX_TX_PAGES];
@@ -1241,6 +1248,7 @@ struct bnxt_napi {
struct bnxt_cp_ring_info cp_ring;
struct bnxt_rx_ring_info *rx_ring;
struct bnxt_tx_ring_info *tx_ring[BNXT_MAX_TXR_PER_NAPI];
+ struct bnxt_tx_ring_info **tx_mpc_ring;
void (*tx_int)(struct bnxt *, struct bnxt_napi *,
int budget);
@@ -2961,6 +2969,8 @@ int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data);
u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx);
bool bnxt_bs_trace_avail(struct bnxt *bp, u16 type);
+void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem);
+int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem);
void bnxt_set_tpa_flags(struct bnxt *bp);
void bnxt_set_ring_params(struct bnxt *);
void bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_mpc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_mpc.c
index cce73d56e46e..9ffc62bf771f 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_mpc.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_mpc.c
@@ -47,6 +47,55 @@ int bnxt_mpc_cp_rings_in_use(struct bnxt *bp)
return mpc->mpc_cp_rings;
}
+bool bnxt_napi_has_mpc(struct bnxt *bp, int i)
+{
+ struct bnxt_mpc_info *mpc = bp->mpc_info;
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_tx_ring_info *txr;
+
+ if (!mpc)
+ return false;
+
+ txr = bnapi->tx_ring[0];
+ if (txr && !(bnapi->flags & BNXT_NAPI_FLAG_XDP))
+ return txr->txq_index < mpc->mpc_cp_rings;
+ return false;
+}
+
+void bnxt_set_mpc_cp_ring(struct bnxt *bp, int bnapi_idx,
+ struct bnxt_cp_ring_info *cpr)
+{
+ struct bnxt_mpc_info *mpc = bp->mpc_info;
+ struct bnxt_napi *bnapi;
+ bool found = false;
+ int i, j;
+
+ if (!mpc)
+ return;
+ bnapi = bp->bnapi[bnapi_idx];
+ /* Check both TCE and RCE MPCs for the matching NAPI */
+ for (i = 0; i < BNXT_MPC_TYPE_MAX; i++) {
+ int num = mpc->mpc_ring_count[i];
+
+ for (j = 0; j < num; j++) {
+ struct bnxt_tx_ring_info *txr = &mpc->mpc_rings[i][j];
+
+ /* Only 1 ring with index j will use this NAPI */
+ if (txr->bnapi == bnapi) {
+ txr->tx_cpr = cpr;
+ txr->tx_napi_idx = i;
+ bnapi->tx_mpc_ring[i] = txr;
+ found = true;
+ break;
+ }
+ }
+ }
+ if (!found)
+ netdev_warn_once(bp->dev, "No MPC match for napi index %d\n",
+ bnapi_idx);
+ cpr->cp_ring_type = BNXT_NQ_HDL_TYPE_MP;
+}
+
void bnxt_trim_mpc_rings(struct bnxt *bp)
{
struct bnxt_mpc_info *mpc = bp->mpc_info;
@@ -107,3 +156,158 @@ void bnxt_set_dflt_mpc_rings(struct bnxt *bp)
for (i = 0; i < BNXT_MPC_TYPE_MAX; i++)
mpc->mpc_ring_count[i] = 0;
}
+
+void bnxt_init_mpc_ring_struct(struct bnxt *bp)
+{
+ struct bnxt_mpc_info *mpc = bp->mpc_info;
+ int i, j;
+
+ if (!BNXT_MPC_CRYPTO_CAPABLE(bp))
+ return;
+
+ for (i = 0; i < BNXT_MPC_TYPE_MAX; i++) {
+ int num = mpc->mpc_ring_count[i];
+ struct bnxt_tx_ring_info *txr;
+
+ txr = mpc->mpc_rings[i];
+ if (!txr)
+ continue;
+ for (j = 0; j < num; j++) {
+ struct bnxt_ring_mem_info *rmem;
+ struct bnxt_ring_struct *ring;
+
+ txr = &mpc->mpc_rings[i][j];
+
+ txr->tx_ring_struct.ring_mem.flags =
+ BNXT_RMEM_RING_PTE_FLAG;
+ txr->bnapi = bp->tx_ring[bp->tx_ring_map[j]].bnapi;
+ txr->txq_index = j;
+
+ ring = &txr->tx_ring_struct;
+ rmem = &ring->ring_mem;
+ rmem->nr_pages = bp->tx_nr_pages;
+ rmem->page_size = HW_TXBD_RING_SIZE;
+ rmem->pg_arr = (void **)txr->tx_desc_ring;
+ rmem->dma_arr = txr->tx_desc_mapping;
+ rmem->vmem_size = SW_MPC_TXBD_RING_SIZE *
+ bp->tx_nr_pages;
+ rmem->vmem = (void **)&txr->tx_mpc_buf_ring;
+ }
+ }
+}
+
+int bnxt_alloc_mpcs(struct bnxt *bp)
+{
+ struct bnxt_mpc_info *mpc = bp->mpc_info;
+ int i, rc = 0;
+
+ if (!BNXT_MPC_CRYPTO_CAPABLE(bp))
+ return 0;
+
+ for (i = 0; i < BNXT_MPC_TYPE_MAX; i++) {
+ int num = mpc->mpc_ring_count[i];
+ struct bnxt_tx_ring_info *txr;
+
+ if (!num)
+ continue;
+ txr = kzalloc_objs(*txr, num);
+ if (!txr) {
+ rc = -ENOMEM;
+ goto alloc_mpcs_exit;
+ }
+ mpc->mpc_rings[i] = txr;
+ }
+
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+
+ if (!bnxt_napi_has_mpc(bp, i))
+ continue;
+ bnapi->tx_mpc_ring = kzalloc_objs(*bnapi->tx_mpc_ring,
+ BNXT_MPC_TYPE_MAX);
+ if (!bnapi->tx_mpc_ring) {
+ rc = -ENOMEM;
+ goto alloc_mpcs_exit;
+ }
+ }
+alloc_mpcs_exit:
+ if (rc)
+ bnxt_free_mpcs(bp);
+ return rc;
+}
+
+void bnxt_free_mpcs(struct bnxt *bp)
+{
+ struct bnxt_mpc_info *mpc = bp->mpc_info;
+ int i;
+
+ if (!mpc)
+ return;
+
+ for (i = 0; i < BNXT_MPC_TYPE_MAX; i++) {
+ kfree(mpc->mpc_rings[i]);
+ mpc->mpc_rings[i] = NULL;
+ }
+ if (!bp->bnapi)
+ return;
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+
+ kfree(bnapi->tx_mpc_ring);
+ bnapi->tx_mpc_ring = NULL;
+ }
+}
+
+int bnxt_alloc_mpc_rings(struct bnxt *bp)
+{
+ struct bnxt_mpc_info *mpc = bp->mpc_info;
+ int i, j, rc = 0;
+
+ if (!mpc)
+ return 0;
+
+ for (i = 0; i < BNXT_MPC_TYPE_MAX; i++) {
+ int num = mpc->mpc_ring_count[i];
+
+ for (j = 0; j < num; j++) {
+ struct bnxt_tx_ring_info *txr = &mpc->mpc_rings[i][j];
+ struct bnxt_ring_struct *ring;
+
+ ring = &txr->tx_ring_struct;
+ rc = bnxt_alloc_ring(bp, &ring->ring_mem);
+ if (rc)
+ goto alloc_mpc_rings_exit;
+ ring->queue_id = BNXT_MPC_QUEUE_ID;
+ ring->mpc_chnl_type = i;
+ /* for stats context */
+ ring->grp_idx = txr->bnapi->index;
+ spin_lock_init(&txr->tx_lock);
+ }
+ }
+alloc_mpc_rings_exit:
+ if (rc)
+ bnxt_free_mpc_rings(bp);
+ return rc;
+}
+
+void bnxt_free_mpc_rings(struct bnxt *bp)
+{
+ struct bnxt_mpc_info *mpc = bp->mpc_info;
+ int i, j;
+
+ if (!mpc)
+ return;
+
+ for (i = 0; i < BNXT_MPC_TYPE_MAX; i++) {
+ int num = mpc->mpc_ring_count[i];
+
+ if (!mpc->mpc_rings[i])
+ continue;
+ for (j = 0; j < num; j++) {
+ struct bnxt_tx_ring_info *txr = &mpc->mpc_rings[i][j];
+ struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
+
+ bnxt_free_ring(bp, &ring->ring_mem);
+ }
+ }
+}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_mpc.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_mpc.h
index 4ff8cad75a23..b54daf4ddd2f 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_mpc.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_mpc.h
@@ -29,6 +29,12 @@ struct bnxt_mpc_info {
struct bnxt_tx_ring_info *mpc_rings[BNXT_MPC_TYPE_MAX];
};
+struct bnxt_sw_mpc_tx_bd {
+ unsigned long handle;
+};
+
+#define SW_MPC_TXBD_RING_SIZE (sizeof(struct bnxt_sw_mpc_tx_bd) * TX_DESC_CNT)
+
#define BNXT_MPC_CRYPTO_CAP \
(FUNC_QCAPS_RESP_MPC_CHNLS_CAP_TCE | FUNC_QCAPS_RESP_MPC_CHNLS_CAP_RCE)
@@ -42,8 +48,16 @@ void bnxt_alloc_mpc_info(struct bnxt *bp, u8 mpc_chnls_cap);
void bnxt_free_mpc_info(struct bnxt *bp);
int bnxt_mpc_tx_rings_in_use(struct bnxt *bp);
int bnxt_mpc_cp_rings_in_use(struct bnxt *bp);
+bool bnxt_napi_has_mpc(struct bnxt *bp, int i);
+void bnxt_set_mpc_cp_ring(struct bnxt *bp, int bnapi_idx,
+ struct bnxt_cp_ring_info *cpr);
void bnxt_trim_mpc_rings(struct bnxt *bp);
void bnxt_set_dflt_mpc_rings(struct bnxt *bp);
+void bnxt_init_mpc_ring_struct(struct bnxt *bp);
+int bnxt_alloc_mpcs(struct bnxt *bp);
+void bnxt_free_mpcs(struct bnxt *bp);
+int bnxt_alloc_mpc_rings(struct bnxt *bp);
+void bnxt_free_mpc_rings(struct bnxt *bp);
#else
static inline void bnxt_alloc_mpc_info(struct bnxt *bp, u8 mpc_chnls_cap)
{
@@ -63,6 +77,16 @@ static inline int bnxt_mpc_cp_rings_in_use(struct bnxt *bp)
return 0;
}
+static inline bool bnxt_napi_has_mpc(struct bnxt *bp, int i)
+{
+ return false;
+}
+
+static inline void bnxt_set_mpc_cp_ring(struct bnxt *bp, int bnapi_idx,
+ struct bnxt_cp_ring_info *cpr)
+{
+}
+
static inline void bnxt_trim_mpc_rings(struct bnxt *bp)
{
}
@@ -70,5 +94,27 @@ static inline void bnxt_trim_mpc_rings(struct bnxt *bp)
static inline void bnxt_set_dflt_mpc_rings(struct bnxt *bp)
{
}
+
+static inline void bnxt_init_mpc_ring_struct(struct bnxt *bp)
+{
+}
+
+static inline int bnxt_alloc_mpcs(struct bnxt *bp)
+{
+ return 0;
+}
+
+static inline void bnxt_free_mpcs(struct bnxt *bp)
+{
+}
+
+static inline int bnxt_alloc_mpc_rings(struct bnxt *bp)
+{
+ return 0;
+}
+
+static inline void bnxt_free_mpc_rings(struct bnxt *bp)
+{
+}
#endif /* CONFIG_BNXT_TLS */
#endif /* BNXT_MPC_H */
--
2.51.0
next prev parent reply other threads:[~2026-05-04 23:59 UTC|newest]
Thread overview: 18+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-05-04 23:58 [PATCH net-next 00/15] bnxt_en: Add kTLS TX offload support Michael Chan
2026-05-04 23:58 ` [PATCH net-next 01/15] bnxt_en: Add Midpath channel information Michael Chan
2026-05-04 23:58 ` [PATCH net-next 02/15] bnxt_en: Account for the MPC TX and CP rings Michael Chan
2026-05-04 23:58 ` [PATCH net-next 03/15] bnxt_en: Set default MPC ring count Michael Chan
2026-05-04 23:58 ` [PATCH net-next 04/15] bnxt_en: Rename xdp_tx_lock to tx_lock Michael Chan
2026-05-04 23:58 ` Michael Chan [this message]
2026-05-04 23:58 ` [PATCH net-next 06/15] bnxt_en: Allocate and free MPC channels from firmware Michael Chan
2026-05-04 23:58 ` [PATCH net-next 07/15] bnxt_en: Allocate crypto structure and backing store Michael Chan
2026-05-04 23:58 ` [PATCH net-next 08/15] bnxt_en: Reserve crypto RX and TX key contexts on a PF Michael Chan
2026-05-04 23:58 ` [PATCH net-next 09/15] bnxt_en: Add infrastructure for crypto key context IDs Michael Chan
2026-05-04 23:58 ` [PATCH net-next 10/15] bnxt_en: Add MPC transmit and completion functions Michael Chan
2026-05-06 0:57 ` Jakub Kicinski
2026-05-04 23:58 ` [PATCH net-next 11/15] bnxt_en: Add crypto MPC transmit/completion infrastructure Michael Chan
2026-05-04 23:58 ` [PATCH net-next 12/15] bnxt_en: Support kTLS TX offload by implementing .tls_dev_add/del() Michael Chan
2026-05-06 0:58 ` Jakub Kicinski
2026-05-04 23:58 ` [PATCH net-next 13/15] bnxt_en: Implement kTLS TX normal path Michael Chan
2026-05-04 23:58 ` [PATCH net-next 14/15] bnxt_en: Add support for inline transmit BDs Michael Chan
2026-05-04 23:58 ` [PATCH net-next 15/15] bnxt_en: Add kTLS retransmission support Michael Chan
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260504235836.3019499-6-michael.chan@broadcom.com \
--to=michael.chan@broadcom.com \
--cc=ajit.khaparde@broadcom.com \
--cc=andrew+netdev@lunn.ch \
--cc=andrew.gospodarek@broadcom.com \
--cc=davem@davemloft.net \
--cc=edumazet@google.com \
--cc=kuba@kernel.org \
--cc=netdev@vger.kernel.org \
--cc=pabeni@redhat.com \
--cc=pavan.chebbi@broadcom.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox