public inbox for dev@dpdk.org
 help / color / mirror / Atom feed
* [PATCH 1/8] net/cnxk: support of plain packet reassembly
@ 2026-02-19  9:08 Rahul Bhansali
  2026-02-19  9:08 ` [PATCH 2/8] net/cnxk: support IPsec Rx inject for cn20k Rahul Bhansali
                   ` (9 more replies)
  0 siblings, 10 replies; 27+ messages in thread
From: Rahul Bhansali @ 2026-02-19  9:08 UTC (permalink / raw)
  To: dev, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori,
	Satha Rao, Harman Kalra
  Cc: jerinj, Rakesh Kudurumalla

From: Rakesh Kudurumalla <rkudurumalla@marvell.com>

Adds support for plain packet reassembly by configuring
a UCAST_CPT rule.

Signed-off-by: Rakesh Kudurumalla <rkudurumalla@marvell.com>
---
 doc/guides/rel_notes/release_26_03.rst        |   1 +
 drivers/common/cnxk/roc_nix_inl.h             |   2 +-
 .../common/cnxk/roc_platform_base_symbols.c   |   1 +
 drivers/net/cnxk/cn20k_ethdev.c               |  78 ++++--
 drivers/net/cnxk/cn20k_rx.h                   |   6 +-
 drivers/net/cnxk/cnxk_ethdev.c                | 233 ++++++++++++++----
 drivers/net/cnxk/cnxk_ethdev.h                |   8 +
 7 files changed, 270 insertions(+), 59 deletions(-)

diff --git a/doc/guides/rel_notes/release_26_03.rst b/doc/guides/rel_notes/release_26_03.rst
index b4499ec066..b1f9b3c82b 100644
--- a/doc/guides/rel_notes/release_26_03.rst
+++ b/doc/guides/rel_notes/release_26_03.rst
@@ -80,6 +80,7 @@ New Features
 * **Updated Marvell cnxk net driver.**
 
   * Added out-of-place support for CN20K SoC.
+  * Added plain packet reassembly support for CN20K SoC.
 
 * **Updated ZTE zxdh ethernet driver.**
 
diff --git a/drivers/common/cnxk/roc_nix_inl.h b/drivers/common/cnxk/roc_nix_inl.h
index 68f395438c..596f12d1c7 100644
--- a/drivers/common/cnxk/roc_nix_inl.h
+++ b/drivers/common/cnxk/roc_nix_inl.h
@@ -160,7 +160,7 @@ bool __roc_api roc_nix_inl_inb_is_enabled(struct roc_nix *roc_nix);
 uintptr_t __roc_api roc_nix_inl_inb_sa_base_get(struct roc_nix *roc_nix,
 						bool inl_dev_sa);
 uint16_t roc_nix_inl_inb_ipsec_profile_id_get(struct roc_nix *roc_nix, bool inb_inl_dev);
-uint16_t roc_nix_inl_inb_reass_profile_id_get(struct roc_nix *roc_nix, bool inb_inl_dev);
+uint16_t __roc_api roc_nix_inl_inb_reass_profile_id_get(struct roc_nix *roc_nix, bool inb_inl_dev);
 bool __roc_api roc_nix_inl_inb_rx_inject_enable(struct roc_nix *roc_nix, bool inl_dev_sa);
 uint32_t __roc_api roc_nix_inl_inb_spi_range(struct roc_nix *roc_nix,
 					     bool inl_dev_sa, uint32_t *min,
diff --git a/drivers/common/cnxk/roc_platform_base_symbols.c b/drivers/common/cnxk/roc_platform_base_symbols.c
index 79dd18fbd7..2c73efd877 100644
--- a/drivers/common/cnxk/roc_platform_base_symbols.c
+++ b/drivers/common/cnxk/roc_platform_base_symbols.c
@@ -228,6 +228,7 @@ RTE_EXPORT_INTERNAL_SYMBOL(roc_nix_tm_dump)
 RTE_EXPORT_INTERNAL_SYMBOL(roc_nix_tm_sdp_prepare_tree)
 RTE_EXPORT_INTERNAL_SYMBOL(roc_nix_dump)
 RTE_EXPORT_INTERNAL_SYMBOL(roc_nix_inl_dev_dump)
+RTE_EXPORT_INTERNAL_SYMBOL(roc_nix_inl_inb_reass_profile_id_get)
 RTE_EXPORT_INTERNAL_SYMBOL(roc_nix_inl_outb_cpt_lfs_dump)
 RTE_EXPORT_INTERNAL_SYMBOL(roc_nix_sq_desc_dump)
 RTE_EXPORT_INTERNAL_SYMBOL(roc_nix_fc_config_get)
diff --git a/drivers/net/cnxk/cn20k_ethdev.c b/drivers/net/cnxk/cn20k_ethdev.c
index 7e9e32f80b..fd153c3105 100644
--- a/drivers/net/cnxk/cn20k_ethdev.c
+++ b/drivers/net/cnxk/cn20k_ethdev.c
@@ -616,22 +616,17 @@ static int
 cn20k_nix_reassembly_capability_get(struct rte_eth_dev *eth_dev,
 				    struct rte_eth_ip_reassembly_params *reassembly_capa)
 {
-	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
-	int rc = -ENOTSUP;
 	RTE_SET_USED(eth_dev);
 
 	if (!roc_feature_nix_has_reass())
 		return -ENOTSUP;
 
-	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
-		reassembly_capa->timeout_ms = 60 * 1000;
-		reassembly_capa->max_frags = 4;
-		reassembly_capa->flags =
-			RTE_ETH_DEV_REASSEMBLY_F_IPV4 | RTE_ETH_DEV_REASSEMBLY_F_IPV6;
-		rc = 0;
-	}
+	reassembly_capa->timeout_ms = 60 * 1000;
+	reassembly_capa->max_frags = 8;
+	reassembly_capa->flags =
+		RTE_ETH_DEV_REASSEMBLY_F_IPV4 | RTE_ETH_DEV_REASSEMBLY_F_IPV6;
 
-	return rc;
+	return 0;
 }
 
 static int
@@ -649,7 +644,8 @@ cn20k_nix_reassembly_conf_set(struct rte_eth_dev *eth_dev,
 {
 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
 	struct roc_cpt_rxc_time_cfg rxc_time_cfg = {0};
-	int rc = 0;
+	uint16_t nb_rxq = dev->nb_rxq;
+	int rc = 0, i;
 
 	if (!roc_feature_nix_has_reass())
 		return -ENOTSUP;
@@ -659,15 +655,69 @@ cn20k_nix_reassembly_conf_set(struct rte_eth_dev *eth_dev,
 		if (!dev->inb.nb_oop)
 			dev->rx_offload_flags &= ~NIX_RX_REAS_F;
 		dev->inb.reass_en = false;
+		if (dev->ip_reass_en) {
+			cnxk_nix_ip_reass_rule_clr(eth_dev);
+			dev->ip_reass_en = false;
+		}
 		return 0;
 	}
 
+	if (!(dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY)) {
+		rc = cnxk_nix_inline_inbound_setup(dev);
+		if (rc) {
+			plt_err("Nix inline inbound setup failed rc=%d", rc);
+			goto done;
+		}
+		rc = cnxk_nix_inline_inbound_mode_setup(dev);
+		if (rc) {
+			plt_err("Nix inline inbound mode setup failed rc=%d", rc);
+			goto cleanup;
+		}
+
+		for (i = 0; i < nb_rxq; i++) {
+			struct roc_nix_rq *rq = &dev->rqs[i];
+			if (!rq) {
+				plt_err("Receive queue = %d not enabled ", i);
+				goto cleanup;
+			}
+			struct cn20k_eth_rxq *rxq = eth_dev->data->rx_queues[i];
+
+			roc_nix_inl_dev_xaq_realloc(rq->aura_handle);
+
+			rq->tag_mask = 0x0FF00000 | ((uint32_t)RTE_EVENT_TYPE_ETHDEV << 28);
+			rc = roc_nix_inl_dev_rq_get(rq, !!eth_dev->data->dev_started);
+			if (rc)
+				goto cleanup;
+			rxq->lmt_base = dev->nix.lmt_base;
+			rxq->sa_base = roc_nix_inl_inb_sa_base_get(&dev->nix, dev->inb.inl_dev);
+			rc = roc_npa_buf_type_update(rq->aura_handle,
+						     ROC_NPA_BUF_TYPE_PACKET_IPSEC, 1);
+			if (rc)
+				goto cleanup;
+		}
+	}
 	rc = roc_nix_reassembly_configure(&rxc_time_cfg, conf->timeout_ms);
-	if (!rc && dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
-		dev->rx_offload_flags |= NIX_RX_REAS_F;
-		dev->inb.reass_en = true;
+	if (rc) {
+		plt_err("Nix reassembly_configure failed rc=%d", rc);
+		goto cleanup;
 	}
 
+	dev->rx_offload_flags |= NIX_RX_REAS_F | NIX_RX_OFFLOAD_SECURITY_F;
+	dev->inb.reass_en = !!((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY));
+
+	if (!dev->ip_reass_en) {
+		rc = cnxk_nix_ip_reass_rule_set(eth_dev, 0);
+		if (rc) {
+			plt_err("Nix reassembly rule setup failed rc=%d", rc);
+			goto cleanup;
+		}
+	}
+	return 0;
+cleanup:
+	dev->inb.reass_en = false;
+	if (!(dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY))
+		rc |= cnxk_nix_inl_inb_fini(dev);
+done:
 	return rc;
 }
 
diff --git a/drivers/net/cnxk/cn20k_rx.h b/drivers/net/cnxk/cn20k_rx.h
index 83c222c53c..d6c217cdf5 100644
--- a/drivers/net/cnxk/cn20k_rx.h
+++ b/drivers/net/cnxk/cn20k_rx.h
@@ -258,7 +258,8 @@ nix_sec_meta_to_mbuf_sc(uint64_t cq_w5, uint64_t cpth, const uint64_t sa_base,
 			*rte_security_dynfield(mbuf) = (uint64_t)inb_priv->userdata;
 	} else {
 		/* Update dynamic field with userdata */
-		*rte_security_dynfield(mbuf) = (uint64_t)inb_priv->userdata;
+		if (flags & NIX_RX_REAS_F && inb_priv->userdata)
+			*rte_security_dynfield(mbuf) = (uint64_t)inb_priv->userdata;
 	}
 
 	*len = ((w3 >> 48) & 0xFFFF) + ((cq_w5 >> 16) & 0xFF) - (cq_w5 & 0xFF);
@@ -917,7 +918,8 @@ nix_sec_meta_to_mbuf(uintptr_t inb_sa, uintptr_t cpth, struct rte_mbuf **inner,
 		/* Get SPI from CPT_PARSE_S's cookie(already swapped) */
 		inb_priv = roc_nix_inl_ot_ipsec_inb_sa_sw_rsvd((void *)inb_sa);
 		/* Update dynamic field with userdata */
-		*rte_security_dynfield(inner_m) = (uint64_t)inb_priv->userdata;
+		if (flags & NIX_RX_REAS_F && inb_priv->userdata)
+			*rte_security_dynfield(inner_m) = (uint64_t)inb_priv->userdata;
 	}
 
 	/* Clear and update original lower 16 bit of data offset */
diff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c
index ff78622b58..c979454f37 100644
--- a/drivers/net/cnxk/cnxk_ethdev.c
+++ b/drivers/net/cnxk/cnxk_ethdev.c
@@ -7,6 +7,11 @@
 #include <eal_export.h>
 #include <rte_eventdev.h>
 #include <rte_pmd_cnxk.h>
+#include "roc_priv.h"
+
+#define REASS_PRIORITY             0
+#define CLS_LTYPE_OFFSET_START     7
+#define CLS_LFLAGS_LC_OFFSET (CLS_LTYPE_OFFSET_START + 4)
 
 static const uint32_t cnxk_mac_modes[CGX_MODE_MAX + 1] = {
 	[CGX_MODE_SGMII] = RTE_ETH_LINK_SPEED_1G,
@@ -203,46 +208,160 @@ cnxk_nix_inb_mode_set(struct cnxk_eth_dev *dev, bool use_inl_dev)
 	return cnxk_nix_lookup_mem_sa_base_set(dev);
 }
 
-static int
-nix_security_setup(struct cnxk_eth_dev *dev)
+int
+cnxk_nix_inline_inbound_mode_setup(struct cnxk_eth_dev *dev)
+{
+	int rc = 0;
+
+	/* By default pick using inline device for poll mode.
+	 * Will be overridden when event mode rq's are setup.
+	 */
+	cnxk_nix_inb_mode_set(dev, !dev->inb.no_inl_dev);
+
+	/* Allocate memory to be used as dptr for CPT ucode
+	 * WRITE_SA op.
+	 */
+	dev->inb.sa_dptr =
+		plt_zmalloc(ROC_NIX_INL_OT_IPSEC_INB_HW_SZ, 0);
+	if (!dev->inb.sa_dptr) {
+		plt_err("Couldn't allocate memory for SA dptr");
+		rc = -ENOMEM;
+		goto cleanup;
+	}
+	dev->inb.inl_dev_q = roc_nix_inl_dev_qptr_get(0);
+cleanup:
+	return rc;
+}
+
+static void
+cnxk_flow_ipfrag_set(struct roc_npc_flow *flow, struct roc_npc *npc)
+{
+	uint8_t lc_offset;
+	uint64_t mask;
+
+	lc_offset = rte_popcount64(npc->rx_parse_nibble & ((1ULL << CLS_LFLAGS_LC_OFFSET) - 1));
+
+	lc_offset *= 4;
+
+	mask = (~(0xffULL << lc_offset));
+	flow->mcam_data[0] &= mask;
+	flow->mcam_mask[0] &= mask;
+	flow->mcam_data[0] |= (0x02ULL << lc_offset);
+	flow->mcam_mask[0] |= (0x82ULL << lc_offset);
+}
+
+int
+cnxk_nix_ip_reass_rule_set(struct rte_eth_dev *eth_dev, uint32_t rq)
+{
+	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+	struct nix_rx_action2_s *action2;
+	struct nix_rx_action_s *action;
+	struct roc_npc_flow mcam;
+	int prio = 0, rc = 0;
+	struct roc_npc *npc;
+	int resp_count = 0;
+	bool inl_dev;
+
+	npc = &dev->npc;
+	inl_dev = roc_nix_inb_is_with_inl_dev(&dev->nix);
+
+	prio = REASS_PRIORITY;
+	memset(&mcam, 0, sizeof(struct roc_npc_flow));
+
+	action = (struct nix_rx_action_s *)&mcam.npc_action;
+	action2 = (struct nix_rx_action2_s *)&mcam.npc_action2;
+
+	if (inl_dev) {
+		struct roc_nix_rq *inl_rq;
+
+		inl_rq = roc_nix_inl_dev_rq(&dev->nix);
+		if (!inl_rq) {
+			plt_err("Failed to get inline dev rq for %d", dev->nix.port_id);
+			goto mcam_alloc_failed;
+		}
+		action->pf_func = roc_idev_nix_inl_dev_pffunc_get();
+		action->index = inl_rq->qid;
+	} else {
+		action->pf_func = npc->pf_func;
+		action->index = rq;
+	}
+	action->op = NIX_RX_ACTIONOP_UCAST_CPT;
+
+	action2->inline_profile_id = roc_nix_inl_inb_reass_profile_id_get(npc->roc_nix, inl_dev);
+
+	rc = roc_npc_mcam_merge_base_steering_rule(npc, &mcam);
+	if (rc < 0)
+		goto mcam_alloc_failed;
+
+	/* Channel[11] should be 'b0 */
+	mcam.mcam_data[0] &= (~0xfffULL);
+	mcam.mcam_mask[0] &= (~0xfffULL);
+	mcam.mcam_data[0] |= (uint64_t)(npc->channel & 0x7ff);
+	mcam.mcam_mask[0] |= (BIT_ULL(12) - 1);
+	cnxk_flow_ipfrag_set(&mcam, npc);
+
+	mcam.priority = prio;
+	mcam.key_type = roc_npc_get_key_type(npc, &mcam);
+	rc = roc_npc_mcam_alloc_entry(npc, &mcam, NULL, prio, &resp_count);
+	if (rc || resp_count == 0)
+		goto mcam_alloc_failed;
+
+	mcam.enable = true;
+	rc = roc_npc_mcam_write_entry(npc, &mcam);
+	if (rc < 0)
+		goto mcam_write_failed;
+
+	dev->ip_reass_rule_id = mcam.mcam_id;
+	dev->ip_reass_en = true;
+	return 0;
+
+mcam_write_failed:
+	rc = roc_npc_mcam_free(npc, &mcam);
+	if (rc)
+		return rc;
+mcam_alloc_failed:
+	return -EIO;
+}
+
+int
+cnxk_nix_inline_inbound_setup(struct cnxk_eth_dev *dev)
 {
 	struct roc_nix *nix = &dev->nix;
-	int i, rc = 0;
+	int rc = 0;
 
-	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
-		/* Setup minimum SA table when inline device is used */
-		nix->ipsec_in_min_spi = dev->inb.no_inl_dev ? dev->inb.min_spi : 0;
-		nix->ipsec_in_max_spi = dev->inb.no_inl_dev ? dev->inb.max_spi : 1;
+	/* Setup minimum SA table when inline device is used */
+	nix->ipsec_in_min_spi = dev->inb.no_inl_dev ? dev->inb.min_spi : 0;
+	nix->ipsec_in_max_spi = dev->inb.no_inl_dev ? dev->inb.max_spi : 1;
 
-		/* Enable custom meta aura when multi-chan is used */
-		if (nix->local_meta_aura_ena && roc_nix_inl_dev_is_multi_channel() &&
-		    !dev->inb.custom_meta_aura_dis)
-			nix->custom_meta_aura_ena = true;
+	/* Enable custom meta aura when multi-chan is used */
+	if (nix->local_meta_aura_ena && roc_nix_inl_dev_is_multi_channel() &&
+	    !dev->inb.custom_meta_aura_dis)
+		nix->custom_meta_aura_ena = true;
 
-		/* Setup Inline Inbound */
-		rc = roc_nix_inl_inb_init(nix);
-		if (rc) {
-			plt_err("Failed to initialize nix inline inb, rc=%d",
+	/* Setup Inline Inbound */
+	rc = roc_nix_inl_inb_init(nix);
+	if (rc) {
+		plt_err("Failed to initialize nix inline inb, rc=%d",
 				rc);
-			return rc;
-		}
+		return rc;
+	}
 
-		/* By default pick using inline device for poll mode.
-		 * Will be overridden when event mode rq's are setup.
-		 */
-		cnxk_nix_inb_mode_set(dev, !dev->inb.no_inl_dev);
+	return 0;
+}
 
-		/* Allocate memory to be used as dptr for CPT ucode
-		 * WRITE_SA op.
-		 */
-		dev->inb.sa_dptr =
-			plt_zmalloc(ROC_NIX_INL_OT_IPSEC_INB_HW_SZ, 0);
-		if (!dev->inb.sa_dptr) {
-			plt_err("Couldn't allocate memory for SA dptr");
-			rc = -ENOMEM;
+static int
+nix_security_setup(struct cnxk_eth_dev *dev)
+{
+	struct roc_nix *nix = &dev->nix;
+	int i, rc = 0;
+
+	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
+		rc = cnxk_nix_inline_inbound_setup(dev);
+		if (rc)
+			return rc;
+		rc = cnxk_nix_inline_inbound_mode_setup(dev);
+		if (rc)
 			goto cleanup;
-		}
-		dev->inb.inl_dev_q = roc_nix_inl_dev_qptr_get(0);
 	}
 
 	if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY ||
@@ -365,6 +484,22 @@ nix_meter_fini(struct cnxk_eth_dev *dev)
 	return 0;
 }
 
+int
+cnxk_nix_inl_inb_fini(struct cnxk_eth_dev *dev)
+{
+	struct roc_nix *nix = &dev->nix;
+	int rc;
+
+	if (dev->inb.sa_dptr) {
+		plt_free(dev->inb.sa_dptr);
+		dev->inb.sa_dptr = NULL;
+	}
+	rc = roc_nix_inl_inb_fini(nix);
+	if (rc)
+		plt_err("Failed to cleanup nix inline inb, rc=%d", rc);
+	return rc;
+}
+
 static int
 nix_security_release(struct cnxk_eth_dev *dev)
 {
@@ -374,7 +509,7 @@ nix_security_release(struct cnxk_eth_dev *dev)
 	int rc, ret = 0;
 
 	/* Cleanup Inline inbound */
-	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
+	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY || dev->ip_reass_en) {
 		/* Destroy inbound sessions */
 		tvar = NULL;
 		RTE_TAILQ_FOREACH_SAFE(eth_sec, &dev->inb.list, entry, tvar)
@@ -384,17 +519,14 @@ nix_security_release(struct cnxk_eth_dev *dev)
 		/* Clear lookup mem */
 		cnxk_nix_lookup_mem_sa_base_clear(dev);
 
-		rc = roc_nix_inl_inb_fini(nix);
-		if (rc)
-			plt_err("Failed to cleanup nix inline inb, rc=%d", rc);
-		ret |= rc;
+		ret |= cnxk_nix_inl_inb_fini(dev);
 
 		cnxk_nix_lookup_mem_metapool_clear(dev);
+	}
 
-		if (dev->inb.sa_dptr) {
-			plt_free(dev->inb.sa_dptr);
-			dev->inb.sa_dptr = NULL;
-		}
+	if (dev->ip_reass_en) {
+		cnxk_nix_ip_reass_rule_clr(eth_dev);
+		dev->ip_reass_en = false;
 	}
 
 	/* Cleanup Inline outbound */
@@ -946,7 +1078,7 @@ cnxk_nix_rx_queue_release(struct rte_eth_dev *eth_dev, uint16_t qid)
 	plt_nix_dbg("Releasing rxq %u", qid);
 
 	/* Release rq reference for inline dev if present */
-	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY)
+	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY || dev->ip_reass_en)
 		roc_nix_inl_dev_rq_put(rq);
 
 	/* Cleanup ROC RQ */
@@ -1760,6 +1892,18 @@ cnxk_nix_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qid)
 	return rc;
 }
 
+int
+cnxk_nix_ip_reass_rule_clr(struct rte_eth_dev *eth_dev)
+{
+	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+	struct roc_npc *npc = &dev->npc;
+
+	if (dev->ip_reass_en)
+		return roc_npc_mcam_free_entry(npc, dev->ip_reass_rule_id);
+	else
+		return 0;
+}
+
 static int
 cnxk_nix_dev_stop(struct rte_eth_dev *eth_dev)
 {
@@ -1842,7 +1986,7 @@ cnxk_nix_dev_start(struct rte_eth_dev *eth_dev)
 			return rc;
 	}
 
-	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
+	if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) || dev->ip_reass_en) {
 		rc = roc_nix_inl_rq_ena_dis(&dev->nix, true);
 		if (rc) {
 			plt_err("Failed to enable Inline device RQ, rc=%d", rc);
@@ -2258,6 +2402,11 @@ cnxk_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool reset)
 	/* Disable and free rte_meter entries */
 	nix_meter_fini(dev);
 
+	if (dev->ip_reass_en) {
+		cnxk_nix_ip_reass_rule_clr(eth_dev);
+		dev->ip_reass_en = false;
+	}
+
 	/* Disable and free rte_flow entries */
 	roc_npc_fini(&dev->npc);
 
diff --git a/drivers/net/cnxk/cnxk_ethdev.h b/drivers/net/cnxk/cnxk_ethdev.h
index 3d0a587406..1b63b02ad8 100644
--- a/drivers/net/cnxk/cnxk_ethdev.h
+++ b/drivers/net/cnxk/cnxk_ethdev.h
@@ -427,6 +427,8 @@ struct cnxk_eth_dev {
 	/* Reassembly dynfield/flag offsets */
 	int reass_dynfield_off;
 	int reass_dynflag_bit;
+	uint32_t ip_reass_rule_id;
+	bool ip_reass_en;
 
 	/* MCS device */
 	struct cnxk_mcs_dev *mcs_dev;
@@ -645,6 +647,10 @@ int cnxk_nix_tm_set_queue_rate_limit(struct rte_eth_dev *eth_dev,
 int cnxk_nix_tm_mark_vlan_dei(struct rte_eth_dev *eth_dev, int mark_green,
 			      int mark_yellow, int mark_red,
 			      struct rte_tm_error *error);
+int cnxk_nix_ip_reass_rule_clr(struct rte_eth_dev *eth_dev);
+int cnxk_nix_ip_reass_rule_set(struct rte_eth_dev *eth_dev, uint32_t rq);
+int cnxk_nix_inl_inb_fini(struct cnxk_eth_dev *dev);
+
 int cnxk_nix_tm_mark_ip_ecn(struct rte_eth_dev *eth_dev, int mark_green,
 			    int mark_yellow, int mark_red,
 			    struct rte_tm_error *error);
@@ -736,6 +742,8 @@ __rte_internal
 int cnxk_nix_inb_mode_set(struct cnxk_eth_dev *dev, bool use_inl_dev);
 __rte_internal
 void cnxk_ethdev_rx_offload_cb_register(cnxk_ethdev_rx_offload_cb_t cb);
+int cnxk_nix_inline_inbound_setup(struct cnxk_eth_dev *dev);
+int cnxk_nix_inline_inbound_mode_setup(struct cnxk_eth_dev *dev);
 
 struct cnxk_eth_sec_sess *cnxk_eth_sec_sess_get_by_sa_idx(struct cnxk_eth_dev *dev,
 							  uint32_t sa_idx, bool inb);
-- 
2.34.1


^ permalink raw reply related	[flat|nested] 27+ messages in thread

* [PATCH 2/8] net/cnxk: support IPsec Rx inject for cn20k
  2026-02-19  9:08 [PATCH 1/8] net/cnxk: support of plain packet reassembly Rahul Bhansali
@ 2026-02-19  9:08 ` Rahul Bhansali
  2026-02-19  9:08 ` [PATCH 3/8] common/cnxk: update platform features Rahul Bhansali
                   ` (8 subsequent siblings)
  9 siblings, 0 replies; 27+ messages in thread
From: Rahul Bhansali @ 2026-02-19  9:08 UTC (permalink / raw)
  To: dev, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori,
	Satha Rao, Harman Kalra
  Cc: jerinj, Rahul Bhansali

Adds support for IPsec Rx inject handling on the cn20k platform.

Signed-off-by: Rahul Bhansali <rbhansali@marvell.com>
---
 doc/guides/rel_notes/release_26_03.rst |   1 +
 drivers/net/cnxk/cn20k_ethdev_sec.c    |  50 +++++++
 drivers/net/cnxk/cn20k_rx.h            | 174 +++++++++++++++++++++++++
 3 files changed, 225 insertions(+)

diff --git a/doc/guides/rel_notes/release_26_03.rst b/doc/guides/rel_notes/release_26_03.rst
index b1f9b3c82b..300e105740 100644
--- a/doc/guides/rel_notes/release_26_03.rst
+++ b/doc/guides/rel_notes/release_26_03.rst
@@ -81,6 +81,7 @@ New Features
 
   * Added out-of-place support for CN20K SoC.
   * Added plain packet reassembly support for CN20K SoC.
+  * Added IPsec Rx inject support for CN20K SoC.
 
 * **Updated ZTE zxdh ethernet driver.**
 
diff --git a/drivers/net/cnxk/cn20k_ethdev_sec.c b/drivers/net/cnxk/cn20k_ethdev_sec.c
index c6a51f99f5..e406f0e879 100644
--- a/drivers/net/cnxk/cn20k_ethdev_sec.c
+++ b/drivers/net/cnxk/cn20k_ethdev_sec.c
@@ -1172,6 +1172,54 @@ eth_sec_caps_add(struct rte_security_capability eth_sec_caps[], uint32_t *idx,
 	*idx += nb_caps;
 }
 
+static uint16_t __rte_hot
+cn20k_eth_sec_inb_rx_inject(void *device, struct rte_mbuf **pkts,
+			    struct rte_security_session **sess, uint16_t nb_pkts)
+{
+	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
+	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+
+	return cn20k_nix_inj_pkts(sess, &dev->inj_cfg, pkts, nb_pkts);
+}
+
+static int
+cn20k_eth_sec_rx_inject_config(void *device, uint16_t port_id, bool enable)
+{
+	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
+	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+	uint64_t channel, pf_func, inj_match_id = 0xFFFFUL;
+	struct cnxk_ethdev_inj_cfg *inj_cfg;
+	struct roc_nix *nix = &dev->nix;
+	struct roc_cpt_lf *inl_lf;
+	uint64_t sa_base;
+
+	if (!rte_eth_dev_is_valid_port(port_id))
+		return -EINVAL;
+
+	if (eth_dev->data->dev_started || !eth_dev->data->dev_configured)
+		return -EBUSY;
+
+	if (!roc_nix_inl_inb_rx_inject_enable(nix, dev->inb.inl_dev))
+		return -ENOTSUP;
+
+	roc_idev_nix_rx_inject_set(port_id, enable);
+
+	inl_lf = roc_nix_inl_inb_inj_lf_get(nix);
+	if (!inl_lf)
+		return -ENOTSUP;
+	sa_base = roc_nix_inl_inb_sa_base_get(nix, dev->inb.inl_dev);
+
+	inj_cfg = &dev->inj_cfg;
+	inj_cfg->sa_base = sa_base | eth_dev->data->port_id;
+	inj_cfg->io_addr = inl_lf->io_addr;
+	inj_cfg->lmt_base = nix->lmt_base;
+	channel = roc_nix_get_base_chan(nix);
+	pf_func = roc_idev_nix_inl_dev_pffunc_get();
+	inj_cfg->cmd_w0 = pf_func << 48 | inj_match_id << 32 | channel << 4;
+
+	return 0;
+}
+
 #define CPT_LMTST_BURST 32
 static uint16_t
 cn20k_inl_dev_submit(struct roc_nix_inl_dev_q *q, void *inst, uint16_t nb_inst)
@@ -1233,6 +1281,8 @@ cn20k_eth_sec_ops_override(void)
 	cnxk_eth_sec_ops.capabilities_get = cn20k_eth_sec_capabilities_get;
 	cnxk_eth_sec_ops.session_update = cn20k_eth_sec_session_update;
 	cnxk_eth_sec_ops.session_stats_get = cn20k_eth_sec_session_stats_get;
+	cnxk_eth_sec_ops.rx_inject_configure = cn20k_eth_sec_rx_inject_config;
+	cnxk_eth_sec_ops.inb_pkt_rx_inject = cn20k_eth_sec_inb_rx_inject;
 
 	/* Update platform specific rte_pmd_cnxk ops */
 	cnxk_pmd_ops.inl_dev_submit = cn20k_inl_dev_submit;
diff --git a/drivers/net/cnxk/cn20k_rx.h b/drivers/net/cnxk/cn20k_rx.h
index d6c217cdf5..ca056efd30 100644
--- a/drivers/net/cnxk/cn20k_rx.h
+++ b/drivers/net/cnxk/cn20k_rx.h
@@ -890,6 +890,169 @@ cn20k_nix_flush_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pk
 
 #if defined(RTE_ARCH_ARM64)
 
+static __rte_always_inline uint16_t
+cn20k_nix_rx_inj_prepare_mseg(struct rte_mbuf *m, uint64_t *cmd)
+{
+	union nix_send_sg_s *sg, l_sg;
+	struct rte_mbuf *m_next;
+	uint16_t segdw, nb_segs;
+	uint64_t len, dlen;
+	uint64_t *slist;
+
+	sg = (union nix_send_sg_s *)cmd;
+	l_sg.u = sg->u;
+	l_sg.u &= 0xC00000000000000; /* LD_TYPE */
+	l_sg.subdc = NIX_SUBDC_SG;
+	nb_segs = m->nb_segs;
+	len = m->pkt_len;
+	slist = &cmd[1];
+
+	/* Fill mbuf segments */
+	do {
+		*slist = rte_pktmbuf_iova(m);
+		dlen = m->data_len;
+		len -= dlen;
+
+		/* Set the segment length */
+		l_sg.u |= ((uint64_t)dlen << (l_sg.segs << 4));
+		l_sg.segs += 1;
+		slist++;
+		nb_segs--;
+		if (l_sg.segs > 2 && nb_segs) {
+			sg->u = l_sg.u;
+			/* Next SG subdesc */
+			sg = (union nix_send_sg_s *)slist;
+			l_sg.u = sg->u;
+			l_sg.u &= 0xC00000000000000; /* LD_TYPE */
+			l_sg.subdc = NIX_SUBDC_SG;
+			slist++;
+		}
+		m_next = m->next;
+		m = m_next;
+	} while (nb_segs);
+
+	/* Add remaining bytes of data to last seg */
+	if (len) {
+		uint8_t shft = (l_sg.subdc == NIX_SUBDC_SG) ? ((l_sg.segs - 1) << 4) : 0;
+		dlen = ((l_sg.u >> shft) & 0xFFFFULL) + len;
+		l_sg.u = l_sg.u & ~(0xFFFFULL << shft);
+		l_sg.u |= dlen << shft;
+	}
+	/* Write the last subdc out */
+	sg->u = l_sg.u;
+
+	segdw = (uint64_t *)slist - cmd;
+	/* Roundup extra dwords to multiple of 2 */
+	segdw = (segdw >> 1) + (segdw & 0x1);
+	return segdw;
+}
+
+static __rte_always_inline uint16_t
+cn20k_nix_inj_pkts(struct rte_security_session **sess, struct cnxk_ethdev_inj_cfg *inj_cfg,
+		   struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+	uintptr_t c_lbase = inj_cfg->lmt_base;
+	struct cn20k_sec_sess_priv sess_priv;
+	uint64_t sa_base = inj_cfg->sa_base;
+	uint16_t c_lmt_id, burst, left, i;
+	uintptr_t cptres, rxphdr, dptr;
+	struct rte_mbuf *m, *last;
+	uint64_t sa, w0, gthr_sz;
+	uint8_t lnum, shft, loff;
+	uint64x2_t cmd01, cmd23;
+	uint64_t ucode_cmd[4];
+	rte_iova_t c_io_addr;
+	uint16_t segdw, segs;
+	uint64_t *laddr;
+
+	/* Get LMT base address and LMT ID as lcore id */
+	ROC_LMT_CPT_BASE_ID_GET(c_lbase, c_lmt_id);
+	c_io_addr = inj_cfg->io_addr;
+
+	left = nb_pkts;
+again:
+	burst = left > 32 ? 32 : left;
+
+	lnum = 0;
+	loff = 0;
+	shft = 16;
+
+	for (i = 0; i < burst; i++) {
+		m = tx_pkts[i];
+		sess_priv.u64 = sess[i]->fast_mdata;
+		last = rte_pktmbuf_lastseg(m);
+
+		cptres = rte_pktmbuf_mtod_offset(last, uintptr_t, last->data_len);
+		cptres += BIT_ULL(7);
+		cptres = (cptres - 1) & ~(BIT_ULL(7) - 1);
+		segs = m->nb_segs;
+
+		if (segs > 1) {
+			/* Pointer to WQE header */
+			/* Reserve 8 Dwords of WQE Hdr + Rx Parse Hdr */
+			rxphdr = cptres + 8;
+			dptr = rxphdr + 7 * 8;
+			/* Prepare Multiseg SG list */
+			segdw = cn20k_nix_rx_inj_prepare_mseg(m, (uint64_t *)(dptr));
+			*(uint64_t *)rxphdr = (uint64_t)(segdw - 1) << 12;
+			cptres += 64 + segdw * 16;
+			gthr_sz = (segs % 3) == 0 ? (segs / 3) : (segs / 3 + 1);
+			ucode_cmd[1] = dptr | (gthr_sz << 60);
+		} else {
+			dptr = (uint64_t)rte_pktmbuf_iova(m);
+			ucode_cmd[1] = dptr;
+		}
+
+		/* Prepare CPT instruction */
+		/* CPT word 0 and 1 */
+		cmd01 = vdupq_n_u64(0);
+		w0 = inj_cfg->cmd_w0 | ((uint64_t)m->l2_len - 2) << 24 | (uint64_t)m->l2_len << 16;
+		cmd01 = vsetq_lane_u64(w0, cmd01, 0);
+		cmd01 = vsetq_lane_u64(cptres, cmd01, 1);
+
+		/* CPT word 2 and 3 */
+		cmd23 = vdupq_n_u64(0);
+		/* Set PF func */
+		w0 &= 0xFFFF000000000000UL;
+		cmd23 = vsetq_lane_u64(w0, cmd23, 0);
+		cmd23 = vsetq_lane_u64(((uint64_t)m + sizeof(struct rte_mbuf)) | 1, cmd23, 1);
+
+		sa_base &= ~0xFFFFUL;
+		sa = (uintptr_t)roc_nix_inl_ow_ipsec_inb_sa(sa_base, sess_priv.sa_idx);
+		ucode_cmd[0] = (ROC_IE_OW_MAJOR_OP_PROCESS_INBOUND_IPSEC << 48 | 1UL << 54 |
+				((uint64_t)sess_priv.chksum) << 32 | (1ULL << 34) | m->pkt_len);
+
+		ucode_cmd[2] = 0;
+		ucode_cmd[3] = (ROC_CPT_DFLT_ENG_GRP_SE << 61 | 1UL << 60 | sa);
+
+		/* Move to our line */
+		laddr = LMT_OFF(c_lbase, lnum, loff ? 64 : 0);
+
+		/* Write CPT instruction to lmt line */
+		vst1q_u64(laddr, cmd01);
+		vst1q_u64((laddr + 2), cmd23);
+
+		*(__uint128_t *)(laddr + 4) = *(__uint128_t *)ucode_cmd;
+		*(__uint128_t *)(laddr + 6) = *(__uint128_t *)(ucode_cmd + 2);
+
+		loff = !loff;
+		lnum = lnum + (loff ? 0 : 1);
+		shft = shft + (loff ? 0 : 3);
+	}
+
+	left -= burst;
+	tx_pkts += burst;
+	sess += burst;
+
+	cn20k_nix_sec_steorl(c_io_addr, c_lmt_id, lnum, loff, shft);
+
+	rte_io_wmb();
+	if (left)
+		goto again;
+
+	return nb_pkts;
+}
+
 static __rte_always_inline void
 nix_sec_meta_to_mbuf(uintptr_t inb_sa, uintptr_t cpth, struct rte_mbuf **inner, uint64_t *ol_flags,
 		     const uint16_t flags, uint64x2_t *rearm, uint64_t buf_sz)
@@ -1741,6 +1904,17 @@ cn20k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts, c
 	return 0;
 }
 
+static __rte_always_inline uint16_t
+cn20k_nix_inj_pkts(struct rte_security_session **sess, struct cnxk_ethdev_inj_cfg *inj_cfg,
+		   struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+	RTE_SET_USED(sess);
+	RTE_SET_USED(inj_cfg);
+	RTE_SET_USED(tx_pkts);
+	RTE_SET_USED(nb_pkts);
+	return 0;
+}
+
 #endif
 
 #define RSS_F	  NIX_RX_OFFLOAD_RSS_F
-- 
2.34.1


^ permalink raw reply related	[flat|nested] 27+ messages in thread

* [PATCH 3/8] common/cnxk: update platform features
  2026-02-19  9:08 [PATCH 1/8] net/cnxk: support of plain packet reassembly Rahul Bhansali
  2026-02-19  9:08 ` [PATCH 2/8] net/cnxk: support IPsec Rx inject for cn20k Rahul Bhansali
@ 2026-02-19  9:08 ` Rahul Bhansali
  2026-02-19  9:08 ` [PATCH 4/8] common/cnxk: add RQ PB and WQE cache config Rahul Bhansali
                   ` (7 subsequent siblings)
  9 siblings, 0 replies; 27+ messages in thread
From: Rahul Bhansali @ 2026-02-19  9:08 UTC (permalink / raw)
  To: dev, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori,
	Satha Rao, Harman Kalra
  Cc: jerinj, Rahul Bhansali

Adds a cnf20ka platform check for the plain packet reassembly
feature.

Signed-off-by: Rahul Bhansali <rbhansali@marvell.com>
---
 drivers/common/cnxk/roc_features.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/common/cnxk/roc_features.h b/drivers/common/cnxk/roc_features.h
index 3c34041d76..57a51c4db3 100644
--- a/drivers/common/cnxk/roc_features.h
+++ b/drivers/common/cnxk/roc_features.h
@@ -117,7 +117,7 @@ roc_feature_nix_has_inl_profile(void)
 static inline bool
 roc_feature_nix_has_plain_pkt_reassembly(void)
 {
-	return roc_model_is_cn20k();
+	return roc_model_is_cn20k() && !roc_model_is_cnf20ka();
 }
 
 static inline bool
-- 
2.34.1


^ permalink raw reply related	[flat|nested] 27+ messages in thread

* [PATCH 4/8] common/cnxk: add RQ PB and WQE cache config
  2026-02-19  9:08 [PATCH 1/8] net/cnxk: support of plain packet reassembly Rahul Bhansali
  2026-02-19  9:08 ` [PATCH 2/8] net/cnxk: support IPsec Rx inject for cn20k Rahul Bhansali
  2026-02-19  9:08 ` [PATCH 3/8] common/cnxk: update platform features Rahul Bhansali
@ 2026-02-19  9:08 ` Rahul Bhansali
  2026-02-19  9:08 ` [PATCH 5/8] net/cnxk: config RQ PB and WQE caching Rahul Bhansali
                   ` (6 subsequent siblings)
  9 siblings, 0 replies; 27+ messages in thread
From: Rahul Bhansali @ 2026-02-19  9:08 UTC (permalink / raw)
  To: dev, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori,
	Satha Rao, Harman Kalra
  Cc: jerinj, Rahul Bhansali

Adds RQ PB (packet buffer) and WQE cache configuration
options.

Signed-off-by: Rahul Bhansali <rbhansali@marvell.com>
---
 drivers/common/cnxk/roc_nix.h       | 14 ++++++++++++++
 drivers/common/cnxk/roc_nix_inl.c   |  2 ++
 drivers/common/cnxk/roc_nix_queue.c | 16 ++++++++--------
 3 files changed, 24 insertions(+), 8 deletions(-)

diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
index ffa1a706f9..7bc3e1f5c6 100644
--- a/drivers/common/cnxk/roc_nix.h
+++ b/drivers/common/cnxk/roc_nix.h
@@ -34,6 +34,16 @@
 #define ROC_NIX_LSO_FORMAT_IDX_TSOV6 1
 #define ROC_NIX_LSO_FORMAT_IDX_IPV4  2
 
+#define ROC_NIX_RQ_MAX_PB_CACHING_VAL 3
+
+/* First aligned cache block is allocated into the LLC.
+ * All remaining cache blocks are not allocated.
+ */
+#define ROC_NIX_RQ_DEFAULT_PB_CACHING 2
+
+/* Writes of WQE data are allocated into LLC. */
+#define ROC_NIX_RQ_DEFAULT_WQE_CACHING 1
+
 enum roc_nix_rss_reta_sz {
 	ROC_NIX_RSS_RETA_SZ_64 = 64,
 	ROC_NIX_RSS_RETA_SZ_128 = 128,
@@ -448,6 +458,10 @@ struct roc_nix_rq {
 	bool spb_drop_ena;
 	/* XQE drop enable */
 	bool xqe_drop_ena;
+	/* RQ PB caching */
+	uint8_t pb_caching;
+	/* RQ WQE caching */
+	uint8_t wqe_caching;
 	/* End of Input parameters */
 	struct roc_nix *roc_nix;
 	uint64_t meta_aura_handle;
diff --git a/drivers/common/cnxk/roc_nix_inl.c b/drivers/common/cnxk/roc_nix_inl.c
index a21c40acf1..911c349604 100644
--- a/drivers/common/cnxk/roc_nix_inl.c
+++ b/drivers/common/cnxk/roc_nix_inl.c
@@ -1838,6 +1838,8 @@ roc_nix_inl_dev_rq_get(struct roc_nix_rq *rq, bool enable)
 	inl_rq->spb_ena = rq->spb_ena;
 	inl_rq->spb_aura_handle = rq->spb_aura_handle;
 	inl_rq->spb_size = rq->spb_size;
+	inl_rq->pb_caching = rq->pb_caching;
+	inl_rq->wqe_caching = rq->wqe_caching;
 
 	if (roc_errata_nix_no_meta_aura()) {
 		uint64_t aura_limit =
diff --git a/drivers/common/cnxk/roc_nix_queue.c b/drivers/common/cnxk/roc_nix_queue.c
index ab3a71ec60..ef9b651022 100644
--- a/drivers/common/cnxk/roc_nix_queue.c
+++ b/drivers/common/cnxk/roc_nix_queue.c
@@ -499,7 +499,7 @@ nix_rq_cn9k_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints,
 		aq->rq.sso_grp = rq->hwgrp;
 		aq->rq.ena_wqwd = 1;
 		aq->rq.wqe_skip = rq->wqe_skip;
-		aq->rq.wqe_caching = 1;
+		aq->rq.wqe_caching = rq->wqe_caching;
 
 		aq->rq.good_utag = rq->tag_mask >> 24;
 		aq->rq.bad_utag = rq->tag_mask >> 24;
@@ -530,7 +530,7 @@ nix_rq_cn9k_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints,
 	aq->rq.lpb_sizem1 = rq->lpb_size / 8;
 	aq->rq.lpb_sizem1 -= 1; /* Expressed in size minus one */
 	aq->rq.ena = ena;
-	aq->rq.pb_caching = 0x2; /* First cache aligned block to LLC */
+	aq->rq.pb_caching = rq->pb_caching;
 	aq->rq.xqe_imm_size = 0; /* No pkt data copy to CQE */
 	aq->rq.rq_int_ena = 0;
 	/* Many to one reduction */
@@ -616,7 +616,7 @@ nix_rq_cn10k_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints, bool cf
 		aq->rq.sso_grp = rq->hwgrp;
 		aq->rq.ena_wqwd = 1;
 		aq->rq.wqe_skip = rq->wqe_skip;
-		aq->rq.wqe_caching = 1;
+		aq->rq.wqe_caching = rq->wqe_caching;
 
 		aq->rq.xqe_drop_ena = 0;
 		aq->rq.good_utag = rq->tag_mask >> 24;
@@ -647,7 +647,7 @@ nix_rq_cn10k_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints, bool cf
 		aq->rq.ipsecd_drop_en = 1;
 		aq->rq.ena_wqwd = 1;
 		aq->rq.wqe_skip = rq->wqe_skip;
-		aq->rq.wqe_caching = 1;
+		aq->rq.wqe_caching = rq->wqe_caching;
 	}
 
 	aq->rq.lpb_aura = roc_npa_aura_handle_to_aura(rq->aura_handle);
@@ -683,7 +683,7 @@ nix_rq_cn10k_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints, bool cf
 		aq->rq.spb_ena = 0;
 	}
 
-	aq->rq.pb_caching = 0x2; /* First cache aligned block to LLC */
+	aq->rq.pb_caching = rq->pb_caching;
 	aq->rq.xqe_imm_size = 0; /* No pkt data copy to CQE */
 	aq->rq.rq_int_ena = 0;
 	/* Many to one reduction */
@@ -797,7 +797,7 @@ nix_rq_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints, bool cfg, boo
 		aq->rq.sso_grp = rq->hwgrp;
 		aq->rq.ena_wqwd = 1;
 		aq->rq.wqe_skip = rq->wqe_skip;
-		aq->rq.wqe_caching = 1;
+		aq->rq.wqe_caching = rq->wqe_caching;
 
 		aq->rq.good_utag = rq->tag_mask >> 24;
 		aq->rq.bad_utag = rq->tag_mask >> 24;
@@ -816,7 +816,7 @@ nix_rq_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints, bool cfg, boo
 		aq->rq.ipsecd_drop_en = 1;
 		aq->rq.ena_wqwd = 1;
 		aq->rq.wqe_skip = rq->wqe_skip;
-		aq->rq.wqe_caching = 1;
+		aq->rq.wqe_caching = rq->wqe_caching;
 	}
 
 	aq->rq.lpb_aura = roc_npa_aura_handle_to_aura(rq->aura_handle);
@@ -852,7 +852,7 @@ nix_rq_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints, bool cfg, boo
 		aq->rq.spb_ena = 0;
 	}
 
-	aq->rq.pb_caching = 0x2; /* First cache aligned block to LLC */
+	aq->rq.pb_caching = rq->pb_caching;
 	aq->rq.xqe_imm_size = 0; /* No pkt data copy to CQE */
 	aq->rq.rq_int_ena = 0;
 	/* Many to one reduction */
-- 
2.34.1


^ permalink raw reply related	[flat|nested] 27+ messages in thread

* [PATCH 5/8] net/cnxk: config RQ PB and WQE caching
  2026-02-19  9:08 [PATCH 1/8] net/cnxk: support of plain packet reassembly Rahul Bhansali
                   ` (2 preceding siblings ...)
  2026-02-19  9:08 ` [PATCH 4/8] common/cnxk: add RQ PB and WQE cache config Rahul Bhansali
@ 2026-02-19  9:08 ` Rahul Bhansali
  2026-02-19  9:08 ` [PATCH 6/8] net/cnxk: update SA context push size Rahul Bhansali
                   ` (5 subsequent siblings)
  9 siblings, 0 replies; 27+ messages in thread
From: Rahul Bhansali @ 2026-02-19  9:08 UTC (permalink / raw)
  To: dev, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori,
	Satha Rao, Harman Kalra
  Cc: jerinj, Rahul Bhansali

Configure RQ's PB (packet buffer) and WQE caching to default
values.

Signed-off-by: Rahul Bhansali <rbhansali@marvell.com>
---
 drivers/net/cnxk/cnxk_eswitch.c | 2 ++
 drivers/net/cnxk/cnxk_ethdev.c  | 2 ++
 2 files changed, 4 insertions(+)

diff --git a/drivers/net/cnxk/cnxk_eswitch.c b/drivers/net/cnxk/cnxk_eswitch.c
index 6b1bfdd476..e45c7dfd07 100644
--- a/drivers/net/cnxk/cnxk_eswitch.c
+++ b/drivers/net/cnxk/cnxk_eswitch.c
@@ -389,6 +389,8 @@ cnxk_eswitch_rxq_setup(struct cnxk_eswitch_dev *eswitch_dev, uint16_t qid, uint1
 	rq->aura_handle = lpb_pool->pool_id;
 	rq->flow_tag_width = 32;
 	rq->sso_ena = false;
+	rq->pb_caching = ROC_NIX_RQ_DEFAULT_PB_CACHING;
+	rq->wqe_caching = ROC_NIX_RQ_DEFAULT_WQE_CACHING;
 
 	/* Calculate first mbuf skip */
 	first_skip = (sizeof(struct rte_mbuf));
diff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c
index c979454f37..c5daf0ca10 100644
--- a/drivers/net/cnxk/cnxk_ethdev.c
+++ b/drivers/net/cnxk/cnxk_ethdev.c
@@ -959,6 +959,8 @@ cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
 	rq->aura_handle = lpb_pool->pool_id;
 	rq->flow_tag_width = 32;
 	rq->sso_ena = false;
+	rq->pb_caching = ROC_NIX_RQ_DEFAULT_PB_CACHING;
+	rq->wqe_caching = ROC_NIX_RQ_DEFAULT_WQE_CACHING;
 
 	/* Calculate first mbuf skip */
 	first_skip = (sizeof(struct rte_mbuf));
-- 
2.34.1


^ permalink raw reply related	[flat|nested] 27+ messages in thread

* [PATCH 6/8] net/cnxk: update SA context push size
  2026-02-19  9:08 [PATCH 1/8] net/cnxk: support of plain packet reassembly Rahul Bhansali
                   ` (3 preceding siblings ...)
  2026-02-19  9:08 ` [PATCH 5/8] net/cnxk: config RQ PB and WQE caching Rahul Bhansali
@ 2026-02-19  9:08 ` Rahul Bhansali
  2026-02-19  9:08 ` [PATCH 7/8] net/cnxk: flow rule update for non-in-place IPsec Rahul Bhansali
                   ` (4 subsequent siblings)
  9 siblings, 0 replies; 27+ messages in thread
From: Rahul Bhansali @ 2026-02-19  9:08 UTC (permalink / raw)
  To: dev, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori,
	Satha Rao, Harman Kalra
  Cc: jerinj, Rahul Bhansali

Reduce SA context push size to 128 bytes for AES_GCM encryption
to improve CPT performance on the CN20K platform.
Also, correct a few cn20k-specific macros.

Signed-off-by: Rahul Bhansali <rbhansali@marvell.com>
---
 drivers/net/cnxk/cn20k_ethdev_sec.c | 96 ++++++++++++++++++++++++++---
 1 file changed, 87 insertions(+), 9 deletions(-)

diff --git a/drivers/net/cnxk/cn20k_ethdev_sec.c b/drivers/net/cnxk/cn20k_ethdev_sec.c
index e406f0e879..5553aed1b1 100644
--- a/drivers/net/cnxk/cn20k_ethdev_sec.c
+++ b/drivers/net/cnxk/cn20k_ethdev_sec.c
@@ -655,16 +655,71 @@ outb_dbg_iv_update(struct roc_ow_ipsec_outb_sa *outb_sa, const char *__iv_str)
 	}
 
 	/* Update source of IV */
-	outb_sa->w2.s.iv_src = ROC_IE_OT_SA_IV_SRC_FROM_SA;
+	outb_sa->w2.s.iv_src = ROC_IE_OW_SA_IV_SRC_FROM_SA;
 	free(iv_str);
 }
 
+static void
+cn20k_eth_sec_inb_sa_misc_fill(struct roc_ow_ipsec_inb_sa *sa,
+			       struct rte_security_ipsec_xform *ipsec_xfrm)
+{
+	struct roc_ow_ipsec_inb_ctx_update_reg *ctx;
+	size_t offset;
+
+	if (sa->w2.s.enc_type != ROC_IE_SA_ENC_AES_GCM)
+		return;
+
+	/* Update ctx push size for AES GCM */
+	offset = offsetof(struct roc_ow_ipsec_inb_sa, hmac_opad_ipad);
+	ctx = (struct roc_ow_ipsec_inb_ctx_update_reg *)((uint8_t *)sa + offset);
+	sa->w0.s.hw_ctx_off = offset / 8;
+	sa->w0.s.ctx_push_size = sa->w0.s.hw_ctx_off + 1;
+
+	if (ipsec_xfrm->life.bytes_soft_limit)
+		ctx->soft_life = ipsec_xfrm->life.bytes_soft_limit + 1;
+
+	if (ipsec_xfrm->life.packets_soft_limit)
+		ctx->soft_life = ipsec_xfrm->life.packets_soft_limit + 1;
+
+	if (ipsec_xfrm->life.bytes_hard_limit)
+		ctx->hard_life = ipsec_xfrm->life.bytes_hard_limit + 1;
+
+	if (ipsec_xfrm->life.packets_hard_limit)
+		ctx->hard_life = ipsec_xfrm->life.packets_hard_limit + 1;
+}
+
 static int
 cn20k_eth_sec_outb_sa_misc_fill(struct roc_nix *roc_nix, struct roc_ow_ipsec_outb_sa *sa,
 				void *sa_cptr, struct rte_security_ipsec_xform *ipsec_xfrm,
 				uint32_t sa_idx)
 {
+	struct roc_ow_ipsec_outb_ctx_update_reg *ctx;
 	uint64_t *ring_base, ring_addr;
+	size_t offset;
+
+	if (sa->w2.s.enc_type == ROC_IE_SA_ENC_AES_GCM) {
+		offset = offsetof(struct roc_ow_ipsec_outb_sa, hmac_opad_ipad);
+		ctx = (struct roc_ow_ipsec_outb_ctx_update_reg *)((uint8_t *)sa + offset);
+		sa->w0.s.hw_ctx_off = offset / 8;
+		sa->w0.s.ctx_push_size = sa->w0.s.hw_ctx_off + 1;
+
+		if (ipsec_xfrm->esn.value)
+			ctx->esn_val = ipsec_xfrm->esn.value - 1;
+
+		if (ipsec_xfrm->life.bytes_soft_limit)
+			ctx->soft_life = ipsec_xfrm->life.bytes_soft_limit + 1;
+
+		if (ipsec_xfrm->life.packets_soft_limit)
+			ctx->soft_life = ipsec_xfrm->life.packets_soft_limit + 1;
+
+		if (ipsec_xfrm->life.bytes_hard_limit)
+			ctx->hard_life = ipsec_xfrm->life.bytes_hard_limit + 1;
+
+		if (ipsec_xfrm->life.packets_hard_limit)
+			ctx->hard_life = ipsec_xfrm->life.packets_hard_limit + 1;
+	} else {
+		ctx = &sa->ctx;
+	}
 
 	if (roc_nix_inl_is_cq_ena(roc_nix))
 		goto done;
@@ -675,8 +730,8 @@ cn20k_eth_sec_outb_sa_misc_fill(struct roc_nix *roc_nix, struct roc_ow_ipsec_out
 			return -ENOTSUP;
 
 		ring_addr = ring_base[sa_idx >> ROC_NIX_SOFT_EXP_ERR_RING_MAX_ENTRY_LOG2];
-		sa->ctx.err_ctl.s.mode = ROC_IE_OT_ERR_CTL_MODE_RING;
-		sa->ctx.err_ctl.s.address = ring_addr >> 3;
+		ctx->err_ctl.s.mode = ROC_IE_OW_ERR_CTL_MODE_RING;
+		ctx->err_ctl.s.address = ring_addr >> 3;
 		sa->w0.s.ctx_id = ((uintptr_t)sa_cptr >> 51) & 0x1ff;
 	}
 done:
@@ -751,7 +806,7 @@ cn20k_eth_sec_session_create(void *device, struct rte_security_session_conf *con
 		uintptr_t sa;
 
 		PLT_STATIC_ASSERT(sizeof(struct cn20k_inb_priv_data) <
-				  ROC_NIX_INL_OT_IPSEC_INB_SW_RSVD);
+				  ROC_NIX_INL_OW_IPSEC_INB_SW_RSVD);
 
 		spi_mask = roc_nix_inl_inb_spi_range(nix, inl_dev, NULL, NULL);
 
@@ -796,6 +851,8 @@ cn20k_eth_sec_session_create(void *device, struct rte_security_session_conf *con
 			goto err;
 		}
 
+		cn20k_eth_sec_inb_sa_misc_fill(inb_sa_dptr, ipsec);
+
 		inb_priv = roc_nix_inl_ow_ipsec_inb_sa_sw_rsvd(inb_sa);
 		/* Back pointer to get eth_sec */
 		inb_priv->eth_sec = eth_sec;
@@ -856,7 +913,7 @@ cn20k_eth_sec_session_create(void *device, struct rte_security_session_conf *con
 		uint32_t sa_idx;
 
 		PLT_STATIC_ASSERT(sizeof(struct cn20k_outb_priv_data) <
-				  ROC_NIX_INL_OT_IPSEC_OUTB_SW_RSVD);
+				  ROC_NIX_INL_OW_IPSEC_OUTB_SW_RSVD);
 
 		/* Alloc an sa index */
 		rc = cnxk_eth_outb_sa_idx_get(dev, &sa_idx, ipsec->spi);
@@ -1138,6 +1195,7 @@ cn20k_eth_sec_session_stats_get(void *device, struct rte_security_session *sess,
 	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
 	struct cnxk_eth_sec_sess *eth_sec;
+	size_t offset;
 	int rc;
 
 	eth_sec = cnxk_eth_sec_sess_get_by_sess(dev, sess);
@@ -1152,11 +1210,31 @@ cn20k_eth_sec_session_stats_get(void *device, struct rte_security_session *sess,
 	stats->protocol = RTE_SECURITY_PROTOCOL_IPSEC;
 
 	if (eth_sec->inb) {
-		stats->ipsec.ipackets = ((struct roc_ow_ipsec_inb_sa *)eth_sec->sa)->ctx.mib_pkts;
-		stats->ipsec.ibytes = ((struct roc_ow_ipsec_inb_sa *)eth_sec->sa)->ctx.mib_octs;
+		struct roc_ow_ipsec_inb_sa *sa = (struct roc_ow_ipsec_inb_sa *)eth_sec->sa;
+		struct roc_ow_ipsec_inb_ctx_update_reg *ctx;
+
+		if (sa->w2.s.enc_type == ROC_IE_SA_ENC_AES_GCM) {
+			offset = offsetof(struct roc_ow_ipsec_inb_sa, hmac_opad_ipad);
+			ctx = (struct roc_ow_ipsec_inb_ctx_update_reg *)((uint8_t *)sa + offset);
+		} else {
+			ctx = &sa->ctx;
+		}
+
+		stats->ipsec.ipackets = ctx->mib_pkts;
+		stats->ipsec.ibytes = ctx->mib_octs;
 	} else {
-		stats->ipsec.opackets = ((struct roc_ow_ipsec_outb_sa *)eth_sec->sa)->ctx.mib_pkts;
-		stats->ipsec.obytes = ((struct roc_ow_ipsec_outb_sa *)eth_sec->sa)->ctx.mib_octs;
+		struct roc_ow_ipsec_outb_sa *sa = (struct roc_ow_ipsec_outb_sa *)eth_sec->sa;
+		struct roc_ow_ipsec_outb_ctx_update_reg *ctx;
+
+		if (sa->w2.s.enc_type == ROC_IE_SA_ENC_AES_GCM) {
+			offset = offsetof(struct roc_ow_ipsec_outb_sa, hmac_opad_ipad);
+			ctx = (struct roc_ow_ipsec_outb_ctx_update_reg *)((uint8_t *)sa + offset);
+		} else {
+			ctx = &sa->ctx;
+		}
+
+		stats->ipsec.opackets = ctx->mib_pkts;
+		stats->ipsec.obytes = ctx->mib_octs;
 	}
 
 	return 0;
-- 
2.34.1


^ permalink raw reply related	[flat|nested] 27+ messages in thread

* [PATCH 7/8] net/cnxk: flow rule update for non-in-place IPsec
  2026-02-19  9:08 [PATCH 1/8] net/cnxk: support of plain packet reassembly Rahul Bhansali
                   ` (4 preceding siblings ...)
  2026-02-19  9:08 ` [PATCH 6/8] net/cnxk: update SA context push size Rahul Bhansali
@ 2026-02-19  9:08 ` Rahul Bhansali
  2026-02-19  9:08 ` [PATCH 8/8] common/cnxk: enable CPT CQ for inline IPSec inbound Rahul Bhansali
                   ` (3 subsequent siblings)
  9 siblings, 0 replies; 27+ messages in thread
From: Rahul Bhansali @ 2026-02-19  9:08 UTC (permalink / raw)
  To: dev, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori,
	Satha Rao, Harman Kalra
  Cc: jerinj, Rahul Bhansali

Update the flow rule based on the inbound non-in-place
(out-of-place) configuration of the IPsec session.

Signed-off-by: Rahul Bhansali <rbhansali@marvell.com>
---
 drivers/net/cnxk/cn9k_flow.c        |  2 +-
 drivers/net/cnxk/cnxk_ethdev.h      |  6 +++---
 drivers/net/cnxk/cnxk_ethdev_sec.c  |  3 +--
 drivers/net/cnxk/cnxk_flow.c        | 31 +++++++++++++++++++++--------
 drivers/net/cnxk/cnxk_flow.h        |  8 ++++++--
 drivers/net/cnxk/cnxk_flow_common.c | 12 ++++++++++-
 drivers/net/cnxk/cnxk_rep_flow.c    |  4 ++--
 drivers/net/cnxk/rte_pmd_cnxk.h     |  1 +
 8 files changed, 48 insertions(+), 19 deletions(-)

diff --git a/drivers/net/cnxk/cn9k_flow.c b/drivers/net/cnxk/cn9k_flow.c
index ae4629ea69..c39564201d 100644
--- a/drivers/net/cnxk/cn9k_flow.c
+++ b/drivers/net/cnxk/cn9k_flow.c
@@ -18,7 +18,7 @@ cn9k_flow_create(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr,
 	int vtag_actions = 0;
 	int mark_actions;
 
-	flow = cnxk_flow_create_common(eth_dev, attr, pattern, actions, error, false);
+	flow = cnxk_flow_create_common(eth_dev, attr, pattern, actions, error, false, 0);
 	if (!flow)
 		return NULL;
 
diff --git a/drivers/net/cnxk/cnxk_ethdev.h b/drivers/net/cnxk/cnxk_ethdev.h
index 1b63b02ad8..e3edf39a5c 100644
--- a/drivers/net/cnxk/cnxk_ethdev.h
+++ b/drivers/net/cnxk/cnxk_ethdev.h
@@ -747,9 +747,9 @@ int cnxk_nix_inline_inbound_mode_setup(struct cnxk_eth_dev *dev);
 
 struct cnxk_eth_sec_sess *cnxk_eth_sec_sess_get_by_sa_idx(struct cnxk_eth_dev *dev,
 							  uint32_t sa_idx, bool inb);
-struct cnxk_eth_sec_sess *
-cnxk_eth_sec_sess_get_by_sess(struct cnxk_eth_dev *dev,
-			      struct rte_security_session *sess);
+struct cnxk_eth_sec_sess *cnxk_eth_sec_sess_get_by_sess(struct cnxk_eth_dev *dev,
+							const struct rte_security_session *sess);
+
 int cnxk_nix_inl_meta_pool_cb(uint64_t *aura_handle, uintptr_t *mpool, uint32_t buf_sz,
 			      uint32_t nb_bufs, bool destroy, const char *mempool_name);
 int cnxk_nix_inl_custom_meta_pool_cb(uintptr_t pmpool, uintptr_t *mpool, const char *mempool_name,
diff --git a/drivers/net/cnxk/cnxk_ethdev_sec.c b/drivers/net/cnxk/cnxk_ethdev_sec.c
index 59a00408ad..abb50d32de 100644
--- a/drivers/net/cnxk/cnxk_ethdev_sec.c
+++ b/drivers/net/cnxk/cnxk_ethdev_sec.c
@@ -287,8 +287,7 @@ cnxk_eth_sec_sess_get_by_sa_idx(struct cnxk_eth_dev *dev, uint32_t sa_idx, bool
 }
 
 struct cnxk_eth_sec_sess *
-cnxk_eth_sec_sess_get_by_sess(struct cnxk_eth_dev *dev,
-			      struct rte_security_session *sess)
+cnxk_eth_sec_sess_get_by_sess(struct cnxk_eth_dev *dev, const struct rte_security_session *sess)
 {
 	struct cnxk_eth_sec_sess *eth_sec = NULL;
 
diff --git a/drivers/net/cnxk/cnxk_flow.c b/drivers/net/cnxk/cnxk_flow.c
index 33501310e0..c1c48eb7ab 100644
--- a/drivers/net/cnxk/cnxk_flow.c
+++ b/drivers/net/cnxk/cnxk_flow.c
@@ -465,7 +465,7 @@ cnxk_map_actions(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr,
 		 const struct rte_flow_action actions[], struct roc_npc_action in_actions[],
 		 struct roc_npc_action_sample *in_sample_actions, uint32_t *flowkey_cfg,
 		 uint16_t *dst_pf_func, uint64_t *npc_default_action, uint8_t has_tunnel_pattern,
-		 bool is_rep, uint8_t rep_pattern, uint64_t *free_allocs)
+		 bool is_rep, uint8_t rep_pattern, uint64_t *free_allocs, uint32_t flow_flags)
 {
 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
 	const struct rte_flow_action_queue *act_q = NULL;
@@ -614,6 +614,8 @@ cnxk_map_actions(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr,
 		case RTE_FLOW_ACTION_TYPE_SECURITY:
 			in_actions[i].type = ROC_NPC_ACTION_TYPE_SEC;
 			in_actions[i].conf = actions->conf;
+			in_actions[i].is_non_inp = flow_flags & CNXK_FLOW_NON_INPLACE;
+			in_actions[i].no_sec_action = flow_flags & CNXK_FLOW_NO_SEC_ACTION;
 			break;
 		case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
 			in_actions[i].type = ROC_NPC_ACTION_TYPE_VLAN_STRIP;
@@ -803,7 +805,8 @@ cnxk_map_flow_data(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr
 		   struct roc_npc_attr *in_attr, struct roc_npc_item_info in_pattern[],
 		   struct roc_npc_action in_actions[],
 		   struct roc_npc_action_sample *in_sample_actions, uint32_t *flowkey_cfg,
-		   uint16_t *dst_pf_func, uint64_t *def_action, bool is_rep, uint64_t *free_allocs)
+		   uint16_t *dst_pf_func, uint64_t *def_action, bool is_rep, uint64_t *free_allocs,
+		   uint32_t flow_flags)
 {
 	uint8_t has_tunnel_pattern = 0, rep_pattern = 0;
 	int rc;
@@ -842,14 +845,14 @@ cnxk_map_flow_data(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr
 
 	return cnxk_map_actions(eth_dev, attr, actions, in_actions, in_sample_actions, flowkey_cfg,
 				dst_pf_func, def_action, has_tunnel_pattern, is_rep, rep_pattern,
-				free_allocs);
+				free_allocs, flow_flags);
 }
 
 int
 cnxk_flow_validate_common(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr,
 			  const struct rte_flow_item pattern[],
 			  const struct rte_flow_action actions[], struct rte_flow_error *error,
-			  bool is_rep)
+			  bool is_rep, uint32_t flow_flags)
 {
 	struct roc_npc_item_info in_pattern[ROC_NPC_ITEM_TYPE_END + 1];
 	struct roc_npc_action in_actions[ROC_NPC_MAX_ACTION_COUNT];
@@ -891,7 +894,7 @@ cnxk_flow_validate_common(struct rte_eth_dev *eth_dev, const struct rte_flow_att
 	}
 	rc = cnxk_map_flow_data(eth_dev, attr, pattern, actions, &in_attr, in_pattern, in_actions,
 				&in_sample_action, &flowkey_cfg, &dst_pf_func, &npc_default_action,
-				is_rep, free_allocs);
+				is_rep, free_allocs, flow_flags);
 	if (rc) {
 		rte_flow_error_set(error, 0, RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
 				   "Failed to map flow data");
@@ -919,14 +922,26 @@ cnxk_flow_validate(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr
 		   const struct rte_flow_item pattern[], const struct rte_flow_action actions[],
 		   struct rte_flow_error *error)
 {
-	return cnxk_flow_validate_common(eth_dev, attr, pattern, actions, error, false);
+	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+	struct cnxk_eth_sec_sess *eth_sec = NULL;
+	uint32_t flow_flags = 0;
+
+	if (actions[0].type == RTE_FLOW_ACTION_TYPE_SECURITY) {
+		eth_sec = cnxk_eth_sec_sess_get_by_sess(dev, actions[0].conf);
+		if (eth_sec != NULL) {
+			flow_flags = eth_sec->inb_oop ? CNXK_FLOW_NON_INPLACE : 0;
+			flow_flags |= CNXK_FLOW_NO_SEC_ACTION;
+		}
+	}
+
+	return cnxk_flow_validate_common(eth_dev, attr, pattern, actions, error, false, flow_flags);
 }
 
 struct roc_npc_flow *
 cnxk_flow_create_common(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr,
 			const struct rte_flow_item pattern[],
 			const struct rte_flow_action actions[], struct rte_flow_error *error,
-			bool is_rep)
+			bool is_rep, uint32_t flow_flags)
 {
 	struct roc_npc_item_info in_pattern[ROC_NPC_ITEM_TYPE_END + 1] = {0};
 	struct roc_npc_action in_actions[ROC_NPC_MAX_ACTION_COUNT] = {0};
@@ -962,7 +977,7 @@ cnxk_flow_create_common(struct rte_eth_dev *eth_dev, const struct rte_flow_attr
 	memset(&in_attr, 0, sizeof(struct roc_npc_attr));
 	rc = cnxk_map_flow_data(eth_dev, attr, pattern, actions, &in_attr, in_pattern, in_actions,
 				&in_sample_action, &npc->flowkey_cfg_state, &dst_pf_func,
-				&npc_default_action, is_rep, free_allocs);
+				&npc_default_action, is_rep, free_allocs, flow_flags);
 	if (rc) {
 		rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
 				   "Failed to map flow data");
diff --git a/drivers/net/cnxk/cnxk_flow.h b/drivers/net/cnxk/cnxk_flow.h
index 80b8d2c36a..2986ea81d1 100644
--- a/drivers/net/cnxk/cnxk_flow.h
+++ b/drivers/net/cnxk/cnxk_flow.h
@@ -20,6 +20,9 @@ struct cnxk_rte_flow_action_info {
 	uint16_t conf_size;
 };
 
+#define CNXK_FLOW_NO_SEC_ACTION BIT(0)
+#define CNXK_FLOW_NON_INPLACE	BIT(1)
+
 extern const struct cnxk_rte_flow_term_info term[];
 
 int cnxk_flow_destroy(struct rte_eth_dev *dev, struct roc_npc_flow *flow,
@@ -29,11 +32,12 @@ struct roc_npc_flow *cnxk_flow_create_common(struct rte_eth_dev *eth_dev,
 					     const struct rte_flow_attr *attr,
 					     const struct rte_flow_item pattern[],
 					     const struct rte_flow_action actions[],
-					     struct rte_flow_error *error, bool is_rep);
+					     struct rte_flow_error *error, bool is_rep,
+					     uint32_t flow_flags);
 int cnxk_flow_validate_common(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr,
 			      const struct rte_flow_item pattern[],
 			      const struct rte_flow_action actions[], struct rte_flow_error *error,
-			      bool is_rep);
+			      bool is_rep, uint32_t flow_flags);
 int cnxk_flow_destroy_common(struct rte_eth_dev *eth_dev, struct roc_npc_flow *flow,
 			     struct rte_flow_error *error, bool is_rep);
 int cnxk_flow_flush_common(struct rte_eth_dev *eth_dev, struct rte_flow_error *error, bool is_rep);
diff --git a/drivers/net/cnxk/cnxk_flow_common.c b/drivers/net/cnxk/cnxk_flow_common.c
index 59aa920d91..14ac3b5b65 100644
--- a/drivers/net/cnxk/cnxk_flow_common.c
+++ b/drivers/net/cnxk/cnxk_flow_common.c
@@ -122,7 +122,9 @@ cnxk_flow_create(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr,
 	const struct rte_flow_action *action_rss = NULL;
 	const struct rte_flow_action_meter *mtr = NULL;
 	const struct rte_flow_action *act_q = NULL;
+	struct cnxk_eth_sec_sess *eth_sec = NULL;
 	struct roc_npc_flow *flow;
+	uint32_t flow_flags = 0;
 	void *mcs_flow = NULL;
 	uint32_t req_act = 0;
 	int i, rc;
@@ -183,7 +185,15 @@ cnxk_flow_create(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr,
 		return mcs_flow;
 	}
 
-	flow = cnxk_flow_create_common(eth_dev, attr, pattern, actions, error, false);
+	if (actions[0].type == RTE_FLOW_ACTION_TYPE_SECURITY) {
+		eth_sec = cnxk_eth_sec_sess_get_by_sess(dev, actions[0].conf);
+		if (eth_sec != NULL) {
+			flow_flags = eth_sec->inb_oop ? CNXK_FLOW_NON_INPLACE : 0;
+			flow_flags |= CNXK_FLOW_NO_SEC_ACTION;
+		}
+	}
+
+	flow = cnxk_flow_create_common(eth_dev, attr, pattern, actions, error, false, flow_flags);
 	if (!flow) {
 		if (mtr)
 			nix_mtr_chain_reset(eth_dev, mtr->mtr_id);
diff --git a/drivers/net/cnxk/cnxk_rep_flow.c b/drivers/net/cnxk/cnxk_rep_flow.c
index f1cf81a90c..1b013ce598 100644
--- a/drivers/net/cnxk/cnxk_rep_flow.c
+++ b/drivers/net/cnxk/cnxk_rep_flow.c
@@ -547,7 +547,7 @@ cnxk_rep_flow_create_native(struct rte_eth_dev *eth_dev, const struct rte_flow_a
 	uint16_t new_entry;
 	int rc;
 
-	flow = cnxk_flow_create_common(eth_dev, attr, pattern, actions, error, true);
+	flow = cnxk_flow_create_common(eth_dev, attr, pattern, actions, error, true, 0);
 	if (!flow) {
 		plt_err("Fail to create flow");
 		goto fail;
@@ -632,7 +632,7 @@ cnxk_rep_flow_validate(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *
 	}
 
 	if (rep_dev->native_repte)
-		return cnxk_flow_validate_common(eth_dev, attr, pattern, actions, error, true);
+		return cnxk_flow_validate_common(eth_dev, attr, pattern, actions, error, true, 0);
 
 	rc = process_flow_rule(rep_dev, attr, pattern, actions, &adata, CNXK_REP_MSG_FLOW_VALIDATE);
 	if (!rc || adata.u.sval < 0) {
diff --git a/drivers/net/cnxk/rte_pmd_cnxk.h b/drivers/net/cnxk/rte_pmd_cnxk.h
index b186b529fa..d344137dd5 100644
--- a/drivers/net/cnxk/rte_pmd_cnxk.h
+++ b/drivers/net/cnxk/rte_pmd_cnxk.h
@@ -82,6 +82,7 @@ struct rte_pmd_cnxk_sec_action {
 	 * XOR.
 	 */
 	enum rte_pmd_cnxk_sec_action_alg alg;
+	bool is_non_inp;
 };
 
 #define RTE_PMD_CNXK_CTX_MAX_CKEY_LEN	   32
-- 
2.34.1


^ permalink raw reply related	[flat|nested] 27+ messages in thread

* [PATCH 8/8] common/cnxk: enable CPT CQ for inline IPSec inbound
  2026-02-19  9:08 [PATCH 1/8] net/cnxk: support of plain packet reassembly Rahul Bhansali
                   ` (5 preceding siblings ...)
  2026-02-19  9:08 ` [PATCH 7/8] net/cnxk: flow rule update for non-in-place IPsec Rahul Bhansali
@ 2026-02-19  9:08 ` Rahul Bhansali
  2026-02-26  5:05   ` Jerin Jacob
  2026-02-19 18:58 ` [PATCH 1/8] net/cnxk: support of plain packet reassembly Stephen Hemminger
                   ` (2 subsequent siblings)
  9 siblings, 1 reply; 27+ messages in thread
From: Rahul Bhansali @ 2026-02-19  9:08 UTC (permalink / raw)
  To: dev, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori,
	Satha Rao, Harman Kalra
  Cc: jerinj, Rakesh Kudurumalla

From: Rakesh Kudurumalla <rkudurumalla@marvell.com>

Add support for CPT CQ configuration for inline inbound IPsec.

Signed-off-by: Rakesh Kudurumalla <rkudurumalla@marvell.com>
---
 drivers/common/cnxk/roc_nix_inl.c         |  8 ++-
 drivers/common/cnxk/roc_nix_inl.h         |  3 +-
 drivers/common/cnxk/roc_nix_inl_dev.c     | 86 ++++++++++++++++++++---
 drivers/common/cnxk/roc_nix_inl_dev_irq.c | 19 +++--
 drivers/net/cnxk/cn20k_ethdev_sec.c       | 54 ++++++++------
 5 files changed, 132 insertions(+), 38 deletions(-)

diff --git a/drivers/common/cnxk/roc_nix_inl.c b/drivers/common/cnxk/roc_nix_inl.c
index 911c349604..26be1adac9 100644
--- a/drivers/common/cnxk/roc_nix_inl.c
+++ b/drivers/common/cnxk/roc_nix_inl.c
@@ -486,6 +486,7 @@ nix_inl_inb_ipsec_sa_tbl_setup(struct roc_nix *roc_nix)
 	} else {
 		struct nix_rx_inl_lf_cfg_req *lf_cfg;
 		uint64_t def_cptq = 0;
+		uint64_t cpt_cq_ena = 0;
 
 		/* Setup device specific inb SA table */
 		lf_cfg = mbox_alloc_msg_nix_rx_inl_lf_cfg(mbox);
@@ -508,9 +509,10 @@ nix_inl_inb_ipsec_sa_tbl_setup(struct roc_nix *roc_nix)
 		if (res_addr_offset)
 			res_addr_offset |= (1UL << 56);
 
+		cpt_cq_ena = (uint64_t)inl_dev->cpt_cq_ena << 63;
 		lf_cfg->enable = 1;
 		lf_cfg->profile_id = profile_id; /* IPsec profile is 0th one */
-		lf_cfg->rx_inline_sa_base = (uintptr_t)nix->inb_sa_base[profile_id];
+		lf_cfg->rx_inline_sa_base = (uintptr_t)nix->inb_sa_base[profile_id] | cpt_cq_ena;
 		lf_cfg->rx_inline_cfg0 =
 			((def_cptq << 57) | res_addr_offset | ((uint64_t)SSO_TT_ORDERED << 44) |
 			 (sa_pow2_sz << 16) | lenm1_max);
@@ -588,6 +590,7 @@ nix_inl_reass_inb_sa_tbl_setup(struct roc_nix *roc_nix)
 	uint64_t max_sa = 1, sa_pow2_sz;
 	uint64_t sa_idx_w, lenm1_max;
 	uint64_t res_addr_offset = 0;
+	uint64_t cpt_cq_ena = 0;
 	uint64_t def_cptq = 0;
 	size_t inb_sa_sz = 1;
 	uint8_t profile_id;
@@ -637,9 +640,10 @@ nix_inl_reass_inb_sa_tbl_setup(struct roc_nix *roc_nix)
 			res_addr_offset |= (1UL << 56);
 	}
 
+	cpt_cq_ena = (uint64_t)inl_dev->cpt_cq_ena << 63;
 	lf_cfg->enable = 1;
 	lf_cfg->profile_id = profile_id;
-	lf_cfg->rx_inline_sa_base = (uintptr_t)nix->inb_sa_base[profile_id];
+	lf_cfg->rx_inline_sa_base = (uintptr_t)nix->inb_sa_base[profile_id] | cpt_cq_ena;
 	lf_cfg->rx_inline_cfg0 =
 		((def_cptq << 57) | res_addr_offset | ((uint64_t)SSO_TT_ORDERED << 44) |
 		 (sa_pow2_sz << 16) | lenm1_max);
diff --git a/drivers/common/cnxk/roc_nix_inl.h b/drivers/common/cnxk/roc_nix_inl.h
index 596f12d1c7..d1a08a4495 100644
--- a/drivers/common/cnxk/roc_nix_inl.h
+++ b/drivers/common/cnxk/roc_nix_inl.h
@@ -44,7 +44,8 @@
 #define ROC_NIX_INL_RXC_QUE_BLK_THR 0x40UL
 
 enum nix_inl_event_type {
-	NIX_INL_CPT_CQ = 1,
+	NIX_INL_INB_CPT_CQ = 1,
+	NIX_INL_OUTB_CPT_CQ,
 	NIX_INL_SSO,
 	NIX_INL_SOFT_EXPIRY_THRD,
 };
diff --git a/drivers/common/cnxk/roc_nix_inl_dev.c b/drivers/common/cnxk/roc_nix_inl_dev.c
index 35528efa46..0f97952af3 100644
--- a/drivers/common/cnxk/roc_nix_inl_dev.c
+++ b/drivers/common/cnxk/roc_nix_inl_dev.c
@@ -382,6 +382,7 @@ nix_inl_nix_ipsec_cfg(struct nix_inl_dev *inl_dev, bool ena)
 		struct nix_rx_inl_lf_cfg_req *lf_cfg;
 		uint64_t res_addr_offset;
 		uint64_t def_cptq;
+		uint64_t cpt_cq_ena;
 
 		lf_cfg = mbox_alloc_msg_nix_rx_inl_lf_cfg(mbox);
 		if (lf_cfg == NULL) {
@@ -401,7 +402,9 @@ nix_inl_nix_ipsec_cfg(struct nix_inl_dev *inl_dev, bool ena)
 		lf_cfg->profile_id = inl_dev->ipsec_prof_id;
 		if (ena) {
 			lf_cfg->enable = 1;
-			lf_cfg->rx_inline_sa_base = (uintptr_t)inl_dev->inb_sa_base[profile_id];
+			cpt_cq_ena = (uint64_t)inl_dev->cpt_cq_ena << 63;
+			lf_cfg->rx_inline_sa_base =
+				(uintptr_t)inl_dev->inb_sa_base[profile_id] | (cpt_cq_ena);
 			lf_cfg->rx_inline_cfg0 =
 				((def_cptq << 57) | res_addr_offset |
 				 ((uint64_t)SSO_TT_ORDERED << 44) | (sa_pow2_sz << 16) | lenm1_max);
@@ -482,13 +485,33 @@ nix_inl_cpt_setup(struct nix_inl_dev *inl_dev, bool inl_dev_sso)
 lf_fini:
 	for (i = 0; i < inl_dev->nb_cptlf; i++) {
 		struct roc_cpt_lf *lf = &inl_dev->cpt_lf[i];
-		cpt_lf_fini(lf, lf->cpt_cq_ena);
+		cpt_lf_fini(lf, false);
 	}
 lf_free:
 	rc |= cpt_lfs_free(dev);
 	return rc;
 }
 
+static int
+nix_inl_cpt_cq_inb_release(struct nix_inl_dev *inl_dev)
+{
+	int i;
+
+	if (!inl_dev || !inl_dev->cpt_cq_ena)
+		return 0;
+	for (i = 0; i < inl_dev->nb_inb_cptlfs; i++) {
+		uint8_t slot_id = inl_dev->inb_cpt_lf_id + i;
+		struct roc_cpt_lf *lf = &inl_dev->cpt_lf[slot_id];
+
+		if (lf->cpt_cq_ena) {
+			cpt_lf_cq_fini(lf);
+			cpt_lf_unregister_irqs(lf, cpt_lf_misc_irq, nix_inl_cpt_done_irq);
+		}
+	}
+
+	return 0;
+}
+
 static int
 nix_inl_cpt_release(struct nix_inl_dev *inl_dev)
 {
@@ -625,6 +648,7 @@ nix_inl_nix_profile_config(struct nix_inl_dev *inl_dev, uint8_t profile_id)
 	uint64_t max_sa, sa_w, sa_pow2_sz, lenm1_max;
 	struct nix_rx_inl_lf_cfg_req *lf_cfg;
 	uint64_t res_addr_offset;
+	uint64_t cpt_cq_ena;
 	uint64_t def_cptq;
 	size_t inb_sa_sz;
 	void *sa;
@@ -665,7 +689,8 @@ nix_inl_nix_profile_config(struct nix_inl_dev *inl_dev, uint8_t profile_id)
 
 	lf_cfg->enable = 1;
 	lf_cfg->profile_id = profile_id;
-	lf_cfg->rx_inline_sa_base = (uintptr_t)inl_dev->inb_sa_base[profile_id];
+	cpt_cq_ena = (uint64_t)inl_dev->cpt_cq_ena << 63;
+	lf_cfg->rx_inline_sa_base = (uintptr_t)inl_dev->inb_sa_base[profile_id] | cpt_cq_ena;
 	lf_cfg->rx_inline_cfg0 =
 		((def_cptq << 57) | res_addr_offset | ((uint64_t)SSO_TT_ORDERED << 44) |
 		 (sa_pow2_sz << 16) | lenm1_max);
@@ -716,6 +741,38 @@ nix_inl_nix_profile_release(struct nix_inl_dev *inl_dev, uint8_t profile_id)
 	return rc;
 }
 
+static int
+nix_inl_cpt_cq_inb_setup(struct nix_inl_dev *inl_dev)
+{
+	int i, rc;
+
+	if (!inl_dev->cpt_cq_ena)
+		return 0;
+	for (i = 0; i < inl_dev->nb_inb_cptlfs; i++) {
+		uint8_t slot_id = inl_dev->inb_cpt_lf_id + i;
+		struct roc_cpt_lf *lf = &inl_dev->cpt_lf[slot_id];
+
+		lf->dq_ack_ena = true;
+		lf->cpt_cq_ena = true;
+		lf->cq_entry_size = 0;
+		lf->cq_all = 0;
+		lf->cq_size = lf->nb_desc;
+		lf->dev = &inl_dev->dev;
+		lf->cq_head = 1;
+
+		rc = cpt_lf_cq_init(lf);
+		if (rc)
+			return rc;
+
+		rc = cpt_lf_register_irqs(lf, cpt_lf_misc_irq, nix_inl_cpt_done_irq);
+		if (rc)
+			return rc;
+		roc_cpt_cq_enable(lf);
+	}
+
+	return 0;
+}
+
 static int
 nix_inl_nix_reass_setup(struct nix_inl_dev *inl_dev)
 {
@@ -1451,11 +1508,17 @@ roc_nix_inl_dev_init(struct roc_nix_inl_dev *roc_inl_dev)
 	if (rc)
 		goto sso_release;
 
+	if (roc_feature_nix_has_cpt_cq_support()) {
+		rc = nix_inl_cpt_cq_inb_setup(inl_dev);
+		if (rc)
+			goto cpt_release;
+	}
+
 	/* Setup device specific inb SA table */
 	rc = nix_inl_nix_ipsec_cfg(inl_dev, true);
 	if (rc) {
 		plt_err("Failed to setup NIX Inbound SA conf, rc=%d", rc);
-		goto cpt_release;
+		goto cpt_cq_inb_release;
 	}
 
 	/* Setup Reassembly */
@@ -1464,20 +1527,20 @@ roc_nix_inl_dev_init(struct roc_nix_inl_dev *roc_inl_dev)
 
 		rc = nix_inl_nix_reass_setup(inl_dev);
 		if (rc)
-			goto cpt_release;
+			goto cpt_cq_inb_release;
 	}
 
 	if (inl_dev->set_soft_exp_poll) {
 		rc = nix_inl_outb_poll_thread_setup(inl_dev);
 		if (rc)
-			goto cpt_release;
+			goto cpt_cq_inb_release;
 	}
 
 	/* Perform selftest if asked for */
 	if (inl_dev->selftest) {
 		rc = nix_inl_selftest();
 		if (rc)
-			goto cpt_release;
+			goto cpt_cq_inb_release;
 	}
 	inl_dev->max_ipsec_rules = roc_inl_dev->max_ipsec_rules;
 
@@ -1486,14 +1549,14 @@ roc_nix_inl_dev_init(struct roc_nix_inl_dev *roc_inl_dev)
 			plt_zmalloc(sizeof(int) * inl_dev->max_ipsec_rules, PLT_CACHE_LINE_SIZE);
 		if (inl_dev->ipsec_index == NULL) {
 			rc = NPC_ERR_NO_MEM;
-			goto cpt_release;
+			goto cpt_cq_inb_release;
 		}
 		rc = npc_mcam_alloc_entries(inl_dev->dev.mbox, inl_dev->max_ipsec_rules,
 					    inl_dev->ipsec_index, inl_dev->max_ipsec_rules,
 					    NPC_MCAM_HIGHER_PRIO, &resp_count, 1);
 		if (rc) {
 			plt_free(inl_dev->ipsec_index);
-			goto cpt_release;
+			goto cpt_cq_inb_release;
 		}
 
 		start_index = inl_dev->ipsec_index[0];
@@ -1507,6 +1570,8 @@ roc_nix_inl_dev_init(struct roc_nix_inl_dev *roc_inl_dev)
 	idev->nix_inl_dev = inl_dev;
 
 	return 0;
+cpt_cq_inb_release:
+	rc |= nix_inl_cpt_cq_inb_release(inl_dev);
 cpt_release:
 	rc |= nix_inl_cpt_release(inl_dev);
 sso_release:
@@ -1558,8 +1623,9 @@ roc_nix_inl_dev_fini(struct roc_nix_inl_dev *roc_inl_dev)
 	/* Flush Inbound CTX cache entries */
 	nix_inl_cpt_ctx_cache_sync(inl_dev);
 
+	rc = nix_inl_cpt_cq_inb_release(inl_dev);
 	/* Release CPT */
-	rc = nix_inl_cpt_release(inl_dev);
+	rc |= nix_inl_cpt_release(inl_dev);
 
 	/* Release SSO */
 	rc |= nix_inl_sso_release(inl_dev);
diff --git a/drivers/common/cnxk/roc_nix_inl_dev_irq.c b/drivers/common/cnxk/roc_nix_inl_dev_irq.c
index 89155a1f7d..30986e780a 100644
--- a/drivers/common/cnxk/roc_nix_inl_dev_irq.c
+++ b/drivers/common/cnxk/roc_nix_inl_dev_irq.c
@@ -49,10 +49,11 @@ static void
 nix_inl_cpt_cq_cb(struct roc_cpt_lf *lf)
 {
 	struct roc_nix *roc_nix = (struct roc_nix *)lf->dev->roc_nix;
+	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
 	struct idev_cfg *idev = idev_get_cfg();
 	uint32_t port_id = roc_nix->port_id;
 	struct nix_inl_dev *inl_dev = NULL;
-	struct roc_ow_ipsec_outb_sa *sa;
+	enum nix_inl_event_type cq_type;
 	union cpt_lf_cq_base cq_base;
 	union cpt_lf_cq_ptr cq_ptr;
 	struct cpt_cq_s *cq_s;
@@ -60,6 +61,7 @@ nix_inl_cpt_cq_cb(struct roc_cpt_lf *lf)
 	uint32_t count, head;
 	uint32_t nq_ptr;
 	uint64_t i;
+	void *sa;
 
 	if (idev)
 		inl_dev = idev->nix_inl_dev;
@@ -75,23 +77,30 @@ nix_inl_cpt_cq_cb(struct roc_cpt_lf *lf)
 	count = cq_ptr.s.count;
 	nq_ptr = cq_ptr.s.nq_ptr;
 
+	if (lf->dev == &inl_dev->dev)
+		cq_type = NIX_INL_INB_CPT_CQ;
+	else if (lf->dev == &nix->dev)
+		cq_type = NIX_INL_OUTB_CPT_CQ;
+	else
+		return;
+
 	for (i = 0; i < count; i++) {
 		cq_s = (struct cpt_cq_s *)(uintptr_t)(((cq_base.s.addr << 7)) + (head << 5));
 
 		if (cq_s->w0.s.uc_compcode && cq_s->w0.s.compcode) {
 			switch (cq_s->w2.s.fmt & fmt_msk) {
 			case WQE_PTR_CPTR:
-				sa = (struct roc_ow_ipsec_outb_sa *)cq_s->w1.esn;
+				sa = (void *)cq_s->w1.esn;
 				break;
 			case CPTR_WQE_PTR:
-				sa = (struct roc_ow_ipsec_outb_sa *)cq_s->w3.comp_ptr;
+				sa = (void *)cq_s->w3.comp_ptr;
 				break;
 			default:
 				plt_err("Invalid event Received ");
 				goto done;
 			}
 			uint64_t tmp = ~(uint32_t)0x0;
-			inl_dev->work_cb(&tmp, sa, NIX_INL_CPT_CQ, (void *)cq_s, port_id);
+			inl_dev->work_cb(&tmp, sa, cq_type, (void *)cq_s, port_id);
 		}
 done:
 		head = (head + 1) % lf->cq_size;
@@ -165,7 +174,7 @@ nix_inl_sso_hws_irq(void *param)
 void
 nix_inl_cpt_done_irq(void *param)
 {
-	struct roc_cpt_lf *lf = param;
+	struct roc_cpt_lf *lf = (struct roc_cpt_lf *)param;
 	uint64_t done_wait;
 	uint64_t intr;
 
diff --git a/drivers/net/cnxk/cn20k_ethdev_sec.c b/drivers/net/cnxk/cn20k_ethdev_sec.c
index 5553aed1b1..5e9212948b 100644
--- a/drivers/net/cnxk/cn20k_ethdev_sec.c
+++ b/drivers/net/cnxk/cn20k_ethdev_sec.c
@@ -439,18 +439,31 @@ cnxk_pktmbuf_free_no_cache(struct rte_mbuf *mbuf)
 }
 
 static void
-cn20k_eth_sec_post_event(struct rte_eth_dev *eth_dev, struct roc_ow_ipsec_outb_sa *sa,
+cn20k_eth_sec_post_event(struct rte_eth_dev *eth_dev, void *sa, enum nix_inl_event_type type,
 			 uint16_t uc_compcode, uint16_t compcode, struct rte_mbuf *mbuf)
 {
 	struct rte_eth_event_ipsec_desc desc;
 	struct cn20k_sec_sess_priv sess_priv;
-	struct cn20k_outb_priv_data *priv;
+	struct cn20k_outb_priv_data *outb_priv;
+	struct cn20k_inb_priv_data *inb_priv;
 	static uint64_t warn_cnt;
+	uint64_t life_unit;
 
 	memset(&desc, 0, sizeof(desc));
-	priv = roc_nix_inl_ow_ipsec_outb_sa_sw_rsvd(sa);
 	sess_priv.u64 = 0;
 
+	if (type == NIX_INL_INB_CPT_CQ) {
+		struct roc_ow_ipsec_inb_sa *inb_sa = (struct roc_ow_ipsec_inb_sa *)sa;
+		inb_priv = roc_nix_inl_ow_ipsec_inb_sa_sw_rsvd(sa);
+		desc.metadata = (uint64_t)inb_priv->userdata;
+		life_unit = inb_sa->w2.s.life_unit;
+	} else {
+		struct roc_ow_ipsec_outb_sa *outb_sa = (struct roc_ow_ipsec_outb_sa *)sa;
+		outb_priv = roc_nix_inl_ow_ipsec_outb_sa_sw_rsvd(sa);
+		desc.metadata = (uint64_t)outb_priv->userdata;
+		life_unit = outb_sa->w2.s.life_unit;
+	}
+
 	if (mbuf)
 		sess_priv.u64 = *rte_security_dynfield(mbuf);
 
@@ -459,14 +472,14 @@ cn20k_eth_sec_post_event(struct rte_eth_dev *eth_dev, struct roc_ow_ipsec_outb_s
 		desc.subtype = RTE_ETH_EVENT_IPSEC_ESN_OVERFLOW;
 		break;
 	case ROC_IE_OW_UCC_ERR_SA_EXPIRED:
-		if (sa->w2.s.life_unit == ROC_IE_OW_SA_LIFE_UNIT_PKTS)
+		if (life_unit == ROC_IE_OW_SA_LIFE_UNIT_PKTS)
 			desc.subtype = RTE_ETH_EVENT_IPSEC_SA_PKT_HARD_EXPIRY;
 		else
 			desc.subtype = RTE_ETH_EVENT_IPSEC_SA_BYTE_HARD_EXPIRY;
 		break;
 	case ROC_IE_OW_UCC_SUCCESS_SA_SOFTEXP_FIRST:
 	case ROC_IE_OW_UCC_SUCCESS_SA_SOFTEXP_AGAIN:
-		if (sa->w2.s.life_unit == ROC_IE_OW_SA_LIFE_UNIT_PKTS)
+		if (life_unit == ROC_IE_OW_SA_LIFE_UNIT_PKTS)
 			desc.subtype = RTE_ETH_EVENT_IPSEC_SA_PKT_EXPIRY;
 		else
 			desc.subtype = RTE_ETH_EVENT_IPSEC_SA_BYTE_EXPIRY;
@@ -490,7 +503,6 @@ cn20k_eth_sec_post_event(struct rte_eth_dev *eth_dev, struct roc_ow_ipsec_outb_s
 		break;
 	}
 
-	desc.metadata = (uint64_t)priv->userdata;
 	rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_IPSEC, &desc);
 }
 
@@ -498,12 +510,15 @@ static const char *
 get_inl_event_type(enum nix_inl_event_type type)
 {
 	switch (type) {
-	case NIX_INL_CPT_CQ:
-		return "NIX_INL_CPT_CQ";
+	case NIX_INL_OUTB_CPT_CQ:
+		return "NIX_INL_OUTB_CPT_CQ";
+	case NIX_INL_INB_CPT_CQ:
+		return "NIX_INL_INB_CPT_CQ";
 	case NIX_INL_SSO:
 		return "NIX_INL_SSO";
 	case NIX_INL_SOFT_EXPIRY_THRD:
 		return "NIX_INL_SOFT_EXPIRY_THRD";
+
 	default:
 		return "Unknown event";
 	}
@@ -515,8 +530,8 @@ cn20k_eth_sec_sso_work_cb(uint64_t *gw, void *args, enum nix_inl_event_type type
 {
 	struct rte_eth_event_ipsec_desc desc;
 	struct cn20k_sec_sess_priv sess_priv;
-	struct cn20k_outb_priv_data *priv;
-	struct roc_ow_ipsec_outb_sa *sa;
+	struct cn20k_outb_priv_data *outb_priv;
+	struct roc_ow_ipsec_outb_sa *outb_sa;
 	struct cpt_cn20k_res_s *res;
 	struct rte_eth_dev *eth_dev;
 	struct cnxk_eth_dev *dev;
@@ -546,20 +561,19 @@ cn20k_eth_sec_sso_work_cb(uint64_t *gw, void *args, enum nix_inl_event_type type
 		/* Fall through */
 	default:
 		if (type) {
-			sa = (struct roc_ow_ipsec_outb_sa *)args;
-			priv = roc_nix_inl_ow_ipsec_outb_sa_sw_rsvd(sa);
-			desc.metadata = (uint64_t)priv->userdata;
 			eth_dev = &rte_eth_devices[port_id];
-			if (type == NIX_INL_CPT_CQ) {
-				struct cpt_cq_s *cqs = (struct cpt_cq_s *)cq_s;
-
-				cn20k_eth_sec_post_event(eth_dev, sa,
+			struct cpt_cq_s *cqs = (struct cpt_cq_s *)cq_s;
+			if (type < NIX_INL_SSO) {
+				cn20k_eth_sec_post_event(eth_dev, args, type,
 							 (uint16_t)cqs->w0.s.uc_compcode,
 							 (uint16_t)cqs->w0.s.compcode, NULL);
 				return;
 			}
 			if (type == NIX_INL_SOFT_EXPIRY_THRD) {
-				if (sa->w2.s.life_unit == ROC_IE_OW_SA_LIFE_UNIT_PKTS)
+				outb_sa = (struct roc_ow_ipsec_outb_sa *)args;
+				outb_priv = roc_nix_inl_ow_ipsec_outb_sa_sw_rsvd(outb_sa);
+				desc.metadata = (uint64_t)outb_priv->userdata;
+				if (outb_sa->w2.s.life_unit == ROC_IE_OW_SA_LIFE_UNIT_PKTS)
 					desc.subtype = RTE_ETH_EVENT_IPSEC_SA_PKT_EXPIRY;
 				else
 					desc.subtype = RTE_ETH_EVENT_IPSEC_SA_BYTE_EXPIRY;
@@ -596,9 +610,9 @@ cn20k_eth_sec_sso_work_cb(uint64_t *gw, void *args, enum nix_inl_event_type type
 	sess_priv.u64 = *rte_security_dynfield(mbuf);
 
 	sa_base = dev->outb.sa_base;
-	sa = roc_nix_inl_ow_ipsec_outb_sa(sa_base, sess_priv.sa_idx);
+	outb_sa = roc_nix_inl_ow_ipsec_outb_sa(sa_base, sess_priv.sa_idx);
 
-	cn20k_eth_sec_post_event(eth_dev, sa, res->uc_compcode, res->compcode, mbuf);
+	cn20k_eth_sec_post_event(eth_dev, outb_sa, type, res->uc_compcode, res->compcode, mbuf);
 
 	cnxk_pktmbuf_free_no_cache(mbuf);
 }
-- 
2.34.1


^ permalink raw reply related	[flat|nested] 27+ messages in thread

* Re: [PATCH 1/8] net/cnxk: support of plain packet reassembly
  2026-02-19  9:08 [PATCH 1/8] net/cnxk: support of plain packet reassembly Rahul Bhansali
                   ` (6 preceding siblings ...)
  2026-02-19  9:08 ` [PATCH 8/8] common/cnxk: enable CPT CQ for inline IPSec inbound Rahul Bhansali
@ 2026-02-19 18:58 ` Stephen Hemminger
  2026-02-26 13:17 ` [PATCH v2 " Rahul Bhansali
  2026-02-27  4:37 ` [PATCH v3 1/8] net/cnxk: support of plain packet reassembly Rahul Bhansali
  9 siblings, 0 replies; 27+ messages in thread
From: Stephen Hemminger @ 2026-02-19 18:58 UTC (permalink / raw)
  To: Rahul Bhansali
  Cc: dev, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori,
	Satha Rao, Harman Kalra, jerinj, Rakesh Kudurumalla

On Thu, 19 Feb 2026 14:38:40 +0530
Rahul Bhansali <rbhansali@marvell.com> wrote:

> From: Rakesh Kudurumalla <rkudurumalla@marvell.com>
> 
> Adds support of plain packet reassembly by configuring
> UCAST_CPT rule.
> 
> Signed-off-by: Rakesh Kudurumalla <rkudurumalla@marvell.com>
> ---

Automated review found these issues to address.

Patch 1/8: net/cnxk: support of plain packet reassembly

&dev->rqs[i] can never be NULL — it is the address of an array element,
not a pointer that may be unset. The NULL check should instead be on
eth_dev->data->rx_queues[i], which is used immediately after without any
guard.

If roc_nix_inl_dev_rq_get() or
roc_npa_buf_type_update() fails mid-loop, RQs obtained in prior
iterations are never released via roc_nix_inl_dev_rq_put().

cnxk_nix_ip_reass_rule_set() returns -EIO on all error paths,
discarding the actual rc. The mcam_write_failed label also overwrites
rc with the result of roc_npc_mcam_free(), losing the original write
error. 

Missing co-developer Signed-off-by (sent by Rahul, From Rakesh).

Patch 2/8: net/cnxk: support IPsec Rx inject for cn20k

The statement "sa_base &= ~0xFFFFUL" sits inside the per-packet loop but
only takes effect on the first iteration. It should be hoisted above the
loop for clarity (and to avoid redundant per-packet work).

Release notes: "CN20k Soc" should be "CN20K SoC" to match patch 1.

Patch 8/8: common/cnxk: enable CPT CQ for inline IPSec inbound

nix_inl_cpt_cq_inb_setup(): if cpt_lf_register_irqs() fails, the CQ
from cpt_lf_cq_init() on that LF is not cleaned up, and prior
successfully-setup LFs may have inconsistent state during the error
path teardown.

Unnecessary cast of void * to struct roc_cpt_lf * in nix_inl_cpt_done_irq().

^ permalink raw reply	[flat|nested] 27+ messages in thread

* Re: [PATCH 8/8] common/cnxk: enable CPT CQ for inline IPSec inbound
  2026-02-19  9:08 ` [PATCH 8/8] common/cnxk: enable CPT CQ for inline IPSec inbound Rahul Bhansali
@ 2026-02-26  5:05   ` Jerin Jacob
  0 siblings, 0 replies; 27+ messages in thread
From: Jerin Jacob @ 2026-02-26  5:05 UTC (permalink / raw)
  To: Rahul Bhansali
  Cc: dev, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori,
	Satha Rao, Harman Kalra, jerinj, Rakesh Kudurumalla

On Thu, Feb 19, 2026 at 2:39 PM Rahul Bhansali <rbhansali@marvell.com> wrote:
>
> From: Rakesh Kudurumalla <rkudurumalla@marvell.com>
>
> Added support of CPT CQ configurations for inline inbound IPsec.
>
> Signed-off-by: Rakesh Kudurumalla <rkudurumalla@marvell.com>

Fix

Wrong headline case:
                        "common/cnxk: enable CPT CQ for inline IPSec
inbound": IPSec --> IPsec

Invalid patch(es) found - checked 8 patches
check-git-log failed

### [PATCH] net/cnxk: support of plain packet reassembly

__rte_internal must appear alone on the line immediately preceding the
return type of a function.
__rte_internal must appear alone on the line immediately preceding the
return type of a function.

7/8 valid patches
checkpatch failed

^ permalink raw reply	[flat|nested] 27+ messages in thread

* [PATCH v2 1/8] net/cnxk: support of plain packet reassembly
  2026-02-19  9:08 [PATCH 1/8] net/cnxk: support of plain packet reassembly Rahul Bhansali
                   ` (7 preceding siblings ...)
  2026-02-19 18:58 ` [PATCH 1/8] net/cnxk: support of plain packet reassembly Stephen Hemminger
@ 2026-02-26 13:17 ` Rahul Bhansali
  2026-02-26 13:17   ` [PATCH v2 2/8] net/cnxk: support IPsec Rx inject for cn20k Rahul Bhansali
                     ` (6 more replies)
  2026-02-27  4:37 ` [PATCH v3 1/8] net/cnxk: support of plain packet reassembly Rahul Bhansali
  9 siblings, 7 replies; 27+ messages in thread
From: Rahul Bhansali @ 2026-02-26 13:17 UTC (permalink / raw)
  To: dev, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori,
	Satha Rao, Harman Kalra
  Cc: jerinj, Rakesh Kudurumalla

From: Rakesh Kudurumalla <rkudurumalla@marvell.com>

Add support for plain packet reassembly by configuring
a UCAST_CPT rule.

Signed-off-by: Rakesh Kudurumalla <rkudurumalla@marvell.com>
---
Changes in v2: Updated doc, fix cleanup on configuration failure cases.

 doc/guides/nics/cnxk.rst                      |   1 +
 doc/guides/rel_notes/release_26_03.rst        |   1 +
 drivers/common/cnxk/roc_nix_inl.h             |   2 +-
 .../common/cnxk/roc_platform_base_symbols.c   |   1 +
 drivers/net/cnxk/cn20k_ethdev.c               |  94 +++++--
 drivers/net/cnxk/cn20k_rx.h                   |   6 +-
 drivers/net/cnxk/cnxk_ethdev.c                | 233 ++++++++++++++----
 drivers/net/cnxk/cnxk_ethdev.h                |   8 +
 8 files changed, 287 insertions(+), 59 deletions(-)

diff --git a/doc/guides/nics/cnxk.rst b/doc/guides/nics/cnxk.rst
index 4105b101b2..9e758a1b5e 100644
--- a/doc/guides/nics/cnxk.rst
+++ b/doc/guides/nics/cnxk.rst
@@ -40,6 +40,7 @@ Features of the CNXK Ethdev PMD are:
 - Port representors
 - Represented port pattern matching and action
 - Port representor pattern matching and action
+- Plain packet reassembly on CN20K SoC family

 Prerequisites
 -------------
diff --git a/doc/guides/rel_notes/release_26_03.rst b/doc/guides/rel_notes/release_26_03.rst
index b4499ec066..b1f9b3c82b 100644
--- a/doc/guides/rel_notes/release_26_03.rst
+++ b/doc/guides/rel_notes/release_26_03.rst
@@ -80,6 +80,7 @@ New Features
 * **Updated Marvell cnxk net driver.**

   * Added out-of-place support for CN20K SoC.
+  * Added plain packet reassembly support for CN20K SoC.

 * **Updated ZTE zxdh ethernet driver.**

diff --git a/drivers/common/cnxk/roc_nix_inl.h b/drivers/common/cnxk/roc_nix_inl.h
index 68f395438c..596f12d1c7 100644
--- a/drivers/common/cnxk/roc_nix_inl.h
+++ b/drivers/common/cnxk/roc_nix_inl.h
@@ -160,7 +160,7 @@ bool __roc_api roc_nix_inl_inb_is_enabled(struct roc_nix *roc_nix);
 uintptr_t __roc_api roc_nix_inl_inb_sa_base_get(struct roc_nix *roc_nix,
 						bool inl_dev_sa);
 uint16_t roc_nix_inl_inb_ipsec_profile_id_get(struct roc_nix *roc_nix, bool inb_inl_dev);
-uint16_t roc_nix_inl_inb_reass_profile_id_get(struct roc_nix *roc_nix, bool inb_inl_dev);
+uint16_t __roc_api roc_nix_inl_inb_reass_profile_id_get(struct roc_nix *roc_nix, bool inb_inl_dev);
 bool __roc_api roc_nix_inl_inb_rx_inject_enable(struct roc_nix *roc_nix, bool inl_dev_sa);
 uint32_t __roc_api roc_nix_inl_inb_spi_range(struct roc_nix *roc_nix,
 					     bool inl_dev_sa, uint32_t *min,
diff --git a/drivers/common/cnxk/roc_platform_base_symbols.c b/drivers/common/cnxk/roc_platform_base_symbols.c
index 79dd18fbd7..2c73efd877 100644
--- a/drivers/common/cnxk/roc_platform_base_symbols.c
+++ b/drivers/common/cnxk/roc_platform_base_symbols.c
@@ -228,6 +228,7 @@ RTE_EXPORT_INTERNAL_SYMBOL(roc_nix_tm_dump)
 RTE_EXPORT_INTERNAL_SYMBOL(roc_nix_tm_sdp_prepare_tree)
 RTE_EXPORT_INTERNAL_SYMBOL(roc_nix_dump)
 RTE_EXPORT_INTERNAL_SYMBOL(roc_nix_inl_dev_dump)
+RTE_EXPORT_INTERNAL_SYMBOL(roc_nix_inl_inb_reass_profile_id_get)
 RTE_EXPORT_INTERNAL_SYMBOL(roc_nix_inl_outb_cpt_lfs_dump)
 RTE_EXPORT_INTERNAL_SYMBOL(roc_nix_sq_desc_dump)
 RTE_EXPORT_INTERNAL_SYMBOL(roc_nix_fc_config_get)
diff --git a/drivers/net/cnxk/cn20k_ethdev.c b/drivers/net/cnxk/cn20k_ethdev.c
index 7e9e32f80b..4a3d163c75 100644
--- a/drivers/net/cnxk/cn20k_ethdev.c
+++ b/drivers/net/cnxk/cn20k_ethdev.c
@@ -616,22 +616,17 @@ static int
 cn20k_nix_reassembly_capability_get(struct rte_eth_dev *eth_dev,
 				    struct rte_eth_ip_reassembly_params *reassembly_capa)
 {
-	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
-	int rc = -ENOTSUP;
 	RTE_SET_USED(eth_dev);

 	if (!roc_feature_nix_has_reass())
 		return -ENOTSUP;

-	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
-		reassembly_capa->timeout_ms = 60 * 1000;
-		reassembly_capa->max_frags = 4;
-		reassembly_capa->flags =
-			RTE_ETH_DEV_REASSEMBLY_F_IPV4 | RTE_ETH_DEV_REASSEMBLY_F_IPV6;
-		rc = 0;
-	}
+	reassembly_capa->timeout_ms = 60 * 1000;
+	reassembly_capa->max_frags = 8;
+	reassembly_capa->flags =
+		RTE_ETH_DEV_REASSEMBLY_F_IPV4 | RTE_ETH_DEV_REASSEMBLY_F_IPV6;

-	return rc;
+	return 0;
 }

 static int
@@ -649,7 +644,10 @@ cn20k_nix_reassembly_conf_set(struct rte_eth_dev *eth_dev,
 {
 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
 	struct roc_cpt_rxc_time_cfg rxc_time_cfg = {0};
-	int rc = 0;
+	uint16_t nb_rxq = dev->nb_rxq;
+	int rc = 0, i, rxq_cnt = 0;
+	struct cn20k_eth_rxq *rxq;
+	struct roc_nix_rq *rq;

 	if (!roc_feature_nix_has_reass())
 		return -ENOTSUP;
@@ -659,15 +657,83 @@ cn20k_nix_reassembly_conf_set(struct rte_eth_dev *eth_dev,
 		if (!dev->inb.nb_oop)
 			dev->rx_offload_flags &= ~NIX_RX_REAS_F;
 		dev->inb.reass_en = false;
+		if (dev->ip_reass_en) {
+			cnxk_nix_ip_reass_rule_clr(eth_dev);
+			dev->ip_reass_en = false;
+		}
 		return 0;
 	}

+	if (!(dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY)) {
+		rc = cnxk_nix_inline_inbound_setup(dev);
+		if (rc) {
+			plt_err("Nix inline inbound setup failed rc=%d", rc);
+			goto done;
+		}
+
+		rc = cnxk_nix_inline_inbound_mode_setup(dev);
+		if (rc) {
+			plt_err("Nix inline inbound mode setup failed rc=%d", rc);
+			goto cleanup;
+		}
+
+		for (i = 0; i < nb_rxq; i++) {
+			rq = &dev->rqs[i];
+			rxq = eth_dev->data->rx_queues[i];
+
+			if (!rxq) {
+				plt_err("Receive queue = %d not enabled", i);
+				rc = -EINVAL;
+				goto cleanup;
+			}
+
+			roc_nix_inl_dev_xaq_realloc(rq->aura_handle);
+
+			rq->tag_mask = 0x0FF00000 | ((uint32_t)RTE_EVENT_TYPE_ETHDEV << 28);
+			rc = roc_nix_inl_dev_rq_get(rq, !!eth_dev->data->dev_started);
+			if (rc)
+				goto cleanup;
+
+			rxq->lmt_base = dev->nix.lmt_base;
+			rxq->sa_base = roc_nix_inl_inb_sa_base_get(&dev->nix, dev->inb.inl_dev);
+			rc = roc_npa_buf_type_update(rq->aura_handle,
+						     ROC_NPA_BUF_TYPE_PACKET_IPSEC, 1);
+			if (rc)
+				goto cleanup;
+
+			rxq_cnt = i + 1;
+		}
+	}
+
 	rc = roc_nix_reassembly_configure(&rxc_time_cfg, conf->timeout_ms);
-	if (!rc && dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
-		dev->rx_offload_flags |= NIX_RX_REAS_F;
-		dev->inb.reass_en = true;
+	if (rc) {
+		plt_err("Nix reassembly_configure failed rc=%d", rc);
+		goto cleanup;
 	}

+	dev->rx_offload_flags |= NIX_RX_REAS_F | NIX_RX_OFFLOAD_SECURITY_F;
+	dev->inb.reass_en = !!((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY));
+
+	if (!dev->ip_reass_en) {
+		rc = cnxk_nix_ip_reass_rule_set(eth_dev, 0);
+		if (rc) {
+			plt_err("Nix reassembly rule setup failed rc=%d", rc);
+			goto cleanup;
+		}
+	}
+
+	return 0;
+cleanup:
+	dev->inb.reass_en = false;
+	if (!(dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY)) {
+		rc |= cnxk_nix_inl_inb_fini(dev);
+		for (i = 0; i < rxq_cnt; i++) {
+			struct roc_nix_rq *rq = &dev->rqs[i];
+
+			roc_nix_inl_dev_rq_put(rq);
+		}
+	}
+done:
 	return rc;
 }

diff --git a/drivers/net/cnxk/cn20k_rx.h b/drivers/net/cnxk/cn20k_rx.h
index 83c222c53c..d6c217cdf5 100644
--- a/drivers/net/cnxk/cn20k_rx.h
+++ b/drivers/net/cnxk/cn20k_rx.h
@@ -258,7 +258,8 @@ nix_sec_meta_to_mbuf_sc(uint64_t cq_w5, uint64_t cpth, const uint64_t sa_base,
 			*rte_security_dynfield(mbuf) = (uint64_t)inb_priv->userdata;
 	} else {
 		/* Update dynamic field with userdata */
-		*rte_security_dynfield(mbuf) = (uint64_t)inb_priv->userdata;
+		if (flags & NIX_RX_REAS_F && inb_priv->userdata)
+			*rte_security_dynfield(mbuf) = (uint64_t)inb_priv->userdata;
 	}

 	*len = ((w3 >> 48) & 0xFFFF) + ((cq_w5 >> 16) & 0xFF) - (cq_w5 & 0xFF);
@@ -917,7 +918,8 @@ nix_sec_meta_to_mbuf(uintptr_t inb_sa, uintptr_t cpth, struct rte_mbuf **inner,
 		/* Get SPI from CPT_PARSE_S's cookie(already swapped) */
 		inb_priv = roc_nix_inl_ot_ipsec_inb_sa_sw_rsvd((void *)inb_sa);
 		/* Update dynamic field with userdata */
-		*rte_security_dynfield(inner_m) = (uint64_t)inb_priv->userdata;
+		if (flags & NIX_RX_REAS_F && inb_priv->userdata)
+			*rte_security_dynfield(inner_m) = (uint64_t)inb_priv->userdata;
 	}

 	/* Clear and update original lower 16 bit of data offset */
diff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c
index ff78622b58..ba8ac52b46 100644
--- a/drivers/net/cnxk/cnxk_ethdev.c
+++ b/drivers/net/cnxk/cnxk_ethdev.c
@@ -7,6 +7,11 @@
 #include <eal_export.h>
 #include <rte_eventdev.h>
 #include <rte_pmd_cnxk.h>
+#include "roc_priv.h"
+
+#define REASS_PRIORITY             0
+#define CLS_LTYPE_OFFSET_START     7
+#define CLS_LFLAGS_LC_OFFSET (CLS_LTYPE_OFFSET_START + 4)

 static const uint32_t cnxk_mac_modes[CGX_MODE_MAX + 1] = {
 	[CGX_MODE_SGMII] = RTE_ETH_LINK_SPEED_1G,
@@ -203,46 +208,160 @@ cnxk_nix_inb_mode_set(struct cnxk_eth_dev *dev, bool use_inl_dev)
 	return cnxk_nix_lookup_mem_sa_base_set(dev);
 }

-static int
-nix_security_setup(struct cnxk_eth_dev *dev)
+int
+cnxk_nix_inline_inbound_mode_setup(struct cnxk_eth_dev *dev)
+{
+	int rc = 0;
+
+	/* By default pick using inline device for poll mode.
+	 * Will be overridden when event mode rq's are setup.
+	 */
+	cnxk_nix_inb_mode_set(dev, !dev->inb.no_inl_dev);
+
+	/* Allocate memory to be used as dptr for CPT ucode
+	 * WRITE_SA op.
+	 */
+	dev->inb.sa_dptr =
+		plt_zmalloc(ROC_NIX_INL_OT_IPSEC_INB_HW_SZ, 0);
+	if (!dev->inb.sa_dptr) {
+		plt_err("Couldn't allocate memory for SA dptr");
+		rc = -ENOMEM;
+		goto cleanup;
+	}
+	dev->inb.inl_dev_q = roc_nix_inl_dev_qptr_get(0);
+cleanup:
+	return rc;
+}
+
+static void
+cnxk_flow_ipfrag_set(struct roc_npc_flow *flow, struct roc_npc *npc)
+{
+	uint8_t lc_offset;
+	uint64_t mask;
+
+	lc_offset = rte_popcount64(npc->rx_parse_nibble & ((1ULL << CLS_LFLAGS_LC_OFFSET) - 1));
+
+	lc_offset *= 4;
+
+	mask = (~(0xffULL << lc_offset));
+	flow->mcam_data[0] &= mask;
+	flow->mcam_mask[0] &= mask;
+	flow->mcam_data[0] |= (0x02ULL << lc_offset);
+	flow->mcam_mask[0] |= (0x82ULL << lc_offset);
+}
+
+int
+cnxk_nix_ip_reass_rule_set(struct rte_eth_dev *eth_dev, uint32_t rq)
+{
+	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+	struct nix_rx_action2_s *action2;
+	struct nix_rx_action_s *action;
+	struct roc_npc_flow mcam;
+	int prio = 0, rc = 0;
+	struct roc_npc *npc;
+	int resp_count = 0;
+	bool inl_dev;
+
+	npc = &dev->npc;
+	inl_dev = roc_nix_inb_is_with_inl_dev(&dev->nix);
+
+	prio = REASS_PRIORITY;
+	memset(&mcam, 0, sizeof(struct roc_npc_flow));
+
+	action = (struct nix_rx_action_s *)&mcam.npc_action;
+	action2 = (struct nix_rx_action2_s *)&mcam.npc_action2;
+
+	if (inl_dev) {
+		struct roc_nix_rq *inl_rq;
+
+		inl_rq = roc_nix_inl_dev_rq(&dev->nix);
+		if (!inl_rq) {
+			plt_err("Failed to get inline dev rq for %d", dev->nix.port_id);
+			goto mcam_alloc_failed;
+		}
+		action->pf_func = roc_idev_nix_inl_dev_pffunc_get();
+		action->index = inl_rq->qid;
+	} else {
+		action->pf_func = npc->pf_func;
+		action->index = rq;
+	}
+	action->op = NIX_RX_ACTIONOP_UCAST_CPT;
+
+	action2->inline_profile_id = roc_nix_inl_inb_reass_profile_id_get(npc->roc_nix, inl_dev);
+
+	rc = roc_npc_mcam_merge_base_steering_rule(npc, &mcam);
+	if (rc < 0)
+		goto mcam_alloc_failed;
+
+	/* Channel[11] should be 'b0 */
+	mcam.mcam_data[0] &= (~0xfffULL);
+	mcam.mcam_mask[0] &= (~0xfffULL);
+	mcam.mcam_data[0] |= (uint64_t)(npc->channel & 0x7ff);
+	mcam.mcam_mask[0] |= (BIT_ULL(12) - 1);
+	cnxk_flow_ipfrag_set(&mcam, npc);
+
+	mcam.priority = prio;
+	mcam.key_type = roc_npc_get_key_type(npc, &mcam);
+	rc = roc_npc_mcam_alloc_entry(npc, &mcam, NULL, prio, &resp_count);
+	if (rc || resp_count == 0)
+		goto mcam_alloc_failed;
+
+	mcam.enable = true;
+	rc = roc_npc_mcam_write_entry(npc, &mcam);
+	if (rc < 0)
+		goto mcam_write_failed;
+
+	dev->ip_reass_rule_id = mcam.mcam_id;
+	dev->ip_reass_en = true;
+	return 0;
+
+mcam_write_failed:
+	rc |= roc_npc_mcam_free(npc, &mcam);
+	if (rc)
+		return rc;
+mcam_alloc_failed:
+	return -EIO;
+}
+
+int
+cnxk_nix_inline_inbound_setup(struct cnxk_eth_dev *dev)
 {
 	struct roc_nix *nix = &dev->nix;
-	int i, rc = 0;
+	int rc = 0;

-	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
-		/* Setup minimum SA table when inline device is used */
-		nix->ipsec_in_min_spi = dev->inb.no_inl_dev ? dev->inb.min_spi : 0;
-		nix->ipsec_in_max_spi = dev->inb.no_inl_dev ? dev->inb.max_spi : 1;
+	/* Setup minimum SA table when inline device is used */
+	nix->ipsec_in_min_spi = dev->inb.no_inl_dev ? dev->inb.min_spi : 0;
+	nix->ipsec_in_max_spi = dev->inb.no_inl_dev ? dev->inb.max_spi : 1;

-		/* Enable custom meta aura when multi-chan is used */
-		if (nix->local_meta_aura_ena && roc_nix_inl_dev_is_multi_channel() &&
-		    !dev->inb.custom_meta_aura_dis)
-			nix->custom_meta_aura_ena = true;
+	/* Enable custom meta aura when multi-chan is used */
+	if (nix->local_meta_aura_ena && roc_nix_inl_dev_is_multi_channel() &&
+	    !dev->inb.custom_meta_aura_dis)
+		nix->custom_meta_aura_ena = true;

-		/* Setup Inline Inbound */
-		rc = roc_nix_inl_inb_init(nix);
-		if (rc) {
-			plt_err("Failed to initialize nix inline inb, rc=%d",
+	/* Setup Inline Inbound */
+	rc = roc_nix_inl_inb_init(nix);
+	if (rc) {
+		plt_err("Failed to initialize nix inline inb, rc=%d",
 				rc);
-			return rc;
-		}
+		return rc;
+	}

-		/* By default pick using inline device for poll mode.
-		 * Will be overridden when event mode rq's are setup.
-		 */
-		cnxk_nix_inb_mode_set(dev, !dev->inb.no_inl_dev);
+	return 0;
+}

-		/* Allocate memory to be used as dptr for CPT ucode
-		 * WRITE_SA op.
-		 */
-		dev->inb.sa_dptr =
-			plt_zmalloc(ROC_NIX_INL_OT_IPSEC_INB_HW_SZ, 0);
-		if (!dev->inb.sa_dptr) {
-			plt_err("Couldn't allocate memory for SA dptr");
-			rc = -ENOMEM;
+static int
+nix_security_setup(struct cnxk_eth_dev *dev)
+{
+	struct roc_nix *nix = &dev->nix;
+	int i, rc = 0;
+
+	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
+		rc = cnxk_nix_inline_inbound_setup(dev);
+		if (rc)
+			return rc;
+		rc = cnxk_nix_inline_inbound_mode_setup(dev);
+		if (rc)
 			goto cleanup;
-		}
-		dev->inb.inl_dev_q = roc_nix_inl_dev_qptr_get(0);
 	}

 	if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY ||
@@ -365,6 +484,22 @@ nix_meter_fini(struct cnxk_eth_dev *dev)
 	return 0;
 }

+int
+cnxk_nix_inl_inb_fini(struct cnxk_eth_dev *dev)
+{
+	struct roc_nix *nix = &dev->nix;
+	int rc;
+
+	if (dev->inb.sa_dptr) {
+		plt_free(dev->inb.sa_dptr);
+		dev->inb.sa_dptr = NULL;
+	}
+	rc = roc_nix_inl_inb_fini(nix);
+	if (rc)
+		plt_err("Failed to cleanup nix inline inb, rc=%d", rc);
+	return rc;
+}
+
 static int
 nix_security_release(struct cnxk_eth_dev *dev)
 {
@@ -374,7 +509,7 @@ nix_security_release(struct cnxk_eth_dev *dev)
 	int rc, ret = 0;

 	/* Cleanup Inline inbound */
-	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
+	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY || dev->ip_reass_en) {
 		/* Destroy inbound sessions */
 		tvar = NULL;
 		RTE_TAILQ_FOREACH_SAFE(eth_sec, &dev->inb.list, entry, tvar)
@@ -384,17 +519,14 @@ nix_security_release(struct cnxk_eth_dev *dev)
 		/* Clear lookup mem */
 		cnxk_nix_lookup_mem_sa_base_clear(dev);

-		rc = roc_nix_inl_inb_fini(nix);
-		if (rc)
-			plt_err("Failed to cleanup nix inline inb, rc=%d", rc);
-		ret |= rc;
+		ret |= cnxk_nix_inl_inb_fini(dev);

 		cnxk_nix_lookup_mem_metapool_clear(dev);
+	}

-		if (dev->inb.sa_dptr) {
-			plt_free(dev->inb.sa_dptr);
-			dev->inb.sa_dptr = NULL;
-		}
+	if (dev->ip_reass_en) {
+		cnxk_nix_ip_reass_rule_clr(eth_dev);
+		dev->ip_reass_en = false;
 	}

 	/* Cleanup Inline outbound */
@@ -946,7 +1078,7 @@ cnxk_nix_rx_queue_release(struct rte_eth_dev *eth_dev, uint16_t qid)
 	plt_nix_dbg("Releasing rxq %u", qid);

 	/* Release rq reference for inline dev if present */
-	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY)
+	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY || dev->ip_reass_en)
 		roc_nix_inl_dev_rq_put(rq);

 	/* Cleanup ROC RQ */
@@ -1760,6 +1892,18 @@ cnxk_nix_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qid)
 	return rc;
 }

+int
+cnxk_nix_ip_reass_rule_clr(struct rte_eth_dev *eth_dev)
+{
+	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+	struct roc_npc *npc = &dev->npc;
+
+	if (dev->ip_reass_en)
+		return roc_npc_mcam_free_entry(npc, dev->ip_reass_rule_id);
+	else
+		return 0;
+}
+
 static int
 cnxk_nix_dev_stop(struct rte_eth_dev *eth_dev)
 {
@@ -1842,7 +1986,7 @@ cnxk_nix_dev_start(struct rte_eth_dev *eth_dev)
 			return rc;
 	}

-	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
+	if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) || dev->ip_reass_en) {
 		rc = roc_nix_inl_rq_ena_dis(&dev->nix, true);
 		if (rc) {
 			plt_err("Failed to enable Inline device RQ, rc=%d", rc);
@@ -2258,6 +2402,11 @@ cnxk_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool reset)
 	/* Disable and free rte_meter entries */
 	nix_meter_fini(dev);

+	if (dev->ip_reass_en) {
+		cnxk_nix_ip_reass_rule_clr(eth_dev);
+		dev->ip_reass_en = false;
+	}
+
 	/* Disable and free rte_flow entries */
 	roc_npc_fini(&dev->npc);

diff --git a/drivers/net/cnxk/cnxk_ethdev.h b/drivers/net/cnxk/cnxk_ethdev.h
index 3d0a587406..1b63b02ad8 100644
--- a/drivers/net/cnxk/cnxk_ethdev.h
+++ b/drivers/net/cnxk/cnxk_ethdev.h
@@ -427,6 +427,8 @@ struct cnxk_eth_dev {
 	/* Reassembly dynfield/flag offsets */
 	int reass_dynfield_off;
 	int reass_dynflag_bit;
+	uint32_t ip_reass_rule_id;
+	bool ip_reass_en;

 	/* MCS device */
 	struct cnxk_mcs_dev *mcs_dev;
@@ -645,6 +647,10 @@ int cnxk_nix_tm_set_queue_rate_limit(struct rte_eth_dev *eth_dev,
 int cnxk_nix_tm_mark_vlan_dei(struct rte_eth_dev *eth_dev, int mark_green,
 			      int mark_yellow, int mark_red,
 			      struct rte_tm_error *error);
+int cnxk_nix_ip_reass_rule_clr(struct rte_eth_dev *eth_dev);
+int cnxk_nix_ip_reass_rule_set(struct rte_eth_dev *eth_dev, uint32_t rq);
+int cnxk_nix_inl_inb_fini(struct cnxk_eth_dev *dev);
+
 int cnxk_nix_tm_mark_ip_ecn(struct rte_eth_dev *eth_dev, int mark_green,
 			    int mark_yellow, int mark_red,
 			    struct rte_tm_error *error);
@@ -736,6 +742,8 @@ __rte_internal
 int cnxk_nix_inb_mode_set(struct cnxk_eth_dev *dev, bool use_inl_dev);
 __rte_internal
 void cnxk_ethdev_rx_offload_cb_register(cnxk_ethdev_rx_offload_cb_t cb);
+int cnxk_nix_inline_inbound_setup(struct cnxk_eth_dev *dev);
+int cnxk_nix_inline_inbound_mode_setup(struct cnxk_eth_dev *dev);

 struct cnxk_eth_sec_sess *cnxk_eth_sec_sess_get_by_sa_idx(struct cnxk_eth_dev *dev,
 							  uint32_t sa_idx, bool inb);
--
2.34.1


^ permalink raw reply related	[flat|nested] 27+ messages in thread

* [PATCH v2 2/8] net/cnxk: support IPsec Rx inject for cn20k
  2026-02-26 13:17 ` [PATCH v2 " Rahul Bhansali
@ 2026-02-26 13:17   ` Rahul Bhansali
  2026-02-26 13:17   ` [PATCH v2 3/8] common/cnxk: update platform features Rahul Bhansali
                     ` (5 subsequent siblings)
  6 siblings, 0 replies; 27+ messages in thread
From: Rahul Bhansali @ 2026-02-26 13:17 UTC (permalink / raw)
  To: dev, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori,
	Satha Rao, Harman Kalra
  Cc: jerinj, Rahul Bhansali

Adds support of IPsec Rx inject handling for cn20k platform.

Signed-off-by: Rahul Bhansali <rbhansali@marvell.com>
---
Changes in v2: fix release notes.

 doc/guides/rel_notes/release_26_03.rst |   1 +
 drivers/net/cnxk/cn20k_ethdev_sec.c    |  50 +++++++
 drivers/net/cnxk/cn20k_rx.h            | 174 +++++++++++++++++++++++++
 3 files changed, 225 insertions(+)

diff --git a/doc/guides/rel_notes/release_26_03.rst b/doc/guides/rel_notes/release_26_03.rst
index b1f9b3c82b..62246881d1 100644
--- a/doc/guides/rel_notes/release_26_03.rst
+++ b/doc/guides/rel_notes/release_26_03.rst
@@ -81,6 +81,7 @@ New Features

   * Added out-of-place support for CN20K SoC.
   * Added plain packet reassembly support for CN20K SoC.
+  * Added IPsec Rx inject support for CN20K SoC.

 * **Updated ZTE zxdh ethernet driver.**

diff --git a/drivers/net/cnxk/cn20k_ethdev_sec.c b/drivers/net/cnxk/cn20k_ethdev_sec.c
index c6a51f99f5..e406f0e879 100644
--- a/drivers/net/cnxk/cn20k_ethdev_sec.c
+++ b/drivers/net/cnxk/cn20k_ethdev_sec.c
@@ -1172,6 +1172,54 @@ eth_sec_caps_add(struct rte_security_capability eth_sec_caps[], uint32_t *idx,
 	*idx += nb_caps;
 }

+static uint16_t __rte_hot
+cn20k_eth_sec_inb_rx_inject(void *device, struct rte_mbuf **pkts,
+			    struct rte_security_session **sess, uint16_t nb_pkts)
+{
+	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
+	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+
+	return cn20k_nix_inj_pkts(sess, &dev->inj_cfg, pkts, nb_pkts);
+}
+
+static int
+cn20k_eth_sec_rx_inject_config(void *device, uint16_t port_id, bool enable)
+{
+	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
+	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+	uint64_t channel, pf_func, inj_match_id = 0xFFFFUL;
+	struct cnxk_ethdev_inj_cfg *inj_cfg;
+	struct roc_nix *nix = &dev->nix;
+	struct roc_cpt_lf *inl_lf;
+	uint64_t sa_base;
+
+	if (!rte_eth_dev_is_valid_port(port_id))
+		return -EINVAL;
+
+	if (eth_dev->data->dev_started || !eth_dev->data->dev_configured)
+		return -EBUSY;
+
+	if (!roc_nix_inl_inb_rx_inject_enable(nix, dev->inb.inl_dev))
+		return -ENOTSUP;
+
+	roc_idev_nix_rx_inject_set(port_id, enable);
+
+	inl_lf = roc_nix_inl_inb_inj_lf_get(nix);
+	if (!inl_lf)
+		return -ENOTSUP;
+	sa_base = roc_nix_inl_inb_sa_base_get(nix, dev->inb.inl_dev);
+
+	inj_cfg = &dev->inj_cfg;
+	inj_cfg->sa_base = sa_base | eth_dev->data->port_id;
+	inj_cfg->io_addr = inl_lf->io_addr;
+	inj_cfg->lmt_base = nix->lmt_base;
+	channel = roc_nix_get_base_chan(nix);
+	pf_func = roc_idev_nix_inl_dev_pffunc_get();
+	inj_cfg->cmd_w0 = pf_func << 48 | inj_match_id << 32 | channel << 4;
+
+	return 0;
+}
+
 #define CPT_LMTST_BURST 32
 static uint16_t
 cn20k_inl_dev_submit(struct roc_nix_inl_dev_q *q, void *inst, uint16_t nb_inst)
@@ -1233,6 +1281,8 @@ cn20k_eth_sec_ops_override(void)
 	cnxk_eth_sec_ops.capabilities_get = cn20k_eth_sec_capabilities_get;
 	cnxk_eth_sec_ops.session_update = cn20k_eth_sec_session_update;
 	cnxk_eth_sec_ops.session_stats_get = cn20k_eth_sec_session_stats_get;
+	cnxk_eth_sec_ops.rx_inject_configure = cn20k_eth_sec_rx_inject_config;
+	cnxk_eth_sec_ops.inb_pkt_rx_inject = cn20k_eth_sec_inb_rx_inject;

 	/* Update platform specific rte_pmd_cnxk ops */
 	cnxk_pmd_ops.inl_dev_submit = cn20k_inl_dev_submit;
diff --git a/drivers/net/cnxk/cn20k_rx.h b/drivers/net/cnxk/cn20k_rx.h
index d6c217cdf5..f8fa6de2b9 100644
--- a/drivers/net/cnxk/cn20k_rx.h
+++ b/drivers/net/cnxk/cn20k_rx.h
@@ -890,6 +890,169 @@ cn20k_nix_flush_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pk

 #if defined(RTE_ARCH_ARM64)

+static __rte_always_inline uint16_t
+cn20k_nix_rx_inj_prepare_mseg(struct rte_mbuf *m, uint64_t *cmd)
+{
+	union nix_send_sg_s *sg, l_sg;
+	struct rte_mbuf *m_next;
+	uint16_t segdw, nb_segs;
+	uint64_t len, dlen;
+	uint64_t *slist;
+
+	sg = (union nix_send_sg_s *)cmd;
+	l_sg.u = sg->u;
+	l_sg.u &= 0xC00000000000000; /* LD_TYPE */
+	l_sg.subdc = NIX_SUBDC_SG;
+	nb_segs = m->nb_segs;
+	len = m->pkt_len;
+	slist = &cmd[1];
+
+	/* Fill mbuf segments */
+	do {
+		*slist = rte_pktmbuf_iova(m);
+		dlen = m->data_len;
+		len -= dlen;
+
+		/* Set the segment length */
+		l_sg.u |= ((uint64_t)dlen << (l_sg.segs << 4));
+		l_sg.segs += 1;
+		slist++;
+		nb_segs--;
+		if (l_sg.segs > 2 && nb_segs) {
+			sg->u = l_sg.u;
+			/* Next SG subdesc */
+			sg = (union nix_send_sg_s *)slist;
+			l_sg.u = sg->u;
+			l_sg.u &= 0xC00000000000000; /* LD_TYPE */
+			l_sg.subdc = NIX_SUBDC_SG;
+			slist++;
+		}
+		m_next = m->next;
+		m = m_next;
+	} while (nb_segs);
+
+	/* Add remaining bytes of data to last seg */
+	if (len) {
+		uint8_t shft = (l_sg.subdc == NIX_SUBDC_SG) ? ((l_sg.segs - 1) << 4) : 0;
+		dlen = ((l_sg.u >> shft) & 0xFFFFULL) + len;
+		l_sg.u = l_sg.u & ~(0xFFFFULL << shft);
+		l_sg.u |= dlen << shft;
+	}
+	/* Write the last subdc out */
+	sg->u = l_sg.u;
+
+	segdw = (uint64_t *)slist - cmd;
+	/* Roundup extra dwords to multiple of 2 */
+	segdw = (segdw >> 1) + (segdw & 0x1);
+	return segdw;
+}
+
+static __rte_always_inline uint16_t
+cn20k_nix_inj_pkts(struct rte_security_session **sess, struct cnxk_ethdev_inj_cfg *inj_cfg,
+		   struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+	uintptr_t c_lbase = inj_cfg->lmt_base;
+	struct cn20k_sec_sess_priv sess_priv;
+	uint64_t sa_base = inj_cfg->sa_base;
+	uint16_t c_lmt_id, burst, left, i;
+	uintptr_t cptres, rxphdr, dptr;
+	struct rte_mbuf *m, *last;
+	uint64_t sa, w0, gthr_sz;
+	uint8_t lnum, shft, loff;
+	uint64x2_t cmd01, cmd23;
+	uint64_t ucode_cmd[4];
+	rte_iova_t c_io_addr;
+	uint16_t segdw, segs;
+	uint64_t *laddr;
+
+	/* Get LMT base address and LMT ID as lcore id */
+	ROC_LMT_CPT_BASE_ID_GET(c_lbase, c_lmt_id);
+	c_io_addr = inj_cfg->io_addr;
+
+	sa_base &= ~0xFFFFUL;
+	left = nb_pkts;
+again:
+	burst = left > 32 ? 32 : left;
+
+	lnum = 0;
+	loff = 0;
+	shft = 16;
+
+	for (i = 0; i < burst; i++) {
+		m = tx_pkts[i];
+		sess_priv.u64 = sess[i]->fast_mdata;
+		last = rte_pktmbuf_lastseg(m);
+
+		cptres = rte_pktmbuf_mtod_offset(last, uintptr_t, last->data_len);
+		cptres += BIT_ULL(7);
+		cptres = (cptres - 1) & ~(BIT_ULL(7) - 1);
+		segs = m->nb_segs;
+
+		if (segs > 1) {
+			/* Pointer to WQE header */
+			/* Reserve 8 Dwords of WQE Hdr + Rx Parse Hdr */
+			rxphdr = cptres + 8;
+			dptr = rxphdr + 7 * 8;
+			/* Prepare Multiseg SG list */
+			segdw = cn20k_nix_rx_inj_prepare_mseg(m, (uint64_t *)(dptr));
+			*(uint64_t *)rxphdr = (uint64_t)(segdw - 1) << 12;
+			cptres += 64 + segdw * 16;
+			gthr_sz = (segs % 3) == 0 ? (segs / 3) : (segs / 3 + 1);
+			ucode_cmd[1] = dptr | (gthr_sz << 60);
+		} else {
+			dptr = (uint64_t)rte_pktmbuf_iova(m);
+			ucode_cmd[1] = dptr;
+		}
+
+		/* Prepare CPT instruction */
+		/* CPT word 0 and 1 */
+		cmd01 = vdupq_n_u64(0);
+		w0 = inj_cfg->cmd_w0 | ((uint64_t)m->l2_len - 2) << 24 | (uint64_t)m->l2_len << 16;
+		cmd01 = vsetq_lane_u64(w0, cmd01, 0);
+		cmd01 = vsetq_lane_u64(cptres, cmd01, 1);
+
+		/* CPT word 2 and 3 */
+		cmd23 = vdupq_n_u64(0);
+		/* Set PF func */
+		w0 &= 0xFFFF000000000000UL;
+		cmd23 = vsetq_lane_u64(w0, cmd23, 0);
+		cmd23 = vsetq_lane_u64(((uint64_t)m + sizeof(struct rte_mbuf)) | 1, cmd23, 1);
+
+		sa = (uintptr_t)roc_nix_inl_ow_ipsec_inb_sa(sa_base, sess_priv.sa_idx);
+		ucode_cmd[0] = (ROC_IE_OW_MAJOR_OP_PROCESS_INBOUND_IPSEC << 48 | 1UL << 54 |
+				((uint64_t)sess_priv.chksum) << 32 | (1ULL << 34) | m->pkt_len);
+
+		ucode_cmd[2] = 0;
+		ucode_cmd[3] = (ROC_CPT_DFLT_ENG_GRP_SE << 61 | 1UL << 60 | sa);
+
+		/* Move to our line */
+		laddr = LMT_OFF(c_lbase, lnum, loff ? 64 : 0);
+
+		/* Write CPT instruction to lmt line */
+		vst1q_u64(laddr, cmd01);
+		vst1q_u64((laddr + 2), cmd23);
+
+		*(__uint128_t *)(laddr + 4) = *(__uint128_t *)ucode_cmd;
+		*(__uint128_t *)(laddr + 6) = *(__uint128_t *)(ucode_cmd + 2);
+
+		loff = !loff;
+		lnum = lnum + (loff ? 0 : 1);
+		shft = shft + (loff ? 0 : 3);
+	}
+
+	left -= burst;
+	tx_pkts += burst;
+	sess += burst;
+
+	cn20k_nix_sec_steorl(c_io_addr, c_lmt_id, lnum, loff, shft);
+
+	rte_io_wmb();
+	if (left)
+		goto again;
+
+	return nb_pkts;
+}
+
 static __rte_always_inline void
 nix_sec_meta_to_mbuf(uintptr_t inb_sa, uintptr_t cpth, struct rte_mbuf **inner, uint64_t *ol_flags,
 		     const uint16_t flags, uint64x2_t *rearm, uint64_t buf_sz)
@@ -1741,6 +1904,17 @@ cn20k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts, c
 	return 0;
 }

+static __rte_always_inline uint16_t
+cn20k_nix_inj_pkts(struct rte_security_session **sess, struct cnxk_ethdev_inj_cfg *inj_cfg,
+		   struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+	RTE_SET_USED(sess);
+	RTE_SET_USED(inj_cfg);
+	RTE_SET_USED(tx_pkts);
+	RTE_SET_USED(nb_pkts);
+	return 0;
+}
+
 #endif

 #define RSS_F	  NIX_RX_OFFLOAD_RSS_F
--
2.34.1


^ permalink raw reply related	[flat|nested] 27+ messages in thread

* [PATCH v2 3/8] common/cnxk: update platform features
  2026-02-26 13:17 ` [PATCH v2 " Rahul Bhansali
  2026-02-26 13:17   ` [PATCH v2 2/8] net/cnxk: support IPsec Rx inject for cn20k Rahul Bhansali
@ 2026-02-26 13:17   ` Rahul Bhansali
  2026-02-26 13:17   ` [PATCH v2 4/8] common/cnxk: add RQ PB and WQE cache config Rahul Bhansali
                     ` (4 subsequent siblings)
  6 siblings, 0 replies; 27+ messages in thread
From: Rahul Bhansali @ 2026-02-26 13:17 UTC (permalink / raw)
  To: dev, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori,
	Satha Rao, Harman Kalra
  Cc: jerinj, Rahul Bhansali

Adds a cnf20ka platform check to exclude it from the plain packet
reassembly feature.

Signed-off-by: Rahul Bhansali <rbhansali@marvell.com>
---
Changes in v2: No changes.

 drivers/common/cnxk/roc_features.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/common/cnxk/roc_features.h b/drivers/common/cnxk/roc_features.h
index 3c34041d76..57a51c4db3 100644
--- a/drivers/common/cnxk/roc_features.h
+++ b/drivers/common/cnxk/roc_features.h
@@ -117,7 +117,7 @@ roc_feature_nix_has_inl_profile(void)
 static inline bool
 roc_feature_nix_has_plain_pkt_reassembly(void)
 {
-	return roc_model_is_cn20k();
+	return roc_model_is_cn20k() && !roc_model_is_cnf20ka();
 }

 static inline bool
--
2.34.1


^ permalink raw reply related	[flat|nested] 27+ messages in thread

* [PATCH v2 4/8] common/cnxk: add RQ PB and WQE cache config
  2026-02-26 13:17 ` [PATCH v2 " Rahul Bhansali
  2026-02-26 13:17   ` [PATCH v2 2/8] net/cnxk: support IPsec Rx inject for cn20k Rahul Bhansali
  2026-02-26 13:17   ` [PATCH v2 3/8] common/cnxk: update platform features Rahul Bhansali
@ 2026-02-26 13:17   ` Rahul Bhansali
  2026-02-26 13:17   ` [PATCH v2 5/8] net/cnxk: config RQ PB and WQE caching Rahul Bhansali
                     ` (3 subsequent siblings)
  6 siblings, 0 replies; 27+ messages in thread
From: Rahul Bhansali @ 2026-02-26 13:17 UTC (permalink / raw)
  To: dev, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori,
	Satha Rao, Harman Kalra
  Cc: jerinj, Rahul Bhansali

Adds RQ's PB (Packet buffer) and WQE cache configuration
options.

Signed-off-by: Rahul Bhansali <rbhansali@marvell.com>
---
Changes in v2: No changes.

 drivers/common/cnxk/roc_nix.h       | 14 ++++++++++++++
 drivers/common/cnxk/roc_nix_inl.c   |  2 ++
 drivers/common/cnxk/roc_nix_queue.c | 16 ++++++++--------
 3 files changed, 24 insertions(+), 8 deletions(-)

diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
index ffa1a706f9..7bc3e1f5c6 100644
--- a/drivers/common/cnxk/roc_nix.h
+++ b/drivers/common/cnxk/roc_nix.h
@@ -34,6 +34,16 @@
 #define ROC_NIX_LSO_FORMAT_IDX_TSOV6 1
 #define ROC_NIX_LSO_FORMAT_IDX_IPV4  2

+#define ROC_NIX_RQ_MAX_PB_CACHING_VAL 3
+
+/* First aligned cache block is allocated into the LLC.
+ * All remaining cache blocks are not allocated.
+ */
+#define ROC_NIX_RQ_DEFAULT_PB_CACHING 2
+
+/* Writes of WQE data are allocated into LLC. */
+#define ROC_NIX_RQ_DEFAULT_WQE_CACHING 1
+
 enum roc_nix_rss_reta_sz {
 	ROC_NIX_RSS_RETA_SZ_64 = 64,
 	ROC_NIX_RSS_RETA_SZ_128 = 128,
@@ -448,6 +458,10 @@ struct roc_nix_rq {
 	bool spb_drop_ena;
 	/* XQE drop enable */
 	bool xqe_drop_ena;
+	/* RQ PB caching */
+	uint8_t pb_caching;
+	/* RQ WQE caching */
+	uint8_t wqe_caching;
 	/* End of Input parameters */
 	struct roc_nix *roc_nix;
 	uint64_t meta_aura_handle;
diff --git a/drivers/common/cnxk/roc_nix_inl.c b/drivers/common/cnxk/roc_nix_inl.c
index a21c40acf1..911c349604 100644
--- a/drivers/common/cnxk/roc_nix_inl.c
+++ b/drivers/common/cnxk/roc_nix_inl.c
@@ -1838,6 +1838,8 @@ roc_nix_inl_dev_rq_get(struct roc_nix_rq *rq, bool enable)
 	inl_rq->spb_ena = rq->spb_ena;
 	inl_rq->spb_aura_handle = rq->spb_aura_handle;
 	inl_rq->spb_size = rq->spb_size;
+	inl_rq->pb_caching = rq->pb_caching;
+	inl_rq->wqe_caching = rq->wqe_caching;

 	if (roc_errata_nix_no_meta_aura()) {
 		uint64_t aura_limit =
diff --git a/drivers/common/cnxk/roc_nix_queue.c b/drivers/common/cnxk/roc_nix_queue.c
index ab3a71ec60..ef9b651022 100644
--- a/drivers/common/cnxk/roc_nix_queue.c
+++ b/drivers/common/cnxk/roc_nix_queue.c
@@ -499,7 +499,7 @@ nix_rq_cn9k_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints,
 		aq->rq.sso_grp = rq->hwgrp;
 		aq->rq.ena_wqwd = 1;
 		aq->rq.wqe_skip = rq->wqe_skip;
-		aq->rq.wqe_caching = 1;
+		aq->rq.wqe_caching = rq->wqe_caching;

 		aq->rq.good_utag = rq->tag_mask >> 24;
 		aq->rq.bad_utag = rq->tag_mask >> 24;
@@ -530,7 +530,7 @@ nix_rq_cn9k_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints,
 	aq->rq.lpb_sizem1 = rq->lpb_size / 8;
 	aq->rq.lpb_sizem1 -= 1; /* Expressed in size minus one */
 	aq->rq.ena = ena;
-	aq->rq.pb_caching = 0x2; /* First cache aligned block to LLC */
+	aq->rq.pb_caching = rq->pb_caching;
 	aq->rq.xqe_imm_size = 0; /* No pkt data copy to CQE */
 	aq->rq.rq_int_ena = 0;
 	/* Many to one reduction */
@@ -616,7 +616,7 @@ nix_rq_cn10k_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints, bool cf
 		aq->rq.sso_grp = rq->hwgrp;
 		aq->rq.ena_wqwd = 1;
 		aq->rq.wqe_skip = rq->wqe_skip;
-		aq->rq.wqe_caching = 1;
+		aq->rq.wqe_caching = rq->wqe_caching;

 		aq->rq.xqe_drop_ena = 0;
 		aq->rq.good_utag = rq->tag_mask >> 24;
@@ -647,7 +647,7 @@ nix_rq_cn10k_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints, bool cf
 		aq->rq.ipsecd_drop_en = 1;
 		aq->rq.ena_wqwd = 1;
 		aq->rq.wqe_skip = rq->wqe_skip;
-		aq->rq.wqe_caching = 1;
+		aq->rq.wqe_caching = rq->wqe_caching;
 	}

 	aq->rq.lpb_aura = roc_npa_aura_handle_to_aura(rq->aura_handle);
@@ -683,7 +683,7 @@ nix_rq_cn10k_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints, bool cf
 		aq->rq.spb_ena = 0;
 	}

-	aq->rq.pb_caching = 0x2; /* First cache aligned block to LLC */
+	aq->rq.pb_caching = rq->pb_caching;
 	aq->rq.xqe_imm_size = 0; /* No pkt data copy to CQE */
 	aq->rq.rq_int_ena = 0;
 	/* Many to one reduction */
@@ -797,7 +797,7 @@ nix_rq_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints, bool cfg, boo
 		aq->rq.sso_grp = rq->hwgrp;
 		aq->rq.ena_wqwd = 1;
 		aq->rq.wqe_skip = rq->wqe_skip;
-		aq->rq.wqe_caching = 1;
+		aq->rq.wqe_caching = rq->wqe_caching;

 		aq->rq.good_utag = rq->tag_mask >> 24;
 		aq->rq.bad_utag = rq->tag_mask >> 24;
@@ -816,7 +816,7 @@ nix_rq_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints, bool cfg, boo
 		aq->rq.ipsecd_drop_en = 1;
 		aq->rq.ena_wqwd = 1;
 		aq->rq.wqe_skip = rq->wqe_skip;
-		aq->rq.wqe_caching = 1;
+		aq->rq.wqe_caching = rq->wqe_caching;
 	}

 	aq->rq.lpb_aura = roc_npa_aura_handle_to_aura(rq->aura_handle);
@@ -852,7 +852,7 @@ nix_rq_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints, bool cfg, boo
 		aq->rq.spb_ena = 0;
 	}

-	aq->rq.pb_caching = 0x2; /* First cache aligned block to LLC */
+	aq->rq.pb_caching = rq->pb_caching;
 	aq->rq.xqe_imm_size = 0; /* No pkt data copy to CQE */
 	aq->rq.rq_int_ena = 0;
 	/* Many to one reduction */
--
2.34.1


^ permalink raw reply related	[flat|nested] 27+ messages in thread

* [PATCH v2 5/8] net/cnxk: config RQ PB and WQE caching
  2026-02-26 13:17 ` [PATCH v2 " Rahul Bhansali
                     ` (2 preceding siblings ...)
  2026-02-26 13:17   ` [PATCH v2 4/8] common/cnxk: add RQ PB and WQE cache config Rahul Bhansali
@ 2026-02-26 13:17   ` Rahul Bhansali
  2026-02-26 13:17   ` [PATCH v2 6/8] net/cnxk: update SA context push size Rahul Bhansali
                     ` (2 subsequent siblings)
  6 siblings, 0 replies; 27+ messages in thread
From: Rahul Bhansali @ 2026-02-26 13:17 UTC (permalink / raw)
  To: dev, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori,
	Satha Rao, Harman Kalra
  Cc: jerinj, Rahul Bhansali

Configure RQ's PB (packet buffer) and WQE caching to default
values.

Signed-off-by: Rahul Bhansali <rbhansali@marvell.com>
---
Changes in v2: No changes.

 drivers/net/cnxk/cnxk_eswitch.c | 2 ++
 drivers/net/cnxk/cnxk_ethdev.c  | 2 ++
 2 files changed, 4 insertions(+)

diff --git a/drivers/net/cnxk/cnxk_eswitch.c b/drivers/net/cnxk/cnxk_eswitch.c
index 6b1bfdd476..e45c7dfd07 100644
--- a/drivers/net/cnxk/cnxk_eswitch.c
+++ b/drivers/net/cnxk/cnxk_eswitch.c
@@ -389,6 +389,8 @@ cnxk_eswitch_rxq_setup(struct cnxk_eswitch_dev *eswitch_dev, uint16_t qid, uint1
 	rq->aura_handle = lpb_pool->pool_id;
 	rq->flow_tag_width = 32;
 	rq->sso_ena = false;
+	rq->pb_caching = ROC_NIX_RQ_DEFAULT_PB_CACHING;
+	rq->wqe_caching = ROC_NIX_RQ_DEFAULT_WQE_CACHING;

 	/* Calculate first mbuf skip */
 	first_skip = (sizeof(struct rte_mbuf));
diff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c
index ba8ac52b46..06d1c9b362 100644
--- a/drivers/net/cnxk/cnxk_ethdev.c
+++ b/drivers/net/cnxk/cnxk_ethdev.c
@@ -959,6 +959,8 @@ cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
 	rq->aura_handle = lpb_pool->pool_id;
 	rq->flow_tag_width = 32;
 	rq->sso_ena = false;
+	rq->pb_caching = ROC_NIX_RQ_DEFAULT_PB_CACHING;
+	rq->wqe_caching = ROC_NIX_RQ_DEFAULT_WQE_CACHING;

 	/* Calculate first mbuf skip */
 	first_skip = (sizeof(struct rte_mbuf));
--
2.34.1


^ permalink raw reply related	[flat|nested] 27+ messages in thread

* [PATCH v2 6/8] net/cnxk: update SA context push size
  2026-02-26 13:17 ` [PATCH v2 " Rahul Bhansali
                     ` (3 preceding siblings ...)
  2026-02-26 13:17   ` [PATCH v2 5/8] net/cnxk: config RQ PB and WQE caching Rahul Bhansali
@ 2026-02-26 13:17   ` Rahul Bhansali
  2026-02-26 13:17   ` [PATCH v2 7/8] net/cnxk: flow rule update for non-in-place IPsec Rahul Bhansali
  2026-02-26 13:17   ` [PATCH v2 8/8] common/cnxk: enable CPT CQ for inline IPsec inbound Rahul Bhansali
  6 siblings, 0 replies; 27+ messages in thread
From: Rahul Bhansali @ 2026-02-26 13:17 UTC (permalink / raw)
  To: dev, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori,
	Satha Rao, Harman Kalra
  Cc: jerinj, Rahul Bhansali

Reduce SA context push size to 128 bytes for AES_GCM encryption
to improve CPT performance on the CN20K platform.
Also corrects a few cn20k-specific macros.

Signed-off-by: Rahul Bhansali <rbhansali@marvell.com>
---
Changes in v2: SA context push size change on security session update APIs also.

 drivers/net/cnxk/cn20k_ethdev_sec.c | 107 +++++++++++++++++++++++++---
 1 file changed, 98 insertions(+), 9 deletions(-)

diff --git a/drivers/net/cnxk/cn20k_ethdev_sec.c b/drivers/net/cnxk/cn20k_ethdev_sec.c
index e406f0e879..eab06be68f 100644
--- a/drivers/net/cnxk/cn20k_ethdev_sec.c
+++ b/drivers/net/cnxk/cn20k_ethdev_sec.c
@@ -655,16 +655,71 @@ outb_dbg_iv_update(struct roc_ow_ipsec_outb_sa *outb_sa, const char *__iv_str)
 	}

 	/* Update source of IV */
-	outb_sa->w2.s.iv_src = ROC_IE_OT_SA_IV_SRC_FROM_SA;
+	outb_sa->w2.s.iv_src = ROC_IE_OW_SA_IV_SRC_FROM_SA;
 	free(iv_str);
 }

+static void
+cn20k_eth_sec_inb_sa_misc_fill(struct roc_ow_ipsec_inb_sa *sa,
+			       struct rte_security_ipsec_xform *ipsec_xfrm)
+{
+	struct roc_ow_ipsec_inb_ctx_update_reg *ctx;
+	size_t offset;
+
+	if (sa->w2.s.enc_type != ROC_IE_SA_ENC_AES_GCM)
+		return;
+
+	/* Update ctx push size for AES GCM */
+	offset = offsetof(struct roc_ow_ipsec_inb_sa, hmac_opad_ipad);
+	ctx = (struct roc_ow_ipsec_inb_ctx_update_reg *)((uint8_t *)sa + offset);
+	sa->w0.s.hw_ctx_off = offset / 8;
+	sa->w0.s.ctx_push_size = sa->w0.s.hw_ctx_off + 1;
+
+	if (ipsec_xfrm->life.bytes_soft_limit)
+		ctx->soft_life = ipsec_xfrm->life.bytes_soft_limit + 1;
+
+	if (ipsec_xfrm->life.packets_soft_limit)
+		ctx->soft_life = ipsec_xfrm->life.packets_soft_limit + 1;
+
+	if (ipsec_xfrm->life.bytes_hard_limit)
+		ctx->hard_life = ipsec_xfrm->life.bytes_hard_limit + 1;
+
+	if (ipsec_xfrm->life.packets_hard_limit)
+		ctx->hard_life = ipsec_xfrm->life.packets_hard_limit + 1;
+}
+
 static int
 cn20k_eth_sec_outb_sa_misc_fill(struct roc_nix *roc_nix, struct roc_ow_ipsec_outb_sa *sa,
 				void *sa_cptr, struct rte_security_ipsec_xform *ipsec_xfrm,
 				uint32_t sa_idx)
 {
+	struct roc_ow_ipsec_outb_ctx_update_reg *ctx;
 	uint64_t *ring_base, ring_addr;
+	size_t offset;
+
+	if (sa->w2.s.enc_type == ROC_IE_SA_ENC_AES_GCM) {
+		offset = offsetof(struct roc_ow_ipsec_outb_sa, hmac_opad_ipad);
+		ctx = (struct roc_ow_ipsec_outb_ctx_update_reg *)((uint8_t *)sa + offset);
+		sa->w0.s.hw_ctx_off = offset / 8;
+		sa->w0.s.ctx_push_size = sa->w0.s.hw_ctx_off + 1;
+
+		if (ipsec_xfrm->esn.value)
+			ctx->esn_val = ipsec_xfrm->esn.value - 1;
+
+		if (ipsec_xfrm->life.bytes_soft_limit)
+			ctx->soft_life = ipsec_xfrm->life.bytes_soft_limit + 1;
+
+		if (ipsec_xfrm->life.packets_soft_limit)
+			ctx->soft_life = ipsec_xfrm->life.packets_soft_limit + 1;
+
+		if (ipsec_xfrm->life.bytes_hard_limit)
+			ctx->hard_life = ipsec_xfrm->life.bytes_hard_limit + 1;
+
+		if (ipsec_xfrm->life.packets_hard_limit)
+			ctx->hard_life = ipsec_xfrm->life.packets_hard_limit + 1;
+	} else {
+		ctx = &sa->ctx;
+	}

 	if (roc_nix_inl_is_cq_ena(roc_nix))
 		goto done;
@@ -675,8 +730,8 @@ cn20k_eth_sec_outb_sa_misc_fill(struct roc_nix *roc_nix, struct roc_ow_ipsec_out
 			return -ENOTSUP;

 		ring_addr = ring_base[sa_idx >> ROC_NIX_SOFT_EXP_ERR_RING_MAX_ENTRY_LOG2];
-		sa->ctx.err_ctl.s.mode = ROC_IE_OT_ERR_CTL_MODE_RING;
-		sa->ctx.err_ctl.s.address = ring_addr >> 3;
+		ctx->err_ctl.s.mode = ROC_IE_OW_ERR_CTL_MODE_RING;
+		ctx->err_ctl.s.address = ring_addr >> 3;
 		sa->w0.s.ctx_id = ((uintptr_t)sa_cptr >> 51) & 0x1ff;
 	}
 done:
@@ -751,7 +806,7 @@ cn20k_eth_sec_session_create(void *device, struct rte_security_session_conf *con
 		uintptr_t sa;

 		PLT_STATIC_ASSERT(sizeof(struct cn20k_inb_priv_data) <
-				  ROC_NIX_INL_OT_IPSEC_INB_SW_RSVD);
+				  ROC_NIX_INL_OW_IPSEC_INB_SW_RSVD);

 		spi_mask = roc_nix_inl_inb_spi_range(nix, inl_dev, NULL, NULL);

@@ -796,6 +851,8 @@ cn20k_eth_sec_session_create(void *device, struct rte_security_session_conf *con
 			goto err;
 		}

+		cn20k_eth_sec_inb_sa_misc_fill(inb_sa_dptr, ipsec);
+
 		inb_priv = roc_nix_inl_ow_ipsec_inb_sa_sw_rsvd(inb_sa);
 		/* Back pointer to get eth_sec */
 		inb_priv->eth_sec = eth_sec;
@@ -856,7 +913,7 @@ cn20k_eth_sec_session_create(void *device, struct rte_security_session_conf *con
 		uint32_t sa_idx;

 		PLT_STATIC_ASSERT(sizeof(struct cn20k_outb_priv_data) <
-				  ROC_NIX_INL_OT_IPSEC_OUTB_SW_RSVD);
+				  ROC_NIX_INL_OW_IPSEC_OUTB_SW_RSVD);

 		/* Alloc an sa index */
 		rc = cnxk_eth_outb_sa_idx_get(dev, &sa_idx, ipsec->spi);
@@ -1065,6 +1122,9 @@ cn20k_eth_sec_session_update(void *device, struct rte_security_session *sess,
 		rc = cnxk_ow_ipsec_inb_sa_fill(inb_sa_dptr, ipsec, crypto, 0);
 		if (rc)
 			return -EINVAL;
+
+		cn20k_eth_sec_inb_sa_misc_fill(inb_sa_dptr, ipsec);
+
 		/* Use cookie for original data */
 		inb_sa_dptr->w1.s.cookie = inb_sa->w1.s.cookie;

@@ -1096,6 +1156,14 @@ cn20k_eth_sec_session_update(void *device, struct rte_security_session *sess,
 		if (rc)
 			return -EINVAL;

+		/* Fill outbound sa misc params */
+		rc = cn20k_eth_sec_outb_sa_misc_fill(&dev->nix, outb_sa_dptr, outb_sa, ipsec,
+						     eth_sec->sa_idx);
+		if (rc) {
+			plt_err("Failed to init outb sa misc params, rc=%d", rc);
+			return rc;
+		}
+
 		/* Save rlen info */
 		cnxk_ipsec_outb_rlens_get(rlens, ipsec, crypto);

@@ -1138,6 +1206,7 @@ cn20k_eth_sec_session_stats_get(void *device, struct rte_security_session *sess,
 	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
 	struct cnxk_eth_sec_sess *eth_sec;
+	size_t offset;
 	int rc;

 	eth_sec = cnxk_eth_sec_sess_get_by_sess(dev, sess);
@@ -1152,11 +1221,31 @@ cn20k_eth_sec_session_stats_get(void *device, struct rte_security_session *sess,
 	stats->protocol = RTE_SECURITY_PROTOCOL_IPSEC;

 	if (eth_sec->inb) {
-		stats->ipsec.ipackets = ((struct roc_ow_ipsec_inb_sa *)eth_sec->sa)->ctx.mib_pkts;
-		stats->ipsec.ibytes = ((struct roc_ow_ipsec_inb_sa *)eth_sec->sa)->ctx.mib_octs;
+		struct roc_ow_ipsec_inb_sa *sa = (struct roc_ow_ipsec_inb_sa *)eth_sec->sa;
+		struct roc_ow_ipsec_inb_ctx_update_reg *ctx;
+
+		if (sa->w2.s.enc_type == ROC_IE_SA_ENC_AES_GCM) {
+			offset = offsetof(struct roc_ow_ipsec_inb_sa, hmac_opad_ipad);
+			ctx = (struct roc_ow_ipsec_inb_ctx_update_reg *)((uint8_t *)sa + offset);
+		} else {
+			ctx = &sa->ctx;
+		}
+
+		stats->ipsec.ipackets = ctx->mib_pkts;
+		stats->ipsec.ibytes = ctx->mib_octs;
 	} else {
-		stats->ipsec.opackets = ((struct roc_ow_ipsec_outb_sa *)eth_sec->sa)->ctx.mib_pkts;
-		stats->ipsec.obytes = ((struct roc_ow_ipsec_outb_sa *)eth_sec->sa)->ctx.mib_octs;
+		struct roc_ow_ipsec_outb_sa *sa = (struct roc_ow_ipsec_outb_sa *)eth_sec->sa;
+		struct roc_ow_ipsec_outb_ctx_update_reg *ctx;
+
+		if (sa->w2.s.enc_type == ROC_IE_SA_ENC_AES_GCM) {
+			offset = offsetof(struct roc_ow_ipsec_outb_sa, hmac_opad_ipad);
+			ctx = (struct roc_ow_ipsec_outb_ctx_update_reg *)((uint8_t *)sa + offset);
+		} else {
+			ctx = &sa->ctx;
+		}
+
+		stats->ipsec.opackets = ctx->mib_pkts;
+		stats->ipsec.obytes = ctx->mib_octs;
 	}

 	return 0;
--
2.34.1


^ permalink raw reply related	[flat|nested] 27+ messages in thread

* [PATCH v2 7/8] net/cnxk: flow rule update for non-in-place IPsec
  2026-02-26 13:17 ` [PATCH v2 " Rahul Bhansali
                     ` (4 preceding siblings ...)
  2026-02-26 13:17   ` [PATCH v2 6/8] net/cnxk: update SA context push size Rahul Bhansali
@ 2026-02-26 13:17   ` Rahul Bhansali
  2026-02-26 13:17   ` [PATCH v2 8/8] common/cnxk: enable CPT CQ for inline IPsec inbound Rahul Bhansali
  6 siblings, 0 replies; 27+ messages in thread
From: Rahul Bhansali @ 2026-02-26 13:17 UTC (permalink / raw)
  To: dev, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori,
	Satha Rao, Harman Kalra
  Cc: jerinj, Rahul Bhansali

Updates the flow rule based on the inbound non-in-place (out-of-place)
configuration of the IPsec session.

Signed-off-by: Rahul Bhansali <rbhansali@marvell.com>
---
Changes in v2: No changes.

 drivers/net/cnxk/cn9k_flow.c        |  2 +-
 drivers/net/cnxk/cnxk_ethdev.h      |  6 +++---
 drivers/net/cnxk/cnxk_ethdev_sec.c  |  3 +--
 drivers/net/cnxk/cnxk_flow.c        | 31 +++++++++++++++++++++--------
 drivers/net/cnxk/cnxk_flow.h        |  8 ++++++--
 drivers/net/cnxk/cnxk_flow_common.c | 12 ++++++++++-
 drivers/net/cnxk/cnxk_rep_flow.c    |  4 ++--
 drivers/net/cnxk/rte_pmd_cnxk.h     |  1 +
 8 files changed, 48 insertions(+), 19 deletions(-)

diff --git a/drivers/net/cnxk/cn9k_flow.c b/drivers/net/cnxk/cn9k_flow.c
index ae4629ea69..c39564201d 100644
--- a/drivers/net/cnxk/cn9k_flow.c
+++ b/drivers/net/cnxk/cn9k_flow.c
@@ -18,7 +18,7 @@ cn9k_flow_create(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr,
 	int vtag_actions = 0;
 	int mark_actions;

-	flow = cnxk_flow_create_common(eth_dev, attr, pattern, actions, error, false);
+	flow = cnxk_flow_create_common(eth_dev, attr, pattern, actions, error, false, 0);
 	if (!flow)
 		return NULL;

diff --git a/drivers/net/cnxk/cnxk_ethdev.h b/drivers/net/cnxk/cnxk_ethdev.h
index 1b63b02ad8..e3edf39a5c 100644
--- a/drivers/net/cnxk/cnxk_ethdev.h
+++ b/drivers/net/cnxk/cnxk_ethdev.h
@@ -747,9 +747,9 @@ int cnxk_nix_inline_inbound_mode_setup(struct cnxk_eth_dev *dev);

 struct cnxk_eth_sec_sess *cnxk_eth_sec_sess_get_by_sa_idx(struct cnxk_eth_dev *dev,
 							  uint32_t sa_idx, bool inb);
-struct cnxk_eth_sec_sess *
-cnxk_eth_sec_sess_get_by_sess(struct cnxk_eth_dev *dev,
-			      struct rte_security_session *sess);
+struct cnxk_eth_sec_sess *cnxk_eth_sec_sess_get_by_sess(struct cnxk_eth_dev *dev,
+							const struct rte_security_session *sess);
+
 int cnxk_nix_inl_meta_pool_cb(uint64_t *aura_handle, uintptr_t *mpool, uint32_t buf_sz,
 			      uint32_t nb_bufs, bool destroy, const char *mempool_name);
 int cnxk_nix_inl_custom_meta_pool_cb(uintptr_t pmpool, uintptr_t *mpool, const char *mempool_name,
diff --git a/drivers/net/cnxk/cnxk_ethdev_sec.c b/drivers/net/cnxk/cnxk_ethdev_sec.c
index 59a00408ad..abb50d32de 100644
--- a/drivers/net/cnxk/cnxk_ethdev_sec.c
+++ b/drivers/net/cnxk/cnxk_ethdev_sec.c
@@ -287,8 +287,7 @@ cnxk_eth_sec_sess_get_by_sa_idx(struct cnxk_eth_dev *dev, uint32_t sa_idx, bool
 }

 struct cnxk_eth_sec_sess *
-cnxk_eth_sec_sess_get_by_sess(struct cnxk_eth_dev *dev,
-			      struct rte_security_session *sess)
+cnxk_eth_sec_sess_get_by_sess(struct cnxk_eth_dev *dev, const struct rte_security_session *sess)
 {
 	struct cnxk_eth_sec_sess *eth_sec = NULL;

diff --git a/drivers/net/cnxk/cnxk_flow.c b/drivers/net/cnxk/cnxk_flow.c
index 33501310e0..c1c48eb7ab 100644
--- a/drivers/net/cnxk/cnxk_flow.c
+++ b/drivers/net/cnxk/cnxk_flow.c
@@ -465,7 +465,7 @@ cnxk_map_actions(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr,
 		 const struct rte_flow_action actions[], struct roc_npc_action in_actions[],
 		 struct roc_npc_action_sample *in_sample_actions, uint32_t *flowkey_cfg,
 		 uint16_t *dst_pf_func, uint64_t *npc_default_action, uint8_t has_tunnel_pattern,
-		 bool is_rep, uint8_t rep_pattern, uint64_t *free_allocs)
+		 bool is_rep, uint8_t rep_pattern, uint64_t *free_allocs, uint32_t flow_flags)
 {
 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
 	const struct rte_flow_action_queue *act_q = NULL;
@@ -614,6 +614,8 @@ cnxk_map_actions(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr,
 		case RTE_FLOW_ACTION_TYPE_SECURITY:
 			in_actions[i].type = ROC_NPC_ACTION_TYPE_SEC;
 			in_actions[i].conf = actions->conf;
+			in_actions[i].is_non_inp = flow_flags & CNXK_FLOW_NON_INPLACE;
+			in_actions[i].no_sec_action = flow_flags & CNXK_FLOW_NO_SEC_ACTION;
 			break;
 		case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
 			in_actions[i].type = ROC_NPC_ACTION_TYPE_VLAN_STRIP;
@@ -803,7 +805,8 @@ cnxk_map_flow_data(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr
 		   struct roc_npc_attr *in_attr, struct roc_npc_item_info in_pattern[],
 		   struct roc_npc_action in_actions[],
 		   struct roc_npc_action_sample *in_sample_actions, uint32_t *flowkey_cfg,
-		   uint16_t *dst_pf_func, uint64_t *def_action, bool is_rep, uint64_t *free_allocs)
+		   uint16_t *dst_pf_func, uint64_t *def_action, bool is_rep, uint64_t *free_allocs,
+		   uint32_t flow_flags)
 {
 	uint8_t has_tunnel_pattern = 0, rep_pattern = 0;
 	int rc;
@@ -842,14 +845,14 @@ cnxk_map_flow_data(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr

 	return cnxk_map_actions(eth_dev, attr, actions, in_actions, in_sample_actions, flowkey_cfg,
 				dst_pf_func, def_action, has_tunnel_pattern, is_rep, rep_pattern,
-				free_allocs);
+				free_allocs, flow_flags);
 }

 int
 cnxk_flow_validate_common(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr,
 			  const struct rte_flow_item pattern[],
 			  const struct rte_flow_action actions[], struct rte_flow_error *error,
-			  bool is_rep)
+			  bool is_rep, uint32_t flow_flags)
 {
 	struct roc_npc_item_info in_pattern[ROC_NPC_ITEM_TYPE_END + 1];
 	struct roc_npc_action in_actions[ROC_NPC_MAX_ACTION_COUNT];
@@ -891,7 +894,7 @@ cnxk_flow_validate_common(struct rte_eth_dev *eth_dev, const struct rte_flow_att
 	}
 	rc = cnxk_map_flow_data(eth_dev, attr, pattern, actions, &in_attr, in_pattern, in_actions,
 				&in_sample_action, &flowkey_cfg, &dst_pf_func, &npc_default_action,
-				is_rep, free_allocs);
+				is_rep, free_allocs, flow_flags);
 	if (rc) {
 		rte_flow_error_set(error, 0, RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
 				   "Failed to map flow data");
@@ -919,14 +922,26 @@ cnxk_flow_validate(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr
 		   const struct rte_flow_item pattern[], const struct rte_flow_action actions[],
 		   struct rte_flow_error *error)
 {
-	return cnxk_flow_validate_common(eth_dev, attr, pattern, actions, error, false);
+	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+	struct cnxk_eth_sec_sess *eth_sec = NULL;
+	uint32_t flow_flags = 0;
+
+	if (actions[0].type == RTE_FLOW_ACTION_TYPE_SECURITY) {
+		eth_sec = cnxk_eth_sec_sess_get_by_sess(dev, actions[0].conf);
+		if (eth_sec != NULL) {
+			flow_flags = eth_sec->inb_oop ? CNXK_FLOW_NON_INPLACE : 0;
+			flow_flags |= CNXK_FLOW_NO_SEC_ACTION;
+		}
+	}
+
+	return cnxk_flow_validate_common(eth_dev, attr, pattern, actions, error, false, flow_flags);
 }

 struct roc_npc_flow *
 cnxk_flow_create_common(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr,
 			const struct rte_flow_item pattern[],
 			const struct rte_flow_action actions[], struct rte_flow_error *error,
-			bool is_rep)
+			bool is_rep, uint32_t flow_flags)
 {
 	struct roc_npc_item_info in_pattern[ROC_NPC_ITEM_TYPE_END + 1] = {0};
 	struct roc_npc_action in_actions[ROC_NPC_MAX_ACTION_COUNT] = {0};
@@ -962,7 +977,7 @@ cnxk_flow_create_common(struct rte_eth_dev *eth_dev, const struct rte_flow_attr
 	memset(&in_attr, 0, sizeof(struct roc_npc_attr));
 	rc = cnxk_map_flow_data(eth_dev, attr, pattern, actions, &in_attr, in_pattern, in_actions,
 				&in_sample_action, &npc->flowkey_cfg_state, &dst_pf_func,
-				&npc_default_action, is_rep, free_allocs);
+				&npc_default_action, is_rep, free_allocs, flow_flags);
 	if (rc) {
 		rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
 				   "Failed to map flow data");
diff --git a/drivers/net/cnxk/cnxk_flow.h b/drivers/net/cnxk/cnxk_flow.h
index 80b8d2c36a..2986ea81d1 100644
--- a/drivers/net/cnxk/cnxk_flow.h
+++ b/drivers/net/cnxk/cnxk_flow.h
@@ -20,6 +20,9 @@ struct cnxk_rte_flow_action_info {
 	uint16_t conf_size;
 };

+#define CNXK_FLOW_NO_SEC_ACTION BIT(0)
+#define CNXK_FLOW_NON_INPLACE	BIT(1)
+
 extern const struct cnxk_rte_flow_term_info term[];

 int cnxk_flow_destroy(struct rte_eth_dev *dev, struct roc_npc_flow *flow,
@@ -29,11 +32,12 @@ struct roc_npc_flow *cnxk_flow_create_common(struct rte_eth_dev *eth_dev,
 					     const struct rte_flow_attr *attr,
 					     const struct rte_flow_item pattern[],
 					     const struct rte_flow_action actions[],
-					     struct rte_flow_error *error, bool is_rep);
+					     struct rte_flow_error *error, bool is_rep,
+					     uint32_t flow_flags);
 int cnxk_flow_validate_common(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr,
 			      const struct rte_flow_item pattern[],
 			      const struct rte_flow_action actions[], struct rte_flow_error *error,
-			      bool is_rep);
+			      bool is_rep, uint32_t flow_flags);
 int cnxk_flow_destroy_common(struct rte_eth_dev *eth_dev, struct roc_npc_flow *flow,
 			     struct rte_flow_error *error, bool is_rep);
 int cnxk_flow_flush_common(struct rte_eth_dev *eth_dev, struct rte_flow_error *error, bool is_rep);
diff --git a/drivers/net/cnxk/cnxk_flow_common.c b/drivers/net/cnxk/cnxk_flow_common.c
index 59aa920d91..14ac3b5b65 100644
--- a/drivers/net/cnxk/cnxk_flow_common.c
+++ b/drivers/net/cnxk/cnxk_flow_common.c
@@ -122,7 +122,9 @@ cnxk_flow_create(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr,
 	const struct rte_flow_action *action_rss = NULL;
 	const struct rte_flow_action_meter *mtr = NULL;
 	const struct rte_flow_action *act_q = NULL;
+	struct cnxk_eth_sec_sess *eth_sec = NULL;
 	struct roc_npc_flow *flow;
+	uint32_t flow_flags = 0;
 	void *mcs_flow = NULL;
 	uint32_t req_act = 0;
 	int i, rc;
@@ -183,7 +185,15 @@ cnxk_flow_create(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr,
 		return mcs_flow;
 	}

-	flow = cnxk_flow_create_common(eth_dev, attr, pattern, actions, error, false);
+	if (actions[0].type == RTE_FLOW_ACTION_TYPE_SECURITY) {
+		eth_sec = cnxk_eth_sec_sess_get_by_sess(dev, actions[0].conf);
+		if (eth_sec != NULL) {
+			flow_flags = eth_sec->inb_oop ? CNXK_FLOW_NON_INPLACE : 0;
+			flow_flags |= CNXK_FLOW_NO_SEC_ACTION;
+		}
+	}
+
+	flow = cnxk_flow_create_common(eth_dev, attr, pattern, actions, error, false, flow_flags);
 	if (!flow) {
 		if (mtr)
 			nix_mtr_chain_reset(eth_dev, mtr->mtr_id);
diff --git a/drivers/net/cnxk/cnxk_rep_flow.c b/drivers/net/cnxk/cnxk_rep_flow.c
index f1cf81a90c..1b013ce598 100644
--- a/drivers/net/cnxk/cnxk_rep_flow.c
+++ b/drivers/net/cnxk/cnxk_rep_flow.c
@@ -547,7 +547,7 @@ cnxk_rep_flow_create_native(struct rte_eth_dev *eth_dev, const struct rte_flow_a
 	uint16_t new_entry;
 	int rc;

-	flow = cnxk_flow_create_common(eth_dev, attr, pattern, actions, error, true);
+	flow = cnxk_flow_create_common(eth_dev, attr, pattern, actions, error, true, 0);
 	if (!flow) {
 		plt_err("Fail to create flow");
 		goto fail;
@@ -632,7 +632,7 @@ cnxk_rep_flow_validate(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *
 	}

 	if (rep_dev->native_repte)
-		return cnxk_flow_validate_common(eth_dev, attr, pattern, actions, error, true);
+		return cnxk_flow_validate_common(eth_dev, attr, pattern, actions, error, true, 0);

 	rc = process_flow_rule(rep_dev, attr, pattern, actions, &adata, CNXK_REP_MSG_FLOW_VALIDATE);
 	if (!rc || adata.u.sval < 0) {
diff --git a/drivers/net/cnxk/rte_pmd_cnxk.h b/drivers/net/cnxk/rte_pmd_cnxk.h
index b186b529fa..d344137dd5 100644
--- a/drivers/net/cnxk/rte_pmd_cnxk.h
+++ b/drivers/net/cnxk/rte_pmd_cnxk.h
@@ -82,6 +82,7 @@ struct rte_pmd_cnxk_sec_action {
 	 * XOR.
 	 */
 	enum rte_pmd_cnxk_sec_action_alg alg;
+	bool is_non_inp;
 };

 #define RTE_PMD_CNXK_CTX_MAX_CKEY_LEN	   32
--
2.34.1


^ permalink raw reply related	[flat|nested] 27+ messages in thread

* [PATCH v2 8/8] common/cnxk: enable CPT CQ for inline IPsec inbound
  2026-02-26 13:17 ` [PATCH v2 " Rahul Bhansali
                     ` (5 preceding siblings ...)
  2026-02-26 13:17   ` [PATCH v2 7/8] net/cnxk: flow rule update for non-in-place IPsec Rahul Bhansali
@ 2026-02-26 13:17   ` Rahul Bhansali
  6 siblings, 0 replies; 27+ messages in thread
From: Rahul Bhansali @ 2026-02-26 13:17 UTC (permalink / raw)
  To: dev, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori,
	Satha Rao, Harman Kalra
  Cc: jerinj, Rakesh Kudurumalla

From: Rakesh Kudurumalla <rkudurumalla@marvell.com>

Added support for CPT CQ configuration for inline inbound IPsec.

Signed-off-by: Rakesh Kudurumalla <rkudurumalla@marvell.com>
---
Changes in v2: fixed cleanup on the configuration failure case.

 drivers/common/cnxk/roc_nix_inl.c         |  8 +-
 drivers/common/cnxk/roc_nix_inl.h         |  3 +-
 drivers/common/cnxk/roc_nix_inl_dev.c     | 90 ++++++++++++++++++++---
 drivers/common/cnxk/roc_nix_inl_dev_irq.c | 19 +++--
 drivers/net/cnxk/cn20k_ethdev_sec.c       | 54 +++++++++-----
 5 files changed, 136 insertions(+), 38 deletions(-)

diff --git a/drivers/common/cnxk/roc_nix_inl.c b/drivers/common/cnxk/roc_nix_inl.c
index 911c349604..26be1adac9 100644
--- a/drivers/common/cnxk/roc_nix_inl.c
+++ b/drivers/common/cnxk/roc_nix_inl.c
@@ -486,6 +486,7 @@ nix_inl_inb_ipsec_sa_tbl_setup(struct roc_nix *roc_nix)
 	} else {
 		struct nix_rx_inl_lf_cfg_req *lf_cfg;
 		uint64_t def_cptq = 0;
+		uint64_t cpt_cq_ena = 0;

 		/* Setup device specific inb SA table */
 		lf_cfg = mbox_alloc_msg_nix_rx_inl_lf_cfg(mbox);
@@ -508,9 +509,10 @@ nix_inl_inb_ipsec_sa_tbl_setup(struct roc_nix *roc_nix)
 		if (res_addr_offset)
 			res_addr_offset |= (1UL << 56);

+		cpt_cq_ena = (uint64_t)inl_dev->cpt_cq_ena << 63;
 		lf_cfg->enable = 1;
 		lf_cfg->profile_id = profile_id; /* IPsec profile is 0th one */
-		lf_cfg->rx_inline_sa_base = (uintptr_t)nix->inb_sa_base[profile_id];
+		lf_cfg->rx_inline_sa_base = (uintptr_t)nix->inb_sa_base[profile_id] | cpt_cq_ena;
 		lf_cfg->rx_inline_cfg0 =
 			((def_cptq << 57) | res_addr_offset | ((uint64_t)SSO_TT_ORDERED << 44) |
 			 (sa_pow2_sz << 16) | lenm1_max);
@@ -588,6 +590,7 @@ nix_inl_reass_inb_sa_tbl_setup(struct roc_nix *roc_nix)
 	uint64_t max_sa = 1, sa_pow2_sz;
 	uint64_t sa_idx_w, lenm1_max;
 	uint64_t res_addr_offset = 0;
+	uint64_t cpt_cq_ena = 0;
 	uint64_t def_cptq = 0;
 	size_t inb_sa_sz = 1;
 	uint8_t profile_id;
@@ -637,9 +640,10 @@ nix_inl_reass_inb_sa_tbl_setup(struct roc_nix *roc_nix)
 			res_addr_offset |= (1UL << 56);
 	}

+	cpt_cq_ena = (uint64_t)inl_dev->cpt_cq_ena << 63;
 	lf_cfg->enable = 1;
 	lf_cfg->profile_id = profile_id;
-	lf_cfg->rx_inline_sa_base = (uintptr_t)nix->inb_sa_base[profile_id];
+	lf_cfg->rx_inline_sa_base = (uintptr_t)nix->inb_sa_base[profile_id] | cpt_cq_ena;
 	lf_cfg->rx_inline_cfg0 =
 		((def_cptq << 57) | res_addr_offset | ((uint64_t)SSO_TT_ORDERED << 44) |
 		 (sa_pow2_sz << 16) | lenm1_max);
diff --git a/drivers/common/cnxk/roc_nix_inl.h b/drivers/common/cnxk/roc_nix_inl.h
index 596f12d1c7..d1a08a4495 100644
--- a/drivers/common/cnxk/roc_nix_inl.h
+++ b/drivers/common/cnxk/roc_nix_inl.h
@@ -44,7 +44,8 @@
 #define ROC_NIX_INL_RXC_QUE_BLK_THR 0x40UL

 enum nix_inl_event_type {
-	NIX_INL_CPT_CQ = 1,
+	NIX_INL_INB_CPT_CQ = 1,
+	NIX_INL_OUTB_CPT_CQ,
 	NIX_INL_SSO,
 	NIX_INL_SOFT_EXPIRY_THRD,
 };
diff --git a/drivers/common/cnxk/roc_nix_inl_dev.c b/drivers/common/cnxk/roc_nix_inl_dev.c
index 35528efa46..246dd4612f 100644
--- a/drivers/common/cnxk/roc_nix_inl_dev.c
+++ b/drivers/common/cnxk/roc_nix_inl_dev.c
@@ -382,6 +382,7 @@ nix_inl_nix_ipsec_cfg(struct nix_inl_dev *inl_dev, bool ena)
 		struct nix_rx_inl_lf_cfg_req *lf_cfg;
 		uint64_t res_addr_offset;
 		uint64_t def_cptq;
+		uint64_t cpt_cq_ena;

 		lf_cfg = mbox_alloc_msg_nix_rx_inl_lf_cfg(mbox);
 		if (lf_cfg == NULL) {
@@ -401,7 +402,9 @@ nix_inl_nix_ipsec_cfg(struct nix_inl_dev *inl_dev, bool ena)
 		lf_cfg->profile_id = inl_dev->ipsec_prof_id;
 		if (ena) {
 			lf_cfg->enable = 1;
-			lf_cfg->rx_inline_sa_base = (uintptr_t)inl_dev->inb_sa_base[profile_id];
+			cpt_cq_ena = (uint64_t)inl_dev->cpt_cq_ena << 63;
+			lf_cfg->rx_inline_sa_base =
+				(uintptr_t)inl_dev->inb_sa_base[profile_id] | (cpt_cq_ena);
 			lf_cfg->rx_inline_cfg0 =
 				((def_cptq << 57) | res_addr_offset |
 				 ((uint64_t)SSO_TT_ORDERED << 44) | (sa_pow2_sz << 16) | lenm1_max);
@@ -482,13 +485,33 @@ nix_inl_cpt_setup(struct nix_inl_dev *inl_dev, bool inl_dev_sso)
 lf_fini:
 	for (i = 0; i < inl_dev->nb_cptlf; i++) {
 		struct roc_cpt_lf *lf = &inl_dev->cpt_lf[i];
-		cpt_lf_fini(lf, lf->cpt_cq_ena);
+		cpt_lf_fini(lf, false);
 	}
 lf_free:
 	rc |= cpt_lfs_free(dev);
 	return rc;
 }

+static int
+nix_inl_cpt_cq_inb_release(struct nix_inl_dev *inl_dev)
+{
+	int i;
+
+	if (!inl_dev || !inl_dev->cpt_cq_ena)
+		return 0;
+	for (i = 0; i < inl_dev->nb_inb_cptlfs; i++) {
+		uint8_t slot_id = inl_dev->inb_cpt_lf_id + i;
+		struct roc_cpt_lf *lf = &inl_dev->cpt_lf[slot_id];
+
+		if (lf->cpt_cq_ena) {
+			cpt_lf_cq_fini(lf);
+			cpt_lf_unregister_irqs(lf, cpt_lf_misc_irq, nix_inl_cpt_done_irq);
+		}
+	}
+
+	return 0;
+}
+
 static int
 nix_inl_cpt_release(struct nix_inl_dev *inl_dev)
 {
@@ -625,6 +648,7 @@ nix_inl_nix_profile_config(struct nix_inl_dev *inl_dev, uint8_t profile_id)
 	uint64_t max_sa, sa_w, sa_pow2_sz, lenm1_max;
 	struct nix_rx_inl_lf_cfg_req *lf_cfg;
 	uint64_t res_addr_offset;
+	uint64_t cpt_cq_ena;
 	uint64_t def_cptq;
 	size_t inb_sa_sz;
 	void *sa;
@@ -665,7 +689,8 @@ nix_inl_nix_profile_config(struct nix_inl_dev *inl_dev, uint8_t profile_id)

 	lf_cfg->enable = 1;
 	lf_cfg->profile_id = profile_id;
-	lf_cfg->rx_inline_sa_base = (uintptr_t)inl_dev->inb_sa_base[profile_id];
+	cpt_cq_ena = (uint64_t)inl_dev->cpt_cq_ena << 63;
+	lf_cfg->rx_inline_sa_base = (uintptr_t)inl_dev->inb_sa_base[profile_id] | cpt_cq_ena;
 	lf_cfg->rx_inline_cfg0 =
 		((def_cptq << 57) | res_addr_offset | ((uint64_t)SSO_TT_ORDERED << 44) |
 		 (sa_pow2_sz << 16) | lenm1_max);
@@ -716,6 +741,42 @@ nix_inl_nix_profile_release(struct nix_inl_dev *inl_dev, uint8_t profile_id)
 	return rc;
 }

+static int
+nix_inl_cpt_cq_inb_setup(struct nix_inl_dev *inl_dev)
+{
+	int i, rc;
+
+	if (!inl_dev->cpt_cq_ena)
+		return 0;
+
+	for (i = 0; i < inl_dev->nb_inb_cptlfs; i++) {
+		uint8_t slot_id = inl_dev->inb_cpt_lf_id + i;
+		struct roc_cpt_lf *lf = &inl_dev->cpt_lf[slot_id];
+
+		lf->dq_ack_ena = true;
+		lf->cpt_cq_ena = true;
+		lf->cq_entry_size = 0;
+		lf->cq_all = 0;
+		lf->cq_size = lf->nb_desc;
+		lf->dev = &inl_dev->dev;
+		lf->cq_head = 1;
+
+		rc = cpt_lf_cq_init(lf);
+		if (rc)
+			return rc;
+
+		rc = cpt_lf_register_irqs(lf, cpt_lf_misc_irq, nix_inl_cpt_done_irq);
+		if (rc) {
+			cpt_lf_cq_fini(lf);
+			return rc;
+		}
+
+		roc_cpt_cq_enable(lf);
+	}
+
+	return 0;
+}
+
 static int
 nix_inl_nix_reass_setup(struct nix_inl_dev *inl_dev)
 {
@@ -1451,11 +1512,17 @@ roc_nix_inl_dev_init(struct roc_nix_inl_dev *roc_inl_dev)
 	if (rc)
 		goto sso_release;

+	if (roc_feature_nix_has_cpt_cq_support()) {
+		rc = nix_inl_cpt_cq_inb_setup(inl_dev);
+		if (rc)
+			goto cpt_release;
+	}
+
 	/* Setup device specific inb SA table */
 	rc = nix_inl_nix_ipsec_cfg(inl_dev, true);
 	if (rc) {
 		plt_err("Failed to setup NIX Inbound SA conf, rc=%d", rc);
-		goto cpt_release;
+		goto cpt_cq_inb_release;
 	}

 	/* Setup Reassembly */
@@ -1464,20 +1531,20 @@ roc_nix_inl_dev_init(struct roc_nix_inl_dev *roc_inl_dev)

 		rc = nix_inl_nix_reass_setup(inl_dev);
 		if (rc)
-			goto cpt_release;
+			goto cpt_cq_inb_release;
 	}

 	if (inl_dev->set_soft_exp_poll) {
 		rc = nix_inl_outb_poll_thread_setup(inl_dev);
 		if (rc)
-			goto cpt_release;
+			goto cpt_cq_inb_release;
 	}

 	/* Perform selftest if asked for */
 	if (inl_dev->selftest) {
 		rc = nix_inl_selftest();
 		if (rc)
-			goto cpt_release;
+			goto cpt_cq_inb_release;
 	}
 	inl_dev->max_ipsec_rules = roc_inl_dev->max_ipsec_rules;

@@ -1486,14 +1553,14 @@ roc_nix_inl_dev_init(struct roc_nix_inl_dev *roc_inl_dev)
 			plt_zmalloc(sizeof(int) * inl_dev->max_ipsec_rules, PLT_CACHE_LINE_SIZE);
 		if (inl_dev->ipsec_index == NULL) {
 			rc = NPC_ERR_NO_MEM;
-			goto cpt_release;
+			goto cpt_cq_inb_release;
 		}
 		rc = npc_mcam_alloc_entries(inl_dev->dev.mbox, inl_dev->max_ipsec_rules,
 					    inl_dev->ipsec_index, inl_dev->max_ipsec_rules,
 					    NPC_MCAM_HIGHER_PRIO, &resp_count, 1);
 		if (rc) {
 			plt_free(inl_dev->ipsec_index);
-			goto cpt_release;
+			goto cpt_cq_inb_release;
 		}

 		start_index = inl_dev->ipsec_index[0];
@@ -1507,6 +1574,8 @@ roc_nix_inl_dev_init(struct roc_nix_inl_dev *roc_inl_dev)
 	idev->nix_inl_dev = inl_dev;

 	return 0;
+cpt_cq_inb_release:
+	rc |= nix_inl_cpt_cq_inb_release(inl_dev);
 cpt_release:
 	rc |= nix_inl_cpt_release(inl_dev);
 sso_release:
@@ -1558,8 +1627,9 @@ roc_nix_inl_dev_fini(struct roc_nix_inl_dev *roc_inl_dev)
 	/* Flush Inbound CTX cache entries */
 	nix_inl_cpt_ctx_cache_sync(inl_dev);

+	rc = nix_inl_cpt_cq_inb_release(inl_dev);
 	/* Release CPT */
-	rc = nix_inl_cpt_release(inl_dev);
+	rc |= nix_inl_cpt_release(inl_dev);

 	/* Release SSO */
 	rc |= nix_inl_sso_release(inl_dev);
diff --git a/drivers/common/cnxk/roc_nix_inl_dev_irq.c b/drivers/common/cnxk/roc_nix_inl_dev_irq.c
index 89155a1f7d..30986e780a 100644
--- a/drivers/common/cnxk/roc_nix_inl_dev_irq.c
+++ b/drivers/common/cnxk/roc_nix_inl_dev_irq.c
@@ -49,10 +49,11 @@ static void
 nix_inl_cpt_cq_cb(struct roc_cpt_lf *lf)
 {
 	struct roc_nix *roc_nix = (struct roc_nix *)lf->dev->roc_nix;
+	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
 	struct idev_cfg *idev = idev_get_cfg();
 	uint32_t port_id = roc_nix->port_id;
 	struct nix_inl_dev *inl_dev = NULL;
-	struct roc_ow_ipsec_outb_sa *sa;
+	enum nix_inl_event_type cq_type;
 	union cpt_lf_cq_base cq_base;
 	union cpt_lf_cq_ptr cq_ptr;
 	struct cpt_cq_s *cq_s;
@@ -60,6 +61,7 @@ nix_inl_cpt_cq_cb(struct roc_cpt_lf *lf)
 	uint32_t count, head;
 	uint32_t nq_ptr;
 	uint64_t i;
+	void *sa;

 	if (idev)
 		inl_dev = idev->nix_inl_dev;
@@ -75,23 +77,30 @@ nix_inl_cpt_cq_cb(struct roc_cpt_lf *lf)
 	count = cq_ptr.s.count;
 	nq_ptr = cq_ptr.s.nq_ptr;

+	if (lf->dev == &inl_dev->dev)
+		cq_type = NIX_INL_INB_CPT_CQ;
+	else if (lf->dev == &nix->dev)
+		cq_type = NIX_INL_OUTB_CPT_CQ;
+	else
+		return;
+
 	for (i = 0; i < count; i++) {
 		cq_s = (struct cpt_cq_s *)(uintptr_t)(((cq_base.s.addr << 7)) + (head << 5));

 		if (cq_s->w0.s.uc_compcode && cq_s->w0.s.compcode) {
 			switch (cq_s->w2.s.fmt & fmt_msk) {
 			case WQE_PTR_CPTR:
-				sa = (struct roc_ow_ipsec_outb_sa *)cq_s->w1.esn;
+				sa = (void *)cq_s->w1.esn;
 				break;
 			case CPTR_WQE_PTR:
-				sa = (struct roc_ow_ipsec_outb_sa *)cq_s->w3.comp_ptr;
+				sa = (void *)cq_s->w3.comp_ptr;
 				break;
 			default:
 				plt_err("Invalid event Received ");
 				goto done;
 			}
 			uint64_t tmp = ~(uint32_t)0x0;
-			inl_dev->work_cb(&tmp, sa, NIX_INL_CPT_CQ, (void *)cq_s, port_id);
+			inl_dev->work_cb(&tmp, sa, cq_type, (void *)cq_s, port_id);
 		}
 done:
 		head = (head + 1) % lf->cq_size;
@@ -165,7 +174,7 @@ nix_inl_sso_hws_irq(void *param)
 void
 nix_inl_cpt_done_irq(void *param)
 {
-	struct roc_cpt_lf *lf = param;
+	struct roc_cpt_lf *lf = (struct roc_cpt_lf *)param;
 	uint64_t done_wait;
 	uint64_t intr;

diff --git a/drivers/net/cnxk/cn20k_ethdev_sec.c b/drivers/net/cnxk/cn20k_ethdev_sec.c
index eab06be68f..5d0debb81d 100644
--- a/drivers/net/cnxk/cn20k_ethdev_sec.c
+++ b/drivers/net/cnxk/cn20k_ethdev_sec.c
@@ -439,18 +439,31 @@ cnxk_pktmbuf_free_no_cache(struct rte_mbuf *mbuf)
 }

 static void
-cn20k_eth_sec_post_event(struct rte_eth_dev *eth_dev, struct roc_ow_ipsec_outb_sa *sa,
+cn20k_eth_sec_post_event(struct rte_eth_dev *eth_dev, void *sa, enum nix_inl_event_type type,
 			 uint16_t uc_compcode, uint16_t compcode, struct rte_mbuf *mbuf)
 {
 	struct rte_eth_event_ipsec_desc desc;
 	struct cn20k_sec_sess_priv sess_priv;
-	struct cn20k_outb_priv_data *priv;
+	struct cn20k_outb_priv_data *outb_priv;
+	struct cn20k_inb_priv_data *inb_priv;
 	static uint64_t warn_cnt;
+	uint64_t life_unit;

 	memset(&desc, 0, sizeof(desc));
-	priv = roc_nix_inl_ow_ipsec_outb_sa_sw_rsvd(sa);
 	sess_priv.u64 = 0;

+	if (type == NIX_INL_INB_CPT_CQ) {
+		struct roc_ow_ipsec_inb_sa *inb_sa = (struct roc_ow_ipsec_inb_sa *)sa;
+		inb_priv = roc_nix_inl_ow_ipsec_inb_sa_sw_rsvd(sa);
+		desc.metadata = (uint64_t)inb_priv->userdata;
+		life_unit = inb_sa->w2.s.life_unit;
+	} else {
+		struct roc_ow_ipsec_outb_sa *outb_sa = (struct roc_ow_ipsec_outb_sa *)sa;
+		outb_priv = roc_nix_inl_ow_ipsec_outb_sa_sw_rsvd(sa);
+		desc.metadata = (uint64_t)outb_priv->userdata;
+		life_unit = outb_sa->w2.s.life_unit;
+	}
+
 	if (mbuf)
 		sess_priv.u64 = *rte_security_dynfield(mbuf);

@@ -459,14 +472,14 @@ cn20k_eth_sec_post_event(struct rte_eth_dev *eth_dev, struct roc_ow_ipsec_outb_s
 		desc.subtype = RTE_ETH_EVENT_IPSEC_ESN_OVERFLOW;
 		break;
 	case ROC_IE_OW_UCC_ERR_SA_EXPIRED:
-		if (sa->w2.s.life_unit == ROC_IE_OW_SA_LIFE_UNIT_PKTS)
+		if (life_unit == ROC_IE_OW_SA_LIFE_UNIT_PKTS)
 			desc.subtype = RTE_ETH_EVENT_IPSEC_SA_PKT_HARD_EXPIRY;
 		else
 			desc.subtype = RTE_ETH_EVENT_IPSEC_SA_BYTE_HARD_EXPIRY;
 		break;
 	case ROC_IE_OW_UCC_SUCCESS_SA_SOFTEXP_FIRST:
 	case ROC_IE_OW_UCC_SUCCESS_SA_SOFTEXP_AGAIN:
-		if (sa->w2.s.life_unit == ROC_IE_OW_SA_LIFE_UNIT_PKTS)
+		if (life_unit == ROC_IE_OW_SA_LIFE_UNIT_PKTS)
 			desc.subtype = RTE_ETH_EVENT_IPSEC_SA_PKT_EXPIRY;
 		else
 			desc.subtype = RTE_ETH_EVENT_IPSEC_SA_BYTE_EXPIRY;
@@ -490,7 +503,6 @@ cn20k_eth_sec_post_event(struct rte_eth_dev *eth_dev, struct roc_ow_ipsec_outb_s
 		break;
 	}

-	desc.metadata = (uint64_t)priv->userdata;
 	rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_IPSEC, &desc);
 }

@@ -498,12 +510,15 @@ static const char *
 get_inl_event_type(enum nix_inl_event_type type)
 {
 	switch (type) {
-	case NIX_INL_CPT_CQ:
-		return "NIX_INL_CPT_CQ";
+	case NIX_INL_OUTB_CPT_CQ:
+		return "NIX_INL_OUTB_CPT_CQ";
+	case NIX_INL_INB_CPT_CQ:
+		return "NIX_INL_INB_CPT_CQ";
 	case NIX_INL_SSO:
 		return "NIX_INL_SSO";
 	case NIX_INL_SOFT_EXPIRY_THRD:
 		return "NIX_INL_SOFT_EXPIRY_THRD";
+
 	default:
 		return "Unknown event";
 	}
@@ -515,8 +530,8 @@ cn20k_eth_sec_sso_work_cb(uint64_t *gw, void *args, enum nix_inl_event_type type
 {
 	struct rte_eth_event_ipsec_desc desc;
 	struct cn20k_sec_sess_priv sess_priv;
-	struct cn20k_outb_priv_data *priv;
-	struct roc_ow_ipsec_outb_sa *sa;
+	struct cn20k_outb_priv_data *outb_priv;
+	struct roc_ow_ipsec_outb_sa *outb_sa;
 	struct cpt_cn20k_res_s *res;
 	struct rte_eth_dev *eth_dev;
 	struct cnxk_eth_dev *dev;
@@ -546,20 +561,19 @@ cn20k_eth_sec_sso_work_cb(uint64_t *gw, void *args, enum nix_inl_event_type type
 		/* Fall through */
 	default:
 		if (type) {
-			sa = (struct roc_ow_ipsec_outb_sa *)args;
-			priv = roc_nix_inl_ow_ipsec_outb_sa_sw_rsvd(sa);
-			desc.metadata = (uint64_t)priv->userdata;
 			eth_dev = &rte_eth_devices[port_id];
-			if (type == NIX_INL_CPT_CQ) {
-				struct cpt_cq_s *cqs = (struct cpt_cq_s *)cq_s;
-
-				cn20k_eth_sec_post_event(eth_dev, sa,
+			struct cpt_cq_s *cqs = (struct cpt_cq_s *)cq_s;
+			if (type < NIX_INL_SSO) {
+				cn20k_eth_sec_post_event(eth_dev, args, type,
 							 (uint16_t)cqs->w0.s.uc_compcode,
 							 (uint16_t)cqs->w0.s.compcode, NULL);
 				return;
 			}
 			if (type == NIX_INL_SOFT_EXPIRY_THRD) {
-				if (sa->w2.s.life_unit == ROC_IE_OW_SA_LIFE_UNIT_PKTS)
+				outb_sa = (struct roc_ow_ipsec_outb_sa *)args;
+				outb_priv = roc_nix_inl_ow_ipsec_outb_sa_sw_rsvd(outb_sa);
+				desc.metadata = (uint64_t)outb_priv->userdata;
+				if (outb_sa->w2.s.life_unit == ROC_IE_OW_SA_LIFE_UNIT_PKTS)
 					desc.subtype = RTE_ETH_EVENT_IPSEC_SA_PKT_EXPIRY;
 				else
 					desc.subtype = RTE_ETH_EVENT_IPSEC_SA_BYTE_EXPIRY;
@@ -596,9 +610,9 @@ cn20k_eth_sec_sso_work_cb(uint64_t *gw, void *args, enum nix_inl_event_type type
 	sess_priv.u64 = *rte_security_dynfield(mbuf);

 	sa_base = dev->outb.sa_base;
-	sa = roc_nix_inl_ow_ipsec_outb_sa(sa_base, sess_priv.sa_idx);
+	outb_sa = roc_nix_inl_ow_ipsec_outb_sa(sa_base, sess_priv.sa_idx);

-	cn20k_eth_sec_post_event(eth_dev, sa, res->uc_compcode, res->compcode, mbuf);
+	cn20k_eth_sec_post_event(eth_dev, outb_sa, type, res->uc_compcode, res->compcode, mbuf);

 	cnxk_pktmbuf_free_no_cache(mbuf);
 }
--
2.34.1


^ permalink raw reply related	[flat|nested] 27+ messages in thread

* [PATCH v3 1/8] net/cnxk: support of plain packet reassembly
  2026-02-19  9:08 [PATCH 1/8] net/cnxk: support of plain packet reassembly Rahul Bhansali
                   ` (8 preceding siblings ...)
  2026-02-26 13:17 ` [PATCH v2 " Rahul Bhansali
@ 2026-02-27  4:37 ` Rahul Bhansali
  2026-02-27  4:37   ` [PATCH v3 2/8] net/cnxk: support IPsec Rx inject for cn20k Rahul Bhansali
                     ` (6 more replies)
  9 siblings, 7 replies; 27+ messages in thread
From: Rahul Bhansali @ 2026-02-27  4:37 UTC (permalink / raw)
  To: dev, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori,
	Satha Rao, Harman Kalra
  Cc: jerinj, Rakesh Kudurumalla

From: Rakesh Kudurumalla <rkudurumalla@marvell.com>

Adds support for plain packet reassembly by configuring
a UCAST_CPT rule.

Signed-off-by: Rakesh Kudurumalla <rkudurumalla@marvell.com>
---
Changes in v2: Updated doc, fixed cleanup on configuration failure cases.
Changes in v3: fix checkpatch error

 doc/guides/nics/cnxk.rst                      |   1 +
 doc/guides/rel_notes/release_26_03.rst        |   1 +
 drivers/common/cnxk/roc_nix_inl.h             |   2 +-
 .../common/cnxk/roc_platform_base_symbols.c   |   1 +
 drivers/net/cnxk/cn20k_ethdev.c               |  94 +++++--
 drivers/net/cnxk/cn20k_rx.h                   |   6 +-
 drivers/net/cnxk/cnxk_ethdev.c                | 233 ++++++++++++++----
 drivers/net/cnxk/cnxk_ethdev.h                |  11 +
 8 files changed, 290 insertions(+), 59 deletions(-)

diff --git a/doc/guides/nics/cnxk.rst b/doc/guides/nics/cnxk.rst
index 4105b101b2..9e758a1b5e 100644
--- a/doc/guides/nics/cnxk.rst
+++ b/doc/guides/nics/cnxk.rst
@@ -40,6 +40,7 @@ Features of the CNXK Ethdev PMD are:
 - Port representors
 - Represented port pattern matching and action
 - Port representor pattern matching and action
+- Plain packet reassembly on CN20K SoC family

 Prerequisites
 -------------
diff --git a/doc/guides/rel_notes/release_26_03.rst b/doc/guides/rel_notes/release_26_03.rst
index b4499ec066..b1f9b3c82b 100644
--- a/doc/guides/rel_notes/release_26_03.rst
+++ b/doc/guides/rel_notes/release_26_03.rst
@@ -80,6 +80,7 @@ New Features
 * **Updated Marvell cnxk net driver.**

   * Added out-of-place support for CN20K SoC.
+  * Added plain packet reassembly support for CN20K SoC.

 * **Updated ZTE zxdh ethernet driver.**

diff --git a/drivers/common/cnxk/roc_nix_inl.h b/drivers/common/cnxk/roc_nix_inl.h
index 68f395438c..596f12d1c7 100644
--- a/drivers/common/cnxk/roc_nix_inl.h
+++ b/drivers/common/cnxk/roc_nix_inl.h
@@ -160,7 +160,7 @@ bool __roc_api roc_nix_inl_inb_is_enabled(struct roc_nix *roc_nix);
 uintptr_t __roc_api roc_nix_inl_inb_sa_base_get(struct roc_nix *roc_nix,
 						bool inl_dev_sa);
 uint16_t roc_nix_inl_inb_ipsec_profile_id_get(struct roc_nix *roc_nix, bool inb_inl_dev);
-uint16_t roc_nix_inl_inb_reass_profile_id_get(struct roc_nix *roc_nix, bool inb_inl_dev);
+uint16_t __roc_api roc_nix_inl_inb_reass_profile_id_get(struct roc_nix *roc_nix, bool inb_inl_dev);
 bool __roc_api roc_nix_inl_inb_rx_inject_enable(struct roc_nix *roc_nix, bool inl_dev_sa);
 uint32_t __roc_api roc_nix_inl_inb_spi_range(struct roc_nix *roc_nix,
 					     bool inl_dev_sa, uint32_t *min,
diff --git a/drivers/common/cnxk/roc_platform_base_symbols.c b/drivers/common/cnxk/roc_platform_base_symbols.c
index 79dd18fbd7..2c73efd877 100644
--- a/drivers/common/cnxk/roc_platform_base_symbols.c
+++ b/drivers/common/cnxk/roc_platform_base_symbols.c
@@ -228,6 +228,7 @@ RTE_EXPORT_INTERNAL_SYMBOL(roc_nix_tm_dump)
 RTE_EXPORT_INTERNAL_SYMBOL(roc_nix_tm_sdp_prepare_tree)
 RTE_EXPORT_INTERNAL_SYMBOL(roc_nix_dump)
 RTE_EXPORT_INTERNAL_SYMBOL(roc_nix_inl_dev_dump)
+RTE_EXPORT_INTERNAL_SYMBOL(roc_nix_inl_inb_reass_profile_id_get)
 RTE_EXPORT_INTERNAL_SYMBOL(roc_nix_inl_outb_cpt_lfs_dump)
 RTE_EXPORT_INTERNAL_SYMBOL(roc_nix_sq_desc_dump)
 RTE_EXPORT_INTERNAL_SYMBOL(roc_nix_fc_config_get)
diff --git a/drivers/net/cnxk/cn20k_ethdev.c b/drivers/net/cnxk/cn20k_ethdev.c
index 7e9e32f80b..4a3d163c75 100644
--- a/drivers/net/cnxk/cn20k_ethdev.c
+++ b/drivers/net/cnxk/cn20k_ethdev.c
@@ -616,22 +616,17 @@ static int
 cn20k_nix_reassembly_capability_get(struct rte_eth_dev *eth_dev,
 				    struct rte_eth_ip_reassembly_params *reassembly_capa)
 {
-	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
-	int rc = -ENOTSUP;
 	RTE_SET_USED(eth_dev);

 	if (!roc_feature_nix_has_reass())
 		return -ENOTSUP;

-	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
-		reassembly_capa->timeout_ms = 60 * 1000;
-		reassembly_capa->max_frags = 4;
-		reassembly_capa->flags =
-			RTE_ETH_DEV_REASSEMBLY_F_IPV4 | RTE_ETH_DEV_REASSEMBLY_F_IPV6;
-		rc = 0;
-	}
+	reassembly_capa->timeout_ms = 60 * 1000;
+	reassembly_capa->max_frags = 8;
+	reassembly_capa->flags =
+		RTE_ETH_DEV_REASSEMBLY_F_IPV4 | RTE_ETH_DEV_REASSEMBLY_F_IPV6;

-	return rc;
+	return 0;
 }

 static int
@@ -649,7 +644,10 @@ cn20k_nix_reassembly_conf_set(struct rte_eth_dev *eth_dev,
 {
 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
 	struct roc_cpt_rxc_time_cfg rxc_time_cfg = {0};
-	int rc = 0;
+	uint16_t nb_rxq = dev->nb_rxq;
+	int rc = 0, i, rxq_cnt = 0;
+	struct cn20k_eth_rxq *rxq;
+	struct roc_nix_rq *rq;

 	if (!roc_feature_nix_has_reass())
 		return -ENOTSUP;
@@ -659,15 +657,83 @@ cn20k_nix_reassembly_conf_set(struct rte_eth_dev *eth_dev,
 		if (!dev->inb.nb_oop)
 			dev->rx_offload_flags &= ~NIX_RX_REAS_F;
 		dev->inb.reass_en = false;
+		if (dev->ip_reass_en) {
+			cnxk_nix_ip_reass_rule_clr(eth_dev);
+			dev->ip_reass_en = false;
+		}
 		return 0;
 	}

+	if (!(dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY)) {
+		rc = cnxk_nix_inline_inbound_setup(dev);
+		if (rc) {
+			plt_err("Nix inline inbound setup failed rc=%d", rc);
+			goto done;
+		}
+
+		rc = cnxk_nix_inline_inbound_mode_setup(dev);
+		if (rc) {
+			plt_err("Nix inline inbound mode setup failed rc=%d", rc);
+			goto cleanup;
+		}
+
+		for (i = 0; i < nb_rxq; i++) {
+			rq = &dev->rqs[i];
+			rxq = eth_dev->data->rx_queues[i];
+
+			if (!rxq) {
+				plt_err("Receive queue = %d not enabled", i);
+				rc = -EINVAL;
+				goto cleanup;
+			}
+
+			roc_nix_inl_dev_xaq_realloc(rq->aura_handle);
+
+			rq->tag_mask = 0x0FF00000 | ((uint32_t)RTE_EVENT_TYPE_ETHDEV << 28);
+			rc = roc_nix_inl_dev_rq_get(rq, !!eth_dev->data->dev_started);
+			if (rc)
+				goto cleanup;
+
+			rxq->lmt_base = dev->nix.lmt_base;
+			rxq->sa_base = roc_nix_inl_inb_sa_base_get(&dev->nix, dev->inb.inl_dev);
+			rc = roc_npa_buf_type_update(rq->aura_handle,
+						     ROC_NPA_BUF_TYPE_PACKET_IPSEC, 1);
+			if (rc)
+				goto cleanup;
+
+			rxq_cnt = i + 1;
+		}
+	}
+
 	rc = roc_nix_reassembly_configure(&rxc_time_cfg, conf->timeout_ms);
-	if (!rc && dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
-		dev->rx_offload_flags |= NIX_RX_REAS_F;
-		dev->inb.reass_en = true;
+	if (rc) {
+		plt_err("Nix reassembly_configure failed rc=%d", rc);
+		goto cleanup;
 	}

+	dev->rx_offload_flags |= NIX_RX_REAS_F | NIX_RX_OFFLOAD_SECURITY_F;
+	dev->inb.reass_en = !!((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY));
+
+	if (!dev->ip_reass_en) {
+		rc = cnxk_nix_ip_reass_rule_set(eth_dev, 0);
+		if (rc) {
+			plt_err("Nix reassembly rule setup failed rc=%d", rc);
+			goto cleanup;
+		}
+	}
+
+	return 0;
+cleanup:
+	dev->inb.reass_en = false;
+	if (!(dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY)) {
+		rc |= cnxk_nix_inl_inb_fini(dev);
+		for (i = 0; i < rxq_cnt; i++) {
+			struct roc_nix_rq *rq = &dev->rqs[i];
+
+			roc_nix_inl_dev_rq_put(rq);
+		}
+	}
+done:
 	return rc;
 }

diff --git a/drivers/net/cnxk/cn20k_rx.h b/drivers/net/cnxk/cn20k_rx.h
index 83c222c53c..d6c217cdf5 100644
--- a/drivers/net/cnxk/cn20k_rx.h
+++ b/drivers/net/cnxk/cn20k_rx.h
@@ -258,7 +258,8 @@ nix_sec_meta_to_mbuf_sc(uint64_t cq_w5, uint64_t cpth, const uint64_t sa_base,
 			*rte_security_dynfield(mbuf) = (uint64_t)inb_priv->userdata;
 	} else {
 		/* Update dynamic field with userdata */
-		*rte_security_dynfield(mbuf) = (uint64_t)inb_priv->userdata;
+		if (flags & NIX_RX_REAS_F && inb_priv->userdata)
+			*rte_security_dynfield(mbuf) = (uint64_t)inb_priv->userdata;
 	}

 	*len = ((w3 >> 48) & 0xFFFF) + ((cq_w5 >> 16) & 0xFF) - (cq_w5 & 0xFF);
@@ -917,7 +918,8 @@ nix_sec_meta_to_mbuf(uintptr_t inb_sa, uintptr_t cpth, struct rte_mbuf **inner,
 		/* Get SPI from CPT_PARSE_S's cookie(already swapped) */
 		inb_priv = roc_nix_inl_ot_ipsec_inb_sa_sw_rsvd((void *)inb_sa);
 		/* Update dynamic field with userdata */
-		*rte_security_dynfield(inner_m) = (uint64_t)inb_priv->userdata;
+		if (flags & NIX_RX_REAS_F && inb_priv->userdata)
+			*rte_security_dynfield(inner_m) = (uint64_t)inb_priv->userdata;
 	}

 	/* Clear and update original lower 16 bit of data offset */
diff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c
index ff78622b58..ba8ac52b46 100644
--- a/drivers/net/cnxk/cnxk_ethdev.c
+++ b/drivers/net/cnxk/cnxk_ethdev.c
@@ -7,6 +7,11 @@
 #include <eal_export.h>
 #include <rte_eventdev.h>
 #include <rte_pmd_cnxk.h>
+#include "roc_priv.h"
+
+#define REASS_PRIORITY             0
+#define CLS_LTYPE_OFFSET_START     7
+#define CLS_LFLAGS_LC_OFFSET (CLS_LTYPE_OFFSET_START + 4)

 static const uint32_t cnxk_mac_modes[CGX_MODE_MAX + 1] = {
 	[CGX_MODE_SGMII] = RTE_ETH_LINK_SPEED_1G,
@@ -203,46 +208,160 @@ cnxk_nix_inb_mode_set(struct cnxk_eth_dev *dev, bool use_inl_dev)
 	return cnxk_nix_lookup_mem_sa_base_set(dev);
 }

-static int
-nix_security_setup(struct cnxk_eth_dev *dev)
+int
+cnxk_nix_inline_inbound_mode_setup(struct cnxk_eth_dev *dev)
+{
+	int rc = 0;
+
+	/* By default pick using inline device for poll mode.
+	 * Will be overridden when event mode rq's are setup.
+	 */
+	cnxk_nix_inb_mode_set(dev, !dev->inb.no_inl_dev);
+
+	/* Allocate memory to be used as dptr for CPT ucode
+	 * WRITE_SA op.
+	 */
+	dev->inb.sa_dptr =
+		plt_zmalloc(ROC_NIX_INL_OT_IPSEC_INB_HW_SZ, 0);
+	if (!dev->inb.sa_dptr) {
+		plt_err("Couldn't allocate memory for SA dptr");
+		rc = -ENOMEM;
+		goto cleanup;
+	}
+	dev->inb.inl_dev_q = roc_nix_inl_dev_qptr_get(0);
+cleanup:
+	return rc;
+}
+
+static void
+cnxk_flow_ipfrag_set(struct roc_npc_flow *flow, struct roc_npc *npc)
+{
+	uint8_t lc_offset;
+	uint64_t mask;
+
+	lc_offset = rte_popcount64(npc->rx_parse_nibble & ((1ULL << CLS_LFLAGS_LC_OFFSET) - 1));
+
+	lc_offset *= 4;
+
+	mask = (~(0xffULL << lc_offset));
+	flow->mcam_data[0] &= mask;
+	flow->mcam_mask[0] &= mask;
+	flow->mcam_data[0] |= (0x02ULL << lc_offset);
+	flow->mcam_mask[0] |= (0x82ULL << lc_offset);
+}
+
+int
+cnxk_nix_ip_reass_rule_set(struct rte_eth_dev *eth_dev, uint32_t rq)
+{
+	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+	struct nix_rx_action2_s *action2;
+	struct nix_rx_action_s *action;
+	struct roc_npc_flow mcam;
+	int prio = 0, rc = 0;
+	struct roc_npc *npc;
+	int resp_count = 0;
+	bool inl_dev;
+
+	npc = &dev->npc;
+	inl_dev = roc_nix_inb_is_with_inl_dev(&dev->nix);
+
+	prio = REASS_PRIORITY;
+	memset(&mcam, 0, sizeof(struct roc_npc_flow));
+
+	action = (struct nix_rx_action_s *)&mcam.npc_action;
+	action2 = (struct nix_rx_action2_s *)&mcam.npc_action2;
+
+	if (inl_dev) {
+		struct roc_nix_rq *inl_rq;
+
+		inl_rq = roc_nix_inl_dev_rq(&dev->nix);
+		if (!inl_rq) {
+			plt_err("Failed to get inline dev rq for %d", dev->nix.port_id);
+			goto mcam_alloc_failed;
+		}
+		action->pf_func = roc_idev_nix_inl_dev_pffunc_get();
+		action->index = inl_rq->qid;
+	} else {
+		action->pf_func = npc->pf_func;
+		action->index = rq;
+	}
+	action->op = NIX_RX_ACTIONOP_UCAST_CPT;
+
+	action2->inline_profile_id = roc_nix_inl_inb_reass_profile_id_get(npc->roc_nix, inl_dev);
+
+	rc = roc_npc_mcam_merge_base_steering_rule(npc, &mcam);
+	if (rc < 0)
+		goto mcam_alloc_failed;
+
+	/* Channel[11] should be 'b0 */
+	mcam.mcam_data[0] &= (~0xfffULL);
+	mcam.mcam_mask[0] &= (~0xfffULL);
+	mcam.mcam_data[0] |= (uint64_t)(npc->channel & 0x7ff);
+	mcam.mcam_mask[0] |= (BIT_ULL(12) - 1);
+	cnxk_flow_ipfrag_set(&mcam, npc);
+
+	mcam.priority = prio;
+	mcam.key_type = roc_npc_get_key_type(npc, &mcam);
+	rc = roc_npc_mcam_alloc_entry(npc, &mcam, NULL, prio, &resp_count);
+	if (rc || resp_count == 0)
+		goto mcam_alloc_failed;
+
+	mcam.enable = true;
+	rc = roc_npc_mcam_write_entry(npc, &mcam);
+	if (rc < 0)
+		goto mcam_write_failed;
+
+	dev->ip_reass_rule_id = mcam.mcam_id;
+	dev->ip_reass_en = true;
+	return 0;
+
+mcam_write_failed:
+	rc |= roc_npc_mcam_free(npc, &mcam);
+	if (rc)
+		return rc;
+mcam_alloc_failed:
+	return -EIO;
+}
+
+int
+cnxk_nix_inline_inbound_setup(struct cnxk_eth_dev *dev)
 {
 	struct roc_nix *nix = &dev->nix;
-	int i, rc = 0;
+	int rc = 0;

-	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
-		/* Setup minimum SA table when inline device is used */
-		nix->ipsec_in_min_spi = dev->inb.no_inl_dev ? dev->inb.min_spi : 0;
-		nix->ipsec_in_max_spi = dev->inb.no_inl_dev ? dev->inb.max_spi : 1;
+	/* Setup minimum SA table when inline device is used */
+	nix->ipsec_in_min_spi = dev->inb.no_inl_dev ? dev->inb.min_spi : 0;
+	nix->ipsec_in_max_spi = dev->inb.no_inl_dev ? dev->inb.max_spi : 1;

-		/* Enable custom meta aura when multi-chan is used */
-		if (nix->local_meta_aura_ena && roc_nix_inl_dev_is_multi_channel() &&
-		    !dev->inb.custom_meta_aura_dis)
-			nix->custom_meta_aura_ena = true;
+	/* Enable custom meta aura when multi-chan is used */
+	if (nix->local_meta_aura_ena && roc_nix_inl_dev_is_multi_channel() &&
+	    !dev->inb.custom_meta_aura_dis)
+		nix->custom_meta_aura_ena = true;

-		/* Setup Inline Inbound */
-		rc = roc_nix_inl_inb_init(nix);
-		if (rc) {
-			plt_err("Failed to initialize nix inline inb, rc=%d",
+	/* Setup Inline Inbound */
+	rc = roc_nix_inl_inb_init(nix);
+	if (rc) {
+		plt_err("Failed to initialize nix inline inb, rc=%d",
 				rc);
-			return rc;
-		}
+		return rc;
+	}

-		/* By default pick using inline device for poll mode.
-		 * Will be overridden when event mode rq's are setup.
-		 */
-		cnxk_nix_inb_mode_set(dev, !dev->inb.no_inl_dev);
+	return 0;
+}

-		/* Allocate memory to be used as dptr for CPT ucode
-		 * WRITE_SA op.
-		 */
-		dev->inb.sa_dptr =
-			plt_zmalloc(ROC_NIX_INL_OT_IPSEC_INB_HW_SZ, 0);
-		if (!dev->inb.sa_dptr) {
-			plt_err("Couldn't allocate memory for SA dptr");
-			rc = -ENOMEM;
+static int
+nix_security_setup(struct cnxk_eth_dev *dev)
+{
+	struct roc_nix *nix = &dev->nix;
+	int i, rc = 0;
+
+	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
+		rc = cnxk_nix_inline_inbound_setup(dev);
+		if (rc)
+			return rc;
+		rc = cnxk_nix_inline_inbound_mode_setup(dev);
+		if (rc)
 			goto cleanup;
-		}
-		dev->inb.inl_dev_q = roc_nix_inl_dev_qptr_get(0);
 	}

 	if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY ||
@@ -365,6 +484,22 @@ nix_meter_fini(struct cnxk_eth_dev *dev)
 	return 0;
 }

+int
+cnxk_nix_inl_inb_fini(struct cnxk_eth_dev *dev)
+{
+	struct roc_nix *nix = &dev->nix;
+	int rc;
+
+	if (dev->inb.sa_dptr) {
+		plt_free(dev->inb.sa_dptr);
+		dev->inb.sa_dptr = NULL;
+	}
+	rc = roc_nix_inl_inb_fini(nix);
+	if (rc)
+		plt_err("Failed to cleanup nix inline inb, rc=%d", rc);
+	return rc;
+}
+
 static int
 nix_security_release(struct cnxk_eth_dev *dev)
 {
@@ -374,7 +509,7 @@ nix_security_release(struct cnxk_eth_dev *dev)
 	int rc, ret = 0;

 	/* Cleanup Inline inbound */
-	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
+	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY || dev->ip_reass_en) {
 		/* Destroy inbound sessions */
 		tvar = NULL;
 		RTE_TAILQ_FOREACH_SAFE(eth_sec, &dev->inb.list, entry, tvar)
@@ -384,17 +519,14 @@ nix_security_release(struct cnxk_eth_dev *dev)
 		/* Clear lookup mem */
 		cnxk_nix_lookup_mem_sa_base_clear(dev);

-		rc = roc_nix_inl_inb_fini(nix);
-		if (rc)
-			plt_err("Failed to cleanup nix inline inb, rc=%d", rc);
-		ret |= rc;
+		ret |= cnxk_nix_inl_inb_fini(dev);

 		cnxk_nix_lookup_mem_metapool_clear(dev);
+	}

-		if (dev->inb.sa_dptr) {
-			plt_free(dev->inb.sa_dptr);
-			dev->inb.sa_dptr = NULL;
-		}
+	if (dev->ip_reass_en) {
+		cnxk_nix_ip_reass_rule_clr(eth_dev);
+		dev->ip_reass_en = false;
 	}

 	/* Cleanup Inline outbound */
@@ -946,7 +1078,7 @@ cnxk_nix_rx_queue_release(struct rte_eth_dev *eth_dev, uint16_t qid)
 	plt_nix_dbg("Releasing rxq %u", qid);

 	/* Release rq reference for inline dev if present */
-	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY)
+	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY || dev->ip_reass_en)
 		roc_nix_inl_dev_rq_put(rq);

 	/* Cleanup ROC RQ */
@@ -1760,6 +1892,18 @@ cnxk_nix_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qid)
 	return rc;
 }

+int
+cnxk_nix_ip_reass_rule_clr(struct rte_eth_dev *eth_dev)
+{
+	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+	struct roc_npc *npc = &dev->npc;
+
+	if (dev->ip_reass_en)
+		return roc_npc_mcam_free_entry(npc, dev->ip_reass_rule_id);
+	else
+		return 0;
+}
+
 static int
 cnxk_nix_dev_stop(struct rte_eth_dev *eth_dev)
 {
@@ -1842,7 +1986,7 @@ cnxk_nix_dev_start(struct rte_eth_dev *eth_dev)
 			return rc;
 	}

-	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
+	if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) || dev->ip_reass_en) {
 		rc = roc_nix_inl_rq_ena_dis(&dev->nix, true);
 		if (rc) {
 			plt_err("Failed to enable Inline device RQ, rc=%d", rc);
@@ -2258,6 +2402,11 @@ cnxk_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool reset)
 	/* Disable and free rte_meter entries */
 	nix_meter_fini(dev);

+	if (dev->ip_reass_en) {
+		cnxk_nix_ip_reass_rule_clr(eth_dev);
+		dev->ip_reass_en = false;
+	}
+
 	/* Disable and free rte_flow entries */
 	roc_npc_fini(&dev->npc);

diff --git a/drivers/net/cnxk/cnxk_ethdev.h b/drivers/net/cnxk/cnxk_ethdev.h
index 3d0a587406..dbac8cdc1a 100644
--- a/drivers/net/cnxk/cnxk_ethdev.h
+++ b/drivers/net/cnxk/cnxk_ethdev.h
@@ -427,6 +427,8 @@ struct cnxk_eth_dev {
 	/* Reassembly dynfield/flag offsets */
 	int reass_dynfield_off;
 	int reass_dynflag_bit;
+	uint32_t ip_reass_rule_id;
+	bool ip_reass_en;

 	/* MCS device */
 	struct cnxk_mcs_dev *mcs_dev;
@@ -645,6 +647,10 @@ int cnxk_nix_tm_set_queue_rate_limit(struct rte_eth_dev *eth_dev,
 int cnxk_nix_tm_mark_vlan_dei(struct rte_eth_dev *eth_dev, int mark_green,
 			      int mark_yellow, int mark_red,
 			      struct rte_tm_error *error);
+int cnxk_nix_ip_reass_rule_clr(struct rte_eth_dev *eth_dev);
+int cnxk_nix_ip_reass_rule_set(struct rte_eth_dev *eth_dev, uint32_t rq);
+int cnxk_nix_inl_inb_fini(struct cnxk_eth_dev *dev);
+
 int cnxk_nix_tm_mark_ip_ecn(struct rte_eth_dev *eth_dev, int mark_green,
 			    int mark_yellow, int mark_red,
 			    struct rte_tm_error *error);
@@ -732,11 +738,16 @@ int cnxk_nix_lookup_mem_metapool_set(struct cnxk_eth_dev *dev);
 int cnxk_nix_lookup_mem_metapool_clear(struct cnxk_eth_dev *dev);
 int cnxk_nix_lookup_mem_bufsize_set(struct cnxk_eth_dev *dev, uint64_t size);
 int cnxk_nix_lookup_mem_bufsize_clear(struct cnxk_eth_dev *dev);
+
 __rte_internal
 int cnxk_nix_inb_mode_set(struct cnxk_eth_dev *dev, bool use_inl_dev);
+
 __rte_internal
 void cnxk_ethdev_rx_offload_cb_register(cnxk_ethdev_rx_offload_cb_t cb);

+int cnxk_nix_inline_inbound_setup(struct cnxk_eth_dev *dev);
+int cnxk_nix_inline_inbound_mode_setup(struct cnxk_eth_dev *dev);
+
 struct cnxk_eth_sec_sess *cnxk_eth_sec_sess_get_by_sa_idx(struct cnxk_eth_dev *dev,
 							  uint32_t sa_idx, bool inb);
 struct cnxk_eth_sec_sess *
--
2.34.1


^ permalink raw reply related	[flat|nested] 27+ messages in thread

* [PATCH v3 2/8] net/cnxk: support IPsec Rx inject for cn20k
  2026-02-27  4:37 ` [PATCH v3 1/8] net/cnxk: support of plain packet reassembly Rahul Bhansali
@ 2026-02-27  4:37   ` Rahul Bhansali
  2026-02-27  4:37   ` [PATCH v3 3/8] common/cnxk: update platform features Rahul Bhansali
                     ` (5 subsequent siblings)
  6 siblings, 0 replies; 27+ messages in thread
From: Rahul Bhansali @ 2026-02-27  4:37 UTC (permalink / raw)
  To: dev, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori,
	Satha Rao, Harman Kalra
  Cc: jerinj, Rahul Bhansali

Adds support for IPsec Rx inject handling on the cn20k platform.

Signed-off-by: Rahul Bhansali <rbhansali@marvell.com>
---
Changes in v2: fix release notes.
Changes in v3: no change.

 doc/guides/rel_notes/release_26_03.rst |   1 +
 drivers/net/cnxk/cn20k_ethdev_sec.c    |  50 +++++++
 drivers/net/cnxk/cn20k_rx.h            | 174 +++++++++++++++++++++++++
 3 files changed, 225 insertions(+)

diff --git a/doc/guides/rel_notes/release_26_03.rst b/doc/guides/rel_notes/release_26_03.rst
index b1f9b3c82b..62246881d1 100644
--- a/doc/guides/rel_notes/release_26_03.rst
+++ b/doc/guides/rel_notes/release_26_03.rst
@@ -81,6 +81,7 @@ New Features

   * Added out-of-place support for CN20K SoC.
   * Added plain packet reassembly support for CN20K SoC.
+  * Added IPsec Rx inject support for CN20K SoC.

 * **Updated ZTE zxdh ethernet driver.**

diff --git a/drivers/net/cnxk/cn20k_ethdev_sec.c b/drivers/net/cnxk/cn20k_ethdev_sec.c
index c6a51f99f5..e406f0e879 100644
--- a/drivers/net/cnxk/cn20k_ethdev_sec.c
+++ b/drivers/net/cnxk/cn20k_ethdev_sec.c
@@ -1172,6 +1172,54 @@ eth_sec_caps_add(struct rte_security_capability eth_sec_caps[], uint32_t *idx,
 	*idx += nb_caps;
 }

+static uint16_t __rte_hot
+cn20k_eth_sec_inb_rx_inject(void *device, struct rte_mbuf **pkts,
+			    struct rte_security_session **sess, uint16_t nb_pkts)
+{
+	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
+	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+
+	return cn20k_nix_inj_pkts(sess, &dev->inj_cfg, pkts, nb_pkts);
+}
+
+static int
+cn20k_eth_sec_rx_inject_config(void *device, uint16_t port_id, bool enable)
+{
+	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
+	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+	uint64_t channel, pf_func, inj_match_id = 0xFFFFUL;
+	struct cnxk_ethdev_inj_cfg *inj_cfg;
+	struct roc_nix *nix = &dev->nix;
+	struct roc_cpt_lf *inl_lf;
+	uint64_t sa_base;
+
+	if (!rte_eth_dev_is_valid_port(port_id))
+		return -EINVAL;
+
+	if (eth_dev->data->dev_started || !eth_dev->data->dev_configured)
+		return -EBUSY;
+
+	if (!roc_nix_inl_inb_rx_inject_enable(nix, dev->inb.inl_dev))
+		return -ENOTSUP;
+
+	roc_idev_nix_rx_inject_set(port_id, enable);
+
+	inl_lf = roc_nix_inl_inb_inj_lf_get(nix);
+	if (!inl_lf)
+		return -ENOTSUP;
+	sa_base = roc_nix_inl_inb_sa_base_get(nix, dev->inb.inl_dev);
+
+	inj_cfg = &dev->inj_cfg;
+	inj_cfg->sa_base = sa_base | eth_dev->data->port_id;
+	inj_cfg->io_addr = inl_lf->io_addr;
+	inj_cfg->lmt_base = nix->lmt_base;
+	channel = roc_nix_get_base_chan(nix);
+	pf_func = roc_idev_nix_inl_dev_pffunc_get();
+	inj_cfg->cmd_w0 = pf_func << 48 | inj_match_id << 32 | channel << 4;
+
+	return 0;
+}
+
 #define CPT_LMTST_BURST 32
 static uint16_t
 cn20k_inl_dev_submit(struct roc_nix_inl_dev_q *q, void *inst, uint16_t nb_inst)
@@ -1233,6 +1281,8 @@ cn20k_eth_sec_ops_override(void)
 	cnxk_eth_sec_ops.capabilities_get = cn20k_eth_sec_capabilities_get;
 	cnxk_eth_sec_ops.session_update = cn20k_eth_sec_session_update;
 	cnxk_eth_sec_ops.session_stats_get = cn20k_eth_sec_session_stats_get;
+	cnxk_eth_sec_ops.rx_inject_configure = cn20k_eth_sec_rx_inject_config;
+	cnxk_eth_sec_ops.inb_pkt_rx_inject = cn20k_eth_sec_inb_rx_inject;

 	/* Update platform specific rte_pmd_cnxk ops */
 	cnxk_pmd_ops.inl_dev_submit = cn20k_inl_dev_submit;
diff --git a/drivers/net/cnxk/cn20k_rx.h b/drivers/net/cnxk/cn20k_rx.h
index d6c217cdf5..f8fa6de2b9 100644
--- a/drivers/net/cnxk/cn20k_rx.h
+++ b/drivers/net/cnxk/cn20k_rx.h
@@ -890,6 +890,169 @@ cn20k_nix_flush_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pk

 #if defined(RTE_ARCH_ARM64)

+static __rte_always_inline uint16_t
+cn20k_nix_rx_inj_prepare_mseg(struct rte_mbuf *m, uint64_t *cmd)
+{
+	union nix_send_sg_s *sg, l_sg;
+	struct rte_mbuf *m_next;
+	uint16_t segdw, nb_segs;
+	uint64_t len, dlen;
+	uint64_t *slist;
+
+	sg = (union nix_send_sg_s *)cmd;
+	l_sg.u = sg->u;
+	l_sg.u &= 0xC00000000000000; /* LD_TYPE */
+	l_sg.subdc = NIX_SUBDC_SG;
+	nb_segs = m->nb_segs;
+	len = m->pkt_len;
+	slist = &cmd[1];
+
+	/* Fill mbuf segments */
+	do {
+		*slist = rte_pktmbuf_iova(m);
+		dlen = m->data_len;
+		len -= dlen;
+
+		/* Set the segment length */
+		l_sg.u |= ((uint64_t)dlen << (l_sg.segs << 4));
+		l_sg.segs += 1;
+		slist++;
+		nb_segs--;
+		if (l_sg.segs > 2 && nb_segs) {
+			sg->u = l_sg.u;
+			/* Next SG subdesc */
+			sg = (union nix_send_sg_s *)slist;
+			l_sg.u = sg->u;
+			l_sg.u &= 0xC00000000000000; /* LD_TYPE */
+			l_sg.subdc = NIX_SUBDC_SG;
+			slist++;
+		}
+		m_next = m->next;
+		m = m_next;
+	} while (nb_segs);
+
+	/* Add remaining bytes of data to last seg */
+	if (len) {
+		uint8_t shft = (l_sg.subdc == NIX_SUBDC_SG) ? ((l_sg.segs - 1) << 4) : 0;
+		dlen = ((l_sg.u >> shft) & 0xFFFFULL) + len;
+		l_sg.u = l_sg.u & ~(0xFFFFULL << shft);
+		l_sg.u |= dlen << shft;
+	}
+	/* Write the last subdc out */
+	sg->u = l_sg.u;
+
+	segdw = (uint64_t *)slist - cmd;
+	/* Roundup extra dwords to multiple of 2 */
+	segdw = (segdw >> 1) + (segdw & 0x1);
+	return segdw;
+}
+
+static __rte_always_inline uint16_t
+cn20k_nix_inj_pkts(struct rte_security_session **sess, struct cnxk_ethdev_inj_cfg *inj_cfg,
+		   struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+	uintptr_t c_lbase = inj_cfg->lmt_base;
+	struct cn20k_sec_sess_priv sess_priv;
+	uint64_t sa_base = inj_cfg->sa_base;
+	uint16_t c_lmt_id, burst, left, i;
+	uintptr_t cptres, rxphdr, dptr;
+	struct rte_mbuf *m, *last;
+	uint64_t sa, w0, gthr_sz;
+	uint8_t lnum, shft, loff;
+	uint64x2_t cmd01, cmd23;
+	uint64_t ucode_cmd[4];
+	rte_iova_t c_io_addr;
+	uint16_t segdw, segs;
+	uint64_t *laddr;
+
+	/* Get LMT base address and LMT ID as lcore id */
+	ROC_LMT_CPT_BASE_ID_GET(c_lbase, c_lmt_id);
+	c_io_addr = inj_cfg->io_addr;
+
+	sa_base &= ~0xFFFFUL;
+	left = nb_pkts;
+again:
+	burst = left > 32 ? 32 : left;
+
+	lnum = 0;
+	loff = 0;
+	shft = 16;
+
+	for (i = 0; i < burst; i++) {
+		m = tx_pkts[i];
+		sess_priv.u64 = sess[i]->fast_mdata;
+		last = rte_pktmbuf_lastseg(m);
+
+		cptres = rte_pktmbuf_mtod_offset(last, uintptr_t, last->data_len);
+		cptres += BIT_ULL(7);
+		cptres = (cptres - 1) & ~(BIT_ULL(7) - 1);
+		segs = m->nb_segs;
+
+		if (segs > 1) {
+			/* Pointer to WQE header */
+			/* Reserve 8 Dwords of WQE Hdr + Rx Parse Hdr */
+			rxphdr = cptres + 8;
+			dptr = rxphdr + 7 * 8;
+			/* Prepare Multiseg SG list */
+			segdw = cn20k_nix_rx_inj_prepare_mseg(m, (uint64_t *)(dptr));
+			*(uint64_t *)rxphdr = (uint64_t)(segdw - 1) << 12;
+			cptres += 64 + segdw * 16;
+			gthr_sz = (segs % 3) == 0 ? (segs / 3) : (segs / 3 + 1);
+			ucode_cmd[1] = dptr | (gthr_sz << 60);
+		} else {
+			dptr = (uint64_t)rte_pktmbuf_iova(m);
+			ucode_cmd[1] = dptr;
+		}
+
+		/* Prepare CPT instruction */
+		/* CPT word 0 and 1 */
+		cmd01 = vdupq_n_u64(0);
+		w0 = inj_cfg->cmd_w0 | ((uint64_t)m->l2_len - 2) << 24 | (uint64_t)m->l2_len << 16;
+		cmd01 = vsetq_lane_u64(w0, cmd01, 0);
+		cmd01 = vsetq_lane_u64(cptres, cmd01, 1);
+
+		/* CPT word 2 and 3 */
+		cmd23 = vdupq_n_u64(0);
+		/* Set PF func */
+		w0 &= 0xFFFF000000000000UL;
+		cmd23 = vsetq_lane_u64(w0, cmd23, 0);
+		cmd23 = vsetq_lane_u64(((uint64_t)m + sizeof(struct rte_mbuf)) | 1, cmd23, 1);
+
+		sa = (uintptr_t)roc_nix_inl_ow_ipsec_inb_sa(sa_base, sess_priv.sa_idx);
+		ucode_cmd[0] = (ROC_IE_OW_MAJOR_OP_PROCESS_INBOUND_IPSEC << 48 | 1UL << 54 |
+				((uint64_t)sess_priv.chksum) << 32 | (1ULL << 34) | m->pkt_len);
+
+		ucode_cmd[2] = 0;
+		ucode_cmd[3] = (ROC_CPT_DFLT_ENG_GRP_SE << 61 | 1UL << 60 | sa);
+
+		/* Move to our line */
+		laddr = LMT_OFF(c_lbase, lnum, loff ? 64 : 0);
+
+		/* Write CPT instruction to lmt line */
+		vst1q_u64(laddr, cmd01);
+		vst1q_u64((laddr + 2), cmd23);
+
+		*(__uint128_t *)(laddr + 4) = *(__uint128_t *)ucode_cmd;
+		*(__uint128_t *)(laddr + 6) = *(__uint128_t *)(ucode_cmd + 2);
+
+		loff = !loff;
+		lnum = lnum + (loff ? 0 : 1);
+		shft = shft + (loff ? 0 : 3);
+	}
+
+	left -= burst;
+	tx_pkts += burst;
+	sess += burst;
+
+	cn20k_nix_sec_steorl(c_io_addr, c_lmt_id, lnum, loff, shft);
+
+	rte_io_wmb();
+	if (left)
+		goto again;
+
+	return nb_pkts;
+}
+
 static __rte_always_inline void
 nix_sec_meta_to_mbuf(uintptr_t inb_sa, uintptr_t cpth, struct rte_mbuf **inner, uint64_t *ol_flags,
 		     const uint16_t flags, uint64x2_t *rearm, uint64_t buf_sz)
@@ -1741,6 +1904,17 @@ cn20k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts, c
 	return 0;
 }

+static __rte_always_inline uint16_t
+cn20k_nix_inj_pkts(struct rte_security_session **sess, struct cnxk_ethdev_inj_cfg *inj_cfg,
+		   struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+	RTE_SET_USED(sess);
+	RTE_SET_USED(inj_cfg);
+	RTE_SET_USED(tx_pkts);
+	RTE_SET_USED(nb_pkts);
+	return 0;
+}
+
 #endif

 #define RSS_F	  NIX_RX_OFFLOAD_RSS_F
--
2.34.1


^ permalink raw reply related	[flat|nested] 27+ messages in thread

* [PATCH v3 3/8] common/cnxk: update platform features
  2026-02-27  4:37 ` [PATCH v3 1/8] net/cnxk: support of plain packet reassembly Rahul Bhansali
  2026-02-27  4:37   ` [PATCH v3 2/8] net/cnxk: support IPsec Rx inject for cn20k Rahul Bhansali
@ 2026-02-27  4:37   ` Rahul Bhansali
  2026-02-27  4:37   ` [PATCH v3 4/8] common/cnxk: add RQ PB and WQE cache config Rahul Bhansali
                     ` (4 subsequent siblings)
  6 siblings, 0 replies; 27+ messages in thread
From: Rahul Bhansali @ 2026-02-27  4:37 UTC (permalink / raw)
  To: dev, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori,
	Satha Rao, Harman Kalra
  Cc: jerinj, Rahul Bhansali

Adds a cnf20ka platform check for the plain packet
reassembly feature.

Signed-off-by: Rahul Bhansali <rbhansali@marvell.com>
---
Changes in v2: No changes.
Changes in v3: No changes.

 drivers/common/cnxk/roc_features.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/common/cnxk/roc_features.h b/drivers/common/cnxk/roc_features.h
index 3c34041d76..57a51c4db3 100644
--- a/drivers/common/cnxk/roc_features.h
+++ b/drivers/common/cnxk/roc_features.h
@@ -117,7 +117,7 @@ roc_feature_nix_has_inl_profile(void)
 static inline bool
 roc_feature_nix_has_plain_pkt_reassembly(void)
 {
-	return roc_model_is_cn20k();
+	return roc_model_is_cn20k() && !roc_model_is_cnf20ka();
 }

 static inline bool
--
2.34.1


^ permalink raw reply related	[flat|nested] 27+ messages in thread

* [PATCH v3 4/8] common/cnxk: add RQ PB and WQE cache config
  2026-02-27  4:37 ` [PATCH v3 1/8] net/cnxk: support of plain packet reassembly Rahul Bhansali
  2026-02-27  4:37   ` [PATCH v3 2/8] net/cnxk: support IPsec Rx inject for cn20k Rahul Bhansali
  2026-02-27  4:37   ` [PATCH v3 3/8] common/cnxk: update platform features Rahul Bhansali
@ 2026-02-27  4:37   ` Rahul Bhansali
  2026-02-27  4:37   ` [PATCH v3 5/8] net/cnxk: config RQ PB and WQE caching Rahul Bhansali
                     ` (3 subsequent siblings)
  6 siblings, 0 replies; 27+ messages in thread
From: Rahul Bhansali @ 2026-02-27  4:37 UTC (permalink / raw)
  To: dev, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori,
	Satha Rao, Harman Kalra
  Cc: jerinj, Rahul Bhansali

Adds RQ PB (packet buffer) and WQE cache configuration
options.

Signed-off-by: Rahul Bhansali <rbhansali@marvell.com>
---
Changes in v2: No changes.
Changes in v3: No changes.

 drivers/common/cnxk/roc_nix.h       | 14 ++++++++++++++
 drivers/common/cnxk/roc_nix_inl.c   |  2 ++
 drivers/common/cnxk/roc_nix_queue.c | 16 ++++++++--------
 3 files changed, 24 insertions(+), 8 deletions(-)

diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
index ffa1a706f9..7bc3e1f5c6 100644
--- a/drivers/common/cnxk/roc_nix.h
+++ b/drivers/common/cnxk/roc_nix.h
@@ -34,6 +34,16 @@
 #define ROC_NIX_LSO_FORMAT_IDX_TSOV6 1
 #define ROC_NIX_LSO_FORMAT_IDX_IPV4  2

+#define ROC_NIX_RQ_MAX_PB_CACHING_VAL 3
+
+/* First aligned cache block is allocated into the LLC.
+ * All remaining cache blocks are not allocated.
+ */
+#define ROC_NIX_RQ_DEFAULT_PB_CACHING 2
+
+/* Writes of WQE data are allocated into LLC. */
+#define ROC_NIX_RQ_DEFAULT_WQE_CACHING 1
+
 enum roc_nix_rss_reta_sz {
 	ROC_NIX_RSS_RETA_SZ_64 = 64,
 	ROC_NIX_RSS_RETA_SZ_128 = 128,
@@ -448,6 +458,10 @@ struct roc_nix_rq {
 	bool spb_drop_ena;
 	/* XQE drop enable */
 	bool xqe_drop_ena;
+	/* RQ PB caching */
+	uint8_t pb_caching;
+	/* RQ WQE caching */
+	uint8_t wqe_caching;
 	/* End of Input parameters */
 	struct roc_nix *roc_nix;
 	uint64_t meta_aura_handle;
diff --git a/drivers/common/cnxk/roc_nix_inl.c b/drivers/common/cnxk/roc_nix_inl.c
index a21c40acf1..911c349604 100644
--- a/drivers/common/cnxk/roc_nix_inl.c
+++ b/drivers/common/cnxk/roc_nix_inl.c
@@ -1838,6 +1838,8 @@ roc_nix_inl_dev_rq_get(struct roc_nix_rq *rq, bool enable)
 	inl_rq->spb_ena = rq->spb_ena;
 	inl_rq->spb_aura_handle = rq->spb_aura_handle;
 	inl_rq->spb_size = rq->spb_size;
+	inl_rq->pb_caching = rq->pb_caching;
+	inl_rq->wqe_caching = rq->wqe_caching;

 	if (roc_errata_nix_no_meta_aura()) {
 		uint64_t aura_limit =
diff --git a/drivers/common/cnxk/roc_nix_queue.c b/drivers/common/cnxk/roc_nix_queue.c
index ab3a71ec60..ef9b651022 100644
--- a/drivers/common/cnxk/roc_nix_queue.c
+++ b/drivers/common/cnxk/roc_nix_queue.c
@@ -499,7 +499,7 @@ nix_rq_cn9k_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints,
 		aq->rq.sso_grp = rq->hwgrp;
 		aq->rq.ena_wqwd = 1;
 		aq->rq.wqe_skip = rq->wqe_skip;
-		aq->rq.wqe_caching = 1;
+		aq->rq.wqe_caching = rq->wqe_caching;

 		aq->rq.good_utag = rq->tag_mask >> 24;
 		aq->rq.bad_utag = rq->tag_mask >> 24;
@@ -530,7 +530,7 @@ nix_rq_cn9k_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints,
 	aq->rq.lpb_sizem1 = rq->lpb_size / 8;
 	aq->rq.lpb_sizem1 -= 1; /* Expressed in size minus one */
 	aq->rq.ena = ena;
-	aq->rq.pb_caching = 0x2; /* First cache aligned block to LLC */
+	aq->rq.pb_caching = rq->pb_caching;
 	aq->rq.xqe_imm_size = 0; /* No pkt data copy to CQE */
 	aq->rq.rq_int_ena = 0;
 	/* Many to one reduction */
@@ -616,7 +616,7 @@ nix_rq_cn10k_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints, bool cf
 		aq->rq.sso_grp = rq->hwgrp;
 		aq->rq.ena_wqwd = 1;
 		aq->rq.wqe_skip = rq->wqe_skip;
-		aq->rq.wqe_caching = 1;
+		aq->rq.wqe_caching = rq->wqe_caching;

 		aq->rq.xqe_drop_ena = 0;
 		aq->rq.good_utag = rq->tag_mask >> 24;
@@ -647,7 +647,7 @@ nix_rq_cn10k_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints, bool cf
 		aq->rq.ipsecd_drop_en = 1;
 		aq->rq.ena_wqwd = 1;
 		aq->rq.wqe_skip = rq->wqe_skip;
-		aq->rq.wqe_caching = 1;
+		aq->rq.wqe_caching = rq->wqe_caching;
 	}

 	aq->rq.lpb_aura = roc_npa_aura_handle_to_aura(rq->aura_handle);
@@ -683,7 +683,7 @@ nix_rq_cn10k_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints, bool cf
 		aq->rq.spb_ena = 0;
 	}

-	aq->rq.pb_caching = 0x2; /* First cache aligned block to LLC */
+	aq->rq.pb_caching = rq->pb_caching;
 	aq->rq.xqe_imm_size = 0; /* No pkt data copy to CQE */
 	aq->rq.rq_int_ena = 0;
 	/* Many to one reduction */
@@ -797,7 +797,7 @@ nix_rq_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints, bool cfg, boo
 		aq->rq.sso_grp = rq->hwgrp;
 		aq->rq.ena_wqwd = 1;
 		aq->rq.wqe_skip = rq->wqe_skip;
-		aq->rq.wqe_caching = 1;
+		aq->rq.wqe_caching = rq->wqe_caching;

 		aq->rq.good_utag = rq->tag_mask >> 24;
 		aq->rq.bad_utag = rq->tag_mask >> 24;
@@ -816,7 +816,7 @@ nix_rq_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints, bool cfg, boo
 		aq->rq.ipsecd_drop_en = 1;
 		aq->rq.ena_wqwd = 1;
 		aq->rq.wqe_skip = rq->wqe_skip;
-		aq->rq.wqe_caching = 1;
+		aq->rq.wqe_caching = rq->wqe_caching;
 	}

 	aq->rq.lpb_aura = roc_npa_aura_handle_to_aura(rq->aura_handle);
@@ -852,7 +852,7 @@ nix_rq_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints, bool cfg, boo
 		aq->rq.spb_ena = 0;
 	}

-	aq->rq.pb_caching = 0x2; /* First cache aligned block to LLC */
+	aq->rq.pb_caching = rq->pb_caching;
 	aq->rq.xqe_imm_size = 0; /* No pkt data copy to CQE */
 	aq->rq.rq_int_ena = 0;
 	/* Many to one reduction */
--
2.34.1


^ permalink raw reply related	[flat|nested] 27+ messages in thread

* [PATCH v3 5/8] net/cnxk: config RQ PB and WQE caching
  2026-02-27  4:37 ` [PATCH v3 1/8] net/cnxk: support of plain packet reassembly Rahul Bhansali
                     ` (2 preceding siblings ...)
  2026-02-27  4:37   ` [PATCH v3 4/8] common/cnxk: add RQ PB and WQE cache config Rahul Bhansali
@ 2026-02-27  4:37   ` Rahul Bhansali
  2026-02-27  4:37   ` [PATCH v3 6/8] net/cnxk: update SA context push size Rahul Bhansali
                     ` (2 subsequent siblings)
  6 siblings, 0 replies; 27+ messages in thread
From: Rahul Bhansali @ 2026-02-27  4:37 UTC (permalink / raw)
  To: dev, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori,
	Satha Rao, Harman Kalra
  Cc: jerinj, Rahul Bhansali

Configure the RQ's PB (packet buffer) and WQE caching with default
values.

Signed-off-by: Rahul Bhansali <rbhansali@marvell.com>
---
Changes in v2: No changes.
Changes in v3: No changes.

 drivers/net/cnxk/cnxk_eswitch.c | 2 ++
 drivers/net/cnxk/cnxk_ethdev.c  | 2 ++
 2 files changed, 4 insertions(+)

diff --git a/drivers/net/cnxk/cnxk_eswitch.c b/drivers/net/cnxk/cnxk_eswitch.c
index 6b1bfdd476..e45c7dfd07 100644
--- a/drivers/net/cnxk/cnxk_eswitch.c
+++ b/drivers/net/cnxk/cnxk_eswitch.c
@@ -389,6 +389,8 @@ cnxk_eswitch_rxq_setup(struct cnxk_eswitch_dev *eswitch_dev, uint16_t qid, uint1
 	rq->aura_handle = lpb_pool->pool_id;
 	rq->flow_tag_width = 32;
 	rq->sso_ena = false;
+	rq->pb_caching = ROC_NIX_RQ_DEFAULT_PB_CACHING;
+	rq->wqe_caching = ROC_NIX_RQ_DEFAULT_WQE_CACHING;

 	/* Calculate first mbuf skip */
 	first_skip = (sizeof(struct rte_mbuf));
diff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c
index ba8ac52b46..06d1c9b362 100644
--- a/drivers/net/cnxk/cnxk_ethdev.c
+++ b/drivers/net/cnxk/cnxk_ethdev.c
@@ -959,6 +959,8 @@ cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
 	rq->aura_handle = lpb_pool->pool_id;
 	rq->flow_tag_width = 32;
 	rq->sso_ena = false;
+	rq->pb_caching = ROC_NIX_RQ_DEFAULT_PB_CACHING;
+	rq->wqe_caching = ROC_NIX_RQ_DEFAULT_WQE_CACHING;

 	/* Calculate first mbuf skip */
 	first_skip = (sizeof(struct rte_mbuf));
--
2.34.1


^ permalink raw reply related	[flat|nested] 27+ messages in thread

* [PATCH v3 6/8] net/cnxk: update SA context push size
  2026-02-27  4:37 ` [PATCH v3 1/8] net/cnxk: support of plain packet reassembly Rahul Bhansali
                     ` (3 preceding siblings ...)
  2026-02-27  4:37   ` [PATCH v3 5/8] net/cnxk: config RQ PB and WQE caching Rahul Bhansali
@ 2026-02-27  4:37   ` Rahul Bhansali
  2026-02-27  4:37   ` [PATCH v3 7/8] net/cnxk: flow rule update for non-in-place IPsec Rahul Bhansali
  2026-02-27  4:37   ` [PATCH v3 8/8] common/cnxk: enable CPT CQ for inline IPsec inbound Rahul Bhansali
  6 siblings, 0 replies; 27+ messages in thread
From: Rahul Bhansali @ 2026-02-27  4:37 UTC (permalink / raw)
  To: dev, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori,
	Satha Rao, Harman Kalra
  Cc: jerinj, Rahul Bhansali

Reduce the SA context push size to 128 bytes for AES_GCM encryption
to improve CPT performance on the CN20K platform.
Also, correct a few cn20k-specific macros.

Signed-off-by: Rahul Bhansali <rbhansali@marvell.com>
---
Changes in v2: SA context push size change on security session update APIs also.
Changes in v3: No change.

 drivers/net/cnxk/cn20k_ethdev_sec.c | 107 +++++++++++++++++++++++++---
 1 file changed, 98 insertions(+), 9 deletions(-)

diff --git a/drivers/net/cnxk/cn20k_ethdev_sec.c b/drivers/net/cnxk/cn20k_ethdev_sec.c
index e406f0e879..eab06be68f 100644
--- a/drivers/net/cnxk/cn20k_ethdev_sec.c
+++ b/drivers/net/cnxk/cn20k_ethdev_sec.c
@@ -655,16 +655,71 @@ outb_dbg_iv_update(struct roc_ow_ipsec_outb_sa *outb_sa, const char *__iv_str)
 	}

 	/* Update source of IV */
-	outb_sa->w2.s.iv_src = ROC_IE_OT_SA_IV_SRC_FROM_SA;
+	outb_sa->w2.s.iv_src = ROC_IE_OW_SA_IV_SRC_FROM_SA;
 	free(iv_str);
 }

+static void
+cn20k_eth_sec_inb_sa_misc_fill(struct roc_ow_ipsec_inb_sa *sa,
+			       struct rte_security_ipsec_xform *ipsec_xfrm)
+{
+	struct roc_ow_ipsec_inb_ctx_update_reg *ctx;
+	size_t offset;
+
+	if (sa->w2.s.enc_type != ROC_IE_SA_ENC_AES_GCM)
+		return;
+
+	/* Update ctx push size for AES GCM */
+	offset = offsetof(struct roc_ow_ipsec_inb_sa, hmac_opad_ipad);
+	ctx = (struct roc_ow_ipsec_inb_ctx_update_reg *)((uint8_t *)sa + offset);
+	sa->w0.s.hw_ctx_off = offset / 8;
+	sa->w0.s.ctx_push_size = sa->w0.s.hw_ctx_off + 1;
+
+	if (ipsec_xfrm->life.bytes_soft_limit)
+		ctx->soft_life = ipsec_xfrm->life.bytes_soft_limit + 1;
+
+	if (ipsec_xfrm->life.packets_soft_limit)
+		ctx->soft_life = ipsec_xfrm->life.packets_soft_limit + 1;
+
+	if (ipsec_xfrm->life.bytes_hard_limit)
+		ctx->hard_life = ipsec_xfrm->life.bytes_hard_limit + 1;
+
+	if (ipsec_xfrm->life.packets_hard_limit)
+		ctx->hard_life = ipsec_xfrm->life.packets_hard_limit + 1;
+}
+
 static int
 cn20k_eth_sec_outb_sa_misc_fill(struct roc_nix *roc_nix, struct roc_ow_ipsec_outb_sa *sa,
 				void *sa_cptr, struct rte_security_ipsec_xform *ipsec_xfrm,
 				uint32_t sa_idx)
 {
+	struct roc_ow_ipsec_outb_ctx_update_reg *ctx;
 	uint64_t *ring_base, ring_addr;
+	size_t offset;
+
+	if (sa->w2.s.enc_type == ROC_IE_SA_ENC_AES_GCM) {
+		offset = offsetof(struct roc_ow_ipsec_outb_sa, hmac_opad_ipad);
+		ctx = (struct roc_ow_ipsec_outb_ctx_update_reg *)((uint8_t *)sa + offset);
+		sa->w0.s.hw_ctx_off = offset / 8;
+		sa->w0.s.ctx_push_size = sa->w0.s.hw_ctx_off + 1;
+
+		if (ipsec_xfrm->esn.value)
+			ctx->esn_val = ipsec_xfrm->esn.value - 1;
+
+		if (ipsec_xfrm->life.bytes_soft_limit)
+			ctx->soft_life = ipsec_xfrm->life.bytes_soft_limit + 1;
+
+		if (ipsec_xfrm->life.packets_soft_limit)
+			ctx->soft_life = ipsec_xfrm->life.packets_soft_limit + 1;
+
+		if (ipsec_xfrm->life.bytes_hard_limit)
+			ctx->hard_life = ipsec_xfrm->life.bytes_hard_limit + 1;
+
+		if (ipsec_xfrm->life.packets_hard_limit)
+			ctx->hard_life = ipsec_xfrm->life.packets_hard_limit + 1;
+	} else {
+		ctx = &sa->ctx;
+	}

 	if (roc_nix_inl_is_cq_ena(roc_nix))
 		goto done;
@@ -675,8 +730,8 @@ cn20k_eth_sec_outb_sa_misc_fill(struct roc_nix *roc_nix, struct roc_ow_ipsec_out
 			return -ENOTSUP;

 		ring_addr = ring_base[sa_idx >> ROC_NIX_SOFT_EXP_ERR_RING_MAX_ENTRY_LOG2];
-		sa->ctx.err_ctl.s.mode = ROC_IE_OT_ERR_CTL_MODE_RING;
-		sa->ctx.err_ctl.s.address = ring_addr >> 3;
+		ctx->err_ctl.s.mode = ROC_IE_OW_ERR_CTL_MODE_RING;
+		ctx->err_ctl.s.address = ring_addr >> 3;
 		sa->w0.s.ctx_id = ((uintptr_t)sa_cptr >> 51) & 0x1ff;
 	}
 done:
@@ -751,7 +806,7 @@ cn20k_eth_sec_session_create(void *device, struct rte_security_session_conf *con
 		uintptr_t sa;

 		PLT_STATIC_ASSERT(sizeof(struct cn20k_inb_priv_data) <
-				  ROC_NIX_INL_OT_IPSEC_INB_SW_RSVD);
+				  ROC_NIX_INL_OW_IPSEC_INB_SW_RSVD);

 		spi_mask = roc_nix_inl_inb_spi_range(nix, inl_dev, NULL, NULL);

@@ -796,6 +851,8 @@ cn20k_eth_sec_session_create(void *device, struct rte_security_session_conf *con
 			goto err;
 		}

+		cn20k_eth_sec_inb_sa_misc_fill(inb_sa_dptr, ipsec);
+
 		inb_priv = roc_nix_inl_ow_ipsec_inb_sa_sw_rsvd(inb_sa);
 		/* Back pointer to get eth_sec */
 		inb_priv->eth_sec = eth_sec;
@@ -856,7 +913,7 @@ cn20k_eth_sec_session_create(void *device, struct rte_security_session_conf *con
 		uint32_t sa_idx;

 		PLT_STATIC_ASSERT(sizeof(struct cn20k_outb_priv_data) <
-				  ROC_NIX_INL_OT_IPSEC_OUTB_SW_RSVD);
+				  ROC_NIX_INL_OW_IPSEC_OUTB_SW_RSVD);

 		/* Alloc an sa index */
 		rc = cnxk_eth_outb_sa_idx_get(dev, &sa_idx, ipsec->spi);
@@ -1065,6 +1122,9 @@ cn20k_eth_sec_session_update(void *device, struct rte_security_session *sess,
 		rc = cnxk_ow_ipsec_inb_sa_fill(inb_sa_dptr, ipsec, crypto, 0);
 		if (rc)
 			return -EINVAL;
+
+		cn20k_eth_sec_inb_sa_misc_fill(inb_sa_dptr, ipsec);
+
 		/* Use cookie for original data */
 		inb_sa_dptr->w1.s.cookie = inb_sa->w1.s.cookie;

@@ -1096,6 +1156,14 @@ cn20k_eth_sec_session_update(void *device, struct rte_security_session *sess,
 		if (rc)
 			return -EINVAL;

+		/* Fill outbound sa misc params */
+		rc = cn20k_eth_sec_outb_sa_misc_fill(&dev->nix, outb_sa_dptr, outb_sa, ipsec,
+						     eth_sec->sa_idx);
+		if (rc) {
+			plt_err("Failed to init outb sa misc params, rc=%d", rc);
+			return rc;
+		}
+
 		/* Save rlen info */
 		cnxk_ipsec_outb_rlens_get(rlens, ipsec, crypto);

@@ -1138,6 +1206,7 @@ cn20k_eth_sec_session_stats_get(void *device, struct rte_security_session *sess,
 	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
 	struct cnxk_eth_sec_sess *eth_sec;
+	size_t offset;
 	int rc;

 	eth_sec = cnxk_eth_sec_sess_get_by_sess(dev, sess);
@@ -1152,11 +1221,31 @@ cn20k_eth_sec_session_stats_get(void *device, struct rte_security_session *sess,
 	stats->protocol = RTE_SECURITY_PROTOCOL_IPSEC;

 	if (eth_sec->inb) {
-		stats->ipsec.ipackets = ((struct roc_ow_ipsec_inb_sa *)eth_sec->sa)->ctx.mib_pkts;
-		stats->ipsec.ibytes = ((struct roc_ow_ipsec_inb_sa *)eth_sec->sa)->ctx.mib_octs;
+		struct roc_ow_ipsec_inb_sa *sa = (struct roc_ow_ipsec_inb_sa *)eth_sec->sa;
+		struct roc_ow_ipsec_inb_ctx_update_reg *ctx;
+
+		if (sa->w2.s.enc_type == ROC_IE_SA_ENC_AES_GCM) {
+			offset = offsetof(struct roc_ow_ipsec_inb_sa, hmac_opad_ipad);
+			ctx = (struct roc_ow_ipsec_inb_ctx_update_reg *)((uint8_t *)sa + offset);
+		} else {
+			ctx = &sa->ctx;
+		}
+
+		stats->ipsec.ipackets = ctx->mib_pkts;
+		stats->ipsec.ibytes = ctx->mib_octs;
 	} else {
-		stats->ipsec.opackets = ((struct roc_ow_ipsec_outb_sa *)eth_sec->sa)->ctx.mib_pkts;
-		stats->ipsec.obytes = ((struct roc_ow_ipsec_outb_sa *)eth_sec->sa)->ctx.mib_octs;
+		struct roc_ow_ipsec_outb_sa *sa = (struct roc_ow_ipsec_outb_sa *)eth_sec->sa;
+		struct roc_ow_ipsec_outb_ctx_update_reg *ctx;
+
+		if (sa->w2.s.enc_type == ROC_IE_SA_ENC_AES_GCM) {
+			offset = offsetof(struct roc_ow_ipsec_outb_sa, hmac_opad_ipad);
+			ctx = (struct roc_ow_ipsec_outb_ctx_update_reg *)((uint8_t *)sa + offset);
+		} else {
+			ctx = &sa->ctx;
+		}
+
+		stats->ipsec.opackets = ctx->mib_pkts;
+		stats->ipsec.obytes = ctx->mib_octs;
 	}

 	return 0;
--
2.34.1


^ permalink raw reply related	[flat|nested] 27+ messages in thread

* [PATCH v3 7/8] net/cnxk: flow rule update for non-in-place IPsec
  2026-02-27  4:37 ` [PATCH v3 1/8] net/cnxk: support of plain packet reassembly Rahul Bhansali
                     ` (4 preceding siblings ...)
  2026-02-27  4:37   ` [PATCH v3 6/8] net/cnxk: update SA context push size Rahul Bhansali
@ 2026-02-27  4:37   ` Rahul Bhansali
  2026-02-27  4:37   ` [PATCH v3 8/8] common/cnxk: enable CPT CQ for inline IPsec inbound Rahul Bhansali
  6 siblings, 0 replies; 27+ messages in thread
From: Rahul Bhansali @ 2026-02-27  4:37 UTC (permalink / raw)
  To: dev, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori,
	Satha Rao, Harman Kalra
  Cc: jerinj, Rahul Bhansali

Updates flow rules based on the inbound non-in-place (Out-Of-Place)
configuration of an IPsec session.

Signed-off-by: Rahul Bhansali <rbhansali@marvell.com>
---
Changes in v2: No changes.
Changes in v3: No changes.

 drivers/net/cnxk/cn9k_flow.c        |  2 +-
 drivers/net/cnxk/cnxk_ethdev.h      |  6 +++---
 drivers/net/cnxk/cnxk_ethdev_sec.c  |  3 +--
 drivers/net/cnxk/cnxk_flow.c        | 31 +++++++++++++++++++++--------
 drivers/net/cnxk/cnxk_flow.h        |  8 ++++++--
 drivers/net/cnxk/cnxk_flow_common.c | 12 ++++++++++-
 drivers/net/cnxk/cnxk_rep_flow.c    |  4 ++--
 drivers/net/cnxk/rte_pmd_cnxk.h     |  1 +
 8 files changed, 48 insertions(+), 19 deletions(-)

diff --git a/drivers/net/cnxk/cn9k_flow.c b/drivers/net/cnxk/cn9k_flow.c
index ae4629ea69..c39564201d 100644
--- a/drivers/net/cnxk/cn9k_flow.c
+++ b/drivers/net/cnxk/cn9k_flow.c
@@ -18,7 +18,7 @@ cn9k_flow_create(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr,
 	int vtag_actions = 0;
 	int mark_actions;

-	flow = cnxk_flow_create_common(eth_dev, attr, pattern, actions, error, false);
+	flow = cnxk_flow_create_common(eth_dev, attr, pattern, actions, error, false, 0);
 	if (!flow)
 		return NULL;

diff --git a/drivers/net/cnxk/cnxk_ethdev.h b/drivers/net/cnxk/cnxk_ethdev.h
index dbac8cdc1a..8691acc642 100644
--- a/drivers/net/cnxk/cnxk_ethdev.h
+++ b/drivers/net/cnxk/cnxk_ethdev.h
@@ -750,9 +750,9 @@ int cnxk_nix_inline_inbound_mode_setup(struct cnxk_eth_dev *dev);

 struct cnxk_eth_sec_sess *cnxk_eth_sec_sess_get_by_sa_idx(struct cnxk_eth_dev *dev,
 							  uint32_t sa_idx, bool inb);
-struct cnxk_eth_sec_sess *
-cnxk_eth_sec_sess_get_by_sess(struct cnxk_eth_dev *dev,
-			      struct rte_security_session *sess);
+struct cnxk_eth_sec_sess *cnxk_eth_sec_sess_get_by_sess(struct cnxk_eth_dev *dev,
+							const struct rte_security_session *sess);
+
 int cnxk_nix_inl_meta_pool_cb(uint64_t *aura_handle, uintptr_t *mpool, uint32_t buf_sz,
 			      uint32_t nb_bufs, bool destroy, const char *mempool_name);
 int cnxk_nix_inl_custom_meta_pool_cb(uintptr_t pmpool, uintptr_t *mpool, const char *mempool_name,
diff --git a/drivers/net/cnxk/cnxk_ethdev_sec.c b/drivers/net/cnxk/cnxk_ethdev_sec.c
index 59a00408ad..abb50d32de 100644
--- a/drivers/net/cnxk/cnxk_ethdev_sec.c
+++ b/drivers/net/cnxk/cnxk_ethdev_sec.c
@@ -287,8 +287,7 @@ cnxk_eth_sec_sess_get_by_sa_idx(struct cnxk_eth_dev *dev, uint32_t sa_idx, bool
 }

 struct cnxk_eth_sec_sess *
-cnxk_eth_sec_sess_get_by_sess(struct cnxk_eth_dev *dev,
-			      struct rte_security_session *sess)
+cnxk_eth_sec_sess_get_by_sess(struct cnxk_eth_dev *dev, const struct rte_security_session *sess)
 {
 	struct cnxk_eth_sec_sess *eth_sec = NULL;

diff --git a/drivers/net/cnxk/cnxk_flow.c b/drivers/net/cnxk/cnxk_flow.c
index 33501310e0..c1c48eb7ab 100644
--- a/drivers/net/cnxk/cnxk_flow.c
+++ b/drivers/net/cnxk/cnxk_flow.c
@@ -465,7 +465,7 @@ cnxk_map_actions(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr,
 		 const struct rte_flow_action actions[], struct roc_npc_action in_actions[],
 		 struct roc_npc_action_sample *in_sample_actions, uint32_t *flowkey_cfg,
 		 uint16_t *dst_pf_func, uint64_t *npc_default_action, uint8_t has_tunnel_pattern,
-		 bool is_rep, uint8_t rep_pattern, uint64_t *free_allocs)
+		 bool is_rep, uint8_t rep_pattern, uint64_t *free_allocs, uint32_t flow_flags)
 {
 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
 	const struct rte_flow_action_queue *act_q = NULL;
@@ -614,6 +614,8 @@ cnxk_map_actions(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr,
 		case RTE_FLOW_ACTION_TYPE_SECURITY:
 			in_actions[i].type = ROC_NPC_ACTION_TYPE_SEC;
 			in_actions[i].conf = actions->conf;
+			in_actions[i].is_non_inp = flow_flags & CNXK_FLOW_NON_INPLACE;
+			in_actions[i].no_sec_action = flow_flags & CNXK_FLOW_NO_SEC_ACTION;
 			break;
 		case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
 			in_actions[i].type = ROC_NPC_ACTION_TYPE_VLAN_STRIP;
@@ -803,7 +805,8 @@ cnxk_map_flow_data(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr
 		   struct roc_npc_attr *in_attr, struct roc_npc_item_info in_pattern[],
 		   struct roc_npc_action in_actions[],
 		   struct roc_npc_action_sample *in_sample_actions, uint32_t *flowkey_cfg,
-		   uint16_t *dst_pf_func, uint64_t *def_action, bool is_rep, uint64_t *free_allocs)
+		   uint16_t *dst_pf_func, uint64_t *def_action, bool is_rep, uint64_t *free_allocs,
+		   uint32_t flow_flags)
 {
 	uint8_t has_tunnel_pattern = 0, rep_pattern = 0;
 	int rc;
@@ -842,14 +845,14 @@ cnxk_map_flow_data(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr

 	return cnxk_map_actions(eth_dev, attr, actions, in_actions, in_sample_actions, flowkey_cfg,
 				dst_pf_func, def_action, has_tunnel_pattern, is_rep, rep_pattern,
-				free_allocs);
+				free_allocs, flow_flags);
 }

 int
 cnxk_flow_validate_common(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr,
 			  const struct rte_flow_item pattern[],
 			  const struct rte_flow_action actions[], struct rte_flow_error *error,
-			  bool is_rep)
+			  bool is_rep, uint32_t flow_flags)
 {
 	struct roc_npc_item_info in_pattern[ROC_NPC_ITEM_TYPE_END + 1];
 	struct roc_npc_action in_actions[ROC_NPC_MAX_ACTION_COUNT];
@@ -891,7 +894,7 @@ cnxk_flow_validate_common(struct rte_eth_dev *eth_dev, const struct rte_flow_att
 	}
 	rc = cnxk_map_flow_data(eth_dev, attr, pattern, actions, &in_attr, in_pattern, in_actions,
 				&in_sample_action, &flowkey_cfg, &dst_pf_func, &npc_default_action,
-				is_rep, free_allocs);
+				is_rep, free_allocs, flow_flags);
 	if (rc) {
 		rte_flow_error_set(error, 0, RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
 				   "Failed to map flow data");
@@ -919,14 +922,26 @@ cnxk_flow_validate(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr
 		   const struct rte_flow_item pattern[], const struct rte_flow_action actions[],
 		   struct rte_flow_error *error)
 {
-	return cnxk_flow_validate_common(eth_dev, attr, pattern, actions, error, false);
+	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+	struct cnxk_eth_sec_sess *eth_sec = NULL;
+	uint32_t flow_flags = 0;
+
+	if (actions[0].type == RTE_FLOW_ACTION_TYPE_SECURITY) {
+		eth_sec = cnxk_eth_sec_sess_get_by_sess(dev, actions[0].conf);
+		if (eth_sec != NULL) {
+			flow_flags = eth_sec->inb_oop ? CNXK_FLOW_NON_INPLACE : 0;
+			flow_flags |= CNXK_FLOW_NO_SEC_ACTION;
+		}
+	}
+
+	return cnxk_flow_validate_common(eth_dev, attr, pattern, actions, error, false, flow_flags);
 }

 struct roc_npc_flow *
 cnxk_flow_create_common(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr,
 			const struct rte_flow_item pattern[],
 			const struct rte_flow_action actions[], struct rte_flow_error *error,
-			bool is_rep)
+			bool is_rep, uint32_t flow_flags)
 {
 	struct roc_npc_item_info in_pattern[ROC_NPC_ITEM_TYPE_END + 1] = {0};
 	struct roc_npc_action in_actions[ROC_NPC_MAX_ACTION_COUNT] = {0};
@@ -962,7 +977,7 @@ cnxk_flow_create_common(struct rte_eth_dev *eth_dev, const struct rte_flow_attr
 	memset(&in_attr, 0, sizeof(struct roc_npc_attr));
 	rc = cnxk_map_flow_data(eth_dev, attr, pattern, actions, &in_attr, in_pattern, in_actions,
 				&in_sample_action, &npc->flowkey_cfg_state, &dst_pf_func,
-				&npc_default_action, is_rep, free_allocs);
+				&npc_default_action, is_rep, free_allocs, flow_flags);
 	if (rc) {
 		rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
 				   "Failed to map flow data");
diff --git a/drivers/net/cnxk/cnxk_flow.h b/drivers/net/cnxk/cnxk_flow.h
index 80b8d2c36a..2986ea81d1 100644
--- a/drivers/net/cnxk/cnxk_flow.h
+++ b/drivers/net/cnxk/cnxk_flow.h
@@ -20,6 +20,9 @@ struct cnxk_rte_flow_action_info {
 	uint16_t conf_size;
 };

+#define CNXK_FLOW_NO_SEC_ACTION BIT(0)
+#define CNXK_FLOW_NON_INPLACE	BIT(1)
+
 extern const struct cnxk_rte_flow_term_info term[];

 int cnxk_flow_destroy(struct rte_eth_dev *dev, struct roc_npc_flow *flow,
@@ -29,11 +32,12 @@ struct roc_npc_flow *cnxk_flow_create_common(struct rte_eth_dev *eth_dev,
 					     const struct rte_flow_attr *attr,
 					     const struct rte_flow_item pattern[],
 					     const struct rte_flow_action actions[],
-					     struct rte_flow_error *error, bool is_rep);
+					     struct rte_flow_error *error, bool is_rep,
+					     uint32_t flow_flags);
 int cnxk_flow_validate_common(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr,
 			      const struct rte_flow_item pattern[],
 			      const struct rte_flow_action actions[], struct rte_flow_error *error,
-			      bool is_rep);
+			      bool is_rep, uint32_t flow_flags);
 int cnxk_flow_destroy_common(struct rte_eth_dev *eth_dev, struct roc_npc_flow *flow,
 			     struct rte_flow_error *error, bool is_rep);
 int cnxk_flow_flush_common(struct rte_eth_dev *eth_dev, struct rte_flow_error *error, bool is_rep);
diff --git a/drivers/net/cnxk/cnxk_flow_common.c b/drivers/net/cnxk/cnxk_flow_common.c
index 59aa920d91..14ac3b5b65 100644
--- a/drivers/net/cnxk/cnxk_flow_common.c
+++ b/drivers/net/cnxk/cnxk_flow_common.c
@@ -122,7 +122,9 @@ cnxk_flow_create(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr,
 	const struct rte_flow_action *action_rss = NULL;
 	const struct rte_flow_action_meter *mtr = NULL;
 	const struct rte_flow_action *act_q = NULL;
+	struct cnxk_eth_sec_sess *eth_sec = NULL;
 	struct roc_npc_flow *flow;
+	uint32_t flow_flags = 0;
 	void *mcs_flow = NULL;
 	uint32_t req_act = 0;
 	int i, rc;
@@ -183,7 +185,15 @@ cnxk_flow_create(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr,
 		return mcs_flow;
 	}

-	flow = cnxk_flow_create_common(eth_dev, attr, pattern, actions, error, false);
+	if (actions[0].type == RTE_FLOW_ACTION_TYPE_SECURITY) {
+		eth_sec = cnxk_eth_sec_sess_get_by_sess(dev, actions[0].conf);
+		if (eth_sec != NULL) {
+			flow_flags = eth_sec->inb_oop ? CNXK_FLOW_NON_INPLACE : 0;
+			flow_flags |= CNXK_FLOW_NO_SEC_ACTION;
+		}
+	}
+
+	flow = cnxk_flow_create_common(eth_dev, attr, pattern, actions, error, false, flow_flags);
 	if (!flow) {
 		if (mtr)
 			nix_mtr_chain_reset(eth_dev, mtr->mtr_id);
diff --git a/drivers/net/cnxk/cnxk_rep_flow.c b/drivers/net/cnxk/cnxk_rep_flow.c
index f1cf81a90c..1b013ce598 100644
--- a/drivers/net/cnxk/cnxk_rep_flow.c
+++ b/drivers/net/cnxk/cnxk_rep_flow.c
@@ -547,7 +547,7 @@ cnxk_rep_flow_create_native(struct rte_eth_dev *eth_dev, const struct rte_flow_a
 	uint16_t new_entry;
 	int rc;

-	flow = cnxk_flow_create_common(eth_dev, attr, pattern, actions, error, true);
+	flow = cnxk_flow_create_common(eth_dev, attr, pattern, actions, error, true, 0);
 	if (!flow) {
 		plt_err("Fail to create flow");
 		goto fail;
@@ -632,7 +632,7 @@ cnxk_rep_flow_validate(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *
 	}

 	if (rep_dev->native_repte)
-		return cnxk_flow_validate_common(eth_dev, attr, pattern, actions, error, true);
+		return cnxk_flow_validate_common(eth_dev, attr, pattern, actions, error, true, 0);

 	rc = process_flow_rule(rep_dev, attr, pattern, actions, &adata, CNXK_REP_MSG_FLOW_VALIDATE);
 	if (!rc || adata.u.sval < 0) {
diff --git a/drivers/net/cnxk/rte_pmd_cnxk.h b/drivers/net/cnxk/rte_pmd_cnxk.h
index b186b529fa..d344137dd5 100644
--- a/drivers/net/cnxk/rte_pmd_cnxk.h
+++ b/drivers/net/cnxk/rte_pmd_cnxk.h
@@ -82,6 +82,7 @@ struct rte_pmd_cnxk_sec_action {
 	 * XOR.
 	 */
 	enum rte_pmd_cnxk_sec_action_alg alg;
+	bool is_non_inp;
 };

 #define RTE_PMD_CNXK_CTX_MAX_CKEY_LEN	   32
--
2.34.1


^ permalink raw reply related	[flat|nested] 27+ messages in thread

* [PATCH v3 8/8] common/cnxk: enable CPT CQ for inline IPsec inbound
  2026-02-27  4:37 ` [PATCH v3 1/8] net/cnxk: support of plain packet reassembly Rahul Bhansali
                     ` (5 preceding siblings ...)
  2026-02-27  4:37   ` [PATCH v3 7/8] net/cnxk: flow rule update for non-in-place IPsec Rahul Bhansali
@ 2026-02-27  4:37   ` Rahul Bhansali
  2026-03-02  6:36     ` Jerin Jacob
  6 siblings, 1 reply; 27+ messages in thread
From: Rahul Bhansali @ 2026-02-27  4:37 UTC (permalink / raw)
  To: dev, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori,
	Satha Rao, Harman Kalra
  Cc: jerinj, Rakesh Kudurumalla

From: Rakesh Kudurumalla <rkudurumalla@marvell.com>

Added support for CPT CQ configuration for inline inbound IPsec.

Signed-off-by: Rakesh Kudurumalla <rkudurumalla@marvell.com>
---
Changes in v2: fix cleanup on configuration failure case.
Changes in v3: No change.

 drivers/common/cnxk/roc_nix_inl.c         |  8 +-
 drivers/common/cnxk/roc_nix_inl.h         |  3 +-
 drivers/common/cnxk/roc_nix_inl_dev.c     | 90 ++++++++++++++++++++---
 drivers/common/cnxk/roc_nix_inl_dev_irq.c | 19 +++--
 drivers/net/cnxk/cn20k_ethdev_sec.c       | 54 +++++++++-----
 5 files changed, 136 insertions(+), 38 deletions(-)

diff --git a/drivers/common/cnxk/roc_nix_inl.c b/drivers/common/cnxk/roc_nix_inl.c
index 911c349604..26be1adac9 100644
--- a/drivers/common/cnxk/roc_nix_inl.c
+++ b/drivers/common/cnxk/roc_nix_inl.c
@@ -486,6 +486,7 @@ nix_inl_inb_ipsec_sa_tbl_setup(struct roc_nix *roc_nix)
 	} else {
 		struct nix_rx_inl_lf_cfg_req *lf_cfg;
 		uint64_t def_cptq = 0;
+		uint64_t cpt_cq_ena = 0;

 		/* Setup device specific inb SA table */
 		lf_cfg = mbox_alloc_msg_nix_rx_inl_lf_cfg(mbox);
@@ -508,9 +509,10 @@ nix_inl_inb_ipsec_sa_tbl_setup(struct roc_nix *roc_nix)
 		if (res_addr_offset)
 			res_addr_offset |= (1UL << 56);

+		cpt_cq_ena = (uint64_t)inl_dev->cpt_cq_ena << 63;
 		lf_cfg->enable = 1;
 		lf_cfg->profile_id = profile_id; /* IPsec profile is 0th one */
-		lf_cfg->rx_inline_sa_base = (uintptr_t)nix->inb_sa_base[profile_id];
+		lf_cfg->rx_inline_sa_base = (uintptr_t)nix->inb_sa_base[profile_id] | cpt_cq_ena;
 		lf_cfg->rx_inline_cfg0 =
 			((def_cptq << 57) | res_addr_offset | ((uint64_t)SSO_TT_ORDERED << 44) |
 			 (sa_pow2_sz << 16) | lenm1_max);
@@ -588,6 +590,7 @@ nix_inl_reass_inb_sa_tbl_setup(struct roc_nix *roc_nix)
 	uint64_t max_sa = 1, sa_pow2_sz;
 	uint64_t sa_idx_w, lenm1_max;
 	uint64_t res_addr_offset = 0;
+	uint64_t cpt_cq_ena = 0;
 	uint64_t def_cptq = 0;
 	size_t inb_sa_sz = 1;
 	uint8_t profile_id;
@@ -637,9 +640,10 @@ nix_inl_reass_inb_sa_tbl_setup(struct roc_nix *roc_nix)
 			res_addr_offset |= (1UL << 56);
 	}

+	cpt_cq_ena = (uint64_t)inl_dev->cpt_cq_ena << 63;
 	lf_cfg->enable = 1;
 	lf_cfg->profile_id = profile_id;
-	lf_cfg->rx_inline_sa_base = (uintptr_t)nix->inb_sa_base[profile_id];
+	lf_cfg->rx_inline_sa_base = (uintptr_t)nix->inb_sa_base[profile_id] | cpt_cq_ena;
 	lf_cfg->rx_inline_cfg0 =
 		((def_cptq << 57) | res_addr_offset | ((uint64_t)SSO_TT_ORDERED << 44) |
 		 (sa_pow2_sz << 16) | lenm1_max);
diff --git a/drivers/common/cnxk/roc_nix_inl.h b/drivers/common/cnxk/roc_nix_inl.h
index 596f12d1c7..d1a08a4495 100644
--- a/drivers/common/cnxk/roc_nix_inl.h
+++ b/drivers/common/cnxk/roc_nix_inl.h
@@ -44,7 +44,8 @@
 #define ROC_NIX_INL_RXC_QUE_BLK_THR 0x40UL

 enum nix_inl_event_type {
-	NIX_INL_CPT_CQ = 1,
+	NIX_INL_INB_CPT_CQ = 1,
+	NIX_INL_OUTB_CPT_CQ,
 	NIX_INL_SSO,
 	NIX_INL_SOFT_EXPIRY_THRD,
 };
diff --git a/drivers/common/cnxk/roc_nix_inl_dev.c b/drivers/common/cnxk/roc_nix_inl_dev.c
index 35528efa46..246dd4612f 100644
--- a/drivers/common/cnxk/roc_nix_inl_dev.c
+++ b/drivers/common/cnxk/roc_nix_inl_dev.c
@@ -382,6 +382,7 @@ nix_inl_nix_ipsec_cfg(struct nix_inl_dev *inl_dev, bool ena)
 		struct nix_rx_inl_lf_cfg_req *lf_cfg;
 		uint64_t res_addr_offset;
 		uint64_t def_cptq;
+		uint64_t cpt_cq_ena;

 		lf_cfg = mbox_alloc_msg_nix_rx_inl_lf_cfg(mbox);
 		if (lf_cfg == NULL) {
@@ -401,7 +402,9 @@ nix_inl_nix_ipsec_cfg(struct nix_inl_dev *inl_dev, bool ena)
 		lf_cfg->profile_id = inl_dev->ipsec_prof_id;
 		if (ena) {
 			lf_cfg->enable = 1;
-			lf_cfg->rx_inline_sa_base = (uintptr_t)inl_dev->inb_sa_base[profile_id];
+			cpt_cq_ena = (uint64_t)inl_dev->cpt_cq_ena << 63;
+			lf_cfg->rx_inline_sa_base =
+				(uintptr_t)inl_dev->inb_sa_base[profile_id] | (cpt_cq_ena);
 			lf_cfg->rx_inline_cfg0 =
 				((def_cptq << 57) | res_addr_offset |
 				 ((uint64_t)SSO_TT_ORDERED << 44) | (sa_pow2_sz << 16) | lenm1_max);
@@ -482,13 +485,33 @@ nix_inl_cpt_setup(struct nix_inl_dev *inl_dev, bool inl_dev_sso)
 lf_fini:
 	for (i = 0; i < inl_dev->nb_cptlf; i++) {
 		struct roc_cpt_lf *lf = &inl_dev->cpt_lf[i];
-		cpt_lf_fini(lf, lf->cpt_cq_ena);
+		cpt_lf_fini(lf, false);
 	}
 lf_free:
 	rc |= cpt_lfs_free(dev);
 	return rc;
 }

+static int
+nix_inl_cpt_cq_inb_release(struct nix_inl_dev *inl_dev)
+{
+	int i;
+
+	if (!inl_dev || !inl_dev->cpt_cq_ena)
+		return 0;
+	for (i = 0; i < inl_dev->nb_inb_cptlfs; i++) {
+		uint8_t slot_id = inl_dev->inb_cpt_lf_id + i;
+		struct roc_cpt_lf *lf = &inl_dev->cpt_lf[slot_id];
+
+		if (lf->cpt_cq_ena) {
+			cpt_lf_cq_fini(lf);
+			cpt_lf_unregister_irqs(lf, cpt_lf_misc_irq, nix_inl_cpt_done_irq);
+		}
+	}
+
+	return 0;
+}
+
 static int
 nix_inl_cpt_release(struct nix_inl_dev *inl_dev)
 {
@@ -625,6 +648,7 @@ nix_inl_nix_profile_config(struct nix_inl_dev *inl_dev, uint8_t profile_id)
 	uint64_t max_sa, sa_w, sa_pow2_sz, lenm1_max;
 	struct nix_rx_inl_lf_cfg_req *lf_cfg;
 	uint64_t res_addr_offset;
+	uint64_t cpt_cq_ena;
 	uint64_t def_cptq;
 	size_t inb_sa_sz;
 	void *sa;
@@ -665,7 +689,8 @@ nix_inl_nix_profile_config(struct nix_inl_dev *inl_dev, uint8_t profile_id)

 	lf_cfg->enable = 1;
 	lf_cfg->profile_id = profile_id;
-	lf_cfg->rx_inline_sa_base = (uintptr_t)inl_dev->inb_sa_base[profile_id];
+	cpt_cq_ena = (uint64_t)inl_dev->cpt_cq_ena << 63;
+	lf_cfg->rx_inline_sa_base = (uintptr_t)inl_dev->inb_sa_base[profile_id] | cpt_cq_ena;
 	lf_cfg->rx_inline_cfg0 =
 		((def_cptq << 57) | res_addr_offset | ((uint64_t)SSO_TT_ORDERED << 44) |
 		 (sa_pow2_sz << 16) | lenm1_max);
@@ -716,6 +741,42 @@ nix_inl_nix_profile_release(struct nix_inl_dev *inl_dev, uint8_t profile_id)
 	return rc;
 }

+static int
+nix_inl_cpt_cq_inb_setup(struct nix_inl_dev *inl_dev)
+{
+	int i, rc;
+
+	if (!inl_dev->cpt_cq_ena)
+		return 0;
+
+	for (i = 0; i < inl_dev->nb_inb_cptlfs; i++) {
+		uint8_t slot_id = inl_dev->inb_cpt_lf_id + i;
+		struct roc_cpt_lf *lf = &inl_dev->cpt_lf[slot_id];
+
+		lf->dq_ack_ena = true;
+		lf->cpt_cq_ena = true;
+		lf->cq_entry_size = 0;
+		lf->cq_all = 0;
+		lf->cq_size = lf->nb_desc;
+		lf->dev = &inl_dev->dev;
+		lf->cq_head = 1;
+
+		rc = cpt_lf_cq_init(lf);
+		if (rc)
+			return rc;
+
+		rc = cpt_lf_register_irqs(lf, cpt_lf_misc_irq, nix_inl_cpt_done_irq);
+		if (rc) {
+			cpt_lf_cq_fini(lf);
+			return rc;
+		}
+
+		roc_cpt_cq_enable(lf);
+	}
+
+	return 0;
+}
+
 static int
 nix_inl_nix_reass_setup(struct nix_inl_dev *inl_dev)
 {
@@ -1451,11 +1512,17 @@ roc_nix_inl_dev_init(struct roc_nix_inl_dev *roc_inl_dev)
 	if (rc)
 		goto sso_release;

+	if (roc_feature_nix_has_cpt_cq_support()) {
+		rc = nix_inl_cpt_cq_inb_setup(inl_dev);
+		if (rc)
+			goto cpt_release;
+	}
+
 	/* Setup device specific inb SA table */
 	rc = nix_inl_nix_ipsec_cfg(inl_dev, true);
 	if (rc) {
 		plt_err("Failed to setup NIX Inbound SA conf, rc=%d", rc);
-		goto cpt_release;
+		goto cpt_cq_inb_release;
 	}

 	/* Setup Reassembly */
@@ -1464,20 +1531,20 @@ roc_nix_inl_dev_init(struct roc_nix_inl_dev *roc_inl_dev)

 		rc = nix_inl_nix_reass_setup(inl_dev);
 		if (rc)
-			goto cpt_release;
+			goto cpt_cq_inb_release;
 	}

 	if (inl_dev->set_soft_exp_poll) {
 		rc = nix_inl_outb_poll_thread_setup(inl_dev);
 		if (rc)
-			goto cpt_release;
+			goto cpt_cq_inb_release;
 	}

 	/* Perform selftest if asked for */
 	if (inl_dev->selftest) {
 		rc = nix_inl_selftest();
 		if (rc)
-			goto cpt_release;
+			goto cpt_cq_inb_release;
 	}
 	inl_dev->max_ipsec_rules = roc_inl_dev->max_ipsec_rules;

@@ -1486,14 +1553,14 @@ roc_nix_inl_dev_init(struct roc_nix_inl_dev *roc_inl_dev)
 			plt_zmalloc(sizeof(int) * inl_dev->max_ipsec_rules, PLT_CACHE_LINE_SIZE);
 		if (inl_dev->ipsec_index == NULL) {
 			rc = NPC_ERR_NO_MEM;
-			goto cpt_release;
+			goto cpt_cq_inb_release;
 		}
 		rc = npc_mcam_alloc_entries(inl_dev->dev.mbox, inl_dev->max_ipsec_rules,
 					    inl_dev->ipsec_index, inl_dev->max_ipsec_rules,
 					    NPC_MCAM_HIGHER_PRIO, &resp_count, 1);
 		if (rc) {
 			plt_free(inl_dev->ipsec_index);
-			goto cpt_release;
+			goto cpt_cq_inb_release;
 		}

 		start_index = inl_dev->ipsec_index[0];
@@ -1507,6 +1574,8 @@ roc_nix_inl_dev_init(struct roc_nix_inl_dev *roc_inl_dev)
 	idev->nix_inl_dev = inl_dev;

 	return 0;
+cpt_cq_inb_release:
+	rc |= nix_inl_cpt_cq_inb_release(inl_dev);
 cpt_release:
 	rc |= nix_inl_cpt_release(inl_dev);
 sso_release:
@@ -1558,8 +1627,9 @@ roc_nix_inl_dev_fini(struct roc_nix_inl_dev *roc_inl_dev)
 	/* Flush Inbound CTX cache entries */
 	nix_inl_cpt_ctx_cache_sync(inl_dev);

+	rc = nix_inl_cpt_cq_inb_release(inl_dev);
 	/* Release CPT */
-	rc = nix_inl_cpt_release(inl_dev);
+	rc |= nix_inl_cpt_release(inl_dev);

 	/* Release SSO */
 	rc |= nix_inl_sso_release(inl_dev);
diff --git a/drivers/common/cnxk/roc_nix_inl_dev_irq.c b/drivers/common/cnxk/roc_nix_inl_dev_irq.c
index 89155a1f7d..30986e780a 100644
--- a/drivers/common/cnxk/roc_nix_inl_dev_irq.c
+++ b/drivers/common/cnxk/roc_nix_inl_dev_irq.c
@@ -49,10 +49,11 @@ static void
 nix_inl_cpt_cq_cb(struct roc_cpt_lf *lf)
 {
 	struct roc_nix *roc_nix = (struct roc_nix *)lf->dev->roc_nix;
+	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
 	struct idev_cfg *idev = idev_get_cfg();
 	uint32_t port_id = roc_nix->port_id;
 	struct nix_inl_dev *inl_dev = NULL;
-	struct roc_ow_ipsec_outb_sa *sa;
+	enum nix_inl_event_type cq_type;
 	union cpt_lf_cq_base cq_base;
 	union cpt_lf_cq_ptr cq_ptr;
 	struct cpt_cq_s *cq_s;
@@ -60,6 +61,7 @@ nix_inl_cpt_cq_cb(struct roc_cpt_lf *lf)
 	uint32_t count, head;
 	uint32_t nq_ptr;
 	uint64_t i;
+	void *sa;

 	if (idev)
 		inl_dev = idev->nix_inl_dev;
@@ -75,23 +77,30 @@ nix_inl_cpt_cq_cb(struct roc_cpt_lf *lf)
 	count = cq_ptr.s.count;
 	nq_ptr = cq_ptr.s.nq_ptr;

+	if (lf->dev == &inl_dev->dev)
+		cq_type = NIX_INL_INB_CPT_CQ;
+	else if (lf->dev == &nix->dev)
+		cq_type = NIX_INL_OUTB_CPT_CQ;
+	else
+		return;
+
 	for (i = 0; i < count; i++) {
 		cq_s = (struct cpt_cq_s *)(uintptr_t)(((cq_base.s.addr << 7)) + (head << 5));

 		if (cq_s->w0.s.uc_compcode && cq_s->w0.s.compcode) {
 			switch (cq_s->w2.s.fmt & fmt_msk) {
 			case WQE_PTR_CPTR:
-				sa = (struct roc_ow_ipsec_outb_sa *)cq_s->w1.esn;
+				sa = (void *)cq_s->w1.esn;
 				break;
 			case CPTR_WQE_PTR:
-				sa = (struct roc_ow_ipsec_outb_sa *)cq_s->w3.comp_ptr;
+				sa = (void *)cq_s->w3.comp_ptr;
 				break;
 			default:
 				plt_err("Invalid event Received ");
 				goto done;
 			}
 			uint64_t tmp = ~(uint32_t)0x0;
-			inl_dev->work_cb(&tmp, sa, NIX_INL_CPT_CQ, (void *)cq_s, port_id);
+			inl_dev->work_cb(&tmp, sa, cq_type, (void *)cq_s, port_id);
 		}
 done:
 		head = (head + 1) % lf->cq_size;
@@ -165,7 +174,7 @@ nix_inl_sso_hws_irq(void *param)
 void
 nix_inl_cpt_done_irq(void *param)
 {
-	struct roc_cpt_lf *lf = param;
+	struct roc_cpt_lf *lf = (struct roc_cpt_lf *)param;
 	uint64_t done_wait;
 	uint64_t intr;

diff --git a/drivers/net/cnxk/cn20k_ethdev_sec.c b/drivers/net/cnxk/cn20k_ethdev_sec.c
index eab06be68f..5d0debb81d 100644
--- a/drivers/net/cnxk/cn20k_ethdev_sec.c
+++ b/drivers/net/cnxk/cn20k_ethdev_sec.c
@@ -439,18 +439,31 @@ cnxk_pktmbuf_free_no_cache(struct rte_mbuf *mbuf)
 }

 static void
-cn20k_eth_sec_post_event(struct rte_eth_dev *eth_dev, struct roc_ow_ipsec_outb_sa *sa,
+cn20k_eth_sec_post_event(struct rte_eth_dev *eth_dev, void *sa, enum nix_inl_event_type type,
 			 uint16_t uc_compcode, uint16_t compcode, struct rte_mbuf *mbuf)
 {
 	struct rte_eth_event_ipsec_desc desc;
 	struct cn20k_sec_sess_priv sess_priv;
-	struct cn20k_outb_priv_data *priv;
+	struct cn20k_outb_priv_data *outb_priv;
+	struct cn20k_inb_priv_data *inb_priv;
 	static uint64_t warn_cnt;
+	uint64_t life_unit;

 	memset(&desc, 0, sizeof(desc));
-	priv = roc_nix_inl_ow_ipsec_outb_sa_sw_rsvd(sa);
 	sess_priv.u64 = 0;

+	if (type == NIX_INL_INB_CPT_CQ) {
+		struct roc_ow_ipsec_inb_sa *inb_sa = (struct roc_ow_ipsec_inb_sa *)sa;
+		inb_priv = roc_nix_inl_ow_ipsec_inb_sa_sw_rsvd(sa);
+		desc.metadata = (uint64_t)inb_priv->userdata;
+		life_unit = inb_sa->w2.s.life_unit;
+	} else {
+		struct roc_ow_ipsec_outb_sa *outb_sa = (struct roc_ow_ipsec_outb_sa *)sa;
+		outb_priv = roc_nix_inl_ow_ipsec_outb_sa_sw_rsvd(sa);
+		desc.metadata = (uint64_t)outb_priv->userdata;
+		life_unit = outb_sa->w2.s.life_unit;
+	}
+
 	if (mbuf)
 		sess_priv.u64 = *rte_security_dynfield(mbuf);

@@ -459,14 +472,14 @@ cn20k_eth_sec_post_event(struct rte_eth_dev *eth_dev, struct roc_ow_ipsec_outb_s
 		desc.subtype = RTE_ETH_EVENT_IPSEC_ESN_OVERFLOW;
 		break;
 	case ROC_IE_OW_UCC_ERR_SA_EXPIRED:
-		if (sa->w2.s.life_unit == ROC_IE_OW_SA_LIFE_UNIT_PKTS)
+		if (life_unit == ROC_IE_OW_SA_LIFE_UNIT_PKTS)
 			desc.subtype = RTE_ETH_EVENT_IPSEC_SA_PKT_HARD_EXPIRY;
 		else
 			desc.subtype = RTE_ETH_EVENT_IPSEC_SA_BYTE_HARD_EXPIRY;
 		break;
 	case ROC_IE_OW_UCC_SUCCESS_SA_SOFTEXP_FIRST:
 	case ROC_IE_OW_UCC_SUCCESS_SA_SOFTEXP_AGAIN:
-		if (sa->w2.s.life_unit == ROC_IE_OW_SA_LIFE_UNIT_PKTS)
+		if (life_unit == ROC_IE_OW_SA_LIFE_UNIT_PKTS)
 			desc.subtype = RTE_ETH_EVENT_IPSEC_SA_PKT_EXPIRY;
 		else
 			desc.subtype = RTE_ETH_EVENT_IPSEC_SA_BYTE_EXPIRY;
@@ -490,7 +503,6 @@ cn20k_eth_sec_post_event(struct rte_eth_dev *eth_dev, struct roc_ow_ipsec_outb_s
 		break;
 	}

-	desc.metadata = (uint64_t)priv->userdata;
 	rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_IPSEC, &desc);
 }

@@ -498,12 +510,15 @@ static const char *
 get_inl_event_type(enum nix_inl_event_type type)
 {
 	switch (type) {
-	case NIX_INL_CPT_CQ:
-		return "NIX_INL_CPT_CQ";
+	case NIX_INL_OUTB_CPT_CQ:
+		return "NIX_INL_OUTB_CPT_CQ";
+	case NIX_INL_INB_CPT_CQ:
+		return "NIX_INL_INB_CPT_CQ";
 	case NIX_INL_SSO:
 		return "NIX_INL_SSO";
 	case NIX_INL_SOFT_EXPIRY_THRD:
 		return "NIX_INL_SOFT_EXPIRY_THRD";
+
 	default:
 		return "Unknown event";
 	}
@@ -515,8 +530,8 @@ cn20k_eth_sec_sso_work_cb(uint64_t *gw, void *args, enum nix_inl_event_type type
 {
 	struct rte_eth_event_ipsec_desc desc;
 	struct cn20k_sec_sess_priv sess_priv;
-	struct cn20k_outb_priv_data *priv;
-	struct roc_ow_ipsec_outb_sa *sa;
+	struct cn20k_outb_priv_data *outb_priv;
+	struct roc_ow_ipsec_outb_sa *outb_sa;
 	struct cpt_cn20k_res_s *res;
 	struct rte_eth_dev *eth_dev;
 	struct cnxk_eth_dev *dev;
@@ -546,20 +561,19 @@ cn20k_eth_sec_sso_work_cb(uint64_t *gw, void *args, enum nix_inl_event_type type
 		/* Fall through */
 	default:
 		if (type) {
-			sa = (struct roc_ow_ipsec_outb_sa *)args;
-			priv = roc_nix_inl_ow_ipsec_outb_sa_sw_rsvd(sa);
-			desc.metadata = (uint64_t)priv->userdata;
 			eth_dev = &rte_eth_devices[port_id];
-			if (type == NIX_INL_CPT_CQ) {
-				struct cpt_cq_s *cqs = (struct cpt_cq_s *)cq_s;
-
-				cn20k_eth_sec_post_event(eth_dev, sa,
+			struct cpt_cq_s *cqs = (struct cpt_cq_s *)cq_s;
+			if (type < NIX_INL_SSO) {
+				cn20k_eth_sec_post_event(eth_dev, args, type,
 							 (uint16_t)cqs->w0.s.uc_compcode,
 							 (uint16_t)cqs->w0.s.compcode, NULL);
 				return;
 			}
 			if (type == NIX_INL_SOFT_EXPIRY_THRD) {
-				if (sa->w2.s.life_unit == ROC_IE_OW_SA_LIFE_UNIT_PKTS)
+				outb_sa = (struct roc_ow_ipsec_outb_sa *)args;
+				outb_priv = roc_nix_inl_ow_ipsec_outb_sa_sw_rsvd(outb_sa);
+				desc.metadata = (uint64_t)outb_priv->userdata;
+				if (outb_sa->w2.s.life_unit == ROC_IE_OW_SA_LIFE_UNIT_PKTS)
 					desc.subtype = RTE_ETH_EVENT_IPSEC_SA_PKT_EXPIRY;
 				else
 					desc.subtype = RTE_ETH_EVENT_IPSEC_SA_BYTE_EXPIRY;
@@ -596,9 +610,9 @@ cn20k_eth_sec_sso_work_cb(uint64_t *gw, void *args, enum nix_inl_event_type type
 	sess_priv.u64 = *rte_security_dynfield(mbuf);

 	sa_base = dev->outb.sa_base;
-	sa = roc_nix_inl_ow_ipsec_outb_sa(sa_base, sess_priv.sa_idx);
+	outb_sa = roc_nix_inl_ow_ipsec_outb_sa(sa_base, sess_priv.sa_idx);

-	cn20k_eth_sec_post_event(eth_dev, sa, res->uc_compcode, res->compcode, mbuf);
+	cn20k_eth_sec_post_event(eth_dev, outb_sa, type, res->uc_compcode, res->compcode, mbuf);

 	cnxk_pktmbuf_free_no_cache(mbuf);
 }
--
2.34.1


^ permalink raw reply related	[flat|nested] 27+ messages in thread

* Re: [PATCH v3 8/8] common/cnxk: enable CPT CQ for inline IPsec inbound
  2026-02-27  4:37   ` [PATCH v3 8/8] common/cnxk: enable CPT CQ for inline IPsec inbound Rahul Bhansali
@ 2026-03-02  6:36     ` Jerin Jacob
  0 siblings, 0 replies; 27+ messages in thread
From: Jerin Jacob @ 2026-03-02  6:36 UTC (permalink / raw)
  To: Rahul Bhansali
  Cc: dev, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori,
	Satha Rao, Harman Kalra, jerinj, Rakesh Kudurumalla

On Fri, Feb 27, 2026 at 10:08 AM Rahul Bhansali <rbhansali@marvell.com> wrote:
>
> From: Rakesh Kudurumalla <rkudurumalla@marvell.com>
>
> Added support of CPT CQ configurations for inline inbound IPsec.
>
> Signed-off-by: Rakesh Kudurumalla <rkudurumalla@marvell.com>
> ---
> Changes in v2: fix cleanup on configuration failure case.
> Changes in v3: No change.


Series applied to dpdk-next-net-mrvl/for-main. Thanks

^ permalink raw reply	[flat|nested] 27+ messages in thread

end of thread, other threads:[~2026-03-02  6:37 UTC | newest]

Thread overview: 27+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2026-02-19  9:08 [PATCH 1/8] net/cnxk: support of plain packet reassembly Rahul Bhansali
2026-02-19  9:08 ` [PATCH 2/8] net/cnxk: support IPsec Rx inject for cn20k Rahul Bhansali
2026-02-19  9:08 ` [PATCH 3/8] common/cnxk: update platform features Rahul Bhansali
2026-02-19  9:08 ` [PATCH 4/8] common/cnxk: add RQ PB and WQE cache config Rahul Bhansali
2026-02-19  9:08 ` [PATCH 5/8] net/cnxk: config RQ PB and WQE caching Rahul Bhansali
2026-02-19  9:08 ` [PATCH 6/8] net/cnxk: update SA context push size Rahul Bhansali
2026-02-19  9:08 ` [PATCH 7/8] net/cnxk: flow rule update for non-in-place IPsec Rahul Bhansali
2026-02-19  9:08 ` [PATCH 8/8] common/cnxk: enable CPT CQ for inline IPSec inbound Rahul Bhansali
2026-02-26  5:05   ` Jerin Jacob
2026-02-19 18:58 ` [PATCH 1/8] net/cnxk: support of plain packet reassembly Stephen Hemminger
2026-02-26 13:17 ` [PATCH v2 " Rahul Bhansali
2026-02-26 13:17   ` [PATCH v2 2/8] net/cnxk: support IPsec Rx inject for cn20k Rahul Bhansali
2026-02-26 13:17   ` [PATCH v2 3/8] common/cnxk: update platform features Rahul Bhansali
2026-02-26 13:17   ` [PATCH v2 4/8] common/cnxk: add RQ PB and WQE cache config Rahul Bhansali
2026-02-26 13:17   ` [PATCH v2 5/8] net/cnxk: config RQ PB and WQE caching Rahul Bhansali
2026-02-26 13:17   ` [PATCH v2 6/8] net/cnxk: update SA context push size Rahul Bhansali
2026-02-26 13:17   ` [PATCH v2 7/8] net/cnxk: flow rule update for non-in-place IPsec Rahul Bhansali
2026-02-26 13:17   ` [PATCH v2 8/8] common/cnxk: enable CPT CQ for inline IPsec inbound Rahul Bhansali
2026-02-27  4:37 ` [PATCH v3 1/8] net/cnxk: support of plain packet reassembly Rahul Bhansali
2026-02-27  4:37   ` [PATCH v3 2/8] net/cnxk: support IPsec Rx inject for cn20k Rahul Bhansali
2026-02-27  4:37   ` [PATCH v3 3/8] common/cnxk: update platform features Rahul Bhansali
2026-02-27  4:37   ` [PATCH v3 4/8] common/cnxk: add RQ PB and WQE cache config Rahul Bhansali
2026-02-27  4:37   ` [PATCH v3 5/8] net/cnxk: config RQ PB and WQE caching Rahul Bhansali
2026-02-27  4:37   ` [PATCH v3 6/8] net/cnxk: update SA context push size Rahul Bhansali
2026-02-27  4:37   ` [PATCH v3 7/8] net/cnxk: flow rule update for non-in-place IPsec Rahul Bhansali
2026-02-27  4:37   ` [PATCH v3 8/8] common/cnxk: enable CPT CQ for inline IPsec inbound Rahul Bhansali
2026-03-02  6:36     ` Jerin Jacob

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox