From: Bruce Richardson <bruce.richardson@intel.com>
To: dev@dpdk.org
Cc: Bruce Richardson <bruce.richardson@intel.com>,
	Ciara Loftus <ciara.loftus@intel.com>,
	Praveen Shetty <praveen.shetty@intel.com>,
	Vladimir Medvedkin <vladimir.medvedkin@intel.com>,
	Anatoly Burakov <anatoly.burakov@intel.com>,
	Jingjing Wu <jingjing.wu@intel.com>
Subject: [PATCH v5 03/35] net/intel: use common Tx ring structure
Date: Wed, 11 Feb 2026 18:12:32 +0000	[thread overview]
Message-ID: <20260211181309.2838042-4-bruce.richardson@intel.com> (raw)
In-Reply-To: <20260211181309.2838042-1-bruce.richardson@intel.com>

Now that we have a common descriptor type, there is no need for separate
per-driver ring pointers in the Tx queue union: merge all of them, apart
from the ixgbe one, into a single common pointer.
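
As an illustration, the ring union in struct ci_tx_queue reduces to two
members after this patch (a minimal sketch, abbreviated from the change
below; surrounding fields omitted):

	union { /* TX ring virtual address */
		/* shared by i40e, iavf, ice and idpf */
		volatile struct ci_tx_desc *ci_tx_ring;
		/* ixgbe keeps its own descriptor layout */
		volatile union ixgbe_adv_tx_desc *ixgbe_tx_ring;
	};

	/* drivers index the shared ring exactly as before, e.g. */
	volatile struct ci_tx_desc *txd = &txq->ci_tx_ring[txq->tx_tail];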

Signed-off-by: Bruce Richardson <bruce.richardson@intel.com>
Acked-by: Ciara Loftus <ciara.loftus@intel.com>
---
 drivers/net/intel/common/tx.h                 |  5 +--
 drivers/net/intel/cpfl/cpfl_rxtx.c            |  2 +-
 drivers/net/intel/i40e/i40e_fdir.c            |  6 ++--
 drivers/net/intel/i40e/i40e_rxtx.c            | 22 ++++++------
 .../net/intel/i40e/i40e_rxtx_vec_altivec.c    |  6 ++--
 drivers/net/intel/i40e/i40e_rxtx_vec_avx2.c   |  6 ++--
 drivers/net/intel/i40e/i40e_rxtx_vec_avx512.c |  6 ++--
 drivers/net/intel/i40e/i40e_rxtx_vec_common.h |  2 +-
 drivers/net/intel/i40e/i40e_rxtx_vec_neon.c   |  6 ++--
 drivers/net/intel/iavf/iavf_rxtx.c            | 14 ++++----
 drivers/net/intel/iavf/iavf_rxtx_vec_avx2.c   |  6 ++--
 drivers/net/intel/iavf/iavf_rxtx_vec_avx512.c | 12 +++----
 drivers/net/intel/iavf/iavf_rxtx_vec_common.h |  2 +-
 drivers/net/intel/ice/ice_dcf_ethdev.c        |  4 +--
 drivers/net/intel/ice/ice_rxtx.c              | 34 +++++++++----------
 drivers/net/intel/ice/ice_rxtx_vec_avx2.c     |  6 ++--
 drivers/net/intel/ice/ice_rxtx_vec_avx512.c   |  6 ++--
 drivers/net/intel/ice/ice_rxtx_vec_common.h   |  2 +-
 drivers/net/intel/idpf/idpf_common_rxtx.c     |  8 ++---
 .../net/intel/idpf/idpf_common_rxtx_avx2.c    |  6 ++--
 .../net/intel/idpf/idpf_common_rxtx_avx512.c  |  6 ++--
 drivers/net/intel/idpf/idpf_rxtx.c            |  2 +-
 drivers/net/intel/idpf/idpf_rxtx_vec_common.h |  2 +-
 23 files changed, 84 insertions(+), 87 deletions(-)

diff --git a/drivers/net/intel/common/tx.h b/drivers/net/intel/common/tx.h
index d7561a2bbb..8cf63e59ab 100644
--- a/drivers/net/intel/common/tx.h
+++ b/drivers/net/intel/common/tx.h
@@ -41,10 +41,7 @@ typedef void (*ice_tx_release_mbufs_t)(struct ci_tx_queue *txq);
 
 struct ci_tx_queue {
 	union { /* TX ring virtual address */
-		volatile struct ci_tx_desc *i40e_tx_ring;
-		volatile struct ci_tx_desc *iavf_tx_ring;
-		volatile struct ci_tx_desc *ice_tx_ring;
-		volatile struct ci_tx_desc *idpf_tx_ring;
+		volatile struct ci_tx_desc *ci_tx_ring;
 		volatile union ixgbe_adv_tx_desc *ixgbe_tx_ring;
 	};
 	volatile uint8_t *qtx_tail;               /* register address of tail */
diff --git a/drivers/net/intel/cpfl/cpfl_rxtx.c b/drivers/net/intel/cpfl/cpfl_rxtx.c
index 392a7fcc98..a4d15b7f9c 100644
--- a/drivers/net/intel/cpfl/cpfl_rxtx.c
+++ b/drivers/net/intel/cpfl/cpfl_rxtx.c
@@ -606,7 +606,7 @@ cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 	}
 
 	if (!is_splitq) {
-		txq->idpf_tx_ring = mz->addr;
+		txq->ci_tx_ring = mz->addr;
 		idpf_qc_single_tx_queue_reset(txq);
 	} else {
 		txq->desc_ring = mz->addr;
diff --git a/drivers/net/intel/i40e/i40e_fdir.c b/drivers/net/intel/i40e/i40e_fdir.c
index 605df73c9e..8a01aec0e2 100644
--- a/drivers/net/intel/i40e/i40e_fdir.c
+++ b/drivers/net/intel/i40e/i40e_fdir.c
@@ -1380,7 +1380,7 @@ i40e_find_available_buffer(struct rte_eth_dev *dev)
 		volatile struct ci_tx_desc *tmp_txdp;
 
 		tmp_tail = txq->tx_tail;
-		tmp_txdp = &txq->i40e_tx_ring[tmp_tail + 1];
+		tmp_txdp = &txq->ci_tx_ring[tmp_tail + 1];
 
 		do {
 			if ((tmp_txdp->cmd_type_offset_bsz &
@@ -1637,7 +1637,7 @@ i40e_flow_fdir_filter_programming(struct i40e_pf *pf,
 
 	PMD_DRV_LOG(INFO, "filling filter programming descriptor.");
 	fdirdp = (volatile struct i40e_filter_program_desc *)
-				(&txq->i40e_tx_ring[txq->tx_tail]);
+				(&txq->ci_tx_ring[txq->tx_tail]);
 
 	fdirdp->qindex_flex_ptype_vsi =
 			rte_cpu_to_le_32((fdir_action->rx_queue <<
@@ -1707,7 +1707,7 @@ i40e_flow_fdir_filter_programming(struct i40e_pf *pf,
 	fdirdp->fd_id = rte_cpu_to_le_32(filter->soft_id);
 
 	PMD_DRV_LOG(INFO, "filling transmit descriptor.");
-	txdp = &txq->i40e_tx_ring[txq->tx_tail + 1];
+	txdp = &txq->ci_tx_ring[txq->tx_tail + 1];
 	txdp->buffer_addr = rte_cpu_to_le_64(pf->fdir.dma_addr[txq->tx_tail >> 1]);
 
 	td_cmd = I40E_TX_DESC_CMD_EOP |
diff --git a/drivers/net/intel/i40e/i40e_rxtx.c b/drivers/net/intel/i40e/i40e_rxtx.c
index 92d49ccb79..210fc0201e 100644
--- a/drivers/net/intel/i40e/i40e_rxtx.c
+++ b/drivers/net/intel/i40e/i40e_rxtx.c
@@ -388,7 +388,7 @@ static inline int
 i40e_xmit_cleanup(struct ci_tx_queue *txq)
 {
 	struct ci_tx_entry *sw_ring = txq->sw_ring;
-	volatile struct ci_tx_desc *txd = txq->i40e_tx_ring;
+	volatile struct ci_tx_desc *txd = txq->ci_tx_ring;
 	uint16_t last_desc_cleaned = txq->last_desc_cleaned;
 	uint16_t nb_tx_desc = txq->nb_tx_desc;
 	uint16_t desc_to_clean_to;
@@ -1112,7 +1112,7 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 
 	txq = tx_queue;
 	sw_ring = txq->sw_ring;
-	txr = txq->i40e_tx_ring;
+	txr = txq->ci_tx_ring;
 	tx_id = txq->tx_tail;
 	txe = &sw_ring[tx_id];
 
@@ -1347,7 +1347,7 @@ i40e_tx_free_bufs(struct ci_tx_queue *txq)
 	const uint16_t k = RTE_ALIGN_FLOOR(tx_rs_thresh, I40E_TX_MAX_FREE_BUF_SZ);
 	const uint16_t m = tx_rs_thresh % I40E_TX_MAX_FREE_BUF_SZ;
 
-	if ((txq->i40e_tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
+	if ((txq->ci_tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
 			rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) !=
 			rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE))
 		return 0;
@@ -1431,7 +1431,7 @@ i40e_tx_fill_hw_ring(struct ci_tx_queue *txq,
 		     struct rte_mbuf **pkts,
 		     uint16_t nb_pkts)
 {
-	volatile struct ci_tx_desc *txdp = &txq->i40e_tx_ring[txq->tx_tail];
+	volatile struct ci_tx_desc *txdp = &txq->ci_tx_ring[txq->tx_tail];
 	struct ci_tx_entry *txep = &txq->sw_ring[txq->tx_tail];
 	const int N_PER_LOOP = 4;
 	const int N_PER_LOOP_MASK = N_PER_LOOP - 1;
@@ -1459,7 +1459,7 @@ tx_xmit_pkts(struct ci_tx_queue *txq,
 	     struct rte_mbuf **tx_pkts,
 	     uint16_t nb_pkts)
 {
-	volatile struct ci_tx_desc *txr = txq->i40e_tx_ring;
+	volatile struct ci_tx_desc *txr = txq->ci_tx_ring;
 	uint16_t n = 0;
 
 	/**
@@ -2421,7 +2421,7 @@ i40e_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
 			desc -= txq->nb_tx_desc;
 	}
 
-	status = &txq->i40e_tx_ring[desc].cmd_type_offset_bsz;
+	status = &txq->ci_tx_ring[desc].cmd_type_offset_bsz;
 	mask = rte_le_to_cpu_64(I40E_TXD_QW1_DTYPE_MASK);
 	expect = rte_cpu_to_le_64(
 		I40E_TX_DESC_DTYPE_DESC_DONE << I40E_TXD_QW1_DTYPE_SHIFT);
@@ -2618,7 +2618,7 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	/* Allocate TX hardware ring descriptors. */
 	ring_size = sizeof(struct ci_tx_desc) * I40E_MAX_RING_DESC;
 	ring_size = RTE_ALIGN(ring_size, I40E_DMA_MEM_ALIGN);
-	tz = rte_eth_dma_zone_reserve(dev, "i40e_tx_ring", queue_idx,
+	tz = rte_eth_dma_zone_reserve(dev, "ci_tx_ring", queue_idx,
 			      ring_size, I40E_RING_BASE_ALIGN, socket_id);
 	if (!tz) {
 		i40e_tx_queue_release(txq);
@@ -2640,7 +2640,7 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	txq->tx_deferred_start = tx_conf->tx_deferred_start;
 
 	txq->tx_ring_dma = tz->iova;
-	txq->i40e_tx_ring = (struct ci_tx_desc *)tz->addr;
+	txq->ci_tx_ring = (struct ci_tx_desc *)tz->addr;
 
 	/* Allocate software ring */
 	txq->sw_ring =
@@ -2915,11 +2915,11 @@ i40e_reset_tx_queue(struct ci_tx_queue *txq)
 	txe = txq->sw_ring;
 	size = sizeof(struct ci_tx_desc) * txq->nb_tx_desc;
 	for (i = 0; i < size; i++)
-		((volatile char *)txq->i40e_tx_ring)[i] = 0;
+		((volatile char *)txq->ci_tx_ring)[i] = 0;
 
 	prev = (uint16_t)(txq->nb_tx_desc - 1);
 	for (i = 0; i < txq->nb_tx_desc; i++) {
-		volatile struct ci_tx_desc *txd = &txq->i40e_tx_ring[i];
+		volatile struct ci_tx_desc *txd = &txq->ci_tx_ring[i];
 
 		txd->cmd_type_offset_bsz =
 			rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE);
@@ -3240,7 +3240,7 @@ i40e_fdir_setup_tx_resources(struct i40e_pf *pf)
 	txq->i40e_vsi = pf->fdir.fdir_vsi;
 
 	txq->tx_ring_dma = tz->iova;
-	txq->i40e_tx_ring = (struct ci_tx_desc *)tz->addr;
+	txq->ci_tx_ring = (struct ci_tx_desc *)tz->addr;
 
 	/*
 	 * don't need to allocate software ring and reset for the fdir
diff --git a/drivers/net/intel/i40e/i40e_rxtx_vec_altivec.c b/drivers/net/intel/i40e/i40e_rxtx_vec_altivec.c
index ef5b252898..81e9e2bc0b 100644
--- a/drivers/net/intel/i40e/i40e_rxtx_vec_altivec.c
+++ b/drivers/net/intel/i40e/i40e_rxtx_vec_altivec.c
@@ -489,7 +489,7 @@ i40e_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
 		return 0;
 
 	tx_id = txq->tx_tail;
-	txdp = &txq->i40e_tx_ring[tx_id];
+	txdp = &txq->ci_tx_ring[tx_id];
 	txep = &txq->sw_ring_vec[tx_id];
 
 	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
@@ -509,7 +509,7 @@ i40e_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
 		txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
 
 		/* avoid reach the end of ring */
-		txdp = &txq->i40e_tx_ring[tx_id];
+		txdp = &txq->ci_tx_ring[tx_id];
 		txep = &txq->sw_ring_vec[tx_id];
 	}
 
@@ -519,7 +519,7 @@ i40e_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
 
 	tx_id = (uint16_t)(tx_id + nb_commit);
 	if (tx_id > txq->tx_next_rs) {
-		txq->i40e_tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
+		txq->ci_tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
 			rte_cpu_to_le_64(((uint64_t)I40E_TX_DESC_CMD_RS) <<
 						I40E_TXD_QW1_CMD_SHIFT);
 		txq->tx_next_rs =
diff --git a/drivers/net/intel/i40e/i40e_rxtx_vec_avx2.c b/drivers/net/intel/i40e/i40e_rxtx_vec_avx2.c
index 137c1f9765..f054bd41bf 100644
--- a/drivers/net/intel/i40e/i40e_rxtx_vec_avx2.c
+++ b/drivers/net/intel/i40e/i40e_rxtx_vec_avx2.c
@@ -753,7 +753,7 @@ i40e_xmit_fixed_burst_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
 		return 0;
 
 	tx_id = txq->tx_tail;
-	txdp = &txq->i40e_tx_ring[tx_id];
+	txdp = &txq->ci_tx_ring[tx_id];
 	txep = &txq->sw_ring_vec[tx_id];
 
 	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
@@ -774,7 +774,7 @@ i40e_xmit_fixed_burst_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
 		txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
 
 		/* avoid reach the end of ring */
-		txdp = &txq->i40e_tx_ring[tx_id];
+		txdp = &txq->ci_tx_ring[tx_id];
 		txep = &txq->sw_ring_vec[tx_id];
 	}
 
@@ -784,7 +784,7 @@ i40e_xmit_fixed_burst_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
 
 	tx_id = (uint16_t)(tx_id + nb_commit);
 	if (tx_id > txq->tx_next_rs) {
-		txq->i40e_tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
+		txq->ci_tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
 			rte_cpu_to_le_64(((uint64_t)I40E_TX_DESC_CMD_RS) <<
 						I40E_TXD_QW1_CMD_SHIFT);
 		txq->tx_next_rs =
diff --git a/drivers/net/intel/i40e/i40e_rxtx_vec_avx512.c b/drivers/net/intel/i40e/i40e_rxtx_vec_avx512.c
index 6971488750..9a967faeee 100644
--- a/drivers/net/intel/i40e/i40e_rxtx_vec_avx512.c
+++ b/drivers/net/intel/i40e/i40e_rxtx_vec_avx512.c
@@ -821,7 +821,7 @@ i40e_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
 		return 0;
 
 	tx_id = txq->tx_tail;
-	txdp = &txq->i40e_tx_ring[tx_id];
+	txdp = &txq->ci_tx_ring[tx_id];
 	txep = (void *)txq->sw_ring;
 	txep += tx_id;
 
@@ -843,7 +843,7 @@ i40e_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
 		txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
 
 		/* avoid reach the end of ring */
-		txdp = txq->i40e_tx_ring;
+		txdp = txq->ci_tx_ring;
 		txep = (void *)txq->sw_ring;
 	}
 
@@ -853,7 +853,7 @@ i40e_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
 
 	tx_id = (uint16_t)(tx_id + nb_commit);
 	if (tx_id > txq->tx_next_rs) {
-		txq->i40e_tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
+		txq->ci_tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
 			rte_cpu_to_le_64(((uint64_t)I40E_TX_DESC_CMD_RS) <<
 						I40E_TXD_QW1_CMD_SHIFT);
 		txq->tx_next_rs =
diff --git a/drivers/net/intel/i40e/i40e_rxtx_vec_common.h b/drivers/net/intel/i40e/i40e_rxtx_vec_common.h
index 14651f2f06..1fd7fc75bf 100644
--- a/drivers/net/intel/i40e/i40e_rxtx_vec_common.h
+++ b/drivers/net/intel/i40e/i40e_rxtx_vec_common.h
@@ -15,7 +15,7 @@
 static inline int
 i40e_tx_desc_done(struct ci_tx_queue *txq, uint16_t idx)
 {
-	return (txq->i40e_tx_ring[idx].cmd_type_offset_bsz &
+	return (txq->ci_tx_ring[idx].cmd_type_offset_bsz &
 			rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) ==
 				rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE);
 }
diff --git a/drivers/net/intel/i40e/i40e_rxtx_vec_neon.c b/drivers/net/intel/i40e/i40e_rxtx_vec_neon.c
index 6404b70c56..0b95152232 100644
--- a/drivers/net/intel/i40e/i40e_rxtx_vec_neon.c
+++ b/drivers/net/intel/i40e/i40e_rxtx_vec_neon.c
@@ -638,7 +638,7 @@ i40e_xmit_fixed_burst_vec(void *__rte_restrict tx_queue,
 		return 0;
 
 	tx_id = txq->tx_tail;
-	txdp = &txq->i40e_tx_ring[tx_id];
+	txdp = &txq->ci_tx_ring[tx_id];
 	txep = &txq->sw_ring_vec[tx_id];
 
 	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
@@ -658,7 +658,7 @@ i40e_xmit_fixed_burst_vec(void *__rte_restrict tx_queue,
 		txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
 
 		/* avoid reach the end of ring */
-		txdp = &txq->i40e_tx_ring[tx_id];
+		txdp = &txq->ci_tx_ring[tx_id];
 		txep = &txq->sw_ring_vec[tx_id];
 	}
 
@@ -668,7 +668,7 @@ i40e_xmit_fixed_burst_vec(void *__rte_restrict tx_queue,
 
 	tx_id = (uint16_t)(tx_id + nb_commit);
 	if (tx_id > txq->tx_next_rs) {
-		txq->i40e_tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
+		txq->ci_tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
 			rte_cpu_to_le_64(((uint64_t)I40E_TX_DESC_CMD_RS) <<
 						I40E_TXD_QW1_CMD_SHIFT);
 		txq->tx_next_rs =
diff --git a/drivers/net/intel/iavf/iavf_rxtx.c b/drivers/net/intel/iavf/iavf_rxtx.c
index e4421a9932..807bc92a45 100644
--- a/drivers/net/intel/iavf/iavf_rxtx.c
+++ b/drivers/net/intel/iavf/iavf_rxtx.c
@@ -269,11 +269,11 @@ reset_tx_queue(struct ci_tx_queue *txq)
 	txe = txq->sw_ring;
 	size = sizeof(struct ci_tx_desc) * txq->nb_tx_desc;
 	for (i = 0; i < size; i++)
-		((volatile char *)txq->iavf_tx_ring)[i] = 0;
+		((volatile char *)txq->ci_tx_ring)[i] = 0;
 
 	prev = (uint16_t)(txq->nb_tx_desc - 1);
 	for (i = 0; i < txq->nb_tx_desc; i++) {
-		txq->iavf_tx_ring[i].cmd_type_offset_bsz =
+		txq->ci_tx_ring[i].cmd_type_offset_bsz =
 			rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DESC_DONE);
 		txe[i].mbuf =  NULL;
 		txe[i].last_id = i;
@@ -829,7 +829,7 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	/* Allocate TX hardware ring descriptors. */
 	ring_size = sizeof(struct ci_tx_desc) * IAVF_MAX_RING_DESC;
 	ring_size = RTE_ALIGN(ring_size, IAVF_DMA_MEM_ALIGN);
-	mz = rte_eth_dma_zone_reserve(dev, "iavf_tx_ring", queue_idx,
+	mz = rte_eth_dma_zone_reserve(dev, "ci_tx_ring", queue_idx,
 				      ring_size, IAVF_RING_BASE_ALIGN,
 				      socket_id);
 	if (!mz) {
@@ -839,7 +839,7 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
 		return -ENOMEM;
 	}
 	txq->tx_ring_dma = mz->iova;
-	txq->iavf_tx_ring = (struct ci_tx_desc *)mz->addr;
+	txq->ci_tx_ring = (struct ci_tx_desc *)mz->addr;
 
 	txq->mz = mz;
 	reset_tx_queue(txq);
@@ -2333,7 +2333,7 @@ iavf_xmit_cleanup(struct ci_tx_queue *txq)
 	uint16_t desc_to_clean_to;
 	uint16_t nb_tx_to_clean;
 
-	volatile struct ci_tx_desc *txd = txq->iavf_tx_ring;
+	volatile struct ci_tx_desc *txd = txq->ci_tx_ring;
 
 	desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_rs_thresh);
 	if (desc_to_clean_to >= nb_tx_desc)
@@ -2756,7 +2756,7 @@ uint16_t
 iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 {
 	struct ci_tx_queue *txq = tx_queue;
-	volatile struct ci_tx_desc *txr = txq->iavf_tx_ring;
+	volatile struct ci_tx_desc *txr = txq->ci_tx_ring;
 	struct ci_tx_entry *txe_ring = txq->sw_ring;
 	struct ci_tx_entry *txe, *txn;
 	struct rte_mbuf *mb, *mb_seg;
@@ -4462,7 +4462,7 @@ iavf_dev_tx_desc_status(void *tx_queue, uint16_t offset)
 			desc -= txq->nb_tx_desc;
 	}
 
-	status = &txq->iavf_tx_ring[desc].cmd_type_offset_bsz;
+	status = &txq->ci_tx_ring[desc].cmd_type_offset_bsz;
 	mask = rte_le_to_cpu_64(IAVF_TXD_QW1_DTYPE_MASK);
 	expect = rte_cpu_to_le_64(
 		 IAVF_TX_DESC_DTYPE_DESC_DONE << IAVF_TXD_QW1_DTYPE_SHIFT);
diff --git a/drivers/net/intel/iavf/iavf_rxtx_vec_avx2.c b/drivers/net/intel/iavf/iavf_rxtx_vec_avx2.c
index 5b62d51cf7..89ce841b9e 100644
--- a/drivers/net/intel/iavf/iavf_rxtx_vec_avx2.c
+++ b/drivers/net/intel/iavf/iavf_rxtx_vec_avx2.c
@@ -1729,7 +1729,7 @@ iavf_xmit_fixed_burst_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
 	nb_commit = nb_pkts;
 
 	tx_id = txq->tx_tail;
-	txdp = &txq->iavf_tx_ring[tx_id];
+	txdp = &txq->ci_tx_ring[tx_id];
 	txep = &txq->sw_ring_vec[tx_id];
 
 	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
@@ -1750,7 +1750,7 @@ iavf_xmit_fixed_burst_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
 		txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
 
 		/* avoid reach the end of ring */
-		txdp = &txq->iavf_tx_ring[tx_id];
+		txdp = &txq->ci_tx_ring[tx_id];
 		txep = &txq->sw_ring_vec[tx_id];
 	}
 
@@ -1760,7 +1760,7 @@ iavf_xmit_fixed_burst_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
 
 	tx_id = (uint16_t)(tx_id + nb_commit);
 	if (tx_id > txq->tx_next_rs) {
-		txq->iavf_tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
+		txq->ci_tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
 			rte_cpu_to_le_64(((uint64_t)IAVF_TX_DESC_CMD_RS) <<
 					 IAVF_TXD_QW1_CMD_SHIFT);
 		txq->tx_next_rs =
diff --git a/drivers/net/intel/iavf/iavf_rxtx_vec_avx512.c b/drivers/net/intel/iavf/iavf_rxtx_vec_avx512.c
index d79d96c7b7..ad1b0b90cd 100644
--- a/drivers/net/intel/iavf/iavf_rxtx_vec_avx512.c
+++ b/drivers/net/intel/iavf/iavf_rxtx_vec_avx512.c
@@ -2219,7 +2219,7 @@ iavf_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
 	nb_commit = nb_pkts;
 
 	tx_id = txq->tx_tail;
-	txdp = &txq->iavf_tx_ring[tx_id];
+	txdp = &txq->ci_tx_ring[tx_id];
 	txep = (void *)txq->sw_ring;
 	txep += tx_id;
 
@@ -2241,7 +2241,7 @@ iavf_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
 		txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
 
 		/* avoid reach the end of ring */
-		txdp = &txq->iavf_tx_ring[tx_id];
+		txdp = &txq->ci_tx_ring[tx_id];
 		txep = (void *)txq->sw_ring;
 		txep += tx_id;
 	}
@@ -2252,7 +2252,7 @@ iavf_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
 
 	tx_id = (uint16_t)(tx_id + nb_commit);
 	if (tx_id > txq->tx_next_rs) {
-		txq->iavf_tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
+		txq->ci_tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
 			rte_cpu_to_le_64(((uint64_t)IAVF_TX_DESC_CMD_RS) <<
 					 IAVF_TXD_QW1_CMD_SHIFT);
 		txq->tx_next_rs =
@@ -2288,7 +2288,7 @@ iavf_xmit_fixed_burst_vec_avx512_ctx(void *tx_queue, struct rte_mbuf **tx_pkts,
 
 	nb_pkts = nb_commit >> 1;
 	tx_id = txq->tx_tail;
-	txdp = &txq->iavf_tx_ring[tx_id];
+	txdp = &txq->ci_tx_ring[tx_id];
 	txep = (void *)txq->sw_ring;
 	txep += (tx_id >> 1);
 
@@ -2309,7 +2309,7 @@ iavf_xmit_fixed_burst_vec_avx512_ctx(void *tx_queue, struct rte_mbuf **tx_pkts,
 		txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
 		tx_id = 0;
 		/* avoid reach the end of ring */
-		txdp = txq->iavf_tx_ring;
+		txdp = txq->ci_tx_ring;
 		txep = (void *)txq->sw_ring;
 	}
 
@@ -2320,7 +2320,7 @@ iavf_xmit_fixed_burst_vec_avx512_ctx(void *tx_queue, struct rte_mbuf **tx_pkts,
 	tx_id = (uint16_t)(tx_id + nb_commit);
 
 	if (tx_id > txq->tx_next_rs) {
-		txq->iavf_tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
+		txq->ci_tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
 			rte_cpu_to_le_64(((uint64_t)IAVF_TX_DESC_CMD_RS) <<
 					 IAVF_TXD_QW1_CMD_SHIFT);
 		txq->tx_next_rs =
diff --git a/drivers/net/intel/iavf/iavf_rxtx_vec_common.h b/drivers/net/intel/iavf/iavf_rxtx_vec_common.h
index f1ea57034f..1832b76f89 100644
--- a/drivers/net/intel/iavf/iavf_rxtx_vec_common.h
+++ b/drivers/net/intel/iavf/iavf_rxtx_vec_common.h
@@ -14,7 +14,7 @@
 static inline int
 iavf_tx_desc_done(struct ci_tx_queue *txq, uint16_t idx)
 {
-	return (txq->iavf_tx_ring[idx].cmd_type_offset_bsz &
+	return (txq->ci_tx_ring[idx].cmd_type_offset_bsz &
 			rte_cpu_to_le_64(IAVF_TXD_QW1_DTYPE_MASK)) ==
 				rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DESC_DONE);
 }
diff --git a/drivers/net/intel/ice/ice_dcf_ethdev.c b/drivers/net/intel/ice/ice_dcf_ethdev.c
index ab1d499cef..5f537b4c12 100644
--- a/drivers/net/intel/ice/ice_dcf_ethdev.c
+++ b/drivers/net/intel/ice/ice_dcf_ethdev.c
@@ -401,11 +401,11 @@ reset_tx_queue(struct ci_tx_queue *txq)
 	txe = txq->sw_ring;
 	size = sizeof(struct ci_tx_desc) * txq->nb_tx_desc;
 	for (i = 0; i < size; i++)
-		((volatile char *)txq->ice_tx_ring)[i] = 0;
+		((volatile char *)txq->ci_tx_ring)[i] = 0;
 
 	prev = (uint16_t)(txq->nb_tx_desc - 1);
 	for (i = 0; i < txq->nb_tx_desc; i++) {
-		txq->ice_tx_ring[i].cmd_type_offset_bsz =
+		txq->ci_tx_ring[i].cmd_type_offset_bsz =
 			rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DESC_DONE);
 		txe[i].mbuf =  NULL;
 		txe[i].last_id = i;
diff --git a/drivers/net/intel/ice/ice_rxtx.c b/drivers/net/intel/ice/ice_rxtx.c
index 74b80e7df3..e3ffbdb587 100644
--- a/drivers/net/intel/ice/ice_rxtx.c
+++ b/drivers/net/intel/ice/ice_rxtx.c
@@ -1117,11 +1117,11 @@ ice_reset_tx_queue(struct ci_tx_queue *txq)
 	txe = txq->sw_ring;
 	size = sizeof(struct ci_tx_desc) * txq->nb_tx_desc;
 	for (i = 0; i < size; i++)
-		((volatile char *)txq->ice_tx_ring)[i] = 0;
+		((volatile char *)txq->ci_tx_ring)[i] = 0;
 
 	prev = (uint16_t)(txq->nb_tx_desc - 1);
 	for (i = 0; i < txq->nb_tx_desc; i++) {
-		volatile struct ci_tx_desc *txd = &txq->ice_tx_ring[i];
+		volatile struct ci_tx_desc *txd = &txq->ci_tx_ring[i];
 
 		txd->cmd_type_offset_bsz =
 			rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE);
@@ -1625,7 +1625,7 @@ ice_tx_queue_setup(struct rte_eth_dev *dev,
 	/* Allocate TX hardware ring descriptors. */
 	ring_size = sizeof(struct ci_tx_desc) * ICE_MAX_NUM_DESC_BY_MAC(hw);
 	ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);
-	tz = rte_eth_dma_zone_reserve(dev, "ice_tx_ring", queue_idx,
+	tz = rte_eth_dma_zone_reserve(dev, "ci_tx_ring", queue_idx,
 				      ring_size, ICE_RING_BASE_ALIGN,
 				      socket_id);
 	if (!tz) {
@@ -1649,7 +1649,7 @@ ice_tx_queue_setup(struct rte_eth_dev *dev,
 	txq->tx_deferred_start = tx_conf->tx_deferred_start;
 
 	txq->tx_ring_dma = tz->iova;
-	txq->ice_tx_ring = tz->addr;
+	txq->ci_tx_ring = tz->addr;
 
 	/* Allocate software ring */
 	txq->sw_ring =
@@ -2555,7 +2555,7 @@ ice_tx_descriptor_status(void *tx_queue, uint16_t offset)
 			desc -= txq->nb_tx_desc;
 	}
 
-	status = &txq->ice_tx_ring[desc].cmd_type_offset_bsz;
+	status = &txq->ci_tx_ring[desc].cmd_type_offset_bsz;
 	mask = rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M);
 	expect = rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE <<
 				  ICE_TXD_QW1_DTYPE_S);
@@ -2638,7 +2638,7 @@ ice_fdir_setup_tx_resources(struct ice_pf *pf)
 	txq->ice_vsi = pf->fdir.fdir_vsi;
 
 	txq->tx_ring_dma = tz->iova;
-	txq->ice_tx_ring = (struct ci_tx_desc *)tz->addr;
+	txq->ci_tx_ring = (struct ci_tx_desc *)tz->addr;
 	/*
 	 * don't need to allocate software ring and reset for the fdir
 	 * program queue just set the queue has been configured.
@@ -3027,7 +3027,7 @@ static inline int
 ice_xmit_cleanup(struct ci_tx_queue *txq)
 {
 	struct ci_tx_entry *sw_ring = txq->sw_ring;
-	volatile struct ci_tx_desc *txd = txq->ice_tx_ring;
+	volatile struct ci_tx_desc *txd = txq->ci_tx_ring;
 	uint16_t last_desc_cleaned = txq->last_desc_cleaned;
 	uint16_t nb_tx_desc = txq->nb_tx_desc;
 	uint16_t desc_to_clean_to;
@@ -3148,7 +3148,7 @@ uint16_t
 ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 {
 	struct ci_tx_queue *txq;
-	volatile struct ci_tx_desc *ice_tx_ring;
+	volatile struct ci_tx_desc *ci_tx_ring;
 	volatile struct ci_tx_desc *txd;
 	struct ci_tx_entry *sw_ring;
 	struct ci_tx_entry *txe, *txn;
@@ -3171,7 +3171,7 @@ ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 
 	txq = tx_queue;
 	sw_ring = txq->sw_ring;
-	ice_tx_ring = txq->ice_tx_ring;
+	ci_tx_ring = txq->ci_tx_ring;
 	tx_id = txq->tx_tail;
 	txe = &sw_ring[tx_id];
 
@@ -3257,7 +3257,7 @@ ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 			/* Setup TX context descriptor if required */
 			volatile struct ice_tx_ctx_desc *ctx_txd =
 				(volatile struct ice_tx_ctx_desc *)
-					&ice_tx_ring[tx_id];
+					&ci_tx_ring[tx_id];
 			uint16_t cd_l2tag2 = 0;
 			uint64_t cd_type_cmd_tso_mss = ICE_TX_DESC_DTYPE_CTX;
 
@@ -3299,7 +3299,7 @@ ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 		m_seg = tx_pkt;
 
 		do {
-			txd = &ice_tx_ring[tx_id];
+			txd = &ci_tx_ring[tx_id];
 			txn = &sw_ring[txe->next_id];
 
 			if (txe->mbuf)
@@ -3327,7 +3327,7 @@ ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 				txe->last_id = tx_last;
 				tx_id = txe->next_id;
 				txe = txn;
-				txd = &ice_tx_ring[tx_id];
+				txd = &ci_tx_ring[tx_id];
 				txn = &sw_ring[txe->next_id];
 			}
 
@@ -3410,7 +3410,7 @@ ice_tx_free_bufs(struct ci_tx_queue *txq)
 	struct ci_tx_entry *txep;
 	uint16_t i;
 
-	if ((txq->ice_tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
+	if ((txq->ci_tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
 	     rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M)) !=
 	    rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE))
 		return 0;
@@ -3594,7 +3594,7 @@ static inline void
 ice_tx_fill_hw_ring(struct ci_tx_queue *txq, struct rte_mbuf **pkts,
 		    uint16_t nb_pkts)
 {
-	volatile struct ci_tx_desc *txdp = &txq->ice_tx_ring[txq->tx_tail];
+	volatile struct ci_tx_desc *txdp = &txq->ci_tx_ring[txq->tx_tail];
 	struct ci_tx_entry *txep = &txq->sw_ring[txq->tx_tail];
 	const int N_PER_LOOP = 4;
 	const int N_PER_LOOP_MASK = N_PER_LOOP - 1;
@@ -3627,7 +3627,7 @@ tx_xmit_pkts(struct ci_tx_queue *txq,
 	     struct rte_mbuf **tx_pkts,
 	     uint16_t nb_pkts)
 {
-	volatile struct ci_tx_desc *txr = txq->ice_tx_ring;
+	volatile struct ci_tx_desc *txr = txq->ci_tx_ring;
 	uint16_t n = 0;
 
 	/**
@@ -4887,11 +4887,11 @@ ice_fdir_programming(struct ice_pf *pf, struct ice_fltr_desc *fdir_desc)
 	uint16_t i;
 
 	fdirdp = (volatile struct ice_fltr_desc *)
-		(&txq->ice_tx_ring[txq->tx_tail]);
+		(&txq->ci_tx_ring[txq->tx_tail]);
 	fdirdp->qidx_compq_space_stat = fdir_desc->qidx_compq_space_stat;
 	fdirdp->dtype_cmd_vsi_fdid = fdir_desc->dtype_cmd_vsi_fdid;
 
-	txdp = &txq->ice_tx_ring[txq->tx_tail + 1];
+	txdp = &txq->ci_tx_ring[txq->tx_tail + 1];
 	txdp->buffer_addr = rte_cpu_to_le_64(pf->fdir.dma_addr);
 	td_cmd = ICE_TX_DESC_CMD_EOP |
 		ICE_TX_DESC_CMD_RS  |
diff --git a/drivers/net/intel/ice/ice_rxtx_vec_avx2.c b/drivers/net/intel/ice/ice_rxtx_vec_avx2.c
index bef7bb00ba..0a1df0b2f6 100644
--- a/drivers/net/intel/ice/ice_rxtx_vec_avx2.c
+++ b/drivers/net/intel/ice/ice_rxtx_vec_avx2.c
@@ -869,7 +869,7 @@ ice_xmit_fixed_burst_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
 		return 0;
 
 	tx_id = txq->tx_tail;
-	txdp = &txq->ice_tx_ring[tx_id];
+	txdp = &txq->ci_tx_ring[tx_id];
 	txep = &txq->sw_ring_vec[tx_id];
 
 	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
@@ -890,7 +890,7 @@ ice_xmit_fixed_burst_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
 		txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
 
 		/* avoid reach the end of ring */
-		txdp = &txq->ice_tx_ring[tx_id];
+		txdp = &txq->ci_tx_ring[tx_id];
 		txep = &txq->sw_ring_vec[tx_id];
 	}
 
@@ -900,7 +900,7 @@ ice_xmit_fixed_burst_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
 
 	tx_id = (uint16_t)(tx_id + nb_commit);
 	if (tx_id > txq->tx_next_rs) {
-		txq->ice_tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
+		txq->ci_tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
 			rte_cpu_to_le_64(((uint64_t)ICE_TX_DESC_CMD_RS) <<
 					 ICE_TXD_QW1_CMD_S);
 		txq->tx_next_rs =
diff --git a/drivers/net/intel/ice/ice_rxtx_vec_avx512.c b/drivers/net/intel/ice/ice_rxtx_vec_avx512.c
index 1f6bf5fc8e..d42f41461f 100644
--- a/drivers/net/intel/ice/ice_rxtx_vec_avx512.c
+++ b/drivers/net/intel/ice/ice_rxtx_vec_avx512.c
@@ -933,7 +933,7 @@ ice_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
 		return 0;
 
 	tx_id = txq->tx_tail;
-	txdp = &txq->ice_tx_ring[tx_id];
+	txdp = &txq->ci_tx_ring[tx_id];
 	txep = (void *)txq->sw_ring;
 	txep += tx_id;
 
@@ -955,7 +955,7 @@ ice_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
 		txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
 
 		/* avoid reach the end of ring */
-		txdp = txq->ice_tx_ring;
+		txdp = txq->ci_tx_ring;
 		txep = (void *)txq->sw_ring;
 	}
 
@@ -965,7 +965,7 @@ ice_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
 
 	tx_id = (uint16_t)(tx_id + nb_commit);
 	if (tx_id > txq->tx_next_rs) {
-		txq->ice_tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
+		txq->ci_tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
 			rte_cpu_to_le_64(((uint64_t)ICE_TX_DESC_CMD_RS) <<
 					 ICE_TXD_QW1_CMD_S);
 		txq->tx_next_rs =
diff --git a/drivers/net/intel/ice/ice_rxtx_vec_common.h b/drivers/net/intel/ice/ice_rxtx_vec_common.h
index ff46a8fb49..8ba591e403 100644
--- a/drivers/net/intel/ice/ice_rxtx_vec_common.h
+++ b/drivers/net/intel/ice/ice_rxtx_vec_common.h
@@ -11,7 +11,7 @@
 static inline int
 ice_tx_desc_done(struct ci_tx_queue *txq, uint16_t idx)
 {
-	return (txq->ice_tx_ring[idx].cmd_type_offset_bsz &
+	return (txq->ci_tx_ring[idx].cmd_type_offset_bsz &
 			rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M)) ==
 				rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE);
 }
diff --git a/drivers/net/intel/idpf/idpf_common_rxtx.c b/drivers/net/intel/idpf/idpf_common_rxtx.c
index be3c1ef216..51074bda3a 100644
--- a/drivers/net/intel/idpf/idpf_common_rxtx.c
+++ b/drivers/net/intel/idpf/idpf_common_rxtx.c
@@ -266,11 +266,11 @@ idpf_qc_single_tx_queue_reset(struct ci_tx_queue *txq)
 	txe = txq->sw_ring;
 	size = sizeof(struct ci_tx_desc) * txq->nb_tx_desc;
 	for (i = 0; i < size; i++)
-		((volatile char *)txq->idpf_tx_ring)[i] = 0;
+		((volatile char *)txq->ci_tx_ring)[i] = 0;
 
 	prev = (uint16_t)(txq->nb_tx_desc - 1);
 	for (i = 0; i < txq->nb_tx_desc; i++) {
-		txq->idpf_tx_ring[i].cmd_type_offset_bsz =
+		txq->ci_tx_ring[i].cmd_type_offset_bsz =
 			rte_cpu_to_le_64(IDPF_TX_DESC_DTYPE_DESC_DONE);
 		txe[i].mbuf =  NULL;
 		txe[i].last_id = i;
@@ -1335,7 +1335,7 @@ idpf_xmit_cleanup(struct ci_tx_queue *txq)
 	uint16_t desc_to_clean_to;
 	uint16_t nb_tx_to_clean;
 
-	volatile struct ci_tx_desc *txd = txq->idpf_tx_ring;
+	volatile struct ci_tx_desc *txd = txq->ci_tx_ring;
 
 	desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_rs_thresh);
 	if (desc_to_clean_to >= nb_tx_desc)
@@ -1398,7 +1398,7 @@ idpf_dp_singleq_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		return nb_tx;
 
 	sw_ring = txq->sw_ring;
-	txr = txq->idpf_tx_ring;
+	txr = txq->ci_tx_ring;
 	tx_id = txq->tx_tail;
 	txe = &sw_ring[tx_id];
 
diff --git a/drivers/net/intel/idpf/idpf_common_rxtx_avx2.c b/drivers/net/intel/idpf/idpf_common_rxtx_avx2.c
index 5f5d538dcb..04efee3722 100644
--- a/drivers/net/intel/idpf/idpf_common_rxtx_avx2.c
+++ b/drivers/net/intel/idpf/idpf_common_rxtx_avx2.c
@@ -573,7 +573,7 @@ idpf_singleq_xmit_fixed_burst_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts
 		return 0;
 
 	tx_id = txq->tx_tail;
-	txdp = &txq->idpf_tx_ring[tx_id];
+	txdp = &txq->ci_tx_ring[tx_id];
 	txep = &txq->sw_ring_vec[tx_id];
 
 	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
@@ -594,7 +594,7 @@ idpf_singleq_xmit_fixed_burst_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts
 		txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
 
 		/* avoid reach the end of ring */
-		txdp = &txq->idpf_tx_ring[tx_id];
+		txdp = &txq->ci_tx_ring[tx_id];
 		txep = &txq->sw_ring_vec[tx_id];
 	}
 
@@ -604,7 +604,7 @@ idpf_singleq_xmit_fixed_burst_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts
 
 	tx_id = (uint16_t)(tx_id + nb_commit);
 	if (tx_id > txq->tx_next_rs) {
-		txq->idpf_tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
+		txq->ci_tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
 			rte_cpu_to_le_64(((uint64_t)IDPF_TX_DESC_CMD_RS) <<
 					 IDPF_TXD_QW1_CMD_S);
 		txq->tx_next_rs =
diff --git a/drivers/net/intel/idpf/idpf_common_rxtx_avx512.c b/drivers/net/intel/idpf/idpf_common_rxtx_avx512.c
index c1ec3d1222..d5e5a2ca5f 100644
--- a/drivers/net/intel/idpf/idpf_common_rxtx_avx512.c
+++ b/drivers/net/intel/idpf/idpf_common_rxtx_avx512.c
@@ -1090,7 +1090,7 @@ idpf_singleq_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pk
 		return 0;
 
 	tx_id = txq->tx_tail;
-	txdp = &txq->idpf_tx_ring[tx_id];
+	txdp = &txq->ci_tx_ring[tx_id];
 	txep = (void *)txq->sw_ring;
 	txep += tx_id;
 
@@ -1112,7 +1112,7 @@ idpf_singleq_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pk
 		txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
 
 		/* avoid reach the end of ring */
-		txdp = &txq->idpf_tx_ring[tx_id];
+		txdp = &txq->ci_tx_ring[tx_id];
 		txep = (void *)txq->sw_ring;
 		txep += tx_id;
 	}
@@ -1123,7 +1123,7 @@ idpf_singleq_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pk
 
 	tx_id = (uint16_t)(tx_id + nb_commit);
 	if (tx_id > txq->tx_next_rs) {
-		txq->idpf_tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
+		txq->ci_tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
 			rte_cpu_to_le_64(((uint64_t)IDPF_TX_DESC_CMD_RS) <<
 					 IDPF_TXD_QW1_CMD_S);
 		txq->tx_next_rs =
diff --git a/drivers/net/intel/idpf/idpf_rxtx.c b/drivers/net/intel/idpf/idpf_rxtx.c
index 9317c8b175..7d9c885458 100644
--- a/drivers/net/intel/idpf/idpf_rxtx.c
+++ b/drivers/net/intel/idpf/idpf_rxtx.c
@@ -481,7 +481,7 @@ idpf_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 	}
 
 	if (!is_splitq) {
-		txq->idpf_tx_ring = mz->addr;
+		txq->ci_tx_ring = mz->addr;
 		idpf_qc_single_tx_queue_reset(txq);
 	} else {
 		txq->desc_ring = mz->addr;
diff --git a/drivers/net/intel/idpf/idpf_rxtx_vec_common.h b/drivers/net/intel/idpf/idpf_rxtx_vec_common.h
index 4702061484..b5e8574667 100644
--- a/drivers/net/intel/idpf/idpf_rxtx_vec_common.h
+++ b/drivers/net/intel/idpf/idpf_rxtx_vec_common.h
@@ -31,7 +31,7 @@ idpf_tx_desc_done(struct ci_tx_queue *txq, uint16_t idx)
 	if (txq->complq != NULL)
 		return 1;
 
-	return (txq->idpf_tx_ring[idx].cmd_type_offset_bsz &
+	return (txq->ci_tx_ring[idx].cmd_type_offset_bsz &
 			rte_cpu_to_le_64(IDPF_TXD_QW1_DTYPE_M)) ==
 				rte_cpu_to_le_64(IDPF_TX_DESC_DTYPE_DESC_DONE);
 }
-- 
2.51.0


Thread overview: 274+ messages
2025-12-19 17:25 [RFC PATCH 00/27] combine multiple Intel scalar Tx paths Bruce Richardson
2025-12-19 17:25 ` [RFC PATCH 01/27] net/intel: create common Tx descriptor structure Bruce Richardson
2025-12-19 17:25 ` [RFC PATCH 02/27] net/intel: use common tx ring structure Bruce Richardson
2025-12-19 17:25 ` [RFC PATCH 03/27] net/intel: create common post-Tx cleanup function Bruce Richardson
2025-12-19 17:25 ` [RFC PATCH 04/27] net/intel: consolidate definitions for Tx desc fields Bruce Richardson
2025-12-19 17:25 ` [RFC PATCH 05/27] net/intel: create separate header for Tx scalar fns Bruce Richardson
2025-12-19 17:25 ` [RFC PATCH 06/27] net/intel: add common fn to calculate needed descriptors Bruce Richardson
2025-12-19 17:25 ` [RFC PATCH 07/27] net/ice: refactor context descriptor handling Bruce Richardson
2025-12-19 17:25 ` [RFC PATCH 08/27] net/i40e: " Bruce Richardson
2025-12-19 17:25 ` [RFC PATCH 09/27] net/idpf: " Bruce Richardson
2025-12-19 17:25 ` [RFC PATCH 10/27] net/intel: consolidate checksum mask definition Bruce Richardson
2025-12-19 17:25 ` [RFC PATCH 11/27] net/intel: create common checksum Tx offload function Bruce Richardson
2025-12-19 17:25 ` [RFC PATCH 12/27] net/intel: create a common scalar Tx function Bruce Richardson
2025-12-19 17:25 ` [RFC PATCH 13/27] net/i40e: use " Bruce Richardson
2025-12-19 17:25 ` [RFC PATCH 14/27] net/intel: add IPSec hooks to common " Bruce Richardson
2025-12-19 17:25 ` [RFC PATCH 15/27] net/intel: support configurable VLAN tag insertion on Tx Bruce Richardson
2025-12-19 17:25 ` [RFC PATCH 16/27] net/iavf: use common scalar Tx function Bruce Richardson
2025-12-19 17:25 ` [RFC PATCH 17/27] net/i40e: document requirement for QinQ support Bruce Richardson
2025-12-19 17:25 ` [RFC PATCH 18/27] net/idpf: use common scalar Tx function Bruce Richardson
2025-12-19 17:25 ` [RFC PATCH 19/27] net/intel: avoid writing the final pkt descriptor twice Bruce Richardson
2025-12-19 17:25 ` [RFC PATCH 20/27] net/intel: write descriptors using non-volatile pointers Bruce Richardson
2025-12-20  8:43   ` Morten Brørup
2025-12-22  9:50     ` Bruce Richardson
2025-12-19 17:25 ` [RFC PATCH 21/27] net/intel: remove unnecessary flag clearing Bruce Richardson
2025-12-19 17:25 ` [RFC PATCH 22/27] net/intel: mark mid-burst ring cleanup as unlikely Bruce Richardson
2025-12-19 17:25 ` [RFC PATCH 23/27] net/intel: add special handling for single desc packets Bruce Richardson
2025-12-19 17:25 ` [RFC PATCH 24/27] net/intel: use separate array for desc status tracking Bruce Richardson
2025-12-19 17:25 ` [RFC PATCH 25/27] net/ixgbe: " Bruce Richardson
2025-12-19 17:25 ` [RFC PATCH 26/27] net/intel: drop unused Tx queue used count Bruce Richardson
2025-12-19 17:25 ` [RFC PATCH 27/27] net/intel: remove index for tracking end of packet Bruce Richardson
2025-12-20  9:05   ` Morten Brørup
2026-01-13 15:14 ` [PATCH v2 00/36] combine multiple Intel scalar Tx paths Bruce Richardson
2026-01-13 15:14   ` [PATCH v2 01/36] net/intel: create common Tx descriptor structure Bruce Richardson
2026-01-13 15:14   ` [PATCH v2 02/36] net/intel: use common Tx ring structure Bruce Richardson
2026-01-13 15:14   ` [PATCH v2 03/36] net/intel: create common post-Tx cleanup function Bruce Richardson
2026-01-13 15:14   ` [PATCH v2 04/36] net/intel: consolidate definitions for Tx desc fields Bruce Richardson
2026-01-13 15:14   ` [PATCH v2 05/36] net/intel: create separate header for Tx scalar fns Bruce Richardson
2026-01-13 15:14   ` [PATCH v2 06/36] net/intel: add common fn to calculate needed descriptors Bruce Richardson
2026-01-13 15:14   ` [PATCH v2 07/36] net/ice: refactor context descriptor handling Bruce Richardson
2026-01-13 15:14   ` [PATCH v2 08/36] net/i40e: " Bruce Richardson
2026-01-13 15:14   ` [PATCH v2 09/36] net/idpf: " Bruce Richardson
2026-01-13 15:14   ` [PATCH v2 10/36] net/intel: consolidate checksum mask definition Bruce Richardson
2026-01-13 15:14   ` [PATCH v2 11/36] net/intel: create common checksum Tx offload function Bruce Richardson
2026-01-13 15:14   ` [PATCH v2 12/36] net/intel: create a common scalar Tx function Bruce Richardson
2026-01-13 15:14   ` [PATCH v2 13/36] net/i40e: use " Bruce Richardson
2026-01-13 15:14   ` [PATCH v2 14/36] net/intel: add IPsec hooks to common " Bruce Richardson
2026-01-13 15:14   ` [PATCH v2 15/36] net/intel: support configurable VLAN tag insertion on Tx Bruce Richardson
2026-01-13 15:14   ` [PATCH v2 16/36] net/iavf: use common scalar Tx function Bruce Richardson
2026-01-13 15:14   ` [PATCH v2 17/36] net/i40e: document requirement for QinQ support Bruce Richardson
2026-01-13 15:14   ` [PATCH v2 18/36] net/idpf: use common scalar Tx function Bruce Richardson
2026-01-13 15:14   ` [PATCH v2 19/36] net/intel: avoid writing the final pkt descriptor twice Bruce Richardson
2026-01-13 15:14   ` [PATCH v2 20/36] eal: add macro for marking assumed alignment Bruce Richardson
2026-01-13 15:14   ` [PATCH v2 21/36] net/intel: write descriptors using non-volatile pointers Bruce Richardson
2026-01-13 15:14   ` [PATCH v2 22/36] net/intel: remove unnecessary flag clearing Bruce Richardson
2026-01-13 15:14   ` [PATCH v2 23/36] net/intel: mark mid-burst ring cleanup as unlikely Bruce Richardson
2026-01-13 15:14   ` [PATCH v2 24/36] net/intel: add special handling for single desc packets Bruce Richardson
2026-01-13 15:14   ` [PATCH v2 25/36] net/intel: use separate array for desc status tracking Bruce Richardson
2026-01-13 15:14   ` [PATCH v2 26/36] net/ixgbe: " Bruce Richardson
2026-01-13 15:14   ` [PATCH v2 27/36] net/intel: drop unused Tx queue used count Bruce Richardson
2026-01-13 15:14   ` [PATCH v2 28/36] net/intel: remove index for tracking end of packet Bruce Richardson
2026-01-13 15:14   ` [PATCH v2 29/36] net/intel: merge ring writes in simple Tx for ice and i40e Bruce Richardson
2026-01-13 15:14   ` [PATCH v2 30/36] net/intel: consolidate ice and i40e buffer free function Bruce Richardson
2026-01-13 15:14   ` [PATCH v2 31/36] net/intel: complete merging simple Tx paths Bruce Richardson
2026-01-13 15:14   ` [PATCH v2 32/36] net/intel: use non-volatile stores in simple Tx function Bruce Richardson
2026-01-13 15:14   ` [PATCH v2 33/36] net/intel: align scalar simple Tx path with vector logic Bruce Richardson
2026-01-13 15:14   ` [PATCH v2 34/36] net/intel: use vector SW ring entry for simple path Bruce Richardson
2026-01-13 15:14   ` [PATCH v2 35/36] net/intel: use vector mbuf cleanup from simple scalar path Bruce Richardson
2026-01-13 15:15   ` [PATCH v2 36/36] net/idpf: enable simple Tx function Bruce Richardson
2026-01-13 17:17   ` [PATCH v2 00/36] combine multiple Intel scalar Tx paths Stephen Hemminger
2026-01-23  6:26   ` Stephen Hemminger
2026-01-26  9:02     ` Bruce Richardson
2026-01-30 11:41 ` [PATCH v3 " Bruce Richardson
2026-01-30 11:41   ` [PATCH v3 01/36] net/intel: create common Tx descriptor structure Bruce Richardson
2026-02-06  9:56     ` Loftus, Ciara
2026-01-30 11:41   ` [PATCH v3 02/36] net/intel: use common Tx ring structure Bruce Richardson
2026-02-06  9:59     ` Loftus, Ciara
2026-01-30 11:41   ` [PATCH v3 03/36] net/intel: create common post-Tx cleanup function Bruce Richardson
2026-02-06 10:07     ` Loftus, Ciara
2026-02-09 10:41       ` Bruce Richardson
2026-01-30 11:41   ` [PATCH v3 04/36] net/intel: consolidate definitions for Tx desc fields Bruce Richardson
2026-02-06 10:14     ` Loftus, Ciara
2026-02-09 10:43       ` Bruce Richardson
2026-01-30 11:41   ` [PATCH v3 05/36] net/intel: create separate header for Tx scalar fns Bruce Richardson
2026-02-06 10:23     ` Loftus, Ciara
2026-02-09 11:04       ` Bruce Richardson
2026-01-30 11:41   ` [PATCH v3 06/36] net/intel: add common fn to calculate needed descriptors Bruce Richardson
2026-02-06 10:25     ` Loftus, Ciara
2026-02-09 11:15       ` Bruce Richardson
2026-01-30 11:41   ` [PATCH v3 07/36] net/ice: refactor context descriptor handling Bruce Richardson
2026-02-06 10:47     ` Loftus, Ciara
2026-02-09 11:16       ` Bruce Richardson
2026-01-30 11:41   ` [PATCH v3 08/36] net/i40e: " Bruce Richardson
2026-02-06 10:54     ` Loftus, Ciara
2026-01-30 11:41   ` [PATCH v3 09/36] net/idpf: " Bruce Richardson
2026-02-06 10:59     ` Loftus, Ciara
2026-01-30 11:41   ` [PATCH v3 10/36] net/intel: consolidate checksum mask definition Bruce Richardson
2026-02-06 11:25     ` Loftus, Ciara
2026-02-09 11:40       ` Bruce Richardson
2026-02-09 15:00         ` Bruce Richardson
2026-01-30 11:41   ` [PATCH v3 11/36] net/intel: create common checksum Tx offload function Bruce Richardson
2026-02-06 11:37     ` Loftus, Ciara
2026-02-09 11:41       ` Bruce Richardson
2026-01-30 11:41   ` [PATCH v3 12/36] net/intel: create a common scalar Tx function Bruce Richardson
2026-02-06 12:01     ` Loftus, Ciara
2026-02-06 12:13       ` Bruce Richardson
2026-01-30 11:41   ` [PATCH v3 13/36] net/i40e: use " Bruce Richardson
2026-01-30 11:41   ` [PATCH v3 14/36] net/intel: add IPsec hooks to common " Bruce Richardson
2026-01-30 11:41   ` [PATCH v3 15/36] net/intel: support configurable VLAN tag insertion on Tx Bruce Richardson
2026-01-30 11:41   ` [PATCH v3 16/36] net/iavf: use common scalar Tx function Bruce Richardson
2026-01-30 11:41   ` [PATCH v3 17/36] net/i40e: document requirement for QinQ support Bruce Richardson
2026-01-30 11:41   ` [PATCH v3 18/36] net/idpf: use common scalar Tx function Bruce Richardson
2026-01-30 11:41   ` [PATCH v3 19/36] net/intel: avoid writing the final pkt descriptor twice Bruce Richardson
2026-01-30 11:41   ` [PATCH v3 20/36] eal: add macro for marking assumed alignment Bruce Richardson
2026-01-30 11:41   ` [PATCH v3 21/36] net/intel: write descriptors using non-volatile pointers Bruce Richardson
2026-01-30 11:41   ` [PATCH v3 22/36] net/intel: remove unnecessary flag clearing Bruce Richardson
2026-01-30 11:41   ` [PATCH v3 23/36] net/intel: mark mid-burst ring cleanup as unlikely Bruce Richardson
2026-01-30 11:41   ` [PATCH v3 24/36] net/intel: add special handling for single desc packets Bruce Richardson
2026-01-30 11:41   ` [PATCH v3 25/36] net/intel: use separate array for desc status tracking Bruce Richardson
2026-01-30 11:41   ` [PATCH v3 26/36] net/ixgbe: " Bruce Richardson
2026-01-30 11:41   ` [PATCH v3 27/36] net/intel: drop unused Tx queue used count Bruce Richardson
2026-01-30 11:41   ` [PATCH v3 28/36] net/intel: remove index for tracking end of packet Bruce Richardson
2026-01-30 11:41   ` [PATCH v3 29/36] net/intel: merge ring writes in simple Tx for ice and i40e Bruce Richardson
2026-01-30 11:41   ` [PATCH v3 30/36] net/intel: consolidate ice and i40e buffer free function Bruce Richardson
2026-01-30 11:41   ` [PATCH v3 31/36] net/intel: complete merging simple Tx paths Bruce Richardson
2026-01-30 11:41   ` [PATCH v3 32/36] net/intel: use non-volatile stores in simple Tx function Bruce Richardson
2026-01-30 11:42   ` [PATCH v3 33/36] net/intel: align scalar simple Tx path with vector logic Bruce Richardson
2026-01-30 11:42   ` [PATCH v3 34/36] net/intel: use vector SW ring entry for simple path Bruce Richardson
2026-01-30 11:42   ` [PATCH v3 35/36] net/intel: use vector mbuf cleanup from simple scalar path Bruce Richardson
2026-01-30 11:42   ` [PATCH v3 36/36] net/idpf: enable simple Tx function Bruce Richardson
2026-01-30 17:56     ` [REVIEW] " Stephen Hemminger
2026-02-09 16:44 ` [PATCH v4 00/35] combine multiple Intel scalar Tx paths Bruce Richardson
2026-02-09 16:44   ` [PATCH v4 01/35] net/intel: create common Tx descriptor structure Bruce Richardson
2026-02-09 16:45   ` [PATCH v4 02/35] net/intel: use common Tx ring structure Bruce Richardson
2026-02-09 16:45   ` [PATCH v4 03/35] net/intel: create common post-Tx cleanup function Bruce Richardson
2026-02-10 12:18     ` Burakov, Anatoly
2026-02-09 16:45   ` [PATCH v4 04/35] net/intel: consolidate definitions for Tx desc fields Bruce Richardson
2026-02-10 12:26     ` Burakov, Anatoly
2026-02-10 16:47       ` Bruce Richardson
2026-02-09 16:45   ` [PATCH v4 05/35] net/intel: add common fn to calculate needed descriptors Bruce Richardson
2026-02-10 12:29     ` Burakov, Anatoly
2026-02-10 14:08       ` Bruce Richardson
2026-02-10 14:17         ` Burakov, Anatoly
2026-02-10 17:25           ` Bruce Richardson
2026-02-11  9:14             ` Burakov, Anatoly
2026-02-09 16:45   ` [PATCH v4 06/35] net/ice: refactor context descriptor handling Bruce Richardson
2026-02-10 12:42     ` Burakov, Anatoly
2026-02-10 17:40       ` Bruce Richardson
2026-02-11  9:17         ` Burakov, Anatoly
2026-02-11 10:38           ` Bruce Richardson
2026-02-09 16:45   ` [PATCH v4 07/35] net/i40e: " Bruce Richardson
2026-02-10 12:48     ` Burakov, Anatoly
2026-02-10 14:10       ` Bruce Richardson
2026-02-10 14:19         ` Burakov, Anatoly
2026-02-10 17:54           ` Bruce Richardson
2026-02-11  9:20             ` Burakov, Anatoly
2026-02-11 12:04               ` Bruce Richardson
2026-02-09 16:45   ` [PATCH v4 08/35] net/idpf: " Bruce Richardson
2026-02-10 12:52     ` Burakov, Anatoly
2026-02-09 16:45   ` [PATCH v4 09/35] net/intel: consolidate checksum mask definition Bruce Richardson
2026-02-10 13:00     ` Burakov, Anatoly
2026-02-09 16:45   ` [PATCH v4 10/35] net/intel: create common checksum Tx offload function Bruce Richardson
2026-02-10 13:04     ` Burakov, Anatoly
2026-02-10 17:56       ` Bruce Richardson
2026-02-09 16:45   ` [PATCH v4 11/35] net/intel: create a common scalar Tx function Bruce Richardson
2026-02-10 13:14     ` Burakov, Anatoly
2026-02-10 18:03       ` Bruce Richardson
2026-02-11  9:26         ` Burakov, Anatoly
2026-02-09 16:45   ` [PATCH v4 12/35] net/i40e: use " Bruce Richardson
2026-02-10 13:14     ` Burakov, Anatoly
2026-02-09 16:45   ` [PATCH v4 13/35] net/intel: add IPsec hooks to common " Bruce Richardson
2026-02-10 13:16     ` Burakov, Anatoly
2026-02-09 16:45   ` [PATCH v4 14/35] net/intel: support configurable VLAN tag insertion on Tx Bruce Richardson
2026-02-10 13:21     ` Burakov, Anatoly
2026-02-10 18:20       ` Bruce Richardson
2026-02-11  9:29         ` Burakov, Anatoly
2026-02-11 14:19           ` Bruce Richardson
2026-02-09 16:45   ` [PATCH v4 15/35] net/iavf: use common scalar Tx function Bruce Richardson
2026-02-10 13:27     ` Burakov, Anatoly
2026-02-10 18:31       ` Bruce Richardson
2026-02-09 16:45   ` [PATCH v4 16/35] net/i40e: document requirement for QinQ support Bruce Richardson
2026-02-10 13:27     ` Burakov, Anatoly
2026-02-09 16:45   ` [PATCH v4 17/35] net/idpf: use common scalar Tx function Bruce Richardson
2026-02-10 13:30     ` Burakov, Anatoly
2026-02-09 16:45   ` [PATCH v4 18/35] net/intel: avoid writing the final pkt descriptor twice Bruce Richardson
2026-02-10 13:31     ` Burakov, Anatoly
2026-02-09 16:45   ` [PATCH v4 19/35] eal: add macro for marking assumed alignment Bruce Richardson
2026-02-09 22:35     ` Morten Brørup
2026-02-11 14:45       ` Bruce Richardson
2026-02-09 16:45   ` [PATCH v4 20/35] net/intel: write descriptors using non-volatile pointers Bruce Richardson
2026-02-09 23:08     ` Morten Brørup
2026-02-10  9:03       ` Bruce Richardson
2026-02-10  9:28         ` Morten Brørup
2026-02-11 14:44           ` Bruce Richardson
2026-02-11 14:44       ` Bruce Richardson
2026-02-09 16:45   ` [PATCH v4 21/35] net/intel: remove unnecessary flag clearing Bruce Richardson
2026-02-10 13:33     ` Burakov, Anatoly
2026-02-09 16:45   ` [PATCH v4 22/35] net/intel: mark mid-burst ring cleanup as unlikely Bruce Richardson
2026-02-10 13:36     ` Burakov, Anatoly
2026-02-10 14:13       ` Bruce Richardson
2026-02-11 18:12         ` Bruce Richardson
2026-02-09 16:45   ` [PATCH v4 23/35] net/intel: add special handling for single desc packets Bruce Richardson
2026-02-10 13:57     ` Burakov, Anatoly
2026-02-09 16:45   ` [PATCH v4 24/35] net/intel: use separate array for desc status tracking Bruce Richardson
2026-02-10 14:11     ` Burakov, Anatoly
2026-02-09 16:45   ` [PATCH v4 25/35] net/ixgbe: " Bruce Richardson
2026-02-10 14:12     ` Burakov, Anatoly
2026-02-09 16:45   ` [PATCH v4 26/35] net/intel: drop unused Tx queue used count Bruce Richardson
2026-02-10 14:14     ` Burakov, Anatoly
2026-02-09 16:45   ` [PATCH v4 27/35] net/intel: remove index for tracking end of packet Bruce Richardson
2026-02-10 14:15     ` Burakov, Anatoly
2026-02-09 16:45   ` [PATCH v4 28/35] net/intel: merge ring writes in simple Tx for ice and i40e Bruce Richardson
2026-02-09 23:18     ` Medvedkin, Vladimir
2026-02-09 16:45   ` [PATCH v4 29/35] net/intel: consolidate ice and i40e buffer free function Bruce Richardson
2026-02-09 23:19     ` Medvedkin, Vladimir
2026-02-09 16:45   ` [PATCH v4 30/35] net/intel: complete merging simple Tx paths Bruce Richardson
2026-02-09 23:19     ` Medvedkin, Vladimir
2026-02-09 16:45   ` [PATCH v4 31/35] net/intel: use non-volatile stores in simple Tx function Bruce Richardson
2026-02-09 23:19     ` Medvedkin, Vladimir
2026-02-09 16:45   ` [PATCH v4 32/35] net/intel: align scalar simple Tx path with vector logic Bruce Richardson
2026-02-09 23:19     ` Medvedkin, Vladimir
2026-02-09 16:45   ` [PATCH v4 33/35] net/intel: use vector SW ring entry for simple path Bruce Richardson
2026-02-09 23:19     ` Medvedkin, Vladimir
2026-02-09 16:45   ` [PATCH v4 34/35] net/intel: use vector mbuf cleanup from simple scalar path Bruce Richardson
2026-02-09 23:19     ` Medvedkin, Vladimir
2026-02-09 16:45   ` [PATCH v4 35/35] net/idpf: enable simple Tx function Bruce Richardson
2026-02-09 23:20     ` Medvedkin, Vladimir
2026-02-11 18:12 ` [PATCH v5 00/35] combine multiple Intel scalar Tx paths Bruce Richardson
2026-02-11 18:12   ` [PATCH v5 01/35] net/intel: create common Tx descriptor structure Bruce Richardson
2026-02-11 18:12   ` [PATCH v5 02/35] net/intel: fix memory leak on TX queue setup failure Bruce Richardson
2026-02-12 12:14     ` Burakov, Anatoly
2026-02-11 18:12   ` Bruce Richardson [this message]
2026-02-11 18:12   ` [PATCH v5 04/35] net/intel: create common post-Tx cleanup function Bruce Richardson
2026-02-11 18:12   ` [PATCH v5 05/35] net/intel: consolidate definitions for Tx desc fields Bruce Richardson
2026-02-11 18:12   ` [PATCH v5 06/35] net/intel: add common fn to calculate needed descriptors Bruce Richardson
2026-02-11 18:12   ` [PATCH v5 07/35] net/ice: refactor context descriptor handling Bruce Richardson
2026-02-12 12:16     ` Burakov, Anatoly
2026-02-11 18:12   ` [PATCH v5 08/35] net/i40e: " Bruce Richardson
2026-02-12 12:19     ` Burakov, Anatoly
2026-02-11 18:12   ` [PATCH v5 09/35] net/idpf: " Bruce Richardson
2026-02-11 18:12   ` [PATCH v5 10/35] net/intel: consolidate checksum mask definition Bruce Richardson
2026-02-11 18:12   ` [PATCH v5 11/35] net/intel: create common checksum Tx offload function Bruce Richardson
2026-02-11 18:12   ` [PATCH v5 12/35] net/intel: create a common scalar Tx function Bruce Richardson
2026-02-11 18:12   ` [PATCH v5 13/35] net/i40e: use " Bruce Richardson
2026-02-11 18:12   ` [PATCH v5 14/35] net/intel: add IPsec hooks to common " Bruce Richardson
2026-02-11 18:12   ` [PATCH v5 15/35] net/intel: support configurable VLAN tag insertion on Tx Bruce Richardson
2026-02-12 12:20     ` Burakov, Anatoly
2026-02-11 18:12   ` [PATCH v5 16/35] net/iavf: use common scalar Tx function Bruce Richardson
2026-02-11 18:12   ` [PATCH v5 17/35] net/i40e: document requirement for QinQ support Bruce Richardson
2026-02-11 18:12   ` [PATCH v5 18/35] net/idpf: use common scalar Tx function Bruce Richardson
2026-02-11 18:12   ` [PATCH v5 19/35] net/intel: avoid writing the final pkt descriptor twice Bruce Richardson
2026-02-11 18:12   ` [PATCH v5 20/35] net/intel: write descriptors using non-volatile pointers Bruce Richardson
2026-02-11 21:14     ` Morten Brørup
2026-02-12  8:43       ` Bruce Richardson
2026-02-11 18:12   ` [PATCH v5 21/35] net/intel: remove unnecessary flag clearing Bruce Richardson
2026-02-11 18:12   ` [PATCH v5 22/35] net/intel: add special handling for single desc packets Bruce Richardson
2026-02-11 18:12   ` [PATCH v5 23/35] net/intel: use separate array for desc status tracking Bruce Richardson
2026-02-11 21:51     ` Morten Brørup
2026-02-12  9:15       ` Bruce Richardson
2026-02-12 12:38         ` Morten Brørup
2026-02-11 18:12   ` [PATCH v5 24/35] net/ixgbe: " Bruce Richardson
2026-02-11 18:12   ` [PATCH v5 25/35] net/intel: drop unused Tx queue used count Bruce Richardson
2026-02-11 18:12   ` [PATCH v5 26/35] net/intel: remove index for tracking end of packet Bruce Richardson
2026-02-11 18:12   ` [PATCH v5 27/35] net/intel: merge ring writes in simple Tx for ice and i40e Bruce Richardson
2026-02-11 18:12   ` [PATCH v5 28/35] net/intel: consolidate ice and i40e buffer free function Bruce Richardson
2026-02-11 18:12   ` [PATCH v5 29/35] net/intel: complete merging simple Tx paths Bruce Richardson
2026-02-11 18:12   ` [PATCH v5 30/35] net/intel: use non-volatile stores in simple Tx function Bruce Richardson
2026-02-11 18:13   ` [PATCH v5 31/35] net/intel: align scalar simple Tx path with vector logic Bruce Richardson
2026-02-11 18:13   ` [PATCH v5 32/35] net/intel: use vector SW ring entry for simple path Bruce Richardson
2026-02-11 18:13   ` [PATCH v5 33/35] net/intel: use vector mbuf cleanup from simple scalar path Bruce Richardson
2026-02-11 18:13   ` [PATCH v5 34/35] net/idpf: enable simple Tx function Bruce Richardson
2026-02-12 12:28     ` Burakov, Anatoly
2026-02-11 18:13   ` [PATCH v5 35/35] net/cpfl: " Bruce Richardson
2026-02-12 12:30     ` Burakov, Anatoly
2026-02-12 14:45   ` [PATCH v5 00/35] combine multiple Intel scalar Tx paths Bruce Richardson
