From: Bruce Richardson <bruce.richardson@intel.com>
To: dev@dpdk.org
Cc: Bruce Richardson <bruce.richardson@intel.com>,
Vladimir Medvedkin <vladimir.medvedkin@intel.com>,
Anatoly Burakov <anatoly.burakov@intel.com>
Subject: [PATCH v5 27/35] net/intel: merge ring writes in simple Tx for ice and i40e
Date: Wed, 11 Feb 2026 18:12:56 +0000 [thread overview]
Message-ID: <20260211181309.2838042-28-bruce.richardson@intel.com> (raw)
In-Reply-To: <20260211181309.2838042-1-bruce.richardson@intel.com>
The ice and i40e drivers have identical code for writing ring entries in
the simple Tx path, so merge in the descriptor writing code.
Signed-off-by: Bruce Richardson <bruce.richardson@intel.com>
Acked-by: Vladimir Medvedkin <vladimir.medvedkin@intel.com>
---
drivers/net/intel/common/tx.h | 6 ++
drivers/net/intel/common/tx_scalar.h | 60 ++++++++++++++
drivers/net/intel/i40e/i40e_rxtx.c | 79 +------------------
drivers/net/intel/i40e/i40e_rxtx.h | 3 -
.../net/intel/i40e/i40e_rxtx_vec_altivec.c | 4 +-
drivers/net/intel/i40e/i40e_rxtx_vec_avx2.c | 4 +-
drivers/net/intel/i40e/i40e_rxtx_vec_avx512.c | 4 +-
drivers/net/intel/i40e/i40e_rxtx_vec_neon.c | 4 +-
drivers/net/intel/ice/ice_rxtx.c | 69 +---------------
drivers/net/intel/ice/ice_rxtx.h | 2 -
drivers/net/intel/ice/ice_rxtx_vec_avx2.c | 4 +-
drivers/net/intel/ice/ice_rxtx_vec_avx512.c | 4 +-
12 files changed, 86 insertions(+), 157 deletions(-)
diff --git a/drivers/net/intel/common/tx.h b/drivers/net/intel/common/tx.h
index ee7c83cf00..a5cbe070fc 100644
--- a/drivers/net/intel/common/tx.h
+++ b/drivers/net/intel/common/tx.h
@@ -70,6 +70,12 @@ enum ci_tx_l2tag1_field {
/* Common maximum data per TX descriptor */
#define CI_MAX_DATA_PER_TXD (CI_TXD_QW1_TX_BUF_SZ_M >> CI_TXD_QW1_TX_BUF_SZ_S)
+/* Common TX maximum burst size for chunked transmission in simple paths */
+#define CI_TX_MAX_BURST 32
+
+/* Common TX descriptor command flags for simple transmit */
+#define CI_TX_DESC_CMD_DEFAULT (CI_TX_DESC_CMD_ICRC | CI_TX_DESC_CMD_EOP)
+
/* Checksum offload mask to identify packets requesting offload */
#define CI_TX_CKSUM_OFFLOAD_MASK (RTE_MBUF_F_TX_IP_CKSUM | \
RTE_MBUF_F_TX_L4_MASK | \
diff --git a/drivers/net/intel/common/tx_scalar.h b/drivers/net/intel/common/tx_scalar.h
index c91a8156a2..3c9c1f611c 100644
--- a/drivers/net/intel/common/tx_scalar.h
+++ b/drivers/net/intel/common/tx_scalar.h
@@ -12,6 +12,66 @@
/* depends on common Tx definitions. */
#include "tx.h"
+/* Populate 4 descriptors with data from 4 mbufs */
+static inline void
+ci_tx_fill_hw_ring_tx4(volatile struct ci_tx_desc *txdp, struct rte_mbuf **pkts)
+{
+ uint64_t dma_addr;
+ uint32_t i;
+
+ for (i = 0; i < 4; i++, txdp++, pkts++) {
+ dma_addr = rte_mbuf_data_iova(*pkts);
+ txdp->buffer_addr = rte_cpu_to_le_64(dma_addr);
+ txdp->cmd_type_offset_bsz =
+ rte_cpu_to_le_64(CI_TX_DESC_DTYPE_DATA |
+ ((uint64_t)CI_TX_DESC_CMD_DEFAULT << CI_TXD_QW1_CMD_S) |
+ ((uint64_t)(*pkts)->data_len << CI_TXD_QW1_TX_BUF_SZ_S));
+ }
+}
+
+/* Populate 1 descriptor with data from 1 mbuf */
+static inline void
+ci_tx_fill_hw_ring_tx1(volatile struct ci_tx_desc *txdp, struct rte_mbuf **pkts)
+{
+ uint64_t dma_addr;
+
+ dma_addr = rte_mbuf_data_iova(*pkts);
+ txdp->buffer_addr = rte_cpu_to_le_64(dma_addr);
+ txdp->cmd_type_offset_bsz =
+ rte_cpu_to_le_64(CI_TX_DESC_DTYPE_DATA |
+ ((uint64_t)CI_TX_DESC_CMD_DEFAULT << CI_TXD_QW1_CMD_S) |
+ ((uint64_t)(*pkts)->data_len << CI_TXD_QW1_TX_BUF_SZ_S));
+}
+
+/* Fill hardware descriptor ring with mbuf data */
+static inline void
+ci_tx_fill_hw_ring(struct ci_tx_queue *txq, struct rte_mbuf **pkts,
+ uint16_t nb_pkts)
+{
+ volatile struct ci_tx_desc *txdp = &txq->ci_tx_ring[txq->tx_tail];
+ struct ci_tx_entry *txep = &txq->sw_ring[txq->tx_tail];
+ const int N_PER_LOOP = 4;
+ const int N_PER_LOOP_MASK = N_PER_LOOP - 1;
+ int mainpart, leftover;
+ int i, j;
+
+ mainpart = nb_pkts & ((uint32_t)~N_PER_LOOP_MASK);
+ leftover = nb_pkts & ((uint32_t)N_PER_LOOP_MASK);
+ for (i = 0; i < mainpart; i += N_PER_LOOP) {
+ for (j = 0; j < N_PER_LOOP; ++j)
+ (txep + i + j)->mbuf = *(pkts + i + j);
+ ci_tx_fill_hw_ring_tx4(txdp + i, pkts + i);
+ }
+
+ if (unlikely(leftover > 0)) {
+ for (i = 0; i < leftover; ++i) {
+ (txep + mainpart + i)->mbuf = *(pkts + mainpart + i);
+ ci_tx_fill_hw_ring_tx1(txdp + mainpart + i,
+ pkts + mainpart + i);
+ }
+ }
+}
+
/*
* Common transmit descriptor cleanup function for Intel drivers.
*
diff --git a/drivers/net/intel/i40e/i40e_rxtx.c b/drivers/net/intel/i40e/i40e_rxtx.c
index ba94c59c0a..174d517e9d 100644
--- a/drivers/net/intel/i40e/i40e_rxtx.c
+++ b/drivers/net/intel/i40e/i40e_rxtx.c
@@ -311,19 +311,6 @@ i40e_parse_tunneling_params(uint64_t ol_flags,
*cd_tunneling |= I40E_TXD_CTX_QW0_L4T_CS_MASK;
}
-/* Construct the tx flags */
-static inline uint64_t
-i40e_build_ctob(uint32_t td_cmd,
- uint32_t td_offset,
- unsigned int size,
- uint32_t td_tag)
-{
- return rte_cpu_to_le_64(CI_TX_DESC_DTYPE_DATA |
- ((uint64_t)td_cmd << CI_TXD_QW1_CMD_S) |
- ((uint64_t)td_offset << CI_TXD_QW1_OFFSET_S) |
- ((uint64_t)size << CI_TXD_QW1_TX_BUF_SZ_S) |
- ((uint64_t)td_tag << CI_TXD_QW1_L2TAG1_S));
-}
static inline int
#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
@@ -1082,64 +1069,6 @@ i40e_tx_free_bufs(struct ci_tx_queue *txq)
return tx_rs_thresh;
}
-/* Populate 4 descriptors with data from 4 mbufs */
-static inline void
-tx4(volatile struct ci_tx_desc *txdp, struct rte_mbuf **pkts)
-{
- uint64_t dma_addr;
- uint32_t i;
-
- for (i = 0; i < 4; i++, txdp++, pkts++) {
- dma_addr = rte_mbuf_data_iova(*pkts);
- txdp->buffer_addr = rte_cpu_to_le_64(dma_addr);
- txdp->cmd_type_offset_bsz =
- i40e_build_ctob((uint32_t)I40E_TD_CMD, 0,
- (*pkts)->data_len, 0);
- }
-}
-
-/* Populate 1 descriptor with data from 1 mbuf */
-static inline void
-tx1(volatile struct ci_tx_desc *txdp, struct rte_mbuf **pkts)
-{
- uint64_t dma_addr;
-
- dma_addr = rte_mbuf_data_iova(*pkts);
- txdp->buffer_addr = rte_cpu_to_le_64(dma_addr);
- txdp->cmd_type_offset_bsz =
- i40e_build_ctob((uint32_t)I40E_TD_CMD, 0,
- (*pkts)->data_len, 0);
-}
-
-/* Fill hardware descriptor ring with mbuf data */
-static inline void
-i40e_tx_fill_hw_ring(struct ci_tx_queue *txq,
- struct rte_mbuf **pkts,
- uint16_t nb_pkts)
-{
- volatile struct ci_tx_desc *txdp = &txq->ci_tx_ring[txq->tx_tail];
- struct ci_tx_entry *txep = &txq->sw_ring[txq->tx_tail];
- const int N_PER_LOOP = 4;
- const int N_PER_LOOP_MASK = N_PER_LOOP - 1;
- int mainpart, leftover;
- int i, j;
-
- mainpart = (nb_pkts & ((uint32_t) ~N_PER_LOOP_MASK));
- leftover = (nb_pkts & ((uint32_t) N_PER_LOOP_MASK));
- for (i = 0; i < mainpart; i += N_PER_LOOP) {
- for (j = 0; j < N_PER_LOOP; ++j) {
- (txep + i + j)->mbuf = *(pkts + i + j);
- }
- tx4(txdp + i, pkts + i);
- }
- if (unlikely(leftover > 0)) {
- for (i = 0; i < leftover; ++i) {
- (txep + mainpart + i)->mbuf = *(pkts + mainpart + i);
- tx1(txdp + mainpart + i, pkts + mainpart + i);
- }
- }
-}
-
static inline uint16_t
tx_xmit_pkts(struct ci_tx_queue *txq,
struct rte_mbuf **tx_pkts,
@@ -1164,7 +1093,7 @@ tx_xmit_pkts(struct ci_tx_queue *txq,
txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
if ((txq->tx_tail + nb_pkts) > txq->nb_tx_desc) {
n = (uint16_t)(txq->nb_tx_desc - txq->tx_tail);
- i40e_tx_fill_hw_ring(txq, tx_pkts, n);
+ ci_tx_fill_hw_ring(txq, tx_pkts, n);
txr[txq->tx_next_rs].cmd_type_offset_bsz |=
rte_cpu_to_le_64(((uint64_t)CI_TX_DESC_CMD_RS) << CI_TXD_QW1_CMD_S);
txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
@@ -1172,7 +1101,7 @@ tx_xmit_pkts(struct ci_tx_queue *txq,
}
/* Fill hardware descriptor ring with mbuf data */
- i40e_tx_fill_hw_ring(txq, tx_pkts + n, (uint16_t)(nb_pkts - n));
+ ci_tx_fill_hw_ring(txq, tx_pkts + n, (uint16_t)(nb_pkts - n));
txq->tx_tail = (uint16_t)(txq->tx_tail + (nb_pkts - n));
/* Determine if RS bit needs to be set */
@@ -1201,13 +1130,13 @@ i40e_xmit_pkts_simple(void *tx_queue,
{
uint16_t nb_tx = 0;
- if (likely(nb_pkts <= I40E_TX_MAX_BURST))
+ if (likely(nb_pkts <= CI_TX_MAX_BURST))
return tx_xmit_pkts((struct ci_tx_queue *)tx_queue,
tx_pkts, nb_pkts);
while (nb_pkts) {
uint16_t ret, num = (uint16_t)RTE_MIN(nb_pkts,
- I40E_TX_MAX_BURST);
+ CI_TX_MAX_BURST);
ret = tx_xmit_pkts((struct ci_tx_queue *)tx_queue,
&tx_pkts[nb_tx], num);
diff --git a/drivers/net/intel/i40e/i40e_rxtx.h b/drivers/net/intel/i40e/i40e_rxtx.h
index db8525d52d..88d47f261e 100644
--- a/drivers/net/intel/i40e/i40e_rxtx.h
+++ b/drivers/net/intel/i40e/i40e_rxtx.h
@@ -47,9 +47,6 @@
#define I40E_RX_DESC_EXT_STATUS_FLEXBL_MASK 0x03
#define I40E_RX_DESC_EXT_STATUS_FLEXBL_FLEX 0x01
-#define I40E_TD_CMD (CI_TX_DESC_CMD_ICRC |\
- CI_TX_DESC_CMD_EOP)
-
enum i40e_header_split_mode {
i40e_header_split_none = 0,
i40e_header_split_enabled = 1,
diff --git a/drivers/net/intel/i40e/i40e_rxtx_vec_altivec.c b/drivers/net/intel/i40e/i40e_rxtx_vec_altivec.c
index 4c36748d94..68667bdc9b 100644
--- a/drivers/net/intel/i40e/i40e_rxtx_vec_altivec.c
+++ b/drivers/net/intel/i40e/i40e_rxtx_vec_altivec.c
@@ -476,8 +476,8 @@ i40e_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
volatile struct ci_tx_desc *txdp;
struct ci_tx_entry_vec *txep;
uint16_t n, nb_commit, tx_id;
- uint64_t flags = I40E_TD_CMD;
- uint64_t rs = CI_TX_DESC_CMD_RS | I40E_TD_CMD;
+ uint64_t flags = CI_TX_DESC_CMD_DEFAULT;
+ uint64_t rs = CI_TX_DESC_CMD_RS | CI_TX_DESC_CMD_DEFAULT;
int i;
if (txq->nb_tx_free < txq->tx_free_thresh)
diff --git a/drivers/net/intel/i40e/i40e_rxtx_vec_avx2.c b/drivers/net/intel/i40e/i40e_rxtx_vec_avx2.c
index 502a1842c6..e1672c4371 100644
--- a/drivers/net/intel/i40e/i40e_rxtx_vec_avx2.c
+++ b/drivers/net/intel/i40e/i40e_rxtx_vec_avx2.c
@@ -741,8 +741,8 @@ i40e_xmit_fixed_burst_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
volatile struct ci_tx_desc *txdp;
struct ci_tx_entry_vec *txep;
uint16_t n, nb_commit, tx_id;
- uint64_t flags = I40E_TD_CMD;
- uint64_t rs = CI_TX_DESC_CMD_RS | I40E_TD_CMD;
+ uint64_t flags = CI_TX_DESC_CMD_DEFAULT;
+ uint64_t rs = CI_TX_DESC_CMD_RS | CI_TX_DESC_CMD_DEFAULT;
if (txq->nb_tx_free < txq->tx_free_thresh)
ci_tx_free_bufs_vec(txq, i40e_tx_desc_done, false);
diff --git a/drivers/net/intel/i40e/i40e_rxtx_vec_avx512.c b/drivers/net/intel/i40e/i40e_rxtx_vec_avx512.c
index d48ff9f51e..bceb95ad2d 100644
--- a/drivers/net/intel/i40e/i40e_rxtx_vec_avx512.c
+++ b/drivers/net/intel/i40e/i40e_rxtx_vec_avx512.c
@@ -801,8 +801,8 @@ i40e_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
volatile struct ci_tx_desc *txdp;
struct ci_tx_entry_vec *txep;
uint16_t n, nb_commit, tx_id;
- uint64_t flags = I40E_TD_CMD;
- uint64_t rs = CI_TX_DESC_CMD_RS | I40E_TD_CMD;
+ uint64_t flags = CI_TX_DESC_CMD_DEFAULT;
+ uint64_t rs = CI_TX_DESC_CMD_RS | CI_TX_DESC_CMD_DEFAULT;
if (txq->nb_tx_free < txq->tx_free_thresh)
ci_tx_free_bufs_vec(txq, i40e_tx_desc_done, false);
diff --git a/drivers/net/intel/i40e/i40e_rxtx_vec_neon.c b/drivers/net/intel/i40e/i40e_rxtx_vec_neon.c
index be4c64942e..debc9bda28 100644
--- a/drivers/net/intel/i40e/i40e_rxtx_vec_neon.c
+++ b/drivers/net/intel/i40e/i40e_rxtx_vec_neon.c
@@ -626,8 +626,8 @@ i40e_xmit_fixed_burst_vec(void *__rte_restrict tx_queue,
volatile struct ci_tx_desc *txdp;
struct ci_tx_entry_vec *txep;
uint16_t n, nb_commit, tx_id;
- uint64_t flags = I40E_TD_CMD;
- uint64_t rs = CI_TX_DESC_CMD_RS | I40E_TD_CMD;
+ uint64_t flags = CI_TX_DESC_CMD_DEFAULT;
+ uint64_t rs = CI_TX_DESC_CMD_RS | CI_TX_DESC_CMD_DEFAULT;
int i;
if (txq->nb_tx_free < txq->tx_free_thresh)
diff --git a/drivers/net/intel/ice/ice_rxtx.c b/drivers/net/intel/ice/ice_rxtx.c
index fe65df94da..e4fba453a9 100644
--- a/drivers/net/intel/ice/ice_rxtx.c
+++ b/drivers/net/intel/ice/ice_rxtx.c
@@ -3286,67 +3286,6 @@ ice_tx_done_cleanup(void *txq, uint32_t free_cnt)
return ice_tx_done_cleanup_full(q, free_cnt);
}
-/* Populate 4 descriptors with data from 4 mbufs */
-static inline void
-tx4(volatile struct ci_tx_desc *txdp, struct rte_mbuf **pkts)
-{
- uint64_t dma_addr;
- uint32_t i;
-
- for (i = 0; i < 4; i++, txdp++, pkts++) {
- dma_addr = rte_mbuf_data_iova(*pkts);
- txdp->buffer_addr = rte_cpu_to_le_64(dma_addr);
- txdp->cmd_type_offset_bsz =
- ice_build_ctob((uint32_t)ICE_TD_CMD, 0,
- (*pkts)->data_len, 0);
- }
-}
-
-/* Populate 1 descriptor with data from 1 mbuf */
-static inline void
-tx1(volatile struct ci_tx_desc *txdp, struct rte_mbuf **pkts)
-{
- uint64_t dma_addr;
-
- dma_addr = rte_mbuf_data_iova(*pkts);
- txdp->buffer_addr = rte_cpu_to_le_64(dma_addr);
- txdp->cmd_type_offset_bsz =
- ice_build_ctob((uint32_t)ICE_TD_CMD, 0,
- (*pkts)->data_len, 0);
-}
-
-static inline void
-ice_tx_fill_hw_ring(struct ci_tx_queue *txq, struct rte_mbuf **pkts,
- uint16_t nb_pkts)
-{
- volatile struct ci_tx_desc *txdp = &txq->ci_tx_ring[txq->tx_tail];
- struct ci_tx_entry *txep = &txq->sw_ring[txq->tx_tail];
- const int N_PER_LOOP = 4;
- const int N_PER_LOOP_MASK = N_PER_LOOP - 1;
- int mainpart, leftover;
- int i, j;
-
- /**
- * Process most of the packets in chunks of N pkts. Any
- * leftover packets will get processed one at a time.
- */
- mainpart = nb_pkts & ((uint32_t)~N_PER_LOOP_MASK);
- leftover = nb_pkts & ((uint32_t)N_PER_LOOP_MASK);
- for (i = 0; i < mainpart; i += N_PER_LOOP) {
- /* Copy N mbuf pointers to the S/W ring */
- for (j = 0; j < N_PER_LOOP; ++j)
- (txep + i + j)->mbuf = *(pkts + i + j);
- tx4(txdp + i, pkts + i);
- }
-
- if (unlikely(leftover > 0)) {
- for (i = 0; i < leftover; ++i) {
- (txep + mainpart + i)->mbuf = *(pkts + mainpart + i);
- tx1(txdp + mainpart + i, pkts + mainpart + i);
- }
- }
-}
-
static inline uint16_t
tx_xmit_pkts(struct ci_tx_queue *txq,
struct rte_mbuf **tx_pkts,
@@ -3371,7 +3310,7 @@ tx_xmit_pkts(struct ci_tx_queue *txq,
txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
if ((txq->tx_tail + nb_pkts) > txq->nb_tx_desc) {
n = (uint16_t)(txq->nb_tx_desc - txq->tx_tail);
- ice_tx_fill_hw_ring(txq, tx_pkts, n);
+ ci_tx_fill_hw_ring(txq, tx_pkts, n);
txr[txq->tx_next_rs].cmd_type_offset_bsz |=
rte_cpu_to_le_64(((uint64_t)CI_TX_DESC_CMD_RS) << CI_TXD_QW1_CMD_S);
txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
@@ -3379,7 +3318,7 @@ tx_xmit_pkts(struct ci_tx_queue *txq,
}
/* Fill hardware descriptor ring with mbuf data */
- ice_tx_fill_hw_ring(txq, tx_pkts + n, (uint16_t)(nb_pkts - n));
+ ci_tx_fill_hw_ring(txq, tx_pkts + n, (uint16_t)(nb_pkts - n));
txq->tx_tail = (uint16_t)(txq->tx_tail + (nb_pkts - n));
/* Determine if RS bit needs to be set */
@@ -3408,13 +3347,13 @@ ice_xmit_pkts_simple(void *tx_queue,
{
uint16_t nb_tx = 0;
- if (likely(nb_pkts <= ICE_TX_MAX_BURST))
+ if (likely(nb_pkts <= CI_TX_MAX_BURST))
return tx_xmit_pkts((struct ci_tx_queue *)tx_queue,
tx_pkts, nb_pkts);
while (nb_pkts) {
uint16_t ret, num = (uint16_t)RTE_MIN(nb_pkts,
- ICE_TX_MAX_BURST);
+ CI_TX_MAX_BURST);
ret = tx_xmit_pkts((struct ci_tx_queue *)tx_queue,
&tx_pkts[nb_tx], num);
diff --git a/drivers/net/intel/ice/ice_rxtx.h b/drivers/net/intel/ice/ice_rxtx.h
index 7d6480b410..77ed41f9fd 100644
--- a/drivers/net/intel/ice/ice_rxtx.h
+++ b/drivers/net/intel/ice/ice_rxtx.h
@@ -46,8 +46,6 @@
#define ICE_SUPPORT_CHAIN_NUM 5
-#define ICE_TD_CMD CI_TX_DESC_CMD_EOP
-
#define ICE_VPMD_RX_BURST CI_VPMD_RX_BURST
#define ICE_VPMD_TX_BURST 32
#define ICE_VPMD_RXQ_REARM_THRESH CI_VPMD_RX_REARM_THRESH
diff --git a/drivers/net/intel/ice/ice_rxtx_vec_avx2.c b/drivers/net/intel/ice/ice_rxtx_vec_avx2.c
index 2922671158..d03f2e5b36 100644
--- a/drivers/net/intel/ice/ice_rxtx_vec_avx2.c
+++ b/drivers/net/intel/ice/ice_rxtx_vec_avx2.c
@@ -845,8 +845,8 @@ ice_xmit_fixed_burst_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
volatile struct ci_tx_desc *txdp;
struct ci_tx_entry_vec *txep;
uint16_t n, nb_commit, tx_id;
- uint64_t flags = ICE_TD_CMD;
- uint64_t rs = CI_TX_DESC_CMD_RS | ICE_TD_CMD;
+ uint64_t flags = CI_TX_DESC_CMD_DEFAULT;
+ uint64_t rs = CI_TX_DESC_CMD_RS | CI_TX_DESC_CMD_DEFAULT;
/* cross rx_thresh boundary is not allowed */
nb_pkts = RTE_MIN(nb_pkts, txq->tx_rs_thresh);
diff --git a/drivers/net/intel/ice/ice_rxtx_vec_avx512.c b/drivers/net/intel/ice/ice_rxtx_vec_avx512.c
index e64b6e227b..004c01054a 100644
--- a/drivers/net/intel/ice/ice_rxtx_vec_avx512.c
+++ b/drivers/net/intel/ice/ice_rxtx_vec_avx512.c
@@ -909,8 +909,8 @@ ice_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
volatile struct ci_tx_desc *txdp;
struct ci_tx_entry_vec *txep;
uint16_t n, nb_commit, tx_id;
- uint64_t flags = ICE_TD_CMD;
- uint64_t rs = CI_TX_DESC_CMD_RS | ICE_TD_CMD;
+ uint64_t flags = CI_TX_DESC_CMD_DEFAULT;
+ uint64_t rs = CI_TX_DESC_CMD_RS | CI_TX_DESC_CMD_DEFAULT;
/* cross rx_thresh boundary is not allowed */
nb_pkts = RTE_MIN(nb_pkts, txq->tx_rs_thresh);
--
2.51.0
next prev parent reply other threads:[~2026-02-11 18:16 UTC|newest]
Thread overview: 274+ messages / expand[flat|nested] mbox.gz Atom feed top
2025-12-19 17:25 [RFC PATCH 00/27] combine multiple Intel scalar Tx paths Bruce Richardson
2025-12-19 17:25 ` [RFC PATCH 01/27] net/intel: create common Tx descriptor structure Bruce Richardson
2025-12-19 17:25 ` [RFC PATCH 02/27] net/intel: use common tx ring structure Bruce Richardson
2025-12-19 17:25 ` [RFC PATCH 03/27] net/intel: create common post-Tx cleanup function Bruce Richardson
2025-12-19 17:25 ` [RFC PATCH 04/27] net/intel: consolidate definitions for Tx desc fields Bruce Richardson
2025-12-19 17:25 ` [RFC PATCH 05/27] net/intel: create separate header for Tx scalar fns Bruce Richardson
2025-12-19 17:25 ` [RFC PATCH 06/27] net/intel: add common fn to calculate needed descriptors Bruce Richardson
2025-12-19 17:25 ` [RFC PATCH 07/27] net/ice: refactor context descriptor handling Bruce Richardson
2025-12-19 17:25 ` [RFC PATCH 08/27] net/i40e: " Bruce Richardson
2025-12-19 17:25 ` [RFC PATCH 09/27] net/idpf: " Bruce Richardson
2025-12-19 17:25 ` [RFC PATCH 10/27] net/intel: consolidate checksum mask definition Bruce Richardson
2025-12-19 17:25 ` [RFC PATCH 11/27] net/intel: create common checksum Tx offload function Bruce Richardson
2025-12-19 17:25 ` [RFC PATCH 12/27] net/intel: create a common scalar Tx function Bruce Richardson
2025-12-19 17:25 ` [RFC PATCH 13/27] net/i40e: use " Bruce Richardson
2025-12-19 17:25 ` [RFC PATCH 14/27] net/intel: add IPSec hooks to common " Bruce Richardson
2025-12-19 17:25 ` [RFC PATCH 15/27] net/intel: support configurable VLAN tag insertion on Tx Bruce Richardson
2025-12-19 17:25 ` [RFC PATCH 16/27] net/iavf: use common scalar Tx function Bruce Richardson
2025-12-19 17:25 ` [RFC PATCH 17/27] net/i40e: document requirement for QinQ support Bruce Richardson
2025-12-19 17:25 ` [RFC PATCH 18/27] net/idpf: use common scalar Tx function Bruce Richardson
2025-12-19 17:25 ` [RFC PATCH 19/27] net/intel: avoid writing the final pkt descriptor twice Bruce Richardson
2025-12-19 17:25 ` [RFC PATCH 20/27] net/intel: write descriptors using non-volatile pointers Bruce Richardson
2025-12-20 8:43 ` Morten Brørup
2025-12-22 9:50 ` Bruce Richardson
2025-12-19 17:25 ` [RFC PATCH 21/27] net/intel: remove unnecessary flag clearing Bruce Richardson
2025-12-19 17:25 ` [RFC PATCH 22/27] net/intel: mark mid-burst ring cleanup as unlikely Bruce Richardson
2025-12-19 17:25 ` [RFC PATCH 23/27] net/intel: add special handling for single desc packets Bruce Richardson
2025-12-19 17:25 ` [RFC PATCH 24/27] net/intel: use separate array for desc status tracking Bruce Richardson
2025-12-19 17:25 ` [RFC PATCH 25/27] net/ixgbe: " Bruce Richardson
2025-12-19 17:25 ` [RFC PATCH 26/27] net/intel: drop unused Tx queue used count Bruce Richardson
2025-12-19 17:25 ` [RFC PATCH 27/27] net/intel: remove index for tracking end of packet Bruce Richardson
2025-12-20 9:05 ` Morten Brørup
2026-01-13 15:14 ` [PATCH v2 00/36] combine multiple Intel scalar Tx paths Bruce Richardson
2026-01-13 15:14 ` [PATCH v2 01/36] net/intel: create common Tx descriptor structure Bruce Richardson
2026-01-13 15:14 ` [PATCH v2 02/36] net/intel: use common Tx ring structure Bruce Richardson
2026-01-13 15:14 ` [PATCH v2 03/36] net/intel: create common post-Tx cleanup function Bruce Richardson
2026-01-13 15:14 ` [PATCH v2 04/36] net/intel: consolidate definitions for Tx desc fields Bruce Richardson
2026-01-13 15:14 ` [PATCH v2 05/36] net/intel: create separate header for Tx scalar fns Bruce Richardson
2026-01-13 15:14 ` [PATCH v2 06/36] net/intel: add common fn to calculate needed descriptors Bruce Richardson
2026-01-13 15:14 ` [PATCH v2 07/36] net/ice: refactor context descriptor handling Bruce Richardson
2026-01-13 15:14 ` [PATCH v2 08/36] net/i40e: " Bruce Richardson
2026-01-13 15:14 ` [PATCH v2 09/36] net/idpf: " Bruce Richardson
2026-01-13 15:14 ` [PATCH v2 10/36] net/intel: consolidate checksum mask definition Bruce Richardson
2026-01-13 15:14 ` [PATCH v2 11/36] net/intel: create common checksum Tx offload function Bruce Richardson
2026-01-13 15:14 ` [PATCH v2 12/36] net/intel: create a common scalar Tx function Bruce Richardson
2026-01-13 15:14 ` [PATCH v2 13/36] net/i40e: use " Bruce Richardson
2026-01-13 15:14 ` [PATCH v2 14/36] net/intel: add IPsec hooks to common " Bruce Richardson
2026-01-13 15:14 ` [PATCH v2 15/36] net/intel: support configurable VLAN tag insertion on Tx Bruce Richardson
2026-01-13 15:14 ` [PATCH v2 16/36] net/iavf: use common scalar Tx function Bruce Richardson
2026-01-13 15:14 ` [PATCH v2 17/36] net/i40e: document requirement for QinQ support Bruce Richardson
2026-01-13 15:14 ` [PATCH v2 18/36] net/idpf: use common scalar Tx function Bruce Richardson
2026-01-13 15:14 ` [PATCH v2 19/36] net/intel: avoid writing the final pkt descriptor twice Bruce Richardson
2026-01-13 15:14 ` [PATCH v2 20/36] eal: add macro for marking assumed alignment Bruce Richardson
2026-01-13 15:14 ` [PATCH v2 21/36] net/intel: write descriptors using non-volatile pointers Bruce Richardson
2026-01-13 15:14 ` [PATCH v2 22/36] net/intel: remove unnecessary flag clearing Bruce Richardson
2026-01-13 15:14 ` [PATCH v2 23/36] net/intel: mark mid-burst ring cleanup as unlikely Bruce Richardson
2026-01-13 15:14 ` [PATCH v2 24/36] net/intel: add special handling for single desc packets Bruce Richardson
2026-01-13 15:14 ` [PATCH v2 25/36] net/intel: use separate array for desc status tracking Bruce Richardson
2026-01-13 15:14 ` [PATCH v2 26/36] net/ixgbe: " Bruce Richardson
2026-01-13 15:14 ` [PATCH v2 27/36] net/intel: drop unused Tx queue used count Bruce Richardson
2026-01-13 15:14 ` [PATCH v2 28/36] net/intel: remove index for tracking end of packet Bruce Richardson
2026-01-13 15:14 ` [PATCH v2 29/36] net/intel: merge ring writes in simple Tx for ice and i40e Bruce Richardson
2026-01-13 15:14 ` [PATCH v2 30/36] net/intel: consolidate ice and i40e buffer free function Bruce Richardson
2026-01-13 15:14 ` [PATCH v2 31/36] net/intel: complete merging simple Tx paths Bruce Richardson
2026-01-13 15:14 ` [PATCH v2 32/36] net/intel: use non-volatile stores in simple Tx function Bruce Richardson
2026-01-13 15:14 ` [PATCH v2 33/36] net/intel: align scalar simple Tx path with vector logic Bruce Richardson
2026-01-13 15:14 ` [PATCH v2 34/36] net/intel: use vector SW ring entry for simple path Bruce Richardson
2026-01-13 15:14 ` [PATCH v2 35/36] net/intel: use vector mbuf cleanup from simple scalar path Bruce Richardson
2026-01-13 15:15 ` [PATCH v2 36/36] net/idpf: enable simple Tx function Bruce Richardson
2026-01-13 17:17 ` [PATCH v2 00/36] combine multiple Intel scalar Tx paths Stephen Hemminger
2026-01-23 6:26 ` Stephen Hemminger
2026-01-26 9:02 ` Bruce Richardson
2026-01-30 11:41 ` [PATCH v3 " Bruce Richardson
2026-01-30 11:41 ` [PATCH v3 01/36] net/intel: create common Tx descriptor structure Bruce Richardson
2026-02-06 9:56 ` Loftus, Ciara
2026-01-30 11:41 ` [PATCH v3 02/36] net/intel: use common Tx ring structure Bruce Richardson
2026-02-06 9:59 ` Loftus, Ciara
2026-01-30 11:41 ` [PATCH v3 03/36] net/intel: create common post-Tx cleanup function Bruce Richardson
2026-02-06 10:07 ` Loftus, Ciara
2026-02-09 10:41 ` Bruce Richardson
2026-01-30 11:41 ` [PATCH v3 04/36] net/intel: consolidate definitions for Tx desc fields Bruce Richardson
2026-02-06 10:14 ` Loftus, Ciara
2026-02-09 10:43 ` Bruce Richardson
2026-01-30 11:41 ` [PATCH v3 05/36] net/intel: create separate header for Tx scalar fns Bruce Richardson
2026-02-06 10:23 ` Loftus, Ciara
2026-02-09 11:04 ` Bruce Richardson
2026-01-30 11:41 ` [PATCH v3 06/36] net/intel: add common fn to calculate needed descriptors Bruce Richardson
2026-02-06 10:25 ` Loftus, Ciara
2026-02-09 11:15 ` Bruce Richardson
2026-01-30 11:41 ` [PATCH v3 07/36] net/ice: refactor context descriptor handling Bruce Richardson
2026-02-06 10:47 ` Loftus, Ciara
2026-02-09 11:16 ` Bruce Richardson
2026-01-30 11:41 ` [PATCH v3 08/36] net/i40e: " Bruce Richardson
2026-02-06 10:54 ` Loftus, Ciara
2026-01-30 11:41 ` [PATCH v3 09/36] net/idpf: " Bruce Richardson
2026-02-06 10:59 ` Loftus, Ciara
2026-01-30 11:41 ` [PATCH v3 10/36] net/intel: consolidate checksum mask definition Bruce Richardson
2026-02-06 11:25 ` Loftus, Ciara
2026-02-09 11:40 ` Bruce Richardson
2026-02-09 15:00 ` Bruce Richardson
2026-01-30 11:41 ` [PATCH v3 11/36] net/intel: create common checksum Tx offload function Bruce Richardson
2026-02-06 11:37 ` Loftus, Ciara
2026-02-09 11:41 ` Bruce Richardson
2026-01-30 11:41 ` [PATCH v3 12/36] net/intel: create a common scalar Tx function Bruce Richardson
2026-02-06 12:01 ` Loftus, Ciara
2026-02-06 12:13 ` Bruce Richardson
2026-01-30 11:41 ` [PATCH v3 13/36] net/i40e: use " Bruce Richardson
2026-01-30 11:41 ` [PATCH v3 14/36] net/intel: add IPsec hooks to common " Bruce Richardson
2026-01-30 11:41 ` [PATCH v3 15/36] net/intel: support configurable VLAN tag insertion on Tx Bruce Richardson
2026-01-30 11:41 ` [PATCH v3 16/36] net/iavf: use common scalar Tx function Bruce Richardson
2026-01-30 11:41 ` [PATCH v3 17/36] net/i40e: document requirement for QinQ support Bruce Richardson
2026-01-30 11:41 ` [PATCH v3 18/36] net/idpf: use common scalar Tx function Bruce Richardson
2026-01-30 11:41 ` [PATCH v3 19/36] net/intel: avoid writing the final pkt descriptor twice Bruce Richardson
2026-01-30 11:41 ` [PATCH v3 20/36] eal: add macro for marking assumed alignment Bruce Richardson
2026-01-30 11:41 ` [PATCH v3 21/36] net/intel: write descriptors using non-volatile pointers Bruce Richardson
2026-01-30 11:41 ` [PATCH v3 22/36] net/intel: remove unnecessary flag clearing Bruce Richardson
2026-01-30 11:41 ` [PATCH v3 23/36] net/intel: mark mid-burst ring cleanup as unlikely Bruce Richardson
2026-01-30 11:41 ` [PATCH v3 24/36] net/intel: add special handling for single desc packets Bruce Richardson
2026-01-30 11:41 ` [PATCH v3 25/36] net/intel: use separate array for desc status tracking Bruce Richardson
2026-01-30 11:41 ` [PATCH v3 26/36] net/ixgbe: " Bruce Richardson
2026-01-30 11:41 ` [PATCH v3 27/36] net/intel: drop unused Tx queue used count Bruce Richardson
2026-01-30 11:41 ` [PATCH v3 28/36] net/intel: remove index for tracking end of packet Bruce Richardson
2026-01-30 11:41 ` [PATCH v3 29/36] net/intel: merge ring writes in simple Tx for ice and i40e Bruce Richardson
2026-01-30 11:41 ` [PATCH v3 30/36] net/intel: consolidate ice and i40e buffer free function Bruce Richardson
2026-01-30 11:41 ` [PATCH v3 31/36] net/intel: complete merging simple Tx paths Bruce Richardson
2026-01-30 11:41 ` [PATCH v3 32/36] net/intel: use non-volatile stores in simple Tx function Bruce Richardson
2026-01-30 11:42 ` [PATCH v3 33/36] net/intel: align scalar simple Tx path with vector logic Bruce Richardson
2026-01-30 11:42 ` [PATCH v3 34/36] net/intel: use vector SW ring entry for simple path Bruce Richardson
2026-01-30 11:42 ` [PATCH v3 35/36] net/intel: use vector mbuf cleanup from simple scalar path Bruce Richardson
2026-01-30 11:42 ` [PATCH v3 36/36] net/idpf: enable simple Tx function Bruce Richardson
2026-01-30 17:56 ` [REVIEW] " Stephen Hemminger
2026-02-09 16:44 ` [PATCH v4 00/35] combine multiple Intel scalar Tx paths Bruce Richardson
2026-02-09 16:44 ` [PATCH v4 01/35] net/intel: create common Tx descriptor structure Bruce Richardson
2026-02-09 16:45 ` [PATCH v4 02/35] net/intel: use common Tx ring structure Bruce Richardson
2026-02-09 16:45 ` [PATCH v4 03/35] net/intel: create common post-Tx cleanup function Bruce Richardson
2026-02-10 12:18 ` Burakov, Anatoly
2026-02-09 16:45 ` [PATCH v4 04/35] net/intel: consolidate definitions for Tx desc fields Bruce Richardson
2026-02-10 12:26 ` Burakov, Anatoly
2026-02-10 16:47 ` Bruce Richardson
2026-02-09 16:45 ` [PATCH v4 05/35] net/intel: add common fn to calculate needed descriptors Bruce Richardson
2026-02-10 12:29 ` Burakov, Anatoly
2026-02-10 14:08 ` Bruce Richardson
2026-02-10 14:17 ` Burakov, Anatoly
2026-02-10 17:25 ` Bruce Richardson
2026-02-11 9:14 ` Burakov, Anatoly
2026-02-09 16:45 ` [PATCH v4 06/35] net/ice: refactor context descriptor handling Bruce Richardson
2026-02-10 12:42 ` Burakov, Anatoly
2026-02-10 17:40 ` Bruce Richardson
2026-02-11 9:17 ` Burakov, Anatoly
2026-02-11 10:38 ` Bruce Richardson
2026-02-09 16:45 ` [PATCH v4 07/35] net/i40e: " Bruce Richardson
2026-02-10 12:48 ` Burakov, Anatoly
2026-02-10 14:10 ` Bruce Richardson
2026-02-10 14:19 ` Burakov, Anatoly
2026-02-10 17:54 ` Bruce Richardson
2026-02-11 9:20 ` Burakov, Anatoly
2026-02-11 12:04 ` Bruce Richardson
2026-02-09 16:45 ` [PATCH v4 08/35] net/idpf: " Bruce Richardson
2026-02-10 12:52 ` Burakov, Anatoly
2026-02-09 16:45 ` [PATCH v4 09/35] net/intel: consolidate checksum mask definition Bruce Richardson
2026-02-10 13:00 ` Burakov, Anatoly
2026-02-09 16:45 ` [PATCH v4 10/35] net/intel: create common checksum Tx offload function Bruce Richardson
2026-02-10 13:04 ` Burakov, Anatoly
2026-02-10 17:56 ` Bruce Richardson
2026-02-09 16:45 ` [PATCH v4 11/35] net/intel: create a common scalar Tx function Bruce Richardson
2026-02-10 13:14 ` Burakov, Anatoly
2026-02-10 18:03 ` Bruce Richardson
2026-02-11 9:26 ` Burakov, Anatoly
2026-02-09 16:45 ` [PATCH v4 12/35] net/i40e: use " Bruce Richardson
2026-02-10 13:14 ` Burakov, Anatoly
2026-02-09 16:45 ` [PATCH v4 13/35] net/intel: add IPsec hooks to common " Bruce Richardson
2026-02-10 13:16 ` Burakov, Anatoly
2026-02-09 16:45 ` [PATCH v4 14/35] net/intel: support configurable VLAN tag insertion on Tx Bruce Richardson
2026-02-10 13:21 ` Burakov, Anatoly
2026-02-10 18:20 ` Bruce Richardson
2026-02-11 9:29 ` Burakov, Anatoly
2026-02-11 14:19 ` Bruce Richardson
2026-02-09 16:45 ` [PATCH v4 15/35] net/iavf: use common scalar Tx function Bruce Richardson
2026-02-10 13:27 ` Burakov, Anatoly
2026-02-10 18:31 ` Bruce Richardson
2026-02-09 16:45 ` [PATCH v4 16/35] net/i40e: document requirement for QinQ support Bruce Richardson
2026-02-10 13:27 ` Burakov, Anatoly
2026-02-09 16:45 ` [PATCH v4 17/35] net/idpf: use common scalar Tx function Bruce Richardson
2026-02-10 13:30 ` Burakov, Anatoly
2026-02-09 16:45 ` [PATCH v4 18/35] net/intel: avoid writing the final pkt descriptor twice Bruce Richardson
2026-02-10 13:31 ` Burakov, Anatoly
2026-02-09 16:45 ` [PATCH v4 19/35] eal: add macro for marking assumed alignment Bruce Richardson
2026-02-09 22:35 ` Morten Brørup
2026-02-11 14:45 ` Bruce Richardson
2026-02-09 16:45 ` [PATCH v4 20/35] net/intel: write descriptors using non-volatile pointers Bruce Richardson
2026-02-09 23:08 ` Morten Brørup
2026-02-10 9:03 ` Bruce Richardson
2026-02-10 9:28 ` Morten Brørup
2026-02-11 14:44 ` Bruce Richardson
2026-02-11 14:44 ` Bruce Richardson
2026-02-09 16:45 ` [PATCH v4 21/35] net/intel: remove unnecessary flag clearing Bruce Richardson
2026-02-10 13:33 ` Burakov, Anatoly
2026-02-09 16:45 ` [PATCH v4 22/35] net/intel: mark mid-burst ring cleanup as unlikely Bruce Richardson
2026-02-10 13:36 ` Burakov, Anatoly
2026-02-10 14:13 ` Bruce Richardson
2026-02-11 18:12 ` Bruce Richardson
2026-02-09 16:45 ` [PATCH v4 23/35] net/intel: add special handling for single desc packets Bruce Richardson
2026-02-10 13:57 ` Burakov, Anatoly
2026-02-09 16:45 ` [PATCH v4 24/35] net/intel: use separate array for desc status tracking Bruce Richardson
2026-02-10 14:11 ` Burakov, Anatoly
2026-02-09 16:45 ` [PATCH v4 25/35] net/ixgbe: " Bruce Richardson
2026-02-10 14:12 ` Burakov, Anatoly
2026-02-09 16:45 ` [PATCH v4 26/35] net/intel: drop unused Tx queue used count Bruce Richardson
2026-02-10 14:14 ` Burakov, Anatoly
2026-02-09 16:45 ` [PATCH v4 27/35] net/intel: remove index for tracking end of packet Bruce Richardson
2026-02-10 14:15 ` Burakov, Anatoly
2026-02-09 16:45 ` [PATCH v4 28/35] net/intel: merge ring writes in simple Tx for ice and i40e Bruce Richardson
2026-02-09 23:18 ` Medvedkin, Vladimir
2026-02-09 16:45 ` [PATCH v4 29/35] net/intel: consolidate ice and i40e buffer free function Bruce Richardson
2026-02-09 23:19 ` Medvedkin, Vladimir
2026-02-09 16:45 ` [PATCH v4 30/35] net/intel: complete merging simple Tx paths Bruce Richardson
2026-02-09 23:19 ` Medvedkin, Vladimir
2026-02-09 16:45 ` [PATCH v4 31/35] net/intel: use non-volatile stores in simple Tx function Bruce Richardson
2026-02-09 23:19 ` Medvedkin, Vladimir
2026-02-09 16:45 ` [PATCH v4 32/35] net/intel: align scalar simple Tx path with vector logic Bruce Richardson
2026-02-09 23:19 ` Medvedkin, Vladimir
2026-02-09 16:45 ` [PATCH v4 33/35] net/intel: use vector SW ring entry for simple path Bruce Richardson
2026-02-09 23:19 ` Medvedkin, Vladimir
2026-02-09 16:45 ` [PATCH v4 34/35] net/intel: use vector mbuf cleanup from simple scalar path Bruce Richardson
2026-02-09 23:19 ` Medvedkin, Vladimir
2026-02-09 16:45 ` [PATCH v4 35/35] net/idpf: enable simple Tx function Bruce Richardson
2026-02-09 23:20 ` Medvedkin, Vladimir
2026-02-11 18:12 ` [PATCH v5 00/35] combine multiple Intel scalar Tx paths Bruce Richardson
2026-02-11 18:12 ` [PATCH v5 01/35] net/intel: create common Tx descriptor structure Bruce Richardson
2026-02-11 18:12 ` [PATCH v5 02/35] net/intel: fix memory leak on TX queue setup failure Bruce Richardson
2026-02-12 12:14 ` Burakov, Anatoly
2026-02-11 18:12 ` [PATCH v5 03/35] net/intel: use common Tx ring structure Bruce Richardson
2026-02-11 18:12 ` [PATCH v5 04/35] net/intel: create common post-Tx cleanup function Bruce Richardson
2026-02-11 18:12 ` [PATCH v5 05/35] net/intel: consolidate definitions for Tx desc fields Bruce Richardson
2026-02-11 18:12 ` [PATCH v5 06/35] net/intel: add common fn to calculate needed descriptors Bruce Richardson
2026-02-11 18:12 ` [PATCH v5 07/35] net/ice: refactor context descriptor handling Bruce Richardson
2026-02-12 12:16 ` Burakov, Anatoly
2026-02-11 18:12 ` [PATCH v5 08/35] net/i40e: " Bruce Richardson
2026-02-12 12:19 ` Burakov, Anatoly
2026-02-11 18:12 ` [PATCH v5 09/35] net/idpf: " Bruce Richardson
2026-02-11 18:12 ` [PATCH v5 10/35] net/intel: consolidate checksum mask definition Bruce Richardson
2026-02-11 18:12 ` [PATCH v5 11/35] net/intel: create common checksum Tx offload function Bruce Richardson
2026-02-11 18:12 ` [PATCH v5 12/35] net/intel: create a common scalar Tx function Bruce Richardson
2026-02-11 18:12 ` [PATCH v5 13/35] net/i40e: use " Bruce Richardson
2026-02-11 18:12 ` [PATCH v5 14/35] net/intel: add IPsec hooks to common " Bruce Richardson
2026-02-11 18:12 ` [PATCH v5 15/35] net/intel: support configurable VLAN tag insertion on Tx Bruce Richardson
2026-02-12 12:20 ` Burakov, Anatoly
2026-02-11 18:12 ` [PATCH v5 16/35] net/iavf: use common scalar Tx function Bruce Richardson
2026-02-11 18:12 ` [PATCH v5 17/35] net/i40e: document requirement for QinQ support Bruce Richardson
2026-02-11 18:12 ` [PATCH v5 18/35] net/idpf: use common scalar Tx function Bruce Richardson
2026-02-11 18:12 ` [PATCH v5 19/35] net/intel: avoid writing the final pkt descriptor twice Bruce Richardson
2026-02-11 18:12 ` [PATCH v5 20/35] net/intel: write descriptors using non-volatile pointers Bruce Richardson
2026-02-11 21:14 ` Morten Brørup
2026-02-12 8:43 ` Bruce Richardson
2026-02-11 18:12 ` [PATCH v5 21/35] net/intel: remove unnecessary flag clearing Bruce Richardson
2026-02-11 18:12 ` [PATCH v5 22/35] net/intel: add special handling for single desc packets Bruce Richardson
2026-02-11 18:12 ` [PATCH v5 23/35] net/intel: use separate array for desc status tracking Bruce Richardson
2026-02-11 21:51 ` Morten Brørup
2026-02-12 9:15 ` Bruce Richardson
2026-02-12 12:38 ` Morten Brørup
2026-02-11 18:12 ` [PATCH v5 24/35] net/ixgbe: " Bruce Richardson
2026-02-11 18:12 ` [PATCH v5 25/35] net/intel: drop unused Tx queue used count Bruce Richardson
2026-02-11 18:12 ` [PATCH v5 26/35] net/intel: remove index for tracking end of packet Bruce Richardson
2026-02-11 18:12 ` Bruce Richardson [this message]
2026-02-11 18:12 ` [PATCH v5 28/35] net/intel: consolidate ice and i40e buffer free function Bruce Richardson
2026-02-11 18:12 ` [PATCH v5 29/35] net/intel: complete merging simple Tx paths Bruce Richardson
2026-02-11 18:12 ` [PATCH v5 30/35] net/intel: use non-volatile stores in simple Tx function Bruce Richardson
2026-02-11 18:13 ` [PATCH v5 31/35] net/intel: align scalar simple Tx path with vector logic Bruce Richardson
2026-02-11 18:13 ` [PATCH v5 32/35] net/intel: use vector SW ring entry for simple path Bruce Richardson
2026-02-11 18:13 ` [PATCH v5 33/35] net/intel: use vector mbuf cleanup from simple scalar path Bruce Richardson
2026-02-11 18:13 ` [PATCH v5 34/35] net/idpf: enable simple Tx function Bruce Richardson
2026-02-12 12:28 ` Burakov, Anatoly
2026-02-11 18:13 ` [PATCH v5 35/35] net/cpfl: " Bruce Richardson
2026-02-12 12:30 ` Burakov, Anatoly
2026-02-12 14:45 ` [PATCH v5 00/35] combine multiple Intel scalar Tx paths Bruce Richardson
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260211181309.2838042-28-bruce.richardson@intel.com \
--to=bruce.richardson@intel.com \
--cc=anatoly.burakov@intel.com \
--cc=dev@dpdk.org \
--cc=vladimir.medvedkin@intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link.
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox; see mirroring instructions
for how to clone and mirror all data and code used for this inbox.