public inbox for dev@dpdk.org
 help / color / mirror / Atom feed
From: Gregory Etelson <getelson@nvidia.com>
To: <dev@dpdk.org>
Cc: <getelson@nvidia.com>, <mkashani@nvidia.com>,
	Claude Sonnet 4.5 <noreply@anthropic.com>,
	Aman Singh <aman.deep.singh@intel.com>
Subject: [PATCH 2/2] app/testpmd: support selective Rx data
Date: Mon, 2 Feb 2026 18:09:03 +0200	[thread overview]
Message-ID: <20260202160903.254621-2-getelson@nvidia.com> (raw)
In-Reply-To: <20260202160903.254621-1-getelson@nvidia.com>

Add support for selective Rx data using the existing rxoffs and rxpkts
command-line parameters.

When both rxoffs and rxpkts are specified on PMDs supporting
selective Rx data (selective_read capability), testpmd automatically:
1. Inserts segments with NULL mempool for gaps between configured
   segments to discard unwanted data.
2. Adds a trailing segment with NULL mempool to cover any remaining
   data up to MTU.

Example usage to receive only the Ethernet header and a 64-byte
segment at offset 128:
  --rxoffs=0,128 --rxpkts=14,64

This creates segments:
- [0-13]: 14 bytes with mempool (received)
- [14-127]: 114 bytes with NULL mempool (discarded)
- [128-191]: 64 bytes with mempool (received)
- [192-MTU]: remaining bytes with NULL mempool (discarded)

Signed-off-by: Gregory Etelson <getelson@nvidia.com>
Signed-off-by: Claude Sonnet 4.5 <noreply@anthropic.com>
---
 app/test-pmd/testpmd.c                | 74 +++++++++++++++++++++++++--
 doc/guides/testpmd_app_ug/run_app.rst | 19 +++++++
 2 files changed, 88 insertions(+), 5 deletions(-)

diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c
index 1fe41d852a..62129f0d28 100644
--- a/app/test-pmd/testpmd.c
+++ b/app/test-pmd/testpmd.c
@@ -2676,11 +2676,58 @@ rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
 	int ret;
 
 
-	if ((rx_pkt_nb_segs > 1) &&
+	if ((rx_pkt_nb_segs > 1 || rx_pkt_nb_offs > 0) &&
 	    (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT)) {
+		struct rte_eth_dev_info dev_info;
+		uint16_t seg_idx = 0;
+		uint16_t next_offset = 0;
+		uint16_t mtu = 0;
+		bool selective_rx;
+
+		ret = rte_eth_dev_info_get(port_id, &dev_info);
+		if (ret != 0)
+			return ret;
+
+		selective_rx = rx_pkt_nb_offs > 0 &&
+			       dev_info.rx_seg_capa.selective_read != 0;
+
+		if (selective_rx) {
+			ret = rte_eth_dev_get_mtu(port_id, &mtu);
+			if (ret != 0)
+				return ret;
+		}
+
 		/* multi-segment configuration */
 		for (i = 0; i < rx_pkt_nb_segs; i++) {
-			struct rte_eth_rxseg_split *rx_seg = &rx_useg[i].split;
+			struct rte_eth_rxseg_split *rx_seg;
+			uint16_t seg_offset;
+
+			seg_offset = i < rx_pkt_nb_offs ?
+				     rx_pkt_seg_offsets[i] : next_offset;
+
+			/* Insert gap segment if selective Rx and there's a gap */
+			if (selective_rx && seg_offset > next_offset) {
+				if (seg_idx >= MAX_SEGS_BUFFER_SPLIT) {
+					fprintf(stderr,
+						"Too many segments (max %u)\n",
+						MAX_SEGS_BUFFER_SPLIT);
+					return -EINVAL;
+				}
+				rx_seg = &rx_useg[seg_idx++].split;
+				rx_seg->offset = next_offset;
+				rx_seg->length = seg_offset - next_offset;
+				rx_seg->mp = NULL; /* Discard gap data */
+				next_offset = seg_offset;
+			}
+
+			/* Add the actual data segment */
+			if (seg_idx >= MAX_SEGS_BUFFER_SPLIT) {
+				fprintf(stderr,
+					"Too many segments (max %u)\n",
+					MAX_SEGS_BUFFER_SPLIT);
+				return -EINVAL;
+			}
+			rx_seg = &rx_useg[seg_idx++].split;
 			/*
 			 * Use last valid pool for the segments with number
 			 * exceeding the pool index.
@@ -2688,8 +2735,7 @@ rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
 			mp_n = (i >= mbuf_data_size_n) ? mbuf_data_size_n - 1 : i;
 			mpx = mbuf_pool_find(socket_id, mp_n);
 			/* Handle zero as mbuf data buffer size. */
-			rx_seg->offset = i < rx_pkt_nb_offs ?
-					   rx_pkt_seg_offsets[i] : 0;
+			rx_seg->offset = seg_offset;
 			rx_seg->mp = mpx ? mpx : mp;
 			if (rx_pkt_hdr_protos[i] != 0 && rx_pkt_seg_lengths[i] == 0) {
 				rx_seg->proto_hdr = rx_pkt_hdr_protos[i] & ~prev_hdrs;
@@ -2699,8 +2745,26 @@ rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
 						rx_pkt_seg_lengths[i] :
 						mbuf_data_size[mp_n];
 			}
+
+			if (selective_rx)
+				next_offset = seg_offset + rx_seg->length;
 		}
-		rx_conf->rx_nseg = rx_pkt_nb_segs;
+
+		/* Add trailing segment to MTU if selective Rx enabled */
+		if (selective_rx && next_offset < mtu) {
+			if (seg_idx >= MAX_SEGS_BUFFER_SPLIT) {
+				fprintf(stderr,
+					"Too many segments (max %u)\n",
+					MAX_SEGS_BUFFER_SPLIT);
+				return -EINVAL;
+			}
+			rx_useg[seg_idx].split.offset = next_offset;
+			rx_useg[seg_idx].split.length = mtu - next_offset;
+			rx_useg[seg_idx].split.mp = NULL; /* Discard trailing data */
+			seg_idx++;
+		}
+
+		rx_conf->rx_nseg = seg_idx;
 		rx_conf->rx_seg = rx_useg;
 		rx_conf->rx_mempools = NULL;
 		rx_conf->rx_nmempool = 0;
diff --git a/doc/guides/testpmd_app_ug/run_app.rst b/doc/guides/testpmd_app_ug/run_app.rst
index 97d6c75716..638c0b0eb3 100644
--- a/doc/guides/testpmd_app_ug/run_app.rst
+++ b/doc/guides/testpmd_app_ug/run_app.rst
@@ -364,6 +364,11 @@ The command line options are:
     feature is engaged. Affects only the queues configured
     with split offloads (currently BUFFER_SPLIT is supported only).
 
+    When used with ``--rxpkts`` on PMDs supporting selective Rx data,
+    enables receiving only specific packet segments and discarding the rest.
+    Gaps between configured segments and any trailing data up to MTU are
+    automatically filled with NULL mempool segments (data is discarded).
+
 *   ``--rxpkts=X[,Y]``
 
     Set the length of segments to scatter packets on receiving if split
@@ -373,6 +378,20 @@ The command line options are:
     command line parameter and the mbufs to receive will be allocated
     sequentially from these extra memory pools.
 
+    **Selective Rx Data Example:**
+
+    To receive only the Ethernet header (14 bytes at offset 0) and
+    a 64-byte segment starting at offset 128, while discarding the rest::
+
+        --rxoffs=0,128 --rxpkts=14,64
+
+    This configuration will:
+
+    * Receive 14 bytes at offset 0 (Ethernet header)
+    * Discard bytes 14-127 (inserted NULL mempool segment)
+    * Receive 64 bytes at offset 128
+    * Discard bytes 192-MTU (inserted NULL mempool segment)
+
 *   ``--txpkts=X[,Y]``
 
     Set TX segment sizes or total packet length. Valid for ``tx-only``
-- 
2.51.0


  reply	other threads:[~2026-02-02 16:09 UTC|newest]

Thread overview: 4+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2026-02-02 16:09 [PATCH 1/2] ethdev: support selective Rx data Gregory Etelson
2026-02-02 16:09 ` Gregory Etelson [this message]
2026-02-02 17:37   ` [PATCH 2/2] app/testpmd: " Stephen Hemminger
2026-02-02 18:17 ` [PATCH 1/2] ethdev: " Stephen Hemminger

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20260202160903.254621-2-getelson@nvidia.com \
    --to=getelson@nvidia.com \
    --cc=aman.deep.singh@intel.com \
    --cc=dev@dpdk.org \
    --cc=mkashani@nvidia.com \
    --cc=noreply@anthropic.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox