public inbox for dev@dpdk.org
 help / color / mirror / Atom feed
From: Dariusz Sosnowski <dsosnowski@nvidia.com>
To: Aman Singh <aman.deep.singh@intel.com>
Cc: <dev@dpdk.org>, Thomas Monjalon <thomas@monjalon.net>,
	Raslan Darawsheh <rasland@nvidia.com>,
	Stephen Hemminger <stephen@networkplumber.org>,
	"Adrian Schollmeyer" <a.schollmeyer@syseleven.de>
Subject: [PATCH v2 1/2] app/testpmd: assign share group dynamically
Date: Tue, 24 Mar 2026 17:56:56 +0100	[thread overview]
Message-ID: <20260324165657.23945-2-dsosnowski@nvidia.com> (raw)
In-Reply-To: <20260324165657.23945-1-dsosnowski@nvidia.com>

Testpmd exposes the "--rxq-share=[N]" parameter, which controls
Rx queue sharing. Before this patch, the logic was that either:

- all queues were assigned to the same share group
  (when N was not passed),
- or ports were grouped in subsets of N ports,
  each subset got different share group index.

The second option did not work well with dynamic representor probing,
where new representors would be assigned to a new share group.

This patch changes the logic in testpmd to dynamically
assign the share group index. Each unique combination of
switch domain and Rx domain will get a different share group.

Signed-off-by: Dariusz Sosnowski <dsosnowski@nvidia.com>
---
 app/test-pmd/parameters.c             | 12 +---
 app/test-pmd/testpmd.c                | 82 ++++++++++++++++++++++++++-
 app/test-pmd/testpmd.h                |  2 +-
 doc/guides/testpmd_app_ug/run_app.rst | 10 ++--
 4 files changed, 87 insertions(+), 19 deletions(-)

diff --git a/app/test-pmd/parameters.c b/app/test-pmd/parameters.c
index 3617860830..5d9a5f2501 100644
--- a/app/test-pmd/parameters.c
+++ b/app/test-pmd/parameters.c
@@ -507,7 +507,7 @@ usage(char* progname)
 	printf("  --tx-ip=src,dst: IP addresses in Tx-only mode\n");
 	printf("  --tx-udp=src[,dst]: UDP ports in Tx-only mode\n");
 	printf("  --eth-link-speed: force link speed.\n");
-	printf("  --rxq-share=X: number of ports per shared Rx queue groups, defaults to UINT32_MAX (1 group)\n");
+	printf("  --rxq-share: enable Rx queue sharing per switch and Rx domain\n");
 	printf("  --disable-link-check: disable check on link status when "
 	       "starting/stopping ports.\n");
 	printf("  --disable-device-start: do not automatically start port\n");
@@ -1579,15 +1579,7 @@ launch_args_parse(int argc, char** argv)
 				rte_exit(EXIT_FAILURE, "txonly-flows must be >= 1 and <= 64\n");
 			break;
 		case TESTPMD_OPT_RXQ_SHARE_NUM:
-			if (optarg == NULL) {
-				rxq_share = UINT32_MAX;
-			} else {
-				n = atoi(optarg);
-				if (n >= 0)
-					rxq_share = (uint32_t)n;
-				else
-					rte_exit(EXIT_FAILURE, "rxq-share must be >= 0\n");
-			}
+			rxq_share = 1;
 			break;
 		case TESTPMD_OPT_NO_FLUSH_RX_NUM:
 			no_flush_rx = 1;
diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c
index aad880aa34..81b220466f 100644
--- a/app/test-pmd/testpmd.c
+++ b/app/test-pmd/testpmd.c
@@ -545,9 +545,17 @@ uint8_t record_core_cycles;
 uint8_t record_burst_stats;
 
 /*
- * Number of ports per shared Rx queue group, 0 disable.
+ * Enable Rx queue sharing between ports in the same switch and Rx domain.
  */
-uint32_t rxq_share;
+uint8_t rxq_share;
+
+struct share_group_slot {
+	uint16_t domain_id;
+	uint16_t rx_domain;
+	uint16_t share_group;
+};
+
+static struct share_group_slot share_group_slots[RTE_MAX_ETHPORTS];
 
 unsigned int num_sockets = 0;
 unsigned int socket_ids[RTE_MAX_NUMA_NODES];
@@ -586,6 +594,73 @@ int proc_id;
  */
 unsigned int num_procs = 1;
 
+static uint16_t
+assign_share_group(struct rte_eth_dev_info *dev_info)
+{
+	unsigned int first_free = RTE_DIM(share_group_slots);
+	bool found = false;
+	unsigned int i;
+
+	for (i = 0; i < RTE_DIM(share_group_slots); i++) {
+		if (share_group_slots[i].share_group > 0) {
+			if (dev_info->switch_info.domain_id == share_group_slots[i].domain_id &&
+			    dev_info->switch_info.rx_domain == share_group_slots[i].rx_domain) {
+				found = true;
+				break;
+			}
+		} else if (first_free == RTE_DIM(share_group_slots)) {
+			first_free = i;
+		}
+	}
+
+	if (found)
+		return share_group_slots[i].share_group;
+
+	/*
+	 * testpmd assigns all queues on a given port to single share group.
+	 * There are RTE_MAX_ETHPORTS share group slots,
+	 * so at least one should always be available.
+	 */
+	RTE_ASSERT(first_free < RTE_DIM(share_group_slots));
+
+	share_group_slots[first_free].domain_id = dev_info->switch_info.domain_id;
+	share_group_slots[first_free].rx_domain = dev_info->switch_info.rx_domain;
+	share_group_slots[first_free].share_group = first_free + 1;
+	return share_group_slots[first_free].share_group;
+}
+
+static void
+try_release_share_group(struct share_group_slot *slot)
+{
+	uint16_t pi;
+	bool group_not_used = true;
+
+	/* Check if any port still uses this share group. */
+	RTE_ETH_FOREACH_DEV(pi) {
+		if (ports[pi].dev_info.switch_info.domain_id == slot->domain_id &&
+		    ports[pi].dev_info.switch_info.rx_domain == slot->rx_domain) {
+			group_not_used = false;
+			break;
+		}
+	}
+	if (group_not_used) {
+		slot->share_group = 0;
+		slot->domain_id = 0;
+		slot->rx_domain = 0;
+	}
+}
+
+static void
+try_release_share_groups(void)
+{
+	unsigned int i;
+
+	/* Try release each used share group. */
+	for (i = 0; i < RTE_DIM(share_group_slots); i++)
+		if (share_group_slots[i].share_group > 0)
+			try_release_share_group(&share_group_slots[i]);
+}
+
 static void
 eth_rx_metadata_negotiate_mp(uint16_t port_id)
 {
@@ -3315,6 +3390,7 @@ remove_invalid_ports(void)
 	remove_invalid_ports_in(ports_ids, &nb_ports);
 	remove_invalid_ports_in(fwd_ports_ids, &nb_fwd_ports);
 	nb_cfg_ports = nb_fwd_ports;
+	try_release_share_groups();
 }
 
 static void
@@ -4097,7 +4173,7 @@ rxtx_port_config(portid_t pid)
 		if (rxq_share > 0 &&
 		    (port->dev_info.dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE)) {
 			/* Non-zero share group to enable RxQ share. */
-			port->rxq[qid].conf.share_group = pid / rxq_share + 1;
+			port->rxq[qid].conf.share_group = assign_share_group(&port->dev_info);
 			port->rxq[qid].conf.share_qid = qid; /* Equal mapping. */
 		}
 
diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h
index af185540c3..9b60ebd7fc 100644
--- a/app/test-pmd/testpmd.h
+++ b/app/test-pmd/testpmd.h
@@ -675,7 +675,7 @@ extern enum tx_pkt_split tx_pkt_split;
 extern uint8_t txonly_multi_flow;
 extern uint16_t txonly_flows;
 
-extern uint32_t rxq_share;
+extern uint8_t rxq_share;
 
 extern uint16_t nb_pkt_per_burst;
 extern uint16_t nb_pkt_flowgen_clones;
diff --git a/doc/guides/testpmd_app_ug/run_app.rst b/doc/guides/testpmd_app_ug/run_app.rst
index ae3ef8cdf8..f4a30e5da9 100644
--- a/doc/guides/testpmd_app_ug/run_app.rst
+++ b/doc/guides/testpmd_app_ug/run_app.rst
@@ -393,13 +393,13 @@ The command line options are:
     Valid range is 1 to 64. Default is 64.
     Reducing this value limits the number of unique UDP source ports generated.
 
-*   ``--rxq-share=[X]``
+*   ``--rxq-share``
 
     Create queues in shared Rx queue mode if device supports.
-    Shared Rx queues are grouped per X ports. X defaults to UINT32_MAX,
-    implies all ports join share group 1. Forwarding engine "shared-rxq"
-    should be used for shared Rx queues. This engine does Rx only and
-    update stream statistics accordingly.
+    Testpmd will assign unique share group index per each
+    unique switch and Rx domain.
+    Forwarding engine "shared-rxq" should be used for shared Rx queues.
+    This engine does Rx only and update stream statistics accordingly.
 
 *   ``--eth-link-speed``
 
-- 
2.47.3


  reply	other threads:[~2026-03-24 16:57 UTC|newest]

Thread overview: 22+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2026-03-24 12:37 [PATCH 0/2] app/testpmd: assign share group dynamically Dariusz Sosnowski
2026-03-24 12:37 ` [PATCH 1/2] " Dariusz Sosnowski
2026-03-24 15:15   ` Stephen Hemminger
2026-03-25 16:45   ` Stephen Hemminger
2026-03-24 12:37 ` [PATCH 2/2] app/testpmd: revert switch domain mismatch check Dariusz Sosnowski
2026-03-24 15:17   ` Stephen Hemminger
2026-03-24 16:56 ` [PATCH v2 0/2] app/testpmd: assign share group dynamically Dariusz Sosnowski
2026-03-24 16:56   ` Dariusz Sosnowski [this message]
2026-03-25 16:49     ` [PATCH v2 1/2] " Stephen Hemminger
2026-03-25 18:06       ` Dariusz Sosnowski
2026-03-25 16:50     ` Stephen Hemminger
2026-03-25 18:12       ` Dariusz Sosnowski
2026-03-24 16:56   ` [PATCH v2 2/2] app/testpmd: revert switch domain mismatch check Dariusz Sosnowski
2026-03-25 18:02   ` [PATCH v3 0/2] app/testpmd: assign share group dynamically Dariusz Sosnowski
2026-03-25 18:02     ` [PATCH v3 1/2] " Dariusz Sosnowski
2026-03-25 18:51       ` Stephen Hemminger
2026-03-25 19:11         ` Dariusz Sosnowski
2026-03-25 18:02     ` [PATCH v3 2/2] app/testpmd: revert switch domain mismatch check Dariusz Sosnowski
2026-03-25 19:09     ` [PATCH v4 0/2] app/testpmd: assign share group dynamically Dariusz Sosnowski
2026-03-25 19:09       ` [PATCH v4 1/2] " Dariusz Sosnowski
2026-03-25 19:09       ` [PATCH v4 2/2] app/testpmd: revert switch domain mismatch check Dariusz Sosnowski
2026-03-25 20:16       ` [PATCH v4 0/2] app/testpmd: assign share group dynamically Stephen Hemminger

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20260324165657.23945-2-dsosnowski@nvidia.com \
    --to=dsosnowski@nvidia.com \
    --cc=a.schollmeyer@syseleven.de \
    --cc=aman.deep.singh@intel.com \
    --cc=dev@dpdk.org \
    --cc=rasland@nvidia.com \
    --cc=stephen@networkplumber.org \
    --cc=thomas@monjalon.net \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox