From: Dariusz Sosnowski <dsosnowski@nvidia.com>
To: Aman Singh <aman.deep.singh@intel.com>
Cc: <dev@dpdk.org>, Thomas Monjalon <thomas@monjalon.net>,
Raslan Darawsheh <rasland@nvidia.com>,
Stephen Hemminger <stephen@networkplumber.org>,
"Adrian Schollmeyer" <a.schollmeyer@syseleven.de>
Subject: [PATCH v4 1/2] app/testpmd: assign share group dynamically
Date: Wed, 25 Mar 2026 20:09:05 +0100 [thread overview]
Message-ID: <20260325190906.68531-2-dsosnowski@nvidia.com> (raw)
In-Reply-To: <20260325190906.68531-1-dsosnowski@nvidia.com>
Testpmd exposes "--rxq-share=[N]" parameter which controls
sharing Rx queues. Before this patch, the logic was that either:
- all queues were assigned to the same share group
(when N was not passed),
- or ports were grouped in subsets of N ports,
each subset got different share group index.
The second option did not work well with dynamic representor probing,
where new representors would be assigned to a new share group.
This patch changes the logic in testpmd to dynamically
assign the share group index. Each unique switch and Rx domain
will get a different share group.
Signed-off-by: Dariusz Sosnowski <dsosnowski@nvidia.com>
---
app/test-pmd/parameters.c | 14 +----
app/test-pmd/testpmd.c | 81 +++++++++++++++++++++++++--
app/test-pmd/testpmd.h | 2 +-
doc/guides/testpmd_app_ug/run_app.rst | 10 ++--
4 files changed, 86 insertions(+), 21 deletions(-)
diff --git a/app/test-pmd/parameters.c b/app/test-pmd/parameters.c
index 3617860830..ecbd618f00 100644
--- a/app/test-pmd/parameters.c
+++ b/app/test-pmd/parameters.c
@@ -351,7 +351,7 @@ static const struct option long_options[] = {
NO_ARG(TESTPMD_OPT_MULTI_RX_MEMPOOL),
NO_ARG(TESTPMD_OPT_TXONLY_MULTI_FLOW),
REQUIRED_ARG(TESTPMD_OPT_TXONLY_FLOWS),
- OPTIONAL_ARG(TESTPMD_OPT_RXQ_SHARE),
+ NO_ARG(TESTPMD_OPT_RXQ_SHARE),
REQUIRED_ARG(TESTPMD_OPT_ETH_LINK_SPEED),
NO_ARG(TESTPMD_OPT_DISABLE_LINK_CHECK),
NO_ARG(TESTPMD_OPT_DISABLE_DEVICE_START),
@@ -507,7 +507,7 @@ usage(char* progname)
printf(" --tx-ip=src,dst: IP addresses in Tx-only mode\n");
printf(" --tx-udp=src[,dst]: UDP ports in Tx-only mode\n");
printf(" --eth-link-speed: force link speed.\n");
- printf(" --rxq-share=X: number of ports per shared Rx queue groups, defaults to UINT32_MAX (1 group)\n");
+ printf(" --rxq-share: enable Rx queue sharing per switch and Rx domain\n");
printf(" --disable-link-check: disable check on link status when "
"starting/stopping ports.\n");
printf(" --disable-device-start: do not automatically start port\n");
@@ -1579,15 +1579,7 @@ launch_args_parse(int argc, char** argv)
rte_exit(EXIT_FAILURE, "txonly-flows must be >= 1 and <= 64\n");
break;
case TESTPMD_OPT_RXQ_SHARE_NUM:
- if (optarg == NULL) {
- rxq_share = UINT32_MAX;
- } else {
- n = atoi(optarg);
- if (n >= 0)
- rxq_share = (uint32_t)n;
- else
- rte_exit(EXIT_FAILURE, "rxq-share must be >= 0\n");
- }
+ rxq_share = 1;
break;
case TESTPMD_OPT_NO_FLUSH_RX_NUM:
no_flush_rx = 1;
diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c
index aad880aa34..e655ddd247 100644
--- a/app/test-pmd/testpmd.c
+++ b/app/test-pmd/testpmd.c
@@ -545,9 +545,17 @@ uint8_t record_core_cycles;
uint8_t record_burst_stats;
/*
- * Number of ports per shared Rx queue group, 0 disable.
+ * Enable Rx queue sharing between ports in the same switch and Rx domain.
*/
-uint32_t rxq_share;
+uint8_t rxq_share;
+
+struct share_group_slot {
+ uint16_t domain_id;
+ uint16_t rx_domain;
+ uint16_t share_group;
+};
+
+static struct share_group_slot share_group_slots[RTE_MAX_ETHPORTS];
unsigned int num_sockets = 0;
unsigned int socket_ids[RTE_MAX_NUMA_NODES];
@@ -586,6 +594,64 @@ int proc_id;
*/
unsigned int num_procs = 1;
+static int
+assign_share_group(struct rte_eth_dev_info *dev_info, uint16_t *share_group)
+{
+ unsigned int first_free = RTE_DIM(share_group_slots);
+ unsigned int i;
+
+ for (i = 0; i < RTE_DIM(share_group_slots); i++) {
+ if (share_group_slots[i].share_group > 0) {
+ if (dev_info->switch_info.domain_id == share_group_slots[i].domain_id &&
+ dev_info->switch_info.rx_domain == share_group_slots[i].rx_domain) {
+ *share_group = share_group_slots[i].share_group;
+ return 0;
+ }
+ } else if (first_free == RTE_DIM(share_group_slots)) {
+ first_free = i;
+ }
+ }
+
+ if (first_free == RTE_DIM(share_group_slots))
+ return -ENOSPC;
+
+ share_group_slots[first_free].domain_id = dev_info->switch_info.domain_id;
+ share_group_slots[first_free].rx_domain = dev_info->switch_info.rx_domain;
+ share_group_slots[first_free].share_group = first_free + 1;
+ *share_group = share_group_slots[first_free].share_group;
+
+ return 0;
+}
+
+static void
+try_release_share_group(struct share_group_slot *slot)
+{
+ uint16_t pi;
+
+ /* Check if any port still uses this share group. */
+ RTE_ETH_FOREACH_DEV(pi) {
+ if (ports[pi].dev_info.switch_info.domain_id == slot->domain_id &&
+ ports[pi].dev_info.switch_info.rx_domain == slot->rx_domain) {
+ return;
+ }
+ }
+
+ slot->share_group = 0;
+ slot->domain_id = 0;
+ slot->rx_domain = 0;
+}
+
+static void
+try_release_share_groups(void)
+{
+ unsigned int i;
+
+ /* Try release each used share group. */
+ for (i = 0; i < RTE_DIM(share_group_slots); i++)
+ if (share_group_slots[i].share_group > 0)
+ try_release_share_group(&share_group_slots[i]);
+}
+
static void
eth_rx_metadata_negotiate_mp(uint16_t port_id)
{
@@ -3315,6 +3381,7 @@ remove_invalid_ports(void)
remove_invalid_ports_in(ports_ids, &nb_ports);
remove_invalid_ports_in(fwd_ports_ids, &nb_fwd_ports);
nb_cfg_ports = nb_fwd_ports;
+ try_release_share_groups();
}
static void
@@ -4097,8 +4164,14 @@ rxtx_port_config(portid_t pid)
if (rxq_share > 0 &&
(port->dev_info.dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE)) {
/* Non-zero share group to enable RxQ share. */
- port->rxq[qid].conf.share_group = pid / rxq_share + 1;
- port->rxq[qid].conf.share_qid = qid; /* Equal mapping. */
+ uint16_t share_group;
+
+ if (assign_share_group(&port->dev_info, &share_group) == 0) {
+ port->rxq[qid].conf.share_group = share_group;
+ port->rxq[qid].conf.share_qid = qid; /* Equal mapping. */
+ } else {
+ TESTPMD_LOG(INFO, "port %u: failed assigning share group\n", pid);
+ }
}
if (offloads != 0)
diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h
index af185540c3..9b60ebd7fc 100644
--- a/app/test-pmd/testpmd.h
+++ b/app/test-pmd/testpmd.h
@@ -675,7 +675,7 @@ extern enum tx_pkt_split tx_pkt_split;
extern uint8_t txonly_multi_flow;
extern uint16_t txonly_flows;
-extern uint32_t rxq_share;
+extern uint8_t rxq_share;
extern uint16_t nb_pkt_per_burst;
extern uint16_t nb_pkt_flowgen_clones;
diff --git a/doc/guides/testpmd_app_ug/run_app.rst b/doc/guides/testpmd_app_ug/run_app.rst
index ae3ef8cdf8..d0a05d6311 100644
--- a/doc/guides/testpmd_app_ug/run_app.rst
+++ b/doc/guides/testpmd_app_ug/run_app.rst
@@ -393,13 +393,13 @@ The command line options are:
Valid range is 1 to 64. Default is 64.
Reducing this value limits the number of unique UDP source ports generated.
-* ``--rxq-share=[X]``
+* ``--rxq-share``
Create queues in shared Rx queue mode if device supports.
- Shared Rx queues are grouped per X ports. X defaults to UINT32_MAX,
- implies all ports join share group 1. Forwarding engine "shared-rxq"
- should be used for shared Rx queues. This engine does Rx only and
- update stream statistics accordingly.
+ Testpmd will assign unique share group index per each
+ unique switch and Rx domain.
+ Forwarding engine "shared-rxq" should be used for shared Rx queues.
+ This engine does Rx only and updates stream statistics accordingly.
* ``--eth-link-speed``
--
2.47.3
next prev parent reply other threads:[~2026-03-25 19:10 UTC|newest]
Thread overview: 22+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-03-24 12:37 [PATCH 0/2] app/testpmd: assign share group dynamically Dariusz Sosnowski
2026-03-24 12:37 ` [PATCH 1/2] " Dariusz Sosnowski
2026-03-24 15:15 ` Stephen Hemminger
2026-03-25 16:45 ` Stephen Hemminger
2026-03-24 12:37 ` [PATCH 2/2] app/testpmd: revert switch domain mismatch check Dariusz Sosnowski
2026-03-24 15:17 ` Stephen Hemminger
2026-03-24 16:56 ` [PATCH v2 0/2] app/testpmd: assign share group dynamically Dariusz Sosnowski
2026-03-24 16:56 ` [PATCH v2 1/2] " Dariusz Sosnowski
2026-03-25 16:49 ` Stephen Hemminger
2026-03-25 18:06 ` Dariusz Sosnowski
2026-03-25 16:50 ` Stephen Hemminger
2026-03-25 18:12 ` Dariusz Sosnowski
2026-03-24 16:56 ` [PATCH v2 2/2] app/testpmd: revert switch domain mismatch check Dariusz Sosnowski
2026-03-25 18:02 ` [PATCH v3 0/2] app/testpmd: assign share group dynamically Dariusz Sosnowski
2026-03-25 18:02 ` [PATCH v3 1/2] " Dariusz Sosnowski
2026-03-25 18:51 ` Stephen Hemminger
2026-03-25 19:11 ` Dariusz Sosnowski
2026-03-25 18:02 ` [PATCH v3 2/2] app/testpmd: revert switch domain mismatch check Dariusz Sosnowski
2026-03-25 19:09 ` [PATCH v4 0/2] app/testpmd: assign share group dynamically Dariusz Sosnowski
2026-03-25 19:09 ` Dariusz Sosnowski [this message]
2026-03-25 19:09 ` [PATCH v4 2/2] app/testpmd: revert switch domain mismatch check Dariusz Sosnowski
2026-03-25 20:16 ` [PATCH v4 0/2] app/testpmd: assign share group dynamically Stephen Hemminger
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260325190906.68531-2-dsosnowski@nvidia.com \
--to=dsosnowski@nvidia.com \
--cc=a.schollmeyer@syseleven.de \
--cc=aman.deep.singh@intel.com \
--cc=dev@dpdk.org \
--cc=rasland@nvidia.com \
--cc=stephen@networkplumber.org \
--cc=thomas@monjalon.net \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox