From: Dariusz Sosnowski <dsosnowski@nvidia.com>
To: Viacheslav Ovsiienko <viacheslavo@nvidia.com>,
Bing Zhao <bingz@nvidia.com>, Ori Kam <orika@nvidia.com>,
Suanming Mou <suanmingm@nvidia.com>,
Matan Azrad <matan@nvidia.com>
Cc: <dev@dpdk.org>, Raslan Darawsheh <rasland@nvidia.com>
Subject: [PATCH 3/5] net/mlx5: calculate number of uplinks and host PFs
Date: Mon, 2 Mar 2026 12:34:41 +0100 [thread overview]
Message-ID: <20260302113443.16648-4-dsosnowski@nvidia.com> (raw)
In-Reply-To: <20260302113443.16648-1-dsosnowski@nvidia.com>
Add counting of number of uplinks (IB ports for physical ports)
and host PFs (IB ports for host PF representors visible
on BlueField DPU) for all probed IB devices.
This information will be used to generate a proper DPDK port name,
instead of relying on the specific setup type, in the follow-up patches.
To facilitate correct counting, the following changes are also made:
- Checking RTE_ETH_DEVARG_REPRESENTOR_IGNORE_PF flag is moved to
mlx5_representor_match() so all uplink IB ports are kept in the list.
- IB ports for VF/SF/Host PF representors related to all PFs are kept
in the list.
Signed-off-by: Dariusz Sosnowski <dsosnowski@nvidia.com>
---
drivers/net/mlx5/linux/mlx5_os.c | 74 +++++++++++++++++++++++++-------
drivers/net/mlx5/mlx5.h | 2 +
2 files changed, 60 insertions(+), 16 deletions(-)
diff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c
index 405aa9799c..f9f3b2c38b 100644
--- a/drivers/net/mlx5/linux/mlx5_os.c
+++ b/drivers/net/mlx5/linux/mlx5_os.c
@@ -1047,6 +1047,12 @@ mlx5_queue_counter_id_prepare(struct rte_eth_dev *dev)
"available.", dev->data->port_id);
}
+static inline bool
+mlx5_ignore_pf_representor(const struct rte_eth_devargs *eth_da)
+{
+ return (eth_da->flags & RTE_ETH_DEVARG_REPRESENTOR_IGNORE_PF) != 0;
+}
+
/**
* Check if representor spawn info match devargs.
*
@@ -1075,6 +1081,10 @@ mlx5_representor_match(struct mlx5_dev_spawn_data *spawn,
*/
if (mlx5_is_probed_port_on_mpesw_device(spawn) &&
switch_info->name_type == MLX5_PHYS_PORT_NAME_TYPE_UPLINK) {
+ if (!switch_info->master && mlx5_ignore_pf_representor(eth_da)) {
+ rte_errno = EBUSY;
+ return false;
+ }
for (p = 0; p < eth_da->nb_ports; ++p)
if (switch_info->port_name == eth_da->ports[p])
return true;
@@ -2297,10 +2307,45 @@ mlx5_device_mpesw_pci_match(struct ibv_device *ibv,
return -1;
}
-static inline bool
-mlx5_ignore_pf_representor(const struct rte_eth_devargs *eth_da)
+static void
+calc_nb_uplinks_hpfs(struct ibv_device **ibv_match,
+ unsigned int nd,
+ struct mlx5_dev_spawn_data *list,
+ unsigned int ns)
{
- return (eth_da->flags & RTE_ETH_DEVARG_REPRESENTOR_IGNORE_PF) != 0;
+ for (unsigned int i = 0; i != nd; i++) {
+ uint32_t nb_uplinks = 0;
+ uint32_t nb_hpfs = 0;
+ uint32_t j;
+
+ for (unsigned int j = 0; j != ns; j++) {
+ if (strcmp(ibv_match[i]->name, list[j].phys_dev_name) != 0)
+ continue;
+
+ if (list[j].info.name_type == MLX5_PHYS_PORT_NAME_TYPE_UPLINK)
+ nb_uplinks++;
+ else if (list[j].info.name_type == MLX5_PHYS_PORT_NAME_TYPE_PFHPF)
+ nb_hpfs++;
+ }
+
+ if (nb_uplinks > 0 || nb_hpfs > 0) {
+ for (j = 0; j != ns; j++) {
+ if (strcmp(ibv_match[i]->name, list[j].phys_dev_name) != 0)
+ continue;
+
+ list[j].nb_uplinks = nb_uplinks;
+ list[j].nb_hpfs = nb_hpfs;
+ }
+
+ DRV_LOG(DEBUG, "IB device %s has %u uplinks, %u host PFs",
+ ibv_match[i]->name,
+ nb_uplinks,
+ nb_hpfs);
+ } else {
+ DRV_LOG(DEBUG, "IB device %s unable to recognize uplinks/host PFs",
+ ibv_match[i]->name);
+ }
+ }
}
/**
@@ -2611,8 +2656,6 @@ mlx5_os_pci_probe_pf(struct mlx5_common_device *cdev,
if (list[ns].info.port_name == mpesw) {
list[ns].info.master = 1;
list[ns].info.representor = 0;
- } else if (mlx5_ignore_pf_representor(&eth_da)) {
- continue;
} else {
list[ns].info.master = 0;
list[ns].info.representor = 1;
@@ -2629,17 +2672,14 @@ mlx5_os_pci_probe_pf(struct mlx5_common_device *cdev,
case MLX5_PHYS_PORT_NAME_TYPE_PFHPF:
case MLX5_PHYS_PORT_NAME_TYPE_PFVF:
case MLX5_PHYS_PORT_NAME_TYPE_PFSF:
- /* Only spawn representors related to the probed PF. */
- if (list[ns].info.pf_num == owner_id) {
- /*
- * Ports of this type have PF index encoded in name,
- * which translate to the related uplink port index.
- */
- list[ns].mpesw_port = list[ns].info.pf_num;
- /* MPESW owner is also saved but not used now. */
- list[ns].info.mpesw_owner = mpesw;
- ns++;
- }
+ /*
+ * Ports of this type have PF index encoded in name,
+ * which translate to the related uplink port index.
+ */
+ list[ns].mpesw_port = list[ns].info.pf_num;
+ /* MPESW owner is also saved but not used now. */
+ list[ns].info.mpesw_owner = mpesw;
+ ns++;
break;
default:
break;
@@ -2773,6 +2813,8 @@ mlx5_os_pci_probe_pf(struct mlx5_common_device *cdev,
}
}
MLX5_ASSERT(ns);
+ /* Calculate number of uplinks and host PFs for each matched IB device. */
+ calc_nb_uplinks_hpfs(ibv_match, nd, list, ns);
/*
* Sort list to probe devices in natural order for users convenience
* (i.e. master first, then representors from lowest to highest ID).
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index b83dda5652..bef0088164 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -215,6 +215,8 @@ struct mlx5_dev_cap {
struct mlx5_dev_spawn_data {
uint32_t ifindex; /**< Network interface index. */
uint32_t max_port; /**< Device maximal port index. */
+ uint32_t nb_uplinks; /**< Number of uplinks associated with IB device. */
+ uint32_t nb_hpfs; /**< Number of host PFs associated with IB device. */
uint32_t phys_port; /**< Device physical port index. */
int pf_bond; /**< bonding device PF index. < 0 - no bonding */
int mpesw_port; /**< MPESW uplink index. Valid if mpesw_owner_port >= 0. */
--
2.47.3
next prev parent reply other threads:[~2026-03-02 11:35 UTC|newest]
Thread overview: 12+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-03-02 11:34 [PATCH 0/5] net/mlx5: add BlueField socket direct support Dariusz Sosnowski
2026-03-02 11:34 ` [PATCH 1/5] common/mlx5: fix bond check Dariusz Sosnowski
2026-03-02 11:34 ` [PATCH 2/5] net/mlx5: " Dariusz Sosnowski
2026-03-02 11:34 ` Dariusz Sosnowski [this message]
2026-03-02 11:34 ` [PATCH 4/5] net/mlx5: compare representors explicitly Dariusz Sosnowski
2026-03-02 11:34 ` [PATCH 5/5] net/mlx5: build port name dynamically Dariusz Sosnowski
2026-03-04 7:26 ` [PATCH 0/5] net/mlx5: add BlueField socket direct support Bing Zhao
2026-03-04 10:57 ` [PATCH v2 0/3] net/mlx5: net/mlx5: fix probing to allow BlueField Socket Direct Dariusz Sosnowski
2026-03-04 10:57 ` [PATCH v2 1/3] common/mlx5: fix bond check Dariusz Sosnowski
2026-03-04 10:57 ` [PATCH v2 2/3] net/mlx5: " Dariusz Sosnowski
2026-03-04 10:57 ` [PATCH v2 3/3] net/mlx5: fix probing to allow BlueField Socket Direct Dariusz Sosnowski
2026-03-10 8:16 ` [PATCH v2 0/3] net/mlx5: " Raslan Darawsheh
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260302113443.16648-4-dsosnowski@nvidia.com \
--to=dsosnowski@nvidia.com \
--cc=bingz@nvidia.com \
--cc=dev@dpdk.org \
--cc=matan@nvidia.com \
--cc=orika@nvidia.com \
--cc=rasland@nvidia.com \
--cc=suanmingm@nvidia.com \
--cc=viacheslavo@nvidia.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox