public inbox for dev@dpdk.org
 help / color / mirror / Atom feed
* [PATCH 0/4] Remove limitations coming from legacy VMDq
@ 2026-04-03  9:18 David Marchand
  2026-04-03  9:18 ` [PATCH 1/4] ethdev: skip VMDq pools unless configured David Marchand
                   ` (4 more replies)
  0 siblings, 5 replies; 7+ messages in thread
From: David Marchand @ 2026-04-03  9:18 UTC (permalink / raw)
  To: dev; +Cc: rjarry, cfontain

Since the commit 88ac4396ad29 ("ethdev: add VMDq support"),
VMDq has been imposing a maximum number of mac addresses in the
mac_addr_add/del API.

Nowadays, new Intel drivers do not support this feature, and only a few
other drivers implement it.

This series proposes to flag drivers that support the feature, and to
remove the limit on the number of MAC addresses for the others.

Next step could be to remove the VMDq pool notion from the generic API.
However I have some concern about this, as changing the quite stable
mac_addr_add/del API now seems a lot of noise for not much benefit.


-- 
David Marchand

David Marchand (4):
  ethdev: skip VMDq pools unless configured
  ethdev: announce VMDq capability
  ethdev: hide VMDq internal sizes
  net/iavf: accept up to 32k unicast MAC addresses

 drivers/net/bnxt/bnxt_ethdev.c                |  3 +-
 drivers/net/bnxt/bnxt_reps.c                  |  1 +
 drivers/net/cnxk/cnxk_ethdev_ops.c            |  1 -
 drivers/net/intel/e1000/em_ethdev.c           |  1 +
 drivers/net/intel/e1000/igb_ethdev.c          |  1 +
 drivers/net/intel/fm10k/fm10k_ethdev.c        |  1 +
 drivers/net/intel/i40e/i40e_ethdev.c          |  3 +-
 drivers/net/intel/i40e/i40e_vf_representor.c  |  1 +
 drivers/net/intel/iavf/iavf.h                 |  5 ++-
 drivers/net/intel/iavf/iavf_ethdev.c          | 10 ++---
 drivers/net/intel/iavf/iavf_vchnl.c           |  6 +--
 drivers/net/intel/ipn3ke/ipn3ke_representor.c |  3 +-
 drivers/net/intel/ixgbe/ixgbe_ethdev.c        |  2 +
 drivers/net/txgbe/txgbe_ethdev.c              |  1 +
 drivers/net/txgbe/txgbe_ethdev_vf.c           |  1 +
 lib/ethdev/ethdev_driver.h                    |  8 +++-
 lib/ethdev/rte_ethdev.c                       | 45 ++++++++++++++++---
 lib/ethdev/rte_ethdev.h                       |  8 +---
 18 files changed, 73 insertions(+), 28 deletions(-)

-- 
2.53.0


^ permalink raw reply	[flat|nested] 7+ messages in thread

* [PATCH 1/4] ethdev: skip VMDq pools unless configured
  2026-04-03  9:18 [PATCH 0/4] Remove limitations coming from legacy VMDq David Marchand
@ 2026-04-03  9:18 ` David Marchand
  2026-04-03  9:18 ` [PATCH 2/4] ethdev: announce VMDq capability David Marchand
                   ` (3 subsequent siblings)
  4 siblings, 0 replies; 7+ messages in thread
From: David Marchand @ 2026-04-03  9:18 UTC (permalink / raw)
  To: dev
  Cc: rjarry, cfontain, Nithin Dabilpuram, Kiran Kumar K,
	Sunil Kumar Kori, Satha Rao, Harman Kalra, Thomas Monjalon,
	Andrew Rybchenko

The mac_addr_add API documents that only pool 0 should be passed
unless VMDq has been enabled, but there was no validation so far.
Add such a check, then cleanup the related operations (adding, removing,
restoring).

As a side effect, the net/cnxk driver no longer needs to manually reset
the mac_pool_sel[] array.

Signed-off-by: David Marchand <david.marchand@redhat.com>
---
 drivers/net/cnxk/cnxk_ethdev_ops.c |  1 -
 lib/ethdev/rte_ethdev.c            | 28 +++++++++++++++++++++-------
 2 files changed, 21 insertions(+), 8 deletions(-)

diff --git a/drivers/net/cnxk/cnxk_ethdev_ops.c b/drivers/net/cnxk/cnxk_ethdev_ops.c
index 49e77e49a6..75decf7098 100644
--- a/drivers/net/cnxk/cnxk_ethdev_ops.c
+++ b/drivers/net/cnxk/cnxk_ethdev_ops.c
@@ -1240,7 +1240,6 @@ cnxk_nix_mc_addr_list_configure(struct rte_eth_dev *eth_dev, struct rte_ether_ad
 		/* Update address in NIC data structure */
 		rte_ether_addr_copy(&mc_addr_set[i], &data->mac_addrs[j]);
 		rte_ether_addr_copy(&mc_addr_set[i], &dev->dmac_addrs[j]);
-		data->mac_pool_sel[j] = RTE_BIT64(0);
 	}
 
 	roc_nix_npc_promisc_ena_dis(nix, true);
diff --git a/lib/ethdev/rte_ethdev.c b/lib/ethdev/rte_ethdev.c
index 2edc7a362e..9577b7d848 100644
--- a/lib/ethdev/rte_ethdev.c
+++ b/lib/ethdev/rte_ethdev.c
@@ -1680,7 +1680,10 @@ eth_dev_mac_restore(struct rte_eth_dev *dev,
 				continue;
 
 			pool = 0;
-			pool_mask = dev->data->mac_pool_sel[i];
+			if ((dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_VMDQ_FLAG) != 0)
+				pool_mask = dev->data->mac_pool_sel[i];
+			else
+				pool_mask = 1;
 
 			do {
 				if (pool_mask & UINT64_C(1))
@@ -5390,8 +5393,9 @@ rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *addr,
 			uint32_t pool)
 {
 	struct rte_eth_dev *dev;
-	int index;
 	uint64_t pool_mask;
+	bool vmdq;
+	int index;
 	int ret;
 
 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
@@ -5416,6 +5420,12 @@ rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *addr,
 		RTE_ETHDEV_LOG_LINE(ERR, "Pool ID must be 0-%d", RTE_ETH_64_POOLS - 1);
 		return -EINVAL;
 	}
+	vmdq = (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_VMDQ_FLAG) != 0;
+	if (!vmdq && pool != 0) {
+		RTE_ETHDEV_LOG_LINE(ERR, "Port %u: VMDq is not configured (pool %d)",
+			port_id, pool);
+		return -EINVAL;
+	}
 
 	index = eth_dev_get_mac_addr_index(port_id, addr);
 	if (index < 0) {
@@ -5425,7 +5435,7 @@ rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *addr,
 				port_id);
 			return -ENOSPC;
 		}
-	} else {
+	} else if (vmdq) {
 		pool_mask = dev->data->mac_pool_sel[index];
 
 		/* Check if both MAC address and pool is already there, and do nothing */
@@ -5440,8 +5450,10 @@ rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *addr,
 		/* Update address in NIC data structure */
 		rte_ether_addr_copy(addr, &dev->data->mac_addrs[index]);
 
-		/* Update pool bitmap in NIC data structure */
-		dev->data->mac_pool_sel[index] |= RTE_BIT64(pool);
+		if (vmdq) {
+			/* Update pool bitmap in NIC data structure */
+			dev->data->mac_pool_sel[index] |= RTE_BIT64(pool);
+		}
 	}
 
 	ret = eth_err(port_id, ret);
@@ -5486,8 +5498,10 @@ rte_eth_dev_mac_addr_remove(uint16_t port_id, struct rte_ether_addr *addr)
 	/* Update address in NIC data structure */
 	rte_ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);
 
-	/* reset pool bitmap */
-	dev->data->mac_pool_sel[index] = 0;
+	if ((dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_VMDQ_FLAG) != 0) {
+		/* reset pool bitmap */
+		dev->data->mac_pool_sel[index] = 0;
+	}
 
 	rte_ethdev_trace_mac_addr_remove(port_id, addr);
 
-- 
2.53.0


^ permalink raw reply related	[flat|nested] 7+ messages in thread

* [PATCH 2/4] ethdev: announce VMDq capability
  2026-04-03  9:18 [PATCH 0/4] Remove limitations coming from legacy VMDq David Marchand
  2026-04-03  9:18 ` [PATCH 1/4] ethdev: skip VMDq pools unless configured David Marchand
@ 2026-04-03  9:18 ` David Marchand
  2026-04-06 22:22   ` Kishore Padmanabha
  2026-04-03  9:18 ` [PATCH 3/4] ethdev: hide VMDq internal sizes David Marchand
                   ` (2 subsequent siblings)
  4 siblings, 1 reply; 7+ messages in thread
From: David Marchand @ 2026-04-03  9:18 UTC (permalink / raw)
  To: dev
  Cc: rjarry, cfontain, Kishore Padmanabha, Ajit Khaparde,
	Bruce Richardson, Rosen Xu, Anatoly Burakov, Vladimir Medvedkin,
	Jiawen Wu, Zaiyu Wang, Thomas Monjalon, Andrew Rybchenko

Let's mark VMDq feature availability as a per device capability.
We can then enforce that API calls related to this feature are only made
on devices with this capability.

Signed-off-by: David Marchand <david.marchand@redhat.com>
---
 drivers/net/bnxt/bnxt_ethdev.c                |  3 ++-
 drivers/net/bnxt/bnxt_reps.c                  |  1 +
 drivers/net/intel/e1000/em_ethdev.c           |  1 +
 drivers/net/intel/e1000/igb_ethdev.c          |  1 +
 drivers/net/intel/fm10k/fm10k_ethdev.c        |  1 +
 drivers/net/intel/i40e/i40e_ethdev.c          |  3 ++-
 drivers/net/intel/i40e/i40e_vf_representor.c  |  1 +
 drivers/net/intel/ipn3ke/ipn3ke_representor.c |  3 ++-
 drivers/net/intel/ixgbe/ixgbe_ethdev.c        |  2 ++
 drivers/net/txgbe/txgbe_ethdev.c              |  1 +
 drivers/net/txgbe/txgbe_ethdev_vf.c           |  1 +
 lib/ethdev/rte_ethdev.c                       | 17 +++++++++++++++++
 lib/ethdev/rte_ethdev.h                       |  2 ++
 13 files changed, 34 insertions(+), 3 deletions(-)

diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c
index b677f9491d..0f783b9e98 100644
--- a/drivers/net/bnxt/bnxt_ethdev.c
+++ b/drivers/net/bnxt/bnxt_ethdev.c
@@ -1214,7 +1214,8 @@ static int bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
 
 	dev_info->speed_capa = bnxt_get_speed_capabilities(bp);
 	dev_info->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
-			     RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
+			     RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP |
+			     RTE_ETH_DEV_CAPA_VMDQ;
 	dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
 
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
diff --git a/drivers/net/bnxt/bnxt_reps.c b/drivers/net/bnxt/bnxt_reps.c
index e26a086f41..5e834830e2 100644
--- a/drivers/net/bnxt/bnxt_reps.c
+++ b/drivers/net/bnxt/bnxt_reps.c
@@ -649,6 +649,7 @@ int bnxt_rep_dev_info_get_op(struct rte_eth_dev *eth_dev,
 	dev_info->max_tx_queues = max_rx_rings;
 	dev_info->reta_size = bnxt_rss_hash_tbl_size(parent_bp);
 	dev_info->hash_key_size = 40;
+	dev_info->dev_capa = RTE_ETH_DEV_CAPA_VMDQ;
 	dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
 
 	/* MTU specifics */
diff --git a/drivers/net/intel/e1000/em_ethdev.c b/drivers/net/intel/e1000/em_ethdev.c
index 9e15e882b9..389744ad5e 100644
--- a/drivers/net/intel/e1000/em_ethdev.c
+++ b/drivers/net/intel/e1000/em_ethdev.c
@@ -1175,6 +1175,7 @@ eth_em_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 			RTE_ETH_LINK_SPEED_100M_HD | RTE_ETH_LINK_SPEED_100M |
 			RTE_ETH_LINK_SPEED_1G;
 
+	dev_info->dev_capa = RTE_ETH_DEV_CAPA_VMDQ;
 	dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
 
 	/* Preferred queue parameters */
diff --git a/drivers/net/intel/e1000/igb_ethdev.c b/drivers/net/intel/e1000/igb_ethdev.c
index ef1599ac38..fe68c18417 100644
--- a/drivers/net/intel/e1000/igb_ethdev.c
+++ b/drivers/net/intel/e1000/igb_ethdev.c
@@ -2324,6 +2324,7 @@ eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->tx_queue_offload_capa = igb_get_tx_queue_offloads_capa(dev);
 	dev_info->tx_offload_capa = igb_get_tx_port_offloads_capa(dev) |
 				    dev_info->tx_queue_offload_capa;
+	dev_info->dev_capa = RTE_ETH_DEV_CAPA_VMDQ;
 	dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
 
 	switch (hw->mac.type) {
diff --git a/drivers/net/intel/fm10k/fm10k_ethdev.c b/drivers/net/intel/fm10k/fm10k_ethdev.c
index 97f61afec2..037d2206fd 100644
--- a/drivers/net/intel/fm10k/fm10k_ethdev.c
+++ b/drivers/net/intel/fm10k/fm10k_ethdev.c
@@ -1444,6 +1444,7 @@ fm10k_dev_infos_get(struct rte_eth_dev *dev,
 	dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_2_5G |
 			RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_25G |
 			RTE_ETH_LINK_SPEED_40G | RTE_ETH_LINK_SPEED_100G;
+	dev_info->dev_capa = RTE_ETH_DEV_CAPA_VMDQ;
 
 	return 0;
 }
diff --git a/drivers/net/intel/i40e/i40e_ethdev.c b/drivers/net/intel/i40e/i40e_ethdev.c
index 100a751225..64c29c6e85 100644
--- a/drivers/net/intel/i40e/i40e_ethdev.c
+++ b/drivers/net/intel/i40e/i40e_ethdev.c
@@ -3878,7 +3878,8 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 
 	dev_info->dev_capa =
 		RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
-		RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
+		RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP |
+		RTE_ETH_DEV_CAPA_VMDQ;
 	dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
 
 	dev_info->hash_key_size = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
diff --git a/drivers/net/intel/i40e/i40e_vf_representor.c b/drivers/net/intel/i40e/i40e_vf_representor.c
index e8f0bb62a0..d31148acb5 100644
--- a/drivers/net/intel/i40e/i40e_vf_representor.c
+++ b/drivers/net/intel/i40e/i40e_vf_representor.c
@@ -33,6 +33,7 @@ i40e_vf_representor_dev_infos_get(struct rte_eth_dev *ethdev,
 	/* get dev info for the vdev */
 	dev_info->device = ethdev->device;
 
+	dev_info->dev_capa = RTE_ETH_DEV_CAPA_VMDQ;
 	dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
 
 	dev_info->max_rx_queues = ethdev->data->nb_rx_queues;
diff --git a/drivers/net/intel/ipn3ke/ipn3ke_representor.c b/drivers/net/intel/ipn3ke/ipn3ke_representor.c
index cd34d08055..d581ee3c37 100644
--- a/drivers/net/intel/ipn3ke/ipn3ke_representor.c
+++ b/drivers/net/intel/ipn3ke/ipn3ke_representor.c
@@ -95,7 +95,8 @@ ipn3ke_rpst_dev_infos_get(struct rte_eth_dev *ethdev,
 
 	dev_info->dev_capa =
 		RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
-		RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
+		RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP |
+		RTE_ETH_DEV_CAPA_VMDQ;
 	dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
 
 	dev_info->switch_info.name = ethdev->device->name;
diff --git a/drivers/net/intel/ixgbe/ixgbe_ethdev.c b/drivers/net/intel/ixgbe/ixgbe_ethdev.c
index 57d929cf2c..5d886b3e28 100644
--- a/drivers/net/intel/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/intel/ixgbe/ixgbe_ethdev.c
@@ -3997,6 +3997,7 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->max_mtu =  dev_info->max_rx_pktlen - IXGBE_ETH_OVERHEAD;
 	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
 	dev_info->vmdq_queue_num = dev_info->max_rx_queues;
+	dev_info->dev_capa = RTE_ETH_DEV_CAPA_VMDQ;
 	dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev);
 	dev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) |
 				     dev_info->rx_queue_offload_capa);
@@ -4115,6 +4116,7 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev,
 		dev_info->max_vmdq_pools = RTE_ETH_16_POOLS;
 	else
 		dev_info->max_vmdq_pools = RTE_ETH_64_POOLS;
+	dev_info->dev_capa = RTE_ETH_DEV_CAPA_VMDQ;
 	dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev);
 	dev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) |
 				     dev_info->rx_queue_offload_capa);
diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c
index 5d360f8305..bd818e8269 100644
--- a/drivers/net/txgbe/txgbe_ethdev.c
+++ b/drivers/net/txgbe/txgbe_ethdev.c
@@ -2836,6 +2836,7 @@ txgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->max_vfs = pci_dev->max_vfs;
 	dev_info->max_vmdq_pools = RTE_ETH_64_POOLS;
 	dev_info->vmdq_queue_num = dev_info->max_rx_queues;
+	dev_info->dev_capa = RTE_ETH_DEV_CAPA_VMDQ;
 	dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
 	dev_info->rx_queue_offload_capa = txgbe_get_rx_queue_offloads(dev);
 	dev_info->rx_offload_capa = (txgbe_get_rx_port_offloads(dev) |
diff --git a/drivers/net/txgbe/txgbe_ethdev_vf.c b/drivers/net/txgbe/txgbe_ethdev_vf.c
index 39a5fff65c..934763574c 100644
--- a/drivers/net/txgbe/txgbe_ethdev_vf.c
+++ b/drivers/net/txgbe/txgbe_ethdev_vf.c
@@ -572,6 +572,7 @@ txgbevf_dev_info_get(struct rte_eth_dev *dev,
 	dev_info->max_hash_mac_addrs = TXGBE_VMDQ_NUM_UC_MAC;
 	dev_info->max_vfs = pci_dev->max_vfs;
 	dev_info->max_vmdq_pools = RTE_ETH_64_POOLS;
+	dev_info->dev_capa = RTE_ETH_DEV_CAPA_VMDQ;
 	dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
 	dev_info->rx_queue_offload_capa = txgbe_get_rx_queue_offloads(dev);
 	dev_info->rx_offload_capa = (txgbe_get_rx_port_offloads(dev) |
diff --git a/lib/ethdev/rte_ethdev.c b/lib/ethdev/rte_ethdev.c
index 9577b7d848..7ba539e796 100644
--- a/lib/ethdev/rte_ethdev.c
+++ b/lib/ethdev/rte_ethdev.c
@@ -158,6 +158,7 @@ static const struct {
 	{RTE_ETH_DEV_CAPA_RXQ_SHARE, "RXQ_SHARE"},
 	{RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP, "FLOW_RULE_KEEP"},
 	{RTE_ETH_DEV_CAPA_FLOW_SHARED_OBJECT_KEEP, "FLOW_SHARED_OBJECT_KEEP"},
+	{RTE_ETH_DEV_CAPA_VMDQ, "VMDQ"},
 };
 
 enum {
@@ -1581,6 +1582,22 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
 		goto rollback;
 	}
 
+	if (!(dev_info.dev_capa & RTE_ETH_DEV_CAPA_VMDQ)) {
+		if ((dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_VMDQ_FLAG) != 0) {
+			RTE_ETHDEV_LOG_LINE(ERR, "Ethdev port_id=%u does not support VMDq rx mode",
+				port_id);
+			ret = -EINVAL;
+			goto rollback;
+		}
+		if (dev_conf->txmode.mq_mode == RTE_ETH_MQ_TX_VMDQ_DCB ||
+				dev_conf->txmode.mq_mode == RTE_ETH_MQ_TX_VMDQ_ONLY) {
+			RTE_ETHDEV_LOG_LINE(ERR, "Ethdev port_id=%u does not support VMDq tx mode",
+				port_id);
+			ret = -EINVAL;
+			goto rollback;
+		}
+	}
+
 	/*
 	 * Setup new number of Rx/Tx queues and reconfigure device.
 	 */
diff --git a/lib/ethdev/rte_ethdev.h b/lib/ethdev/rte_ethdev.h
index 0d8e2d0236..62c72de0e5 100644
--- a/lib/ethdev/rte_ethdev.h
+++ b/lib/ethdev/rte_ethdev.h
@@ -1696,6 +1696,8 @@ struct rte_eth_conf {
 #define RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP         RTE_BIT64(3)
 /** Device supports keeping shared flow objects across restart. */
 #define RTE_ETH_DEV_CAPA_FLOW_SHARED_OBJECT_KEEP RTE_BIT64(4)
+/** Device supports VMDq. */
+#define RTE_ETH_DEV_CAPA_VMDQ RTE_BIT64(5)
 /**@}*/
 
 /*
-- 
2.53.0


^ permalink raw reply related	[flat|nested] 7+ messages in thread

* [PATCH 3/4] ethdev: hide VMDq internal sizes
  2026-04-03  9:18 [PATCH 0/4] Remove limitations coming from legacy VMDq David Marchand
  2026-04-03  9:18 ` [PATCH 1/4] ethdev: skip VMDq pools unless configured David Marchand
  2026-04-03  9:18 ` [PATCH 2/4] ethdev: announce VMDq capability David Marchand
@ 2026-04-03  9:18 ` David Marchand
  2026-04-03  9:18 ` [PATCH 4/4] net/iavf: accept up to 32k unicast MAC addresses David Marchand
  2026-04-05 18:47 ` [PATCH 0/4] Remove limitations coming from legacy VMDq Stephen Hemminger
  4 siblings, 0 replies; 7+ messages in thread
From: David Marchand @ 2026-04-03  9:18 UTC (permalink / raw)
  To: dev; +Cc: rjarry, cfontain, Thomas Monjalon, Andrew Rybchenko

Hide RTE_ETH_NUM_RECEIVE_MAC_ADDR and RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY
in the driver API as those (ambiguous) macros are only a driver concern.

In practice, this is only used by the bnxt and ixgbe (+ clones) drivers.

Signed-off-by: David Marchand <david.marchand@redhat.com>
---
 lib/ethdev/ethdev_driver.h | 8 +++++++-
 lib/ethdev/rte_ethdev.h    | 6 ------
 2 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/lib/ethdev/ethdev_driver.h b/lib/ethdev/ethdev_driver.h
index 1255cd6f2c..a4e9cf5b90 100644
--- a/lib/ethdev/ethdev_driver.h
+++ b/lib/ethdev/ethdev_driver.h
@@ -119,6 +119,12 @@ struct __rte_cache_aligned rte_eth_dev {
 struct rte_eth_dev_sriov;
 struct rte_eth_dev_owner;
 
+/* Definitions used for receive MAC address */
+#define RTE_ETH_NUM_RECEIVE_MAC_ADDR   128 /**< Maximum nb. of receive mac addr. */
+
+/* Definitions used for unicast hash */
+#define RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY 128 /**< Maximum nb. of UC hash array. */
+
 /**
  * @internal
  * The data part, with no function pointers, associated with each Ethernet
@@ -153,7 +159,7 @@ struct __rte_cache_aligned rte_eth_dev_data {
 	 * The first entry (index zero) is the default address.
 	 */
 	struct rte_ether_addr *mac_addrs;
-	/** Bitmap associating MAC addresses to pools */
+	/** Bitmap associating MAC addresses to VMDq pools */
 	uint64_t mac_pool_sel[RTE_ETH_NUM_RECEIVE_MAC_ADDR];
 	/**
 	 * Device Ethernet MAC addresses of hash filtering.
diff --git a/lib/ethdev/rte_ethdev.h b/lib/ethdev/rte_ethdev.h
index 62c72de0e5..6c1984e679 100644
--- a/lib/ethdev/rte_ethdev.h
+++ b/lib/ethdev/rte_ethdev.h
@@ -903,12 +903,6 @@ rte_eth_rss_hf_refine(uint64_t rss_hf)
 #define RTE_ETH_VLAN_ID_MAX          0x0FFF /**< VLAN ID is in lower 12 bits*/
 /**@}*/
 
-/* Definitions used for receive MAC address */
-#define RTE_ETH_NUM_RECEIVE_MAC_ADDR   128 /**< Maximum nb. of receive mac addr. */
-
-/* Definitions used for unicast hash */
-#define RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY 128 /**< Maximum nb. of UC hash array. */
-
 /**@{@name VMDq Rx mode
  * @see rte_eth_vmdq_rx_conf.rx_mode
  */
-- 
2.53.0


^ permalink raw reply related	[flat|nested] 7+ messages in thread

* [PATCH 4/4] net/iavf: accept up to 32k unicast MAC addresses
  2026-04-03  9:18 [PATCH 0/4] Remove limitations coming from legacy VMDq David Marchand
                   ` (2 preceding siblings ...)
  2026-04-03  9:18 ` [PATCH 3/4] ethdev: hide VMDq internal sizes David Marchand
@ 2026-04-03  9:18 ` David Marchand
  2026-04-05 18:47 ` [PATCH 0/4] Remove limitations coming from legacy VMDq Stephen Hemminger
  4 siblings, 0 replies; 7+ messages in thread
From: David Marchand @ 2026-04-03  9:18 UTC (permalink / raw)
  To: dev; +Cc: rjarry, cfontain, Vladimir Medvedkin

E810 hardware provides 32k switch lookups.
Thanks to this, it is possible to allow many more secondary MAC
addresses than is possible today.

In practice, the maximum number of MAC addresses available per port may
be lower and depends on usage by other VFs on the same PF.
There is no way to determine this limit other than trying to add a MAC
address and getting an error from the PF driver.

Signed-off-by: David Marchand <david.marchand@redhat.com>
---
 drivers/net/intel/iavf/iavf.h        |  5 +++--
 drivers/net/intel/iavf/iavf_ethdev.c | 10 +++++-----
 drivers/net/intel/iavf/iavf_vchnl.c  |  6 +++---
 3 files changed, 11 insertions(+), 10 deletions(-)

diff --git a/drivers/net/intel/iavf/iavf.h b/drivers/net/intel/iavf/iavf.h
index 403c61e2e8..f1dede0694 100644
--- a/drivers/net/intel/iavf/iavf.h
+++ b/drivers/net/intel/iavf/iavf.h
@@ -31,7 +31,8 @@
 #define IAVF_IRQ_MAP_NUM_PER_BUF	 128
 #define IAVF_RXTX_QUEUE_CHUNKS_NUM	 2
 
-#define IAVF_NUM_MACADDR_MAX      64
+#define IAVF_UC_MACADDR_MAX      32768
+#define IAVF_MC_MACADDR_MAX      64
 
 #define IAVF_DEV_WATCHDOG_PERIOD     2000 /* microseconds, set 0 to disable*/
 
@@ -253,7 +254,7 @@ struct iavf_info {
 	uint32_t link_speed;
 
 	/* Multicast addrs */
-	struct rte_ether_addr mc_addrs[IAVF_NUM_MACADDR_MAX];
+	struct rte_ether_addr mc_addrs[IAVF_MC_MACADDR_MAX];
 	uint16_t mc_addrs_num;   /* Multicast mac addresses number */
 
 	struct iavf_vsi vsi;
diff --git a/drivers/net/intel/iavf/iavf_ethdev.c b/drivers/net/intel/iavf/iavf_ethdev.c
index 1eca20bc9a..c69a012d50 100644
--- a/drivers/net/intel/iavf/iavf_ethdev.c
+++ b/drivers/net/intel/iavf/iavf_ethdev.c
@@ -379,10 +379,10 @@ iavf_set_mc_addr_list(struct rte_eth_dev *dev,
 		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
 	int err, ret;
 
-	if (mc_addrs_num > IAVF_NUM_MACADDR_MAX) {
+	if (mc_addrs_num > IAVF_MC_MACADDR_MAX) {
 		PMD_DRV_LOG(ERR,
 			    "can't add more than a limited number (%u) of addresses.",
-			    (uint32_t)IAVF_NUM_MACADDR_MAX);
+			    (uint32_t)IAVF_MC_MACADDR_MAX);
 		return -EINVAL;
 	}
 
@@ -1120,7 +1120,7 @@ iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->hash_key_size = vf->vf_res->rss_key_size;
 	dev_info->reta_size = vf->vf_res->rss_lut_size;
 	dev_info->flow_type_rss_offloads = IAVF_RSS_OFFLOAD_ALL;
-	dev_info->max_mac_addrs = IAVF_NUM_MACADDR_MAX;
+	dev_info->max_mac_addrs = IAVF_UC_MACADDR_MAX;
 	dev_info->dev_capa =
 		RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
 		RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
@@ -2822,11 +2822,11 @@ iavf_dev_init(struct rte_eth_dev *eth_dev)
 
 	/* copy mac addr */
 	eth_dev->data->mac_addrs = rte_zmalloc(
-		"iavf_mac", RTE_ETHER_ADDR_LEN * IAVF_NUM_MACADDR_MAX, 0);
+		"iavf_mac", RTE_ETHER_ADDR_LEN * IAVF_UC_MACADDR_MAX, 0);
 	if (!eth_dev->data->mac_addrs) {
 		PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to"
 			     " store MAC addresses",
-			     RTE_ETHER_ADDR_LEN * IAVF_NUM_MACADDR_MAX);
+			     RTE_ETHER_ADDR_LEN * IAVF_UC_MACADDR_MAX);
 		ret = -ENOMEM;
 		goto init_vf_err;
 	}
diff --git a/drivers/net/intel/iavf/iavf_vchnl.c b/drivers/net/intel/iavf/iavf_vchnl.c
index 08dd6f2d7f..c70f2fbbc0 100644
--- a/drivers/net/intel/iavf/iavf_vchnl.c
+++ b/drivers/net/intel/iavf/iavf_vchnl.c
@@ -1442,7 +1442,7 @@ iavf_add_del_all_mac_addr(struct iavf_adapter *adapter, bool add)
 {
 	struct {
 		struct virtchnl_ether_addr_list list;
-		struct virtchnl_ether_addr addr[IAVF_NUM_MACADDR_MAX];
+		struct virtchnl_ether_addr addr[IAVF_UC_MACADDR_MAX];
 	} list_req = {0};
 	struct virtchnl_ether_addr_list *list = &list_req.list;
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
@@ -1450,7 +1450,7 @@ iavf_add_del_all_mac_addr(struct iavf_adapter *adapter, bool add)
 	int err, i;
 	size_t buf_len;
 
-	for (i = 0; i < IAVF_NUM_MACADDR_MAX; i++) {
+	for (i = 0; i < IAVF_UC_MACADDR_MAX; i++) {
 		struct rte_ether_addr *addr = &adapter->dev_data->mac_addrs[i];
 		struct virtchnl_ether_addr *vc_addr = &list->list[list->num_elements];
 
@@ -2060,7 +2060,7 @@ iavf_add_del_mc_addr_list(struct iavf_adapter *adapter,
 {
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
 	uint8_t cmd_buffer[sizeof(struct virtchnl_ether_addr_list) +
-		(IAVF_NUM_MACADDR_MAX * sizeof(struct virtchnl_ether_addr))];
+		(IAVF_MC_MACADDR_MAX * sizeof(struct virtchnl_ether_addr))];
 	struct virtchnl_ether_addr_list *list;
 	struct iavf_cmd_info args;
 	uint32_t i;
-- 
2.53.0


^ permalink raw reply related	[flat|nested] 7+ messages in thread

* Re: [PATCH 0/4] Remove limitations coming from legacy VMDq
  2026-04-03  9:18 [PATCH 0/4] Remove limitations coming from legacy VMDq David Marchand
                   ` (3 preceding siblings ...)
  2026-04-03  9:18 ` [PATCH 4/4] net/iavf: accept up to 32k unicast MAC addresses David Marchand
@ 2026-04-05 18:47 ` Stephen Hemminger
  4 siblings, 0 replies; 7+ messages in thread
From: Stephen Hemminger @ 2026-04-05 18:47 UTC (permalink / raw)
  To: David Marchand; +Cc: dev, rjarry, cfontain

On Fri,  3 Apr 2026 11:18:31 +0200
David Marchand <david.marchand@redhat.com> wrote:

> Since the commit 88ac4396ad29 ("ethdev: add VMDq support"),
> VMDq has been imposing a maximum number of mac addresses in the
> mac_addr_add/del API.
> 
> Nowadays, new Intel drivers do not support the feature and few other
> drivers implement this feature.
> 
> This series proposes to flag drivers that support the feature, and
> remove the limit of number of mac addresses for others.
> 
> Next step could be to remove the VMDq pool notion from the generic API.
> However I have some concern about this, as changing the quite stable
> mac_addr_add/del API now seems a lot of noise for not much benefit.
> 
> 

Makes sense. The AI review found a couple of things.
Had to poke at it to make a good description.

Subject: Re: [PATCH 1/4] ethdev: skip VMDq pools unless configured

Patches 1/4 and 3/4 look good to me.

Patch 2/4 has two issues:

1) In bnxt_reps.c, the new line:

  dev_info->dev_capa = RTE_ETH_DEV_CAPA_VMDQ;

overwrites any default capabilities that were previously set before
the driver callback. The other drivers in this patch had the same
pre-existing pattern (plain assignment before &= ~FLOW_RULE_KEEP),
so for them it's no worse. But bnxt_reps.c previously only did the
&= ~ clear, so this is a new regression. Should be |= instead of =.

2) Several drivers that receive RTE_ETH_DEV_CAPA_VMDQ don't actually
support VMDq: e1000/em, bnxt representors, and i40e VF representors
have no max_vmdq_pools or VMDq configuration. Marking them as
VMDq-capable seems incorrect and would allow users to attempt VMDq
configuration on devices that can't handle it.

Patch 4/4 has a stack overflow:

iavf_add_del_all_mac_addr() allocates list_req on the stack with
addr[IAVF_UC_MACADDR_MAX]. At 32768 entries of ~8 bytes each,
that's roughly 256 KiB on the stack. This will blow the stack
on most configurations. Needs to be heap-allocated.

^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [PATCH 2/4] ethdev: announce VMDq capability
  2026-04-03  9:18 ` [PATCH 2/4] ethdev: announce VMDq capability David Marchand
@ 2026-04-06 22:22   ` Kishore Padmanabha
  0 siblings, 0 replies; 7+ messages in thread
From: Kishore Padmanabha @ 2026-04-06 22:22 UTC (permalink / raw)
  To: David Marchand
  Cc: dev, rjarry, cfontain, Ajit Khaparde, Bruce Richardson, Rosen Xu,
	Anatoly Burakov, Vladimir Medvedkin, Jiawen Wu, Zaiyu Wang,
	Thomas Monjalon, Andrew Rybchenko


[-- Attachment #1.1: Type: text/plain, Size: 11777 bytes --]

On Fri, Apr 3, 2026 at 5:19 AM David Marchand <david.marchand@redhat.com>
wrote:

> Let's mark VMDq feature availability as a per device capability.
> We can then enforce API calls related to this feature are done on device
> with such capability.
>
> Signed-off-by: David Marchand <david.marchand@redhat.com>
> ---
>  drivers/net/bnxt/bnxt_ethdev.c                |  3 ++-
>  drivers/net/bnxt/bnxt_reps.c                  |  1 +
>  drivers/net/intel/e1000/em_ethdev.c           |  1 +
>  drivers/net/intel/e1000/igb_ethdev.c          |  1 +
>  drivers/net/intel/fm10k/fm10k_ethdev.c        |  1 +
>  drivers/net/intel/i40e/i40e_ethdev.c          |  3 ++-
>  drivers/net/intel/i40e/i40e_vf_representor.c  |  1 +
>  drivers/net/intel/ipn3ke/ipn3ke_representor.c |  3 ++-
>  drivers/net/intel/ixgbe/ixgbe_ethdev.c        |  2 ++
>  drivers/net/txgbe/txgbe_ethdev.c              |  1 +
>  drivers/net/txgbe/txgbe_ethdev_vf.c           |  1 +
>  lib/ethdev/rte_ethdev.c                       | 17 +++++++++++++++++
>  lib/ethdev/rte_ethdev.h                       |  2 ++
>  13 files changed, 34 insertions(+), 3 deletions(-)
>
> diff --git a/drivers/net/bnxt/bnxt_ethdev.c
> b/drivers/net/bnxt/bnxt_ethdev.c
> index b677f9491d..0f783b9e98 100644
> --- a/drivers/net/bnxt/bnxt_ethdev.c
> +++ b/drivers/net/bnxt/bnxt_ethdev.c
> @@ -1214,7 +1214,8 @@ static int bnxt_dev_info_get_op(struct rte_eth_dev
> *eth_dev,
>
>         dev_info->speed_capa = bnxt_get_speed_capabilities(bp);
>         dev_info->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
> -                            RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
> +                            RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP |
> +                            RTE_ETH_DEV_CAPA_VMDQ;
>
We have not been testing the VMDq feature for sometime, planning to
deprecate this feature. Please remove this change.

>         dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
>
>         dev_info->default_rxconf = (struct rte_eth_rxconf) {
> diff --git a/drivers/net/bnxt/bnxt_reps.c b/drivers/net/bnxt/bnxt_reps.c
> index e26a086f41..5e834830e2 100644
> --- a/drivers/net/bnxt/bnxt_reps.c
> +++ b/drivers/net/bnxt/bnxt_reps.c
> @@ -649,6 +649,7 @@ int bnxt_rep_dev_info_get_op(struct rte_eth_dev
> *eth_dev,
>         dev_info->max_tx_queues = max_rx_rings;
>         dev_info->reta_size = bnxt_rss_hash_tbl_size(parent_bp);
>         dev_info->hash_key_size = 40;
> +       dev_info->dev_capa = RTE_ETH_DEV_CAPA_VMDQ;
>         dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
>
> We have not been testing the VMDq feature for sometime, planning to
deprecate this feature. Please remove this change.

>         /* MTU specifics */
> diff --git a/drivers/net/intel/e1000/em_ethdev.c
> b/drivers/net/intel/e1000/em_ethdev.c
> index 9e15e882b9..389744ad5e 100644
> --- a/drivers/net/intel/e1000/em_ethdev.c
> +++ b/drivers/net/intel/e1000/em_ethdev.c
> @@ -1175,6 +1175,7 @@ eth_em_infos_get(struct rte_eth_dev *dev, struct
> rte_eth_dev_info *dev_info)
>                         RTE_ETH_LINK_SPEED_100M_HD |
> RTE_ETH_LINK_SPEED_100M |
>                         RTE_ETH_LINK_SPEED_1G;
>
> +       dev_info->dev_capa = RTE_ETH_DEV_CAPA_VMDQ;
>         dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
>
>         /* Preferred queue parameters */
> diff --git a/drivers/net/intel/e1000/igb_ethdev.c
> b/drivers/net/intel/e1000/igb_ethdev.c
> index ef1599ac38..fe68c18417 100644
> --- a/drivers/net/intel/e1000/igb_ethdev.c
> +++ b/drivers/net/intel/e1000/igb_ethdev.c
> @@ -2324,6 +2324,7 @@ eth_igb_infos_get(struct rte_eth_dev *dev, struct
> rte_eth_dev_info *dev_info)
>         dev_info->tx_queue_offload_capa =
> igb_get_tx_queue_offloads_capa(dev);
>         dev_info->tx_offload_capa = igb_get_tx_port_offloads_capa(dev) |
>                                     dev_info->tx_queue_offload_capa;
> +       dev_info->dev_capa = RTE_ETH_DEV_CAPA_VMDQ;
>         dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
>
>         switch (hw->mac.type) {
> diff --git a/drivers/net/intel/fm10k/fm10k_ethdev.c
> b/drivers/net/intel/fm10k/fm10k_ethdev.c
> index 97f61afec2..037d2206fd 100644
> --- a/drivers/net/intel/fm10k/fm10k_ethdev.c
> +++ b/drivers/net/intel/fm10k/fm10k_ethdev.c
> @@ -1444,6 +1444,7 @@ fm10k_dev_infos_get(struct rte_eth_dev *dev,
>         dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G |
> RTE_ETH_LINK_SPEED_2_5G |
>                         RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_25G |
>                         RTE_ETH_LINK_SPEED_40G | RTE_ETH_LINK_SPEED_100G;
> +       dev_info->dev_capa = RTE_ETH_DEV_CAPA_VMDQ;
>
>         return 0;
>  }
> diff --git a/drivers/net/intel/i40e/i40e_ethdev.c
> b/drivers/net/intel/i40e/i40e_ethdev.c
> index 100a751225..64c29c6e85 100644
> --- a/drivers/net/intel/i40e/i40e_ethdev.c
> +++ b/drivers/net/intel/i40e/i40e_ethdev.c
> @@ -3878,7 +3878,8 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct
> rte_eth_dev_info *dev_info)
>
>         dev_info->dev_capa =
>                 RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
> -               RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
> +               RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP |
> +               RTE_ETH_DEV_CAPA_VMDQ;
>         dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
>
>         dev_info->hash_key_size = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
> diff --git a/drivers/net/intel/i40e/i40e_vf_representor.c
> b/drivers/net/intel/i40e/i40e_vf_representor.c
> index e8f0bb62a0..d31148acb5 100644
> --- a/drivers/net/intel/i40e/i40e_vf_representor.c
> +++ b/drivers/net/intel/i40e/i40e_vf_representor.c
> @@ -33,6 +33,7 @@ i40e_vf_representor_dev_infos_get(struct rte_eth_dev
> *ethdev,
>         /* get dev info for the vdev */
>         dev_info->device = ethdev->device;
>
> +       dev_info->dev_capa = RTE_ETH_DEV_CAPA_VMDQ;
>         dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
>
>         dev_info->max_rx_queues = ethdev->data->nb_rx_queues;
> diff --git a/drivers/net/intel/ipn3ke/ipn3ke_representor.c
> b/drivers/net/intel/ipn3ke/ipn3ke_representor.c
> index cd34d08055..d581ee3c37 100644
> --- a/drivers/net/intel/ipn3ke/ipn3ke_representor.c
> +++ b/drivers/net/intel/ipn3ke/ipn3ke_representor.c
> @@ -95,7 +95,8 @@ ipn3ke_rpst_dev_infos_get(struct rte_eth_dev *ethdev,
>
>         dev_info->dev_capa =
>                 RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
> -               RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
> +               RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP |
> +               RTE_ETH_DEV_CAPA_VMDQ;
>         dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
>
>         dev_info->switch_info.name = ethdev->device->name;
> diff --git a/drivers/net/intel/ixgbe/ixgbe_ethdev.c
> b/drivers/net/intel/ixgbe/ixgbe_ethdev.c
> index 57d929cf2c..5d886b3e28 100644
> --- a/drivers/net/intel/ixgbe/ixgbe_ethdev.c
> +++ b/drivers/net/intel/ixgbe/ixgbe_ethdev.c
> @@ -3997,6 +3997,7 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct
> rte_eth_dev_info *dev_info)
>         dev_info->max_mtu =  dev_info->max_rx_pktlen - IXGBE_ETH_OVERHEAD;
>         dev_info->min_mtu = RTE_ETHER_MIN_MTU;
>         dev_info->vmdq_queue_num = dev_info->max_rx_queues;
> +       dev_info->dev_capa = RTE_ETH_DEV_CAPA_VMDQ;
>         dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev);
>         dev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) |
>                                      dev_info->rx_queue_offload_capa);
> @@ -4115,6 +4116,7 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev,
>                 dev_info->max_vmdq_pools = RTE_ETH_16_POOLS;
>         else
>                 dev_info->max_vmdq_pools = RTE_ETH_64_POOLS;
> +       dev_info->dev_capa = RTE_ETH_DEV_CAPA_VMDQ;
>         dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev);
>         dev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) |
>                                      dev_info->rx_queue_offload_capa);
> diff --git a/drivers/net/txgbe/txgbe_ethdev.c
> b/drivers/net/txgbe/txgbe_ethdev.c
> index 5d360f8305..bd818e8269 100644
> --- a/drivers/net/txgbe/txgbe_ethdev.c
> +++ b/drivers/net/txgbe/txgbe_ethdev.c
> @@ -2836,6 +2836,7 @@ txgbe_dev_info_get(struct rte_eth_dev *dev, struct
> rte_eth_dev_info *dev_info)
>         dev_info->max_vfs = pci_dev->max_vfs;
>         dev_info->max_vmdq_pools = RTE_ETH_64_POOLS;
>         dev_info->vmdq_queue_num = dev_info->max_rx_queues;
> +       dev_info->dev_capa = RTE_ETH_DEV_CAPA_VMDQ;
>         dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
>         dev_info->rx_queue_offload_capa = txgbe_get_rx_queue_offloads(dev);
>         dev_info->rx_offload_capa = (txgbe_get_rx_port_offloads(dev) |
> diff --git a/drivers/net/txgbe/txgbe_ethdev_vf.c
> b/drivers/net/txgbe/txgbe_ethdev_vf.c
> index 39a5fff65c..934763574c 100644
> --- a/drivers/net/txgbe/txgbe_ethdev_vf.c
> +++ b/drivers/net/txgbe/txgbe_ethdev_vf.c
> @@ -572,6 +572,7 @@ txgbevf_dev_info_get(struct rte_eth_dev *dev,
>         dev_info->max_hash_mac_addrs = TXGBE_VMDQ_NUM_UC_MAC;
>         dev_info->max_vfs = pci_dev->max_vfs;
>         dev_info->max_vmdq_pools = RTE_ETH_64_POOLS;
> +       dev_info->dev_capa = RTE_ETH_DEV_CAPA_VMDQ;
>         dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
>         dev_info->rx_queue_offload_capa = txgbe_get_rx_queue_offloads(dev);
>         dev_info->rx_offload_capa = (txgbe_get_rx_port_offloads(dev) |
> diff --git a/lib/ethdev/rte_ethdev.c b/lib/ethdev/rte_ethdev.c
> index 9577b7d848..7ba539e796 100644
> --- a/lib/ethdev/rte_ethdev.c
> +++ b/lib/ethdev/rte_ethdev.c
> @@ -158,6 +158,7 @@ static const struct {
>         {RTE_ETH_DEV_CAPA_RXQ_SHARE, "RXQ_SHARE"},
>         {RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP, "FLOW_RULE_KEEP"},
>         {RTE_ETH_DEV_CAPA_FLOW_SHARED_OBJECT_KEEP,
> "FLOW_SHARED_OBJECT_KEEP"},
> +       {RTE_ETH_DEV_CAPA_VMDQ, "VMDQ"},
>  };
>
>  enum {
> @@ -1581,6 +1582,22 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t
> nb_rx_q, uint16_t nb_tx_q,
>                 goto rollback;
>         }
>
> +       if (!(dev_info.dev_capa & RTE_ETH_DEV_CAPA_VMDQ)) {
> +               if ((dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_VMDQ_FLAG)
> != 0) {
> +                       RTE_ETHDEV_LOG_LINE(ERR, "Ethdev port_id=%u does
> not support VMDq rx mode",
> +                               port_id);
> +                       ret = -EINVAL;
> +                       goto rollback;
> +               }
> +               if (dev_conf->txmode.mq_mode == RTE_ETH_MQ_TX_VMDQ_DCB ||
> +                               dev_conf->txmode.mq_mode ==
> RTE_ETH_MQ_TX_VMDQ_ONLY) {
> +                       RTE_ETHDEV_LOG_LINE(ERR, "Ethdev port_id=%u does
> not support VMDq tx mode",
> +                               port_id);
> +                       ret = -EINVAL;
> +                       goto rollback;
> +               }
> +       }
> +
>         /*
>          * Setup new number of Rx/Tx queues and reconfigure device.
>          */
> diff --git a/lib/ethdev/rte_ethdev.h b/lib/ethdev/rte_ethdev.h
> index 0d8e2d0236..62c72de0e5 100644
> --- a/lib/ethdev/rte_ethdev.h
> +++ b/lib/ethdev/rte_ethdev.h
> @@ -1696,6 +1696,8 @@ struct rte_eth_conf {
>  #define RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP         RTE_BIT64(3)
>  /** Device supports keeping shared flow objects across restart. */
>  #define RTE_ETH_DEV_CAPA_FLOW_SHARED_OBJECT_KEEP RTE_BIT64(4)
> +/** Device supports VMDq. */
> +#define RTE_ETH_DEV_CAPA_VMDQ RTE_BIT64(5)
>  /**@}*/
>
>  /*
> --
> 2.53.0
>
>

[-- Attachment #1.2: Type: text/html, Size: 13987 bytes --]

[-- Attachment #2: S/MIME Cryptographic Signature --]
[-- Type: application/pkcs7-signature, Size: 5493 bytes --]

^ permalink raw reply	[flat|nested] 7+ messages in thread

end of thread, other threads:[~2026-04-06 22:22 UTC | newest]

Thread overview: 7+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2026-04-03  9:18 [PATCH 0/4] Remove limitations coming from legacy VMDq David Marchand
2026-04-03  9:18 ` [PATCH 1/4] ethdev: skip VMDq pools unless configured David Marchand
2026-04-03  9:18 ` [PATCH 2/4] ethdev: announce VMDq capability David Marchand
2026-04-06 22:22   ` Kishore Padmanabha
2026-04-03  9:18 ` [PATCH 3/4] ethdev: hide VMDq internal sizes David Marchand
2026-04-03  9:18 ` [PATCH 4/4] net/iavf: accept up to 32k unicast MAC addresses David Marchand
2026-04-05 18:47 ` [PATCH 0/4] Remove limitations coming from legacy VMDq Stephen Hemminger

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox