From: Maxime Peim <maxime.peim@gmail.com>
To: dev@dpdk.org
Cc: dsosnowski@nvidia.com, viacheslavo@nvidia.com, bingz@nvidia.com,
orika@nvidia.com, suanmingm@nvidia.com, matan@nvidia.com
Subject: [PATCH v2] net/mlx5: prepend implicit items in sync flow creation path
Date: Mon, 20 Apr 2026 10:52:36 +0200 [thread overview]
Message-ID: <20260420085236.2356342-1-maxime.peim@gmail.com> (raw)
In-Reply-To: <20260409185634.3996187-1-maxime.peim@gmail.com>
In eSwitch mode, the async (template) flow creation path automatically
prepends implicit match items to scope flow rules to the correct
representor port:
- Ingress: REPRESENTED_PORT item matching dev->data->port_id
- Egress: REG_C_0 TAG item matching the port's tx tag value
The sync path (flow_hw_list_create) was missing this logic, causing all
flow rules created via the non-template API to match traffic from all
ports rather than being scoped to the specific representor.
Add the same implicit item prepending to flow_hw_list_create, right
after pattern validation and before any branching (sample/RSS/single/
prefix), mirroring the behavior of flow_hw_pattern_template_create
and flow_hw_get_rule_items. The ingress case prepends
REPRESENTED_PORT with the current port_id; the egress case prepends
MLX5_RTE_FLOW_ITEM_TYPE_TAG with REG_C_0 value/mask (skipped when
the user provides an explicit SQ item).
Also fix a pre-existing bug where 'return split' on metadata split
failure implicitly converted a negative int to uintptr_t, producing a
huge non-zero value that callers would treat as a valid flow handle
instead of an error.
Signed-off-by: Maxime Peim <maxime.peim@gmail.com>
---
drivers/net/mlx5/mlx5_flow_hw.c | 76 ++++++++++++++++++++++++++++++---
1 file changed, 71 insertions(+), 5 deletions(-)
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index bca5b2769e..21cadcc5bd 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -14275,6 +14275,7 @@ static uintptr_t flow_hw_list_create(struct rte_eth_dev *dev,
uint64_t item_flags = 0;
uint64_t action_flags = mlx5_flow_hw_action_flags_get(actions, &qrss, &mark,
&encap_idx, &actions_n, error);
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_flow_hw_split_resource resource = {
.suffix = {
.attr = attr,
@@ -14282,6 +14283,28 @@ static uintptr_t flow_hw_list_create(struct rte_eth_dev *dev,
.actions = actions,
},
};
+ struct rte_flow_item *prepend_items = NULL;
+ struct rte_flow_item_ethdev port_spec = {.port_id = dev->data->port_id};
+ struct rte_flow_item port = {
+ .type = RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT,
+ .spec = &port_spec,
+ .mask = &rte_flow_item_ethdev_mask,
+ };
+ struct mlx5_rte_flow_item_tag tag_v = {
+ .id = REG_C_0,
+ .data = flow_hw_tx_tag_regc_value(dev),
+ };
+ struct mlx5_rte_flow_item_tag tag_m = {
+ .id = REG_C_0,
+ .data = flow_hw_tx_tag_regc_mask(dev),
+ };
+ struct rte_flow_item tag = {
+ .type = (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_TAG,
+ .spec = &tag_v,
+ .mask = &tag_m,
+ .last = NULL,
+ };
+ uint32_t nb_items;
struct rte_flow_error shadow_error = {0, };
const struct rte_flow_pattern_template_attr pattern_template_attr = {
.relaxed_matching = 0,
@@ -14296,13 +14319,48 @@ static uintptr_t flow_hw_list_create(struct rte_eth_dev *dev,
if (ret < 0)
return 0;
+ nb_items = ret;
+
+ /*
+ * In eSwitch mode, the async (template) path automatically prepends
+ * implicit items to scope flow rules to the correct representor port:
+ * - Ingress: REPRESENTED_PORT item matching dev->data->port_id
+ * - Egress: REG_C_0 TAG item matching the port's tx tag value
+ * Mirror this behavior in the sync path so rules are not shared
+ * across all eSwitch ports.
+ */
+ if (priv->sh->config.dv_esw_en &&
+ attr->ingress && !attr->egress && !attr->transfer) {
+ prepend_items = flow_hw_prepend_item(items, nb_items,
+ &port, error);
+ if (!prepend_items)
+ return 0;
+ items = prepend_items;
+ } else if (priv->sh->config.dv_esw_en &&
+ !attr->ingress && attr->egress && !attr->transfer) {
+ if (item_flags & MLX5_FLOW_ITEM_SQ) {
+ DRV_LOG(DEBUG,
+ "Port %u omitting implicit REG_C_0 match for egress "
+ "pattern template",
+ dev->data->port_id);
+ goto setup_pattern;
+ }
+ prepend_items = flow_hw_prepend_item(items, nb_items,
+ &tag, error);
+ if (!prepend_items)
+ return 0;
+ items = prepend_items;
+ }
+setup_pattern:
RTE_SET_USED(encap_idx);
if (!error)
error = &shadow_error;
split = mlx5_flow_nta_split_metadata(dev, attr, actions, qrss, action_flags,
actions_n, external, &resource, error);
- if (split < 0)
- return split;
+ if (split < 0) {
+ mlx5_free(prepend_items);
+ return 0;
+ }
/* Update the metadata copy table - MLX5_FLOW_MREG_CP_TABLE_GROUP */
if (((attr->ingress && attr->group != MLX5_FLOW_MREG_CP_TABLE_GROUP) ||
@@ -14315,8 +14373,10 @@ static uintptr_t flow_hw_list_create(struct rte_eth_dev *dev,
if (action_flags & MLX5_FLOW_ACTION_SAMPLE) {
flow = mlx5_nta_sample_flow_list_create(dev, type, attr, items, actions,
item_flags, action_flags, error);
- if (flow != NULL)
+ if (flow != NULL) {
+ mlx5_free(prepend_items);
return (uintptr_t)flow;
+ }
goto free;
}
if (action_flags & MLX5_FLOW_ACTION_RSS) {
@@ -14328,8 +14388,10 @@ static uintptr_t flow_hw_list_create(struct rte_eth_dev *dev,
if (flow) {
flow->nt2hws->rix_mreg_copy = cpy_idx;
cpy_idx = 0;
- if (!split)
+ if (!split) {
+ mlx5_free(prepend_items);
return (uintptr_t)flow;
+ }
goto prefix_flow;
}
goto free;
@@ -14343,8 +14405,10 @@ static uintptr_t flow_hw_list_create(struct rte_eth_dev *dev,
if (flow) {
flow->nt2hws->rix_mreg_copy = cpy_idx;
cpy_idx = 0;
- if (!split)
+ if (!split) {
+ mlx5_free(prepend_items);
return (uintptr_t)flow;
+ }
/* Fall Through to prefix flow creation. */
}
prefix_flow:
@@ -14357,6 +14421,7 @@ static uintptr_t flow_hw_list_create(struct rte_eth_dev *dev,
flow->nt2hws->chaned_flow = 1;
SLIST_INSERT_AFTER(prfx_flow, flow, nt2hws->next);
mlx5_flow_nta_split_resource_free(dev, &resource);
+ mlx5_free(prepend_items);
return (uintptr_t)prfx_flow;
}
free:
@@ -14368,6 +14433,7 @@ static uintptr_t flow_hw_list_create(struct rte_eth_dev *dev,
mlx5_flow_nta_del_copy_action(dev, cpy_idx);
if (split > 0)
mlx5_flow_nta_split_resource_free(dev, &resource);
+ mlx5_free(prepend_items);
return 0;
}
--
2.43.0
next prev parent reply other threads:[~2026-04-24 7:06 UTC|newest]
Thread overview: 6+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-04-09 18:56 [PATCH] net/mlx5: prepend implicit items in sync flow creation path Maxime Peim
2026-04-20 8:52 ` Maxime Peim [this message]
2026-04-24 9:37 ` [PATCH v2] " Dariusz Sosnowski
2026-04-27 12:32 ` [PATCH v3] " Maxime Peim
2026-05-08 12:40 ` Dariusz Sosnowski
2026-05-11 15:08 ` [PATCH v4] " Maxime Peim
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260420085236.2356342-1-maxime.peim@gmail.com \
--to=maxime.peim@gmail.com \
--cc=bingz@nvidia.com \
--cc=dev@dpdk.org \
--cc=dsosnowski@nvidia.com \
--cc=matan@nvidia.com \
--cc=orika@nvidia.com \
--cc=suanmingm@nvidia.com \
--cc=viacheslavo@nvidia.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.