* [PATCH] net/mlx5: prepend implicit items in sync flow creation path
@ 2026-04-09 18:56 Maxime Peim
2026-04-20 8:52 ` [PATCH v2] " Maxime Peim
0 siblings, 1 reply; 6+ messages in thread
From: Maxime Peim @ 2026-04-09 18:56 UTC (permalink / raw)
To: dev; +Cc: dsosnowski, viacheslavo, bingz, orika, suanmingm, matan
In eSwitch mode, the async (template) flow creation path automatically
prepends implicit match items to scope flow rules to the correct
representor port:
- Ingress: REPRESENTED_PORT item matching dev->data->port_id
- Egress: REG_C_0 TAG item matching the port's tx tag value
The sync path (flow_hw_list_create) was missing this logic, causing all
flow rules created via the non-template API to match traffic from all
ports rather than being scoped to the specific representor.
Add the same implicit item prepending to flow_hw_list_create, right
after pattern validation and before any branching (sample/RSS/single/
prefix), mirroring the behavior of flow_hw_pattern_template_create
and flow_hw_get_rule_items. The ingress case prepends
REPRESENTED_PORT with the current port_id; the egress case prepends
MLX5_RTE_FLOW_ITEM_TYPE_TAG with REG_C_0 value/mask (skipped when
user provides an explicit SQ item).
Also fix a pre-existing bug where 'return split' on metadata split
failure returned a negative int cast to uintptr_t, which callers
would treat as a valid flow handle instead of an error.
Signed-off-by: Maxime Peim <maxime.peim@gmail.com>
---
drivers/net/mlx5/mlx5_flow_hw.c | 78 ++++++++++++++++++++++++++++++---
1 file changed, 73 insertions(+), 5 deletions(-)
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index bca5b2769e..d05cd2075c 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -14275,6 +14275,7 @@ static uintptr_t flow_hw_list_create(struct rte_eth_dev *dev,
uint64_t item_flags = 0;
uint64_t action_flags = mlx5_flow_hw_action_flags_get(actions, &qrss, &mark,
&encap_idx, &actions_n, error);
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_flow_hw_split_resource resource = {
.suffix = {
.attr = attr,
@@ -14282,6 +14283,28 @@ static uintptr_t flow_hw_list_create(struct rte_eth_dev *dev,
.actions = actions,
},
};
+ struct rte_flow_item *prepend_items = NULL;
+ struct rte_flow_item_ethdev port_spec = {.port_id = dev->data->port_id};
+ struct rte_flow_item port = {
+ .type = RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT,
+ .spec = &port_spec,
+ .mask = &rte_flow_item_ethdev_mask,
+ };
+ struct mlx5_rte_flow_item_tag tag_v = {
+ .id = REG_C_0,
+ .data = flow_hw_tx_tag_regc_value(dev),
+ };
+ struct mlx5_rte_flow_item_tag tag_m = {
+ .id = REG_C_0,
+ .data = flow_hw_tx_tag_regc_mask(dev),
+ };
+ struct rte_flow_item tag = {
+ .type = (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_TAG,
+ .spec = &tag_v,
+ .mask = &tag_m,
+ .last = NULL,
+ };
+ uint32_t nb_items;
struct rte_flow_error shadow_error = {0, };
const struct rte_flow_pattern_template_attr pattern_template_attr = {
.relaxed_matching = 0,
@@ -14296,13 +14319,50 @@ static uintptr_t flow_hw_list_create(struct rte_eth_dev *dev,
if (ret < 0)
return 0;
+ nb_items = ret;
+
+ /*
+ * In eSwitch mode, the async (template) path automatically prepends
+ * implicit items to scope flow rules to the correct representor port:
+ * - Ingress: REPRESENTED_PORT item matching dev->data->port_id
+ * - Egress: REG_C_0 TAG item matching the port's tx tag value
+ * Mirror this behavior in the sync path so rules are not shared
+ * across all eSwitch ports.
+ */
+ if (priv->sh->config.dv_esw_en &&
+ attr->ingress && !attr->egress && !attr->transfer) {
+ prepend_items = flow_hw_prepend_item(items, nb_items,
+ &port, error);
+ if (!prepend_items)
+ return 0;
+ items = prepend_items;
+ resource.suffix.items = items;
+ } else if (priv->sh->config.dv_esw_en &&
+ !attr->ingress && attr->egress && !attr->transfer) {
+ if (item_flags & MLX5_FLOW_ITEM_SQ) {
+ DRV_LOG(DEBUG,
+ "Port %u omitting implicit REG_C_0 match for egress "
+ "pattern template",
+ dev->data->port_id);
+ goto setup_pattern;
+ }
+ prepend_items = flow_hw_prepend_item(items, nb_items,
+ &tag, error);
+ if (!prepend_items)
+ return 0;
+ items = prepend_items;
+ resource.suffix.items = items;
+ }
+setup_pattern:
RTE_SET_USED(encap_idx);
if (!error)
error = &shadow_error;
split = mlx5_flow_nta_split_metadata(dev, attr, actions, qrss, action_flags,
actions_n, external, &resource, error);
- if (split < 0)
- return split;
+ if (split < 0) {
+ mlx5_free(prepend_items);
+ return 0;
+ }
/* Update the metadata copy table - MLX5_FLOW_MREG_CP_TABLE_GROUP */
if (((attr->ingress && attr->group != MLX5_FLOW_MREG_CP_TABLE_GROUP) ||
@@ -14315,8 +14375,10 @@ static uintptr_t flow_hw_list_create(struct rte_eth_dev *dev,
if (action_flags & MLX5_FLOW_ACTION_SAMPLE) {
flow = mlx5_nta_sample_flow_list_create(dev, type, attr, items, actions,
item_flags, action_flags, error);
- if (flow != NULL)
+ if (flow != NULL) {
+ mlx5_free(prepend_items);
return (uintptr_t)flow;
+ }
goto free;
}
if (action_flags & MLX5_FLOW_ACTION_RSS) {
@@ -14328,8 +14390,10 @@ static uintptr_t flow_hw_list_create(struct rte_eth_dev *dev,
if (flow) {
flow->nt2hws->rix_mreg_copy = cpy_idx;
cpy_idx = 0;
- if (!split)
+ if (!split) {
+ mlx5_free(prepend_items);
return (uintptr_t)flow;
+ }
goto prefix_flow;
}
goto free;
@@ -14343,8 +14407,10 @@ static uintptr_t flow_hw_list_create(struct rte_eth_dev *dev,
if (flow) {
flow->nt2hws->rix_mreg_copy = cpy_idx;
cpy_idx = 0;
- if (!split)
+ if (!split) {
+ mlx5_free(prepend_items);
return (uintptr_t)flow;
+ }
/* Fall Through to prefix flow creation. */
}
prefix_flow:
@@ -14357,6 +14423,7 @@ static uintptr_t flow_hw_list_create(struct rte_eth_dev *dev,
flow->nt2hws->chaned_flow = 1;
SLIST_INSERT_AFTER(prfx_flow, flow, nt2hws->next);
mlx5_flow_nta_split_resource_free(dev, &resource);
+ mlx5_free(prepend_items);
return (uintptr_t)prfx_flow;
}
free:
@@ -14368,6 +14435,7 @@ static uintptr_t flow_hw_list_create(struct rte_eth_dev *dev,
mlx5_flow_nta_del_copy_action(dev, cpy_idx);
if (split > 0)
mlx5_flow_nta_split_resource_free(dev, &resource);
+ mlx5_free(prepend_items);
return 0;
}
--
2.43.0
^ permalink raw reply related [flat|nested] 6+ messages in thread
* [PATCH v2] net/mlx5: prepend implicit items in sync flow creation path
2026-04-09 18:56 [PATCH] net/mlx5: prepend implicit items in sync flow creation path Maxime Peim
@ 2026-04-20 8:52 ` Maxime Peim
2026-04-24 9:37 ` Dariusz Sosnowski
2026-04-27 12:32 ` [PATCH v3] " Maxime Peim
0 siblings, 2 replies; 6+ messages in thread
From: Maxime Peim @ 2026-04-20 8:52 UTC (permalink / raw)
To: dev; +Cc: dsosnowski, viacheslavo, bingz, orika, suanmingm, matan
In eSwitch mode, the async (template) flow creation path automatically
prepends implicit match items to scope flow rules to the correct
representor port:
- Ingress: REPRESENTED_PORT item matching dev->data->port_id
- Egress: REG_C_0 TAG item matching the port's tx tag value
The sync path (flow_hw_list_create) was missing this logic, causing all
flow rules created via the non-template API to match traffic from all
ports rather than being scoped to the specific representor.
Add the same implicit item prepending to flow_hw_list_create, right
after pattern validation and before any branching (sample/RSS/single/
prefix), mirroring the behavior of flow_hw_pattern_template_create
and flow_hw_get_rule_items. The ingress case prepends
REPRESENTED_PORT with the current port_id; the egress case prepends
MLX5_RTE_FLOW_ITEM_TYPE_TAG with REG_C_0 value/mask (skipped when
user provides an explicit SQ item).
Also fix a pre-existing bug where 'return split' on metadata split
failure returned a negative int cast to uintptr_t, which callers
would treat as a valid flow handle instead of an error.
Signed-off-by: Maxime Peim <maxime.peim@gmail.com>
---
drivers/net/mlx5/mlx5_flow_hw.c | 76 ++++++++++++++++++++++++++++++---
1 file changed, 71 insertions(+), 5 deletions(-)
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index bca5b2769e..21cadcc5bd 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -14275,6 +14275,7 @@ static uintptr_t flow_hw_list_create(struct rte_eth_dev *dev,
uint64_t item_flags = 0;
uint64_t action_flags = mlx5_flow_hw_action_flags_get(actions, &qrss, &mark,
&encap_idx, &actions_n, error);
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_flow_hw_split_resource resource = {
.suffix = {
.attr = attr,
@@ -14282,6 +14283,28 @@ static uintptr_t flow_hw_list_create(struct rte_eth_dev *dev,
.actions = actions,
},
};
+ struct rte_flow_item *prepend_items = NULL;
+ struct rte_flow_item_ethdev port_spec = {.port_id = dev->data->port_id};
+ struct rte_flow_item port = {
+ .type = RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT,
+ .spec = &port_spec,
+ .mask = &rte_flow_item_ethdev_mask,
+ };
+ struct mlx5_rte_flow_item_tag tag_v = {
+ .id = REG_C_0,
+ .data = flow_hw_tx_tag_regc_value(dev),
+ };
+ struct mlx5_rte_flow_item_tag tag_m = {
+ .id = REG_C_0,
+ .data = flow_hw_tx_tag_regc_mask(dev),
+ };
+ struct rte_flow_item tag = {
+ .type = (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_TAG,
+ .spec = &tag_v,
+ .mask = &tag_m,
+ .last = NULL,
+ };
+ uint32_t nb_items;
struct rte_flow_error shadow_error = {0, };
const struct rte_flow_pattern_template_attr pattern_template_attr = {
.relaxed_matching = 0,
@@ -14296,13 +14319,48 @@ static uintptr_t flow_hw_list_create(struct rte_eth_dev *dev,
if (ret < 0)
return 0;
+ nb_items = ret;
+
+ /*
+ * In eSwitch mode, the async (template) path automatically prepends
+ * implicit items to scope flow rules to the correct representor port:
+ * - Ingress: REPRESENTED_PORT item matching dev->data->port_id
+ * - Egress: REG_C_0 TAG item matching the port's tx tag value
+ * Mirror this behavior in the sync path so rules are not shared
+ * across all eSwitch ports.
+ */
+ if (priv->sh->config.dv_esw_en &&
+ attr->ingress && !attr->egress && !attr->transfer) {
+ prepend_items = flow_hw_prepend_item(items, nb_items,
+ &port, error);
+ if (!prepend_items)
+ return 0;
+ items = prepend_items;
+ } else if (priv->sh->config.dv_esw_en &&
+ !attr->ingress && attr->egress && !attr->transfer) {
+ if (item_flags & MLX5_FLOW_ITEM_SQ) {
+ DRV_LOG(DEBUG,
+ "Port %u omitting implicit REG_C_0 match for egress "
+ "pattern template",
+ dev->data->port_id);
+ goto setup_pattern;
+ }
+ prepend_items = flow_hw_prepend_item(items, nb_items,
+ &tag, error);
+ if (!prepend_items)
+ return 0;
+ items = prepend_items;
+ }
+setup_pattern:
RTE_SET_USED(encap_idx);
if (!error)
error = &shadow_error;
split = mlx5_flow_nta_split_metadata(dev, attr, actions, qrss, action_flags,
actions_n, external, &resource, error);
- if (split < 0)
- return split;
+ if (split < 0) {
+ mlx5_free(prepend_items);
+ return 0;
+ }
/* Update the metadata copy table - MLX5_FLOW_MREG_CP_TABLE_GROUP */
if (((attr->ingress && attr->group != MLX5_FLOW_MREG_CP_TABLE_GROUP) ||
@@ -14315,8 +14373,10 @@ static uintptr_t flow_hw_list_create(struct rte_eth_dev *dev,
if (action_flags & MLX5_FLOW_ACTION_SAMPLE) {
flow = mlx5_nta_sample_flow_list_create(dev, type, attr, items, actions,
item_flags, action_flags, error);
- if (flow != NULL)
+ if (flow != NULL) {
+ mlx5_free(prepend_items);
return (uintptr_t)flow;
+ }
goto free;
}
if (action_flags & MLX5_FLOW_ACTION_RSS) {
@@ -14328,8 +14388,10 @@ static uintptr_t flow_hw_list_create(struct rte_eth_dev *dev,
if (flow) {
flow->nt2hws->rix_mreg_copy = cpy_idx;
cpy_idx = 0;
- if (!split)
+ if (!split) {
+ mlx5_free(prepend_items);
return (uintptr_t)flow;
+ }
goto prefix_flow;
}
goto free;
@@ -14343,8 +14405,10 @@ static uintptr_t flow_hw_list_create(struct rte_eth_dev *dev,
if (flow) {
flow->nt2hws->rix_mreg_copy = cpy_idx;
cpy_idx = 0;
- if (!split)
+ if (!split) {
+ mlx5_free(prepend_items);
return (uintptr_t)flow;
+ }
/* Fall Through to prefix flow creation. */
}
prefix_flow:
@@ -14357,6 +14421,7 @@ static uintptr_t flow_hw_list_create(struct rte_eth_dev *dev,
flow->nt2hws->chaned_flow = 1;
SLIST_INSERT_AFTER(prfx_flow, flow, nt2hws->next);
mlx5_flow_nta_split_resource_free(dev, &resource);
+ mlx5_free(prepend_items);
return (uintptr_t)prfx_flow;
}
free:
@@ -14368,6 +14433,7 @@ static uintptr_t flow_hw_list_create(struct rte_eth_dev *dev,
mlx5_flow_nta_del_copy_action(dev, cpy_idx);
if (split > 0)
mlx5_flow_nta_split_resource_free(dev, &resource);
+ mlx5_free(prepend_items);
return 0;
}
--
2.43.0
^ permalink raw reply related [flat|nested] 6+ messages in thread
* Re: [PATCH v2] net/mlx5: prepend implicit items in sync flow creation path
2026-04-20 8:52 ` [PATCH v2] " Maxime Peim
@ 2026-04-24 9:37 ` Dariusz Sosnowski
2026-04-27 12:32 ` [PATCH v3] " Maxime Peim
1 sibling, 0 replies; 6+ messages in thread
From: Dariusz Sosnowski @ 2026-04-24 9:37 UTC (permalink / raw)
To: Maxime Peim; +Cc: dev, viacheslavo, bingz, orika, suanmingm, matan
Thank you for the contribution.
Please see comments below.
On Mon, Apr 20, 2026 at 10:52:36AM +0200, Maxime Peim wrote:
> In eSwitch mode, the async (template) flow creation path automatically
> prepends implicit match items to scope flow rules to the correct
> representor port:
> - Ingress: REPRESENTED_PORT item matching dev->data->port_id
> - Egress: REG_C_0 TAG item matching the port's tx tag value
>
> The sync path (flow_hw_list_create) was missing this logic, causing all
> flow rules created via the non-template API to match traffic from all
> ports rather than being scoped to the specific representor.
>
> Add the same implicit item prepending to flow_hw_list_create, right
> after pattern validation and before any branching (sample/RSS/single/
> prefix), mirroring the behavior of flow_hw_pattern_template_create
> and flow_hw_get_rule_items. The ingress case prepends
> REPRESENTED_PORT with the current port_id; the egress case prepends
> MLX5_RTE_FLOW_ITEM_TYPE_TAG with REG_C_0 value/mask (skipped when
> user provides an explicit SQ item).
>
> Also fix a pre-existing bug where 'return split' on metadata split
> failure returned a negative int cast to uintptr_t, which callers
> would treat as a valid flow handle instead of an error.
Since this is a fix, this patch should be backported to LTS releases.
Please add the following "Fixes:" tags to the commit message,
which would help LTS maintainers to pick this patch up.
Fixes: e38776c36c8a ("net/mlx5: introduce HWS for non-template flow API")
Fixes: 821a6a5cc495 ("net/mlx5: add metadata split for compatibility")
>
> Signed-off-by: Maxime Peim <maxime.peim@gmail.com>
> ---
> drivers/net/mlx5/mlx5_flow_hw.c | 76 ++++++++++++++++++++++++++++++---
> 1 file changed, 71 insertions(+), 5 deletions(-)
>
> diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
> index bca5b2769e..21cadcc5bd 100644
> --- a/drivers/net/mlx5/mlx5_flow_hw.c
> +++ b/drivers/net/mlx5/mlx5_flow_hw.c
> @@ -14275,6 +14275,7 @@ static uintptr_t flow_hw_list_create(struct rte_eth_dev *dev,
> uint64_t item_flags = 0;
> uint64_t action_flags = mlx5_flow_hw_action_flags_get(actions, &qrss, &mark,
> &encap_idx, &actions_n, error);
> + struct mlx5_priv *priv = dev->data->dev_private;
> struct mlx5_flow_hw_split_resource resource = {
> .suffix = {
> .attr = attr,
> @@ -14282,6 +14283,28 @@ static uintptr_t flow_hw_list_create(struct rte_eth_dev *dev,
> .actions = actions,
> },
> };
> + struct rte_flow_item *prepend_items = NULL;
> + struct rte_flow_item_ethdev port_spec = {.port_id = dev->data->port_id};
> + struct rte_flow_item port = {
> + .type = RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT,
> + .spec = &port_spec,
> + .mask = &rte_flow_item_ethdev_mask,
> + };
> + struct mlx5_rte_flow_item_tag tag_v = {
> + .id = REG_C_0,
> + .data = flow_hw_tx_tag_regc_value(dev),
> + };
> + struct mlx5_rte_flow_item_tag tag_m = {
> + .id = REG_C_0,
> + .data = flow_hw_tx_tag_regc_mask(dev),
> + };
Please use rte_flow_item_tag struct here,
instead of mlx5_rte_flow_item_tag
(same as in flow_hw_pattern_template_create()).
Underlying HWS code expects the item spec and mask to be
rte_flow_item_tag struct for MLX5_RTE_FLOW_ITEM_TYPE_TAG item type.
(See mlx5dr_definer_conv_item_tag()).
This misalignment should be fixed at some point and it is in our
backlog, but for now rte_flow_item_tag should be used.
> + struct rte_flow_item tag = {
> + .type = (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_TAG,
> + .spec = &tag_v,
> + .mask = &tag_m,
> + .last = NULL,
> + };
> + uint32_t nb_items;
> struct rte_flow_error shadow_error = {0, };
> const struct rte_flow_pattern_template_attr pattern_template_attr = {
> .relaxed_matching = 0,
> @@ -14296,13 +14319,48 @@ static uintptr_t flow_hw_list_create(struct rte_eth_dev *dev,
> if (ret < 0)
> return 0;
>
> + nb_items = ret;
> +
> + /*
> + * In eSwitch mode, the async (template) path automatically prepends
> + * implicit items to scope flow rules to the correct representor port:
> + * - Ingress: REPRESENTED_PORT item matching dev->data->port_id
> + * - Egress: REG_C_0 TAG item matching the port's tx tag value
> + * Mirror this behavior in the sync path so rules are not shared
> + * across all eSwitch ports.
> + */
> + if (priv->sh->config.dv_esw_en &&
> + attr->ingress && !attr->egress && !attr->transfer) {
> + prepend_items = flow_hw_prepend_item(items, nb_items,
> + &port, error);
> + if (!prepend_items)
> + return 0;
> + items = prepend_items;
> + } else if (priv->sh->config.dv_esw_en &&
> + !attr->ingress && attr->egress && !attr->transfer) {
> + if (item_flags & MLX5_FLOW_ITEM_SQ) {
> + DRV_LOG(DEBUG,
> + "Port %u omitting implicit REG_C_0 match for egress "
> + "pattern template",
> + dev->data->port_id);
> + goto setup_pattern;
> + }
> + prepend_items = flow_hw_prepend_item(items, nb_items,
> + &tag, error);
> + if (!prepend_items)
> + return 0;
> + items = prepend_items;
> + }
Could you please introduce a helper function which could be used
both here and in flow_hw_pattern_template_create()?
I'm thinking about a function with this signature:
static struct rte_flow_item *
flow_hw_adjust_pattern(struct rte_eth_dev *dev,
const struct rte_flow_item *items,
const uint32_t nb_items,
struct rte_flow_item **copied_items)
This function would:
- Define port and tag items locally, in a single place.
- Check prepend conditions.
- If prepending was needed:
- Pointer to allocated items will be stored in *copied_items.
- *copied_items will be returned.
- Otherwise, original items will be returned.
> +setup_pattern:
> RTE_SET_USED(encap_idx);
> if (!error)
> error = &shadow_error;
> split = mlx5_flow_nta_split_metadata(dev, attr, actions, qrss, action_flags,
> actions_n, external, &resource, error);
> - if (split < 0)
> - return split;
> + if (split < 0) {
> + mlx5_free(prepend_items);
> + return 0;
> + }
>
> /* Update the metadata copy table - MLX5_FLOW_MREG_CP_TABLE_GROUP */
> if (((attr->ingress && attr->group != MLX5_FLOW_MREG_CP_TABLE_GROUP) ||
> @@ -14315,8 +14373,10 @@ static uintptr_t flow_hw_list_create(struct rte_eth_dev *dev,
> if (action_flags & MLX5_FLOW_ACTION_SAMPLE) {
> flow = mlx5_nta_sample_flow_list_create(dev, type, attr, items, actions,
> item_flags, action_flags, error);
> - if (flow != NULL)
> + if (flow != NULL) {
> + mlx5_free(prepend_items);
> return (uintptr_t)flow;
> + }
> goto free;
> }
> if (action_flags & MLX5_FLOW_ACTION_RSS) {
> @@ -14328,8 +14388,10 @@ static uintptr_t flow_hw_list_create(struct rte_eth_dev *dev,
> if (flow) {
> flow->nt2hws->rix_mreg_copy = cpy_idx;
> cpy_idx = 0;
> - if (!split)
> + if (!split) {
> + mlx5_free(prepend_items);
> return (uintptr_t)flow;
> + }
> goto prefix_flow;
> }
> goto free;
> @@ -14343,8 +14405,10 @@ static uintptr_t flow_hw_list_create(struct rte_eth_dev *dev,
> if (flow) {
> flow->nt2hws->rix_mreg_copy = cpy_idx;
> cpy_idx = 0;
> - if (!split)
> + if (!split) {
> + mlx5_free(prepend_items);
> return (uintptr_t)flow;
> + }
> /* Fall Through to prefix flow creation. */
> }
> prefix_flow:
> @@ -14357,6 +14421,7 @@ static uintptr_t flow_hw_list_create(struct rte_eth_dev *dev,
> flow->nt2hws->chaned_flow = 1;
> SLIST_INSERT_AFTER(prfx_flow, flow, nt2hws->next);
> mlx5_flow_nta_split_resource_free(dev, &resource);
> + mlx5_free(prepend_items);
> return (uintptr_t)prfx_flow;
> }
> free:
> @@ -14368,6 +14433,7 @@ static uintptr_t flow_hw_list_create(struct rte_eth_dev *dev,
> mlx5_flow_nta_del_copy_action(dev, cpy_idx);
> if (split > 0)
> mlx5_flow_nta_split_resource_free(dev, &resource);
> + mlx5_free(prepend_items);
> return 0;
> }
>
Best regards,
Dariusz Sosnowski
^ permalink raw reply [flat|nested] 6+ messages in thread
* [PATCH v3] net/mlx5: prepend implicit items in sync flow creation path
2026-04-20 8:52 ` [PATCH v2] " Maxime Peim
2026-04-24 9:37 ` Dariusz Sosnowski
@ 2026-04-27 12:32 ` Maxime Peim
2026-05-08 12:40 ` Dariusz Sosnowski
2026-05-11 15:08 ` [PATCH v4] " Maxime Peim
1 sibling, 2 replies; 6+ messages in thread
From: Maxime Peim @ 2026-04-27 12:32 UTC (permalink / raw)
To: dev; +Cc: dsosnowski, viacheslavo, bingz, orika, suanmingm, matan
In eSwitch mode, the async (template) flow creation path automatically
prepends implicit match items to scope flow rules to the correct
representor port:
- Ingress: REPRESENTED_PORT item matching dev->data->port_id
- Egress: REG_C_0 TAG item matching the port's tx tag value
The sync path (flow_hw_list_create) was missing this logic, causing all
flow rules created via the non-template API to match traffic from all
ports rather than being scoped to the specific representor.
Add the same implicit item prepending to flow_hw_list_create, right
after pattern validation and before any branching (sample/RSS/single/
prefix), mirroring the behavior of flow_hw_pattern_template_create
and flow_hw_get_rule_items. The ingress case prepends
REPRESENTED_PORT with the current port_id; the egress case prepends
MLX5_RTE_FLOW_ITEM_TYPE_TAG with REG_C_0 value/mask (skipped when
user provides an explicit SQ item).
Also fix a pre-existing bug where 'return split' on metadata split
failure returned a negative int cast to uintptr_t, which callers
would treat as a valid flow handle instead of an error.
Fixes: e38776c36c8a ("net/mlx5: introduce HWS for non-template flow API")
Fixes: 821a6a5cc495 ("net/mlx5: add metadata split for compatibility")
Signed-off-by: Maxime Peim <maxime.peim@gmail.com>
---
v3:
- Factor the implicit-item prepend logic out of
flow_hw_pattern_template_create() into a new helper
flow_hw_adjust_pattern() and reuse it from flow_hw_list_create(),
instead of duplicating the prepend logic inline in the sync path.
- Zero-initialize item_flags inside the new helper before validation.
The validator is read-modify-write on item_flags (it reads
MLX5_FLOW_LAYER_TUNNEL on the first iteration), so leaving it
uninitialized was undefined behavior.
- Keep calling __flow_hw_pattern_validate() with nt_flow=true from the
sync path (routing it through the flow_hw_pattern_validate() wrapper
would have forced nt_flow=false), preserving the previous behavior
that skips GENEVE_OPT TLV parser validation on the non-template path.
- Document flow_hw_adjust_pattern(): the dual role of the nt_flow
parameter (template spec-left-zero vs. sync spec-filled + validator
flag), the three-way return, and the caller's ownership of
*copied_items across every exit path.
- Clarify the "omitting implicit REG_C_0 match" debug log now that
the helper runs on both the template and sync paths.
- Add Fixes: tags for the two original commits.
drivers/net/mlx5/mlx5_flow_hw.c | 192 +++++++++++++++++++++-----------
1 file changed, 130 insertions(+), 62 deletions(-)
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index bca5b2769e..ffd7a0076f 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -9255,33 +9255,40 @@ pattern_template_validate(struct rte_eth_dev *dev,
return -ret;
}
-/**
- * Create flow item template.
+/*
+ * Validate the user-supplied items and, in eSwitch mode, prepend the implicit
+ * scoping item so the rule/template is bound to the current representor port:
+ * - ingress -> RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT (dev->data->port_id)
+ * - egress -> MLX5_RTE_FLOW_ITEM_TYPE_TAG on REG_C_0 (tx vport tag),
+ * skipped when the user already supplied an SQ item.
*
- * @param[in] dev
- * Pointer to the rte_eth_dev structure.
- * @param[in] attr
- * Pointer to the item template attributes.
- * @param[in] items
- * The template item pattern.
- * @param[out] error
- * Pointer to error structure.
+ * @param nt_flow
+ * Selects between the two call paths that share this helper:
+ * false -> pattern template creation (async API). The prepended item's
+ * spec is left zeroed so mlx5dr matches any value; the live
+ * port_id / tx-tag value is substituted later by
+ * flow_hw_get_rule_items() at rule-create time.
+ * true -> sync (non-template) flow creation. The prepended item's spec
+ * is filled immediately with the live values, and the flag is
+ * forwarded to __flow_hw_pattern_validate() so that validation
+ * paths gated on nt_flow (e.g. GENEVE_OPT TLV parser creation)
+ * take the non-template branch.
*
- * @return
- * Item template pointer on success, NULL otherwise and rte_errno is set.
+ * Return / ownership:
+ * - NULL on validation or allocation failure (error populated).
+ * - `items` unchanged when no prepending is required; *copied_items == NULL.
+ * - A newly-allocated array otherwise; also stored in *copied_items. The
+ * caller must mlx5_free(*copied_items) on every path (it is safe to call
+ * with NULL). Do not free the returned pointer directly.
*/
-static struct rte_flow_pattern_template *
-flow_hw_pattern_template_create(struct rte_eth_dev *dev,
- const struct rte_flow_pattern_template_attr *attr,
- const struct rte_flow_item items[],
- bool external,
- struct rte_flow_error *error)
+static const struct rte_flow_item *
+flow_hw_adjust_pattern(struct rte_eth_dev *dev, const struct rte_flow_pattern_template_attr *attr,
+ bool nt_flow, const struct rte_flow_item *items, uint64_t *item_flags,
+ uint64_t *nb_items, struct rte_flow_item **copied_items,
+ struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct rte_flow_pattern_template *it;
- struct rte_flow_item *copied_items = NULL;
- const struct rte_flow_item *tmpl_items;
- uint64_t orig_item_nb, item_flags = 0;
+ struct rte_flow_item_ethdev port_spec = {.port_id = dev->data->port_id};
struct rte_flow_item port = {
.type = RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT,
.mask = &rte_flow_item_ethdev_mask,
@@ -9298,39 +9305,89 @@ flow_hw_pattern_template_create(struct rte_eth_dev *dev,
.type = (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_TAG,
.spec = &tag_v,
.mask = &tag_m,
- .last = NULL
+ .last = NULL,
};
- int it_items_size;
- unsigned int i = 0;
int rc;
+ if (!copied_items || !item_flags || !nb_items)
+ return NULL;
+
+ if (nt_flow) {
+ port.spec = &port_spec;
+ tag_v.data = flow_hw_tx_tag_regc_value(dev);
+ }
+
+ /*
+ * item_flags must be zero-initialized: __flow_hw_pattern_validate()
+ * OR-accumulates bits into it and reads it (MLX5_FLOW_LAYER_TUNNEL)
+ * on the first iteration.
+ */
+ *item_flags = 0;
+
/* Validate application items only */
- rc = flow_hw_pattern_validate(dev, attr, items, &item_flags, error);
+ rc = __flow_hw_pattern_validate(dev, attr, items, item_flags, nt_flow, error);
if (rc < 0)
return NULL;
- orig_item_nb = rc;
- if (priv->sh->config.dv_esw_en &&
- attr->ingress && !attr->egress && !attr->transfer) {
- copied_items = flow_hw_prepend_item(items, orig_item_nb, &port, error);
- if (!copied_items)
+ *nb_items = rc;
+
+ if (priv->sh->config.dv_esw_en && attr->ingress && !attr->egress && !attr->transfer) {
+ *copied_items = flow_hw_prepend_item(items, *nb_items, &port, error);
+ if (!*copied_items)
return NULL;
- tmpl_items = copied_items;
- } else if (priv->sh->config.dv_esw_en &&
- !attr->ingress && attr->egress && !attr->transfer) {
- if (item_flags & MLX5_FLOW_ITEM_SQ) {
- DRV_LOG(DEBUG, "Port %u omitting implicit REG_C_0 match for egress "
- "pattern template", dev->data->port_id);
- tmpl_items = items;
- goto setup_pattern_template;
+ return *copied_items;
+ } else if (priv->sh->config.dv_esw_en && !attr->ingress && attr->egress &&
+ !attr->transfer) {
+ if (*item_flags & MLX5_FLOW_ITEM_SQ) {
+ DRV_LOG(DEBUG,
+ "Port %u: explicit SQ item present, omitting implicit "
+ "REG_C_0 match for egress pattern",
+ dev->data->port_id);
+ return items;
}
- copied_items = flow_hw_prepend_item(items, orig_item_nb, &tag, error);
- if (!copied_items)
+ *copied_items = flow_hw_prepend_item(items, *nb_items, &tag, error);
+ if (!*copied_items)
return NULL;
- tmpl_items = copied_items;
- } else {
- tmpl_items = items;
+ return *copied_items;
}
-setup_pattern_template:
+ return items;
+}
+
+/**
+ * Create flow item template.
+ *
+ * @param[in] dev
+ * Pointer to the rte_eth_dev structure.
+ * @param[in] attr
+ * Pointer to the item template attributes.
+ * @param[in] items
+ * The template item pattern.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * Item template pointer on success, NULL otherwise and rte_errno is set.
+ */
+static struct rte_flow_pattern_template *
+flow_hw_pattern_template_create(struct rte_eth_dev *dev,
+ const struct rte_flow_pattern_template_attr *attr,
+ const struct rte_flow_item items[],
+ bool external,
+ struct rte_flow_error *error)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct rte_flow_pattern_template *it;
+ struct rte_flow_item *copied_items = NULL;
+ const struct rte_flow_item *tmpl_items;
+ int it_items_size;
+ uint64_t orig_item_nb, item_flags;
+ unsigned int i = 0;
+ int rc;
+
+ tmpl_items = flow_hw_adjust_pattern(dev, attr, false, items, &orig_item_nb, &item_flags,
+ &copied_items, error);
+ if (!tmpl_items)
+ return NULL;
+
it = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*it), 0, SOCKET_ID_ANY);
if (!it) {
rte_flow_error_set(error, ENOMEM,
@@ -14272,7 +14329,6 @@ static uintptr_t flow_hw_list_create(struct rte_eth_dev *dev,
struct rte_flow_hw *prfx_flow = NULL;
const struct rte_flow_action *qrss = NULL;
const struct rte_flow_action *mark = NULL;
- uint64_t item_flags = 0;
uint64_t action_flags = mlx5_flow_hw_action_flags_get(actions, &qrss, &mark,
&encap_idx, &actions_n, error);
struct mlx5_flow_hw_split_resource resource = {
@@ -14289,20 +14345,25 @@ static uintptr_t flow_hw_list_create(struct rte_eth_dev *dev,
.egress = attr->egress,
.transfer = attr->transfer,
};
-
- /* Validate application items only */
- ret = __flow_hw_pattern_validate(dev, &pattern_template_attr, items,
- &item_flags, true, error);
- if (ret < 0)
- return 0;
+ struct rte_flow_item *copied_items = NULL;
+ const struct rte_flow_item *prepend_items;
+ uint64_t orig_item_nb, item_flags;
RTE_SET_USED(encap_idx);
if (!error)
error = &shadow_error;
+
+ prepend_items = flow_hw_adjust_pattern(dev, &pattern_template_attr, true, items,
+ &orig_item_nb, &item_flags, &copied_items, error);
+ if (!prepend_items)
+ return 0;
+
split = mlx5_flow_nta_split_metadata(dev, attr, actions, qrss, action_flags,
actions_n, external, &resource, error);
- if (split < 0)
- return split;
+ if (split < 0) {
+ mlx5_free(copied_items);
+ return 0;
+ }
/* Update the metadata copy table - MLX5_FLOW_MREG_CP_TABLE_GROUP */
if (((attr->ingress && attr->group != MLX5_FLOW_MREG_CP_TABLE_GROUP) ||
@@ -14313,23 +14374,26 @@ static uintptr_t flow_hw_list_create(struct rte_eth_dev *dev,
goto free;
}
if (action_flags & MLX5_FLOW_ACTION_SAMPLE) {
- flow = mlx5_nta_sample_flow_list_create(dev, type, attr, items, actions,
+ flow = mlx5_nta_sample_flow_list_create(dev, type, attr, prepend_items, actions,
item_flags, action_flags, error);
- if (flow != NULL)
+ if (flow != NULL) {
+ mlx5_free(copied_items);
return (uintptr_t)flow;
+ }
goto free;
}
if (action_flags & MLX5_FLOW_ACTION_RSS) {
const struct rte_flow_action_rss
*rss_conf = mlx5_flow_nta_locate_rss(dev, actions, error);
- flow = mlx5_flow_nta_handle_rss(dev, attr, items, actions, rss_conf,
- item_flags, action_flags, external,
- type, error);
+ flow = mlx5_flow_nta_handle_rss(dev, attr, prepend_items, actions, rss_conf,
+ item_flags, action_flags, external, type, error);
if (flow) {
flow->nt2hws->rix_mreg_copy = cpy_idx;
cpy_idx = 0;
- if (!split)
+ if (!split) {
+ mlx5_free(copied_items);
return (uintptr_t)flow;
+ }
goto prefix_flow;
}
goto free;
@@ -14343,12 +14407,14 @@ static uintptr_t flow_hw_list_create(struct rte_eth_dev *dev,
if (flow) {
flow->nt2hws->rix_mreg_copy = cpy_idx;
cpy_idx = 0;
- if (!split)
+ if (!split) {
+ mlx5_free(copied_items);
return (uintptr_t)flow;
+ }
/* Fall Through to prefix flow creation. */
}
prefix_flow:
- ret = mlx5_flow_hw_create_flow(dev, type, attr, items, resource.prefix.actions,
+ ret = mlx5_flow_hw_create_flow(dev, type, attr, prepend_items, resource.prefix.actions,
item_flags, action_flags, external, &prfx_flow, error);
if (ret)
goto free;
@@ -14357,6 +14423,7 @@ static uintptr_t flow_hw_list_create(struct rte_eth_dev *dev,
flow->nt2hws->chaned_flow = 1;
SLIST_INSERT_AFTER(prfx_flow, flow, nt2hws->next);
mlx5_flow_nta_split_resource_free(dev, &resource);
+ mlx5_free(copied_items);
return (uintptr_t)prfx_flow;
}
free:
@@ -14368,6 +14435,7 @@ static uintptr_t flow_hw_list_create(struct rte_eth_dev *dev,
mlx5_flow_nta_del_copy_action(dev, cpy_idx);
if (split > 0)
mlx5_flow_nta_split_resource_free(dev, &resource);
+ mlx5_free(copied_items);
return 0;
}
--
2.43.0
^ permalink raw reply related [flat|nested] 6+ messages in thread
* Re: [PATCH v3] net/mlx5: prepend implicit items in sync flow creation path
2026-04-27 12:32 ` [PATCH v3] " Maxime Peim
@ 2026-05-08 12:40 ` Dariusz Sosnowski
2026-05-11 15:08 ` [PATCH v4] " Maxime Peim
1 sibling, 0 replies; 6+ messages in thread
From: Dariusz Sosnowski @ 2026-05-08 12:40 UTC (permalink / raw)
To: Maxime Peim; +Cc: dev, viacheslavo, bingz, orika, suanmingm, matan
Hi,
Thank you for making the changes. Please see a comment below.
On Mon, Apr 27, 2026 at 02:32:17PM +0200, Maxime Peim wrote:
> +
> + prepend_items = flow_hw_adjust_pattern(dev, &pattern_template_attr, true, items,
> + &orig_item_nb, &item_flags, &copied_items, error);
Adjusted items should also be stored in resource.suffix.items.
In case metadata split is not needed - when mlx5_flow_nta_split_metadata() returns 0 -
only single flow rule will be created, at line 14392 (after applying your patch).
In this case, items are taken from resource.suffix.items.
resource.suffix.items holds pointer to original pattern, so adjusted
pattern is not taken into account.
This case can be reproduced as follows:
# --flow-isolate-all disables default flow rules which can
# obfuscate the issue
dpdk-testpmd -a 08:00.0,dv_flow_en=2,representor=vf0-1 -- --flow-isolate-all -i
testpmd> flow create 0 group 0 priority 0 ingress pattern end actions jump group 1 / end
testpmd> flow create 0 group 1 priority 0 ingress pattern end actions queue index 0 / end
Packets sent from VF0 or VF1 will arrive at PF's queue 0.
Best regards,
Dariusz Sosnowski
^ permalink raw reply [flat|nested] 6+ messages in thread
* [PATCH v4] net/mlx5: prepend implicit items in sync flow creation path
2026-04-27 12:32 ` [PATCH v3] " Maxime Peim
2026-05-08 12:40 ` Dariusz Sosnowski
@ 2026-05-11 15:08 ` Maxime Peim
1 sibling, 0 replies; 6+ messages in thread
From: Maxime Peim @ 2026-05-11 15:08 UTC (permalink / raw)
To: dev; +Cc: dsosnowski, viacheslavo, bingz, orika, suanmingm, matan
In eSwitch mode, the async (template) flow creation path automatically
prepends implicit match items to scope flow rules to the correct
representor port:
- Ingress: REPRESENTED_PORT item matching dev->data->port_id
- Egress: REG_C_0 TAG item matching the port's tx tag value
The sync path (flow_hw_list_create) was missing this logic, causing all
flow rules created via the non-template API to match traffic from all
ports rather than being scoped to the specific representor.
Add the same implicit item prepending to flow_hw_list_create, right
after pattern validation and before any branching (sample/RSS/single/
prefix), mirroring the behavior of flow_hw_pattern_template_create
and flow_hw_get_rule_items. The ingress case prepends
REPRESENTED_PORT with the current port_id; the egress case prepends
MLX5_RTE_FLOW_ITEM_TYPE_TAG with REG_C_0 value/mask (skipped when
user provides an explicit SQ item).
Also fix a pre-existing bug where 'return split' on metadata split
failure returned a negative int cast to uintptr_t, which callers
would treat as a valid flow handle instead of an error.
Fixes: e38776c36c8a ("net/mlx5: introduce HWS for non-template flow API")
Fixes: 821a6a5cc495 ("net/mlx5: add metadata split for compatibility")
Signed-off-by: Maxime Peim <maxime.peim@gmail.com>
---
v3:
- Factor the implicit-item prepend logic out of
flow_hw_pattern_template_create() into a new helper
flow_hw_adjust_pattern() and reuse it from flow_hw_list_create(),
instead of duplicating the prepend logic inline in the sync path.
- Zero-initialize item_flags in both callers. The validator is
read-modify-write on item_flags (reads MLX5_FLOW_LAYER_TUNNEL on
the first iteration), so leaving it uninitialized was UB.
- Call __flow_hw_pattern_validate() with nt_flow=true from the sync
path (was effectively nt_flow=false via the wrapper), restoring the
previous behavior that skips GENEVE_OPT TLV parser validation on
the non-template path.
- Document flow_hw_adjust_pattern(): the dual role of the nt_flow
parameter (template spec-left-zero vs. sync spec-filled + validator
flag), the three-way return, and the caller's ownership of
*copied_items across every exit path.
- Clarify the "omitting implicit REG_C_0 match" debug log now that
the helper runs on both the template and sync paths.
- Add Fixes: tags for the two original commits.
v4:
- Fix items in case split metadata is not needed.
drivers/net/mlx5/mlx5_flow_hw.c | 194 ++++++++++++++++++++++----------
1 file changed, 132 insertions(+), 62 deletions(-)
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index bca5b2769e..6b3fcb43a7 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -9255,33 +9255,40 @@ pattern_template_validate(struct rte_eth_dev *dev,
return -ret;
}
-/**
- * Create flow item template.
+/*
+ * Validate the user-supplied items and, in eSwitch mode, prepend the implicit
+ * scoping item so the rule/template is bound to the current representor port:
+ * - ingress -> RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT (dev->data->port_id)
+ * - egress -> MLX5_RTE_FLOW_ITEM_TYPE_TAG on REG_C_0 (tx vport tag),
+ * skipped when the user already supplied an SQ item.
*
- * @param[in] dev
- * Pointer to the rte_eth_dev structure.
- * @param[in] attr
- * Pointer to the item template attributes.
- * @param[in] items
- * The template item pattern.
- * @param[out] error
- * Pointer to error structure.
+ * @param nt_flow
+ * Selects between the two call paths that share this helper:
+ * false -> pattern template creation (async API). The prepended item's
+ * spec is left zeroed so mlx5dr matches any value; the live
+ * port_id / tx-tag value is substituted later by
+ * flow_hw_get_rule_items() at rule-create time.
+ * true -> sync (non-template) flow creation. The prepended item's spec
+ * is filled immediately with the live values, and the flag is
+ * forwarded to __flow_hw_pattern_validate() so that validation
+ * paths gated on nt_flow (e.g. GENEVE_OPT TLV parser creation)
+ * take the non-template branch.
*
- * @return
- * Item template pointer on success, NULL otherwise and rte_errno is set.
+ * Return / ownership:
+ * - NULL on validation or allocation failure (error populated).
+ * - `items` unchanged when no prepending is required; *copied_items == NULL.
+ * - A newly-allocated array otherwise; also stored in *copied_items. The
+ * caller must mlx5_free(*copied_items) on every path (it is safe to call
+ * with NULL). Do not free the returned pointer directly.
*/
-static struct rte_flow_pattern_template *
-flow_hw_pattern_template_create(struct rte_eth_dev *dev,
- const struct rte_flow_pattern_template_attr *attr,
- const struct rte_flow_item items[],
- bool external,
- struct rte_flow_error *error)
+static const struct rte_flow_item *
+flow_hw_adjust_pattern(struct rte_eth_dev *dev, const struct rte_flow_pattern_template_attr *attr,
+ bool nt_flow, const struct rte_flow_item *items, uint64_t *item_flags,
+ uint64_t *nb_items, struct rte_flow_item **copied_items,
+ struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct rte_flow_pattern_template *it;
- struct rte_flow_item *copied_items = NULL;
- const struct rte_flow_item *tmpl_items;
- uint64_t orig_item_nb, item_flags = 0;
+ struct rte_flow_item_ethdev port_spec = {.port_id = dev->data->port_id};
struct rte_flow_item port = {
.type = RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT,
.mask = &rte_flow_item_ethdev_mask,
@@ -9298,39 +9305,89 @@ flow_hw_pattern_template_create(struct rte_eth_dev *dev,
.type = (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_TAG,
.spec = &tag_v,
.mask = &tag_m,
- .last = NULL
+ .last = NULL,
};
- int it_items_size;
- unsigned int i = 0;
int rc;
+ if (!copied_items || !item_flags || !nb_items)
+ return NULL;
+
+ if (nt_flow) {
+ port.spec = &port_spec;
+ tag_v.data = flow_hw_tx_tag_regc_value(dev);
+ }
+
+ /*
+ * item_flags must be zero-initialized: __flow_hw_pattern_validate()
+ * OR-accumulates bits into it and reads it (MLX5_FLOW_LAYER_TUNNEL)
+ * on the first iteration.
+ */
+ *item_flags = 0;
+
/* Validate application items only */
- rc = flow_hw_pattern_validate(dev, attr, items, &item_flags, error);
+ rc = __flow_hw_pattern_validate(dev, attr, items, item_flags, nt_flow, error);
if (rc < 0)
return NULL;
- orig_item_nb = rc;
- if (priv->sh->config.dv_esw_en &&
- attr->ingress && !attr->egress && !attr->transfer) {
- copied_items = flow_hw_prepend_item(items, orig_item_nb, &port, error);
- if (!copied_items)
+ *nb_items = rc;
+
+ if (priv->sh->config.dv_esw_en && attr->ingress && !attr->egress && !attr->transfer) {
+ *copied_items = flow_hw_prepend_item(items, *nb_items, &port, error);
+ if (!*copied_items)
return NULL;
- tmpl_items = copied_items;
- } else if (priv->sh->config.dv_esw_en &&
- !attr->ingress && attr->egress && !attr->transfer) {
- if (item_flags & MLX5_FLOW_ITEM_SQ) {
- DRV_LOG(DEBUG, "Port %u omitting implicit REG_C_0 match for egress "
- "pattern template", dev->data->port_id);
- tmpl_items = items;
- goto setup_pattern_template;
+ return *copied_items;
+ } else if (priv->sh->config.dv_esw_en && !attr->ingress && attr->egress &&
+ !attr->transfer) {
+ if (*item_flags & MLX5_FLOW_ITEM_SQ) {
+ DRV_LOG(DEBUG,
+ "Port %u: explicit SQ item present, omitting implicit "
+ "REG_C_0 match for egress pattern",
+ dev->data->port_id);
+ return items;
}
- copied_items = flow_hw_prepend_item(items, orig_item_nb, &tag, error);
- if (!copied_items)
+ *copied_items = flow_hw_prepend_item(items, *nb_items, &tag, error);
+ if (!*copied_items)
return NULL;
- tmpl_items = copied_items;
- } else {
- tmpl_items = items;
+ return *copied_items;
}
-setup_pattern_template:
+ return items;
+}
+
+/**
+ * Create flow item template.
+ *
+ * @param[in] dev
+ * Pointer to the rte_eth_dev structure.
+ * @param[in] attr
+ * Pointer to the item template attributes.
+ * @param[in] items
+ * The template item pattern.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * Item template pointer on success, NULL otherwise and rte_errno is set.
+ */
+static struct rte_flow_pattern_template *
+flow_hw_pattern_template_create(struct rte_eth_dev *dev,
+ const struct rte_flow_pattern_template_attr *attr,
+ const struct rte_flow_item items[],
+ bool external,
+ struct rte_flow_error *error)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct rte_flow_pattern_template *it;
+ struct rte_flow_item *copied_items = NULL;
+ const struct rte_flow_item *tmpl_items;
+ int it_items_size;
+ uint64_t orig_item_nb, item_flags;
+ unsigned int i = 0;
+ int rc;
+
+ tmpl_items = flow_hw_adjust_pattern(dev, attr, false, items, &orig_item_nb, &item_flags,
+ &copied_items, error);
+ if (!tmpl_items)
+ return NULL;
+
it = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*it), 0, SOCKET_ID_ANY);
if (!it) {
rte_flow_error_set(error, ENOMEM,
@@ -14272,7 +14329,6 @@ static uintptr_t flow_hw_list_create(struct rte_eth_dev *dev,
struct rte_flow_hw *prfx_flow = NULL;
const struct rte_flow_action *qrss = NULL;
const struct rte_flow_action *mark = NULL;
- uint64_t item_flags = 0;
uint64_t action_flags = mlx5_flow_hw_action_flags_get(actions, &qrss, &mark,
&encap_idx, &actions_n, error);
struct mlx5_flow_hw_split_resource resource = {
@@ -14289,20 +14345,27 @@ static uintptr_t flow_hw_list_create(struct rte_eth_dev *dev,
.egress = attr->egress,
.transfer = attr->transfer,
};
-
- /* Validate application items only */
- ret = __flow_hw_pattern_validate(dev, &pattern_template_attr, items,
- &item_flags, true, error);
- if (ret < 0)
- return 0;
+ struct rte_flow_item *copied_items = NULL;
+ const struct rte_flow_item *prepend_items;
+ uint64_t orig_item_nb, item_flags;
RTE_SET_USED(encap_idx);
if (!error)
error = &shadow_error;
+
+ prepend_items = flow_hw_adjust_pattern(dev, &pattern_template_attr, true, items,
+ &orig_item_nb, &item_flags, &copied_items, error);
+ if (!prepend_items)
+ return 0;
+
split = mlx5_flow_nta_split_metadata(dev, attr, actions, qrss, action_flags,
actions_n, external, &resource, error);
- if (split < 0)
- return split;
+ if (split < 0) {
+ mlx5_free(copied_items);
+ return 0;
+ } else if (!split) {
+ resource.suffix.items = prepend_items;
+ }
/* Update the metadata copy table - MLX5_FLOW_MREG_CP_TABLE_GROUP */
if (((attr->ingress && attr->group != MLX5_FLOW_MREG_CP_TABLE_GROUP) ||
@@ -14313,23 +14376,26 @@ static uintptr_t flow_hw_list_create(struct rte_eth_dev *dev,
goto free;
}
if (action_flags & MLX5_FLOW_ACTION_SAMPLE) {
- flow = mlx5_nta_sample_flow_list_create(dev, type, attr, items, actions,
+ flow = mlx5_nta_sample_flow_list_create(dev, type, attr, prepend_items, actions,
item_flags, action_flags, error);
- if (flow != NULL)
+ if (flow != NULL) {
+ mlx5_free(copied_items);
return (uintptr_t)flow;
+ }
goto free;
}
if (action_flags & MLX5_FLOW_ACTION_RSS) {
const struct rte_flow_action_rss
*rss_conf = mlx5_flow_nta_locate_rss(dev, actions, error);
- flow = mlx5_flow_nta_handle_rss(dev, attr, items, actions, rss_conf,
- item_flags, action_flags, external,
- type, error);
+ flow = mlx5_flow_nta_handle_rss(dev, attr, prepend_items, actions, rss_conf,
+ item_flags, action_flags, external, type, error);
if (flow) {
flow->nt2hws->rix_mreg_copy = cpy_idx;
cpy_idx = 0;
- if (!split)
+ if (!split) {
+ mlx5_free(copied_items);
return (uintptr_t)flow;
+ }
goto prefix_flow;
}
goto free;
@@ -14343,12 +14409,14 @@ static uintptr_t flow_hw_list_create(struct rte_eth_dev *dev,
if (flow) {
flow->nt2hws->rix_mreg_copy = cpy_idx;
cpy_idx = 0;
- if (!split)
+ if (!split) {
+ mlx5_free(copied_items);
return (uintptr_t)flow;
+ }
/* Fall Through to prefix flow creation. */
}
prefix_flow:
- ret = mlx5_flow_hw_create_flow(dev, type, attr, items, resource.prefix.actions,
+ ret = mlx5_flow_hw_create_flow(dev, type, attr, prepend_items, resource.prefix.actions,
item_flags, action_flags, external, &prfx_flow, error);
if (ret)
goto free;
@@ -14357,6 +14425,7 @@ static uintptr_t flow_hw_list_create(struct rte_eth_dev *dev,
flow->nt2hws->chaned_flow = 1;
SLIST_INSERT_AFTER(prfx_flow, flow, nt2hws->next);
mlx5_flow_nta_split_resource_free(dev, &resource);
+ mlx5_free(copied_items);
return (uintptr_t)prfx_flow;
}
free:
@@ -14368,6 +14437,7 @@ static uintptr_t flow_hw_list_create(struct rte_eth_dev *dev,
mlx5_flow_nta_del_copy_action(dev, cpy_idx);
if (split > 0)
mlx5_flow_nta_split_resource_free(dev, &resource);
+ mlx5_free(copied_items);
return 0;
}
--
2.43.0
^ permalink raw reply related [flat|nested] 6+ messages in thread
end of thread, other threads:[~2026-05-12 7:56 UTC | newest]
Thread overview: 6+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2026-04-09 18:56 [PATCH] net/mlx5: prepend implicit items in sync flow creation path Maxime Peim
2026-04-20 8:52 ` [PATCH v2] " Maxime Peim
2026-04-24 9:37 ` Dariusz Sosnowski
2026-04-27 12:32 ` [PATCH v3] " Maxime Peim
2026-05-08 12:40 ` Dariusz Sosnowski
2026-05-11 15:08 ` [PATCH v4] " Maxime Peim
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox