From: Saeed Mahameed <saeed@kernel.org>
To: "David S. Miller" <davem@davemloft.net>,
Jakub Kicinski <kuba@kernel.org>, Paolo Abeni <pabeni@redhat.com>,
Eric Dumazet <edumazet@google.com>
Cc: Saeed Mahameed <saeedm@nvidia.com>,
netdev@vger.kernel.org, Tariq Toukan <tariqt@nvidia.com>,
Moshe Shemesh <moshe@nvidia.com>
Subject: [net-next 12/15] net/mlx5: SRIOV, Remove two unused ingress flow groups
Date: Sat, 3 Dec 2022 14:13:34 -0800 [thread overview]
Message-ID: <20221203221337.29267-13-saeed@kernel.org> (raw)
In-Reply-To: <20221203221337.29267-1-saeed@kernel.org>
From: Moshe Shemesh <moshe@nvidia.com>
As in SRIOV ingress ACL table we use only one rule for allowed traffic
and one drop rule, there is no need in four flow groups. Since the
groups can be created dynamically on configuration changes, the group
layout can be changed dynamically as well, instead of creating four
different groups with four different layouts statically.
Set two flow groups according to the needed flow steering rules and
remove the other unused groups. To avoid resetting the flow counter
while recreating the flow table, handle the flow counter
separately.
Signed-off-by: Moshe Shemesh <moshe@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
---
.../mellanox/mlx5/core/esw/acl/ingress_lgcy.c | 173 +++++++-----------
.../mellanox/mlx5/core/esw/acl/lgcy.h | 4 +
.../ethernet/mellanox/mlx5/core/esw/legacy.c | 3 +
.../net/ethernet/mellanox/mlx5/core/eswitch.h | 4 +-
4 files changed, 72 insertions(+), 112 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c
index b1a5199260f6..0b37edb9490d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c
@@ -33,9 +33,12 @@ static int esw_acl_ingress_lgcy_groups_create(struct mlx5_eswitch *esw,
MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
MLX5_MATCH_OUTER_HEADERS);
- MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
- MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_47_16);
- MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_15_0);
+ if (vport->info.vlan || vport->info.qos)
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
+ if (vport->info.spoofchk) {
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_47_16);
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_15_0);
+ }
MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);
@@ -44,47 +47,14 @@ static int esw_acl_ingress_lgcy_groups_create(struct mlx5_eswitch *esw,
err = PTR_ERR(g);
esw_warn(dev, "vport[%d] ingress create untagged spoofchk flow group, err(%d)\n",
vport->vport, err);
- goto spoof_err;
+ goto allow_err;
}
- vport->ingress.legacy.allow_untagged_spoofchk_grp = g;
+ vport->ingress.legacy.allow_grp = g;
memset(flow_group_in, 0, inlen);
- MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
- MLX5_MATCH_OUTER_HEADERS);
- MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1);
MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
- g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
- if (IS_ERR(g)) {
- err = PTR_ERR(g);
- esw_warn(dev, "vport[%d] ingress create untagged flow group, err(%d)\n",
- vport->vport, err);
- goto untagged_err;
- }
- vport->ingress.legacy.allow_untagged_only_grp = g;
-
- memset(flow_group_in, 0, inlen);
- MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
- MLX5_MATCH_OUTER_HEADERS);
- MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_47_16);
- MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_15_0);
- MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 2);
- MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 2);
-
- g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
- if (IS_ERR(g)) {
- err = PTR_ERR(g);
- esw_warn(dev, "vport[%d] ingress create spoofchk flow group, err(%d)\n",
- vport->vport, err);
- goto allow_spoof_err;
- }
- vport->ingress.legacy.allow_spoofchk_only_grp = g;
-
- memset(flow_group_in, 0, inlen);
- MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 3);
- MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 3);
-
g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
if (IS_ERR(g)) {
err = PTR_ERR(g);
@@ -97,38 +67,20 @@ static int esw_acl_ingress_lgcy_groups_create(struct mlx5_eswitch *esw,
return 0;
drop_err:
- if (!IS_ERR_OR_NULL(vport->ingress.legacy.allow_spoofchk_only_grp)) {
- mlx5_destroy_flow_group(vport->ingress.legacy.allow_spoofchk_only_grp);
- vport->ingress.legacy.allow_spoofchk_only_grp = NULL;
+ if (!IS_ERR_OR_NULL(vport->ingress.legacy.allow_grp)) {
+ mlx5_destroy_flow_group(vport->ingress.legacy.allow_grp);
+ vport->ingress.legacy.allow_grp = NULL;
}
-allow_spoof_err:
- if (!IS_ERR_OR_NULL(vport->ingress.legacy.allow_untagged_only_grp)) {
- mlx5_destroy_flow_group(vport->ingress.legacy.allow_untagged_only_grp);
- vport->ingress.legacy.allow_untagged_only_grp = NULL;
- }
-untagged_err:
- if (!IS_ERR_OR_NULL(vport->ingress.legacy.allow_untagged_spoofchk_grp)) {
- mlx5_destroy_flow_group(vport->ingress.legacy.allow_untagged_spoofchk_grp);
- vport->ingress.legacy.allow_untagged_spoofchk_grp = NULL;
- }
-spoof_err:
+allow_err:
kvfree(flow_group_in);
return err;
}
static void esw_acl_ingress_lgcy_groups_destroy(struct mlx5_vport *vport)
{
- if (vport->ingress.legacy.allow_spoofchk_only_grp) {
- mlx5_destroy_flow_group(vport->ingress.legacy.allow_spoofchk_only_grp);
- vport->ingress.legacy.allow_spoofchk_only_grp = NULL;
- }
- if (vport->ingress.legacy.allow_untagged_only_grp) {
- mlx5_destroy_flow_group(vport->ingress.legacy.allow_untagged_only_grp);
- vport->ingress.legacy.allow_untagged_only_grp = NULL;
- }
- if (vport->ingress.legacy.allow_untagged_spoofchk_grp) {
- mlx5_destroy_flow_group(vport->ingress.legacy.allow_untagged_spoofchk_grp);
- vport->ingress.legacy.allow_untagged_spoofchk_grp = NULL;
+ if (vport->ingress.legacy.allow_grp) {
+ mlx5_destroy_flow_group(vport->ingress.legacy.allow_grp);
+ vport->ingress.legacy.allow_grp = NULL;
}
if (vport->ingress.legacy.drop_grp) {
mlx5_destroy_flow_group(vport->ingress.legacy.drop_grp);
@@ -143,56 +95,33 @@ int esw_acl_ingress_lgcy_setup(struct mlx5_eswitch *esw,
struct mlx5_flow_destination *dst = NULL;
struct mlx5_flow_act flow_act = {};
struct mlx5_flow_spec *spec = NULL;
- struct mlx5_fc *counter = NULL;
- /* The ingress acl table contains 4 groups
- * (2 active rules at the same time -
- * 1 allow rule from one of the first 3 groups.
- * 1 drop rule from the last group):
- * 1)Allow untagged traffic with smac=original mac.
- * 2)Allow untagged traffic.
- * 3)Allow traffic with smac=original mac.
- * 4)Drop all other traffic.
+ struct mlx5_fc *counter;
+ /* The ingress acl table contains 2 groups
+ * 1)Allowed traffic according to tagging and spoofcheck settings
+ * 2)Drop all other traffic.
*/
- int table_size = 4;
+ int table_size = 2;
int dest_num = 0;
int err = 0;
u8 *smac_v;
- esw_acl_ingress_lgcy_rules_destroy(vport);
-
- if (vport->ingress.legacy.drop_counter) {
- counter = vport->ingress.legacy.drop_counter;
- } else if (MLX5_CAP_ESW_INGRESS_ACL(esw->dev, flow_counter)) {
- counter = mlx5_fc_create(esw->dev, false);
- if (IS_ERR(counter)) {
- esw_warn(esw->dev,
- "vport[%d] configure ingress drop rule counter failed\n",
- vport->vport);
- counter = NULL;
- }
- vport->ingress.legacy.drop_counter = counter;
- }
-
- if (!vport->info.vlan && !vport->info.qos && !vport->info.spoofchk) {
- esw_acl_ingress_lgcy_cleanup(esw, vport);
+ esw_acl_ingress_lgcy_cleanup(esw, vport);
+ if (!vport->info.vlan && !vport->info.qos && !vport->info.spoofchk)
return 0;
- }
- if (!vport->ingress.acl) {
- vport->ingress.acl = esw_acl_table_create(esw, vport,
- MLX5_FLOW_NAMESPACE_ESW_INGRESS,
- table_size);
- if (IS_ERR(vport->ingress.acl)) {
- err = PTR_ERR(vport->ingress.acl);
- vport->ingress.acl = NULL;
- return err;
- }
-
- err = esw_acl_ingress_lgcy_groups_create(esw, vport);
- if (err)
- goto out;
+ vport->ingress.acl = esw_acl_table_create(esw, vport,
+ MLX5_FLOW_NAMESPACE_ESW_INGRESS,
+ table_size);
+ if (IS_ERR(vport->ingress.acl)) {
+ err = PTR_ERR(vport->ingress.acl);
+ vport->ingress.acl = NULL;
+ return err;
}
+ err = esw_acl_ingress_lgcy_groups_create(esw, vport);
+ if (err)
+ goto out;
+
esw_debug(esw->dev,
"vport[%d] configure ingress rules, vlan(%d) qos(%d)\n",
vport->vport, vport->info.vlan, vport->info.qos);
@@ -235,6 +164,7 @@ int esw_acl_ingress_lgcy_setup(struct mlx5_eswitch *esw,
memset(&flow_act, 0, sizeof(flow_act));
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
/* Attach drop flow counter */
+ counter = vport->ingress.legacy.drop_counter;
if (counter) {
flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
drop_ctr_dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
@@ -266,17 +196,42 @@ void esw_acl_ingress_lgcy_cleanup(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
if (IS_ERR_OR_NULL(vport->ingress.acl))
- goto clean_drop_counter;
+ return;
esw_debug(esw->dev, "Destroy vport[%d] E-Switch ingress ACL\n", vport->vport);
esw_acl_ingress_lgcy_rules_destroy(vport);
esw_acl_ingress_lgcy_groups_destroy(vport);
esw_acl_ingress_table_destroy(vport);
+}
+
+void esw_acl_ingress_lgcy_create_counter(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
+{
+ struct mlx5_fc *counter;
-clean_drop_counter:
- if (vport->ingress.legacy.drop_counter) {
- mlx5_fc_destroy(esw->dev, vport->ingress.legacy.drop_counter);
- vport->ingress.legacy.drop_counter = NULL;
+ vport->ingress.legacy.drop_counter = NULL;
+
+ if (!MLX5_CAP_ESW_INGRESS_ACL(esw->dev, flow_counter))
+ return;
+
+ counter = mlx5_fc_create(esw->dev, false);
+ if (IS_ERR(counter)) {
+ esw_warn(esw->dev,
+ "vport[%d] configure ingress drop rule counter failed\n",
+ vport->vport);
+ return;
}
+
+ vport->ingress.legacy.drop_counter = counter;
+}
+
+void esw_acl_ingress_lgcy_destroy_counter(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
+{
+ if (!vport->ingress.legacy.drop_counter)
+ return;
+
+ mlx5_fc_destroy(esw->dev, vport->ingress.legacy.drop_counter);
+ vport->ingress.legacy.drop_counter = NULL;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/lgcy.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/lgcy.h
index 44c152da3d83..c4a624ffca43 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/lgcy.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/lgcy.h
@@ -13,5 +13,9 @@ void esw_acl_egress_lgcy_cleanup(struct mlx5_eswitch *esw, struct mlx5_vport *vp
/* Eswitch acl ingress external APIs */
int esw_acl_ingress_lgcy_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
void esw_acl_ingress_lgcy_cleanup(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
+void esw_acl_ingress_lgcy_create_counter(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport);
+void esw_acl_ingress_lgcy_destroy_counter(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport);
#endif /* __MLX5_ESWITCH_ACL_LGCY_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c
index fabe49a35a5c..97a104668723 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c
@@ -356,6 +356,7 @@ int esw_legacy_vport_acl_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vpor
if (mlx5_esw_is_manager_vport(esw, vport->vport))
return 0;
+ esw_acl_ingress_lgcy_create_counter(esw, vport);
ret = esw_acl_ingress_lgcy_setup(esw, vport);
if (ret)
goto ingress_err;
@@ -369,6 +370,7 @@ int esw_legacy_vport_acl_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vpor
egress_err:
esw_acl_ingress_lgcy_cleanup(esw, vport);
ingress_err:
+ esw_acl_ingress_lgcy_destroy_counter(esw, vport);
return ret;
}
@@ -379,6 +381,7 @@ void esw_legacy_vport_acl_cleanup(struct mlx5_eswitch *esw, struct mlx5_vport *v
esw_acl_egress_lgcy_cleanup(esw, vport);
esw_acl_ingress_lgcy_cleanup(esw, vport);
+ esw_acl_ingress_lgcy_destroy_counter(esw, vport);
}
int mlx5_esw_query_vport_drop_stats(struct mlx5_core_dev *dev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 42d9df417e20..b7779826e725 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -97,9 +97,7 @@ struct vport_ingress {
struct mlx5_flow_table *acl;
struct mlx5_flow_handle *allow_rule;
struct {
- struct mlx5_flow_group *allow_spoofchk_only_grp;
- struct mlx5_flow_group *allow_untagged_spoofchk_grp;
- struct mlx5_flow_group *allow_untagged_only_grp;
+ struct mlx5_flow_group *allow_grp;
struct mlx5_flow_group *drop_grp;
struct mlx5_flow_handle *drop_rule;
struct mlx5_fc *drop_counter;
--
2.38.1
next prev parent reply other threads:[~2022-12-03 22:14 UTC|newest]
Thread overview: 25+ messages / expand[flat|nested] mbox.gz Atom feed top
2022-12-03 22:13 [pull request][net-next 00/15] mlx5 updates 2022-12-03 Saeed Mahameed
2022-12-03 22:13 ` [net-next 01/15] net/mlx5e: E-Switch, handle flow attribute with no destinations Saeed Mahameed
2022-12-03 22:13 ` [net-next 02/15] net/mlx5: fs, assert null dest pointer when dest_num is 0 Saeed Mahameed
2022-12-03 22:13 ` [net-next 03/15] net/mlx5e: TC, reuse flow attribute post parser processing Saeed Mahameed
2022-12-03 22:13 ` [net-next 04/15] net/mlx5e: TC, add terminating actions Saeed Mahameed
2022-12-03 22:13 ` [net-next 05/15] net/mlx5e: TC, validate action list per attribute Saeed Mahameed
2022-12-03 22:13 ` [net-next 06/15] net/mlx5e: TC, set control params for branching actions Saeed Mahameed
2022-12-03 22:13 ` [net-next 07/15] net/mlx5e: TC, initialize branch flow attributes Saeed Mahameed
2022-12-03 22:13 ` [net-next 08/15] net/mlx5e: TC, initialize branching action with target attr Saeed Mahameed
2022-12-03 22:13 ` [net-next 09/15] net/mlx5e: TC, rename post_meter actions Saeed Mahameed
2022-12-03 22:13 ` [net-next 10/15] net/mlx5e: TC, init post meter rules with branching attributes Saeed Mahameed
2022-12-03 22:13 ` [net-next 11/15] net/mlx5e: TC, allow meter jump control action Saeed Mahameed
2022-12-03 22:13 ` Saeed Mahameed [this message]
2022-12-03 22:13 ` [net-next 13/15] net/mlx5: SRIOV, Recreate egress ACL table on config change Saeed Mahameed
2022-12-03 22:13 ` [net-next 14/15] net/mlx5: SRIOV, Add 802.1ad VST support Saeed Mahameed
2022-12-07 4:34 ` Jakub Kicinski
2022-12-07 5:20 ` Saeed Mahameed
2022-12-07 17:25 ` Jakub Kicinski
2022-12-08 8:28 ` Saeed Mahameed
2022-12-09 1:04 ` Jakub Kicinski
2022-12-09 1:57 ` Saeed Mahameed
2022-12-09 2:04 ` Jakub Kicinski
2022-12-09 5:05 ` Saeed Mahameed
2022-12-09 16:34 ` Jakub Kicinski
2022-12-03 22:13 ` [net-next 15/15] net/mlx5: SRIOV, Allow ingress tagged packets on VST Saeed Mahameed
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20221203221337.29267-13-saeed@kernel.org \
--to=saeed@kernel.org \
--cc=davem@davemloft.net \
--cc=edumazet@google.com \
--cc=kuba@kernel.org \
--cc=moshe@nvidia.com \
--cc=netdev@vger.kernel.org \
--cc=pabeni@redhat.com \
--cc=saeedm@nvidia.com \
--cc=tariqt@nvidia.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).