From: Saeed Mahameed <saeed@kernel.org>
To: "David S. Miller" <davem@davemloft.net>,
Jakub Kicinski <kuba@kernel.org>, Paolo Abeni <pabeni@redhat.com>,
Eric Dumazet <edumazet@google.com>
Cc: Saeed Mahameed <saeedm@nvidia.com>,
netdev@vger.kernel.org, Tariq Toukan <tariqt@nvidia.com>,
Vlad Buslov <vladbu@nvidia.com>, Oz Shlomo <ozsh@nvidia.com>,
Paul Blakey <paulb@nvidia.com>
Subject: [net-next 5/9] net/mlx5e: Implement CT entry update
Date: Wed, 15 Feb 2023 16:09:14 -0800
Message-ID: <20230216000918.235103-6-saeed@kernel.org>
In-Reply-To: <20230216000918.235103-1-saeed@kernel.org>

From: Vlad Buslov <vladbu@nvidia.com>

With support for UDP NEW offload, the flow_table may now send updates for
existing flows. Support properly replacing existing entries by updating the
flow restore_cookie and replacing the rule with a new one that has the same
match but a new mod_hdr action setting the updated ctinfo.

Signed-off-by: Vlad Buslov <vladbu@nvidia.com>
Reviewed-by: Oz Shlomo <ozsh@nvidia.com>
Reviewed-by: Paul Blakey <paulb@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
---
.../ethernet/mellanox/mlx5/core/en/tc_ct.c | 118 +++++++++++++++++-
1 file changed, 117 insertions(+), 1 deletion(-)
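
The replace path follows a make-before-break pattern: the new rule and
mod_hdr are created first, and the old ones are released only after the
new rule is installed, so matching traffic never hits a window with no
rule. Below is a minimal userspace sketch of that pattern; rule_install()
and rule_remove() are hypothetical stand-ins for the fs_ops
ct_rule_add/ct_rule_del callbacks, not driver API.

/* Make-before-break replace, sketched in plain C. */
#include <stdio.h>
#include <stdlib.h>

struct rule {
	int ctinfo;	/* state the rule's mod_hdr would write back */
};

static struct rule *rule_install(int ctinfo)
{
	struct rule *r = malloc(sizeof(*r));

	if (r) {
		r->ctinfo = ctinfo;
		printf("installed rule, ctinfo %d\n", ctinfo);
	}
	return r;
}

static void rule_remove(struct rule *r)
{
	printf("removed rule, ctinfo %d\n", r->ctinfo);
	free(r);
}

/* Replace *slot with a rule carrying new_ctinfo; on failure the old
 * rule stays installed and valid.
 */
static int rule_replace(struct rule **slot, int new_ctinfo)
{
	struct rule *new_rule = rule_install(new_ctinfo);

	if (!new_rule)
		return -1;
	rule_remove(*slot);	/* break only after make succeeded */
	*slot = new_rule;
	return 0;
}

int main(void)
{
	struct rule *r = rule_install(1);

	if (!r || rule_replace(&r, 2))
		return 1;
	rule_remove(r);
	return 0;
}
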
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
index 193562c14c44..76e86f83b6ac 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
@@ -871,6 +871,68 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
return err;
}

+static int
+mlx5_tc_ct_entry_replace_rule(struct mlx5_tc_ct_priv *ct_priv,
+ struct flow_rule *flow_rule,
+ struct mlx5_ct_entry *entry,
+ bool nat, u8 zone_restore_id)
+{
+ struct mlx5_ct_zone_rule *zone_rule = &entry->zone_rules[nat];
+ struct mlx5_flow_attr *attr = zone_rule->attr, *old_attr;
+ struct mlx5e_mod_hdr_handle *mh;
+ struct mlx5_ct_fs_rule *rule;
+ struct mlx5_flow_spec *spec;
+ int err;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return -ENOMEM;
+
+ old_attr = mlx5_alloc_flow_attr(ct_priv->ns_type);
+ if (!old_attr) {
+ err = -ENOMEM;
+ goto err_attr;
+ }
+ *old_attr = *attr;
+
+ err = mlx5_tc_ct_entry_create_mod_hdr(ct_priv, attr, flow_rule, &mh, zone_restore_id,
+ nat, mlx5_tc_ct_entry_has_nat(entry));
+ if (err) {
+ ct_dbg("Failed to create ct entry mod hdr");
+ goto err_mod_hdr;
+ }
+
+ mlx5_tc_ct_set_tuple_match(ct_priv, spec, flow_rule);
+ mlx5e_tc_match_to_reg_match(spec, ZONE_TO_REG, entry->tuple.zone, MLX5_CT_ZONE_MASK);
+
+ rule = ct_priv->fs_ops->ct_rule_add(ct_priv->fs, spec, attr, flow_rule);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ ct_dbg("Failed to add replacement ct entry rule, nat: %d", nat);
+ goto err_rule;
+ }
+
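+ /* The new rule and mod_hdr are in place; release the old ones only
+ * now, so matching traffic never sees a window without a rule
+ * (make-before-break).
+ */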
+ ct_priv->fs_ops->ct_rule_del(ct_priv->fs, zone_rule->rule);
+ zone_rule->rule = rule;
+ mlx5_tc_ct_entry_destroy_mod_hdr(ct_priv, old_attr, zone_rule->mh);
+ zone_rule->mh = mh;
+
+ kfree(old_attr);
+ kvfree(spec);
+ ct_dbg("Replaced ct entry rule in zone %d", entry->tuple.zone);
+
+ return 0;
+
+err_rule:
+ mlx5_tc_ct_entry_destroy_mod_hdr(ct_priv, zone_rule->attr, mh);
+ mlx5_put_label_mapping(ct_priv, attr->ct_attr.ct_labels_id);
+err_mod_hdr:
+ kfree(old_attr);
+err_attr:
+ kvfree(spec);
+ return err;
+}
+
static bool
mlx5_tc_ct_entry_valid(struct mlx5_ct_entry *entry)
{
@@ -1065,6 +1127,52 @@ mlx5_tc_ct_entry_add_rules(struct mlx5_tc_ct_priv *ct_priv,
return err;
}

+static int
+mlx5_tc_ct_entry_replace_rules(struct mlx5_tc_ct_priv *ct_priv,
+ struct flow_rule *flow_rule,
+ struct mlx5_ct_entry *entry,
+ u8 zone_restore_id)
+{
+ int err;
+
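+ /* Each entry owns a non-NAT and a NAT rule; replace both, and if
+ * replacing the NAT rule fails, delete the already-replaced non-NAT
+ * rule so the caller can tear the entry down.
+ */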
+ err = mlx5_tc_ct_entry_replace_rule(ct_priv, flow_rule, entry, false,
+ zone_restore_id);
+ if (err)
+ return err;
+
+ err = mlx5_tc_ct_entry_replace_rule(ct_priv, flow_rule, entry, true,
+ zone_restore_id);
+ if (err)
+ mlx5_tc_ct_entry_del_rule(ct_priv, entry, false);
+ return err;
+}
+
+static int
+mlx5_tc_ct_block_flow_offload_replace(struct mlx5_ct_ft *ft, struct flow_rule *flow_rule,
+ struct mlx5_ct_entry *entry, unsigned long cookie)
+{
+ struct mlx5_tc_ct_priv *ct_priv = ft->ct_priv;
+ int err;
+
+ err = mlx5_tc_ct_entry_replace_rules(ct_priv, flow_rule, entry, ft->zone_restore_id);
+ if (!err)
+ return 0;
+
+ /* If failed to update the entry, then look it up again under ht_lock
+ * protection and properly delete it.
+ */
+ spin_lock_bh(&ct_priv->ht_lock);
+ entry = rhashtable_lookup_fast(&ft->ct_entries_ht, &cookie, cts_ht_params);
+ if (entry) {
+ rhashtable_remove_fast(&ft->ct_entries_ht, &entry->node, cts_ht_params);
+ spin_unlock_bh(&ct_priv->ht_lock);
+ mlx5_tc_ct_entry_put(entry);
+ } else {
+ spin_unlock_bh(&ct_priv->ht_lock);
+ }
+ return err;
+}
+
static int
mlx5_tc_ct_block_flow_offload_add(struct mlx5_ct_ft *ft,
struct flow_cls_offload *flow)
@@ -1087,9 +1195,17 @@ mlx5_tc_ct_block_flow_offload_add(struct mlx5_ct_ft *ft,
spin_lock_bh(&ct_priv->ht_lock);
entry = rhashtable_lookup_fast(&ft->ct_entries_ht, &cookie, cts_ht_params);
if (entry && refcount_inc_not_zero(&entry->refcnt)) {
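+ /* Same restore cookie means the entry is unchanged; keep returning
+ * -EEXIST as before. Otherwise this is an update from the flow
+ * table, so replace the offloaded rules.
+ */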
+ if (entry->restore_cookie == meta_action->ct_metadata.cookie) {
+ spin_unlock_bh(&ct_priv->ht_lock);
+ mlx5_tc_ct_entry_put(entry);
+ return -EEXIST;
+ }
+ entry->restore_cookie = meta_action->ct_metadata.cookie;
spin_unlock_bh(&ct_priv->ht_lock);
+
+ err = mlx5_tc_ct_block_flow_offload_replace(ft, flow_rule, entry, cookie);
mlx5_tc_ct_entry_put(entry);
- return -EEXIST;
+ return err;
}
spin_unlock_bh(&ct_priv->ht_lock);
--
2.39.1