From: Saeed Mahameed <saeed@kernel.org>
To: "David S. Miller" <davem@davemloft.net>,
	Jakub Kicinski <kuba@kernel.org>, Paolo Abeni <pabeni@redhat.com>,
	Eric Dumazet <edumazet@google.com>
Cc: Saeed Mahameed <saeedm@nvidia.com>,
	netdev@vger.kernel.org, Tariq Toukan <tariqt@nvidia.com>,
	Lior Nahmanson <liorna@nvidia.com>, Raed Salem <raeds@nvidia.com>
Subject: [PATCH net-next V2 15/17] net/mlx5e: Add MACsec offload SecY support
Date: Mon,  5 Sep 2022 22:21:27 -0700	[thread overview]
Message-ID: <20220906052129.104507-16-saeed@kernel.org> (raw)
In-Reply-To: <20220906052129.104507-1-saeed@kernel.org>

From: Lior Nahmanson <liorna@nvidia.com>

Add offload support for the MACsec SecY callbacks - add, update and delete.
add_secy is called when a new MACsec interface needs to be created.
upd_secy is called when the source MAC address or the Tx SC is changed.
del_secy is called when the MACsec interface needs to be destroyed.
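
For reference, the handlers are wired into the driver's macsec_ops table by the
hunk at the end of this patch; a minimal sketch of the resulting table (the SA
ops shown as an ellipsis were added by earlier patches in this series):

    static const struct macsec_ops macsec_offload_ops = {
            /* ... mdo_*_txsa / mdo_*_rxsa ops from previous patches ... */
            .mdo_add_secy = mlx5e_macsec_add_secy,
            .mdo_upd_secy = mlx5e_macsec_upd_secy,
            .mdo_del_secy = mlx5e_macsec_del_secy,
    };

As the early "if (ctx->prepare) return 0;" checks show, each handler is a no-op
during the MACsec core's prepare phase and only commits state in the follow-up
commit call.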

Signed-off-by: Lior Nahmanson <liorna@nvidia.com>
Reviewed-by: Raed Salem <raeds@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
---
 .../mellanox/mlx5/core/en_accel/macsec.c      | 229 ++++++++++++++++++
 1 file changed, 229 insertions(+)

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
index d5559b4fce05..90ce4fe618b3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
@@ -66,6 +66,7 @@ struct mlx5e_macsec {
 	/* Rx fs_id -> rx_sc mapping */
 	struct xarray sc_xarray;
 
+	unsigned char *dev_addr;
 	struct mlx5_core_dev *mdev;
 };
 
@@ -243,6 +244,42 @@ static int mlx5e_macsec_update_rx_sa(struct mlx5e_macsec *macsec,
 	return 0;
 }
 
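+/* The ConnectX offload only supports a subset of SecY configurations: strict
+ * frame validation, the default ICV length and protect_frames set, with
+ * neither XPN nor replay protection offloaded.  Reject anything else early.
+ */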
+static bool mlx5e_macsec_secy_features_validate(struct macsec_context *ctx)
+{
+	const struct net_device *netdev = ctx->netdev;
+	const struct macsec_secy *secy = ctx->secy;
+
+	if (secy->validate_frames != MACSEC_VALIDATE_STRICT) {
+		netdev_err(netdev,
+			   "MACsec offload is supported only when validate_frames is in strict mode\n");
+		return false;
+	}
+
+	if (secy->icv_len != MACSEC_DEFAULT_ICV_LEN) {
+		netdev_err(netdev, "MACsec offload is supported only when icv_len is %d\n",
+			   MACSEC_DEFAULT_ICV_LEN);
+		return false;
+	}
+
+	if (!secy->protect_frames) {
+		netdev_err(netdev,
+			   "MACsec offload is supported only when protect_frames is set\n");
+		return false;
+	}
+
+	if (secy->xpn) {
+		netdev_err(netdev, "MACsec offload: xpn is not supported\n");
+		return false;
+	}
+
+	if (secy->replay_protect) {
+		netdev_err(netdev, "MACsec offload: replay protection is not supported\n");
+		return false;
+	}
+
+	return true;
+}
+
 static int mlx5e_macsec_add_txsa(struct macsec_context *ctx)
 {
 	const struct macsec_tx_sc *tx_sc = &ctx->secy->tx_sc;
@@ -764,6 +801,195 @@ static int mlx5e_macsec_del_rxsa(struct macsec_context *ctx)
 	return err;
 }
 
+static int mlx5e_macsec_add_secy(struct macsec_context *ctx)
+{
+	struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
+	const struct net_device *dev = ctx->secy->netdev;
+	const struct net_device *netdev = ctx->netdev;
+	struct mlx5e_macsec *macsec;
+	int err = 0;
+
+	if (ctx->prepare)
+		return 0;
+
+	if (!mlx5e_macsec_secy_features_validate(ctx))
+		return -EINVAL;
+
+	mutex_lock(&priv->macsec->lock);
+
+	macsec = priv->macsec;
+
+	if (macsec->dev_addr) {
+		netdev_err(netdev, "Currently, only one MACsec offload device can be set\n");
+		err = -EINVAL;
+		goto out;
+	}
+
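+	/* Remember the SecY netdevice MAC address; mlx5e_macsec_upd_secy() compares
+	 * it against the current address to tell a MAC change from a changelink update.
+	 */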
+	macsec->dev_addr = kzalloc(dev->addr_len, GFP_KERNEL);
+	if (!macsec->dev_addr) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	memcpy(macsec->dev_addr, dev->dev_addr, dev->addr_len);
+out:
+	mutex_unlock(&macsec->lock);
+
+	return err;
+}
+
+static int macsec_upd_secy_hw_address(struct macsec_context *ctx)
+{
+	struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
+	const struct net_device *dev = ctx->secy->netdev;
+	struct mlx5e_macsec *macsec = priv->macsec;
+	struct mlx5e_macsec_rx_sc *rx_sc, *tmp;
+	struct mlx5e_macsec_sa *rx_sa;
+	struct list_head *list;
+	int i, err = 0;
+
+	list = &macsec->macsec_rx_sc_list_head;
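+	/* First pass: remove the currently installed Rx rules */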
+	list_for_each_entry_safe(rx_sc, tmp, list, rx_sc_list_element) {
+		for (i = 0; i < MACSEC_NUM_AN; ++i) {
+			rx_sa = rx_sc->rx_sa[i];
+			if (!rx_sa || !rx_sa->macsec_rule)
+				continue;
+
+			mlx5e_macsec_cleanup_sa(macsec, rx_sa, false);
+		}
+	}
+
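+	/* Second pass: re-create the Rx rules for the SAs that are still active */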
+	list_for_each_entry_safe(rx_sc, tmp, list, rx_sc_list_element) {
+		for (i = 0; i < MACSEC_NUM_AN; ++i) {
+			rx_sa = rx_sc->rx_sa[i];
+			if (!rx_sa)
+				continue;
+
+			if (rx_sa->active) {
+				err = mlx5e_macsec_init_sa(ctx, rx_sa, false, false);
+				if (err)
+					goto out;
+			}
+		}
+	}
+
+	memcpy(macsec->dev_addr, dev->dev_addr, dev->addr_len);
+out:
+	return err;
+}
+
+/* This function is called from two macsec ops functions:
+ *  macsec_set_mac_address - the MAC address was changed, therefore we need to
+ *  destroy and re-create the Tx contexts (macsec object + steering).
+ *  macsec_changelink - the Tx SC or SecY may have been changed, therefore we
+ *  need to destroy the Tx and Rx contexts (macsec object + steering).
+ */
+static int mlx5e_macsec_upd_secy(struct macsec_context *ctx)
+{
+	const struct macsec_tx_sc *tx_sc = &ctx->secy->tx_sc;
+	struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
+	const struct net_device *dev = ctx->secy->netdev;
+	struct mlx5e_macsec_sa *tx_sa;
+	struct mlx5e_macsec *macsec;
+	int i, err = 0;
+
+	if (ctx->prepare)
+		return 0;
+
+	if (!mlx5e_macsec_secy_features_validate(ctx))
+		return -EINVAL;
+
+	mutex_lock(&priv->macsec->lock);
+
+	macsec = priv->macsec;
+
+	/* If the dev_addr hasn't changed, it means the callback is from macsec_changelink */
+	if (!memcmp(macsec->dev_addr, dev->dev_addr, dev->addr_len)) {
+		err = macsec_upd_secy_hw_address(ctx);
+		if (err)
+			goto out;
+	}
+
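+	/* Tear down all Tx contexts, then re-create one below for the active encoding SA */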
+	for (i = 0; i < MACSEC_NUM_AN; ++i) {
+		tx_sa = macsec->tx_sa[i];
+		if (!tx_sa)
+			continue;
+
+		mlx5e_macsec_cleanup_sa(macsec, tx_sa, true);
+	}
+
+	for (i = 0; i < MACSEC_NUM_AN; ++i) {
+		tx_sa = macsec->tx_sa[i];
+		if (!tx_sa)
+			continue;
+
+		if (tx_sa->assoc_num == tx_sc->encoding_sa && tx_sa->active) {
+			err = mlx5e_macsec_init_sa(ctx, tx_sa, tx_sc->encrypt, true);
+			if (err)
+				goto out;
+		}
+	}
+
+out:
+	mutex_unlock(&macsec->lock);
+
+	return err;
+}
+
+static int mlx5e_macsec_del_secy(struct macsec_context *ctx)
+{
+	struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
+	struct mlx5e_macsec_rx_sc *rx_sc, *tmp;
+	struct mlx5e_macsec_sa *rx_sa;
+	struct mlx5e_macsec_sa *tx_sa;
+	struct mlx5e_macsec *macsec;
+	struct list_head *list;
+	int i;
+
+	if (ctx->prepare)
+		return 0;
+
+	mutex_lock(&priv->macsec->lock);
+
+	macsec = priv->macsec;
+
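+	/* Release every Tx SA: tear down its offload state, destroy its encryption
+	 * key and free it.
+	 */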
+	for (i = 0; i < MACSEC_NUM_AN; ++i) {
+		tx_sa = macsec->tx_sa[i];
+		if (!tx_sa)
+			continue;
+
+		mlx5e_macsec_cleanup_sa(macsec, tx_sa, true);
+		mlx5_destroy_encryption_key(macsec->mdev, tx_sa->enc_key_id);
+		kfree(tx_sa);
+		macsec->tx_sa[i] = NULL;
+	}
+
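+	/* Tear down every Rx SC together with its SAs and their encryption keys */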
+	list = &macsec->macsec_rx_sc_list_head;
+	list_for_each_entry_safe(rx_sc, tmp, list, rx_sc_list_element) {
+		for (i = 0; i < MACSEC_NUM_AN; ++i) {
+			rx_sa = rx_sc->rx_sa[i];
+			if (!rx_sa)
+				continue;
+
+			mlx5e_macsec_cleanup_sa(macsec, rx_sa, false);
+			mlx5_destroy_encryption_key(macsec->mdev, rx_sa->enc_key_id);
+			kfree(rx_sa);
+			rx_sc->rx_sa[i] = NULL;
+		}
+
+		list_del_rcu(&rx_sc->rx_sc_list_element);
+
+		kfree_rcu(rx_sc);
+	}
+
+	kfree(macsec->dev_addr);
+	macsec->dev_addr = NULL;
+
+	mutex_unlock(&macsec->lock);
+
+	return 0;
+}
+
 static bool mlx5e_is_macsec_device(const struct mlx5_core_dev *mdev)
 {
 	if (!(MLX5_CAP_GEN_64(mdev, general_obj_types) &
@@ -805,6 +1031,9 @@ static const struct macsec_ops macsec_offload_ops = {
 	.mdo_add_rxsa = mlx5e_macsec_add_rxsa,
 	.mdo_upd_rxsa = mlx5e_macsec_upd_rxsa,
 	.mdo_del_rxsa = mlx5e_macsec_del_rxsa,
+	.mdo_add_secy = mlx5e_macsec_add_secy,
+	.mdo_upd_secy = mlx5e_macsec_upd_secy,
+	.mdo_del_secy = mlx5e_macsec_del_secy,
 };
 
 bool mlx5e_macsec_handle_tx_skb(struct mlx5e_macsec *macsec, struct sk_buff *skb)
-- 
2.37.2


Thread overview: 28+ messages
2022-09-06  5:21 [PATCH net-next V2 00/17] Introduce MACsec skb_metadata_dst and mlx5 macsec offload Saeed Mahameed
2022-09-06  5:21 ` [PATCH net-next V2 01/17] net/macsec: Add MACsec skb_metadata_dst Tx Data path support Saeed Mahameed
2022-09-06  5:21 ` [PATCH net-next V2 02/17] net/macsec: Add MACsec skb_metadata_dst Rx " Saeed Mahameed
2022-09-06  5:21 ` [PATCH net-next V2 03/17] net/macsec: Move some code for sharing with various drivers that implements offload Saeed Mahameed
2022-09-06  5:21 ` [PATCH net-next V2 04/17] net/mlx5: Removed esp_id from struct mlx5_flow_act Saeed Mahameed
2022-09-06  5:21 ` [PATCH net-next V2 05/17] net/mlx5: Generalize Flow Context for new crypto fields Saeed Mahameed
2022-09-06  5:21 ` [PATCH net-next V2 06/17] net/mlx5: Introduce MACsec Connect-X offload hardware bits and structures Saeed Mahameed
2022-09-06  5:21 ` [PATCH net-next V2 07/17] net/mlx5: Add MACsec offload Tx command support Saeed Mahameed
2022-09-14 14:39   ` sundeep subbaraya
2022-09-14 20:38     ` Saeed Mahameed
2022-09-15  5:14       ` sundeep subbaraya
2022-09-15  5:20         ` sundeep subbaraya
2022-09-15  8:02           ` Antoine Tenart
2022-09-19  9:01             ` sundeep subbaraya
2022-09-19 13:26               ` Raed Salem
2022-09-20  8:14                 ` Antoine Tenart
2022-09-21 13:43                   ` sundeep subbaraya
2022-09-06  5:21 ` [PATCH net-next V2 08/17] net/mlx5: Add MACsec Tx tables support to fs_core Saeed Mahameed
2022-09-06  5:21 ` [PATCH net-next V2 09/17] net/mlx5e: Add MACsec TX steering rules Saeed Mahameed
2022-09-06  5:21 ` [PATCH net-next V2 10/17] net/mlx5e: Implement MACsec Tx data path using MACsec skb_metadata_dst Saeed Mahameed
2022-09-06  5:21 ` [PATCH net-next V2 11/17] net/mlx5e: Add MACsec offload Rx command support Saeed Mahameed
2022-09-06  5:21 ` [PATCH net-next V2 12/17] net/mlx5: Add MACsec Rx tables support to fs_core Saeed Mahameed
2022-09-06  5:21 ` [PATCH net-next V2 13/17] net/mlx5e: Add MACsec RX steering rules Saeed Mahameed
2022-09-06  5:21 ` [PATCH net-next V2 14/17] net/mlx5e: Implement MACsec Rx data path using MACsec skb_metadata_dst Saeed Mahameed
2022-09-06  5:21 ` Saeed Mahameed [this message]
2022-09-06  5:21 ` [PATCH net-next V2 16/17] net/mlx5e: Add MACsec stats support for Rx/Tx flows Saeed Mahameed
2022-09-06  5:21 ` [PATCH net-next V2 17/17] net/mlx5e: Add support to configure more than one macsec offload device Saeed Mahameed
2022-09-07 13:20 ` [PATCH net-next V2 00/17] Introduce MACsec skb_metadata_dst and mlx5 macsec offload patchwork-bot+netdevbpf
