netdev.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
* [PATCH xfrm-next 00/13] mlx5 IPsec packet offload support (Part I)
@ 2022-12-02 20:14 Leon Romanovsky
  2022-12-02 20:14 ` [PATCH xfrm-next 01/13] net/mlx5e: Create IPsec policy offload tables Leon Romanovsky
                   ` (14 more replies)
  0 siblings, 15 replies; 16+ messages in thread
From: Leon Romanovsky @ 2022-12-02 20:14 UTC (permalink / raw)
  To: Steffen Klassert
  Cc: Leon Romanovsky, David S. Miller, Eric Dumazet, Herbert Xu,
	Jakub Kicinski, netdev, Bharat Bhushan

From: Leon Romanovsky <leonro@nvidia.com>

Hi,

This is the second part, with the implementation of packet offload.

Thanks

Leon Romanovsky (12):
  net/mlx5e: Create IPsec policy offload tables
  net/mlx5e: Add XFRM policy offload logic
  net/mlx5e: Use same coding pattern for Rx and Tx flows
  net/mlx5e: Configure IPsec packet offload flow steering
  net/mlx5e: Improve IPsec flow steering autogroup
  net/mlx5e: Skip IPsec encryption for TX path without matching policy
  net/mlx5e: Provide intermediate pointer to access IPsec struct
  net/mlx5e: Store all XFRM SAs in Xarray
  net/mlx5e: Update IPsec soft and hard limits
  net/mlx5e: Handle hardware IPsec limits events
  net/mlx5e: Handle ESN update events
  net/mlx5e: Open mlx5 driver to accept IPsec packet offload

Raed Salem (1):
  net/mlx5e: Add statistics for Rx/Tx IPsec offloaded flows

 .../net/ethernet/mellanox/mlx5/core/en/fs.h   |   3 +-
 .../mellanox/mlx5/core/en_accel/ipsec.c       | 312 +++++++++---
 .../mellanox/mlx5/core/en_accel/ipsec.h       |  84 ++-
 .../mellanox/mlx5/core/en_accel/ipsec_fs.c    | 482 +++++++++++++++++-
 .../mlx5/core/en_accel/ipsec_offload.c        | 196 +++++++
 .../mellanox/mlx5/core/en_accel/ipsec_rxtx.c  |  22 +-
 .../mellanox/mlx5/core/en_accel/ipsec_stats.c |  52 ++
 .../ethernet/mellanox/mlx5/core/en_stats.c    |   1 +
 .../ethernet/mellanox/mlx5/core/en_stats.h    |   1 +
 drivers/net/ethernet/mellanox/mlx5/core/eq.c  |   5 +
 .../net/ethernet/mellanox/mlx5/core/fs_core.c |   6 +-
 .../net/ethernet/mellanox/mlx5/core/lib/aso.h |   1 +
 12 files changed, 1047 insertions(+), 118 deletions(-)

-- 
2.38.1


^ permalink raw reply	[flat|nested] 16+ messages in thread

* [PATCH xfrm-next 01/13] net/mlx5e: Create IPsec policy offload tables
  2022-12-02 20:14 [PATCH xfrm-next 00/13] mlx5 IPsec packet offload support (Part I) Leon Romanovsky
@ 2022-12-02 20:14 ` Leon Romanovsky
  2022-12-02 20:14 ` [PATCH xfrm-next 02/13] net/mlx5e: Add XFRM policy offload logic Leon Romanovsky
                   ` (13 subsequent siblings)
  14 siblings, 0 replies; 16+ messages in thread
From: Leon Romanovsky @ 2022-12-02 20:14 UTC (permalink / raw)
  To: Steffen Klassert
  Cc: Leon Romanovsky, David S. Miller, Eric Dumazet, Herbert Xu,
	Jakub Kicinski, netdev, Bharat Bhushan

From: Leon Romanovsky <leonro@nvidia.com>

Add empty table to be used for IPsec policy offload.

Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
---
 .../net/ethernet/mellanox/mlx5/core/en/fs.h   |  3 +-
 .../mellanox/mlx5/core/en_accel/ipsec_fs.c    | 53 ++++++++++++++++++-
 .../net/ethernet/mellanox/mlx5/core/fs_core.c |  6 +--
 3 files changed, 56 insertions(+), 6 deletions(-)

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
index bf2741eb7f9b..379c6dc9a3be 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
@@ -84,7 +84,8 @@ enum {
 	MLX5E_ARFS_FT_LEVEL = MLX5E_INNER_TTC_FT_LEVEL + 1,
 #endif
 #ifdef CONFIG_MLX5_EN_IPSEC
-	MLX5E_ACCEL_FS_ESP_FT_LEVEL = MLX5E_INNER_TTC_FT_LEVEL + 1,
+	MLX5E_ACCEL_FS_POL_FT_LEVEL = MLX5E_INNER_TTC_FT_LEVEL + 1,
+	MLX5E_ACCEL_FS_ESP_FT_LEVEL,
 	MLX5E_ACCEL_FS_ESP_FT_ERR_LEVEL,
 #endif
 };
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
index 5bc6f9d1f5a6..a3c7d0f142c0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
@@ -11,6 +11,7 @@
 
 struct mlx5e_ipsec_ft {
 	struct mutex mutex; /* Protect changes to this struct */
+	struct mlx5_flow_table *pol;
 	struct mlx5_flow_table *sa;
 	struct mlx5_flow_table *status;
 	u32 refcnt;
@@ -23,12 +24,14 @@ struct mlx5e_ipsec_miss {
 
 struct mlx5e_ipsec_rx {
 	struct mlx5e_ipsec_ft ft;
+	struct mlx5e_ipsec_miss pol;
 	struct mlx5e_ipsec_miss sa;
 	struct mlx5e_ipsec_rule status;
 };
 
 struct mlx5e_ipsec_tx {
 	struct mlx5e_ipsec_ft ft;
+	struct mlx5e_ipsec_miss pol;
 	struct mlx5_flow_namespace *ns;
 };
 
@@ -157,6 +160,10 @@ static int ipsec_miss_create(struct mlx5_core_dev *mdev,
 
 static void rx_destroy(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_rx *rx)
 {
+	mlx5_del_flow_rules(rx->pol.rule);
+	mlx5_destroy_flow_group(rx->pol.group);
+	mlx5_destroy_flow_table(rx->ft.pol);
+
 	mlx5_del_flow_rules(rx->sa.rule);
 	mlx5_destroy_flow_group(rx->sa.group);
 	mlx5_destroy_flow_table(rx->ft.sa);
@@ -200,8 +207,27 @@ static int rx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
 	if (err)
 		goto err_fs;
 
+	ft = ipsec_ft_create(ns, MLX5E_ACCEL_FS_POL_FT_LEVEL, MLX5E_NIC_PRIO,
+			     1);
+	if (IS_ERR(ft)) {
+		err = PTR_ERR(ft);
+		goto err_pol_ft;
+	}
+	rx->ft.pol = ft;
+	memset(&dest, 0x00, sizeof(dest));
+	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+	dest.ft = rx->ft.sa;
+	err = ipsec_miss_create(mdev, rx->ft.pol, &rx->pol, &dest);
+	if (err)
+		goto err_pol_miss;
+
 	return 0;
 
+err_pol_miss:
+	mlx5_destroy_flow_table(rx->ft.pol);
+err_pol_ft:
+	mlx5_del_flow_rules(rx->sa.rule);
+	mlx5_destroy_flow_group(rx->sa.group);
 err_fs:
 	mlx5_destroy_flow_table(rx->ft.sa);
 err_fs_ft:
@@ -236,7 +262,7 @@ static struct mlx5e_ipsec_rx *rx_ft_get(struct mlx5_core_dev *mdev,
 
 	/* connect */
 	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
-	dest.ft = rx->ft.sa;
+	dest.ft = rx->ft.pol;
 	mlx5_ttc_fwd_dest(ttc, family2tt(family), &dest);
 
 skip:
@@ -277,14 +303,34 @@ static void rx_ft_put(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
 /* IPsec TX flow steering */
 static int tx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_tx *tx)
 {
+	struct mlx5_flow_destination dest = {};
 	struct mlx5_flow_table *ft;
+	int err;
 
-	ft = ipsec_ft_create(tx->ns, 0, 0, 1);
+	ft = ipsec_ft_create(tx->ns, 1, 0, 1);
 	if (IS_ERR(ft))
 		return PTR_ERR(ft);
 
 	tx->ft.sa = ft;
+
+	ft = ipsec_ft_create(tx->ns, 0, 0, 1);
+	if (IS_ERR(ft)) {
+		err = PTR_ERR(ft);
+		goto err_pol_ft;
+	}
+	tx->ft.pol = ft;
+	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+	dest.ft = tx->ft.sa;
+	err = ipsec_miss_create(mdev, tx->ft.pol, &tx->pol, &dest);
+	if (err)
+		goto err_pol_miss;
 	return 0;
+
+err_pol_miss:
+	mlx5_destroy_flow_table(tx->ft.pol);
+err_pol_ft:
+	mlx5_destroy_flow_table(tx->ft.sa);
+	return err;
 }
 
 static struct mlx5e_ipsec_tx *tx_ft_get(struct mlx5_core_dev *mdev,
@@ -318,6 +364,9 @@ static void tx_ft_put(struct mlx5e_ipsec *ipsec)
 	if (tx->ft.refcnt)
 		goto out;
 
+	mlx5_del_flow_rules(tx->pol.rule);
+	mlx5_destroy_flow_group(tx->pol.group);
+	mlx5_destroy_flow_table(tx->ft.pol);
 	mlx5_destroy_flow_table(tx->ft.sa);
 out:
 	mutex_unlock(&tx->ft.mutex);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index d53749248fa0..9995307d374b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -111,8 +111,8 @@
 #define ETHTOOL_PRIO_NUM_LEVELS 1
 #define ETHTOOL_NUM_PRIOS 11
 #define ETHTOOL_MIN_LEVEL (KERNEL_MIN_LEVEL + ETHTOOL_NUM_PRIOS)
-/* Promiscuous, Vlan, mac, ttc, inner ttc, {UDP/ANY/aRFS/accel/{esp, esp_err}} */
-#define KERNEL_NIC_PRIO_NUM_LEVELS 7
+/* Promiscuous, Vlan, mac, ttc, inner ttc, {UDP/ANY/aRFS/accel/{esp, esp_err}}, IPsec policy */
+#define KERNEL_NIC_PRIO_NUM_LEVELS 8
 #define KERNEL_NIC_NUM_PRIOS 1
 /* One more level for tc */
 #define KERNEL_MIN_LEVEL (KERNEL_NIC_PRIO_NUM_LEVELS + 1)
@@ -133,7 +133,7 @@
 #define LAG_MIN_LEVEL (OFFLOADS_MIN_LEVEL + KERNEL_RX_MACSEC_MIN_LEVEL + 1)
 
 #define KERNEL_TX_IPSEC_NUM_PRIOS  1
-#define KERNEL_TX_IPSEC_NUM_LEVELS 1
+#define KERNEL_TX_IPSEC_NUM_LEVELS 2
 #define KERNEL_TX_IPSEC_MIN_LEVEL        (KERNEL_TX_IPSEC_NUM_LEVELS)
 
 #define KERNEL_TX_MACSEC_NUM_PRIOS  1
-- 
2.38.1


^ permalink raw reply related	[flat|nested] 16+ messages in thread

* [PATCH xfrm-next 02/13] net/mlx5e: Add XFRM policy offload logic
  2022-12-02 20:14 [PATCH xfrm-next 00/13] mlx5 IPsec packet offload support (Part I) Leon Romanovsky
  2022-12-02 20:14 ` [PATCH xfrm-next 01/13] net/mlx5e: Create IPsec policy offload tables Leon Romanovsky
@ 2022-12-02 20:14 ` Leon Romanovsky
  2022-12-02 20:14 ` [PATCH xfrm-next 03/13] net/mlx5e: Use same coding pattern for Rx and Tx flows Leon Romanovsky
                   ` (12 subsequent siblings)
  14 siblings, 0 replies; 16+ messages in thread
From: Leon Romanovsky @ 2022-12-02 20:14 UTC (permalink / raw)
  To: Steffen Klassert
  Cc: Leon Romanovsky, David S. Miller, Eric Dumazet, Herbert Xu,
	Jakub Kicinski, netdev, Bharat Bhushan, Raed Salem,
	Saeed Mahameed

From: Leon Romanovsky <leonro@nvidia.com>

Implement mlx5 flow steering logic and mlx5 IPsec code to support
XFRM policy offload.

Reviewed-by: Raed Salem <raeds@nvidia.com>
Reviewed-by: Saeed Mahameed <saeedm@nvidia.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
---
 .../mellanox/mlx5/core/en_accel/ipsec.c       | 118 +++++++++++++-
 .../mellanox/mlx5/core/en_accel/ipsec.h       |  32 ++++
 .../mellanox/mlx5/core/en_accel/ipsec_fs.c    | 146 ++++++++++++++++++
 3 files changed, 295 insertions(+), 1 deletion(-)

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
index c5bccc0df60d..b22a31e178cf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
@@ -45,6 +45,11 @@ static struct mlx5e_ipsec_sa_entry *to_ipsec_sa_entry(struct xfrm_state *x)
 	return (struct mlx5e_ipsec_sa_entry *)x->xso.offload_handle;
 }
 
+static struct mlx5e_ipsec_pol_entry *to_ipsec_pol_entry(struct xfrm_policy *x)
+{
+	return (struct mlx5e_ipsec_pol_entry *)x->xdo.offload_handle;
+}
+
 struct xfrm_state *mlx5e_ipsec_sadb_rx_lookup(struct mlx5e_ipsec *ipsec,
 					      unsigned int handle)
 {
@@ -446,6 +451,101 @@ static void mlx5e_xfrm_advance_esn_state(struct xfrm_state *x)
 	queue_work(sa_entry->ipsec->wq, &modify_work->work);
 }
 
+static int mlx5e_xfrm_validate_policy(struct xfrm_policy *x)
+{
+	struct net_device *netdev = x->xdo.real_dev;
+
+	if (x->type != XFRM_POLICY_TYPE_MAIN) {
+		netdev_info(netdev, "Cannot offload non-main policy types\n");
+		return -EINVAL;
+	}
+
+	/* Please pay attention that we support only one template */
+	if (x->xfrm_nr > 1) {
+		netdev_info(netdev, "Cannot offload more than one template\n");
+		return -EINVAL;
+	}
+
+	if (x->xdo.dir != XFRM_DEV_OFFLOAD_IN &&
+	    x->xdo.dir != XFRM_DEV_OFFLOAD_OUT) {
+		netdev_info(netdev, "Cannot offload forward policy\n");
+		return -EINVAL;
+	}
+
+	if (!x->xfrm_vec[0].reqid) {
+		netdev_info(netdev, "Cannot offload policy without reqid\n");
+		return -EINVAL;
+	}
+
+	if (x->xdo.type != XFRM_DEV_OFFLOAD_PACKET) {
+		netdev_info(netdev, "Unsupported xfrm offload type\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void
+mlx5e_ipsec_build_accel_pol_attrs(struct mlx5e_ipsec_pol_entry *pol_entry,
+				  struct mlx5_accel_pol_xfrm_attrs *attrs)
+{
+	struct xfrm_policy *x = pol_entry->x;
+	struct xfrm_selector *sel;
+
+	sel = &x->selector;
+	memset(attrs, 0, sizeof(*attrs));
+
+	memcpy(&attrs->saddr, sel->saddr.a6, sizeof(attrs->saddr));
+	memcpy(&attrs->daddr, sel->daddr.a6, sizeof(attrs->daddr));
+	attrs->family = sel->family;
+	attrs->dir = x->xdo.dir;
+	attrs->action = x->action;
+	attrs->type = XFRM_DEV_OFFLOAD_PACKET;
+}
+
+static int mlx5e_xfrm_add_policy(struct xfrm_policy *x)
+{
+	struct net_device *netdev = x->xdo.real_dev;
+	struct mlx5e_ipsec_pol_entry *pol_entry;
+	struct mlx5e_priv *priv;
+	int err;
+
+	priv = netdev_priv(netdev);
+	if (!priv->ipsec)
+		return -EOPNOTSUPP;
+
+	err = mlx5e_xfrm_validate_policy(x);
+	if (err)
+		return err;
+
+	pol_entry = kzalloc(sizeof(*pol_entry), GFP_KERNEL);
+	if (!pol_entry)
+		return -ENOMEM;
+
+	pol_entry->x = x;
+	pol_entry->ipsec = priv->ipsec;
+
+	mlx5e_ipsec_build_accel_pol_attrs(pol_entry, &pol_entry->attrs);
+	err = mlx5e_accel_ipsec_fs_add_pol(pol_entry);
+	if (err)
+		goto err_fs;
+
+	x->xdo.offload_handle = (unsigned long)pol_entry;
+	return 0;
+
+err_fs:
+	kfree(pol_entry);
+	return err;
+}
+
+static void mlx5e_xfrm_free_policy(struct xfrm_policy *x)
+{
+	struct mlx5e_ipsec_pol_entry *pol_entry = to_ipsec_pol_entry(x);
+
+	mlx5e_accel_ipsec_fs_del_pol(pol_entry);
+	kfree(pol_entry);
+}
+
 static const struct xfrmdev_ops mlx5e_ipsec_xfrmdev_ops = {
 	.xdo_dev_state_add	= mlx5e_xfrm_add_state,
 	.xdo_dev_state_delete	= mlx5e_xfrm_del_state,
@@ -454,6 +554,17 @@ static const struct xfrmdev_ops mlx5e_ipsec_xfrmdev_ops = {
 	.xdo_dev_state_advance_esn = mlx5e_xfrm_advance_esn_state,
 };
 
+static const struct xfrmdev_ops mlx5e_ipsec_packet_xfrmdev_ops = {
+	.xdo_dev_state_add	= mlx5e_xfrm_add_state,
+	.xdo_dev_state_delete	= mlx5e_xfrm_del_state,
+	.xdo_dev_state_free	= mlx5e_xfrm_free_state,
+	.xdo_dev_offload_ok	= mlx5e_ipsec_offload_ok,
+	.xdo_dev_state_advance_esn = mlx5e_xfrm_advance_esn_state,
+
+	.xdo_dev_policy_add = mlx5e_xfrm_add_policy,
+	.xdo_dev_policy_free = mlx5e_xfrm_free_policy,
+};
+
 void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv)
 {
 	struct mlx5_core_dev *mdev = priv->mdev;
@@ -463,7 +574,12 @@ void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv)
 		return;
 
 	mlx5_core_info(mdev, "mlx5e: IPSec ESP acceleration enabled\n");
-	netdev->xfrmdev_ops = &mlx5e_ipsec_xfrmdev_ops;
+
+	if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_PACKET_OFFLOAD)
+		netdev->xfrmdev_ops = &mlx5e_ipsec_packet_xfrmdev_ops;
+	else
+		netdev->xfrmdev_ops = &mlx5e_ipsec_xfrmdev_ops;
+
 	netdev->features |= NETIF_F_HW_ESP;
 	netdev->hw_enc_features |= NETIF_F_HW_ESP;
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
index 990378d52fd4..8d1a0d053eb4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
@@ -149,6 +149,30 @@ struct mlx5e_ipsec_sa_entry {
 	struct mlx5e_ipsec_modify_state_work modify_work;
 };
 
+struct mlx5_accel_pol_xfrm_attrs {
+	union {
+		__be32 a4;
+		__be32 a6[4];
+	} saddr;
+
+	union {
+		__be32 a4;
+		__be32 a6[4];
+	} daddr;
+
+	u8 family;
+	u8 action;
+	u8 type : 2;
+	u8 dir : 2;
+};
+
+struct mlx5e_ipsec_pol_entry {
+	struct xfrm_policy *x;
+	struct mlx5e_ipsec *ipsec;
+	struct mlx5_flow_handle *rule;
+	struct mlx5_accel_pol_xfrm_attrs attrs;
+};
+
 void mlx5e_ipsec_init(struct mlx5e_priv *priv);
 void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv);
 void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv);
@@ -160,6 +184,8 @@ void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_ipsec *ipsec);
 int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec);
 int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry);
 void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_ipsec_sa_entry *sa_entry);
+int mlx5e_accel_ipsec_fs_add_pol(struct mlx5e_ipsec_pol_entry *pol_entry);
+void mlx5e_accel_ipsec_fs_del_pol(struct mlx5e_ipsec_pol_entry *pol_entry);
 
 int mlx5_ipsec_create_sa_ctx(struct mlx5e_ipsec_sa_entry *sa_entry);
 void mlx5_ipsec_free_sa_ctx(struct mlx5e_ipsec_sa_entry *sa_entry);
@@ -177,6 +203,12 @@ mlx5e_ipsec_sa2dev(struct mlx5e_ipsec_sa_entry *sa_entry)
 {
 	return sa_entry->ipsec->mdev;
 }
+
+static inline struct mlx5_core_dev *
+mlx5e_ipsec_pol2dev(struct mlx5e_ipsec_pol_entry *pol_entry)
+{
+	return pol_entry->ipsec->mdev;
+}
 #else
 static inline void mlx5e_ipsec_init(struct mlx5e_priv *priv)
 {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
index a3c7d0f142c0..27f866a158de 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
@@ -597,6 +597,130 @@ static int tx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
 	return err;
 }
 
+static int tx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
+{
+	struct mlx5_accel_pol_xfrm_attrs *attrs = &pol_entry->attrs;
+	struct mlx5_core_dev *mdev = mlx5e_ipsec_pol2dev(pol_entry);
+	struct mlx5_flow_destination dest = {};
+	struct mlx5_flow_act flow_act = {};
+	struct mlx5_flow_handle *rule;
+	struct mlx5_flow_spec *spec;
+	struct mlx5e_ipsec_tx *tx;
+	int err;
+
+	tx = tx_ft_get(mdev, pol_entry->ipsec);
+	if (IS_ERR(tx))
+		return PTR_ERR(tx);
+
+	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+	if (!spec) {
+		err = -ENOMEM;
+		goto err_alloc;
+	}
+
+	if (attrs->family == AF_INET)
+		setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4);
+	else
+		setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);
+
+	setup_fte_no_frags(spec);
+
+	switch (attrs->action) {
+	case XFRM_POLICY_ALLOW:
+		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+		break;
+	case XFRM_POLICY_BLOCK:
+		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
+		break;
+	default:
+		WARN_ON(true);
+		err = -EINVAL;
+		goto err_action;
+	}
+
+	flow_act.flags |= FLOW_ACT_NO_APPEND;
+	dest.ft = tx->ft.sa;
+	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+	rule = mlx5_add_flow_rules(tx->ft.pol, spec, &flow_act, &dest, 1);
+	if (IS_ERR(rule)) {
+		err = PTR_ERR(rule);
+		mlx5_core_err(mdev, "fail to add TX ipsec rule err=%d\n", err);
+		goto err_action;
+	}
+
+	kvfree(spec);
+	pol_entry->rule = rule;
+	return 0;
+
+err_action:
+	kvfree(spec);
+err_alloc:
+	tx_ft_put(pol_entry->ipsec);
+	return err;
+}
+
+static int rx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
+{
+	struct mlx5_accel_pol_xfrm_attrs *attrs = &pol_entry->attrs;
+	struct mlx5_core_dev *mdev = mlx5e_ipsec_pol2dev(pol_entry);
+	struct mlx5_flow_destination dest = {};
+	struct mlx5_flow_act flow_act = {};
+	struct mlx5_flow_handle *rule;
+	struct mlx5_flow_spec *spec;
+	struct mlx5e_ipsec_rx *rx;
+	int err;
+
+	rx = rx_ft_get(mdev, pol_entry->ipsec, attrs->family);
+	if (IS_ERR(rx))
+		return PTR_ERR(rx);
+
+	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+	if (!spec) {
+		err = -ENOMEM;
+		goto err_alloc;
+	}
+
+	if (attrs->family == AF_INET)
+		setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4);
+	else
+		setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);
+
+	setup_fte_no_frags(spec);
+
+	switch (attrs->action) {
+	case XFRM_POLICY_ALLOW:
+		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+		break;
+	case XFRM_POLICY_BLOCK:
+		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
+		break;
+	default:
+		WARN_ON(true);
+		err = -EINVAL;
+		goto err_action;
+	}
+
+	flow_act.flags |= FLOW_ACT_NO_APPEND;
+	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+	dest.ft = rx->ft.sa;
+	rule = mlx5_add_flow_rules(rx->ft.pol, spec, &flow_act, &dest, 1);
+	if (IS_ERR(rule)) {
+		err = PTR_ERR(rule);
+		mlx5_core_err(mdev, "Fail to add RX IPsec policy rule err=%d\n", err);
+		goto err_action;
+	}
+
+	kvfree(spec);
+	pol_entry->rule = rule;
+	return 0;
+
+err_action:
+	kvfree(spec);
+err_alloc:
+	rx_ft_put(mdev, pol_entry->ipsec, attrs->family);
+	return err;
+}
+
 int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
 {
 	if (sa_entry->attrs.dir == XFRM_DEV_OFFLOAD_OUT)
@@ -621,6 +745,28 @@ void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
 	rx_ft_put(mdev, sa_entry->ipsec, sa_entry->attrs.family);
 }
 
+int mlx5e_accel_ipsec_fs_add_pol(struct mlx5e_ipsec_pol_entry *pol_entry)
+{
+	if (pol_entry->attrs.dir == XFRM_DEV_OFFLOAD_OUT)
+		return tx_add_policy(pol_entry);
+
+	return rx_add_policy(pol_entry);
+}
+
+void mlx5e_accel_ipsec_fs_del_pol(struct mlx5e_ipsec_pol_entry *pol_entry)
+{
+	struct mlx5_core_dev *mdev = mlx5e_ipsec_pol2dev(pol_entry);
+
+	mlx5_del_flow_rules(pol_entry->rule);
+
+	if (pol_entry->attrs.dir == XFRM_DEV_OFFLOAD_OUT) {
+		tx_ft_put(pol_entry->ipsec);
+		return;
+	}
+
+	rx_ft_put(mdev, pol_entry->ipsec, pol_entry->attrs.family);
+}
+
 void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_ipsec *ipsec)
 {
 	if (!ipsec->tx)
-- 
2.38.1


^ permalink raw reply related	[flat|nested] 16+ messages in thread

* [PATCH xfrm-next 03/13] net/mlx5e: Use same coding pattern for Rx and Tx flows
  2022-12-02 20:14 [PATCH xfrm-next 00/13] mlx5 IPsec packet offload support (Part I) Leon Romanovsky
  2022-12-02 20:14 ` [PATCH xfrm-next 01/13] net/mlx5e: Create IPsec policy offload tables Leon Romanovsky
  2022-12-02 20:14 ` [PATCH xfrm-next 02/13] net/mlx5e: Add XFRM policy offload logic Leon Romanovsky
@ 2022-12-02 20:14 ` Leon Romanovsky
  2022-12-02 20:14 ` [PATCH xfrm-next 04/13] net/mlx5e: Configure IPsec packet offload flow steering Leon Romanovsky
                   ` (11 subsequent siblings)
  14 siblings, 0 replies; 16+ messages in thread
From: Leon Romanovsky @ 2022-12-02 20:14 UTC (permalink / raw)
  To: Steffen Klassert
  Cc: Leon Romanovsky, David S. Miller, Eric Dumazet, Herbert Xu,
	Jakub Kicinski, netdev, Bharat Bhushan, Raed Salem,
	Saeed Mahameed

From: Leon Romanovsky <leonro@nvidia.com>

Remove intermediate variable in favor of having similar coding style
for Rx and Tx add rule functions.

Reviewed-by: Raed Salem <raeds@nvidia.com>
Reviewed-by: Saeed Mahameed <saeedm@nvidia.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
---
 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
index 27f866a158de..893c1862e211 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
@@ -482,7 +482,6 @@ static int setup_modify_header(struct mlx5_core_dev *mdev, u32 val, u8 dir,
 
 static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
 {
-	struct mlx5e_ipsec_rule *ipsec_rule = &sa_entry->ipsec_rule;
 	struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
 	struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
 	struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
@@ -532,8 +531,8 @@ static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
 	}
 	kvfree(spec);
 
-	ipsec_rule->rule = rule;
-	ipsec_rule->modify_hdr = flow_act.modify_hdr;
+	sa_entry->ipsec_rule.rule = rule;
+	sa_entry->ipsec_rule.modify_hdr = flow_act.modify_hdr;
 	return 0;
 
 err_add_flow:
-- 
2.38.1


^ permalink raw reply related	[flat|nested] 16+ messages in thread

* [PATCH xfrm-next 04/13] net/mlx5e: Configure IPsec packet offload flow steering
  2022-12-02 20:14 [PATCH xfrm-next 00/13] mlx5 IPsec packet offload support (Part I) Leon Romanovsky
                   ` (2 preceding siblings ...)
  2022-12-02 20:14 ` [PATCH xfrm-next 03/13] net/mlx5e: Use same coding pattern for Rx and Tx flows Leon Romanovsky
@ 2022-12-02 20:14 ` Leon Romanovsky
  2022-12-02 20:14 ` [PATCH xfrm-next 05/13] net/mlx5e: Improve IPsec flow steering autogroup Leon Romanovsky
                   ` (10 subsequent siblings)
  14 siblings, 0 replies; 16+ messages in thread
From: Leon Romanovsky @ 2022-12-02 20:14 UTC (permalink / raw)
  To: Steffen Klassert
  Cc: Leon Romanovsky, David S. Miller, Eric Dumazet, Herbert Xu,
	Jakub Kicinski, netdev, Bharat Bhushan, Raed Salem,
	Saeed Mahameed

From: Leon Romanovsky <leonro@nvidia.com>

In packet offload mode, the HW is responsible for handling ESP headers,
SPI numbers and trailers (ICV), together with different logic for the
RX and TX paths.

In order to support packet offload mode, special logic is added
to flow steering rules.

Reviewed-by: Raed Salem <raeds@nvidia.com>
Reviewed-by: Saeed Mahameed <saeedm@nvidia.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
---
 .../mellanox/mlx5/core/en_accel/ipsec.c       |  2 +
 .../mellanox/mlx5/core/en_accel/ipsec.h       |  2 +
 .../mellanox/mlx5/core/en_accel/ipsec_fs.c    | 97 +++++++++++++++++--
 3 files changed, 91 insertions(+), 10 deletions(-)

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
index b22a31e178cf..65f73a5c29ba 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
@@ -162,6 +162,8 @@ mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
 	memcpy(&aes_gcm->salt, x->aead->alg_key + key_len,
 	       sizeof(aes_gcm->salt));
 
+	attrs->authsize = crypto_aead_authsize(aead) / 4; /* in dwords */
+
 	/* iv len */
 	aes_gcm->icv_len = x->aead->alg_icv_len;
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
index 8d1a0d053eb4..25b865590488 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
@@ -76,6 +76,7 @@ struct mlx5_accel_esp_xfrm_attrs {
 	u8 type : 2;
 	u8 family;
 	u32 replay_window;
+	u32 authsize;
 };
 
 enum mlx5_ipsec_cap {
@@ -127,6 +128,7 @@ struct mlx5e_ipsec_esn_state {
 struct mlx5e_ipsec_rule {
 	struct mlx5_flow_handle *rule;
 	struct mlx5_modify_hdr *modify_hdr;
+	struct mlx5_pkt_reformat *pkt_reformat;
 };
 
 struct mlx5e_ipsec_modify_state_work {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
index 893c1862e211..dbe35accaebf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
@@ -480,6 +480,48 @@ static int setup_modify_header(struct mlx5_core_dev *mdev, u32 val, u8 dir,
 	return 0;
 }
 
+static int setup_pkt_reformat(struct mlx5_core_dev *mdev,
+			      struct mlx5_accel_esp_xfrm_attrs *attrs,
+			      struct mlx5_flow_act *flow_act)
+{
+	enum mlx5_flow_namespace_type ns_type = MLX5_FLOW_NAMESPACE_EGRESS;
+	struct mlx5_pkt_reformat_params reformat_params = {};
+	struct mlx5_pkt_reformat *pkt_reformat;
+	u8 reformatbf[16] = {};
+	__be32 spi;
+
+	if (attrs->dir == XFRM_DEV_OFFLOAD_IN) {
+		reformat_params.type = MLX5_REFORMAT_TYPE_DEL_ESP_TRANSPORT;
+		ns_type = MLX5_FLOW_NAMESPACE_KERNEL;
+		goto cmd;
+	}
+
+	if (attrs->family == AF_INET)
+		reformat_params.type =
+			MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_IPV4;
+	else
+		reformat_params.type =
+			MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_IPV6;
+
+	/* convert to network format */
+	spi = htonl(attrs->spi);
+	memcpy(reformatbf, &spi, 4);
+
+	reformat_params.param_0 = attrs->authsize;
+	reformat_params.size = sizeof(reformatbf);
+	reformat_params.data = &reformatbf;
+
+cmd:
+	pkt_reformat =
+		mlx5_packet_reformat_alloc(mdev, &reformat_params, ns_type);
+	if (IS_ERR(pkt_reformat))
+		return PTR_ERR(pkt_reformat);
+
+	flow_act->pkt_reformat = pkt_reformat;
+	flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
+	return 0;
+}
+
 static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
 {
 	struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
@@ -516,6 +558,16 @@ static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
 	if (err)
 		goto err_mod_header;
 
+	switch (attrs->type) {
+	case XFRM_DEV_OFFLOAD_PACKET:
+		err = setup_pkt_reformat(mdev, attrs, &flow_act);
+		if (err)
+			goto err_pkt_reformat;
+		break;
+	default:
+		break;
+	}
+
 	flow_act.crypto.type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_IPSEC;
 	flow_act.crypto.obj_id = sa_entry->ipsec_obj_id;
 	flow_act.flags |= FLOW_ACT_NO_APPEND;
@@ -533,9 +585,13 @@ static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
 
 	sa_entry->ipsec_rule.rule = rule;
 	sa_entry->ipsec_rule.modify_hdr = flow_act.modify_hdr;
+	sa_entry->ipsec_rule.pkt_reformat = flow_act.pkt_reformat;
 	return 0;
 
 err_add_flow:
+	if (flow_act.pkt_reformat)
+		mlx5_packet_reformat_dealloc(mdev, flow_act.pkt_reformat);
+err_pkt_reformat:
 	mlx5_modify_header_dealloc(mdev, flow_act.modify_hdr);
 err_mod_header:
 	kvfree(spec);
@@ -562,7 +618,7 @@ static int tx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
 	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
 	if (!spec) {
 		err = -ENOMEM;
-		goto out;
+		goto err_alloc;
 	}
 
 	if (attrs->family == AF_INET)
@@ -570,29 +626,47 @@ static int tx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
 	else
 		setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);
 
-	setup_fte_spi(spec, attrs->spi);
-	setup_fte_esp(spec);
 	setup_fte_no_frags(spec);
-	setup_fte_reg_a(spec);
+
+	switch (attrs->type) {
+	case XFRM_DEV_OFFLOAD_CRYPTO:
+		setup_fte_spi(spec, attrs->spi);
+		setup_fte_esp(spec);
+		setup_fte_reg_a(spec);
+		break;
+	case XFRM_DEV_OFFLOAD_PACKET:
+		err = setup_pkt_reformat(mdev, attrs, &flow_act);
+		if (err)
+			goto err_pkt_reformat;
+		break;
+	default:
+		break;
+	}
 
 	flow_act.crypto.type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_IPSEC;
 	flow_act.crypto.obj_id = sa_entry->ipsec_obj_id;
 	flow_act.flags |= FLOW_ACT_NO_APPEND;
-	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW |
-			  MLX5_FLOW_CONTEXT_ACTION_CRYPTO_ENCRYPT;
+	flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_ALLOW |
+			   MLX5_FLOW_CONTEXT_ACTION_CRYPTO_ENCRYPT;
 	rule = mlx5_add_flow_rules(tx->ft.sa, spec, &flow_act, NULL, 0);
 	if (IS_ERR(rule)) {
 		err = PTR_ERR(rule);
 		mlx5_core_err(mdev, "fail to add TX ipsec rule err=%d\n", err);
-		goto out;
+		goto err_add_flow;
 	}
 
+	kvfree(spec);
 	sa_entry->ipsec_rule.rule = rule;
+	sa_entry->ipsec_rule.pkt_reformat = flow_act.pkt_reformat;
+	return 0;
 
-out:
+err_add_flow:
+	if (flow_act.pkt_reformat)
+		mlx5_packet_reformat_dealloc(mdev, flow_act.pkt_reformat);
+err_pkt_reformat:
 	kvfree(spec);
-	if (err)
-		tx_ft_put(ipsec);
+err_alloc:
+	tx_ft_put(ipsec);
 	return err;
 }
 
@@ -735,6 +809,9 @@ void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
 
 	mlx5_del_flow_rules(ipsec_rule->rule);
 
+	if (ipsec_rule->pkt_reformat)
+		mlx5_packet_reformat_dealloc(mdev, ipsec_rule->pkt_reformat);
+
 	if (sa_entry->attrs.dir == XFRM_DEV_OFFLOAD_OUT) {
 		tx_ft_put(sa_entry->ipsec);
 		return;
-- 
2.38.1


^ permalink raw reply related	[flat|nested] 16+ messages in thread

* [PATCH xfrm-next 05/13] net/mlx5e: Improve IPsec flow steering autogroup
  2022-12-02 20:14 [PATCH xfrm-next 00/13] mlx5 IPsec packet offload support (Part I) Leon Romanovsky
                   ` (3 preceding siblings ...)
  2022-12-02 20:14 ` [PATCH xfrm-next 04/13] net/mlx5e: Configure IPsec packet offload flow steering Leon Romanovsky
@ 2022-12-02 20:14 ` Leon Romanovsky
  2022-12-02 20:14 ` [PATCH xfrm-next 06/13] net/mlx5e: Add statistics for Rx/Tx IPsec offloaded flows Leon Romanovsky
                   ` (9 subsequent siblings)
  14 siblings, 0 replies; 16+ messages in thread
From: Leon Romanovsky @ 2022-12-02 20:14 UTC (permalink / raw)
  To: Steffen Klassert
  Cc: Leon Romanovsky, David S. Miller, Eric Dumazet, Herbert Xu,
	Jakub Kicinski, netdev, Bharat Bhushan, Raed Salem

From: Leon Romanovsky <leonro@nvidia.com>

The flow steering API separates newly created rules based on their
match criteria. Right now, all IPsec tables are created with one
group and suffer from suboptimal FS performance.

Count the number of different match criteria for the relevant tables,
and set the proper value at table creation time.

Reviewed-by: Raed Salem <raeds@nvidia.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
---
 .../net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c   | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
index dbe35accaebf..0af831128c4e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
@@ -196,7 +196,7 @@ static int rx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
 
 	/* Create FT */
 	ft = ipsec_ft_create(ns, MLX5E_ACCEL_FS_ESP_FT_LEVEL, MLX5E_NIC_PRIO,
-			     1);
+			     2);
 	if (IS_ERR(ft)) {
 		err = PTR_ERR(ft);
 		goto err_fs_ft;
@@ -208,7 +208,7 @@ static int rx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
 		goto err_fs;
 
 	ft = ipsec_ft_create(ns, MLX5E_ACCEL_FS_POL_FT_LEVEL, MLX5E_NIC_PRIO,
-			     1);
+			     2);
 	if (IS_ERR(ft)) {
 		err = PTR_ERR(ft);
 		goto err_pol_ft;
@@ -307,13 +307,13 @@ static int tx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_tx *tx)
 	struct mlx5_flow_table *ft;
 	int err;
 
-	ft = ipsec_ft_create(tx->ns, 1, 0, 1);
+	ft = ipsec_ft_create(tx->ns, 1, 0, 4);
 	if (IS_ERR(ft))
 		return PTR_ERR(ft);
 
 	tx->ft.sa = ft;
 
-	ft = ipsec_ft_create(tx->ns, 0, 0, 1);
+	ft = ipsec_ft_create(tx->ns, 0, 0, 2);
 	if (IS_ERR(ft)) {
 		err = PTR_ERR(ft);
 		goto err_pol_ft;
-- 
2.38.1


^ permalink raw reply related	[flat|nested] 16+ messages in thread

* [PATCH xfrm-next 06/13] net/mlx5e: Add statistics for Rx/Tx IPsec offloaded flows
  2022-12-02 20:14 [PATCH xfrm-next 00/13] mlx5 IPsec packet offload support (Part I) Leon Romanovsky
                   ` (4 preceding siblings ...)
  2022-12-02 20:14 ` [PATCH xfrm-next 05/13] net/mlx5e: Improve IPsec flow steering autogroup Leon Romanovsky
@ 2022-12-02 20:14 ` Leon Romanovsky
  2022-12-02 20:14 ` [PATCH xfrm-next 07/13] net/mlx5e: Skip IPsec encryption for TX path without matching policy Leon Romanovsky
                   ` (8 subsequent siblings)
  14 siblings, 0 replies; 16+ messages in thread
From: Leon Romanovsky @ 2022-12-02 20:14 UTC (permalink / raw)
  To: Steffen Klassert
  Cc: Raed Salem, Leon Romanovsky, David S. Miller, Eric Dumazet,
	Herbert Xu, Jakub Kicinski, netdev, Bharat Bhushan

From: Raed Salem <raeds@nvidia.com>

Add the following statistics:
Rx successfully processed IPsec packets:
ipsec_rx_pkts  : Number of packets passed Rx IPsec flow
ipsec_rx_bytes : Number of bytes passed Rx IPsec flow

Rx dropped IPsec policy packets:
ipsec_rx_drop_pkts: Number of packets dropped in Rx datapath due to IPsec drop policy
ipsec_rx_drop_bytes: Number of bytes dropped in Rx datapath due to IPsec drop policy

TX successfully encrypted and encapsulated IPsec packets:
ipsec_tx_pkts  : Number of packets encrypted and encapsulated successfully
ipsec_tx_bytes : Number of bytes encrypted and encapsulated successfully

Tx dropped IPsec policy packets:
ipsec_tx_drop_pkts: Number of packets dropped in Tx datapath due to IPsec drop policy
ipsec_tx_drop_bytes: Number of bytes dropped in Tx datapath due to IPsec drop policy

The above can be seen using:
ethtool -S <ifc> |grep ipsec

Signed-off-by: Raed Salem <raeds@nvidia.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
---
 .../mellanox/mlx5/core/en_accel/ipsec.h       |  15 ++
 .../mellanox/mlx5/core/en_accel/ipsec_fs.c    | 188 +++++++++++++++---
 .../mellanox/mlx5/core/en_accel/ipsec_stats.c |  52 +++++
 .../ethernet/mellanox/mlx5/core/en_stats.c    |   1 +
 .../ethernet/mellanox/mlx5/core/en_stats.h    |   1 +
 5 files changed, 233 insertions(+), 24 deletions(-)

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
index 25b865590488..2e7654e90314 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
@@ -87,6 +87,17 @@ enum mlx5_ipsec_cap {
 
 struct mlx5e_priv;
 
+struct mlx5e_ipsec_hw_stats {
+	u64 ipsec_rx_pkts;
+	u64 ipsec_rx_bytes;
+	u64 ipsec_rx_drop_pkts;
+	u64 ipsec_rx_drop_bytes;
+	u64 ipsec_tx_pkts;
+	u64 ipsec_tx_bytes;
+	u64 ipsec_tx_drop_pkts;
+	u64 ipsec_tx_drop_bytes;
+};
+
 struct mlx5e_ipsec_sw_stats {
 	atomic64_t ipsec_rx_drop_sp_alloc;
 	atomic64_t ipsec_rx_drop_sadb_miss;
@@ -111,6 +122,7 @@ struct mlx5e_ipsec {
 	DECLARE_HASHTABLE(sadb_rx, MLX5E_IPSEC_SADB_RX_BITS);
 	spinlock_t sadb_rx_lock; /* Protects sadb_rx */
 	struct mlx5e_ipsec_sw_stats sw_stats;
+	struct mlx5e_ipsec_hw_stats hw_stats;
 	struct workqueue_struct *wq;
 	struct mlx5e_flow_steering *fs;
 	struct mlx5e_ipsec_rx *rx_ipv4;
@@ -200,6 +212,9 @@ void mlx5_accel_esp_modify_xfrm(struct mlx5e_ipsec_sa_entry *sa_entry,
 int mlx5e_ipsec_aso_init(struct mlx5e_ipsec *ipsec);
 void mlx5e_ipsec_aso_cleanup(struct mlx5e_ipsec *ipsec);
 
+void mlx5e_accel_ipsec_fs_read_stats(struct mlx5e_priv *priv,
+				     void *ipsec_stats);
+
 static inline struct mlx5_core_dev *
 mlx5e_ipsec_sa2dev(struct mlx5e_ipsec_sa_entry *sa_entry)
 {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
index 0af831128c4e..8ba92b00afdb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
@@ -9,6 +9,11 @@
 
 #define NUM_IPSEC_FTE BIT(15)
 
+struct mlx5e_ipsec_fc {
+	struct mlx5_fc *cnt;
+	struct mlx5_fc *drop;
+};
+
 struct mlx5e_ipsec_ft {
 	struct mutex mutex; /* Protect changes to this struct */
 	struct mlx5_flow_table *pol;
@@ -27,12 +32,14 @@ struct mlx5e_ipsec_rx {
 	struct mlx5e_ipsec_miss pol;
 	struct mlx5e_ipsec_miss sa;
 	struct mlx5e_ipsec_rule status;
+	struct mlx5e_ipsec_fc *fc;
 };
 
 struct mlx5e_ipsec_tx {
 	struct mlx5e_ipsec_ft ft;
 	struct mlx5e_ipsec_miss pol;
 	struct mlx5_flow_namespace *ns;
+	struct mlx5e_ipsec_fc *fc;
 };
 
 /* IPsec RX flow steering */
@@ -93,9 +100,10 @@ static int ipsec_status_rule(struct mlx5_core_dev *mdev,
 
 	/* create fte */
 	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
-			  MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+			  MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+			  MLX5_FLOW_CONTEXT_ACTION_COUNT;
 	flow_act.modify_hdr = modify_hdr;
-	fte = mlx5_add_flow_rules(rx->ft.status, spec, &flow_act, dest, 1);
+	fte = mlx5_add_flow_rules(rx->ft.status, spec, &flow_act, dest, 2);
 	if (IS_ERR(fte)) {
 		err = PTR_ERR(fte);
 		mlx5_core_err(mdev, "fail to add ipsec rx err copy rule err=%d\n", err);
@@ -178,7 +186,7 @@ static int rx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
 {
 	struct mlx5_flow_namespace *ns = mlx5e_fs_get_ns(ipsec->fs, false);
 	struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(ipsec->fs, false);
-	struct mlx5_flow_destination dest;
+	struct mlx5_flow_destination dest[2];
 	struct mlx5_flow_table *ft;
 	int err;
 
@@ -189,8 +197,10 @@ static int rx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
 
 	rx->ft.status = ft;
 
-	dest = mlx5_ttc_get_default_dest(ttc, family2tt(family));
-	err = ipsec_status_rule(mdev, rx, &dest);
+	dest[0] = mlx5_ttc_get_default_dest(ttc, family2tt(family));
+	dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+	dest[1].counter_id = mlx5_fc_id(rx->fc->cnt);
+	err = ipsec_status_rule(mdev, rx, dest);
 	if (err)
 		goto err_add;
 
@@ -203,7 +213,7 @@ static int rx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
 	}
 	rx->ft.sa = ft;
 
-	err = ipsec_miss_create(mdev, rx->ft.sa, &rx->sa, &dest);
+	err = ipsec_miss_create(mdev, rx->ft.sa, &rx->sa, dest);
 	if (err)
 		goto err_fs;
 
@@ -214,10 +224,10 @@ static int rx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
 		goto err_pol_ft;
 	}
 	rx->ft.pol = ft;
-	memset(&dest, 0x00, sizeof(dest));
-	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
-	dest.ft = rx->ft.sa;
-	err = ipsec_miss_create(mdev, rx->ft.pol, &rx->pol, &dest);
+	memset(dest, 0x00, 2 * sizeof(*dest));
+	dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+	dest[0].ft = rx->ft.sa;
+	err = ipsec_miss_create(mdev, rx->ft.pol, &rx->pol, dest);
 	if (err)
 		goto err_pol_miss;
 
@@ -605,6 +615,7 @@ static int tx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
 	struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
 	struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
 	struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
+	struct mlx5_flow_destination dest = {};
 	struct mlx5_flow_act flow_act = {};
 	struct mlx5_flow_handle *rule;
 	struct mlx5_flow_spec *spec;
@@ -647,8 +658,11 @@ static int tx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
 	flow_act.crypto.obj_id = sa_entry->ipsec_obj_id;
 	flow_act.flags |= FLOW_ACT_NO_APPEND;
 	flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_ALLOW |
-			   MLX5_FLOW_CONTEXT_ACTION_CRYPTO_ENCRYPT;
-	rule = mlx5_add_flow_rules(tx->ft.sa, spec, &flow_act, NULL, 0);
+			   MLX5_FLOW_CONTEXT_ACTION_CRYPTO_ENCRYPT |
+			   MLX5_FLOW_CONTEXT_ACTION_COUNT;
+	dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+	dest.counter_id = mlx5_fc_id(tx->fc->cnt);
+	rule = mlx5_add_flow_rules(tx->ft.sa, spec, &flow_act, &dest, 1);
 	if (IS_ERR(rule)) {
 		err = PTR_ERR(rule);
 		mlx5_core_err(mdev, "fail to add TX ipsec rule err=%d\n", err);
@@ -674,12 +688,12 @@ static int tx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
 {
 	struct mlx5_accel_pol_xfrm_attrs *attrs = &pol_entry->attrs;
 	struct mlx5_core_dev *mdev = mlx5e_ipsec_pol2dev(pol_entry);
-	struct mlx5_flow_destination dest = {};
+	struct mlx5_flow_destination dest[2] = {};
 	struct mlx5_flow_act flow_act = {};
 	struct mlx5_flow_handle *rule;
 	struct mlx5_flow_spec *spec;
 	struct mlx5e_ipsec_tx *tx;
-	int err;
+	int err, dstn = 0;
 
 	tx = tx_ft_get(mdev, pol_entry->ipsec);
 	if (IS_ERR(tx))
@@ -703,7 +717,11 @@ static int tx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
 		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
 		break;
 	case XFRM_POLICY_BLOCK:
-		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
+		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
+				   MLX5_FLOW_CONTEXT_ACTION_COUNT;
+		dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+		dest[dstn].counter_id = mlx5_fc_id(tx->fc->drop);
+		dstn++;
 		break;
 	default:
 		WARN_ON(true);
@@ -712,9 +730,10 @@ static int tx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
 	}
 
 	flow_act.flags |= FLOW_ACT_NO_APPEND;
-	dest.ft = tx->ft.sa;
-	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
-	rule = mlx5_add_flow_rules(tx->ft.pol, spec, &flow_act, &dest, 1);
+	dest[dstn].ft = tx->ft.sa;
+	dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+	dstn++;
+	rule = mlx5_add_flow_rules(tx->ft.pol, spec, &flow_act, dest, dstn);
 	if (IS_ERR(rule)) {
 		err = PTR_ERR(rule);
 		mlx5_core_err(mdev, "fail to add TX ipsec rule err=%d\n", err);
@@ -736,12 +755,12 @@ static int rx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
 {
 	struct mlx5_accel_pol_xfrm_attrs *attrs = &pol_entry->attrs;
 	struct mlx5_core_dev *mdev = mlx5e_ipsec_pol2dev(pol_entry);
-	struct mlx5_flow_destination dest = {};
+	struct mlx5_flow_destination dest[2];
 	struct mlx5_flow_act flow_act = {};
 	struct mlx5_flow_handle *rule;
 	struct mlx5_flow_spec *spec;
 	struct mlx5e_ipsec_rx *rx;
-	int err;
+	int err, dstn = 0;
 
 	rx = rx_ft_get(mdev, pol_entry->ipsec, attrs->family);
 	if (IS_ERR(rx))
@@ -765,7 +784,10 @@ static int rx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
 		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
 		break;
 	case XFRM_POLICY_BLOCK:
-		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
+		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
+		dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+		dest[dstn].counter_id = mlx5_fc_id(rx->fc->drop);
+		dstn++;
 		break;
 	default:
 		WARN_ON(true);
@@ -774,9 +796,10 @@ static int rx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
 	}
 
 	flow_act.flags |= FLOW_ACT_NO_APPEND;
-	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
-	dest.ft = rx->ft.sa;
-	rule = mlx5_add_flow_rules(rx->ft.pol, spec, &flow_act, &dest, 1);
+	dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+	dest[dstn].ft = rx->ft.sa;
+	dstn++;
+	rule = mlx5_add_flow_rules(rx->ft.pol, spec, &flow_act, dest, dstn);
 	if (IS_ERR(rule)) {
 		err = PTR_ERR(rule);
 		mlx5_core_err(mdev, "Fail to add RX IPsec policy rule err=%d\n", err);
@@ -794,6 +817,116 @@ static int rx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
 	return err;
 }
 
+static void ipsec_fs_destroy_counters(struct mlx5e_ipsec *ipsec)
+{
+	struct mlx5e_ipsec_rx *rx_ipv4 = ipsec->rx_ipv4;
+	struct mlx5_core_dev *mdev = ipsec->mdev;
+	struct mlx5e_ipsec_tx *tx = ipsec->tx;
+
+	mlx5_fc_destroy(mdev, tx->fc->drop);
+	mlx5_fc_destroy(mdev, tx->fc->cnt);
+	kfree(tx->fc);
+	mlx5_fc_destroy(mdev, rx_ipv4->fc->drop);
+	mlx5_fc_destroy(mdev, rx_ipv4->fc->cnt);
+	kfree(rx_ipv4->fc);
+}
+
+static int ipsec_fs_init_counters(struct mlx5e_ipsec *ipsec)
+{
+	struct mlx5e_ipsec_rx *rx_ipv4 = ipsec->rx_ipv4;
+	struct mlx5e_ipsec_rx *rx_ipv6 = ipsec->rx_ipv6;
+	struct mlx5_core_dev *mdev = ipsec->mdev;
+	struct mlx5e_ipsec_tx *tx = ipsec->tx;
+	struct mlx5e_ipsec_fc *fc;
+	struct mlx5_fc *counter;
+	int err;
+
+	fc = kzalloc(sizeof(*rx_ipv4->fc), GFP_KERNEL);
+	if (!fc)
+		return -ENOMEM;
+
+	/* Both IPv4 and IPv6 point to same flow counters struct. */
+	rx_ipv4->fc = fc;
+	rx_ipv6->fc = fc;
+	counter = mlx5_fc_create(mdev, false);
+	if (IS_ERR(counter)) {
+		err = PTR_ERR(counter);
+		goto err_rx_cnt;
+	}
+
+	fc->cnt = counter;
+	counter = mlx5_fc_create(mdev, false);
+	if (IS_ERR(counter)) {
+		err = PTR_ERR(counter);
+		goto err_rx_drop;
+	}
+
+	fc->drop = counter;
+	fc = kzalloc(sizeof(*tx->fc), GFP_KERNEL);
+	if (!fc) {
+		err = -ENOMEM;
+		goto err_tx_fc;
+	}
+
+	tx->fc = fc;
+	counter = mlx5_fc_create(mdev, false);
+	if (IS_ERR(counter)) {
+		err = PTR_ERR(counter);
+		goto err_tx_cnt;
+	}
+
+	fc->cnt = counter;
+	counter = mlx5_fc_create(mdev, false);
+	if (IS_ERR(counter)) {
+		err = PTR_ERR(counter);
+		goto err_tx_drop;
+	}
+
+	fc->drop = counter;
+	return 0;
+
+err_tx_drop:
+	mlx5_fc_destroy(mdev, tx->fc->cnt);
+err_tx_cnt:
+	kfree(tx->fc);
+err_tx_fc:
+	mlx5_fc_destroy(mdev, rx_ipv4->fc->drop);
+err_rx_drop:
+	mlx5_fc_destroy(mdev, rx_ipv4->fc->cnt);
+err_rx_cnt:
+	kfree(rx_ipv4->fc);
+	return err;
+}
+
+void mlx5e_accel_ipsec_fs_read_stats(struct mlx5e_priv *priv, void *ipsec_stats)
+{
+	struct mlx5_core_dev *mdev = priv->mdev;
+	struct mlx5e_ipsec *ipsec = priv->ipsec;
+	struct mlx5e_ipsec_hw_stats *stats;
+	struct mlx5e_ipsec_fc *fc;
+
+	stats = (struct mlx5e_ipsec_hw_stats *)ipsec_stats;
+
+	stats->ipsec_rx_pkts = 0;
+	stats->ipsec_rx_bytes = 0;
+	stats->ipsec_rx_drop_pkts = 0;
+	stats->ipsec_rx_drop_bytes = 0;
+	stats->ipsec_tx_pkts = 0;
+	stats->ipsec_tx_bytes = 0;
+	stats->ipsec_tx_drop_pkts = 0;
+	stats->ipsec_tx_drop_bytes = 0;
+
+	fc = ipsec->rx_ipv4->fc;
+	mlx5_fc_query(mdev, fc->cnt, &stats->ipsec_rx_pkts, &stats->ipsec_rx_bytes);
+	mlx5_fc_query(mdev, fc->drop, &stats->ipsec_rx_drop_pkts,
+		      &stats->ipsec_rx_drop_bytes);
+
+	fc = ipsec->tx->fc;
+	mlx5_fc_query(mdev, fc->cnt, &stats->ipsec_tx_pkts, &stats->ipsec_tx_bytes);
+	mlx5_fc_query(mdev, fc->drop, &stats->ipsec_tx_drop_pkts,
+		      &stats->ipsec_tx_drop_bytes);
+}
+
 int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
 {
 	if (sa_entry->attrs.dir == XFRM_DEV_OFFLOAD_OUT)
@@ -848,6 +981,7 @@ void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_ipsec *ipsec)
 	if (!ipsec->tx)
 		return;
 
+	ipsec_fs_destroy_counters(ipsec);
 	mutex_destroy(&ipsec->tx->ft.mutex);
 	WARN_ON(ipsec->tx->ft.refcnt);
 	kfree(ipsec->tx);
@@ -883,6 +1017,10 @@ int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec)
 	if (!ipsec->rx_ipv6)
 		goto err_rx_ipv6;
 
+	err = ipsec_fs_init_counters(ipsec);
+	if (err)
+		goto err_counters;
+
 	mutex_init(&ipsec->tx->ft.mutex);
 	mutex_init(&ipsec->rx_ipv4->ft.mutex);
 	mutex_init(&ipsec->rx_ipv6->ft.mutex);
@@ -890,6 +1028,8 @@ int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec)
 
 	return 0;
 
+err_counters:
+	kfree(ipsec->rx_ipv6);
 err_rx_ipv6:
 	kfree(ipsec->rx_ipv4);
 err_rx_ipv4:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c
index 9de84821dafb..e0e36a09721c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c
@@ -37,6 +37,17 @@
 #include "en.h"
 #include "ipsec.h"
 
+static const struct counter_desc mlx5e_ipsec_hw_stats_desc[] = {
+	{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_hw_stats, ipsec_rx_pkts) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_hw_stats, ipsec_rx_bytes) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_hw_stats, ipsec_rx_drop_pkts) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_hw_stats, ipsec_rx_drop_bytes) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_hw_stats, ipsec_tx_pkts) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_hw_stats, ipsec_tx_bytes) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_hw_stats, ipsec_tx_drop_pkts) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_hw_stats, ipsec_tx_drop_bytes) },
+};
+
 static const struct counter_desc mlx5e_ipsec_sw_stats_desc[] = {
 	{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_rx_drop_sp_alloc) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_rx_drop_sadb_miss) },
@@ -50,8 +61,48 @@ static const struct counter_desc mlx5e_ipsec_sw_stats_desc[] = {
 #define MLX5E_READ_CTR_ATOMIC64(ptr, dsc, i) \
 	atomic64_read((atomic64_t *)((char *)(ptr) + (dsc)[i].offset))
 
+#define NUM_IPSEC_HW_COUNTERS ARRAY_SIZE(mlx5e_ipsec_hw_stats_desc)
 #define NUM_IPSEC_SW_COUNTERS ARRAY_SIZE(mlx5e_ipsec_sw_stats_desc)
 
+static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(ipsec_hw)
+{
+	if (!priv->ipsec)
+		return 0;
+
+	return NUM_IPSEC_HW_COUNTERS;
+}
+
+static inline MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(ipsec_hw) {}
+
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(ipsec_hw)
+{
+	unsigned int i;
+
+	if (!priv->ipsec)
+		return idx;
+
+	for (i = 0; i < NUM_IPSEC_HW_COUNTERS; i++)
+		strcpy(data + (idx++) * ETH_GSTRING_LEN,
+		       mlx5e_ipsec_hw_stats_desc[i].format);
+
+	return idx;
+}
+
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ipsec_hw)
+{
+	int i;
+
+	if (!priv->ipsec)
+		return idx;
+
+	mlx5e_accel_ipsec_fs_read_stats(priv, &priv->ipsec->hw_stats);
+	for (i = 0; i < NUM_IPSEC_HW_COUNTERS; i++)
+		data[idx++] = MLX5E_READ_CTR_ATOMIC64(&priv->ipsec->hw_stats,
+						      mlx5e_ipsec_hw_stats_desc, i);
+
+	return idx;
+}
+
 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(ipsec_sw)
 {
 	return priv->ipsec ? NUM_IPSEC_SW_COUNTERS : 0;
@@ -81,4 +132,5 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ipsec_sw)
 	return idx;
 }
 
+MLX5E_DEFINE_STATS_GRP(ipsec_hw, 0);
 MLX5E_DEFINE_STATS_GRP(ipsec_sw, 0);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index 70c4ea3841d7..6687b8136e44 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -2480,6 +2480,7 @@ mlx5e_stats_grp_t mlx5e_nic_stats_grps[] = {
 	&MLX5E_STATS_GRP(per_prio),
 	&MLX5E_STATS_GRP(pme),
 #ifdef CONFIG_MLX5_EN_IPSEC
+	&MLX5E_STATS_GRP(ipsec_hw),
 	&MLX5E_STATS_GRP(ipsec_sw),
 #endif
 	&MLX5E_STATS_GRP(tls),
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index cbc831ca646b..712cac10ba49 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -490,6 +490,7 @@ extern MLX5E_DECLARE_STATS_GRP(per_prio);
 extern MLX5E_DECLARE_STATS_GRP(pme);
 extern MLX5E_DECLARE_STATS_GRP(channels);
 extern MLX5E_DECLARE_STATS_GRP(per_port_buff_congest);
+extern MLX5E_DECLARE_STATS_GRP(ipsec_hw);
 extern MLX5E_DECLARE_STATS_GRP(ipsec_sw);
 extern MLX5E_DECLARE_STATS_GRP(ptp);
 extern MLX5E_DECLARE_STATS_GRP(macsec_hw);
-- 
2.38.1


^ permalink raw reply related	[flat|nested] 16+ messages in thread

* [PATCH xfrm-next 07/13] net/mlx5e: Skip IPsec encryption for TX path without matching policy
  2022-12-02 20:14 [PATCH xfrm-next 00/13] mlx5 IPsec packet offload support (Part I) Leon Romanovsky
                   ` (5 preceding siblings ...)
  2022-12-02 20:14 ` [PATCH xfrm-next 06/13] net/mlx5e: Add statistics for Rx/Tx IPsec offloaded flows Leon Romanovsky
@ 2022-12-02 20:14 ` Leon Romanovsky
  2022-12-02 20:14 ` [PATCH xfrm-next 08/13] net/mlx5e: Provide intermediate pointer to access IPsec struct Leon Romanovsky
                   ` (7 subsequent siblings)
  14 siblings, 0 replies; 16+ messages in thread
From: Leon Romanovsky @ 2022-12-02 20:14 UTC (permalink / raw)
  To: Steffen Klassert
  Cc: Leon Romanovsky, David S. Miller, Eric Dumazet, Herbert Xu,
	Jakub Kicinski, netdev, Bharat Bhushan, Raed Salem,
	Saeed Mahameed

From: Leon Romanovsky <leonro@nvidia.com>

The software implementation of IPsec skips encryption of packets in
the TX path if no matching policy is found. Align the HW
implementation to this behavior by requiring a matching reqid for the
offloaded policy and SA.

Reviewed-by: Raed Salem <raeds@nvidia.com>
Reviewed-by: Saeed Mahameed <saeedm@nvidia.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
---
 .../mellanox/mlx5/core/en_accel/ipsec.c       |  7 ++++
 .../mellanox/mlx5/core/en_accel/ipsec.h       |  4 +-
 .../mellanox/mlx5/core/en_accel/ipsec_fs.c    | 39 ++++++++++++++++---
 3 files changed, 43 insertions(+), 7 deletions(-)

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
index 65f73a5c29ba..8f08dbf2206e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
@@ -184,6 +184,7 @@ mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
 	memcpy(&attrs->daddr, x->id.daddr.a6, sizeof(attrs->daddr));
 	attrs->family = x->props.family;
 	attrs->type = x->xso.type;
+	attrs->reqid = x->props.reqid;
 }
 
 static inline int mlx5e_xfrm_validate_state(struct xfrm_state *x)
@@ -267,6 +268,11 @@ static inline int mlx5e_xfrm_validate_state(struct xfrm_state *x)
 				    x->replay_esn->replay_window);
 			return -EINVAL;
 		}
+
+		if (!x->props.reqid) {
+			netdev_info(netdev, "Cannot offload without reqid\n");
+			return -EINVAL;
+		}
 	}
 	return 0;
 }
@@ -503,6 +509,7 @@ mlx5e_ipsec_build_accel_pol_attrs(struct mlx5e_ipsec_pol_entry *pol_entry,
 	attrs->dir = x->xdo.dir;
 	attrs->action = x->action;
 	attrs->type = XFRM_DEV_OFFLOAD_PACKET;
+	attrs->reqid = x->xfrm_vec[0].reqid;
 }
 
 static int mlx5e_xfrm_add_policy(struct xfrm_policy *x)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
index 2e7654e90314..492be255d267 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
@@ -77,6 +77,7 @@ struct mlx5_accel_esp_xfrm_attrs {
 	u8 family;
 	u32 replay_window;
 	u32 authsize;
+	u32 reqid;
 };
 
 enum mlx5_ipsec_cap {
@@ -178,12 +179,13 @@ struct mlx5_accel_pol_xfrm_attrs {
 	u8 action;
 	u8 type : 2;
 	u8 dir : 2;
+	u32 reqid;
 };
 
 struct mlx5e_ipsec_pol_entry {
 	struct xfrm_policy *x;
 	struct mlx5e_ipsec *ipsec;
-	struct mlx5_flow_handle *rule;
+	struct mlx5e_ipsec_rule ipsec_rule;
 	struct mlx5_accel_pol_xfrm_attrs attrs;
 };
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
index 8ba92b00afdb..9f19f4b59a70 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
@@ -456,6 +456,17 @@ static void setup_fte_reg_a(struct mlx5_flow_spec *spec)
 		 misc_parameters_2.metadata_reg_a, MLX5_ETH_WQE_FT_META_IPSEC);
 }
 
+static void setup_fte_reg_c0(struct mlx5_flow_spec *spec, u32 reqid)
+{
+	/* Pass policy check before choosing this SA */
+	spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
+
+	MLX5_SET(fte_match_param, spec->match_criteria,
+		 misc_parameters_2.metadata_reg_c_0, reqid);
+	MLX5_SET(fte_match_param, spec->match_value,
+		 misc_parameters_2.metadata_reg_c_0, reqid);
+}
+
 static int setup_modify_header(struct mlx5_core_dev *mdev, u32 val, u8 dir,
 			       struct mlx5_flow_act *flow_act)
 {
@@ -470,6 +481,11 @@ static int setup_modify_header(struct mlx5_core_dev *mdev, u32 val, u8 dir,
 			 MLX5_ACTION_IN_FIELD_METADATA_REG_B);
 		ns_type = MLX5_FLOW_NAMESPACE_KERNEL;
 		break;
+	case XFRM_DEV_OFFLOAD_OUT:
+		MLX5_SET(set_action_in, action, field,
+			 MLX5_ACTION_IN_FIELD_METADATA_REG_C_0);
+		ns_type = MLX5_FLOW_NAMESPACE_EGRESS;
+		break;
 	default:
 		return -EINVAL;
 	}
@@ -646,6 +662,7 @@ static int tx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
 		setup_fte_reg_a(spec);
 		break;
 	case XFRM_DEV_OFFLOAD_PACKET:
+		setup_fte_reg_c0(spec, attrs->reqid);
 		err = setup_pkt_reformat(mdev, attrs, &flow_act);
 		if (err)
 			goto err_pkt_reformat;
@@ -712,6 +729,11 @@ static int tx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
 
 	setup_fte_no_frags(spec);
 
+	err = setup_modify_header(mdev, attrs->reqid, XFRM_DEV_OFFLOAD_OUT,
+				  &flow_act);
+	if (err)
+		goto err_mod_header;
+
 	switch (attrs->action) {
 	case XFRM_POLICY_ALLOW:
 		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
@@ -741,10 +763,13 @@ static int tx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
 	}
 
 	kvfree(spec);
-	pol_entry->rule = rule;
+	pol_entry->ipsec_rule.rule = rule;
+	pol_entry->ipsec_rule.modify_hdr = flow_act.modify_hdr;
 	return 0;
 
 err_action:
+	mlx5_modify_header_dealloc(mdev, flow_act.modify_hdr);
+err_mod_header:
 	kvfree(spec);
 err_alloc:
 	tx_ft_put(pol_entry->ipsec);
@@ -807,7 +832,7 @@ static int rx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
 	}
 
 	kvfree(spec);
-	pol_entry->rule = rule;
+	pol_entry->ipsec_rule.rule = rule;
 	return 0;
 
 err_action:
@@ -964,16 +989,18 @@ int mlx5e_accel_ipsec_fs_add_pol(struct mlx5e_ipsec_pol_entry *pol_entry)
 
 void mlx5e_accel_ipsec_fs_del_pol(struct mlx5e_ipsec_pol_entry *pol_entry)
 {
+	struct mlx5e_ipsec_rule *ipsec_rule = &pol_entry->ipsec_rule;
 	struct mlx5_core_dev *mdev = mlx5e_ipsec_pol2dev(pol_entry);
 
-	mlx5_del_flow_rules(pol_entry->rule);
+	mlx5_del_flow_rules(ipsec_rule->rule);
 
-	if (pol_entry->attrs.dir == XFRM_DEV_OFFLOAD_OUT) {
-		tx_ft_put(pol_entry->ipsec);
+	if (pol_entry->attrs.dir == XFRM_DEV_OFFLOAD_IN) {
+		rx_ft_put(mdev, pol_entry->ipsec, pol_entry->attrs.family);
 		return;
 	}
 
-	rx_ft_put(mdev, pol_entry->ipsec, pol_entry->attrs.family);
+	mlx5_modify_header_dealloc(mdev, ipsec_rule->modify_hdr);
+	tx_ft_put(pol_entry->ipsec);
 }
 
 void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_ipsec *ipsec)
-- 
2.38.1


^ permalink raw reply related	[flat|nested] 16+ messages in thread

* [PATCH xfrm-next 08/13] net/mlx5e: Provide intermediate pointer to access IPsec struct
  2022-12-02 20:14 [PATCH xfrm-next 00/13] mlx5 IPsec packet offload support (Part I) Leon Romanovsky
                   ` (6 preceding siblings ...)
  2022-12-02 20:14 ` [PATCH xfrm-next 07/13] net/mlx5e: Skip IPsec encryption for TX path without matching policy Leon Romanovsky
@ 2022-12-02 20:14 ` Leon Romanovsky
  2022-12-02 20:14 ` [PATCH xfrm-next 09/13] net/mlx5e: Store all XFRM SAs in Xarray Leon Romanovsky
                   ` (6 subsequent siblings)
  14 siblings, 0 replies; 16+ messages in thread
From: Leon Romanovsky @ 2022-12-02 20:14 UTC (permalink / raw)
  To: Steffen Klassert
  Cc: Leon Romanovsky, David S. Miller, Eric Dumazet, Herbert Xu,
	Jakub Kicinski, netdev, Bharat Bhushan, Saeed Mahameed

From: Leon Romanovsky <leonro@nvidia.com>

Improve readability by providing a direct pointer to struct mlx5e_ipsec.

Reviewed-by: Saeed Mahameed <saeedm@nvidia.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
---
 .../mellanox/mlx5/core/en_accel/ipsec_rxtx.c         | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
index 6859f1c1a831..9f07e58f7737 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
@@ -312,23 +312,23 @@ void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
 				       struct mlx5_cqe64 *cqe)
 {
 	u32 ipsec_meta_data = be32_to_cpu(cqe->ft_metadata);
-	struct mlx5e_priv *priv;
+	struct mlx5e_priv *priv = netdev_priv(netdev);
+	struct mlx5e_ipsec *ipsec = priv->ipsec;
 	struct xfrm_offload *xo;
 	struct xfrm_state *xs;
 	struct sec_path *sp;
 	u32  sa_handle;
 
 	sa_handle = MLX5_IPSEC_METADATA_HANDLE(ipsec_meta_data);
-	priv = netdev_priv(netdev);
 	sp = secpath_set(skb);
 	if (unlikely(!sp)) {
-		atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_sp_alloc);
+		atomic64_inc(&ipsec->sw_stats.ipsec_rx_drop_sp_alloc);
 		return;
 	}
 
-	xs = mlx5e_ipsec_sadb_rx_lookup(priv->ipsec, sa_handle);
+	xs = mlx5e_ipsec_sadb_rx_lookup(ipsec, sa_handle);
 	if (unlikely(!xs)) {
-		atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_sadb_miss);
+		atomic64_inc(&ipsec->sw_stats.ipsec_rx_drop_sadb_miss);
 		return;
 	}
 
@@ -349,6 +349,6 @@ void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
 		xo->status = CRYPTO_INVALID_PACKET_SYNTAX;
 		break;
 	default:
-		atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_syndrome);
+		atomic64_inc(&ipsec->sw_stats.ipsec_rx_drop_syndrome);
 	}
 }
-- 
2.38.1


^ permalink raw reply related	[flat|nested] 16+ messages in thread

* [PATCH xfrm-next 09/13] net/mlx5e: Store all XFRM SAs in Xarray
  2022-12-02 20:14 [PATCH xfrm-next 00/13] mlx5 IPsec packet offload support (Part I) Leon Romanovsky
                   ` (7 preceding siblings ...)
  2022-12-02 20:14 ` [PATCH xfrm-next 08/13] net/mlx5e: Provide intermediate pointer to access IPsec struct Leon Romanovsky
@ 2022-12-02 20:14 ` Leon Romanovsky
  2022-12-02 20:14 ` [PATCH xfrm-next 10/13] net/mlx5e: Update IPsec soft and hard limits Leon Romanovsky
                   ` (5 subsequent siblings)
  14 siblings, 0 replies; 16+ messages in thread
From: Leon Romanovsky @ 2022-12-02 20:14 UTC (permalink / raw)
  To: Steffen Klassert
  Cc: Leon Romanovsky, David S. Miller, Eric Dumazet, Herbert Xu,
	Jakub Kicinski, netdev, Bharat Bhushan

From: Leon Romanovsky <leonro@nvidia.com>

Instead of performing custom hash calculations, rely on the FW, which
returns a unique identifier for every created SA. That identifier is
Xarray-ready, which provides better semantics with efficient access.

In addition, store both TX and RX SAs to allow correlation between event
generated by HW when limits are armed and XFRM states.

Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
---
 .../mellanox/mlx5/core/en_accel/ipsec.c       | 82 +++++--------------
 .../mellanox/mlx5/core/en_accel/ipsec.h       |  8 +-
 .../mellanox/mlx5/core/en_accel/ipsec_rxtx.c  | 12 ++-
 3 files changed, 28 insertions(+), 74 deletions(-)

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
index 8f08dbf2206e..fe10f1a2a04a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
@@ -50,57 +50,6 @@ static struct mlx5e_ipsec_pol_entry *to_ipsec_pol_entry(struct xfrm_policy *x)
 	return (struct mlx5e_ipsec_pol_entry *)x->xdo.offload_handle;
 }
 
-struct xfrm_state *mlx5e_ipsec_sadb_rx_lookup(struct mlx5e_ipsec *ipsec,
-					      unsigned int handle)
-{
-	struct mlx5e_ipsec_sa_entry *sa_entry;
-	struct xfrm_state *ret = NULL;
-
-	rcu_read_lock();
-	hash_for_each_possible_rcu(ipsec->sadb_rx, sa_entry, hlist, handle)
-		if (sa_entry->handle == handle) {
-			ret = sa_entry->x;
-			xfrm_state_hold(ret);
-			break;
-		}
-	rcu_read_unlock();
-
-	return ret;
-}
-
-static int mlx5e_ipsec_sadb_rx_add(struct mlx5e_ipsec_sa_entry *sa_entry)
-{
-	unsigned int handle = sa_entry->ipsec_obj_id;
-	struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
-	struct mlx5e_ipsec_sa_entry *_sa_entry;
-	unsigned long flags;
-
-	rcu_read_lock();
-	hash_for_each_possible_rcu(ipsec->sadb_rx, _sa_entry, hlist, handle)
-		if (_sa_entry->handle == handle) {
-			rcu_read_unlock();
-			return  -EEXIST;
-		}
-	rcu_read_unlock();
-
-	spin_lock_irqsave(&ipsec->sadb_rx_lock, flags);
-	sa_entry->handle = handle;
-	hash_add_rcu(ipsec->sadb_rx, &sa_entry->hlist, sa_entry->handle);
-	spin_unlock_irqrestore(&ipsec->sadb_rx_lock, flags);
-
-	return 0;
-}
-
-static void mlx5e_ipsec_sadb_rx_del(struct mlx5e_ipsec_sa_entry *sa_entry)
-{
-	struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
-	unsigned long flags;
-
-	spin_lock_irqsave(&ipsec->sadb_rx_lock, flags);
-	hash_del_rcu(&sa_entry->hlist);
-	spin_unlock_irqrestore(&ipsec->sadb_rx_lock, flags);
-}
-
 static bool mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry)
 {
 	struct xfrm_replay_state_esn *replay_esn;
@@ -291,6 +240,7 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x)
 {
 	struct mlx5e_ipsec_sa_entry *sa_entry = NULL;
 	struct net_device *netdev = x->xso.real_dev;
+	struct mlx5e_ipsec *ipsec;
 	struct mlx5e_priv *priv;
 	int err;
 
@@ -298,6 +248,7 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x)
 	if (!priv->ipsec)
 		return -EOPNOTSUPP;
 
+	ipsec = priv->ipsec;
 	err = mlx5e_xfrm_validate_state(x);
 	if (err)
 		return err;
@@ -309,7 +260,7 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x)
 	}
 
 	sa_entry->x = x;
-	sa_entry->ipsec = priv->ipsec;
+	sa_entry->ipsec = ipsec;
 
 	/* check esn */
 	mlx5e_ipsec_update_esn_state(sa_entry);
@@ -324,18 +275,22 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x)
 	if (err)
 		goto err_hw_ctx;
 
-	if (x->xso.dir == XFRM_DEV_OFFLOAD_IN) {
-		err = mlx5e_ipsec_sadb_rx_add(sa_entry);
-		if (err)
-			goto err_add_rule;
-	} else {
+	/* We use *_bh() variant because xfrm_timer_handler(), which runs
+	 * in softirq context, can reach our state delete logic and we need
+	 * xa_erase_bh() there.
+	 */
+	err = xa_insert_bh(&ipsec->sadb, sa_entry->ipsec_obj_id, sa_entry,
+			   GFP_KERNEL);
+	if (err)
+		goto err_add_rule;
+
+	if (x->xso.dir == XFRM_DEV_OFFLOAD_OUT)
 		sa_entry->set_iv_op = (x->props.flags & XFRM_STATE_ESN) ?
 				mlx5e_ipsec_set_iv_esn : mlx5e_ipsec_set_iv;
-	}
 
 	INIT_WORK(&sa_entry->modify_work.work, _update_xfrm_state);
 	x->xso.offload_handle = (unsigned long)sa_entry;
-	goto out;
+	return 0;
 
 err_add_rule:
 	mlx5e_accel_ipsec_fs_del_rule(sa_entry);
@@ -350,9 +305,11 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x)
 static void mlx5e_xfrm_del_state(struct xfrm_state *x)
 {
 	struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
+	struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
+	struct mlx5e_ipsec_sa_entry *old;
 
-	if (x->xso.dir == XFRM_DEV_OFFLOAD_IN)
-		mlx5e_ipsec_sadb_rx_del(sa_entry);
+	old = xa_erase_bh(&ipsec->sadb, sa_entry->ipsec_obj_id);
+	WARN_ON(old != sa_entry);
 }
 
 static void mlx5e_xfrm_free_state(struct xfrm_state *x)
@@ -379,8 +336,7 @@ void mlx5e_ipsec_init(struct mlx5e_priv *priv)
 	if (!ipsec)
 		return;
 
-	hash_init(ipsec->sadb_rx);
-	spin_lock_init(&ipsec->sadb_rx_lock);
+	xa_init_flags(&ipsec->sadb, XA_FLAGS_ALLOC);
 	ipsec->mdev = priv->mdev;
 	ipsec->wq = alloc_ordered_workqueue("mlx5e_ipsec: %s", 0,
 					    priv->netdev->name);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
index 492be255d267..724f2df14a97 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
@@ -120,8 +120,7 @@ struct mlx5e_ipsec_aso {
 
 struct mlx5e_ipsec {
 	struct mlx5_core_dev *mdev;
-	DECLARE_HASHTABLE(sadb_rx, MLX5E_IPSEC_SADB_RX_BITS);
-	spinlock_t sadb_rx_lock; /* Protects sadb_rx */
+	struct xarray sadb;
 	struct mlx5e_ipsec_sw_stats sw_stats;
 	struct mlx5e_ipsec_hw_stats hw_stats;
 	struct workqueue_struct *wq;
@@ -150,9 +149,7 @@ struct mlx5e_ipsec_modify_state_work {
 };
 
 struct mlx5e_ipsec_sa_entry {
-	struct hlist_node hlist; /* Item in SADB_RX hashtable */
 	struct mlx5e_ipsec_esn_state esn_state;
-	unsigned int handle; /* Handle in SADB_RX */
 	struct xfrm_state *x;
 	struct mlx5e_ipsec *ipsec;
 	struct mlx5_accel_esp_xfrm_attrs attrs;
@@ -193,9 +190,6 @@ void mlx5e_ipsec_init(struct mlx5e_priv *priv);
 void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv);
 void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv);
 
-struct xfrm_state *mlx5e_ipsec_sadb_rx_lookup(struct mlx5e_ipsec *dev,
-					      unsigned int handle);
-
 void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_ipsec *ipsec);
 int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec);
 int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
index 9f07e58f7737..eab5bc718771 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
@@ -314,8 +314,8 @@ void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
 	u32 ipsec_meta_data = be32_to_cpu(cqe->ft_metadata);
 	struct mlx5e_priv *priv = netdev_priv(netdev);
 	struct mlx5e_ipsec *ipsec = priv->ipsec;
+	struct mlx5e_ipsec_sa_entry *sa_entry;
 	struct xfrm_offload *xo;
-	struct xfrm_state *xs;
 	struct sec_path *sp;
 	u32  sa_handle;
 
@@ -326,13 +326,17 @@ void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
 		return;
 	}
 
-	xs = mlx5e_ipsec_sadb_rx_lookup(ipsec, sa_handle);
-	if (unlikely(!xs)) {
+	rcu_read_lock();
+	sa_entry = xa_load(&ipsec->sadb, sa_handle);
+	if (unlikely(!sa_entry)) {
+		rcu_read_unlock();
 		atomic64_inc(&ipsec->sw_stats.ipsec_rx_drop_sadb_miss);
 		return;
 	}
+	xfrm_state_hold(sa_entry->x);
+	rcu_read_unlock();
 
-	sp->xvec[sp->len++] = xs;
+	sp->xvec[sp->len++] = sa_entry->x;
 	sp->olen++;
 
 	xo = xfrm_offload(skb);
-- 
2.38.1


^ permalink raw reply related	[flat|nested] 16+ messages in thread

* [PATCH xfrm-next 10/13] net/mlx5e: Update IPsec soft and hard limits
  2022-12-02 20:14 [PATCH xfrm-next 00/13] mlx5 IPsec packet offload support (Part I) Leon Romanovsky
                   ` (8 preceding siblings ...)
  2022-12-02 20:14 ` [PATCH xfrm-next 09/13] net/mlx5e: Store all XFRM SAs in Xarray Leon Romanovsky
@ 2022-12-02 20:14 ` Leon Romanovsky
  2022-12-02 20:14 ` [PATCH xfrm-next 11/13] net/mlx5e: Handle hardware IPsec limits events Leon Romanovsky
                   ` (4 subsequent siblings)
  14 siblings, 0 replies; 16+ messages in thread
From: Leon Romanovsky @ 2022-12-02 20:14 UTC (permalink / raw)
  To: Steffen Klassert
  Cc: Leon Romanovsky, David S. Miller, Eric Dumazet, Herbert Xu,
	Jakub Kicinski, netdev, Bharat Bhushan

From: Leon Romanovsky <leonro@nvidia.com>

Implement mlx5 IPsec callback to update current lifetime counters.

Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
---
 .../mellanox/mlx5/core/en_accel/ipsec.c       | 63 +++++++++++++++++++
 .../mellanox/mlx5/core/en_accel/ipsec.h       |  6 ++
 .../mlx5/core/en_accel/ipsec_offload.c        | 57 +++++++++++++++++
 .../net/ethernet/mellanox/mlx5/core/lib/aso.h |  1 +
 4 files changed, 127 insertions(+)

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
index fe10f1a2a04a..8d0c605d4cdb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
@@ -83,6 +83,31 @@ static bool mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry)
 	return false;
 }
 
+static void mlx5e_ipsec_init_limits(struct mlx5e_ipsec_sa_entry *sa_entry,
+				    struct mlx5_accel_esp_xfrm_attrs *attrs)
+{
+	struct xfrm_state *x = sa_entry->x;
+
+	attrs->hard_packet_limit = x->lft.hard_packet_limit;
+	if (x->lft.soft_packet_limit == XFRM_INF)
+		return;
+
+	/* Hardware decrements the hard_packet_limit counter through
+	 * the operation, while it fires an event when soft_packet_limit
+	 * is reached. It means that we need to substitute the numbers
+	 * in order to properly count the soft limit.
+	 *
+	 * As an example:
+	 * XFRM user sets soft limit to 2 and hard limit to 9 and
+	 * expects to see the soft event after 2 packets and the hard
+	 * event after 9 packets. In our case, the hard limit will be
+	 * set to 9 and the soft limit comparator to 7, so the user
+	 * gets the soft event after 2 packets.
+	 */
+	attrs->soft_packet_limit =
+		x->lft.hard_packet_limit - x->lft.soft_packet_limit;
+}
+
 static void
 mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
 				   struct mlx5_accel_esp_xfrm_attrs *attrs)
@@ -134,6 +159,8 @@ mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
 	attrs->family = x->props.family;
 	attrs->type = x->xso.type;
 	attrs->reqid = x->props.reqid;
+
+	mlx5e_ipsec_init_limits(sa_entry, attrs);
 }
 
 static inline int mlx5e_xfrm_validate_state(struct xfrm_state *x)
@@ -222,6 +249,21 @@ static inline int mlx5e_xfrm_validate_state(struct xfrm_state *x)
 			netdev_info(netdev, "Cannot offload without reqid\n");
 			return -EINVAL;
 		}
+
+		if (x->lft.hard_byte_limit != XFRM_INF ||
+		    x->lft.soft_byte_limit != XFRM_INF) {
+			netdev_info(netdev,
+				    "Device doesn't support limits in bytes\n");
+			return -EINVAL;
+		}
+
+		if (x->lft.soft_packet_limit >= x->lft.hard_packet_limit &&
+		    x->lft.hard_packet_limit != XFRM_INF) {
+			/* XFRM stack doesn't prevent such configuration :(. */
+			netdev_info(netdev,
+				    "Hard packet limit must be greater than soft one\n");
+			return -EINVAL;
+		}
 	}
 	return 0;
 }
@@ -415,6 +457,26 @@ static void mlx5e_xfrm_advance_esn_state(struct xfrm_state *x)
 	queue_work(sa_entry->ipsec->wq, &modify_work->work);
 }
 
+static void mlx5e_xfrm_update_curlft(struct xfrm_state *x)
+{
+	struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
+	int err;
+
+	lockdep_assert_held(&x->lock);
+
+	if (sa_entry->attrs.soft_packet_limit == XFRM_INF)
+		/* Limits are not configured, as soft limit
+		 * must be lower than hard limit.
+		 */
+		return;
+
+	err = mlx5e_ipsec_aso_query(sa_entry);
+	if (err)
+		return;
+
+	mlx5e_ipsec_aso_update_curlft(sa_entry, &x->curlft.packets);
+}
+
 static int mlx5e_xfrm_validate_policy(struct xfrm_policy *x)
 {
 	struct net_device *netdev = x->xdo.real_dev;
@@ -526,6 +588,7 @@ static const struct xfrmdev_ops mlx5e_ipsec_packet_xfrmdev_ops = {
 	.xdo_dev_offload_ok	= mlx5e_ipsec_offload_ok,
 	.xdo_dev_state_advance_esn = mlx5e_xfrm_advance_esn_state,
 
+	.xdo_dev_state_update_curlft = mlx5e_xfrm_update_curlft,
 	.xdo_dev_policy_add = mlx5e_xfrm_add_policy,
 	.xdo_dev_policy_free = mlx5e_xfrm_free_policy,
 };
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
index 724f2df14a97..aac1e6a83631 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
@@ -78,6 +78,8 @@ struct mlx5_accel_esp_xfrm_attrs {
 	u32 replay_window;
 	u32 authsize;
 	u32 reqid;
+	u64 hard_packet_limit;
+	u64 soft_packet_limit;
 };
 
 enum mlx5_ipsec_cap {
@@ -208,6 +210,10 @@ void mlx5_accel_esp_modify_xfrm(struct mlx5e_ipsec_sa_entry *sa_entry,
 int mlx5e_ipsec_aso_init(struct mlx5e_ipsec *ipsec);
 void mlx5e_ipsec_aso_cleanup(struct mlx5e_ipsec *ipsec);
 
+int mlx5e_ipsec_aso_query(struct mlx5e_ipsec_sa_entry *sa_entry);
+void mlx5e_ipsec_aso_update_curlft(struct mlx5e_ipsec_sa_entry *sa_entry,
+				   u64 *packets);
+
 void mlx5e_accel_ipsec_fs_read_stats(struct mlx5e_priv *priv,
 				     void *ipsec_stats);
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
index fc88454aaf8d..8790558ea859 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
@@ -83,6 +83,20 @@ static void mlx5e_ipsec_packet_setup(void *obj, u32 pdn,
 	MLX5_SET(ipsec_obj, obj, aso_return_reg, MLX5_IPSEC_ASO_REG_C_4_5);
 	if (attrs->dir == XFRM_DEV_OFFLOAD_OUT)
 		MLX5_SET(ipsec_aso, aso_ctx, mode, MLX5_IPSEC_ASO_INC_SN);
+
+	if (attrs->hard_packet_limit != XFRM_INF) {
+		MLX5_SET(ipsec_aso, aso_ctx, remove_flow_pkt_cnt,
+			 lower_32_bits(attrs->hard_packet_limit));
+		MLX5_SET(ipsec_aso, aso_ctx, hard_lft_arm, 1);
+		MLX5_SET(ipsec_aso, aso_ctx, remove_flow_enable, 1);
+	}
+
+	if (attrs->soft_packet_limit != XFRM_INF) {
+		MLX5_SET(ipsec_aso, aso_ctx, remove_flow_soft_lft,
+			 lower_32_bits(attrs->soft_packet_limit));
+
+		MLX5_SET(ipsec_aso, aso_ctx, soft_lft_arm, 1);
+	}
 }
 
 static int mlx5_create_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry)
@@ -298,3 +312,46 @@ void mlx5e_ipsec_aso_cleanup(struct mlx5e_ipsec *ipsec)
 			 DMA_BIDIRECTIONAL);
 	kfree(aso);
 }
+
+int mlx5e_ipsec_aso_query(struct mlx5e_ipsec_sa_entry *sa_entry)
+{
+	struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
+	struct mlx5e_ipsec_aso *aso = ipsec->aso;
+	struct mlx5_core_dev *mdev = ipsec->mdev;
+	struct mlx5_wqe_aso_ctrl_seg *ctrl;
+	struct mlx5e_hw_objs *res;
+	struct mlx5_aso_wqe *wqe;
+	u8 ds_cnt;
+
+	res = &mdev->mlx5e_res.hw_objs;
+
+	memset(aso->ctx, 0, sizeof(aso->ctx));
+	wqe = mlx5_aso_get_wqe(aso->aso);
+	ds_cnt = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS);
+	mlx5_aso_build_wqe(aso->aso, ds_cnt, wqe, sa_entry->ipsec_obj_id,
+			   MLX5_ACCESS_ASO_OPC_MOD_IPSEC);
+
+	ctrl = &wqe->aso_ctrl;
+	ctrl->va_l =
+		cpu_to_be32(lower_32_bits(aso->dma_addr) | ASO_CTRL_READ_EN);
+	ctrl->va_h = cpu_to_be32(upper_32_bits(aso->dma_addr));
+	ctrl->l_key = cpu_to_be32(res->mkey);
+
+	mlx5_aso_post_wqe(aso->aso, false, &wqe->ctrl);
+	return mlx5_aso_poll_cq(aso->aso, false);
+}
+
+void mlx5e_ipsec_aso_update_curlft(struct mlx5e_ipsec_sa_entry *sa_entry,
+				   u64 *packets)
+{
+	struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
+	struct mlx5e_ipsec_aso *aso = ipsec->aso;
+	u64 hard_cnt;
+
+	hard_cnt = MLX5_GET(ipsec_aso, aso->ctx, remove_flow_pkt_cnt);
+	/* HW decreases the limit till it reaches zero to fire an event.
+	 * We need to fix the calculations, so the returned count is a total
+	 * number of passed packets and not how much left.
+	 */
+	*packets = sa_entry->attrs.hard_packet_limit - hard_cnt;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.h
index c8fc3c838642..afb078bbb8ef 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.h
@@ -15,6 +15,7 @@
 #define MLX5_WQE_CTRL_WQE_OPC_MOD_SHIFT 24
 #define MLX5_MACSEC_ASO_DS_CNT (DIV_ROUND_UP(sizeof(struct mlx5_aso_wqe), MLX5_SEND_WQE_DS))
 
+#define ASO_CTRL_READ_EN BIT(0)
 struct mlx5_wqe_aso_ctrl_seg {
 	__be32  va_h;
 	__be32  va_l; /* include read_enable */
-- 
2.38.1


^ permalink raw reply related	[flat|nested] 16+ messages in thread

* [PATCH xfrm-next 11/13] net/mlx5e: Handle hardware IPsec limits events
  2022-12-02 20:14 [PATCH xfrm-next 00/13] mlx5 IPsec packet offload support (Part I) Leon Romanovsky
                   ` (9 preceding siblings ...)
  2022-12-02 20:14 ` [PATCH xfrm-next 10/13] net/mlx5e: Update IPsec soft and hard limits Leon Romanovsky
@ 2022-12-02 20:14 ` Leon Romanovsky
  2022-12-02 20:14 ` [PATCH xfrm-next 12/13] net/mlx5e: Handle ESN update events Leon Romanovsky
                   ` (3 subsequent siblings)
  14 siblings, 0 replies; 16+ messages in thread
From: Leon Romanovsky @ 2022-12-02 20:14 UTC (permalink / raw)
  To: Steffen Klassert
  Cc: Leon Romanovsky, David S. Miller, Eric Dumazet, Herbert Xu,
	Jakub Kicinski, netdev, Bharat Bhushan

From: Leon Romanovsky <leonro@nvidia.com>

Enable object changed event to signal IPsec about hitting
soft and hard limits.

Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
---
 .../mellanox/mlx5/core/en_accel/ipsec.c       |  2 +-
 .../mellanox/mlx5/core/en_accel/ipsec.h       | 19 +++-
 .../mlx5/core/en_accel/ipsec_offload.c        | 97 ++++++++++++++++++-
 drivers/net/ethernet/mellanox/mlx5/core/eq.c  |  5 +
 4 files changed, 118 insertions(+), 5 deletions(-)

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
index 8d0c605d4cdb..4f176bd8395a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
@@ -470,7 +470,7 @@ static void mlx5e_xfrm_update_curlft(struct xfrm_state *x)
 		 */
 		return;
 
-	err = mlx5e_ipsec_aso_query(sa_entry);
+	err = mlx5e_ipsec_aso_query(sa_entry, NULL);
 	if (err)
 		return;
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
index aac1e6a83631..e7f21e449268 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
@@ -34,8 +34,6 @@
 #ifndef __MLX5E_IPSEC_H__
 #define __MLX5E_IPSEC_H__
 
-#ifdef CONFIG_MLX5_EN_IPSEC
-
 #include <linux/mlx5/device.h>
 #include <net/xfrm.h>
 #include <linux/idr.h>
@@ -114,10 +112,21 @@ struct mlx5e_ipsec_sw_stats {
 struct mlx5e_ipsec_rx;
 struct mlx5e_ipsec_tx;
 
+struct mlx5e_ipsec_work {
+	struct work_struct work;
+	struct mlx5e_ipsec *ipsec;
+	u32 id;
+};
+
 struct mlx5e_ipsec_aso {
 	u8 ctx[MLX5_ST_SZ_BYTES(ipsec_aso)];
 	dma_addr_t dma_addr;
 	struct mlx5_aso *aso;
+	/* IPsec ASO caches data on every query call,
+	 * so in nested calls, we can use this boolean to save
+	 * recursive calls to mlx5e_ipsec_aso_query()
+	 */
+	u8 use_cache : 1;
 };
 
 struct mlx5e_ipsec {
@@ -131,6 +140,7 @@ struct mlx5e_ipsec {
 	struct mlx5e_ipsec_rx *rx_ipv6;
 	struct mlx5e_ipsec_tx *tx;
 	struct mlx5e_ipsec_aso *aso;
+	struct notifier_block nb;
 };
 
 struct mlx5e_ipsec_esn_state {
@@ -188,6 +198,8 @@ struct mlx5e_ipsec_pol_entry {
 	struct mlx5_accel_pol_xfrm_attrs attrs;
 };
 
+#ifdef CONFIG_MLX5_EN_IPSEC
+
 void mlx5e_ipsec_init(struct mlx5e_priv *priv);
 void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv);
 void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv);
@@ -210,7 +222,8 @@ void mlx5_accel_esp_modify_xfrm(struct mlx5e_ipsec_sa_entry *sa_entry,
 int mlx5e_ipsec_aso_init(struct mlx5e_ipsec *ipsec);
 void mlx5e_ipsec_aso_cleanup(struct mlx5e_ipsec *ipsec);
 
-int mlx5e_ipsec_aso_query(struct mlx5e_ipsec_sa_entry *sa_entry);
+int mlx5e_ipsec_aso_query(struct mlx5e_ipsec_sa_entry *sa_entry,
+			  struct mlx5_wqe_aso_ctrl_seg *data);
 void mlx5e_ipsec_aso_update_curlft(struct mlx5e_ipsec_sa_entry *sa_entry,
 				   u64 *packets);
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
index 8790558ea859..1b5014ffa257 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
@@ -260,6 +260,73 @@ void mlx5_accel_esp_modify_xfrm(struct mlx5e_ipsec_sa_entry *sa_entry,
 	memcpy(&sa_entry->attrs, attrs, sizeof(sa_entry->attrs));
 }
 
+static void mlx5e_ipsec_handle_event(struct work_struct *_work)
+{
+	struct mlx5e_ipsec_work *work =
+		container_of(_work, struct mlx5e_ipsec_work, work);
+	struct mlx5_accel_esp_xfrm_attrs *attrs;
+	struct mlx5e_ipsec_sa_entry *sa_entry;
+	struct mlx5e_ipsec_aso *aso;
+	struct mlx5e_ipsec *ipsec;
+	int ret;
+
+	sa_entry = xa_load(&work->ipsec->sadb, work->id);
+	if (!sa_entry)
+		goto out;
+
+	ipsec = sa_entry->ipsec;
+	aso = ipsec->aso;
+	attrs = &sa_entry->attrs;
+
+	spin_lock(&sa_entry->x->lock);
+	ret = mlx5e_ipsec_aso_query(sa_entry, NULL);
+	if (ret)
+		goto unlock;
+
+	aso->use_cache = true;
+	if (attrs->soft_packet_limit != XFRM_INF)
+		if (!MLX5_GET(ipsec_aso, aso->ctx, soft_lft_arm) ||
+		    !MLX5_GET(ipsec_aso, aso->ctx, hard_lft_arm) ||
+		    !MLX5_GET(ipsec_aso, aso->ctx, remove_flow_enable))
+			xfrm_state_check_expire(sa_entry->x);
+	aso->use_cache = false;
+
+unlock:
+	spin_unlock(&sa_entry->x->lock);
+out:
+	kfree(work);
+}
+
+static int mlx5e_ipsec_event(struct notifier_block *nb, unsigned long event,
+			     void *data)
+{
+	struct mlx5e_ipsec *ipsec = container_of(nb, struct mlx5e_ipsec, nb);
+	struct mlx5_eqe_obj_change *object;
+	struct mlx5e_ipsec_work *work;
+	struct mlx5_eqe *eqe = data;
+	u16 type;
+
+	if (event != MLX5_EVENT_TYPE_OBJECT_CHANGE)
+		return NOTIFY_DONE;
+
+	object = &eqe->data.obj_change;
+	type = be16_to_cpu(object->obj_type);
+
+	if (type != MLX5_GENERAL_OBJECT_TYPES_IPSEC)
+		return NOTIFY_DONE;
+
+	work = kmalloc(sizeof(*work), GFP_ATOMIC);
+	if (!work)
+		return NOTIFY_DONE;
+
+	INIT_WORK(&work->work, mlx5e_ipsec_handle_event);
+	work->ipsec = ipsec;
+	work->id = be32_to_cpu(object->obj_id);
+
+	queue_work(ipsec->wq, &work->work);
+	return NOTIFY_OK;
+}
+
 int mlx5e_ipsec_aso_init(struct mlx5e_ipsec *ipsec)
 {
 	struct mlx5_core_dev *mdev = ipsec->mdev;
@@ -287,6 +354,9 @@ int mlx5e_ipsec_aso_init(struct mlx5e_ipsec *ipsec)
 		goto err_aso_create;
 	}
 
+	ipsec->nb.notifier_call = mlx5e_ipsec_event;
+	mlx5_notifier_register(mdev, &ipsec->nb);
+
 	ipsec->aso = aso;
 	return 0;
 
@@ -307,13 +377,33 @@ void mlx5e_ipsec_aso_cleanup(struct mlx5e_ipsec *ipsec)
 	aso = ipsec->aso;
 	pdev = mlx5_core_dma_dev(mdev);
 
+	mlx5_notifier_unregister(mdev, &ipsec->nb);
 	mlx5_aso_destroy(aso->aso);
 	dma_unmap_single(pdev, aso->dma_addr, sizeof(aso->ctx),
 			 DMA_BIDIRECTIONAL);
 	kfree(aso);
 }
 
-int mlx5e_ipsec_aso_query(struct mlx5e_ipsec_sa_entry *sa_entry)
+static void mlx5e_ipsec_aso_copy(struct mlx5_wqe_aso_ctrl_seg *ctrl,
+				 struct mlx5_wqe_aso_ctrl_seg *data)
+{
+	if (!data)
+		return;
+
+	ctrl->data_mask_mode = data->data_mask_mode;
+	ctrl->condition_1_0_operand = data->condition_1_0_operand;
+	ctrl->condition_1_0_offset = data->condition_1_0_offset;
+	ctrl->data_offset_condition_operand = data->data_offset_condition_operand;
+	ctrl->condition_0_data = data->condition_0_data;
+	ctrl->condition_0_mask = data->condition_0_mask;
+	ctrl->condition_1_data = data->condition_1_data;
+	ctrl->condition_1_mask = data->condition_1_mask;
+	ctrl->bitwise_data = data->bitwise_data;
+	ctrl->data_mask = data->data_mask;
+}
+
+int mlx5e_ipsec_aso_query(struct mlx5e_ipsec_sa_entry *sa_entry,
+			  struct mlx5_wqe_aso_ctrl_seg *data)
 {
 	struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
 	struct mlx5e_ipsec_aso *aso = ipsec->aso;
@@ -323,6 +413,10 @@ int mlx5e_ipsec_aso_query(struct mlx5e_ipsec_sa_entry *sa_entry)
 	struct mlx5_aso_wqe *wqe;
 	u8 ds_cnt;
 
+	lockdep_assert_held(&sa_entry->x->lock);
+	if (aso->use_cache)
+		return 0;
+
 	res = &mdev->mlx5e_res.hw_objs;
 
 	memset(aso->ctx, 0, sizeof(aso->ctx));
@@ -336,6 +430,7 @@ int mlx5e_ipsec_aso_query(struct mlx5e_ipsec_sa_entry *sa_entry)
 		cpu_to_be32(lower_32_bits(aso->dma_addr) | ASO_CTRL_READ_EN);
 	ctrl->va_h = cpu_to_be32(upper_32_bits(aso->dma_addr));
 	ctrl->l_key = cpu_to_be32(res->mkey);
+	mlx5e_ipsec_aso_copy(ctrl, data);
 
 	mlx5_aso_post_wqe(aso->aso, false, &wqe->ctrl);
 	return mlx5_aso_poll_cq(aso->aso, false);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index a0242dc15741..8f7580fec193 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -19,6 +19,7 @@
 #include "diag/fw_tracer.h"
 #include "mlx5_irq.h"
 #include "devlink.h"
+#include "en_accel/ipsec.h"
 
 enum {
 	MLX5_EQE_OWNER_INIT_VAL	= 0x1,
@@ -578,6 +579,10 @@ static void gather_async_events_mask(struct mlx5_core_dev *dev, u64 mask[4])
 	if (MLX5_CAP_MACSEC(dev, log_max_macsec_offload))
 		async_event_mask |= (1ull << MLX5_EVENT_TYPE_OBJECT_CHANGE);
 
+	if (mlx5_ipsec_device_caps(dev) & MLX5_IPSEC_CAP_PACKET_OFFLOAD)
+		async_event_mask |=
+			(1ull << MLX5_EVENT_TYPE_OBJECT_CHANGE);
+
 	mask[0] = async_event_mask;
 
 	if (MLX5_CAP_GEN(dev, event_cap))
-- 
2.38.1


^ permalink raw reply related	[flat|nested] 16+ messages in thread

* [PATCH xfrm-next 12/13] net/mlx5e: Handle ESN update events
  2022-12-02 20:14 [PATCH xfrm-next 00/13] mlx5 IPsec packet offload support (Part I) Leon Romanovsky
                   ` (10 preceding siblings ...)
  2022-12-02 20:14 ` [PATCH xfrm-next 11/13] net/mlx5e: Handle hardware IPsec limits events Leon Romanovsky
@ 2022-12-02 20:14 ` Leon Romanovsky
  2022-12-02 20:14 ` [PATCH xfrm-next 13/13] net/mlx5e: Open mlx5 driver to accept IPsec packet offload Leon Romanovsky
                   ` (2 subsequent siblings)
  14 siblings, 0 replies; 16+ messages in thread
From: Leon Romanovsky @ 2022-12-02 20:14 UTC (permalink / raw)
  To: Steffen Klassert
  Cc: Leon Romanovsky, David S. Miller, Eric Dumazet, Herbert Xu,
	Jakub Kicinski, netdev, Bharat Bhushan, Saeed Mahameed

From: Leon Romanovsky <leonro@nvidia.com>

Extend event logic to update ESN state (esn_msb, esn_overlap)
for an IPsec Offload context.

Reviewed-by: Saeed Mahameed <saeedm@nvidia.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
---
 .../mellanox/mlx5/core/en_accel/ipsec.c       |  5 +--
 .../mellanox/mlx5/core/en_accel/ipsec.h       |  2 +
 .../mlx5/core/en_accel/ipsec_offload.c        | 44 +++++++++++++++++++
 3 files changed, 48 insertions(+), 3 deletions(-)

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
index 4f176bd8395a..f5f930ea3f0f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
@@ -108,9 +108,8 @@ static void mlx5e_ipsec_init_limits(struct mlx5e_ipsec_sa_entry *sa_entry,
 		x->lft.hard_packet_limit - x->lft.soft_packet_limit;
 }
 
-static void
-mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
-				   struct mlx5_accel_esp_xfrm_attrs *attrs)
+void mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
+					struct mlx5_accel_esp_xfrm_attrs *attrs)
 {
 	struct xfrm_state *x = sa_entry->x;
 	struct aes_gcm_keymat *aes_gcm = &attrs->aes_gcm;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
index e7f21e449268..a92e19c4c499 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
@@ -230,6 +230,8 @@ void mlx5e_ipsec_aso_update_curlft(struct mlx5e_ipsec_sa_entry *sa_entry,
 void mlx5e_accel_ipsec_fs_read_stats(struct mlx5e_priv *priv,
 				     void *ipsec_stats);
 
+void mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
+					struct mlx5_accel_esp_xfrm_attrs *attrs);
 static inline struct mlx5_core_dev *
 mlx5e_ipsec_sa2dev(struct mlx5e_ipsec_sa_entry *sa_entry)
 {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
index 1b5014ffa257..8e3614218fc4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
@@ -6,6 +6,10 @@
 #include "ipsec.h"
 #include "lib/mlx5.h"
 
+enum {
+	MLX5_IPSEC_ASO_REMOVE_FLOW_PKT_CNT_OFFSET,
+};
+
 u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev)
 {
 	u32 caps = 0;
@@ -260,6 +264,39 @@ void mlx5_accel_esp_modify_xfrm(struct mlx5e_ipsec_sa_entry *sa_entry,
 	memcpy(&sa_entry->attrs, attrs, sizeof(sa_entry->attrs));
 }
 
+static void
+mlx5e_ipsec_aso_update_esn(struct mlx5e_ipsec_sa_entry *sa_entry,
+			   const struct mlx5_accel_esp_xfrm_attrs *attrs)
+{
+	struct mlx5_wqe_aso_ctrl_seg data = {};
+
+	data.data_mask_mode = MLX5_ASO_DATA_MASK_MODE_BITWISE_64BIT << 6;
+	data.condition_1_0_operand = MLX5_ASO_ALWAYS_TRUE | MLX5_ASO_ALWAYS_TRUE
+								    << 4;
+	data.data_offset_condition_operand = MLX5_IPSEC_ASO_REMOVE_FLOW_PKT_CNT_OFFSET;
+	data.bitwise_data = cpu_to_be64(BIT_ULL(54));
+	data.data_mask = data.bitwise_data;
+
+	mlx5e_ipsec_aso_query(sa_entry, &data);
+}
+
+static void mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry,
+					 u32 mode_param)
+{
+	struct mlx5_accel_esp_xfrm_attrs attrs = {};
+
+	if (mode_param < MLX5E_IPSEC_ESN_SCOPE_MID) {
+		sa_entry->esn_state.esn++;
+		sa_entry->esn_state.overlap = 0;
+	} else {
+		sa_entry->esn_state.overlap = 1;
+	}
+
+	mlx5e_ipsec_build_accel_xfrm_attrs(sa_entry, &attrs);
+	mlx5_accel_esp_modify_xfrm(sa_entry, &attrs);
+	mlx5e_ipsec_aso_update_esn(sa_entry, &attrs);
+}
+
 static void mlx5e_ipsec_handle_event(struct work_struct *_work)
 {
 	struct mlx5e_ipsec_work *work =
@@ -284,6 +321,13 @@ static void mlx5e_ipsec_handle_event(struct work_struct *_work)
 		goto unlock;
 
 	aso->use_cache = true;
+	if (attrs->esn_trigger &&
+	    !MLX5_GET(ipsec_aso, aso->ctx, esn_event_arm)) {
+		u32 mode_param = MLX5_GET(ipsec_aso, aso->ctx, mode_parameter);
+
+		mlx5e_ipsec_update_esn_state(sa_entry, mode_param);
+	}
+
 	if (attrs->soft_packet_limit != XFRM_INF)
 		if (!MLX5_GET(ipsec_aso, aso->ctx, soft_lft_arm) ||
 		    !MLX5_GET(ipsec_aso, aso->ctx, hard_lft_arm) ||
-- 
2.38.1


^ permalink raw reply related	[flat|nested] 16+ messages in thread

* [PATCH xfrm-next 13/13] net/mlx5e: Open mlx5 driver to accept IPsec packet offload
  2022-12-02 20:14 [PATCH xfrm-next 00/13] mlx5 IPsec packet offload support (Part I) Leon Romanovsky
                   ` (11 preceding siblings ...)
  2022-12-02 20:14 ` [PATCH xfrm-next 12/13] net/mlx5e: Handle ESN update events Leon Romanovsky
@ 2022-12-02 20:14 ` Leon Romanovsky
  2022-12-02 20:19 ` [PATCH xfrm-next 00/13] mlx5 IPsec packet offload support (Part I) Leon Romanovsky
  2022-12-09  7:18 ` Steffen Klassert
  14 siblings, 0 replies; 16+ messages in thread
From: Leon Romanovsky @ 2022-12-02 20:14 UTC (permalink / raw)
  To: Steffen Klassert
  Cc: Leon Romanovsky, David S. Miller, Eric Dumazet, Herbert Xu,
	Jakub Kicinski, netdev, Bharat Bhushan, Raed Salem

From: Leon Romanovsky <leonro@nvidia.com>

Enable configuration of IPsec packet offload through XFRM state add
interface together with moving specific to IPsec packet mode limitations
to specific switch-case section.

Reviewed-by: Raed Salem <raeds@nvidia.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
---
 .../mellanox/mlx5/core/en_accel/ipsec.c       | 41 ++++++++++++++-----
 1 file changed, 31 insertions(+), 10 deletions(-)

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
index f5f930ea3f0f..bb9023957f74 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
@@ -191,11 +191,6 @@ static inline int mlx5e_xfrm_validate_state(struct xfrm_state *x)
 		netdev_info(netdev, "Only IPv4/6 xfrm states may be offloaded\n");
 		return -EINVAL;
 	}
-	if (x->props.mode != XFRM_MODE_TRANSPORT &&
-	    x->props.mode != XFRM_MODE_TUNNEL) {
-		dev_info(&netdev->dev, "Only transport and tunnel xfrm states may be offloaded\n");
-		return -EINVAL;
-	}
 	if (x->id.proto != IPPROTO_ESP) {
 		netdev_info(netdev, "Only ESP xfrm state may be offloaded\n");
 		return -EINVAL;
@@ -229,11 +224,32 @@ static inline int mlx5e_xfrm_validate_state(struct xfrm_state *x)
 		netdev_info(netdev, "Cannot offload xfrm states with geniv other than seqiv\n");
 		return -EINVAL;
 	}
-	if (x->xso.type != XFRM_DEV_OFFLOAD_CRYPTO) {
-		netdev_info(netdev, "Unsupported xfrm offload type\n");
-		return -EINVAL;
-	}
-	if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET) {
+	switch (x->xso.type) {
+	case XFRM_DEV_OFFLOAD_CRYPTO:
+		if (!(mlx5_ipsec_device_caps(priv->mdev) &
+		      MLX5_IPSEC_CAP_CRYPTO)) {
+			netdev_info(netdev, "Crypto offload is not supported\n");
+			return -EINVAL;
+		}
+
+		if (x->props.mode != XFRM_MODE_TRANSPORT &&
+		    x->props.mode != XFRM_MODE_TUNNEL) {
+			netdev_info(netdev, "Only transport and tunnel xfrm states may be offloaded\n");
+			return -EINVAL;
+		}
+		break;
+	case XFRM_DEV_OFFLOAD_PACKET:
+		if (!(mlx5_ipsec_device_caps(priv->mdev) &
+		      MLX5_IPSEC_CAP_PACKET_OFFLOAD)) {
+			netdev_info(netdev, "Packet offload is not supported\n");
+			return -EINVAL;
+		}
+
+		if (x->props.mode != XFRM_MODE_TRANSPORT) {
+			netdev_info(netdev, "Only transport xfrm states may be offloaded in packet mode\n");
+			return -EINVAL;
+		}
+
 		if (x->replay_esn && x->replay_esn->replay_window != 32 &&
 		    x->replay_esn->replay_window != 64 &&
 		    x->replay_esn->replay_window != 128 &&
@@ -263,6 +279,11 @@ static inline int mlx5e_xfrm_validate_state(struct xfrm_state *x)
 				    "Hard packet limit must be greater than soft one\n");
 			return -EINVAL;
 		}
+		break;
+	default:
+		netdev_info(netdev, "Unsupported xfrm offload type %d\n",
+			    x->xso.type);
+		return -EINVAL;
 	}
 	return 0;
 }
-- 
2.38.1


^ permalink raw reply related	[flat|nested] 16+ messages in thread

* Re: [PATCH xfrm-next 00/13] mlx5 IPsec packet offload support (Part I)
  2022-12-02 20:14 [PATCH xfrm-next 00/13] mlx5 IPsec packet offload support (Part I) Leon Romanovsky
                   ` (12 preceding siblings ...)
  2022-12-02 20:14 ` [PATCH xfrm-next 13/13] net/mlx5e: Open mlx5 driver to accept IPsec packet offload Leon Romanovsky
@ 2022-12-02 20:19 ` Leon Romanovsky
  2022-12-09  7:18 ` Steffen Klassert
  14 siblings, 0 replies; 16+ messages in thread
From: Leon Romanovsky @ 2022-12-02 20:19 UTC (permalink / raw)
  To: Steffen Klassert
  Cc: David S. Miller, Eric Dumazet, Herbert Xu, Jakub Kicinski, netdev,
	Bharat Bhushan

On Fri, Dec 02, 2022 at 10:14:44PM +0200, Leon Romanovsky wrote:
> From: Leon Romanovsky <leonro@nvidia.com>
> 
> Hi,
> 
This is the second part, with the implementation of packet offload.


The title is misleading; it should read "Part II".
Sorry for that.

Thanks

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH xfrm-next 00/13] mlx5 IPsec packet offload support (Part I)
  2022-12-02 20:14 [PATCH xfrm-next 00/13] mlx5 IPsec packet offload support (Part I) Leon Romanovsky
                   ` (13 preceding siblings ...)
  2022-12-02 20:19 ` [PATCH xfrm-next 00/13] mlx5 IPsec packet offload support (Part I) Leon Romanovsky
@ 2022-12-09  7:18 ` Steffen Klassert
  14 siblings, 0 replies; 16+ messages in thread
From: Steffen Klassert @ 2022-12-09  7:18 UTC (permalink / raw)
  To: Leon Romanovsky
  Cc: Leon Romanovsky, David S. Miller, Eric Dumazet, Herbert Xu,
	Jakub Kicinski, netdev, Bharat Bhushan

On Fri, Dec 02, 2022 at 10:14:44PM +0200, Leon Romanovsky wrote:
> From: Leon Romanovsky <leonro@nvidia.com>
> 
> Hi,
> 
> This is second part with implementation of packet offload.
> 
> Thanks
> 
> Leon Romanovsky (12):
>   net/mlx5e: Create IPsec policy offload tables
>   net/mlx5e: Add XFRM policy offload logic
>   net/mlx5e: Use same coding pattern for Rx and Tx flows
>   net/mlx5e: Configure IPsec packet offload flow steering
>   net/mlx5e: Improve IPsec flow steering autogroup
>   net/mlx5e: Skip IPsec encryption for TX path without matching policy
>   net/mlx5e: Provide intermediate pointer to access IPsec struct
>   net/mlx5e: Store all XFRM SAs in Xarray
>   net/mlx5e: Update IPsec soft and hard limits
>   net/mlx5e: Handle hardware IPsec limits events
>   net/mlx5e: Handle ESN update events
>   net/mlx5e: Open mlx5 driver to accept IPsec packet offload
> 
> Raed Salem (1):
>   net/mlx5e: Add statistics for Rx/Tx IPsec offloaded flows

Series applied, thanks a lot!

^ permalink raw reply	[flat|nested] 16+ messages in thread

end of thread, other threads:[~2022-12-09  7:19 UTC | newest]

Thread overview: 16+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2022-12-02 20:14 [PATCH xfrm-next 00/13] mlx5 IPsec packet offload support (Part I) Leon Romanovsky
2022-12-02 20:14 ` [PATCH xfrm-next 01/13] net/mlx5e: Create IPsec policy offload tables Leon Romanovsky
2022-12-02 20:14 ` [PATCH xfrm-next 02/13] net/mlx5e: Add XFRM policy offload logic Leon Romanovsky
2022-12-02 20:14 ` [PATCH xfrm-next 03/13] net/mlx5e: Use same coding pattern for Rx and Tx flows Leon Romanovsky
2022-12-02 20:14 ` [PATCH xfrm-next 04/13] net/mlx5e: Configure IPsec packet offload flow steering Leon Romanovsky
2022-12-02 20:14 ` [PATCH xfrm-next 05/13] net/mlx5e: Improve IPsec flow steering autogroup Leon Romanovsky
2022-12-02 20:14 ` [PATCH xfrm-next 06/13] net/mlx5e: Add statistics for Rx/Tx IPsec offloaded flows Leon Romanovsky
2022-12-02 20:14 ` [PATCH xfrm-next 07/13] net/mlx5e: Skip IPsec encryption for TX path without matching policy Leon Romanovsky
2022-12-02 20:14 ` [PATCH xfrm-next 08/13] net/mlx5e: Provide intermediate pointer to access IPsec struct Leon Romanovsky
2022-12-02 20:14 ` [PATCH xfrm-next 09/13] net/mlx5e: Store all XFRM SAs in Xarray Leon Romanovsky
2022-12-02 20:14 ` [PATCH xfrm-next 10/13] net/mlx5e: Update IPsec soft and hard limits Leon Romanovsky
2022-12-02 20:14 ` [PATCH xfrm-next 11/13] net/mlx5e: Handle hardware IPsec limits events Leon Romanovsky
2022-12-02 20:14 ` [PATCH xfrm-next 12/13] net/mlx5e: Handle ESN update events Leon Romanovsky
2022-12-02 20:14 ` [PATCH xfrm-next 13/13] net/mlx5e: Open mlx5 driver to accept IPsec packet offload Leon Romanovsky
2022-12-02 20:19 ` [PATCH xfrm-next 00/13] mlx5 IPsec packet offload support (Part I) Leon Romanovsky
2022-12-09  7:18 ` Steffen Klassert

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).