* [PATCH xfrm-next 1/9] xfrm: generalize xdo_dev_state_update_curlft to allow statistics update
2023-10-16 9:15 [PATCH xfrm-next 0/9] mlx5 IPsec replay window enhancement and XFRM core statistics Leon Romanovsky
@ 2023-10-16 9:15 ` Leon Romanovsky
2023-10-16 9:15 ` [PATCH xfrm-next 2/9] xfrm: get global statistics from the offloaded device Leon Romanovsky
` (7 subsequent siblings)
8 siblings, 0 replies; 15+ messages in thread
From: Leon Romanovsky @ 2023-10-16 9:15 UTC (permalink / raw)
To: Steffen Klassert
Cc: Leon Romanovsky, Herbert Xu, netdev, Patrisious Haddad,
Raed Salem, Saeed Mahameed
From: Leon Romanovsky <leonro@nvidia.com>
In order to allow drivers to fill all statistics, not just the current
lifetime counters, rename xdo_dev_state_update_curlft to
xdo_dev_state_update_stats.
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
---
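As a rough sketch of what the generalized hook makes possible (not taken
from this series; the my_*() names below are made up, and
my_query_hw_counters() stands in for a device-specific counter read), a
driver could now report per-state error statistics alongside the lifetime
counters:

    #include <linux/netdevice.h>
    #include <net/xfrm.h>

    /* Hypothetical device helper returning HW counters for this SA. */
    static void my_query_hw_counters(struct xfrm_state *x, u64 *packets, u64 *bytes,
                                     u32 *replay_drops, u32 *auth_fails);

    static void my_xfrm_update_stats(struct xfrm_state *x)
    {
            u32 replay_drops, auth_fails;
            u64 packets, bytes;

            my_query_hw_counters(x, &packets, &bytes, &replay_drops, &auth_fails);

            /* What xdo_dev_state_update_curlft already covered ... */
            x->curlft.packets = packets;
            x->curlft.bytes = bytes;

            /* ... and what the generalized name leaves room for. */
            x->stats.replay = replay_drops;
            x->stats.integrity_failed = auth_fails;
    }

    static const struct xfrmdev_ops my_xfrmdev_ops = {
            .xdo_dev_state_update_stats = my_xfrm_update_stats,
            /* other callbacks elided */
    };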
Documentation/networking/xfrm_device.rst | 4 ++--
.../net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c | 8 +++++---
include/linux/netdevice.h | 2 +-
include/net/xfrm.h | 11 ++++-------
net/xfrm/xfrm_state.c | 4 ++--
net/xfrm/xfrm_user.c | 2 +-
6 files changed, 15 insertions(+), 16 deletions(-)
diff --git a/Documentation/networking/xfrm_device.rst b/Documentation/networking/xfrm_device.rst
index 535077cbeb07..bfea9d8579ed 100644
--- a/Documentation/networking/xfrm_device.rst
+++ b/Documentation/networking/xfrm_device.rst
@@ -71,9 +71,9 @@ Callbacks to implement
bool (*xdo_dev_offload_ok) (struct sk_buff *skb,
struct xfrm_state *x);
void (*xdo_dev_state_advance_esn) (struct xfrm_state *x);
+ void (*xdo_dev_state_update_stats) (struct xfrm_state *x);
/* Solely packet offload callbacks */
- void (*xdo_dev_state_update_curlft) (struct xfrm_state *x);
int (*xdo_dev_policy_add) (struct xfrm_policy *x, struct netlink_ext_ack *extack);
void (*xdo_dev_policy_delete) (struct xfrm_policy *x);
void (*xdo_dev_policy_free) (struct xfrm_policy *x);
@@ -191,6 +191,6 @@ xdo_dev_policy_free() on any remaining offloaded states.
Outcome of HW handling packets, the XFRM core can't count hard, soft limits.
The HW/driver are responsible to perform it and provide accurate data when
-xdo_dev_state_update_curlft() is called. In case of one of these limits
+xdo_dev_state_update_stats() is called. In case of one of these limits
occuried, the driver needs to call to xfrm_state_check_expire() to make sure
that XFRM performs rekeying sequence.
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
index 655496598c68..11a7a8bcda0f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
@@ -948,7 +948,7 @@ static void mlx5e_xfrm_advance_esn_state(struct xfrm_state *x)
queue_work(sa_entry->ipsec->wq, &work->work);
}
-static void mlx5e_xfrm_update_curlft(struct xfrm_state *x)
+static void mlx5e_xfrm_update_stats(struct xfrm_state *x)
{
struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
struct mlx5e_ipsec_rule *ipsec_rule = &sa_entry->ipsec_rule;
@@ -957,7 +957,8 @@ static void mlx5e_xfrm_update_curlft(struct xfrm_state *x)
lockdep_assert(lockdep_is_held(&x->lock) ||
lockdep_is_held(&dev_net(x->xso.real_dev)->xfrm.xfrm_cfg_mutex));
- if (x->xso.flags & XFRM_DEV_OFFLOAD_FLAG_ACQ)
+ if (x->xso.flags & XFRM_DEV_OFFLOAD_FLAG_ACQ ||
+ x->xso.type != XFRM_DEV_OFFLOAD_PACKET)
return;
mlx5_fc_query_cached(ipsec_rule->fc, &bytes, &packets, &lastuse);
@@ -1113,6 +1114,7 @@ static const struct xfrmdev_ops mlx5e_ipsec_xfrmdev_ops = {
.xdo_dev_state_free = mlx5e_xfrm_free_state,
.xdo_dev_offload_ok = mlx5e_ipsec_offload_ok,
.xdo_dev_state_advance_esn = mlx5e_xfrm_advance_esn_state,
+ .xdo_dev_state_update_stats = mlx5e_xfrm_update_stats,
};
static const struct xfrmdev_ops mlx5e_ipsec_packet_xfrmdev_ops = {
@@ -1122,7 +1124,7 @@ static const struct xfrmdev_ops mlx5e_ipsec_packet_xfrmdev_ops = {
.xdo_dev_offload_ok = mlx5e_ipsec_offload_ok,
.xdo_dev_state_advance_esn = mlx5e_xfrm_advance_esn_state,
- .xdo_dev_state_update_curlft = mlx5e_xfrm_update_curlft,
+ .xdo_dev_state_update_stats = mlx5e_xfrm_update_stats,
.xdo_dev_policy_add = mlx5e_xfrm_add_policy,
.xdo_dev_policy_delete = mlx5e_xfrm_del_policy,
.xdo_dev_policy_free = mlx5e_xfrm_free_policy,
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 1c7681263d30..55dcb2c2d810 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1034,7 +1034,7 @@ struct xfrmdev_ops {
bool (*xdo_dev_offload_ok) (struct sk_buff *skb,
struct xfrm_state *x);
void (*xdo_dev_state_advance_esn) (struct xfrm_state *x);
- void (*xdo_dev_state_update_curlft) (struct xfrm_state *x);
+ void (*xdo_dev_state_update_stats) (struct xfrm_state *x);
int (*xdo_dev_policy_add) (struct xfrm_policy *x, struct netlink_ext_ack *extack);
void (*xdo_dev_policy_delete) (struct xfrm_policy *x);
void (*xdo_dev_policy_free) (struct xfrm_policy *x);
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 98d7aa78adda..1b86409d79bb 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -1578,21 +1578,18 @@ struct xfrm_state *xfrm_state_lookup_byspi(struct net *net, __be32 spi,
unsigned short family);
int xfrm_state_check_expire(struct xfrm_state *x);
#ifdef CONFIG_XFRM_OFFLOAD
-static inline void xfrm_dev_state_update_curlft(struct xfrm_state *x)
+static inline void xfrm_dev_state_update_stats(struct xfrm_state *x)
{
struct xfrm_dev_offload *xdo = &x->xso;
struct net_device *dev = xdo->dev;
- if (x->xso.type != XFRM_DEV_OFFLOAD_PACKET)
- return;
-
if (dev && dev->xfrmdev_ops &&
- dev->xfrmdev_ops->xdo_dev_state_update_curlft)
- dev->xfrmdev_ops->xdo_dev_state_update_curlft(x);
+ dev->xfrmdev_ops->xdo_dev_state_update_stats)
+ dev->xfrmdev_ops->xdo_dev_state_update_stats(x);
}
#else
-static inline void xfrm_dev_state_update_curlft(struct xfrm_state *x) {}
+static inline void xfrm_dev_state_update_stats(struct xfrm_state *x) {}
#endif
void xfrm_state_insert(struct xfrm_state *x);
int xfrm_state_add(struct xfrm_state *x);
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index bda5327bf34d..d8701b2d0d57 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -570,7 +570,7 @@ static enum hrtimer_restart xfrm_timer_handler(struct hrtimer *me)
int err = 0;
spin_lock(&x->lock);
- xfrm_dev_state_update_curlft(x);
+ xfrm_dev_state_update_stats(x);
if (x->km.state == XFRM_STATE_DEAD)
goto out;
@@ -1935,7 +1935,7 @@ EXPORT_SYMBOL(xfrm_state_update);
int xfrm_state_check_expire(struct xfrm_state *x)
{
- xfrm_dev_state_update_curlft(x);
+ xfrm_dev_state_update_stats(x);
if (!READ_ONCE(x->curlft.use_time))
WRITE_ONCE(x->curlft.use_time, ktime_get_real_seconds());
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index ad01997c3aa9..dc4f9b8d7cb0 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -902,7 +902,7 @@ static void copy_to_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p)
memcpy(&p->sel, &x->sel, sizeof(p->sel));
memcpy(&p->lft, &x->lft, sizeof(p->lft));
if (x->xso.dev)
- xfrm_dev_state_update_curlft(x);
+ xfrm_dev_state_update_stats(x);
memcpy(&p->curlft, &x->curlft, sizeof(p->curlft));
put_unaligned(x->stats.replay_window, &p->stats.replay_window);
put_unaligned(x->stats.replay, &p->stats.replay);
--
2.41.0
* [PATCH xfrm-next 2/9] xfrm: get global statistics from the offloaded device
2023-10-16 9:15 [PATCH xfrm-next 0/9] mlx5 IPsec replay window enhancement and XFRM core statistics Leon Romanovsky
2023-10-16 9:15 ` [PATCH xfrm-next 1/9] xfrm: generalize xdo_dev_state_update_curlft to allow statistics update Leon Romanovsky
@ 2023-10-16 9:15 ` Leon Romanovsky
2023-10-16 9:15 ` [PATCH xfrm-next 3/9] net/mlx5e: Honor user choice of IPsec replay window size Leon Romanovsky
` (6 subsequent siblings)
8 siblings, 0 replies; 15+ messages in thread
From: Leon Romanovsky @ 2023-10-16 9:15 UTC (permalink / raw)
To: Steffen Klassert
Cc: Leon Romanovsky, Herbert Xu, netdev, Patrisious Haddad,
Raed Salem, Saeed Mahameed
From: Leon Romanovsky <leonro@nvidia.com>
Iterate over all SAs in order to fill global IPsec statistics.
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
---
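For illustration only (this is not part of the patch, and
my_read_drop_deltas() is a made-up helper assumed to return deltas since
the last query): together with the new XFRM_ADD_STATS() macro, a driver's
xdo_dev_state_update_stats() callback can feed per-SA error counters into
the global MIB that the walk above refreshes, e.g.:

    #include <linux/netdevice.h>
    #include <net/xfrm.h>

    static void my_xfrm_update_stats(struct xfrm_state *x)
    {
            struct net *net = dev_net(x->xso.dev);
            u64 replay_drops, auth_drops;

            /* Hypothetical helper reading per-SA drop deltas from HW. */
            my_read_drop_deltas(x, &replay_drops, &auth_drops);

            x->stats.replay += replay_drops;
            XFRM_ADD_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR, replay_drops);

            x->stats.integrity_failed += auth_drops;
            XFRM_ADD_STATS(net, LINUX_MIB_XFRMINSTATEPROTOERROR, auth_drops);
    }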
.../ethernet/mellanox/mlx5/core/en_accel/ipsec.c | 3 ++-
include/net/xfrm.h | 3 +++
net/xfrm/xfrm_proc.c | 1 +
net/xfrm/xfrm_state.c | 13 +++++++++++++
4 files changed, 19 insertions(+), 1 deletion(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
index 11a7a8bcda0f..9bfffdeb0fe8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
@@ -955,7 +955,8 @@ static void mlx5e_xfrm_update_stats(struct xfrm_state *x)
u64 packets, bytes, lastuse;
lockdep_assert(lockdep_is_held(&x->lock) ||
- lockdep_is_held(&dev_net(x->xso.real_dev)->xfrm.xfrm_cfg_mutex));
+ lockdep_is_held(&dev_net(x->xso.real_dev)->xfrm.xfrm_cfg_mutex) ||
+ lockdep_is_held(&dev_net(x->xso.real_dev)->xfrm.xfrm_state_lock));
if (x->xso.flags & XFRM_DEV_OFFLOAD_FLAG_ACQ ||
x->xso.type != XFRM_DEV_OFFLOAD_PACKET)
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 1b86409d79bb..dcfde5bd7138 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -51,8 +51,10 @@
#ifdef CONFIG_XFRM_STATISTICS
#define XFRM_INC_STATS(net, field) SNMP_INC_STATS((net)->mib.xfrm_statistics, field)
+#define XFRM_ADD_STATS(net, field, val) SNMP_ADD_STATS((net)->mib.xfrm_statistics, field, val)
#else
#define XFRM_INC_STATS(net, field) ((void)(net))
+#define XFRM_ADD_STATS(net, field, val) ((void)(net))
#endif
@@ -1577,6 +1579,7 @@ struct xfrm_state *xfrm_stateonly_find(struct net *net, u32 mark, u32 if_id,
struct xfrm_state *xfrm_state_lookup_byspi(struct net *net, __be32 spi,
unsigned short family);
int xfrm_state_check_expire(struct xfrm_state *x);
+void xfrm_state_update_stats(struct net *net);
#ifdef CONFIG_XFRM_OFFLOAD
static inline void xfrm_dev_state_update_stats(struct xfrm_state *x)
{
diff --git a/net/xfrm/xfrm_proc.c b/net/xfrm/xfrm_proc.c
index fee9b5cf37a7..5f9bf8e5c933 100644
--- a/net/xfrm/xfrm_proc.c
+++ b/net/xfrm/xfrm_proc.c
@@ -52,6 +52,7 @@ static int xfrm_statistics_seq_show(struct seq_file *seq, void *v)
memset(buff, 0, sizeof(unsigned long) * LINUX_MIB_XFRMMAX);
+ xfrm_state_update_stats(net);
snmp_get_cpu_field_batch(buff, xfrm_mib_list,
net->mib.xfrm_statistics);
for (i = 0; xfrm_mib_list[i].name; i++)
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index d8701b2d0d57..0c306473a79d 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -1957,6 +1957,19 @@ int xfrm_state_check_expire(struct xfrm_state *x)
}
EXPORT_SYMBOL(xfrm_state_check_expire);
+void xfrm_state_update_stats(struct net *net)
+{
+ struct xfrm_state *x;
+ int i;
+
+ spin_lock_bh(&net->xfrm.xfrm_state_lock);
+ for (i = 0; i <= net->xfrm.state_hmask; i++) {
+ hlist_for_each_entry(x, net->xfrm.state_bydst + i, bydst)
+ xfrm_dev_state_update_stats(x);
+ }
+ spin_unlock_bh(&net->xfrm.xfrm_state_lock);
+}
+
struct xfrm_state *
xfrm_state_lookup(struct net *net, u32 mark, const xfrm_address_t *daddr, __be32 spi,
u8 proto, unsigned short family)
--
2.41.0
* [PATCH xfrm-next 3/9] net/mlx5e: Honor user choice of IPsec replay window size
2023-10-16 9:15 [PATCH xfrm-next 0/9] mlx5 IPsec replay window enhancement and XFRM core statistics Leon Romanovsky
2023-10-16 9:15 ` [PATCH xfrm-next 1/9] xfrm: generalize xdo_dev_state_update_curlft to allow statistics update Leon Romanovsky
2023-10-16 9:15 ` [PATCH xfrm-next 2/9] xfrm: get global statistics from the offloaded device Leon Romanovsky
@ 2023-10-16 9:15 ` Leon Romanovsky
2023-10-16 9:15 ` [PATCH xfrm-next 4/9] net/mlx5e: Ensure that IPsec sequence packet number starts from 1 Leon Romanovsky
` (5 subsequent siblings)
8 siblings, 0 replies; 15+ messages in thread
From: Leon Romanovsky @ 2023-10-16 9:15 UTC (permalink / raw)
To: Steffen Klassert
Cc: Leon Romanovsky, Herbert Xu, netdev, Patrisious Haddad,
Raed Salem, Saeed Mahameed
From: Leon Romanovsky <leonro@nvidia.com>
Users can configure the IPsec replay window size, but the mlx5 driver
didn't honor their choice and always set 32 bits. Fix the assignment
logic so the right size is configured from the beginning.
Fixes: 7db21ef4566e ("net/mlx5e: Set IPsec replay sequence numbers")
Reviewed-by: Patrisious Haddad <phaddad@nvidia.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
---
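As a side note (a hypothetical sketch, not the mlx5 code): the user's
choice arrives in x->replay_esn->replay_window, and a device that only
supports a few discrete sizes could reject anything else at state-add
time instead of silently clamping it:

    #include <net/netlink.h>
    #include <net/xfrm.h>

    static int my_validate_replay_window(struct xfrm_state *x,
                                         struct netlink_ext_ack *extack)
    {
            if (!x->replay_esn)
                    return 0;

            switch (x->replay_esn->replay_window) {
            case 32:
            case 64:
            case 128:
            case 256:
                    return 0;
            default:
                    NL_SET_ERR_MSG(extack, "Unsupported replay window size");
                    return -EINVAL;
            }
    }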
.../mellanox/mlx5/core/en_accel/ipsec.c | 21 +++++++++++++++++++
.../mlx5/core/en_accel/ipsec_offload.c | 2 +-
include/linux/mlx5/mlx5_ifc.h | 7 +++++++
3 files changed, 29 insertions(+), 1 deletion(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
index 9bfffdeb0fe8..ddd2230f04aa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
@@ -335,6 +335,27 @@ void mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
attrs->replay_esn.esn = sa_entry->esn_state.esn;
attrs->replay_esn.esn_msb = sa_entry->esn_state.esn_msb;
attrs->replay_esn.overlap = sa_entry->esn_state.overlap;
+ switch (x->replay_esn->replay_window) {
+ case 32:
+ attrs->replay_esn.replay_window =
+ MLX5_IPSEC_ASO_REPLAY_WIN_32BIT;
+ break;
+ case 64:
+ attrs->replay_esn.replay_window =
+ MLX5_IPSEC_ASO_REPLAY_WIN_64BIT;
+ break;
+ case 128:
+ attrs->replay_esn.replay_window =
+ MLX5_IPSEC_ASO_REPLAY_WIN_128BIT;
+ break;
+ case 256:
+ attrs->replay_esn.replay_window =
+ MLX5_IPSEC_ASO_REPLAY_WIN_256BIT;
+ break;
+ default:
+ WARN_ON(true);
+ return;
+ }
}
attrs->dir = x->xso.dir;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
index a91f772dc981..4e018fba2d5f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
@@ -95,7 +95,7 @@ static void mlx5e_ipsec_packet_setup(void *obj, u32 pdn,
if (attrs->dir == XFRM_DEV_OFFLOAD_IN) {
MLX5_SET(ipsec_aso, aso_ctx, window_sz,
- attrs->replay_esn.replay_window / 64);
+ attrs->replay_esn.replay_window);
MLX5_SET(ipsec_aso, aso_ctx, mode,
MLX5_IPSEC_ASO_REPLAY_PROTECTION);
}
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 4df6d1c12437..6ceafbd8edc5 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -11995,6 +11995,13 @@ enum {
MLX5_IPSEC_ASO_INC_SN = 0x2,
};
+enum {
+ MLX5_IPSEC_ASO_REPLAY_WIN_32BIT = 0x0,
+ MLX5_IPSEC_ASO_REPLAY_WIN_64BIT = 0x1,
+ MLX5_IPSEC_ASO_REPLAY_WIN_128BIT = 0x2,
+ MLX5_IPSEC_ASO_REPLAY_WIN_256BIT = 0x3,
+};
+
struct mlx5_ifc_ipsec_aso_bits {
u8 valid[0x1];
u8 reserved_at_201[0x1];
--
2.41.0
* [PATCH xfrm-next 4/9] net/mlx5e: Ensure that IPsec sequence packet number starts from 1
2023-10-16 9:15 [PATCH xfrm-next 0/9] mlx5 IPsec replay window enhancement and XFRM core statistics Leon Romanovsky
` (2 preceding siblings ...)
2023-10-16 9:15 ` [PATCH xfrm-next 3/9] net/mlx5e: Honor user choice of IPsec replay window size Leon Romanovsky
@ 2023-10-16 9:15 ` Leon Romanovsky
2023-10-16 9:15 ` [PATCH xfrm-next 5/9] net/mlx5e: Unify esw and normal IPsec status table creation/destruction Leon Romanovsky
` (4 subsequent siblings)
8 siblings, 0 replies; 15+ messages in thread
From: Leon Romanovsky @ 2023-10-16 9:15 UTC (permalink / raw)
To: Steffen Klassert
Cc: Leon Romanovsky, Herbert Xu, netdev, Patrisious Haddad,
Raed Salem, Saeed Mahameed
From: Leon Romanovsky <leonro@nvidia.com>
According to RFC4303, section "3.3.3. Sequence Number Generation",
the first packet sent using a given SA will contain a sequence
number of 1.
However, if the user didn't set seq/oseq, the HW used zero as the first
packet sequence number. Such a misconfiguration causes the first packet
to be dropped if replay window protection is enabled on the SA.
To fix it, set the sequence number to be at least 1.
Fixes: 7db21ef4566e ("net/mlx5e: Set IPsec replay sequence numbers")
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
---
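The rule being enforced, in its most reduced form (illustrative snippet
only; user_seq stands for whatever seq/oseq value userspace supplied,
zero when unset):

    #include <linux/minmax.h>
    #include <linux/types.h>

    static u32 my_initial_seq(u32 user_seq)
    {
            /* RFC 4303, section 3.3.3: the first packet on a fresh SA
             * carries sequence number 1, so a zero-initialized counter
             * must be bumped before it is programmed into the HW.
             */
            return max_t(u32, user_seq, 1);
    }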
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c | 9 ++++++++-
1 file changed, 8 insertions(+), 1 deletion(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
index ddd2230f04aa..bf88232a2fc2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
@@ -121,7 +121,14 @@ static bool mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry)
if (x->xso.type == XFRM_DEV_OFFLOAD_CRYPTO)
esn_msb = xfrm_replay_seqhi(x, htonl(seq_bottom));
- sa_entry->esn_state.esn = esn;
+ if (sa_entry->esn_state.esn_msb)
+ sa_entry->esn_state.esn = esn;
+ else
+ /* According to RFC4303, section "3.3.3. Sequence Number Generation",
+ * the first packet sent using a given SA will contain a sequence
+ * number of 1.
+ */
+ sa_entry->esn_state.esn = max_t(u32, esn, 1);
sa_entry->esn_state.esn_msb = esn_msb;
if (unlikely(overlap && seq_bottom < MLX5E_IPSEC_ESN_SCOPE_MID)) {
--
2.41.0
* [PATCH xfrm-next 5/9] net/mlx5e: Unify esw and normal IPsec status table creation/destruction
2023-10-16 9:15 [PATCH xfrm-next 0/9] mlx5 IPsec replay window enhancement and XFRM core statistics Leon Romanovsky
` (3 preceding siblings ...)
2023-10-16 9:15 ` [PATCH xfrm-next 4/9] net/mlx5e: Ensure that IPsec sequence packet number starts from 1 Leon Romanovsky
@ 2023-10-16 9:15 ` Leon Romanovsky
2023-10-17 9:38 ` Steffen Klassert
2023-10-16 9:15 ` [PATCH xfrm-next 6/9] net/mlx5e: Remove exposure of IPsec RX flow steering struct Leon Romanovsky
` (3 subsequent siblings)
8 siblings, 1 reply; 15+ messages in thread
From: Leon Romanovsky @ 2023-10-16 9:15 UTC (permalink / raw)
To: Steffen Klassert
Cc: Patrisious Haddad, Herbert Xu, netdev, Raed Salem, Saeed Mahameed
From: Patrisious Haddad <phaddad@nvidia.com>
Change the normal IPsec flow to use the same status flow table
creation/destruction functions as the ESW flow, which first of all
reduces code duplication.
More importantly, the ESW status table handles IPsec syndrome checks
in HW at steering time, which is more efficient than the previous
behaviour where the syndrome was copied to WQE metadata and checked
by the driver.
Signed-off-by: Patrisious Haddad <phaddad@nvidia.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
---
.../mellanox/mlx5/core/en_accel/ipsec_fs.c | 187 +++++++++++++-----
.../mellanox/mlx5/core/esw/ipsec_fs.c | 152 --------------
.../mellanox/mlx5/core/esw/ipsec_fs.h | 15 --
3 files changed, 141 insertions(+), 213 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
index f41c976dc33f..85ed5171e835 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
@@ -128,63 +128,166 @@ static struct mlx5_flow_table *ipsec_ft_create(struct mlx5_flow_namespace *ns,
return mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
}
-static int ipsec_status_rule(struct mlx5_core_dev *mdev,
- struct mlx5e_ipsec_rx *rx,
- struct mlx5_flow_destination *dest)
+static void ipsec_rx_status_drop_destroy(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx)
{
- u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
+ mlx5_del_flow_rules(rx->status_drop.rule);
+ mlx5_destroy_flow_group(rx->status_drop.group);
+ mlx5_fc_destroy(ipsec->mdev, rx->status_drop_cnt);
+}
+
+static void ipsec_rx_status_pass_destroy(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx)
+{
+ mlx5_del_flow_rules(rx->status.rule);
+
+ if (rx != ipsec->rx_esw)
+ return;
+
+#ifdef CONFIG_MLX5_ESWITCH
+ mlx5_chains_put_table(esw_chains(ipsec->mdev->priv.eswitch), 0, 1, 0);
+#endif
+}
+
+static int ipsec_rx_status_drop_create(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_table *ft = rx->ft.status;
+ struct mlx5_core_dev *mdev = ipsec->mdev;
+ struct mlx5_flow_destination dest = {};
struct mlx5_flow_act flow_act = {};
- struct mlx5_modify_hdr *modify_hdr;
- struct mlx5_flow_handle *fte;
+ struct mlx5_flow_handle *rule;
+ struct mlx5_fc *flow_counter;
struct mlx5_flow_spec *spec;
- int err;
+ struct mlx5_flow_group *g;
+ u32 *flow_group_in;
+ int err = 0;
+ flow_group_in = kvzalloc(inlen, GFP_KERNEL);
spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
- if (!spec)
- return -ENOMEM;
+ if (!flow_group_in || !spec) {
+ err = -ENOMEM;
+ goto err_out;
+ }
- /* Action to copy 7 bit ipsec_syndrome to regB[24:30] */
- MLX5_SET(copy_action_in, action, action_type, MLX5_ACTION_TYPE_COPY);
- MLX5_SET(copy_action_in, action, src_field, MLX5_ACTION_IN_FIELD_IPSEC_SYNDROME);
- MLX5_SET(copy_action_in, action, src_offset, 0);
- MLX5_SET(copy_action_in, action, length, 7);
- MLX5_SET(copy_action_in, action, dst_field, MLX5_ACTION_IN_FIELD_METADATA_REG_B);
- MLX5_SET(copy_action_in, action, dst_offset, 24);
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ft->max_fte - 1);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ft->max_fte - 1);
+ g = mlx5_create_flow_group(ft, flow_group_in);
+ if (IS_ERR(g)) {
+ err = PTR_ERR(g);
+ mlx5_core_err(mdev,
+ "Failed to add ipsec rx status drop flow group, err=%d\n", err);
+ goto err_out;
+ }
- modify_hdr = mlx5_modify_header_alloc(mdev, MLX5_FLOW_NAMESPACE_KERNEL,
- 1, action);
+ flow_counter = mlx5_fc_create(mdev, false);
+ if (IS_ERR(flow_counter)) {
+ err = PTR_ERR(flow_counter);
+ mlx5_core_err(mdev,
+ "Failed to add ipsec rx status drop rule counter, err=%d\n", err);
+ goto err_cnt;
+ }
- if (IS_ERR(modify_hdr)) {
- err = PTR_ERR(modify_hdr);
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dest.counter_id = mlx5_fc_id(flow_counter);
+ if (rx == ipsec->rx_esw)
+ spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
+ rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
mlx5_core_err(mdev,
- "fail to alloc ipsec copy modify_header_id err=%d\n", err);
- goto out_spec;
+ "Failed to add ipsec rx status drop rule, err=%d\n", err);
+ goto err_rule;
}
- /* create fte */
- flow_act.action = MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
- MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ rx->status_drop.group = g;
+ rx->status_drop.rule = rule;
+ rx->status_drop_cnt = flow_counter;
+
+ kvfree(flow_group_in);
+ kvfree(spec);
+ return 0;
+
+err_rule:
+ mlx5_fc_destroy(mdev, flow_counter);
+err_cnt:
+ mlx5_destroy_flow_group(g);
+err_out:
+ kvfree(flow_group_in);
+ kvfree(spec);
+ return err;
+}
+
+static int ipsec_rx_status_pass_create(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx,
+ struct mlx5_flow_destination *dest)
+{
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_spec *spec;
+ int err;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return -ENOMEM;
+
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ misc_parameters_2.ipsec_syndrome);
+ MLX5_SET(fte_match_param, spec->match_value,
+ misc_parameters_2.ipsec_syndrome, 0);
+ if (rx == ipsec->rx_esw)
+ spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
+ spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
+ flow_act.flags = FLOW_ACT_NO_APPEND;
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
- flow_act.modify_hdr = modify_hdr;
- fte = mlx5_add_flow_rules(rx->ft.status, spec, &flow_act, dest, 2);
- if (IS_ERR(fte)) {
- err = PTR_ERR(fte);
- mlx5_core_err(mdev, "fail to add ipsec rx err copy rule err=%d\n", err);
- goto out;
+ rule = mlx5_add_flow_rules(rx->ft.status, spec, &flow_act, dest, 2);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_warn(ipsec->mdev,
+ "Failed to add ipsec rx status pass rule, err=%d\n", err);
+ goto err_rule;
}
+ rx->status.rule = rule;
kvfree(spec);
- rx->status.rule = fte;
- rx->status.modify_hdr = modify_hdr;
return 0;
-out:
- mlx5_modify_header_dealloc(mdev, modify_hdr);
-out_spec:
+err_rule:
kvfree(spec);
return err;
}
+static void mlx5_ipsec_rx_status_destroy(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx)
+{
+ ipsec_rx_status_pass_destroy(ipsec, rx);
+ ipsec_rx_status_drop_destroy(ipsec, rx);
+}
+
+static int mlx5_ipsec_rx_status_create(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx,
+ struct mlx5_flow_destination *dest)
+{
+ int err;
+
+ err = ipsec_rx_status_drop_create(ipsec, rx);
+ if (err)
+ return err;
+
+ err = ipsec_rx_status_pass_create(ipsec, rx, dest);
+ if (err)
+ goto err_pass_create;
+
+ return 0;
+
+err_pass_create:
+ ipsec_rx_status_drop_destroy(ipsec, rx);
+ return err;
+}
+
static int ipsec_miss_create(struct mlx5_core_dev *mdev,
struct mlx5_flow_table *ft,
struct mlx5e_ipsec_miss *miss,
@@ -333,12 +436,7 @@ static void rx_destroy(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
mlx5_destroy_flow_table(rx->ft.sa);
if (rx->allow_tunnel_mode)
mlx5_eswitch_unblock_encap(mdev);
- if (rx == ipsec->rx_esw) {
- mlx5_esw_ipsec_rx_status_destroy(ipsec, rx);
- } else {
- mlx5_del_flow_rules(rx->status.rule);
- mlx5_modify_header_dealloc(mdev, rx->status.modify_hdr);
- }
+ mlx5_ipsec_rx_status_destroy(ipsec, rx);
mlx5_destroy_flow_table(rx->ft.status);
mlx5_ipsec_fs_roce_rx_destroy(ipsec->roce, family, mdev);
@@ -428,10 +526,7 @@ static int rx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
dest[1].counter_id = mlx5_fc_id(rx->fc->cnt);
- if (rx == ipsec->rx_esw)
- err = mlx5_esw_ipsec_rx_status_create(ipsec, rx, dest);
- else
- err = ipsec_status_rule(mdev, rx, dest);
+ err = mlx5_ipsec_rx_status_create(ipsec, rx, dest);
if (err)
goto err_add;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c
index 095f31f380fa..13b5916b64e2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c
@@ -21,158 +21,6 @@ enum {
MLX5_ESW_IPSEC_TX_ESP_FT_CNT_LEVEL,
};
-static void esw_ipsec_rx_status_drop_destroy(struct mlx5e_ipsec *ipsec,
- struct mlx5e_ipsec_rx *rx)
-{
- mlx5_del_flow_rules(rx->status_drop.rule);
- mlx5_destroy_flow_group(rx->status_drop.group);
- mlx5_fc_destroy(ipsec->mdev, rx->status_drop_cnt);
-}
-
-static void esw_ipsec_rx_status_pass_destroy(struct mlx5e_ipsec *ipsec,
- struct mlx5e_ipsec_rx *rx)
-{
- mlx5_del_flow_rules(rx->status.rule);
- mlx5_chains_put_table(esw_chains(ipsec->mdev->priv.eswitch), 0, 1, 0);
-}
-
-static int esw_ipsec_rx_status_drop_create(struct mlx5e_ipsec *ipsec,
- struct mlx5e_ipsec_rx *rx)
-{
- int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
- struct mlx5_flow_table *ft = rx->ft.status;
- struct mlx5_core_dev *mdev = ipsec->mdev;
- struct mlx5_flow_destination dest = {};
- struct mlx5_flow_act flow_act = {};
- struct mlx5_flow_handle *rule;
- struct mlx5_fc *flow_counter;
- struct mlx5_flow_spec *spec;
- struct mlx5_flow_group *g;
- u32 *flow_group_in;
- int err = 0;
-
- flow_group_in = kvzalloc(inlen, GFP_KERNEL);
- spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
- if (!flow_group_in || !spec) {
- err = -ENOMEM;
- goto err_out;
- }
-
- MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ft->max_fte - 1);
- MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ft->max_fte - 1);
- g = mlx5_create_flow_group(ft, flow_group_in);
- if (IS_ERR(g)) {
- err = PTR_ERR(g);
- mlx5_core_err(mdev,
- "Failed to add ipsec rx status drop flow group, err=%d\n", err);
- goto err_out;
- }
-
- flow_counter = mlx5_fc_create(mdev, false);
- if (IS_ERR(flow_counter)) {
- err = PTR_ERR(flow_counter);
- mlx5_core_err(mdev,
- "Failed to add ipsec rx status drop rule counter, err=%d\n", err);
- goto err_cnt;
- }
-
- flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
- dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest.counter_id = mlx5_fc_id(flow_counter);
- spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
- rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
- if (IS_ERR(rule)) {
- err = PTR_ERR(rule);
- mlx5_core_err(mdev,
- "Failed to add ipsec rx status drop rule, err=%d\n", err);
- goto err_rule;
- }
-
- rx->status_drop.group = g;
- rx->status_drop.rule = rule;
- rx->status_drop_cnt = flow_counter;
-
- kvfree(flow_group_in);
- kvfree(spec);
- return 0;
-
-err_rule:
- mlx5_fc_destroy(mdev, flow_counter);
-err_cnt:
- mlx5_destroy_flow_group(g);
-err_out:
- kvfree(flow_group_in);
- kvfree(spec);
- return err;
-}
-
-static int esw_ipsec_rx_status_pass_create(struct mlx5e_ipsec *ipsec,
- struct mlx5e_ipsec_rx *rx,
- struct mlx5_flow_destination *dest)
-{
- struct mlx5_flow_act flow_act = {};
- struct mlx5_flow_handle *rule;
- struct mlx5_flow_spec *spec;
- int err;
-
- spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
- if (!spec)
- return -ENOMEM;
-
- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
- misc_parameters_2.ipsec_syndrome);
- MLX5_SET(fte_match_param, spec->match_value,
- misc_parameters_2.ipsec_syndrome, 0);
- spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
- spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
- flow_act.flags = FLOW_ACT_NO_APPEND;
- flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
- MLX5_FLOW_CONTEXT_ACTION_COUNT;
- rule = mlx5_add_flow_rules(rx->ft.status, spec, &flow_act, dest, 2);
- if (IS_ERR(rule)) {
- err = PTR_ERR(rule);
- mlx5_core_warn(ipsec->mdev,
- "Failed to add ipsec rx status pass rule, err=%d\n", err);
- goto err_rule;
- }
-
- rx->status.rule = rule;
- kvfree(spec);
- return 0;
-
-err_rule:
- kvfree(spec);
- return err;
-}
-
-void mlx5_esw_ipsec_rx_status_destroy(struct mlx5e_ipsec *ipsec,
- struct mlx5e_ipsec_rx *rx)
-{
- esw_ipsec_rx_status_pass_destroy(ipsec, rx);
- esw_ipsec_rx_status_drop_destroy(ipsec, rx);
-}
-
-int mlx5_esw_ipsec_rx_status_create(struct mlx5e_ipsec *ipsec,
- struct mlx5e_ipsec_rx *rx,
- struct mlx5_flow_destination *dest)
-{
- int err;
-
- err = esw_ipsec_rx_status_drop_create(ipsec, rx);
- if (err)
- return err;
-
- err = esw_ipsec_rx_status_pass_create(ipsec, rx, dest);
- if (err)
- goto err_pass_create;
-
- return 0;
-
-err_pass_create:
- esw_ipsec_rx_status_drop_destroy(ipsec, rx);
- return err;
-}
-
void mlx5_esw_ipsec_rx_create_attr_set(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_rx_create_attr *attr)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.h
index 0c90f7a8b0d3..ac9c65b89166 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.h
@@ -8,11 +8,6 @@ struct mlx5e_ipsec;
struct mlx5e_ipsec_sa_entry;
#ifdef CONFIG_MLX5_ESWITCH
-void mlx5_esw_ipsec_rx_status_destroy(struct mlx5e_ipsec *ipsec,
- struct mlx5e_ipsec_rx *rx);
-int mlx5_esw_ipsec_rx_status_create(struct mlx5e_ipsec *ipsec,
- struct mlx5e_ipsec_rx *rx,
- struct mlx5_flow_destination *dest);
void mlx5_esw_ipsec_rx_create_attr_set(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_rx_create_attr *attr);
int mlx5_esw_ipsec_rx_status_pass_dest_get(struct mlx5e_ipsec *ipsec,
@@ -26,16 +21,6 @@ void mlx5_esw_ipsec_tx_create_attr_set(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_tx_create_attr *attr);
void mlx5_esw_ipsec_restore_dest_uplink(struct mlx5_core_dev *mdev);
#else
-static inline void mlx5_esw_ipsec_rx_status_destroy(struct mlx5e_ipsec *ipsec,
- struct mlx5e_ipsec_rx *rx) {}
-
-static inline int mlx5_esw_ipsec_rx_status_create(struct mlx5e_ipsec *ipsec,
- struct mlx5e_ipsec_rx *rx,
- struct mlx5_flow_destination *dest)
-{
- return -EINVAL;
-}
-
static inline void mlx5_esw_ipsec_rx_create_attr_set(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_rx_create_attr *attr) {}
--
2.41.0
* Re: [PATCH xfrm-next 5/9] net/mlx5e: Unify esw and normal IPsec status table creation/destruction
2023-10-16 9:15 ` [PATCH xfrm-next 5/9] net/mlx5e: Unify esw and normal IPsec status table creation/destruction Leon Romanovsky
@ 2023-10-17 9:38 ` Steffen Klassert
2023-10-17 12:13 ` Leon Romanovsky
0 siblings, 1 reply; 15+ messages in thread
From: Steffen Klassert @ 2023-10-17 9:38 UTC (permalink / raw)
To: Leon Romanovsky
Cc: Patrisious Haddad, Herbert Xu, netdev, Raed Salem, Saeed Mahameed
On Mon, Oct 16, 2023 at 12:15:13PM +0300, Leon Romanovsky wrote:
> From: Patrisious Haddad <phaddad@nvidia.com>
>
> Change normal IPsec flow to use the same creation/destruction functions
> for status flow table as that of ESW, which first of all refines the
> code to have less code duplication.
>
> And more importantly, the ESW status table handles IPsec syndrome
> checks at steering by HW, which is more efficient than the previous
> behaviour we had where it was copied to WQE meta data and checked
> by the driver.
>
> Signed-off-by: Patrisious Haddad <phaddad@nvidia.com>
> Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
This one does not apply to the ipsec-next tree.
* Re: [PATCH xfrm-next 5/9] net/mlx5e: Unify esw and normal IPsec status table creation/destruction
2023-10-17 9:38 ` Steffen Klassert
@ 2023-10-17 12:13 ` Leon Romanovsky
2023-10-18 5:13 ` Leon Romanovsky
2023-10-19 8:46 ` Steffen Klassert
0 siblings, 2 replies; 15+ messages in thread
From: Leon Romanovsky @ 2023-10-17 12:13 UTC (permalink / raw)
To: Steffen Klassert
Cc: Patrisious Haddad, Herbert Xu, netdev, Raed Salem, Saeed Mahameed
On Tue, Oct 17, 2023 at 11:38:51AM +0200, Steffen Klassert wrote:
> On Mon, Oct 16, 2023 at 12:15:13PM +0300, Leon Romanovsky wrote:
> > From: Patrisious Haddad <phaddad@nvidia.com>
> >
> > Change normal IPsec flow to use the same creation/destruction functions
> > for status flow table as that of ESW, which first of all refines the
> > code to have less code duplication.
> >
> > And more importantly, the ESW status table handles IPsec syndrome
> > checks at steering by HW, which is more efficient than the previous
> > behaviour we had where it was copied to WQE meta data and checked
> > by the driver.
> >
> > Signed-off-by: Patrisious Haddad <phaddad@nvidia.com>
> > Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
>
> This one does not apply to the ipsec-next tree.
You are right, sorry about that. It is based on two net-next series
and I didn't expect such a fast response.
1. https://lore.kernel.org/netdev/20231002083832.19746-1-leon@kernel.org/ - accepted.
2. https://lore.kernel.org/netdev/20231014171908.290428-16-saeed@kernel.org/#t - not accepted yet.
Do you feel comfortable with the series/xfrm patches? If yes, Saeed can
resend the series directly to net-next once patch #2 is accepted.
Thanks
* Re: [PATCH xfrm-next 5/9] net/mlx5e: Unify esw and normal IPsec status table creation/destruction
2023-10-17 12:13 ` Leon Romanovsky
@ 2023-10-18 5:13 ` Leon Romanovsky
2023-10-19 8:46 ` Steffen Klassert
1 sibling, 0 replies; 15+ messages in thread
From: Leon Romanovsky @ 2023-10-18 5:13 UTC (permalink / raw)
To: Steffen Klassert
Cc: Patrisious Haddad, Herbert Xu, netdev, Raed Salem, Saeed Mahameed
On Tue, Oct 17, 2023 at 03:13:57PM +0300, Leon Romanovsky wrote:
> On Tue, Oct 17, 2023 at 11:38:51AM +0200, Steffen Klassert wrote:
> > On Mon, Oct 16, 2023 at 12:15:13PM +0300, Leon Romanovsky wrote:
> > > From: Patrisious Haddad <phaddad@nvidia.com>
> > >
> > > Change normal IPsec flow to use the same creation/destruction functions
> > > for status flow table as that of ESW, which first of all refines the
> > > code to have less code duplication.
> > >
> > > And more importantly, the ESW status table handles IPsec syndrome
> > > checks at steering by HW, which is more efficient than the previous
> > > behaviour we had where it was copied to WQE meta data and checked
> > > by the driver.
> > >
> > > Signed-off-by: Patrisious Haddad <phaddad@nvidia.com>
> > > Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
> >
> > This one does not apply to the ipsec-next tree.
>
> You are right, sorry about that. It is based on two net-next series
> and I didn't expect such a fast response.
>
> 1. https://lore.kernel.org/netdev/20231002083832.19746-1-leon@kernel.org/ - accepted.
> 2. https://lore.kernel.org/netdev/20231014171908.290428-16-saeed@kernel.org/#t - not accepted yet.
>
> Do you feel comfortable with the series/xfrm patches? If yes, Saeed can
> resend the series directly to net-next once patch #2 is accepted.
Steffen, the second patch was accepted too.
https://lore.kernel.org/all/169759322682.7564.2475141741118387188.git-patchwork-notify@kernel.org
This series applies cleanly to net-next now.
Thanks
>
> Thanks
>
* Re: [PATCH xfrm-next 5/9] net/mlx5e: Unify esw and normal IPsec status table creation/destruction
2023-10-17 12:13 ` Leon Romanovsky
2023-10-18 5:13 ` Leon Romanovsky
@ 2023-10-19 8:46 ` Steffen Klassert
2023-10-19 11:01 ` Leon Romanovsky
1 sibling, 1 reply; 15+ messages in thread
From: Steffen Klassert @ 2023-10-19 8:46 UTC (permalink / raw)
To: Leon Romanovsky
Cc: Patrisious Haddad, Herbert Xu, netdev, Raed Salem, Saeed Mahameed
On Tue, Oct 17, 2023 at 03:13:57PM +0300, Leon Romanovsky wrote:
> On Tue, Oct 17, 2023 at 11:38:51AM +0200, Steffen Klassert wrote:
> > On Mon, Oct 16, 2023 at 12:15:13PM +0300, Leon Romanovsky wrote:
> > > From: Patrisious Haddad <phaddad@nvidia.com>
> > >
> > > Change normal IPsec flow to use the same creation/destruction functions
> > > for status flow table as that of ESW, which first of all refines the
> > > code to have less code duplication.
> > >
> > > And more importantly, the ESW status table handles IPsec syndrome
> > > checks at steering by HW, which is more efficient than the previous
> > > behaviour we had where it was copied to WQE meta data and checked
> > > by the driver.
> > >
> > > Signed-off-by: Patrisious Haddad <phaddad@nvidia.com>
> > > Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
> >
> > This one does not apply to the ipsec-next tree.
>
> You are right, sorry about that. It is based on two net-next series
> and I didn't expect such a fast response.
>
> 1. https://lore.kernel.org/netdev/20231002083832.19746-1-leon@kernel.org/ - accepted.
> 2. https://lore.kernel.org/netdev/20231014171908.290428-16-saeed@kernel.org/#t - not accepted yet.
>
> Do you feel comfortable with the series/xfrm patches? If yes, Saeed can
> resend the series directly to net-next once patch #2 is accepted.
The xfrm changes look good and they do not conflict
with anything that is in ipsec-next currently. So
send it to net-next and I'll Ack it.
Thanks!
* Re: [PATCH xfrm-next 5/9] net/mlx5e: Unify esw and normal IPsec status table creation/destruction
2023-10-19 8:46 ` Steffen Klassert
@ 2023-10-19 11:01 ` Leon Romanovsky
0 siblings, 0 replies; 15+ messages in thread
From: Leon Romanovsky @ 2023-10-19 11:01 UTC (permalink / raw)
To: Steffen Klassert
Cc: Patrisious Haddad, Herbert Xu, netdev, Raed Salem, Saeed Mahameed
On Thu, Oct 19, 2023 at 10:46:05AM +0200, Steffen Klassert wrote:
> On Tue, Oct 17, 2023 at 03:13:57PM +0300, Leon Romanovsky wrote:
> > On Tue, Oct 17, 2023 at 11:38:51AM +0200, Steffen Klassert wrote:
> > > On Mon, Oct 16, 2023 at 12:15:13PM +0300, Leon Romanovsky wrote:
> > > > From: Patrisious Haddad <phaddad@nvidia.com>
> > > >
> > > > Change normal IPsec flow to use the same creation/destruction functions
> > > > for status flow table as that of ESW, which first of all refines the
> > > > code to have less code duplication.
> > > >
> > > > And more importantly, the ESW status table handles IPsec syndrome
> > > > checks at steering by HW, which is more efficient than the previous
> > > > behaviour we had where it was copied to WQE meta data and checked
> > > > by the driver.
> > > >
> > > > Signed-off-by: Patrisious Haddad <phaddad@nvidia.com>
> > > > Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
> > >
> > > This one does not apply to the ipsec-next tree.
> >
> > You are right, sorry about that. It is based on two net-next series
> > and I didn't expect such a fast response.
> >
> > 1. https://lore.kernel.org/netdev/20231002083832.19746-1-leon@kernel.org/ - accepted.
> > 2. https://lore.kernel.org/netdev/20231014171908.290428-16-saeed@kernel.org/#t - not accepted yet.
> >
> > Do you feel comfortable with the series/xfrm patches? If yes, Saeed can
> > resend the series directly to net-next once patch #2 is accepted.
>
> The xfrm changes look good and it does not conflict
> to anything that is in ipsec-next currently. So
> send it to net-next and I'll Ack it.
Thanks a lot.
>
> Thanks!
* [PATCH xfrm-next 6/9] net/mlx5e: Remove exposure of IPsec RX flow steering struct
2023-10-16 9:15 [PATCH xfrm-next 0/9] mlx5 IPsec replay window enhancement and XFRM core statistics Leon Romanovsky
` (4 preceding siblings ...)
2023-10-16 9:15 ` [PATCH xfrm-next 5/9] net/mlx5e: Unify esw and normal IPsec status table creation/destruction Leon Romanovsky
@ 2023-10-16 9:15 ` Leon Romanovsky
2023-10-16 9:15 ` [PATCH xfrm-next 7/9] net/mlx5e: Add IPsec and ASO syndromes check in HW Leon Romanovsky
` (2 subsequent siblings)
8 siblings, 0 replies; 15+ messages in thread
From: Leon Romanovsky @ 2023-10-16 9:15 UTC (permalink / raw)
To: Steffen Klassert
Cc: Leon Romanovsky, Herbert Xu, netdev, Patrisious Haddad,
Raed Salem, Saeed Mahameed
From: Leon Romanovsky <leonro@nvidia.com>
After the previous commit, which unified the various IPsec creation modes,
there is no need to have struct mlx5e_ipsec_rx exposed in the global
IPsec header. Move it to ipsec_fs.c, next to the already existing
struct mlx5e_ipsec_tx.
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
---
.../ethernet/mellanox/mlx5/core/en_accel/ipsec.h | 14 +-------------
.../mellanox/mlx5/core/en_accel/ipsec_fs.c | 16 ++++++++++++++--
.../ethernet/mellanox/mlx5/core/esw/ipsec_fs.c | 8 ++++----
3 files changed, 19 insertions(+), 19 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
index 8f4a37bceaf4..c3a40bf11952 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
@@ -201,19 +201,6 @@ struct mlx5e_ipsec_miss {
struct mlx5_flow_handle *rule;
};
-struct mlx5e_ipsec_rx {
- struct mlx5e_ipsec_ft ft;
- struct mlx5e_ipsec_miss pol;
- struct mlx5e_ipsec_miss sa;
- struct mlx5e_ipsec_rule status;
- struct mlx5e_ipsec_miss status_drop;
- struct mlx5_fc *status_drop_cnt;
- struct mlx5e_ipsec_fc *fc;
- struct mlx5_fs_chains *chains;
- u8 allow_tunnel_mode : 1;
- struct xarray ipsec_obj_id_map;
-};
-
struct mlx5e_ipsec_tx_create_attr {
int prio;
int pol_level;
@@ -248,6 +235,7 @@ struct mlx5e_ipsec {
struct mlx5_ipsec_fs *roce;
u8 is_uplink_rep: 1;
struct mlx5e_ipsec_mpv_work mpv_work;
+ struct xarray ipsec_obj_id_map;
};
struct mlx5e_ipsec_esn_state {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
index 85ed5171e835..aa74a2422869 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
@@ -32,6 +32,18 @@ struct mlx5e_ipsec_tx {
u8 allow_tunnel_mode : 1;
};
+struct mlx5e_ipsec_rx {
+ struct mlx5e_ipsec_ft ft;
+ struct mlx5e_ipsec_miss pol;
+ struct mlx5e_ipsec_miss sa;
+ struct mlx5e_ipsec_rule status;
+ struct mlx5e_ipsec_miss status_drop;
+ struct mlx5_fc *status_drop_cnt;
+ struct mlx5e_ipsec_fc *fc;
+ struct mlx5_fs_chains *chains;
+ u8 allow_tunnel_mode : 1;
+};
+
/* IPsec RX flow steering */
static enum mlx5_traffic_types family2tt(u32 family)
{
@@ -2052,7 +2064,7 @@ void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_ipsec *ipsec)
kfree(ipsec->rx_ipv6);
if (ipsec->is_uplink_rep) {
- xa_destroy(&ipsec->rx_esw->ipsec_obj_id_map);
+ xa_destroy(&ipsec->ipsec_obj_id_map);
mutex_destroy(&ipsec->tx_esw->ft.mutex);
WARN_ON(ipsec->tx_esw->ft.refcnt);
@@ -2115,7 +2127,7 @@ int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec,
mutex_init(&ipsec->tx_esw->ft.mutex);
mutex_init(&ipsec->rx_esw->ft.mutex);
ipsec->tx_esw->ns = ns_esw;
- xa_init_flags(&ipsec->rx_esw->ipsec_obj_id_map, XA_FLAGS_ALLOC1);
+ xa_init_flags(&ipsec->ipsec_obj_id_map, XA_FLAGS_ALLOC1);
} else if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_ROCE) {
ipsec->roce = mlx5_ipsec_fs_roce_init(mdev, devcom);
} else {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c
index 13b5916b64e2..5a0047bdcb51 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c
@@ -50,7 +50,7 @@ int mlx5_esw_ipsec_rx_setup_modify_header(struct mlx5e_ipsec_sa_entry *sa_entry,
u32 mapped_id;
int err;
- err = xa_alloc_bh(&ipsec->rx_esw->ipsec_obj_id_map, &mapped_id,
+ err = xa_alloc_bh(&ipsec->ipsec_obj_id_map, &mapped_id,
xa_mk_value(sa_entry->ipsec_obj_id),
XA_LIMIT(1, ESW_IPSEC_RX_MAPPED_ID_MASK), 0);
if (err)
@@ -81,7 +81,7 @@ int mlx5_esw_ipsec_rx_setup_modify_header(struct mlx5e_ipsec_sa_entry *sa_entry,
return 0;
err_header_alloc:
- xa_erase_bh(&ipsec->rx_esw->ipsec_obj_id_map, mapped_id);
+ xa_erase_bh(&ipsec->ipsec_obj_id_map, mapped_id);
return err;
}
@@ -90,7 +90,7 @@ void mlx5_esw_ipsec_rx_id_mapping_remove(struct mlx5e_ipsec_sa_entry *sa_entry)
struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
if (sa_entry->rx_mapped_id)
- xa_erase_bh(&ipsec->rx_esw->ipsec_obj_id_map,
+ xa_erase_bh(&ipsec->ipsec_obj_id_map,
sa_entry->rx_mapped_id);
}
@@ -100,7 +100,7 @@ int mlx5_esw_ipsec_rx_ipsec_obj_id_search(struct mlx5e_priv *priv, u32 id,
struct mlx5e_ipsec *ipsec = priv->ipsec;
void *val;
- val = xa_load(&ipsec->rx_esw->ipsec_obj_id_map, id);
+ val = xa_load(&ipsec->ipsec_obj_id_map, id);
if (!val)
return -ENOENT;
--
2.41.0
* [PATCH xfrm-next 7/9] net/mlx5e: Add IPsec and ASO syndromes check in HW
2023-10-16 9:15 [PATCH xfrm-next 0/9] mlx5 IPsec replay window enhancement and XFRM core statistics Leon Romanovsky
` (5 preceding siblings ...)
2023-10-16 9:15 ` [PATCH xfrm-next 6/9] net/mlx5e: Remove exposure of IPsec RX flow steering struct Leon Romanovsky
@ 2023-10-16 9:15 ` Leon Romanovsky
2023-10-16 9:15 ` [PATCH xfrm-next 8/9] net/mlx5e: Connect mlx5 IPsec statistics with XFRM core Leon Romanovsky
2023-10-16 9:15 ` [PATCH xfrm-next 9/9] net/mlx5e: Delete obsolete IPsec code Leon Romanovsky
8 siblings, 0 replies; 15+ messages in thread
From: Leon Romanovsky @ 2023-10-16 9:15 UTC (permalink / raw)
To: Steffen Klassert
Cc: Patrisious Haddad, Herbert Xu, netdev, Raed Salem, Saeed Mahameed
From: Patrisious Haddad <phaddad@nvidia.com>
After IPsec decryption it isn't enough to check only the IPsec syndrome;
the ASO syndrome also needs to be checked in order to verify that the
operation was actually successful.
Verify that both syndromes are zero, and if they are not, drop the
packet and increment the flow counter that matches the drop reason.
Signed-off-by: Patrisious Haddad <phaddad@nvidia.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
---
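A usage note (sketch only, under the assumption that these counters get
reported somewhere later, e.g. from the driver's stats callback; that
wiring is not part of this patch): each drop reason now has its own flow
counter hanging off the SA, so the drops can be read back and attributed
to a cause with mlx5_fc_query_cached():

    struct mlx5e_ipsec_rule *ipsec_rule = &sa_entry->ipsec_rule;
    u64 bytes, packets, lastuse;

    /* Drops due to an authentication failure (ipsec_syndrome == 1). */
    mlx5_fc_query_cached(ipsec_rule->auth.fc, &bytes, &packets, &lastuse);

    /* Drops due to a bad ESP trailer (ipsec_syndrome == 2). */
    mlx5_fc_query_cached(ipsec_rule->trailer.fc, &bytes, &packets, &lastuse);

    /* Drops from the ASO replay check (metadata_reg_c_4 == 1); the replay
     * counter is only allocated for packet-offload SAs.
     */
    if (ipsec_rule->replay.fc)
            mlx5_fc_query_cached(ipsec_rule->replay.fc, &bytes, &packets, &lastuse);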
.../mellanox/mlx5/core/en_accel/ipsec.h | 8 +
.../mellanox/mlx5/core/en_accel/ipsec_fs.c | 235 ++++++++++++++++--
2 files changed, 223 insertions(+), 20 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
index c3a40bf11952..adaea3493193 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
@@ -189,11 +189,19 @@ struct mlx5e_ipsec_ft {
u32 refcnt;
};
+struct mlx5e_ipsec_drop {
+ struct mlx5_flow_handle *rule;
+ struct mlx5_fc *fc;
+};
+
struct mlx5e_ipsec_rule {
struct mlx5_flow_handle *rule;
struct mlx5_modify_hdr *modify_hdr;
struct mlx5_pkt_reformat *pkt_reformat;
struct mlx5_fc *fc;
+ struct mlx5e_ipsec_drop replay;
+ struct mlx5e_ipsec_drop auth;
+ struct mlx5e_ipsec_drop trailer;
};
struct mlx5e_ipsec_miss {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
index aa74a2422869..aeb399d8dae5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
@@ -32,13 +32,17 @@ struct mlx5e_ipsec_tx {
u8 allow_tunnel_mode : 1;
};
+struct mlx5e_ipsec_status_checks {
+ struct mlx5_flow_group *drop_all_group;
+ struct mlx5e_ipsec_drop all;
+};
+
struct mlx5e_ipsec_rx {
struct mlx5e_ipsec_ft ft;
struct mlx5e_ipsec_miss pol;
struct mlx5e_ipsec_miss sa;
struct mlx5e_ipsec_rule status;
- struct mlx5e_ipsec_miss status_drop;
- struct mlx5_fc *status_drop_cnt;
+ struct mlx5e_ipsec_status_checks status_drops;
struct mlx5e_ipsec_fc *fc;
struct mlx5_fs_chains *chains;
u8 allow_tunnel_mode : 1;
@@ -143,9 +147,9 @@ static struct mlx5_flow_table *ipsec_ft_create(struct mlx5_flow_namespace *ns,
static void ipsec_rx_status_drop_destroy(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_rx *rx)
{
- mlx5_del_flow_rules(rx->status_drop.rule);
- mlx5_destroy_flow_group(rx->status_drop.group);
- mlx5_fc_destroy(ipsec->mdev, rx->status_drop_cnt);
+ mlx5_del_flow_rules(rx->status_drops.all.rule);
+ mlx5_fc_destroy(ipsec->mdev, rx->status_drops.all.fc);
+ mlx5_destroy_flow_group(rx->status_drops.drop_all_group);
}
static void ipsec_rx_status_pass_destroy(struct mlx5e_ipsec *ipsec,
@@ -161,8 +165,149 @@ static void ipsec_rx_status_pass_destroy(struct mlx5e_ipsec *ipsec,
#endif
}
-static int ipsec_rx_status_drop_create(struct mlx5e_ipsec *ipsec,
- struct mlx5e_ipsec_rx *rx)
+static int rx_add_rule_drop_auth_trailer(struct mlx5e_ipsec_sa_entry *sa_entry,
+ struct mlx5e_ipsec_rx *rx)
+{
+ struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
+ struct mlx5_flow_table *ft = rx->ft.status;
+ struct mlx5_core_dev *mdev = ipsec->mdev;
+ struct mlx5_flow_destination dest = {};
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_handle *rule;
+ struct mlx5_fc *flow_counter;
+ struct mlx5_flow_spec *spec;
+ int err;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return -ENOMEM;
+
+ flow_counter = mlx5_fc_create(mdev, true);
+ if (IS_ERR(flow_counter)) {
+ err = PTR_ERR(flow_counter);
+ mlx5_core_err(mdev,
+ "Failed to add ipsec rx status drop rule counter, err=%d\n", err);
+ goto err_cnt;
+ }
+ sa_entry->ipsec_rule.auth.fc = flow_counter;
+
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ flow_act.flags = FLOW_ACT_NO_APPEND;
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dest.counter_id = mlx5_fc_id(flow_counter);
+ if (rx == ipsec->rx_esw)
+ spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
+
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.ipsec_syndrome);
+ MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.ipsec_syndrome, 1);
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_c_2);
+ MLX5_SET(fte_match_param, spec->match_value,
+ misc_parameters_2.metadata_reg_c_2,
+ sa_entry->ipsec_obj_id | BIT(31));
+ spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
+ rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_err(mdev,
+ "Failed to add ipsec rx status drop rule, err=%d\n", err);
+ goto err_rule;
+ }
+ sa_entry->ipsec_rule.auth.rule = rule;
+
+ flow_counter = mlx5_fc_create(mdev, true);
+ if (IS_ERR(flow_counter)) {
+ err = PTR_ERR(flow_counter);
+ mlx5_core_err(mdev,
+ "Failed to add ipsec rx status drop rule counter, err=%d\n", err);
+ goto err_cnt_2;
+ }
+ sa_entry->ipsec_rule.trailer.fc = flow_counter;
+
+ dest.counter_id = mlx5_fc_id(flow_counter);
+ MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.ipsec_syndrome, 2);
+ rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_err(mdev,
+ "Failed to add ipsec rx status drop rule, err=%d\n", err);
+ goto err_rule_2;
+ }
+ sa_entry->ipsec_rule.trailer.rule = rule;
+
+ kvfree(spec);
+ return 0;
+
+err_rule_2:
+ mlx5_fc_destroy(mdev, sa_entry->ipsec_rule.trailer.fc);
+err_cnt_2:
+ mlx5_del_flow_rules(sa_entry->ipsec_rule.auth.rule);
+err_rule:
+ mlx5_fc_destroy(mdev, sa_entry->ipsec_rule.auth.fc);
+err_cnt:
+ kvfree(spec);
+ return err;
+}
+
+static int rx_add_rule_drop_replay(struct mlx5e_ipsec_sa_entry *sa_entry, struct mlx5e_ipsec_rx *rx)
+{
+ struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
+ struct mlx5_flow_table *ft = rx->ft.status;
+ struct mlx5_core_dev *mdev = ipsec->mdev;
+ struct mlx5_flow_destination dest = {};
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_handle *rule;
+ struct mlx5_fc *flow_counter;
+ struct mlx5_flow_spec *spec;
+ int err;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return -ENOMEM;
+
+ flow_counter = mlx5_fc_create(mdev, true);
+ if (IS_ERR(flow_counter)) {
+ err = PTR_ERR(flow_counter);
+ mlx5_core_err(mdev,
+ "Failed to add ipsec rx status drop rule counter, err=%d\n", err);
+ goto err_cnt;
+ }
+
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ flow_act.flags = FLOW_ACT_NO_APPEND;
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dest.counter_id = mlx5_fc_id(flow_counter);
+ if (rx == ipsec->rx_esw)
+ spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
+
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_c_4);
+ MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_4, 1);
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_c_2);
+ MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_2,
+ sa_entry->ipsec_obj_id | BIT(31));
+ spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
+ rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_err(mdev,
+ "Failed to add ipsec rx status drop rule, err=%d\n", err);
+ goto err_rule;
+ }
+
+ sa_entry->ipsec_rule.replay.rule = rule;
+ sa_entry->ipsec_rule.replay.fc = flow_counter;
+
+ kvfree(spec);
+ return 0;
+
+err_rule:
+ mlx5_fc_destroy(mdev, flow_counter);
+err_cnt:
+ kvfree(spec);
+ return err;
+}
+
+static int ipsec_rx_status_drop_all_create(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct mlx5_flow_table *ft = rx->ft.status;
@@ -214,9 +359,9 @@ static int ipsec_rx_status_drop_create(struct mlx5e_ipsec *ipsec,
goto err_rule;
}
- rx->status_drop.group = g;
- rx->status_drop.rule = rule;
- rx->status_drop_cnt = flow_counter;
+ rx->status_drops.drop_all_group = g;
+ rx->status_drops.all.rule = rule;
+ rx->status_drops.all.fc = flow_counter;
kvfree(flow_group_in);
kvfree(spec);
@@ -247,8 +392,12 @@ static int ipsec_rx_status_pass_create(struct mlx5e_ipsec *ipsec,
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
misc_parameters_2.ipsec_syndrome);
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ misc_parameters_2.metadata_reg_c_4);
MLX5_SET(fte_match_param, spec->match_value,
misc_parameters_2.ipsec_syndrome, 0);
+ MLX5_SET(fte_match_param, spec->match_value,
+ misc_parameters_2.metadata_reg_c_4, 0);
if (rx == ipsec->rx_esw)
spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
@@ -285,7 +434,7 @@ static int mlx5_ipsec_rx_status_create(struct mlx5e_ipsec *ipsec,
{
int err;
- err = ipsec_rx_status_drop_create(ipsec, rx);
+ err = ipsec_rx_status_drop_all_create(ipsec, rx);
if (err)
return err;
@@ -529,7 +678,7 @@ static int rx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
if (err)
return err;
- ft = ipsec_ft_create(attr.ns, attr.status_level, attr.prio, 1, 0);
+ ft = ipsec_ft_create(attr.ns, attr.status_level, attr.prio, 3, 0);
if (IS_ERR(ft)) {
err = PTR_ERR(ft);
goto err_fs_ft_status;
@@ -1159,29 +1308,48 @@ static int setup_modify_header(struct mlx5e_ipsec *ipsec, int type, u32 val, u8
struct mlx5_flow_act *flow_act)
{
enum mlx5_flow_namespace_type ns_type = ipsec_fs_get_ns(ipsec, type, dir);
- u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
+ u8 action[3][MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
struct mlx5_core_dev *mdev = ipsec->mdev;
struct mlx5_modify_hdr *modify_hdr;
+ u8 num_of_actions = 1;
- MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
+ MLX5_SET(set_action_in, action[0], action_type, MLX5_ACTION_TYPE_SET);
switch (dir) {
case XFRM_DEV_OFFLOAD_IN:
- MLX5_SET(set_action_in, action, field,
+ MLX5_SET(set_action_in, action[0], field,
MLX5_ACTION_IN_FIELD_METADATA_REG_B);
+
+ num_of_actions++;
+ MLX5_SET(set_action_in, action[1], action_type, MLX5_ACTION_TYPE_SET);
+ MLX5_SET(set_action_in, action[1], field, MLX5_ACTION_IN_FIELD_METADATA_REG_C_2);
+ MLX5_SET(set_action_in, action[1], data, val);
+ MLX5_SET(set_action_in, action[1], offset, 0);
+ MLX5_SET(set_action_in, action[1], length, 32);
+
+ if (type == XFRM_DEV_OFFLOAD_CRYPTO) {
+ num_of_actions++;
+ MLX5_SET(set_action_in, action[2], action_type,
+ MLX5_ACTION_TYPE_SET);
+ MLX5_SET(set_action_in, action[2], field,
+ MLX5_ACTION_IN_FIELD_METADATA_REG_C_4);
+ MLX5_SET(set_action_in, action[2], data, 0);
+ MLX5_SET(set_action_in, action[2], offset, 0);
+ MLX5_SET(set_action_in, action[2], length, 32);
+ }
break;
case XFRM_DEV_OFFLOAD_OUT:
- MLX5_SET(set_action_in, action, field,
+ MLX5_SET(set_action_in, action[0], field,
MLX5_ACTION_IN_FIELD_METADATA_REG_C_4);
break;
default:
return -EINVAL;
}
- MLX5_SET(set_action_in, action, data, val);
- MLX5_SET(set_action_in, action, offset, 0);
- MLX5_SET(set_action_in, action, length, 32);
+ MLX5_SET(set_action_in, action[0], data, val);
+ MLX5_SET(set_action_in, action[0], offset, 0);
+ MLX5_SET(set_action_in, action[0], length, 32);
- modify_hdr = mlx5_modify_header_alloc(mdev, ns_type, 1, action);
+ modify_hdr = mlx5_modify_header_alloc(mdev, ns_type, num_of_actions, action);
if (IS_ERR(modify_hdr)) {
mlx5_core_err(mdev, "Failed to allocate modify_header %ld\n",
PTR_ERR(modify_hdr));
@@ -1479,6 +1647,15 @@ static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
mlx5_core_err(mdev, "fail to add RX ipsec rule err=%d\n", err);
goto err_add_flow;
}
+ if (attrs->type == XFRM_DEV_OFFLOAD_PACKET) {
+ err = rx_add_rule_drop_replay(sa_entry, rx);
+ if (err)
+ goto err_add_replay;
+ }
+
+ err = rx_add_rule_drop_auth_trailer(sa_entry, rx);
+ if (err)
+ goto err_drop_reason;
+
kvfree(spec);
sa_entry->ipsec_rule.rule = rule;
@@ -1487,6 +1664,13 @@ static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
sa_entry->ipsec_rule.pkt_reformat = flow_act.pkt_reformat;
return 0;
+err_drop_reason:
+ if (sa_entry->ipsec_rule.replay.rule) {
+ mlx5_del_flow_rules(sa_entry->ipsec_rule.replay.rule);
+ mlx5_fc_destroy(mdev, sa_entry->ipsec_rule.replay.fc);
+ }
+err_add_replay:
+ mlx5_del_flow_rules(rule);
err_add_flow:
mlx5_fc_destroy(mdev, counter);
err_add_cnt:
@@ -1994,6 +2178,17 @@ void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
if (ipsec_rule->modify_hdr)
mlx5_modify_header_dealloc(mdev, ipsec_rule->modify_hdr);
+
+ mlx5_del_flow_rules(ipsec_rule->trailer.rule);
+ mlx5_fc_destroy(mdev, ipsec_rule->trailer.fc);
+
+ mlx5_del_flow_rules(ipsec_rule->auth.rule);
+ mlx5_fc_destroy(mdev, ipsec_rule->auth.fc);
+
+ if (ipsec_rule->replay.rule) {
+ mlx5_del_flow_rules(ipsec_rule->replay.rule);
+ mlx5_fc_destroy(mdev, ipsec_rule->replay.fc);
+ }
mlx5_esw_ipsec_rx_id_mapping_remove(sa_entry);
rx_ft_put(sa_entry->ipsec, sa_entry->attrs.family, sa_entry->attrs.type);
}
--
2.41.0
^ permalink raw reply related [flat|nested] 15+ messages in thread

* [PATCH xfrm-next 8/9] net/mlx5e: Connect mlx5 IPsec statistics with XFRM core
2023-10-16 9:15 [PATCH xfrm-next 0/9] mlx5 IPsec replay window enhancement and XFRM core statistics Leon Romanovsky
` (6 preceding siblings ...)
2023-10-16 9:15 ` [PATCH xfrm-next 7/9] net/mlx5e: Add IPsec and ASO syndromes check in HW Leon Romanovsky
@ 2023-10-16 9:15 ` Leon Romanovsky
2023-10-16 9:15 ` [PATCH xfrm-next 9/9] net/mlx5e: Delete obsolete IPsec code Leon Romanovsky
8 siblings, 0 replies; 15+ messages in thread
From: Leon Romanovsky @ 2023-10-16 9:15 UTC (permalink / raw)
To: Steffen Klassert
Cc: Leon Romanovsky, Herbert Xu, netdev, Patrisious Haddad,
Raed Salem, Saeed Mahameed
From: Leon Romanovsky <leonro@nvidia.com>
Fill the integrity, replay and bad trailer counters.
As an example, after simulating a replay window attack with 5 packets:
[leonro@c ~]$ grep XfrmInStateSeqError /proc/net/xfrm_stat
XfrmInStateSeqError 5
[leonro@c ~]$ sudo ip -s x s
<...>
stats:
replay-window 0 replay 5 failed 0
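The per-state numbers above are filled on demand: the XFRM core polls
the driver through the xdo_dev_state_update_stats() callback before it
reports state statistics. A minimal sketch of that core-side wiring
follows, assuming the callback is reached through
x->xso.dev->xfrmdev_ops; the helper name and exact dereference chain
are illustrative, not quoted from this series:

/* Illustrative sketch only: how the core side is expected to pull
 * driver statistics before they show up in "ip -s x s" or
 * /proc/net/xfrm_stat. Helper name and dereference chain are
 * assumptions for illustration, not code from this patch.
 */
static inline void xfrm_dev_state_update_stats(struct xfrm_state *x)
{
	struct xfrm_dev_offload *xdo = &x->xso;

	if (xdo->dev && xdo->dev->xfrmdev_ops &&
	    xdo->dev->xfrmdev_ops->xdo_dev_state_update_stats)
		/* for mlx5 this lands in mlx5e_xfrm_update_stats() below */
		xdo->dev->xfrmdev_ops->xdo_dev_state_update_stats(x);
}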
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
---
.../mellanox/mlx5/core/en_accel/ipsec.c | 22 +++++++++++++++++--
1 file changed, 20 insertions(+), 2 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
index bf88232a2fc2..5b2660662811 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
@@ -980,19 +980,37 @@ static void mlx5e_xfrm_update_stats(struct xfrm_state *x)
{
struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
struct mlx5e_ipsec_rule *ipsec_rule = &sa_entry->ipsec_rule;
+ struct net *net = dev_net(x->xso.dev);
u64 packets, bytes, lastuse;
lockdep_assert(lockdep_is_held(&x->lock) ||
lockdep_is_held(&dev_net(x->xso.real_dev)->xfrm.xfrm_cfg_mutex) ||
lockdep_is_held(&dev_net(x->xso.real_dev)->xfrm.xfrm_state_lock));
- if (x->xso.flags & XFRM_DEV_OFFLOAD_FLAG_ACQ ||
- x->xso.type != XFRM_DEV_OFFLOAD_PACKET)
+ if (x->xso.flags & XFRM_DEV_OFFLOAD_FLAG_ACQ)
+ return;
+
+ if (sa_entry->attrs.dir == XFRM_DEV_OFFLOAD_IN) {
+ mlx5_fc_query_cached(ipsec_rule->auth.fc, &bytes, &packets, &lastuse);
+ x->stats.integrity_failed += packets;
+ XFRM_ADD_STATS(net, LINUX_MIB_XFRMINSTATEPROTOERROR, packets);
+
+ mlx5_fc_query_cached(ipsec_rule->trailer.fc, &bytes, &packets, &lastuse);
+ XFRM_ADD_STATS(net, LINUX_MIB_XFRMINHDRERROR, packets);
+ }
+
+ if (x->xso.type != XFRM_DEV_OFFLOAD_PACKET)
return;
mlx5_fc_query_cached(ipsec_rule->fc, &bytes, &packets, &lastuse);
x->curlft.packets += packets;
x->curlft.bytes += bytes;
+
+ if (sa_entry->attrs.dir == XFRM_DEV_OFFLOAD_IN) {
+ mlx5_fc_query_cached(ipsec_rule->replay.fc, &bytes, &packets, &lastuse);
+ x->stats.replay += packets;
+ XFRM_ADD_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR, packets);
+ }
}
static int mlx5e_xfrm_validate_policy(struct mlx5_core_dev *mdev,
--
2.41.0
^ permalink raw reply related [flat|nested] 15+ messages in thread

* [PATCH xfrm-next 9/9] net/mlx5e: Delete obsolete IPsec code
2023-10-16 9:15 [PATCH xfrm-next 0/9] mlx5 IPsec replay window enhancement and XFRM core statistics Leon Romanovsky
` (7 preceding siblings ...)
2023-10-16 9:15 ` [PATCH xfrm-next 8/9] net/mlx5e: Connect mlx5 IPsec statistics with XFRM core Leon Romanovsky
@ 2023-10-16 9:15 ` Leon Romanovsky
8 siblings, 0 replies; 15+ messages in thread
From: Leon Romanovsky @ 2023-10-16 9:15 UTC (permalink / raw)
To: Steffen Klassert
Cc: Leon Romanovsky, Herbert Xu, netdev, Patrisious Haddad,
Raed Salem, Saeed Mahameed
From: Leon Romanovsky <leonro@nvidia.com>
Now that HW-managed counters and the drop logic are implemented in
flow steering, the driver code that checks the syndrome is no longer
reachable.
Let's delete it.
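For reference, a short summary of the RX metadata word whose syndrome
field becomes unused, reconstructed from the MLX5_IPSEC_METADATA_*
macros in the ipsec_rxtx.h hunk below (a descriptive comment, not new
driver code):

/* Old RX metadata word, per the macros kept/removed below:
 *   bit  31     - marker
 *   bits 29..24 - syndrome, previously mirrored into xo->status
 *   bits 23..0  - IPsec object id (handle)
 * With the syndrome checked and dropped in HW, the driver now writes
 * only the object id and unconditionally sets CRYPTO_SUCCESS.
 */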
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
---
.../mellanox/mlx5/core/en_accel/ipsec.h | 1 -
.../mellanox/mlx5/core/en_accel/ipsec_rxtx.c | 25 ++-----------------
.../mellanox/mlx5/core/en_accel/ipsec_rxtx.h | 1 -
.../mellanox/mlx5/core/en_accel/ipsec_stats.c | 1 -
4 files changed, 2 insertions(+), 26 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
index adaea3493193..7d943e93cf6d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
@@ -137,7 +137,6 @@ struct mlx5e_ipsec_hw_stats {
struct mlx5e_ipsec_sw_stats {
atomic64_t ipsec_rx_drop_sp_alloc;
atomic64_t ipsec_rx_drop_sadb_miss;
- atomic64_t ipsec_rx_drop_syndrome;
atomic64_t ipsec_tx_drop_bundle;
atomic64_t ipsec_tx_drop_no_state;
atomic64_t ipsec_tx_drop_not_ip;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
index 51a144246ea6..727fa7c18523 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
@@ -304,12 +304,6 @@ bool mlx5e_ipsec_handle_tx_skb(struct net_device *netdev,
return false;
}
-enum {
- MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_DECRYPTED,
- MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_AUTH_FAILED,
- MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_BAD_TRAILER,
-};
-
void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
struct sk_buff *skb,
u32 ipsec_meta_data)
@@ -343,20 +337,7 @@ void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
xo = xfrm_offload(skb);
xo->flags = CRYPTO_DONE;
-
- switch (MLX5_IPSEC_METADATA_SYNDROM(ipsec_meta_data)) {
- case MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_DECRYPTED:
- xo->status = CRYPTO_SUCCESS;
- break;
- case MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_AUTH_FAILED:
- xo->status = CRYPTO_TUNNEL_ESP_AUTH_FAILED;
- break;
- case MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_BAD_TRAILER:
- xo->status = CRYPTO_INVALID_PACKET_SYNTAX;
- break;
- default:
- atomic64_inc(&ipsec->sw_stats.ipsec_rx_drop_syndrome);
- }
+ xo->status = CRYPTO_SUCCESS;
}
int mlx5_esw_ipsec_rx_make_metadata(struct mlx5e_priv *priv, u32 id, u32 *metadata)
@@ -374,8 +355,6 @@ int mlx5_esw_ipsec_rx_make_metadata(struct mlx5e_priv *priv, u32 id, u32 *metada
return err;
}
- *metadata = MLX5_IPSEC_METADATA_CREATE(ipsec_obj_id,
- MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_DECRYPTED);
-
+ *metadata = ipsec_obj_id;
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
index 9ee014a8ad24..ddd9fad19cc5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
@@ -43,7 +43,6 @@
#define MLX5_IPSEC_METADATA_MARKER(metadata) (((metadata) >> 31) & 0x1)
#define MLX5_IPSEC_METADATA_SYNDROM(metadata) (((metadata) >> 24) & GENMASK(5, 0))
#define MLX5_IPSEC_METADATA_HANDLE(metadata) ((metadata) & GENMASK(23, 0))
-#define MLX5_IPSEC_METADATA_CREATE(id, syndrome) ((id) | ((syndrome) << 24))
struct mlx5e_accel_tx_ipsec_state {
struct xfrm_offload *xo;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c
index e0e36a09721c..dd36b04e30a0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c
@@ -51,7 +51,6 @@ static const struct counter_desc mlx5e_ipsec_hw_stats_desc[] = {
static const struct counter_desc mlx5e_ipsec_sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_rx_drop_sp_alloc) },
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_rx_drop_sadb_miss) },
- { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_rx_drop_syndrome) },
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_tx_drop_bundle) },
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_tx_drop_no_state) },
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_tx_drop_not_ip) },
--
2.41.0
^ permalink raw reply related [flat|nested] 15+ messages in thread