From: Jakub Kicinski <kuba@kernel.org>
To: cratiu@nvidia.com, rrameshbabu@nvidia.com, Raed Salem <raeds@nvidia.com>
Cc: Willem de Bruijn <willemdebruijn.kernel@gmail.com>,
netdev@vger.kernel.org, pabeni@redhat.com, borisp@nvidia.com,
gal@nvidia.com, steffen.klassert@secunet.com, tariqt@nvidia.com
Subject: Re: [RFC net-next 14/15] net/mlx5e: Add Rx data path offload
Date: Wed, 29 May 2024 11:38:54 -0700 [thread overview]
Message-ID: <20240529113854.14fd929e@kernel.org> (raw)
In-Reply-To: <664172ded406f_1d6c6729412@willemb.c.googlers.com.notmuch>
On Sun, 12 May 2024 21:54:38 -0400 Willem de Bruijn wrote:
> > + /* TBD: report errors as SW counters to ethtool, any further handling ? */
> > + switch (MLX5_NISP_METADATA_SYNDROM(nisp_meta_data)) {
> > + case MLX5E_NISP_OFFLOAD_RX_SYNDROME_DECRYPTED:
> > + if (psp_rcv(skb))
> > + netdev_warn_once(netdev, "PSP handling failed");
> > + skb->decrypted = 1;
>
> Do not set skb->decrypted if psp_rcv failed? But drop the packet and
> account the drop, likely.
NVIDIA folks, does this seem reasonable?
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/nisp_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/nisp_rxtx.c
index 7ae3e8246d8f..8cf6a8daf721 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/nisp_rxtx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/nisp_rxtx.c
@@ -172,22 +172,24 @@ static int psp_rcv(struct sk_buff *skb)
return 0;
}
-void mlx5e_nisp_offload_handle_rx_skb(struct net_device *netdev, struct sk_buff *skb,
+bool mlx5e_nisp_offload_handle_rx_skb(struct net_device *netdev, struct sk_buff *skb,
struct mlx5_cqe64 *cqe)
{
u32 nisp_meta_data = be32_to_cpu(cqe->ft_metadata);
/* TBD: report errors as SW counters to ethtool, any further handling ? */
- switch (MLX5_NISP_METADATA_SYNDROM(nisp_meta_data)) {
- case MLX5E_NISP_OFFLOAD_RX_SYNDROME_DECRYPTED:
- if (psp_rcv(skb))
- netdev_warn_once(netdev, "PSP handling failed");
- skb->decrypted = 1;
- break;
- default:
- WARN_ON_ONCE(true);
- break;
- }
+ if (MLX5_NISP_METADATA_SYNDROM(nisp_meta_data) != MLX5E_NISP_OFFLOAD_RX_SYNDROME_DECRYPTED)
+ goto drop;
+
+ if (psp_rcv(skb))
+ goto drop;
+
+ skb->decrypted = 1;
+ return false;
+
+drop:
+ kfree_skb(skb);
+ return true;
}
void mlx5e_nisp_tx_build_eseg(struct mlx5e_priv *priv, struct sk_buff *skb,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/nisp_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/nisp_rxtx.h
index 834481232b21..1e13b09b3522 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/nisp_rxtx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/nisp_rxtx.h
@@ -86,7 +86,7 @@ static inline bool mlx5e_nisp_is_rx_flow(struct mlx5_cqe64 *cqe)
return MLX5_NISP_METADATA_MARKER(be32_to_cpu(cqe->ft_metadata));
}
-void mlx5e_nisp_offload_handle_rx_skb(struct net_device *netdev, struct sk_buff *skb,
+bool mlx5e_nisp_offload_handle_rx_skb(struct net_device *netdev, struct sk_buff *skb,
struct mlx5_cqe64 *cqe);
void mlx5e_nisp_csum_complete(struct net_device *netdev, struct sk_buff *skb);
@@ -113,10 +113,11 @@ static inline bool mlx5e_nisp_is_rx_flow(struct mlx5_cqe64 *cqe)
return false;
}
-static inline void mlx5e_nisp_offload_handle_rx_skb(struct net_device *netdev,
+static inline bool mlx5e_nisp_offload_handle_rx_skb(struct net_device *netdev,
struct sk_buff *skb,
struct mlx5_cqe64 *cqe)
{
+ return false;
}
static inline void mlx5e_nisp_csum_complete(struct net_device *netdev, struct sk_buff *skb) { }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index ed3c7d8cf99d..22cf1c563844 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -1552,7 +1552,7 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
#define MLX5E_CE_BIT_MASK 0x80
-static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
+static inline bool mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
u32 cqe_bcnt,
struct mlx5e_rq *rq,
struct sk_buff *skb)
@@ -1566,8 +1566,10 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
if (unlikely(get_cqe_tls_offload(cqe)))
mlx5e_ktls_handle_rx_skb(rq, skb, cqe, &cqe_bcnt);
- if (unlikely(mlx5e_nisp_is_rx_flow(cqe)))
- mlx5e_nisp_offload_handle_rx_skb(netdev, skb, cqe);
+ if (unlikely(mlx5e_nisp_is_rx_flow(cqe))) {
+ if (mlx5e_nisp_offload_handle_rx_skb(netdev, skb, cqe))
+ return true;
+ }
if (unlikely(mlx5_ipsec_is_rx_flow(cqe)))
mlx5e_ipsec_offload_handle_rx_skb(netdev, skb,
@@ -1612,9 +1614,11 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
if (unlikely(mlx5e_skb_is_multicast(skb)))
stats->mcast_packets++;
+
+ return false;
}
-static void mlx5e_shampo_complete_rx_cqe(struct mlx5e_rq *rq,
+static bool mlx5e_shampo_complete_rx_cqe(struct mlx5e_rq *rq,
struct mlx5_cqe64 *cqe,
u32 cqe_bcnt,
struct sk_buff *skb)
@@ -1626,16 +1630,20 @@ static void mlx5e_shampo_complete_rx_cqe(struct mlx5e_rq *rq,
stats->bytes += cqe_bcnt;
stats->gro_bytes += cqe_bcnt;
if (NAPI_GRO_CB(skb)->count != 1)
- return;
- mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb);
+ return false;
+
+ if (mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb))
+ return true;
+
skb_reset_network_header(skb);
if (!skb_flow_dissect_flow_keys(skb, &rq->hw_gro_data->fk, 0)) {
napi_gro_receive(rq->cq.napi, skb);
rq->hw_gro_data->skb = NULL;
}
+ return false;
}
-static inline void mlx5e_complete_rx_cqe(struct mlx5e_rq *rq,
+static inline bool mlx5e_complete_rx_cqe(struct mlx5e_rq *rq,
struct mlx5_cqe64 *cqe,
u32 cqe_bcnt,
struct sk_buff *skb)
@@ -1644,7 +1652,7 @@ static inline void mlx5e_complete_rx_cqe(struct mlx5e_rq *rq,
stats->packets++;
stats->bytes += cqe_bcnt;
- mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb);
+ return mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb);
}
static inline
@@ -1858,7 +1866,8 @@ static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
goto wq_cyc_pop;
}
- mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
+ if (mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb))
+ goto wq_cyc_pop;
if (mlx5e_cqe_regb_chain(cqe))
if (!mlx5e_tc_update_skb_nic(cqe, skb)) {
@@ -1905,7 +1914,8 @@ static void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
goto wq_cyc_pop;
}
- mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
+ if (mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb))
+ goto wq_cyc_pop;
if (rep->vlan && skb_vlan_tag_present(skb))
skb_vlan_pop(skb);
@@ -1954,7 +1964,8 @@ static void mlx5e_handle_rx_cqe_mpwrq_rep(struct mlx5e_rq *rq, struct mlx5_cqe64
if (!skb)
goto mpwrq_cqe_out;
- mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
+ if (mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb))
+ goto mpwrq_cqe_out;
mlx5e_rep_tc_receive(cqe, rq, skb);
@@ -2375,7 +2386,10 @@ static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cq
mlx5e_fill_skb_data(*skb, rq, frag_page, data_bcnt, data_offset);
}
- mlx5e_shampo_complete_rx_cqe(rq, cqe, cqe_bcnt, *skb);
+ if (mlx5e_shampo_complete_rx_cqe(rq, cqe, cqe_bcnt, *skb)) {
+ *skb = NULL;
+ goto free_hd_entry;
+ }
if (flush)
mlx5e_shampo_flush_skb(rq, cqe, match);
free_hd_entry:
@@ -2429,7 +2443,8 @@ static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cq
if (!skb)
goto mpwrq_cqe_out;
- mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
+ if (mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb))
+ goto mpwrq_cqe_out;
if (mlx5e_cqe_regb_chain(cqe))
if (!mlx5e_tc_update_skb_nic(cqe, skb)) {
@@ -2762,7 +2777,8 @@ static void mlx5e_trap_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe
if (!skb)
goto wq_cyc_pop;
- mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
+ if (mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb))
+ goto wq_cyc_pop;
skb_push(skb, ETH_HLEN);
mlx5_devlink_trap_report(rq->mdev, trap_id, skb,
next prev parent reply other threads:[~2024-05-29 18:38 UTC|newest]
Thread overview: 49+ messages / expand[flat|nested] mbox.gz Atom feed top
2024-05-10 3:04 [RFC net-next 00/15] add basic PSP encryption for TCP connections Jakub Kicinski
2024-05-10 3:04 ` [RFC net-next 01/15] psp: add documentation Jakub Kicinski
2024-05-10 22:19 ` Saeed Mahameed
2024-05-11 0:11 ` Jakub Kicinski
2024-05-11 9:41 ` Vadim Fedorenko
2024-05-11 16:25 ` David Ahern
2024-06-26 13:57 ` Sasha Levin
2024-05-13 1:24 ` Willem de Bruijn
2024-05-29 17:35 ` Jakub Kicinski
2024-05-30 0:47 ` Willem de Bruijn
2024-05-30 19:51 ` Jakub Kicinski
2024-05-30 20:15 ` Jakub Kicinski
2024-05-30 21:03 ` Willem de Bruijn
2024-05-31 13:56 ` Willem de Bruijn
2024-06-05 0:08 ` Jakub Kicinski
2024-06-05 20:11 ` Willem de Bruijn
2024-06-05 22:24 ` Jakub Kicinski
2024-06-06 2:40 ` Willem de Bruijn
2024-06-27 15:14 ` Lance Richardson
2024-06-27 22:33 ` Jakub Kicinski
2024-06-28 19:33 ` Lance Richardson
2024-06-28 23:41 ` Jakub Kicinski
2024-05-10 3:04 ` [RFC net-next 02/15] psp: base PSP device support Jakub Kicinski
2024-05-10 3:04 ` [RFC net-next 03/15] net: modify core data structures for PSP datapath support Jakub Kicinski
2024-05-10 3:04 ` [RFC net-next 04/15] tcp: add datapath logic for PSP with inline key exchange Jakub Kicinski
2024-05-10 3:04 ` [RFC net-next 05/15] psp: add op for rotation of secret state Jakub Kicinski
2024-05-16 19:59 ` Lance Richardson
2024-05-29 17:43 ` Jakub Kicinski
2024-05-10 3:04 ` [RFC net-next 06/15] net: psp: add socket security association code Jakub Kicinski
2024-05-10 3:04 ` [RFC net-next 07/15] net: psp: update the TCP MSS to reflect PSP packet overhead Jakub Kicinski
2024-05-13 1:47 ` Willem de Bruijn
2024-05-29 17:48 ` Jakub Kicinski
2024-05-30 0:52 ` Willem de Bruijn
2024-05-10 3:04 ` [RFC net-next 08/15] psp: track generations of secret state Jakub Kicinski
2024-05-10 3:04 ` [RFC net-next 09/15] net/mlx5e: Support PSP offload functionality Jakub Kicinski
2024-05-10 3:04 ` [RFC net-next 10/15] net/mlx5e: Implement PSP operations .assoc_add and .assoc_del Jakub Kicinski
2024-05-10 3:04 ` [RFC net-next 11/15] net/mlx5e: Implement PSP Tx data path Jakub Kicinski
2024-05-10 3:04 ` [RFC net-next 12/15] net/mlx5e: Add PSP steering in local NIC RX Jakub Kicinski
2024-05-13 1:52 ` Willem de Bruijn
2024-05-10 3:04 ` [RFC net-next 13/15] net/mlx5e: Configure PSP Rx flow steering rules Jakub Kicinski
2024-05-10 3:04 ` [RFC net-next 14/15] net/mlx5e: Add Rx data path offload Jakub Kicinski
2024-05-13 1:54 ` Willem de Bruijn
2024-05-29 18:38 ` Jakub Kicinski [this message]
2024-05-30 9:04 ` Cosmin Ratiu
2024-05-10 3:04 ` [RFC net-next 15/15] net/mlx5e: Implement PSP key_rotate operation Jakub Kicinski
2024-05-29 9:16 ` [RFC net-next 00/15] add basic PSP encryption for TCP connections Boris Pismenny
2024-05-29 18:50 ` Jakub Kicinski
2024-05-29 20:01 ` Boris Pismenny
2024-05-29 20:38 ` Jakub Kicinski
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20240529113854.14fd929e@kernel.org \
--to=kuba@kernel.org \
--cc=borisp@nvidia.com \
--cc=cratiu@nvidia.com \
--cc=gal@nvidia.com \
--cc=netdev@vger.kernel.org \
--cc=pabeni@redhat.com \
--cc=raeds@nvidia.com \
--cc=rrameshbabu@nvidia.com \
--cc=steffen.klassert@secunet.com \
--cc=tariqt@nvidia.com \
--cc=willemdebruijn.kernel@gmail.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).