From: Mark Bloch <mbloch@nvidia.com>
To: "David S. Miller" <davem@davemloft.net>,
Jakub Kicinski <kuba@kernel.org>, Paolo Abeni <pabeni@redhat.com>,
Eric Dumazet <edumazet@google.com>,
"Andrew Lunn" <andrew+netdev@lunn.ch>
Cc: <saeedm@nvidia.com>, <gal@nvidia.com>, <leonro@nvidia.com>,
<tariqt@nvidia.com>, Leon Romanovsky <leon@kernel.org>,
Simon Horman <horms@kernel.org>,
Richard Cochran <richardcochran@gmail.com>,
"Alexei Starovoitov" <ast@kernel.org>,
Daniel Borkmann <daniel@iogearbox.net>,
"Jesper Dangaard Brouer" <hawk@kernel.org>,
John Fastabend <john.fastabend@gmail.com>,
<netdev@vger.kernel.org>, <linux-rdma@vger.kernel.org>,
<linux-kernel@vger.kernel.org>, <bpf@vger.kernel.org>,
Dragos Tatulea <dtatulea@nvidia.com>,
Cosmin Ratiu <cratiu@nvidia.com>, Mark Bloch <mbloch@nvidia.com>
Subject: [PATCH net-next v3 03/12] net/mlx5e: SHAMPO: Reorganize mlx5_rq_shampo_alloc
Date: Mon, 9 Jun 2025 17:58:24 +0300
Message-ID: <20250609145833.990793-4-mbloch@nvidia.com>
In-Reply-To: <20250609145833.990793-1-mbloch@nvidia.com>

From: Saeed Mahameed <saeedm@nvidia.com>

Drop the redundant SHAMPO structure alloc/free functions.

Gather together the function calls pertaining to header split info, and
pass the headers per WQ count (hd_per_wq) as a parameter to those
functions to avoid future use-before-initialization mistakes.

Allocate the HW GRO related info outside of the header related info
scope.

Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
Reviewed-by: Dragos Tatulea <dtatulea@nvidia.com>
Signed-off-by: Cosmin Ratiu <cratiu@nvidia.com>
Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
Signed-off-by: Mark Bloch <mbloch@nvidia.com>
---
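Notes: as a review aid (not part of the commit message), here is a
condensed sketch of the reorganized allocation / error-unwind order in
mlx5_rq_shampo_alloc() after this patch, abridged from the full hunk
below. Intermediate steps are elided; each error label releases exactly
what was set up before the failing step, in reverse allocation order:

	rq->mpwqe.shampo = kvzalloc_node(sizeof(*rq->mpwqe.shampo),
					 GFP_KERNEL, node);
	if (!rq->mpwqe.shampo)
		return -ENOMEM;

	/* split headers data structures */
	hd_per_wq = mlx5e_shampo_hd_per_wq(mdev, params, rqp);
	err = mlx5e_rq_shampo_hd_info_alloc(rq, hd_per_wq, node);
	if (err)
		goto err_shampo_hd_info_alloc;

	err = mlx5e_create_rq_hd_umr_mkey(mdev, hd_per_wq,
					  &rq->mpwqe.shampo->mkey);
	if (err)
		goto err_umr_mkey;

	/* gro only data structures */
	rq->hw_gro_data = kvzalloc_node(sizeof(*rq->hw_gro_data),
					GFP_KERNEL, node);
	if (!rq->hw_gro_data) {
		err = -ENOMEM;
		goto err_hw_gro_data;
	}

	return 0;

err_hw_gro_data:
	mlx5_core_destroy_mkey(mdev, rq->mpwqe.shampo->mkey);
err_umr_mkey:
	mlx5e_rq_shampo_hd_info_free(rq);
err_shampo_hd_info_alloc:
	kvfree(rq->mpwqe.shampo);
	return err;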
drivers/net/ethernet/mellanox/mlx5/core/en.h | 1 -
.../net/ethernet/mellanox/mlx5/core/en_main.c | 135 +++++++++---------
2 files changed, 66 insertions(+), 70 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 5b0d03b3efe8..211ea429ea89 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -638,7 +638,6 @@ struct mlx5e_shampo_hd {
struct mlx5e_frag_page *pages;
u32 hd_per_wq;
u16 hd_per_wqe;
- u16 pages_per_wq;
unsigned long *bitmap;
u16 pi;
u16 ci;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index ea822c69d137..3d11c9f87171 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -331,47 +331,6 @@ static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
ucseg->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
}
-static int mlx5e_rq_shampo_hd_alloc(struct mlx5e_rq *rq, int node)
-{
- rq->mpwqe.shampo = kvzalloc_node(sizeof(*rq->mpwqe.shampo),
- GFP_KERNEL, node);
- if (!rq->mpwqe.shampo)
- return -ENOMEM;
- return 0;
-}
-
-static void mlx5e_rq_shampo_hd_free(struct mlx5e_rq *rq)
-{
- kvfree(rq->mpwqe.shampo);
-}
-
-static int mlx5e_rq_shampo_hd_info_alloc(struct mlx5e_rq *rq, int node)
-{
- struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
-
- shampo->bitmap = bitmap_zalloc_node(shampo->hd_per_wq, GFP_KERNEL,
- node);
- shampo->pages = kvzalloc_node(array_size(shampo->hd_per_wq,
- sizeof(*shampo->pages)),
- GFP_KERNEL, node);
- if (!shampo->bitmap || !shampo->pages)
- goto err_nomem;
-
- return 0;
-
-err_nomem:
- bitmap_free(shampo->bitmap);
- kvfree(shampo->pages);
-
- return -ENOMEM;
-}
-
-static void mlx5e_rq_shampo_hd_info_free(struct mlx5e_rq *rq)
-{
- bitmap_free(rq->mpwqe.shampo->bitmap);
- kvfree(rq->mpwqe.shampo->pages);
-}
-
static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq, int node)
{
int wq_sz = mlx5_wq_ll_get_size(&rq->mpwqe.wq);
@@ -584,19 +543,18 @@ static int mlx5e_create_rq_umr_mkey(struct mlx5_core_dev *mdev, struct mlx5e_rq
}
static int mlx5e_create_rq_hd_umr_mkey(struct mlx5_core_dev *mdev,
- struct mlx5e_rq *rq)
+ u16 hd_per_wq, u32 *umr_mkey)
{
u32 max_ksm_size = BIT(MLX5_CAP_GEN(mdev, log_max_klm_list_size));
- if (max_ksm_size < rq->mpwqe.shampo->hd_per_wq) {
+ if (max_ksm_size < hd_per_wq) {
mlx5_core_err(mdev, "max ksm list size 0x%x is smaller than shampo header buffer list size 0x%x\n",
- max_ksm_size, rq->mpwqe.shampo->hd_per_wq);
+ max_ksm_size, hd_per_wq);
return -EINVAL;
}
-
- return mlx5e_create_umr_ksm_mkey(mdev, rq->mpwqe.shampo->hd_per_wq,
+ return mlx5e_create_umr_ksm_mkey(mdev, hd_per_wq,
MLX5E_SHAMPO_LOG_HEADER_ENTRY_SIZE,
- &rq->mpwqe.shampo->mkey);
+ umr_mkey);
}
static void mlx5e_init_frags_partition(struct mlx5e_rq *rq)
@@ -758,6 +716,35 @@ static int mlx5e_init_rxq_rq(struct mlx5e_channel *c, struct mlx5e_params *param
xdp_frag_size);
}
+static int mlx5e_rq_shampo_hd_info_alloc(struct mlx5e_rq *rq, u16 hd_per_wq,
+ int node)
+{
+ struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
+
+ shampo->hd_per_wq = hd_per_wq;
+
+ shampo->bitmap = bitmap_zalloc_node(hd_per_wq, GFP_KERNEL, node);
+ shampo->pages = kvzalloc_node(array_size(hd_per_wq,
+ sizeof(*shampo->pages)),
+ GFP_KERNEL, node);
+ if (!shampo->bitmap || !shampo->pages)
+ goto err_nomem;
+
+ return 0;
+
+err_nomem:
+ kvfree(shampo->pages);
+ bitmap_free(shampo->bitmap);
+
+ return -ENOMEM;
+}
+
+static void mlx5e_rq_shampo_hd_info_free(struct mlx5e_rq *rq)
+{
+ kvfree(rq->mpwqe.shampo->pages);
+ bitmap_free(rq->mpwqe.shampo->bitmap);
+}
+
static int mlx5_rq_shampo_alloc(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_rq_param *rqp,
@@ -765,42 +752,52 @@ static int mlx5_rq_shampo_alloc(struct mlx5_core_dev *mdev,
u32 *pool_size,
int node)
{
+ void *wqc = MLX5_ADDR_OF(rqc, rqp->rqc, wq);
+ u16 hd_per_wq;
+ int wq_size;
int err;
if (!test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state))
return 0;
- err = mlx5e_rq_shampo_hd_alloc(rq, node);
- if (err)
- goto out;
- rq->mpwqe.shampo->hd_per_wq =
- mlx5e_shampo_hd_per_wq(mdev, params, rqp);
- err = mlx5e_create_rq_hd_umr_mkey(mdev, rq);
+
+ rq->mpwqe.shampo = kvzalloc_node(sizeof(*rq->mpwqe.shampo),
+ GFP_KERNEL, node);
+ if (!rq->mpwqe.shampo)
+ return -ENOMEM;
+
+ /* split headers data structures */
+ hd_per_wq = mlx5e_shampo_hd_per_wq(mdev, params, rqp);
+ err = mlx5e_rq_shampo_hd_info_alloc(rq, hd_per_wq, node);
if (err)
- goto err_shampo_hd;
- err = mlx5e_rq_shampo_hd_info_alloc(rq, node);
+ goto err_shampo_hd_info_alloc;
+
+ err = mlx5e_create_rq_hd_umr_mkey(mdev, hd_per_wq,
+ &rq->mpwqe.shampo->mkey);
if (err)
- goto err_shampo_info;
+ goto err_umr_mkey;
+
+ rq->mpwqe.shampo->key = cpu_to_be32(rq->mpwqe.shampo->mkey);
+ rq->mpwqe.shampo->hd_per_wqe =
+ mlx5e_shampo_hd_per_wqe(mdev, params, rqp);
+ wq_size = BIT(MLX5_GET(wq, wqc, log_wq_sz));
+ *pool_size += (rq->mpwqe.shampo->hd_per_wqe * wq_size) /
+ MLX5E_SHAMPO_WQ_HEADER_PER_PAGE;
+
+ /* gro only data structures */
rq->hw_gro_data = kvzalloc_node(sizeof(*rq->hw_gro_data), GFP_KERNEL, node);
if (!rq->hw_gro_data) {
err = -ENOMEM;
goto err_hw_gro_data;
}
- rq->mpwqe.shampo->key =
- cpu_to_be32(rq->mpwqe.shampo->mkey);
- rq->mpwqe.shampo->hd_per_wqe =
- mlx5e_shampo_hd_per_wqe(mdev, params, rqp);
- rq->mpwqe.shampo->pages_per_wq =
- rq->mpwqe.shampo->hd_per_wq / MLX5E_SHAMPO_WQ_HEADER_PER_PAGE;
- *pool_size += rq->mpwqe.shampo->pages_per_wq;
+
return 0;
err_hw_gro_data:
- mlx5e_rq_shampo_hd_info_free(rq);
-err_shampo_info:
mlx5_core_destroy_mkey(mdev, rq->mpwqe.shampo->mkey);
-err_shampo_hd:
- mlx5e_rq_shampo_hd_free(rq);
-out:
+err_umr_mkey:
+ mlx5e_rq_shampo_hd_info_free(rq);
+err_shampo_hd_info_alloc:
+ kvfree(rq->mpwqe.shampo);
return err;
}
@@ -812,7 +809,7 @@ static void mlx5e_rq_free_shampo(struct mlx5e_rq *rq)
kvfree(rq->hw_gro_data);
mlx5e_rq_shampo_hd_info_free(rq);
mlx5_core_destroy_mkey(rq->mdev, rq->mpwqe.shampo->mkey);
- mlx5e_rq_shampo_hd_free(rq);
+ kvfree(rq->mpwqe.shampo);
}
static int mlx5e_alloc_rq(struct mlx5e_params *params,
--
2.34.1