From: Tariq Toukan <tariqt@nvidia.com>
To: Eric Dumazet <edumazet@google.com>,
Jakub Kicinski <kuba@kernel.org>, Paolo Abeni <pabeni@redhat.com>,
Andrew Lunn <andrew+netdev@lunn.ch>,
"David S. Miller" <davem@davemloft.net>
Cc: Saeed Mahameed <saeedm@nvidia.com>,
Leon Romanovsky <leon@kernel.org>,
Tariq Toukan <tariqt@nvidia.com>, Mark Bloch <mbloch@nvidia.com>,
"Alexei Starovoitov" <ast@kernel.org>,
Daniel Borkmann <daniel@iogearbox.net>,
"Jesper Dangaard Brouer" <hawk@kernel.org>,
John Fastabend <john.fastabend@gmail.com>,
Richard Cochran <richardcochran@gmail.com>,
<netdev@vger.kernel.org>, <linux-rdma@vger.kernel.org>,
<linux-kernel@vger.kernel.org>, <bpf@vger.kernel.org>,
Gal Pressman <gal@nvidia.com>,
Dragos Tatulea <dtatulea@nvidia.com>,
Cosmin Ratiu <cratiu@nvidia.com>,
Pavel Begunkov <asml.silence@gmail.com>,
David Wei <dw@davidwei.uk>
Subject: [PATCH net-next 02/15] net/mlx5e: Extract striding rq param calculation in function
Date: Mon, 23 Feb 2026 22:41:42 +0200 [thread overview]
Message-ID: <20260223204155.1783580-3-tariqt@nvidia.com> (raw)
In-Reply-To: <20260223204155.1783580-1-tariqt@nvidia.com>
From: Dragos Tatulea <dtatulea@nvidia.com>
The code calculating parameters for the striding RQ is large
enough to deserve its own function. As the names involved are
also very long, it is very easy to hit the 80-character line
limit every time a change is made. This is an additional sign
that it should be extracted into its own function.
This patch has no functional change.
Signed-off-by: Dragos Tatulea <dtatulea@nvidia.com>
Reviewed-by: Cosmin Ratiu <cratiu@nvidia.com>
Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
---
.../ethernet/mellanox/mlx5/core/en/params.c | 106 ++++++++++--------
1 file changed, 62 insertions(+), 44 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
index 3fdaf003e1d0..07d75a85ee7f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
@@ -880,13 +880,70 @@ static u8 rq_end_pad_mode(struct mlx5_core_dev *mdev, struct mlx5e_params *param
MLX5_WQ_END_PAD_MODE_NONE : MLX5_WQ_END_PAD_MODE_ALIGN;
}
+static int mlx5e_mpwqe_build_rq_param(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params,
+ struct mlx5e_xsk_param *xsk,
+ struct mlx5e_rq_param *rq_param)
+{
+ u8 log_rq_sz = mlx5e_mpwqe_get_log_rq_size(mdev, params, xsk);
+ u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
+ u8 log_wqe_num_of_strides, log_wqe_stride_size;
+ enum mlx5e_mpwrq_umr_mode umr_mode;
+ void *rqc = rq_param->rqc;
+ u32 lro_timeout;
+ void *wq;
+
+ log_wqe_num_of_strides = mlx5e_mpwqe_get_log_num_strides(mdev, params,
+ xsk);
+ log_wqe_stride_size = mlx5e_mpwqe_get_log_stride_size(mdev, params,
+ xsk);
+ umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
+
+ wq = MLX5_ADDR_OF(rqc, rqc, wq);
+ if (!mlx5e_verify_rx_mpwqe_strides(mdev, log_wqe_stride_size,
+ log_wqe_num_of_strides,
+ page_shift, umr_mode)) {
+ mlx5_core_err(mdev,
+ "Bad RX MPWQE params: log_stride_size %u, log_num_strides %u, umr_mode %d\n",
+ log_wqe_stride_size, log_wqe_num_of_strides,
+ umr_mode);
+ return -EINVAL;
+ }
+
+ MLX5_SET(wq, wq, log_wqe_num_of_strides,
+ log_wqe_num_of_strides - MLX5_MPWQE_LOG_NUM_STRIDES_BASE);
+ MLX5_SET(wq, wq, log_wqe_stride_size,
+ log_wqe_stride_size - MLX5_MPWQE_LOG_STRIDE_SZ_BASE);
+ MLX5_SET(wq, wq, log_wq_sz, log_rq_sz);
+ if (params->packet_merge.type != MLX5E_PACKET_MERGE_SHAMPO)
+ return 0;
+
+ MLX5_SET(wq, wq, shampo_enable, true);
+ MLX5_SET(wq, wq, log_reservation_size,
+ MLX5E_SHAMPO_WQ_LOG_RESRV_SIZE -
+ MLX5E_SHAMPO_WQ_RESRV_SIZE_BASE_SHIFT);
+ MLX5_SET(wq, wq, log_max_num_of_packets_per_reservation,
+ mlx5e_shampo_get_log_pkt_per_rsrv(params));
+ MLX5_SET(wq, wq, log_headers_entry_size,
+ MLX5E_SHAMPO_LOG_HEADER_ENTRY_SIZE -
+ MLX5E_SHAMPO_WQ_BASE_HEAD_ENTRY_SIZE_SHIFT);
+ lro_timeout = mlx5e_choose_lro_timeout(mdev,
+ MLX5E_DEFAULT_SHAMPO_TIMEOUT);
+ MLX5_SET(rqc, rqc, reservation_timeout, lro_timeout);
+ MLX5_SET(rqc, rqc, shampo_match_criteria_type,
+ MLX5_RQC_SHAMPO_MATCH_CRITERIA_TYPE_EXTENDED);
+ MLX5_SET(rqc, rqc, shampo_no_match_alignment_granularity,
+ MLX5_RQC_SHAMPO_NO_MATCH_ALIGNMENT_GRANULARITY_STRIDE);
+
+ return 0;
+}
+
int mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk,
struct mlx5e_rq_param *rq_param)
{
void *rqc = rq_param->rqc;
- u32 lro_timeout;
int ndsegs = 1;
void *wq;
int err;
@@ -894,50 +951,11 @@ int mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
wq = MLX5_ADDR_OF(rqc, rqc, wq);
switch (params->rq_wq_type) {
- case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: {
- u8 log_wqe_num_of_strides = mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk);
- u8 log_wqe_stride_size = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
- enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
- u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
-
- if (!mlx5e_verify_rx_mpwqe_strides(mdev, log_wqe_stride_size,
- log_wqe_num_of_strides,
- page_shift, umr_mode)) {
- mlx5_core_err(mdev,
- "Bad RX MPWQE params: log_stride_size %u, log_num_strides %u, umr_mode %d\n",
- log_wqe_stride_size, log_wqe_num_of_strides,
- umr_mode);
- return -EINVAL;
- }
-
- MLX5_SET(wq, wq, log_wqe_num_of_strides,
- log_wqe_num_of_strides - MLX5_MPWQE_LOG_NUM_STRIDES_BASE);
- MLX5_SET(wq, wq, log_wqe_stride_size,
- log_wqe_stride_size - MLX5_MPWQE_LOG_STRIDE_SZ_BASE);
- MLX5_SET(wq, wq, log_wq_sz, mlx5e_mpwqe_get_log_rq_size(mdev, params, xsk));
- if (params->packet_merge.type != MLX5E_PACKET_MERGE_SHAMPO)
- break;
-
- MLX5_SET(wq, wq, shampo_enable, true);
- MLX5_SET(wq, wq, log_reservation_size,
- MLX5E_SHAMPO_WQ_LOG_RESRV_SIZE -
- MLX5E_SHAMPO_WQ_RESRV_SIZE_BASE_SHIFT);
- MLX5_SET(wq, wq,
- log_max_num_of_packets_per_reservation,
- mlx5e_shampo_get_log_pkt_per_rsrv(params));
- MLX5_SET(wq, wq, log_headers_entry_size,
- MLX5E_SHAMPO_LOG_HEADER_ENTRY_SIZE -
- MLX5E_SHAMPO_WQ_BASE_HEAD_ENTRY_SIZE_SHIFT);
- lro_timeout =
- mlx5e_choose_lro_timeout(mdev,
- MLX5E_DEFAULT_SHAMPO_TIMEOUT);
- MLX5_SET(rqc, rqc, reservation_timeout, lro_timeout);
- MLX5_SET(rqc, rqc, shampo_match_criteria_type,
- MLX5_RQC_SHAMPO_MATCH_CRITERIA_TYPE_EXTENDED);
- MLX5_SET(rqc, rqc, shampo_no_match_alignment_granularity,
- MLX5_RQC_SHAMPO_NO_MATCH_ALIGNMENT_GRANULARITY_STRIDE);
+ case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+ err = mlx5e_mpwqe_build_rq_param(mdev, params, xsk, rq_param);
+ if (err)
+ return err;
break;
- }
default: /* MLX5_WQ_TYPE_CYCLIC */
MLX5_SET(wq, wq, log_wq_sz, params->log_rq_mtu_frames);
err = mlx5e_build_rq_frags_info(mdev, params, xsk,
--
2.44.0
next prev parent reply other threads:[~2026-02-23 20:43 UTC|newest]
Thread overview: 17+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-02-23 20:41 [PATCH net-next 00/15] net/mlx5e: SHAMPO, Allow high order pages in zerocopy mode Tariq Toukan
2026-02-23 20:41 ` [PATCH net-next 01/15] net/mlx5e: Make mlx5e_rq_param naming consistent Tariq Toukan
2026-02-23 20:41 ` Tariq Toukan [this message]
2026-02-23 20:41 ` [PATCH net-next 03/15] net/mlx5e: Extract max_xsk_wqebbs into its own function Tariq Toukan
2026-02-23 20:41 ` [PATCH net-next 04/15] net/mlx5e: Expose and rename xsk channel parameter function Tariq Toukan
2026-02-23 20:41 ` [PATCH net-next 05/15] net/mlx5e: Alloc xsk channel param out of mlx5e_open_xsk() Tariq Toukan
2026-02-23 20:41 ` [PATCH net-next 06/15] net/mlx5e: Move xsk param into new option container struct Tariq Toukan
2026-02-23 20:41 ` [PATCH net-next 07/15] net/mlx5e: Drop unused channel parameters Tariq Toukan
2026-02-23 20:41 ` [PATCH net-next 08/15] net/mlx5e: SHAMPO, Always calculate page size Tariq Toukan
2026-02-23 20:41 ` [PATCH net-next 09/15] net/mlx5e: Set page_pool order based on calculated page_shift Tariq Toukan
2026-02-23 20:41 ` [PATCH net-next 10/15] net/mlx5e: Alloc rq drop page " Tariq Toukan
2026-02-23 20:41 ` [PATCH net-next 11/15] net/mlx5e: RX, Make page frag bias more robust Tariq Toukan
2026-02-23 20:41 ` [PATCH net-next 12/15] net/mlx5e: Add queue config ops for page size Tariq Toukan
2026-02-23 20:41 ` [PATCH net-next 13/15] net/mlx5e: Pass netdev queue config to param calculations Tariq Toukan
2026-02-23 20:41 ` [PATCH net-next 14/15] net/mlx5e: Add param helper to calculate max page size Tariq Toukan
2026-02-23 20:41 ` [PATCH net-next 15/15] net/mlx5e: SHAMPO, Allow high order pages in zerocopy mode Tariq Toukan
2026-02-26 10:10 ` [PATCH net-next 00/15] " patchwork-bot+netdevbpf
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260223204155.1783580-3-tariqt@nvidia.com \
--to=tariqt@nvidia.com \
--cc=andrew+netdev@lunn.ch \
--cc=asml.silence@gmail.com \
--cc=ast@kernel.org \
--cc=bpf@vger.kernel.org \
--cc=cratiu@nvidia.com \
--cc=daniel@iogearbox.net \
--cc=davem@davemloft.net \
--cc=dtatulea@nvidia.com \
--cc=dw@davidwei.uk \
--cc=edumazet@google.com \
--cc=gal@nvidia.com \
--cc=hawk@kernel.org \
--cc=john.fastabend@gmail.com \
--cc=kuba@kernel.org \
--cc=leon@kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-rdma@vger.kernel.org \
--cc=mbloch@nvidia.com \
--cc=netdev@vger.kernel.org \
--cc=pabeni@redhat.com \
--cc=richardcochran@gmail.com \
--cc=saeedm@nvidia.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox