From: Tariq Toukan <tariqt@nvidia.com>
To: Eric Dumazet <edumazet@google.com>,
Jakub Kicinski <kuba@kernel.org>, Paolo Abeni <pabeni@redhat.com>,
Andrew Lunn <andrew+netdev@lunn.ch>,
"David S. Miller" <davem@davemloft.net>
Cc: Saeed Mahameed <saeedm@nvidia.com>,
Leon Romanovsky <leon@kernel.org>,
Tariq Toukan <tariqt@nvidia.com>, Mark Bloch <mbloch@nvidia.com>,
"Alexei Starovoitov" <ast@kernel.org>,
Daniel Borkmann <daniel@iogearbox.net>,
"Jesper Dangaard Brouer" <hawk@kernel.org>,
John Fastabend <john.fastabend@gmail.com>,
Richard Cochran <richardcochran@gmail.com>,
<netdev@vger.kernel.org>, <linux-rdma@vger.kernel.org>,
<linux-kernel@vger.kernel.org>, <bpf@vger.kernel.org>,
Gal Pressman <gal@nvidia.com>,
Dragos Tatulea <dtatulea@nvidia.com>,
Cosmin Ratiu <cratiu@nvidia.com>,
Pavel Begunkov <asml.silence@gmail.com>,
David Wei <dw@davidwei.uk>
Subject: [PATCH net-next 01/15] net/mlx5e: Make mlx5e_rq_param naming consistent
Date: Mon, 23 Feb 2026 22:41:41 +0200 [thread overview]
Message-ID: <20260223204155.1783580-2-tariqt@nvidia.com> (raw)
In-Reply-To: <20260223204155.1783580-1-tariqt@nvidia.com>
From: Dragos Tatulea <dtatulea@nvidia.com>
This structure is used under different names: rq_param, rq_params,
param, rqp. Refactor the code to use a single name: rq_param.
This patch has no functional change.
Signed-off-by: Dragos Tatulea <dtatulea@nvidia.com>
Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
---
drivers/net/ethernet/mellanox/mlx5/core/en.h | 5 +-
.../ethernet/mellanox/mlx5/core/en/params.c | 27 +++++-----
.../net/ethernet/mellanox/mlx5/core/en/ptp.c | 4 +-
.../mellanox/mlx5/core/en/xsk/setup.c | 9 ++--
.../net/ethernet/mellanox/mlx5/core/en_main.c | 50 +++++++++----------
5 files changed, 51 insertions(+), 44 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index ea2cd1f5d1d0..550426979627 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -1060,13 +1060,14 @@ void mlx5e_timestamp_init(struct mlx5e_priv *priv);
struct mlx5e_xsk_param;
struct mlx5e_rq_param;
-int mlx5e_open_rq(struct mlx5e_params *params, struct mlx5e_rq_param *param,
+int mlx5e_open_rq(struct mlx5e_params *params, struct mlx5e_rq_param *rq_param,
struct mlx5e_xsk_param *xsk, int node, u16 q_counter,
struct mlx5e_rq *rq);
#define MLX5E_RQ_WQES_TIMEOUT 20000 /* msecs */
int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time);
void mlx5e_close_rq(struct mlx5e_rq *rq);
-int mlx5e_create_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param, u16 q_counter);
+int mlx5e_create_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *rq_param,
+ u16 q_counter);
void mlx5e_destroy_rq(struct mlx5e_rq *rq);
bool mlx5e_reset_rx_moderation(struct dim_cq_moder *cq_moder, u8 cq_period_mode,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
index 8e99d07586fa..3fdaf003e1d0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
@@ -883,14 +883,16 @@ static u8 rq_end_pad_mode(struct mlx5_core_dev *mdev, struct mlx5e_params *param
int mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk,
- struct mlx5e_rq_param *param)
+ struct mlx5e_rq_param *rq_param)
{
- void *rqc = param->rqc;
- void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
+ void *rqc = rq_param->rqc;
u32 lro_timeout;
int ndsegs = 1;
+ void *wq;
int err;
+ wq = MLX5_ADDR_OF(rqc, rqc, wq);
+
switch (params->rq_wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: {
u8 log_wqe_num_of_strides = mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk);
@@ -938,11 +940,12 @@ int mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
}
default: /* MLX5_WQ_TYPE_CYCLIC */
MLX5_SET(wq, wq, log_wq_sz, params->log_rq_mtu_frames);
- err = mlx5e_build_rq_frags_info(mdev, params, xsk, &param->frags_info,
- &param->xdp_frag_size);
+ err = mlx5e_build_rq_frags_info(mdev, params, xsk,
+ &rq_param->frags_info,
+ &rq_param->xdp_frag_size);
if (err)
return err;
- ndsegs = param->frags_info.num_frags;
+ ndsegs = rq_param->frags_info.num_frags;
}
MLX5_SET(wq, wq, wq_type, params->rq_wq_type);
@@ -953,23 +956,23 @@ int mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
MLX5_SET(rqc, rqc, vsd, params->vlan_strip_disable);
MLX5_SET(rqc, rqc, scatter_fcs, params->scatter_fcs_en);
- param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
- mlx5e_build_rx_cq_param(mdev, params, xsk, &param->cqp);
+ rq_param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
+ mlx5e_build_rx_cq_param(mdev, params, xsk, &rq_param->cqp);
return 0;
}
void mlx5e_build_drop_rq_param(struct mlx5_core_dev *mdev,
- struct mlx5e_rq_param *param)
+ struct mlx5e_rq_param *rq_param)
{
- void *rqc = param->rqc;
+ void *rqc = rq_param->rqc;
void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
MLX5_SET(wq, wq, log_wq_stride,
mlx5e_get_rqwq_log_stride(MLX5_WQ_TYPE_CYCLIC, 1));
- param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
+ rq_param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
}
void mlx5e_build_tx_cq_param(struct mlx5_core_dev *mdev,
@@ -1097,7 +1100,7 @@ static u32 mlx5e_mpwrq_total_umr_wqebbs(struct mlx5_core_dev *mdev,
static u8 mlx5e_build_icosq_log_wq_sz(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
- struct mlx5e_rq_param *rqp)
+ struct mlx5e_rq_param *rq_param)
{
u32 wqebbs, total_pages, useful_space;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
index 74660e7fe674..13add74d1b97 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
@@ -660,13 +660,13 @@ static void mlx5e_ptp_build_rq_param(struct mlx5_core_dev *mdev,
struct net_device *netdev,
struct mlx5e_ptp_params *ptp_params)
{
- struct mlx5e_rq_param *rq_params = &ptp_params->rq_param;
+ struct mlx5e_rq_param *rq_param = &ptp_params->rq_param;
struct mlx5e_params *params = &ptp_params->params;
params->rq_wq_type = MLX5_WQ_TYPE_CYCLIC;
mlx5e_init_rq_type_params(mdev, params);
params->sw_mtu = netdev->max_mtu;
- mlx5e_build_rq_param(mdev, params, NULL, rq_params);
+ mlx5e_build_rq_param(mdev, params, NULL, rq_param);
}
static void mlx5e_ptp_build_params(struct mlx5e_ptp *c,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
index 5981c71cae2d..50c14ad29ed6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
@@ -90,8 +90,10 @@ static int mlx5e_init_xsk_rq(struct mlx5e_channel *c,
return xdp_rxq_info_reg(&rq->xdp_rxq, rq->netdev, rq_xdp_ix, c->napi.napi_id);
}
-static int mlx5e_open_xsk_rq(struct mlx5e_channel *c, struct mlx5e_params *params,
- struct mlx5e_rq_param *rq_params, struct xsk_buff_pool *pool,
+static int mlx5e_open_xsk_rq(struct mlx5e_channel *c,
+ struct mlx5e_params *params,
+ struct mlx5e_rq_param *rq_param,
+ struct xsk_buff_pool *pool,
struct mlx5e_xsk_param *xsk)
{
u16 q_counter = c->priv->q_counter[c->sd_ix];
@@ -102,7 +104,8 @@ static int mlx5e_open_xsk_rq(struct mlx5e_channel *c, struct mlx5e_params *param
if (err)
return err;
- err = mlx5e_open_rq(params, rq_params, xsk, cpu_to_node(c->cpu), q_counter, xskrq);
+ err = mlx5e_open_rq(params, rq_param, xsk, cpu_to_node(c->cpu),
+ q_counter, xskrq);
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 7eb691c2a1bd..f2ce24cf56ce 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -780,7 +780,7 @@ static int mlx5e_create_rq_hd_mkey(struct mlx5_core_dev *mdev,
static int mlx5_rq_shampo_alloc(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
- struct mlx5e_rq_param *rqp,
+ struct mlx5e_rq_param *rq_param,
struct mlx5e_rq *rq,
int node)
{
@@ -791,7 +791,7 @@ static int mlx5_rq_shampo_alloc(struct mlx5_core_dev *mdev,
if (!test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state))
return 0;
- hd_per_wq = mlx5e_shampo_hd_per_wq(mdev, params, rqp);
+ hd_per_wq = mlx5e_shampo_hd_per_wq(mdev, params, rq_param);
hd_buf_size = hd_per_wq * BIT(MLX5E_SHAMPO_LOG_HEADER_ENTRY_SIZE);
nentries = hd_buf_size / PAGE_SIZE;
if (!nentries) {
@@ -852,18 +852,17 @@ static void mlx5e_rq_free_shampo(struct mlx5e_rq *rq)
static int mlx5e_alloc_rq(struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk,
- struct mlx5e_rq_param *rqp,
+ struct mlx5e_rq_param *rq_param,
int node, struct mlx5e_rq *rq)
{
+ void *rqc_wq = MLX5_ADDR_OF(rqc, rq_param->rqc, wq);
struct mlx5_core_dev *mdev = rq->mdev;
- void *rqc = rqp->rqc;
- void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
u32 pool_size;
int wq_sz;
int err;
int i;
- rqp->wq.db_numa_node = node;
+ rq_param->wq.db_numa_node = node;
INIT_WORK(&rq->recover_work, mlx5e_rq_err_cqe_work);
INIT_WORK(&rq->rx_timeout_work, mlx5e_rq_timeout_work);
@@ -879,8 +878,8 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
switch (rq->wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
- err = mlx5_wq_ll_create(mdev, &rqp->wq, rqc_wq, &rq->mpwqe.wq,
- &rq->wq_ctrl);
+ err = mlx5_wq_ll_create(mdev, &rq_param->wq, rqc_wq,
+ &rq->mpwqe.wq, &rq->wq_ctrl);
if (err)
goto err_rq_xdp_prog;
@@ -925,14 +924,14 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
if (err)
goto err_rq_mkey;
- err = mlx5_rq_shampo_alloc(mdev, params, rqp, rq, node);
+ err = mlx5_rq_shampo_alloc(mdev, params, rq_param, rq, node);
if (err)
goto err_free_mpwqe_info;
break;
default: /* MLX5_WQ_TYPE_CYCLIC */
- err = mlx5_wq_cyc_create(mdev, &rqp->wq, rqc_wq, &rq->wqe.wq,
- &rq->wq_ctrl);
+ err = mlx5_wq_cyc_create(mdev, &rq_param->wq, rqc_wq,
+ &rq->wqe.wq, &rq->wq_ctrl);
if (err)
goto err_rq_xdp_prog;
@@ -940,7 +939,7 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
wq_sz = mlx5_wq_cyc_get_size(&rq->wqe.wq);
- rq->wqe.info = rqp->frags_info;
+ rq->wqe.info = rq_param->frags_info;
rq->buff.frame0_sz = rq->wqe.info.arr[0].frag_stride;
err = mlx5e_init_wqe_alloc_info(rq, node);
@@ -1085,7 +1084,8 @@ static void mlx5e_free_rq(struct mlx5e_rq *rq)
xdp_rxq_info_unreg(&rq->xdp_rxq);
}
-int mlx5e_create_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param, u16 q_counter)
+int mlx5e_create_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *rq_param,
+ u16 q_counter)
{
struct mlx5_core_dev *mdev = rq->mdev;
u8 ts_format;
@@ -1107,7 +1107,7 @@ int mlx5e_create_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param, u16 q_cou
rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
wq = MLX5_ADDR_OF(rqc, rqc, wq);
- memcpy(rqc, param->rqc, sizeof(param->rqc));
+ memcpy(rqc, rq_param->rqc, sizeof(rq_param->rqc));
MLX5_SET(rqc, rqc, cqn, rq->cq.mcq.cqn);
MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
@@ -1323,7 +1323,7 @@ void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
}
-int mlx5e_open_rq(struct mlx5e_params *params, struct mlx5e_rq_param *param,
+int mlx5e_open_rq(struct mlx5e_params *params, struct mlx5e_rq_param *rq_param,
struct mlx5e_xsk_param *xsk, int node, u16 q_counter,
struct mlx5e_rq *rq)
{
@@ -1333,11 +1333,11 @@ int mlx5e_open_rq(struct mlx5e_params *params, struct mlx5e_rq_param *param,
if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO)
__set_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state);
- err = mlx5e_alloc_rq(params, xsk, param, node, rq);
+ err = mlx5e_alloc_rq(params, xsk, rq_param, node, rq);
if (err)
return err;
- err = mlx5e_create_rq(rq, param, q_counter);
+ err = mlx5e_create_rq(rq, rq_param, q_counter);
if (err)
goto err_free_rq;
@@ -2507,16 +2507,17 @@ static int mlx5e_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
}
static int mlx5e_open_rxq_rq(struct mlx5e_channel *c, struct mlx5e_params *params,
- struct mlx5e_rq_param *rq_params)
+ struct mlx5e_rq_param *rq_param)
{
u16 q_counter = c->priv->q_counter[c->sd_ix];
int err;
- err = mlx5e_init_rxq_rq(c, params, rq_params->xdp_frag_size, &c->rq);
+ err = mlx5e_init_rxq_rq(c, params, rq_param->xdp_frag_size, &c->rq);
if (err)
return err;
- return mlx5e_open_rq(params, rq_params, NULL, cpu_to_node(c->cpu), q_counter, &c->rq);
+ return mlx5e_open_rq(params, rq_param, NULL, cpu_to_node(c->cpu),
+ q_counter, &c->rq);
}
static struct mlx5e_icosq *
@@ -3577,15 +3578,14 @@ static void mlx5e_free_drop_rq(struct mlx5e_rq *rq)
static int mlx5e_alloc_drop_rq(struct mlx5_core_dev *mdev,
struct mlx5e_rq *rq,
- struct mlx5e_rq_param *param)
+ struct mlx5e_rq_param *rq_param)
{
- void *rqc = param->rqc;
- void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
+ void *rqc_wq = MLX5_ADDR_OF(rqc, rq_param->rqc, wq);
int err;
- param->wq.db_numa_node = param->wq.buf_numa_node;
+ rq_param->wq.db_numa_node = rq_param->wq.buf_numa_node;
- err = mlx5_wq_cyc_create(mdev, &param->wq, rqc_wq, &rq->wqe.wq,
+ err = mlx5_wq_cyc_create(mdev, &rq_param->wq, rqc_wq, &rq->wqe.wq,
&rq->wq_ctrl);
if (err)
return err;
--
2.44.0
next prev parent reply other threads:[~2026-02-23 20:43 UTC|newest]
Thread overview: 17+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-02-23 20:41 [PATCH net-next 00/15] net/mlx5e: SHAMPO, Allow high order pages in zerocopy mode Tariq Toukan
2026-02-23 20:41 ` Tariq Toukan [this message]
2026-02-23 20:41 ` [PATCH net-next 02/15] net/mlx5e: Extract striding rq param calculation in function Tariq Toukan
2026-02-23 20:41 ` [PATCH net-next 03/15] net/mlx5e: Extract max_xsk_wqebbs into its own function Tariq Toukan
2026-02-23 20:41 ` [PATCH net-next 04/15] net/mlx5e: Expose and rename xsk channel parameter function Tariq Toukan
2026-02-23 20:41 ` [PATCH net-next 05/15] net/mlx5e: Alloc xsk channel param out of mlx5e_open_xsk() Tariq Toukan
2026-02-23 20:41 ` [PATCH net-next 06/15] net/mlx5e: Move xsk param into new option container struct Tariq Toukan
2026-02-23 20:41 ` [PATCH net-next 07/15] net/mlx5e: Drop unused channel parameters Tariq Toukan
2026-02-23 20:41 ` [PATCH net-next 08/15] net/mlx5e: SHAMPO, Always calculate page size Tariq Toukan
2026-02-23 20:41 ` [PATCH net-next 09/15] net/mlx5e: Set page_pool order based on calculated page_shift Tariq Toukan
2026-02-23 20:41 ` [PATCH net-next 10/15] net/mlx5e: Alloc rq drop page " Tariq Toukan
2026-02-23 20:41 ` [PATCH net-next 11/15] net/mlx5e: RX, Make page frag bias more robust Tariq Toukan
2026-02-23 20:41 ` [PATCH net-next 12/15] net/mlx5e: Add queue config ops for page size Tariq Toukan
2026-02-23 20:41 ` [PATCH net-next 13/15] net/mlx5e: Pass netdev queue config to param calculations Tariq Toukan
2026-02-23 20:41 ` [PATCH net-next 14/15] net/mlx5e: Add param helper to calculate max page size Tariq Toukan
2026-02-23 20:41 ` [PATCH net-next 15/15] net/mlx5e: SHAMPO, Allow high order pages in zerocopy mode Tariq Toukan
2026-02-26 10:10 ` [PATCH net-next 00/15] " patchwork-bot+netdevbpf
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260223204155.1783580-2-tariqt@nvidia.com \
--to=tariqt@nvidia.com \
--cc=andrew+netdev@lunn.ch \
--cc=asml.silence@gmail.com \
--cc=ast@kernel.org \
--cc=bpf@vger.kernel.org \
--cc=cratiu@nvidia.com \
--cc=daniel@iogearbox.net \
--cc=davem@davemloft.net \
--cc=dtatulea@nvidia.com \
--cc=dw@davidwei.uk \
--cc=edumazet@google.com \
--cc=gal@nvidia.com \
--cc=hawk@kernel.org \
--cc=john.fastabend@gmail.com \
--cc=kuba@kernel.org \
--cc=leon@kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-rdma@vger.kernel.org \
--cc=mbloch@nvidia.com \
--cc=netdev@vger.kernel.org \
--cc=pabeni@redhat.com \
--cc=richardcochran@gmail.com \
--cc=saeedm@nvidia.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox