From: Tariq Toukan <tariqt@nvidia.com>
To: Eric Dumazet <edumazet@google.com>,
Jakub Kicinski <kuba@kernel.org>, Paolo Abeni <pabeni@redhat.com>,
Andrew Lunn <andrew+netdev@lunn.ch>,
"David S. Miller" <davem@davemloft.net>
Cc: Saeed Mahameed <saeedm@nvidia.com>,
Leon Romanovsky <leon@kernel.org>,
Tariq Toukan <tariqt@nvidia.com>, Mark Bloch <mbloch@nvidia.com>,
"Alexei Starovoitov" <ast@kernel.org>,
Daniel Borkmann <daniel@iogearbox.net>,
"Jesper Dangaard Brouer" <hawk@kernel.org>,
John Fastabend <john.fastabend@gmail.com>,
Stanislav Fomichev <sdf@fomichev.me>,
Dragos Tatulea <dtatulea@nvidia.com>,
Cosmin Ratiu <cratiu@nvidia.com>, Simon Horman <horms@kernel.org>,
"Jacob Keller" <jacob.e.keller@intel.com>,
Lama Kayal <lkayal@nvidia.com>,
"Michal Swiatkowski" <michal.swiatkowski@linux.intel.com>,
Carolina Jubran <cjubran@nvidia.com>,
Nathan Chancellor <nathan@kernel.org>,
Daniel Zahka <daniel.zahka@gmail.com>,
Rahul Rameshbabu <rrameshbabu@nvidia.com>,
"Raed Salem" <raeds@nvidia.com>, <netdev@vger.kernel.org>,
<linux-rdma@vger.kernel.org>, <linux-kernel@vger.kernel.org>,
<bpf@vger.kernel.org>, Gal Pressman <gal@nvidia.com>
Subject: [PATCH net-next V2 4/5] net/mlx5e: XDP, Use a single linear page per rq
Date: Fri, 3 Apr 2026 12:09:26 +0300 [thread overview]
Message-ID: <20260403090927.139042-5-tariqt@nvidia.com> (raw)
In-Reply-To: <20260403090927.139042-1-tariqt@nvidia.com>
From: Dragos Tatulea <dtatulea@nvidia.com>
Currently in striding rq there is one mlx5e_frag_page member per WQE for
the linear page. This linear page is used only in XDP multi-buffer mode.
This is wasteful because only one linear page is needed per rq: the page
gets refreshed on every packet, regardless of WQE. Furthermore, it is
not needed in other modes (non-XDP, XDP single-buffer).
This change moves the linear page into its own structure (struct
mlx5e_mpw_linear_info) and allocates it only when necessary.
A special structure is created because an upcoming patch will extend
this structure to support fragmentation of the linear page.
This patch has no functional changes.
Signed-off-by: Dragos Tatulea <dtatulea@nvidia.com>
Reviewed-by: Carolina Jubran <cjubran@nvidia.com>
Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
---
drivers/net/ethernet/mellanox/mlx5/core/en.h | 6 ++-
.../net/ethernet/mellanox/mlx5/core/en_main.c | 37 ++++++++++++++++---
.../net/ethernet/mellanox/mlx5/core/en_rx.c | 17 +++++----
3 files changed, 47 insertions(+), 13 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index c7ac6ebe8290..592234780f2b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -591,10 +591,13 @@ union mlx5e_alloc_units {
struct mlx5e_mpw_info {
u16 consumed_strides;
DECLARE_BITMAP(skip_release_bitmap, MLX5_MPWRQ_MAX_PAGES_PER_WQE);
- struct mlx5e_frag_page linear_page;
union mlx5e_alloc_units alloc_units;
};
+struct mlx5e_mpw_linear_info {
+ struct mlx5e_frag_page frag_page;
+};
+
#define MLX5E_MAX_RX_FRAGS 4
struct mlx5e_rq;
@@ -689,6 +692,7 @@ struct mlx5e_rq {
u8 umr_wqebbs;
u8 mtts_per_wqe;
u8 umr_mode;
+ struct mlx5e_mpw_linear_info *linear_info;
struct mlx5e_shampo_hd *shampo;
} mpwqe;
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 1238e5356012..aa8359a48b12 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -369,6 +369,29 @@ static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq, int node)
return 0;
}
+static int mlx5e_rq_alloc_mpwqe_linear_info(struct mlx5e_rq *rq, int node,
+ struct mlx5e_params *params,
+ struct mlx5e_rq_opt_param *rqo,
+ u32 *pool_size)
+{
+ struct mlx5_core_dev *mdev = rq->mdev;
+ struct mlx5e_mpw_linear_info *li;
+
+ if (mlx5e_rx_mpwqe_is_linear_skb(mdev, params, rqo) ||
+ !params->xdp_prog)
+ return 0;
+
+ li = kvzalloc_node(sizeof(*li), GFP_KERNEL, node);
+ if (!li)
+ return -ENOMEM;
+
+ rq->mpwqe.linear_info = li;
+
+ /* additional page per packet for the linear part */
+ *pool_size *= 2;
+
+ return 0;
+}
static u8 mlx5e_mpwrq_access_mode(enum mlx5e_mpwrq_umr_mode umr_mode)
{
@@ -915,10 +938,6 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
mlx5e_mpwqe_get_log_rq_size(mdev, params, rqo);
pool_order = rq->mpwqe.page_shift - PAGE_SHIFT;
- if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params, rqo) &&
- params->xdp_prog)
- pool_size *= 2; /* additional page per packet for the linear part */
-
rq->mpwqe.log_stride_sz =
mlx5e_mpwqe_get_log_stride_size(mdev, params,
rqo);
@@ -936,10 +955,15 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
if (err)
goto err_rq_mkey;
- err = mlx5_rq_shampo_alloc(mdev, params, rq_param, rq, node);
+ err = mlx5e_rq_alloc_mpwqe_linear_info(rq, node, params, rqo,
+ &pool_size);
if (err)
goto err_free_mpwqe_info;
+ err = mlx5_rq_shampo_alloc(mdev, params, rq_param, rq, node);
+ if (err)
+ goto err_free_mpwqe_linear_info;
+
break;
default: /* MLX5_WQ_TYPE_CYCLIC */
err = mlx5_wq_cyc_create(mdev, &rq_param->wq, rqc_wq,
@@ -1054,6 +1078,8 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
switch (rq->wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
mlx5e_rq_free_shampo(rq);
+err_free_mpwqe_linear_info:
+ kvfree(rq->mpwqe.linear_info);
err_free_mpwqe_info:
kvfree(rq->mpwqe.info);
err_rq_mkey:
@@ -1081,6 +1107,7 @@ static void mlx5e_free_rq(struct mlx5e_rq *rq)
switch (rq->wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
mlx5e_rq_free_shampo(rq);
+ kvfree(rq->mpwqe.linear_info);
kvfree(rq->mpwqe.info);
mlx5_core_destroy_mkey(rq->mdev, be32_to_cpu(rq->mpwqe.umr_mkey_be));
mlx5e_free_mpwqe_rq_drop_page(rq);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index f5c0e2a0ada9..feb042d84b8e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -1869,6 +1869,7 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
struct mlx5e_frag_page *frag_page = &wi->alloc_units.frag_pages[page_idx];
u16 headlen = min_t(u16, MLX5E_RX_MAX_HEAD, cqe_bcnt);
struct mlx5e_frag_page *head_page = frag_page;
+ struct mlx5e_frag_page *linear_page = NULL;
struct mlx5e_xdp_buff *mxbuf = &rq->mxbuf;
u32 page_size = BIT(rq->mpwqe.page_shift);
u32 frag_offset = head_offset;
@@ -1897,13 +1898,15 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
if (prog) {
/* area for bpf_xdp_[store|load]_bytes */
net_prefetchw(netmem_address(frag_page->netmem) + frag_offset);
+
+ linear_page = &rq->mpwqe.linear_info->frag_page;
if (unlikely(mlx5e_page_alloc_fragmented(rq->page_pool,
- &wi->linear_page))) {
+ linear_page))) {
rq->stats->buff_alloc_err++;
return NULL;
}
- va = netmem_address(wi->linear_page.netmem);
+ va = netmem_address(linear_page->netmem);
net_prefetchw(va); /* xdp_frame data area */
linear_hr = XDP_PACKET_HEADROOM;
linear_data_len = 0;
@@ -1966,10 +1969,10 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
for (pfp = head_page; pfp < frag_page; pfp++)
pfp->frags++;
- wi->linear_page.frags++;
+ linear_page->frags++;
}
mlx5e_page_release_fragmented(rq->page_pool,
- &wi->linear_page);
+ linear_page);
return NULL; /* page/packet was consumed by XDP */
}
@@ -1988,13 +1991,13 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
mxbuf->xdp.data - mxbuf->xdp.data_meta);
if (unlikely(!skb)) {
mlx5e_page_release_fragmented(rq->page_pool,
- &wi->linear_page);
+ linear_page);
return NULL;
}
skb_mark_for_recycle(skb);
- wi->linear_page.frags++;
- mlx5e_page_release_fragmented(rq->page_pool, &wi->linear_page);
+ linear_page->frags++;
+ mlx5e_page_release_fragmented(rq->page_pool, linear_page);
if (xdp_buff_has_frags(&mxbuf->xdp)) {
struct mlx5e_frag_page *pagep;
--
2.44.0
next prev parent reply other threads:[~2026-04-03 9:11 UTC|newest]
Thread overview: 16+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-04-03 9:09 [PATCH net-next V2 0/5] net/mlx5e: XDP, Add support for multi-packet per page Tariq Toukan
2026-04-03 9:09 ` [PATCH net-next V2 1/5] net/mlx5e: XSK, Increase size for chunk_size param Tariq Toukan
2026-04-05 6:30 ` Dragos Tatulea
2026-04-03 9:09 ` [PATCH net-next V2 2/5] net/mlx5e: XDP, Improve dma address calculation of linear part for XDP_TX Tariq Toukan
2026-04-03 9:09 ` [PATCH net-next V2 3/5] net/mlx5e: XDP, Remove stride size limitation Tariq Toukan
2026-04-03 9:09 ` Tariq Toukan [this message]
2026-04-05 6:08 ` [PATCH net-next V2 4/5] net/mlx5e: XDP, Use a single linear page per rq Dragos Tatulea
2026-04-06 15:43 ` Jakub Kicinski
2026-04-06 16:31 ` Mark Bloch
2026-04-06 18:30 ` Jakub Kicinski
2026-04-06 19:50 ` Mark Bloch
2026-04-06 19:13 ` Nicolai Buchwitz
2026-04-06 19:52 ` Mark Bloch
2026-04-07 0:43 ` Jakub Kicinski
2026-04-03 9:09 ` [PATCH net-next V2 5/5] net/mlx5e: XDP, Use page fragments for linear data in multibuf-mode Tariq Toukan
2026-04-07 11:50 ` [PATCH net-next V2 0/5] net/mlx5e: XDP, Add support for multi-packet per page patchwork-bot+netdevbpf
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260403090927.139042-5-tariqt@nvidia.com \
--to=tariqt@nvidia.com \
--cc=andrew+netdev@lunn.ch \
--cc=ast@kernel.org \
--cc=bpf@vger.kernel.org \
--cc=cjubran@nvidia.com \
--cc=cratiu@nvidia.com \
--cc=daniel.zahka@gmail.com \
--cc=daniel@iogearbox.net \
--cc=davem@davemloft.net \
--cc=dtatulea@nvidia.com \
--cc=edumazet@google.com \
--cc=gal@nvidia.com \
--cc=hawk@kernel.org \
--cc=horms@kernel.org \
--cc=jacob.e.keller@intel.com \
--cc=john.fastabend@gmail.com \
--cc=kuba@kernel.org \
--cc=leon@kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-rdma@vger.kernel.org \
--cc=lkayal@nvidia.com \
--cc=mbloch@nvidia.com \
--cc=michal.swiatkowski@linux.intel.com \
--cc=nathan@kernel.org \
--cc=netdev@vger.kernel.org \
--cc=pabeni@redhat.com \
--cc=raeds@nvidia.com \
--cc=rrameshbabu@nvidia.com \
--cc=saeedm@nvidia.com \
--cc=sdf@fomichev.me \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.