From: Saeed Mahameed <saeed@kernel.org>
To: Mina Almasry <almasrymina@google.com>
Cc: Tariq Toukan <tariqt@nvidia.com>,
"David S. Miller" <davem@davemloft.net>,
Jakub Kicinski <kuba@kernel.org>, Paolo Abeni <pabeni@redhat.com>,
Eric Dumazet <edumazet@google.com>,
Andrew Lunn <andrew+netdev@lunn.ch>,
Saeed Mahameed <saeedm@nvidia.com>,
Leon Romanovsky <leon@kernel.org>,
Richard Cochran <richardcochran@gmail.com>,
Alexei Starovoitov <ast@kernel.org>,
Daniel Borkmann <daniel@iogearbox.net>,
Jesper Dangaard Brouer <hawk@kernel.org>,
John Fastabend <john.fastabend@gmail.com>,
netdev@vger.kernel.org, linux-rdma@vger.kernel.org,
linux-kernel@vger.kernel.org, bpf@vger.kernel.org,
Moshe Shemesh <moshe@nvidia.com>, Mark Bloch <mbloch@nvidia.com>,
Gal Pressman <gal@nvidia.com>, Cosmin Ratiu <cratiu@nvidia.com>,
Dragos Tatulea <dtatulea@nvidia.com>
Subject: Re: [PATCH net-next V2 08/11] net/mlx5e: Convert over to netmem
Date: Thu, 22 May 2025 16:54:31 -0700
Message-ID: <aC-5N9GuwbP73vV7@x130>
In-Reply-To: <CAHS8izNeKdsys4VCEW5F1gDoK7dPJZ6fAew3700TwmH3=tT_ag@mail.gmail.com>
On 22 May 16:18, Mina Almasry wrote:
>On Thu, May 22, 2025 at 2:46 PM Tariq Toukan <tariqt@nvidia.com> wrote:
>>
>> From: Saeed Mahameed <saeedm@nvidia.com>
>>
>> mlx5e_frag_page holds the physical page itself. To naturally support
>> zc page pools, remove the physical page reference from mlx5 and
>> replace it with a netmem_ref, avoiding internal handling in mlx5 for
>> net_iov backed pages.
>>
>> SHAMPO can issue packets that are not split into header and data.
>> These packets will be dropped if the data part resides in a net_iov,
>> as the driver can't read that area.
>>
>> No performance degradation observed.
>>
>> Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
>> Signed-off-by: Dragos Tatulea <dtatulea@nvidia.com>
>> Signed-off-by: Cosmin Ratiu <cratiu@nvidia.com>
>> Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
>> ---
>> drivers/net/ethernet/mellanox/mlx5/core/en.h | 2 +-
>> .../net/ethernet/mellanox/mlx5/core/en_rx.c | 103 ++++++++++--------
>> 2 files changed, 61 insertions(+), 44 deletions(-)
>>
>> diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
>> index c329de1d4f0a..65a73913b9a2 100644
>> --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
>> +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
>> @@ -553,7 +553,7 @@ struct mlx5e_icosq {
>> } ____cacheline_aligned_in_smp;
>>
>> struct mlx5e_frag_page {
>
>omega nit: maybe this should be renamed now to mlx5e_frag_netmem. Up to you.
>
The rename would touch 30+ occurrences; Tariq, Cosmin, up to you. I agree
with the comment, though.
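
FWIW the rename itself is mechanical; the struct would just become
(hypothetical sketch, same fields as below):

struct mlx5e_frag_netmem {
	netmem_ref netmem;
	u16 frags;
};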
>> - struct page *page;
>> + netmem_ref netmem;
>> u16 frags;
>> };
>>
>> diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
>> index e34ef53ebd0e..75e753adedef 100644
>> --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
>> +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
>> @@ -273,33 +273,33 @@ static inline u32 mlx5e_decompress_cqes_start(struct mlx5e_rq *rq,
>>
>> #define MLX5E_PAGECNT_BIAS_MAX (PAGE_SIZE / 64)
>>
>> -static int mlx5e_page_alloc_fragmented(struct page_pool *pool,
>> +static int mlx5e_page_alloc_fragmented(struct page_pool *pp,
>> struct mlx5e_frag_page *frag_page)
>> {
>> - struct page *page;
>> + netmem_ref netmem = page_pool_alloc_netmems(pp,
>> + GFP_ATOMIC | __GFP_NOWARN);
>>
>> - page = page_pool_dev_alloc_pages(pool);
>> - if (unlikely(!page))
>> + if (unlikely(!netmem))
>> return -ENOMEM;
>>
>> - page_pool_fragment_page(page, MLX5E_PAGECNT_BIAS_MAX);
>> + page_pool_fragment_netmem(netmem, MLX5E_PAGECNT_BIAS_MAX);
>>
>> *frag_page = (struct mlx5e_frag_page) {
>> - .page = page,
>> + .netmem = netmem,
>> .frags = 0,
>> };
>>
>> return 0;
>> }
>>
>> -static void mlx5e_page_release_fragmented(struct page_pool *pool,
>> +static void mlx5e_page_release_fragmented(struct page_pool *pp,
>> struct mlx5e_frag_page *frag_page)
>> {
>> u16 drain_count = MLX5E_PAGECNT_BIAS_MAX - frag_page->frags;
>> - struct page *page = frag_page->page;
>> + netmem_ref netmem = frag_page->netmem;
>>
>> - if (page_pool_unref_page(page, drain_count) == 0)
>> - page_pool_put_unrefed_page(pool, page, -1, true);
>> + if (page_pool_unref_netmem(netmem, drain_count) == 0)
>> + page_pool_put_unrefed_netmem(pp, netmem, -1, true);
>> }
>>
>> static inline int mlx5e_get_rx_frag(struct mlx5e_rq *rq,
>> @@ -359,7 +359,7 @@ static int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe_cyc *wqe,
>> frag->flags &= ~BIT(MLX5E_WQE_FRAG_SKIP_RELEASE);
>>
>> headroom = i == 0 ? rq->buff.headroom : 0;
>> - addr = page_pool_get_dma_addr(frag->frag_page->page);
>> + addr = page_pool_get_dma_addr_netmem(frag->frag_page->netmem);
>> wqe->data[i].addr = cpu_to_be64(addr + frag->offset + headroom);
>> }
>>
>> @@ -500,9 +500,10 @@ mlx5e_add_skb_shared_info_frag(struct mlx5e_rq *rq, struct skb_shared_info *sinf
>> struct xdp_buff *xdp, struct mlx5e_frag_page *frag_page,
>> u32 frag_offset, u32 len)
>> {
>> + netmem_ref netmem = frag_page->netmem;
>> skb_frag_t *frag;
>>
>> - dma_addr_t addr = page_pool_get_dma_addr(frag_page->page);
>> + dma_addr_t addr = page_pool_get_dma_addr_netmem(netmem);
>>
>> dma_sync_single_for_cpu(rq->pdev, addr + frag_offset, len, rq->buff.map_dir);
>> if (!xdp_buff_has_frags(xdp)) {
>> @@ -515,9 +516,9 @@ mlx5e_add_skb_shared_info_frag(struct mlx5e_rq *rq, struct skb_shared_info *sinf
>> }
>>
>> frag = &sinfo->frags[sinfo->nr_frags++];
>> - skb_frag_fill_page_desc(frag, frag_page->page, frag_offset, len);
>> + skb_frag_fill_netmem_desc(frag, netmem, frag_offset, len);
>>
>> - if (page_is_pfmemalloc(frag_page->page))
>> + if (!netmem_is_net_iov(netmem) && netmem_is_pfmemalloc(netmem))
>
>nit: unnecessary net_iov check AFAICT?
>
yep, redundant.
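
netmem_is_pfmemalloc() already returns false for net_iov-backed netmem,
so this can collapse to (untested sketch):

	if (netmem_is_pfmemalloc(netmem))
		xdp_buff_set_frag_pfmemalloc(xdp);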
>> xdp_buff_set_frag_pfmemalloc(xdp);
>> sinfo->xdp_frags_size += len;
>> }
>> @@ -528,27 +529,29 @@ mlx5e_add_skb_frag(struct mlx5e_rq *rq, struct sk_buff *skb,
>> u32 frag_offset, u32 len,
>> unsigned int truesize)
>> {
>> - dma_addr_t addr = page_pool_get_dma_addr(frag_page->page);
>> + dma_addr_t addr = page_pool_get_dma_addr_netmem(frag_page->netmem);
>> u8 next_frag = skb_shinfo(skb)->nr_frags;
>> + netmem_ref netmem = frag_page->netmem;
>>
>> dma_sync_single_for_cpu(rq->pdev, addr + frag_offset, len,
>> rq->buff.map_dir);
>>
>> - if (skb_can_coalesce(skb, next_frag, frag_page->page, frag_offset)) {
>> + if (skb_can_coalesce_netmem(skb, next_frag, netmem, frag_offset)) {
>> skb_coalesce_rx_frag(skb, next_frag - 1, len, truesize);
>> - } else {
>> - frag_page->frags++;
>> - skb_add_rx_frag(skb, next_frag, frag_page->page,
>> - frag_offset, len, truesize);
>> + return;
>> }
>> +
>> + frag_page->frags++;
>> + skb_add_rx_frag_netmem(skb, next_frag, netmem,
>> + frag_offset, len, truesize);
>> }
>>
>> static inline void
>> mlx5e_copy_skb_header(struct mlx5e_rq *rq, struct sk_buff *skb,
>> - struct page *page, dma_addr_t addr,
>> + netmem_ref netmem, dma_addr_t addr,
>> int offset_from, int dma_offset, u32 headlen)
>> {
>> - const void *from = page_address(page) + offset_from;
>> + const void *from = netmem_address(netmem) + offset_from;
>
>I think this needs a check that netmem_address() != NULL, with safe
>error handling in case it is NULL. If the netmem is unreadable,
>netmem_address() returns NULL, and because offset_from is added to it,
>you can't NULL-check 'from' afterwards either.
>
Nope, this code path is not for GRO_HW; it is always safe to assume this
is not a net_iov-backed netmem.
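
If we ever want to document that invariant in code, a debug-only assert
would do it (sketch, not part of this patch):

	/* Header buffers always come from a readable (host memory) pool,
	 * so netmem_address() can't return NULL here.
	 */
	DEBUG_NET_WARN_ON_ONCE(netmem_is_net_iov(netmem));
	const void *from = netmem_address(netmem) + offset_from;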
>> /* Aligning len to sizeof(long) optimizes memcpy performance */
>> unsigned int len = ALIGN(headlen, sizeof(long));
>>
>> @@ -685,7 +688,7 @@ static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq,
>> if (unlikely(err))
>> goto err_unmap;
>>
>> - addr = page_pool_get_dma_addr(frag_page->page);
>> + addr = page_pool_get_dma_addr_netmem(frag_page->netmem);
>>
>> for (int j = 0; j < MLX5E_SHAMPO_WQ_HEADER_PER_PAGE; j++) {
>> header_offset = mlx5e_shampo_hd_offset(index++);
>> @@ -796,7 +799,8 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
>> err = mlx5e_page_alloc_fragmented(rq->page_pool, frag_page);
>> if (unlikely(err))
>> goto err_unmap;
>> - addr = page_pool_get_dma_addr(frag_page->page);
>> +
>> + addr = page_pool_get_dma_addr_netmem(frag_page->netmem);
>> umr_wqe->inline_mtts[i] = (struct mlx5_mtt) {
>> .ptag = cpu_to_be64(addr | MLX5_EN_WR),
>> };
>> @@ -1216,7 +1220,7 @@ static void *mlx5e_shampo_get_packet_hd(struct mlx5e_rq *rq, u16 header_index)
>> struct mlx5e_frag_page *frag_page = mlx5e_shampo_hd_to_frag_page(rq, header_index);
>> u16 head_offset = mlx5e_shampo_hd_offset(header_index) + rq->buff.headroom;
>>
>> - return page_address(frag_page->page) + head_offset;
>> + return netmem_address(frag_page->netmem) + head_offset;
>
>Similar concern here. netmem_address may be NULL, especially when you
>enable unreadable netmem support in the later patches. There are a
>couple of call sites below as well.
This is the header path, so we are good.
We only need to check one location in mlx5: the data payload path of
GRO_HW, and it's covered.
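
For reference, the drop in the non-split case boils down to something
like this (sketch; the label name is hypothetical, the actual hunk lives
elsewhere in this patch):

	/* HW didn't split the packet: the driver would have to read the
	 * payload, which is impossible when it resides in unreadable
	 * net_iov memory, so drop.
	 */
	if (unlikely(netmem_is_net_iov(frag_page->netmem)))
		goto free_hd_entry; /* hypothetical drop path */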