From: Jakub Kicinski <kuba@kernel.org>
To: davem@davemloft.net
Cc: netdev@vger.kernel.org, edumazet@google.com, pabeni@redhat.com,
andrew+netdev@lunn.ch, horms@kernel.org, almasrymina@google.com,
tariqt@nvidia.com, dtatulea@nvidia.com, hawk@kernel.org,
ilias.apalodimas@linaro.org, alexanderduyck@fb.com,
sdf@fomichev.me, Jakub Kicinski <kuba@kernel.org>
Subject: [PATCH net-next v2 04/14] eth: fbnic: use netmem_ref where applicable
Date: Thu, 28 Aug 2025 18:22:54 -0700
Message-ID: <20250829012304.4146195-5-kuba@kernel.org>
In-Reply-To: <20250829012304.4146195-1-kuba@kernel.org>

Use netmem_ref instead of a struct page pointer in preparation for
unreadable memory. fbnic has separate free-buffer submission queues
for headers and for data. Refactor the helper that returns the page
pointer for a submission buffer to take the high-level queue
container, and create separate handlers for the header and payload
rings. This confines the "upcast" from netmem to system page to sub0,
which we know holds only system pages.

Reviewed-by: Mina Almasry <almasrymina@google.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
---
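Side note for reviewers: the split is meant to encode an invariant,
not just a mechanical rename. A minimal sketch of the idea follows;
the helper name is hypothetical and not part of this patch, while
netmem_ref, netmem_is_net_iov(), netmem_to_page() and
DEBUG_NET_WARN_ON_ONCE() are the existing core APIs:

/* Header buffers (sub0) always come from a conventional page pool,
 * so their netmem_ref handles are known to wrap system pages and may
 * be converted back with netmem_to_page(). Payload buffers (sub1)
 * may later be backed by unreadable memory (net_iov) and must stay
 * opaque netmem_ref values end to end.
 */
static struct page *hdr_netmem_to_page(netmem_ref netmem)
{
	/* Would fire only if sub0 were ever fed non-page memory */
	DEBUG_NET_WARN_ON_ONCE(netmem_is_net_iov(netmem));
	return netmem_to_page(netmem);
}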
 drivers/net/ethernet/meta/fbnic/fbnic_txrx.h |  2 +-
 drivers/net/ethernet/meta/fbnic/fbnic_txrx.c | 65 ++++++++++++--------
 2 files changed, 40 insertions(+), 27 deletions(-)

diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h
index a935a1acfb3e..58ae7f9c8f54 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h
@@ -100,7 +100,7 @@ struct fbnic_queue_stats {
 #define FBNIC_PAGECNT_BIAS_MAX	PAGE_SIZE
 
 struct fbnic_rx_buf {
-	struct page *page;
+	netmem_ref netmem;
 	long pagecnt_bias;
 };
 
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c
index 15ebbaa0bed2..8dbe83bc2be1 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c
@@ -715,35 +715,47 @@ static void fbnic_clean_tsq(struct fbnic_napi_vector *nv,
 }
 
 static void fbnic_page_pool_init(struct fbnic_ring *ring, unsigned int idx,
-				 struct page *page)
+				 netmem_ref netmem)
 {
 	struct fbnic_rx_buf *rx_buf = &ring->rx_buf[idx];
 
-	page_pool_fragment_page(page, FBNIC_PAGECNT_BIAS_MAX);
+	page_pool_fragment_netmem(netmem, FBNIC_PAGECNT_BIAS_MAX);
 	rx_buf->pagecnt_bias = FBNIC_PAGECNT_BIAS_MAX;
-	rx_buf->page = page;
+	rx_buf->netmem = netmem;
 }
 
-static struct page *fbnic_page_pool_get(struct fbnic_ring *ring,
-					unsigned int idx)
+static struct page *
+fbnic_page_pool_get_head(struct fbnic_q_triad *qt, unsigned int idx)
 {
-	struct fbnic_rx_buf *rx_buf = &ring->rx_buf[idx];
+	struct fbnic_rx_buf *rx_buf = &qt->sub0.rx_buf[idx];
 
 	rx_buf->pagecnt_bias--;
 
-	return rx_buf->page;
+	/* sub0 is always fed system pages, from the NAPI-level page_pool */
+	return netmem_to_page(rx_buf->netmem);
+}
+
+static netmem_ref
+fbnic_page_pool_get_data(struct fbnic_q_triad *qt, unsigned int idx)
+{
+	struct fbnic_rx_buf *rx_buf = &qt->sub1.rx_buf[idx];
+
+	rx_buf->pagecnt_bias--;
+
+	return rx_buf->netmem;
 }
 
 static void fbnic_page_pool_drain(struct fbnic_ring *ring, unsigned int idx,
 				  int budget)
 {
 	struct fbnic_rx_buf *rx_buf = &ring->rx_buf[idx];
-	struct page *page = rx_buf->page;
+	netmem_ref netmem = rx_buf->netmem;
 
-	if (!page_pool_unref_page(page, rx_buf->pagecnt_bias))
-		page_pool_put_unrefed_page(ring->page_pool, page, -1, !!budget);
+	if (!page_pool_unref_netmem(netmem, rx_buf->pagecnt_bias))
+		page_pool_put_unrefed_netmem(ring->page_pool, netmem, -1,
+					     !!budget);
 
-	rx_buf->page = NULL;
+	rx_buf->netmem = 0;
 }
 
 static void fbnic_clean_twq(struct fbnic_napi_vector *nv, int napi_budget,
@@ -844,10 +856,10 @@ static void fbnic_clean_bdq(struct fbnic_ring *ring, unsigned int hw_head,
 	ring->head = head;
 }
 
-static void fbnic_bd_prep(struct fbnic_ring *bdq, u16 id, struct page *page)
+static void fbnic_bd_prep(struct fbnic_ring *bdq, u16 id, netmem_ref netmem)
 {
 	__le64 *bdq_desc = &bdq->desc[id * FBNIC_BD_FRAG_COUNT];
-	dma_addr_t dma = page_pool_get_dma_addr(page);
+	dma_addr_t dma = page_pool_get_dma_addr_netmem(netmem);
 	u64 bd, i = FBNIC_BD_FRAG_COUNT;
 
 	bd = (FBNIC_BD_PAGE_ADDR_MASK & dma) |
@@ -874,10 +886,10 @@ static void fbnic_fill_bdq(struct fbnic_ring *bdq)
 		return;
 
 	do {
-		struct page *page;
+		netmem_ref netmem;
 
-		page = page_pool_dev_alloc_pages(bdq->page_pool);
-		if (!page) {
+		netmem = page_pool_dev_alloc_netmems(bdq->page_pool);
+		if (!netmem) {
 			u64_stats_update_begin(&bdq->stats.syncp);
 			bdq->stats.rx.alloc_failed++;
 			u64_stats_update_end(&bdq->stats.syncp);
@@ -885,8 +897,8 @@ static void fbnic_fill_bdq(struct fbnic_ring *bdq)
 			break;
 		}
 
-		fbnic_page_pool_init(bdq, i, page);
-		fbnic_bd_prep(bdq, i, page);
+		fbnic_page_pool_init(bdq, i, netmem);
+		fbnic_bd_prep(bdq, i, netmem);
 
 		i++;
 		i &= bdq->size_mask;
@@ -933,7 +945,7 @@ static void fbnic_pkt_prepare(struct fbnic_napi_vector *nv, u64 rcd,
 {
 	unsigned int hdr_pg_idx = FIELD_GET(FBNIC_RCD_AL_BUFF_PAGE_MASK, rcd);
 	unsigned int hdr_pg_off = FIELD_GET(FBNIC_RCD_AL_BUFF_OFF_MASK, rcd);
-	struct page *page = fbnic_page_pool_get(&qt->sub0, hdr_pg_idx);
+	struct page *page = fbnic_page_pool_get_head(qt, hdr_pg_idx);
 	unsigned int len = FIELD_GET(FBNIC_RCD_AL_BUFF_LEN_MASK, rcd);
 	unsigned int frame_sz, hdr_pg_start, hdr_pg_end, headroom;
 	unsigned char *hdr_start;
@@ -974,7 +986,7 @@ static void fbnic_add_rx_frag(struct fbnic_napi_vector *nv, u64 rcd,
 	unsigned int pg_idx = FIELD_GET(FBNIC_RCD_AL_BUFF_PAGE_MASK, rcd);
 	unsigned int pg_off = FIELD_GET(FBNIC_RCD_AL_BUFF_OFF_MASK, rcd);
 	unsigned int len = FIELD_GET(FBNIC_RCD_AL_BUFF_LEN_MASK, rcd);
-	struct page *page = fbnic_page_pool_get(&qt->sub1, pg_idx);
+	netmem_ref netmem = fbnic_page_pool_get_data(qt, pg_idx);
 	unsigned int truesize;
 	bool added;
 
@@ -985,11 +997,11 @@ static void fbnic_add_rx_frag(struct fbnic_napi_vector *nv, u64 rcd,
 		    FBNIC_BD_FRAG_SIZE;
 
 	/* Sync DMA buffer */
-	dma_sync_single_range_for_cpu(nv->dev, page_pool_get_dma_addr(page),
+	dma_sync_single_range_for_cpu(nv->dev,
+				      page_pool_get_dma_addr_netmem(netmem),
 				      pg_off, truesize, DMA_BIDIRECTIONAL);
 
-	added = xdp_buff_add_frag(&pkt->buff, page_to_netmem(page), pg_off, len,
-				  truesize);
+	added = xdp_buff_add_frag(&pkt->buff, netmem, pg_off, len, truesize);
 	if (unlikely(!added)) {
 		pkt->add_frag_failed = true;
 		netdev_err_once(nv->napi.dev,
@@ -1007,15 +1019,16 @@ static void fbnic_put_pkt_buff(struct fbnic_q_triad *qt,
 
 	if (xdp_buff_has_frags(&pkt->buff)) {
 		struct skb_shared_info *shinfo;
+		netmem_ref netmem;
 		int nr_frags;
 
 		shinfo = xdp_get_shared_info_from_buff(&pkt->buff);
 		nr_frags = shinfo->nr_frags;
 
 		while (nr_frags--) {
-			page = skb_frag_page(&shinfo->frags[nr_frags]);
-			page_pool_put_full_page(qt->sub1.page_pool, page,
-						!!budget);
+			netmem = skb_frag_netmem(&shinfo->frags[nr_frags]);
+			page_pool_put_full_netmem(qt->sub1.page_pool, netmem,
+						  !!budget);
 		}
 	}
 
--
2.51.0