From: Paolo Valerio <pvalerio@redhat.com>
To: netdev@vger.kernel.org
Cc: "Nicolas Ferre" <nicolas.ferre@microchip.com>,
"Claudiu Beznea" <claudiu.beznea@tuxon.dev>,
"Andrew Lunn" <andrew+netdev@lunn.ch>,
"David S. Miller" <davem@davemloft.net>,
"Eric Dumazet" <edumazet@google.com>,
"Jakub Kicinski" <kuba@kernel.org>,
"Paolo Abeni" <pabeni@redhat.com>,
"Lorenzo Bianconi" <lorenzo@kernel.org>,
"Théo Lebrun" <theo.lebrun@bootlin.com>
Subject: [PATCH net-next 1/8] net: macb: move Rx buffers alloc from link up to open
Date: Thu, 15 Jan 2026 23:25:24 +0100 [thread overview]
Message-ID: <20260115222531.313002-2-pvalerio@redhat.com> (raw)
In-Reply-To: <20260115222531.313002-1-pvalerio@redhat.com>
From: Théo Lebrun <theo.lebrun@bootlin.com>
mog_alloc_rx_buffers(), which gets called at open, does not allocate Rx
buffers on GEM. The bulk of the work is done by gem_rx_refill(), which
fills up all slots with valid buffers.
gem_rx_refill() is called at link up by
gem_init_rings() == bp->macbgem_ops.mog_init_rings().
Move the operation to macb_open(), mostly to allow it to fail early and
loudly rather than initializing the device with Rx mostly broken.
About `bool fail_early`:
- When called from macb_open(), ring init fails as soon as a queue
cannot be refilled.
- When called from macb_hresp_error_task(), we do our best to reinit
the device: we still iterate over all queues and try refilling all
of them even if a previous queue failed.
Signed-off-by: Théo Lebrun <theo.lebrun@bootlin.com>
Signed-off-by: Paolo Valerio <pvalerio@redhat.com>
---
drivers/net/ethernet/cadence/macb.h | 2 +-
drivers/net/ethernet/cadence/macb_main.c | 40 +++++++++++++++++-------
2 files changed, 30 insertions(+), 12 deletions(-)
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 87414a2ddf6e..2cb65ec37d44 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -1180,7 +1180,7 @@ struct macb_queue;
struct macb_or_gem_ops {
int (*mog_alloc_rx_buffers)(struct macb *bp);
void (*mog_free_rx_buffers)(struct macb *bp);
- void (*mog_init_rings)(struct macb *bp);
+ int (*mog_init_rings)(struct macb *bp, bool fail_early);
int (*mog_rx)(struct macb_queue *queue, struct napi_struct *napi,
int budget);
};
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 2d5f3eb09530..5947c2b44bb3 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -705,8 +705,8 @@ static void macb_mac_link_up(struct phylink_config *config,
if (rx_pause)
ctrl |= MACB_BIT(PAE);
- /* Initialize rings & buffers as clearing MACB_BIT(TE) in link down
- * cleared the pipeline and control registers.
+ /* Initialize buffer registers as clearing MACB_BIT(TE) in link
+ * down cleared the pipeline and control registers.
*/
macb_init_buffers(bp);
@@ -1249,13 +1249,14 @@ static int macb_tx_complete(struct macb_queue *queue, int budget)
return packets;
}
-static void gem_rx_refill(struct macb_queue *queue)
+static int gem_rx_refill(struct macb_queue *queue)
{
unsigned int entry;
struct sk_buff *skb;
dma_addr_t paddr;
struct macb *bp = queue->bp;
struct macb_dma_desc *desc;
+ int err = 0;
while (CIRC_SPACE(queue->rx_prepared_head, queue->rx_tail,
bp->rx_ring_size) > 0) {
@@ -1272,6 +1273,7 @@ static void gem_rx_refill(struct macb_queue *queue)
if (unlikely(!skb)) {
netdev_err(bp->dev,
"Unable to allocate sk_buff\n");
+ err = -ENOMEM;
break;
}
@@ -1321,6 +1323,7 @@ static void gem_rx_refill(struct macb_queue *queue)
netdev_vdbg(bp->dev, "rx ring: queue: %p, prepared head %d, tail %d\n",
queue, queue->rx_prepared_head, queue->rx_tail);
+ return err;
}
/* Mark DMA descriptors from begin up to and not including end as unused */
@@ -1773,7 +1776,7 @@ static void macb_hresp_error_task(struct work_struct *work)
netif_tx_stop_all_queues(dev);
netif_carrier_off(dev);
- bp->macbgem_ops.mog_init_rings(bp);
+ bp->macbgem_ops.mog_init_rings(bp, false);
/* Initialize TX and RX buffers */
macb_init_buffers(bp);
@@ -2546,8 +2549,6 @@ static int macb_alloc_consistent(struct macb *bp)
if (!queue->tx_skb)
goto out_err;
}
- if (bp->macbgem_ops.mog_alloc_rx_buffers(bp))
- goto out_err;
/* Required for tie off descriptor for PM cases */
if (!(bp->caps & MACB_CAPS_QUEUE_DISABLE)) {
@@ -2559,6 +2560,11 @@ static int macb_alloc_consistent(struct macb *bp)
goto out_err;
}
+ if (bp->macbgem_ops.mog_alloc_rx_buffers(bp))
+ goto out_err;
+ if (bp->macbgem_ops.mog_init_rings(bp, true))
+ goto out_err;
+
return 0;
out_err:
@@ -2579,11 +2585,13 @@ static void macb_init_tieoff(struct macb *bp)
desc->ctrl = 0;
}
-static void gem_init_rings(struct macb *bp)
+static int gem_init_rings(struct macb *bp, bool fail_early)
{
struct macb_queue *queue;
struct macb_dma_desc *desc = NULL;
+ int last_err = 0;
unsigned int q;
+ int err;
int i;
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
@@ -2599,13 +2607,24 @@ static void gem_init_rings(struct macb *bp)
queue->rx_tail = 0;
queue->rx_prepared_head = 0;
- gem_rx_refill(queue);
+ /* We get called in two cases:
+ * - open: we can propagate alloc errors (so fail early),
+ * - HRESP error: cannot propagate, we attempt to reinit
+ * all queues in case of failure.
+ */
+ err = gem_rx_refill(queue);
+ if (err) {
+ last_err = err;
+ if (fail_early)
+ break;
+ }
}
macb_init_tieoff(bp);
+ return last_err;
}
-static void macb_init_rings(struct macb *bp)
+static int macb_init_rings(struct macb *bp, bool fail_early)
{
int i;
struct macb_dma_desc *desc = NULL;
@@ -2622,6 +2641,7 @@ static void macb_init_rings(struct macb *bp)
desc->ctrl |= MACB_BIT(TX_WRAP);
macb_init_tieoff(bp);
+ return 0;
}
static void macb_reset_hw(struct macb *bp)
@@ -2953,8 +2973,6 @@ static int macb_open(struct net_device *dev)
goto pm_exit;
}
- bp->macbgem_ops.mog_init_rings(bp);
-
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
napi_enable(&queue->napi_rx);
napi_enable(&queue->napi_tx);
--
2.52.0
next prev parent reply other threads:[~2026-01-15 22:26 UTC|newest]
Thread overview: 39+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-01-15 22:25 [PATCH net-next 0/8] net: macb: Add XDP support and page pool integration Paolo Valerio
2026-01-15 22:25 ` Paolo Valerio [this message]
2026-01-15 22:25 ` [PATCH net-next 2/8] net: macb: rename rx_skbuff into rx_buff Paolo Valerio
2026-01-15 22:25 ` [PATCH net-next 3/8] cadence: macb: Add page pool support handle multi-descriptor frame rx Paolo Valerio
2026-01-16 17:16 ` Andrew Lunn
2026-01-19 18:58 ` Paolo Valerio
2026-01-22 22:24 ` Paolo Valerio
2026-01-22 23:04 ` Andrew Lunn
2026-01-25 19:02 ` Paolo Valerio
2026-01-26 14:29 ` Andrew Lunn
2026-01-26 18:45 ` Théo Lebrun
2026-01-26 23:51 ` Paolo Valerio
2026-01-27 15:48 ` Théo Lebrun
2026-01-26 23:34 ` Paolo Valerio
2026-01-19 19:36 ` [net-next,3/8] " Jakub Kicinski
2026-01-22 14:39 ` Théo Lebrun
2026-01-22 15:16 ` Jakub Kicinski
2026-01-26 14:55 ` [PATCH net-next 3/8] " Théo Lebrun
2026-02-20 15:45 ` Théo Lebrun
2026-01-15 22:25 ` [PATCH net-next 4/8] cadence: macb: use the current queue number for stats Paolo Valerio
2026-01-15 22:25 ` [PATCH net-next 5/8] cadence: macb: add XDP support for gem Paolo Valerio
2026-01-19 19:36 ` [net-next,5/8] " Jakub Kicinski
2026-01-15 22:25 ` [PATCH net-next 6/8] cadence: macb: make macb_tx_skb generic Paolo Valerio
2026-01-15 22:25 ` [PATCH net-next 7/8] cadence: macb: make tx path skb agnostic Paolo Valerio
2026-01-15 22:25 ` [PATCH net-next 8/8] cadence: macb: introduce xmit support Paolo Valerio
2026-01-19 19:36 ` [net-next,8/8] " Jakub Kicinski
2026-02-02 16:31 ` [PATCH net-next 0/8] net: macb: Add XDP support and page pool integration Théo Lebrun
2026-02-13 16:57 ` [PATCH 1/6] net: macb: rename release_buff() -> macb_tx_release_buff() Théo Lebrun
2026-02-13 16:57 ` [PATCH 2/6] net: macb: drop two labels in gem_rx() Théo Lebrun
2026-02-13 16:57 ` [PATCH 3/6] net: macb: always use DMA_BIDIRECTIONAL on page pool buffers Théo Lebrun
2026-02-13 16:57 ` [PATCH 4/6] net: macb: account for stats in Rx XDP codepaths Théo Lebrun
2026-02-13 16:57 ` [PATCH 5/6] net: macb: improve Rx refill error message Théo Lebrun
2026-02-13 16:57 ` [PATCH 6/6] net: macb: rework macb_tx_complete() processing loop Théo Lebrun
2026-02-13 16:57 ` [PATCH net-next 0/8] net: macb: Add XDP support and page pool integration Théo Lebrun
2026-02-13 17:02 ` Théo Lebrun
2026-02-14 15:37 ` Paolo Valerio
2026-02-16 9:17 ` Théo Lebrun
2026-02-19 18:05 ` Paolo Valerio
2026-02-20 15:58 ` Théo Lebrun
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260115222531.313002-2-pvalerio@redhat.com \
--to=pvalerio@redhat.com \
--cc=andrew+netdev@lunn.ch \
--cc=claudiu.beznea@tuxon.dev \
--cc=davem@davemloft.net \
--cc=edumazet@google.com \
--cc=kuba@kernel.org \
--cc=lorenzo@kernel.org \
--cc=netdev@vger.kernel.org \
--cc=nicolas.ferre@microchip.com \
--cc=pabeni@redhat.com \
--cc=theo.lebrun@bootlin.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox