From: Paolo Valerio <pvalerio@redhat.com>
To: netdev@vger.kernel.org
Cc: "Nicolas Ferre" <nicolas.ferre@microchip.com>,
"Claudiu Beznea" <claudiu.beznea@tuxon.dev>,
"Andrew Lunn" <andrew+netdev@lunn.ch>,
"David S. Miller" <davem@davemloft.net>,
"Eric Dumazet" <edumazet@google.com>,
"Jakub Kicinski" <kuba@kernel.org>,
"Paolo Abeni" <pabeni@redhat.com>,
"Lorenzo Bianconi" <lorenzo@kernel.org>,
"Théo Lebrun" <theo.lebrun@bootlin.com>
Subject: [PATCH net-next v3 1/8] net: macb: move Rx buffers alloc from link up to open
Date: Mon, 2 Mar 2026 12:52:25 +0100 [thread overview]
Message-ID: <20260302115232.1430640-2-pvalerio@redhat.com> (raw)
In-Reply-To: <20260302115232.1430640-1-pvalerio@redhat.com>
From: Théo Lebrun <theo.lebrun@bootlin.com>
mog_alloc_rx_buffers(), which gets called at open, does not allocate Rx
buffers on GEM. The bulk of the work is done by gem_rx_refill(), which
fills up all slots with valid buffers.
gem_rx_refill() is called at link up by
gem_init_rings() == bp->macbgem_ops.mog_init_rings().
Move the operation to macb_open(), mostly to allow it to fail early and
loudly rather than initialize the device with Rx mostly broken.
About `bool fail_early`:
- When called from macb_open(), ring init fails as soon as a queue
cannot be refilled.
- When called from macb_hresp_error_task(), we do our best to reinit
the device: we still iterate over all queues and try refilling all
even if a previous queue failed.
Signed-off-by: Théo Lebrun <theo.lebrun@bootlin.com>
Signed-off-by: Paolo Valerio <pvalerio@redhat.com>
---
drivers/net/ethernet/cadence/macb.h | 2 +-
drivers/net/ethernet/cadence/macb_main.c | 35 ++++++++++++++++++------
2 files changed, 28 insertions(+), 9 deletions(-)
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 87414a2ddf6e..2cb65ec37d44 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -1180,7 +1180,7 @@ struct macb_queue;
struct macb_or_gem_ops {
int (*mog_alloc_rx_buffers)(struct macb *bp);
void (*mog_free_rx_buffers)(struct macb *bp);
- void (*mog_init_rings)(struct macb *bp);
+ int (*mog_init_rings)(struct macb *bp, bool fail_early);
int (*mog_rx)(struct macb_queue *queue, struct napi_struct *napi,
int budget);
};
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 02eab26fd98b..347b510c0c25 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -1263,13 +1263,14 @@ static int macb_tx_complete(struct macb_queue *queue, int budget)
return packets;
}
-static void gem_rx_refill(struct macb_queue *queue)
+static int gem_rx_refill(struct macb_queue *queue)
{
unsigned int entry;
struct sk_buff *skb;
dma_addr_t paddr;
struct macb *bp = queue->bp;
struct macb_dma_desc *desc;
+ int err = 0;
while (CIRC_SPACE(queue->rx_prepared_head, queue->rx_tail,
bp->rx_ring_size) > 0) {
@@ -1286,6 +1287,7 @@ static void gem_rx_refill(struct macb_queue *queue)
if (unlikely(!skb)) {
netdev_err(bp->dev,
"Unable to allocate sk_buff\n");
+ err = -ENOMEM;
break;
}
@@ -1335,6 +1337,7 @@ static void gem_rx_refill(struct macb_queue *queue)
netdev_vdbg(bp->dev, "rx ring: queue: %p, prepared head %d, tail %d\n",
queue, queue->rx_prepared_head, queue->rx_tail);
+ return err;
}
/* Mark DMA descriptors from begin up to and not including end as unused */
@@ -1787,7 +1790,7 @@ static void macb_hresp_error_task(struct work_struct *work)
netif_tx_stop_all_queues(dev);
netif_carrier_off(dev);
- bp->macbgem_ops.mog_init_rings(bp);
+ bp->macbgem_ops.mog_init_rings(bp, false);
/* Initialize TX and RX buffers */
macb_init_buffers(bp);
@@ -2560,8 +2563,6 @@ static int macb_alloc_consistent(struct macb *bp)
if (!queue->tx_skb)
goto out_err;
}
- if (bp->macbgem_ops.mog_alloc_rx_buffers(bp))
- goto out_err;
/* Required for tie off descriptor for PM cases */
if (!(bp->caps & MACB_CAPS_QUEUE_DISABLE)) {
@@ -2573,6 +2574,11 @@ static int macb_alloc_consistent(struct macb *bp)
goto out_err;
}
+ if (bp->macbgem_ops.mog_alloc_rx_buffers(bp))
+ goto out_err;
+ if (bp->macbgem_ops.mog_init_rings(bp, true))
+ goto out_err;
+
return 0;
out_err:
@@ -2593,11 +2599,13 @@ static void macb_init_tieoff(struct macb *bp)
desc->ctrl = 0;
}
-static void gem_init_rings(struct macb *bp)
+static int gem_init_rings(struct macb *bp, bool fail_early)
{
struct macb_queue *queue;
struct macb_dma_desc *desc = NULL;
+ int last_err = 0;
unsigned int q;
+ int err;
int i;
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
@@ -2613,13 +2621,24 @@ static void gem_init_rings(struct macb *bp)
queue->rx_tail = 0;
queue->rx_prepared_head = 0;
- gem_rx_refill(queue);
+ /* We get called in two cases:
+ * - open: we can propagate alloc errors (so fail early),
+ * - HRESP error: cannot propagate, we attempt to reinit
+ * all queues in case of failure.
+ */
+ err = gem_rx_refill(queue);
+ if (err) {
+ last_err = err;
+ if (fail_early)
+ break;
+ }
}
macb_init_tieoff(bp);
+ return last_err;
}
-static void macb_init_rings(struct macb *bp)
+static int macb_init_rings(struct macb *bp, bool fail_early)
{
int i;
struct macb_dma_desc *desc = NULL;
@@ -2636,6 +2655,7 @@ static void macb_init_rings(struct macb *bp)
desc->ctrl |= MACB_BIT(TX_WRAP);
macb_init_tieoff(bp);
+ return 0;
}
static void macb_reset_hw(struct macb *bp)
@@ -2967,7 +2987,6 @@ static int macb_open(struct net_device *dev)
goto pm_exit;
}
- bp->macbgem_ops.mog_init_rings(bp);
macb_init_buffers(bp);
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
--
2.52.0
next prev parent reply other threads:[~2026-03-02 11:52 UTC|newest]
Thread overview: 11+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-03-02 11:52 [PATCH net-next v3 0/8] net: macb: Add XDP support and page pool integration Paolo Valerio
2026-03-02 11:52 ` Paolo Valerio [this message]
2026-03-02 11:52 ` [PATCH net-next v3 2/8] net: macb: rename rx_skbuff into rx_buff Paolo Valerio
2026-03-02 11:52 ` [PATCH net-next v3 3/8] net: macb: Add page pool support handle multi-descriptor frame rx Paolo Valerio
2026-03-05 10:53 ` Paolo Abeni
2026-03-02 11:52 ` [PATCH net-next v3 4/8] net: macb: use the current queue number for stats Paolo Valerio
2026-03-02 11:52 ` [PATCH net-next v3 5/8] net: macb: make macb_tx_skb generic Paolo Valerio
2026-03-02 11:52 ` [PATCH net-next v3 6/8] net: macb: generalize tx buffer handling Paolo Valerio
2026-03-02 11:52 ` [PATCH net-next v3 7/8] net: macb: add XDP support for gem Paolo Valerio
2026-03-05 11:01 ` [net-next,v3,7/8] " Paolo Abeni
2026-03-02 11:52 ` [PATCH net-next v3 8/8] net: macb: introduce ndo_xdp_xmit support Paolo Valerio
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260302115232.1430640-2-pvalerio@redhat.com \
--to=pvalerio@redhat.com \
--cc=andrew+netdev@lunn.ch \
--cc=claudiu.beznea@tuxon.dev \
--cc=davem@davemloft.net \
--cc=edumazet@google.com \
--cc=kuba@kernel.org \
--cc=lorenzo@kernel.org \
--cc=netdev@vger.kernel.org \
--cc=nicolas.ferre@microchip.com \
--cc=pabeni@redhat.com \
--cc=theo.lebrun@bootlin.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.