From: Paolo Valerio <pvalerio@redhat.com>
To: netdev@vger.kernel.org
Cc: "Nicolas Ferre" <nicolas.ferre@microchip.com>,
"Claudiu Beznea" <claudiu.beznea@tuxon.dev>,
"Andrew Lunn" <andrew+netdev@lunn.ch>,
"David S. Miller" <davem@davemloft.net>,
"Eric Dumazet" <edumazet@google.com>,
"Jakub Kicinski" <kuba@kernel.org>,
"Paolo Abeni" <pabeni@redhat.com>,
"Lorenzo Bianconi" <lorenzo@kernel.org>,
"Théo Lebrun" <theo.lebrun@bootlin.com>
Subject: [PATCH net-next v3 3/8] net: macb: Add page pool support, handle multi-descriptor frame rx
Date: Mon, 2 Mar 2026 12:52:27 +0100
Message-ID: <20260302115232.1430640-4-pvalerio@redhat.com>
In-Reply-To: <20260302115232.1430640-1-pvalerio@redhat.com>
Use the page pool allocator for the data buffers and enable skb
recycling, instead of relying on netdev_alloc_skb() to allocate the
entire skb during refill.

The patch also adds support, in the Cadence MACB/GEM Ethernet driver,
for receiving network frames that span multiple DMA descriptors. This
removes the restriction that limited frame reception to a single
descriptor (RX_SOF && RX_EOF) and avoids potential contiguous
multi-page allocations for large frames.
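With page pool, each Rx buffer is a fragment carved out of a pool
page. The intended layout of one fragment is sketched below (sizes
are illustrative; NET_SKB_PAD, NET_IP_ALIGN and the shared info size
are architecture dependent):

    +-------------+--------------+--------------------+-----------------+
    | NET_SKB_PAD | RBOF dummy   | frame data         | skb_shared_info |
    | headroom    | bytes (!RSC) | (rx_buffer_size,   | (used by        |
    |             |              |  incl. the dummy   | napi_build_skb) |
    |             |              |  bytes)            |                 |
    +-------------+--------------+--------------------+-----------------+

DMA starts right after NET_SKB_PAD; on !RSC hardware the controller
writes NET_IP_ALIGN dummy bytes first (RBOF), so reserving
bp->rx_headroom = NET_SKB_PAD + NET_IP_ALIGN lands skb->data on the
Ethernet header.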
Signed-off-by: Paolo Valerio <pvalerio@redhat.com>
---
drivers/net/ethernet/cadence/Kconfig | 1 +
drivers/net/ethernet/cadence/macb.h | 5 +
drivers/net/ethernet/cadence/macb_main.c | 401 ++++++++++++++++-------
3 files changed, 283 insertions(+), 124 deletions(-)
diff --git a/drivers/net/ethernet/cadence/Kconfig b/drivers/net/ethernet/cadence/Kconfig
index 5b2a461dfd28..ae500f717433 100644
--- a/drivers/net/ethernet/cadence/Kconfig
+++ b/drivers/net/ethernet/cadence/Kconfig
@@ -25,6 +25,7 @@ config MACB
depends on PTP_1588_CLOCK_OPTIONAL
select PHYLINK
select CRC32
+ select PAGE_POOL
help
The Cadence MACB ethernet interface is found on many Atmel AT32 and
AT91 parts. This driver also supports the Cadence GEM (Gigabit
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 3b184e9ac771..a78ad00f53b1 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -14,6 +14,7 @@
#include <linux/interrupt.h>
#include <linux/phy/phy.h>
#include <linux/workqueue.h>
+#include <net/page_pool/helpers.h>
#define MACB_GREGS_NBR 16
#define MACB_GREGS_VERSION 2
@@ -1266,6 +1267,8 @@ struct macb_queue {
void *rx_buffers;
struct napi_struct napi_rx;
struct queue_stats stats;
+ struct page_pool *page_pool;
+ struct sk_buff *skb;
};
struct ethtool_rx_fs_item {
@@ -1289,6 +1292,8 @@ struct macb {
struct macb_dma_desc *rx_ring_tieoff;
dma_addr_t rx_ring_tieoff_dma;
size_t rx_buffer_size;
+ size_t rx_headroom;
+ unsigned int rx_ip_align;
unsigned int rx_ring_size;
unsigned int tx_ring_size;
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index fbd4872901c2..621bca2e1844 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -1263,14 +1263,54 @@ static int macb_tx_complete(struct macb_queue *queue, int budget)
return packets;
}
-static int gem_rx_refill(struct macb_queue *queue)
+static inline int gem_rx_data_len(struct macb *bp, struct macb_queue *queue,
+ u32 desc_ctrl, bool rx_sof, bool rx_eof)
+{
+ int len;
+
+ if (unlikely(!rx_sof && !queue->skb)) {
+ netdev_err(bp->dev,
+ "Received non-starting frame while expecting a starting one\n");
+ return -1;
+ }
+
+ if (rx_eof) {
+ len = desc_ctrl & bp->rx_frm_len_mask;
+ } else {
+ len = bp->rx_buffer_size;
+ /* First frame on !RSC skips NET_IP_ALIGN */
+ if (rx_sof)
+ len -= bp->rx_ip_align;
+ }
+
+ if (rx_eof && !rx_sof) {
+ if (unlikely(queue->skb->len > len)) {
+ netdev_err(bp->dev, "Unexpected frame len: %d\n", len);
+ return -1;
+ }
+
+ len -= queue->skb->len;
+ }
+
+ return len;
+}
+
+static unsigned int gem_total_rx_buffer_size(struct macb *bp)
+{
+ return SKB_HEAD_ALIGN(bp->rx_buffer_size + NET_SKB_PAD);
+}
+
+static int gem_rx_refill(struct macb_queue *queue, bool napi)
{
- unsigned int entry;
- struct sk_buff *skb;
- dma_addr_t paddr;
struct macb *bp = queue->bp;
struct macb_dma_desc *desc;
+ unsigned int entry;
+ struct page *page;
+ dma_addr_t paddr;
+ gfp_t gfp_alloc;
int err = 0;
+ void *data;
+ unsigned int offset;
while (CIRC_SPACE(queue->rx_prepared_head, queue->rx_tail,
bp->rx_ring_size) > 0) {
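A worked example of gem_rx_data_len() above, for a frame spanning two
descriptors (illustrative numbers: rx_buffer_size = 1536,
rx_ip_align = 2, 2000 byte frame):

	/* desc 0: RX_SOF, !RX_EOF -> len = 1536 - 2        = 1534
	 * desc 1: !RX_SOF, RX_EOF -> len = 2000 - skb->len =  466
	 *
	 * The EOF descriptor's length field carries the total frame
	 * length, hence the subtraction of the bytes already gathered
	 * into queue->skb.
	 */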
@@ -1282,25 +1322,25 @@ static int gem_rx_refill(struct macb_queue *queue)
desc = macb_rx_desc(queue, entry);
if (!queue->rx_buff[entry]) {
- /* allocate sk_buff for this free entry in ring */
- skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size);
- if (unlikely(!skb)) {
+ gfp_alloc = napi ? GFP_ATOMIC : GFP_KERNEL;
+ page = page_pool_alloc_frag(queue->page_pool, &offset,
+ gem_total_rx_buffer_size(bp),
+ gfp_alloc | __GFP_NOWARN);
+ if (!page) {
netdev_err(bp->dev,
- "Unable to allocate sk_buff\n");
+ "Unable to allocate rx buffer\n");
err = -ENOMEM;
break;
}
- /* now fill corresponding descriptor entry */
- paddr = dma_map_single(&bp->pdev->dev, skb->data,
- bp->rx_buffer_size,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(&bp->pdev->dev, paddr)) {
- dev_kfree_skb(skb);
- break;
- }
+ paddr = page_pool_get_dma_addr(page) + NET_SKB_PAD + offset;
+
+ dma_sync_single_for_device(&bp->pdev->dev,
+ paddr, bp->rx_buffer_size,
+ page_pool_get_dma_dir(queue->page_pool));
- queue->rx_buff[entry] = skb;
+ data = page_address(page) + offset;
+ queue->rx_buff[entry] = data;
if (entry == bp->rx_ring_size - 1)
paddr |= MACB_BIT(RX_WRAP);
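For reviewers less familiar with the fragment API, the refill above
follows the usual page pool pattern; a minimal sketch (pool, dma_dev,
total_size and buf_size are placeholders):

	unsigned int offset;
	struct page *page;
	dma_addr_t paddr;

	/* Carve one Rx buffer out of a (possibly shared) pool page.
	 * The page was DMA mapped by the pool (PP_FLAG_DMA_MAP).
	 */
	page = page_pool_alloc_frag(pool, &offset, total_size,
				    GFP_ATOMIC | __GFP_NOWARN);
	if (!page)
		return -ENOMEM;

	/* Hand the device the address past the reserved headroom and
	 * make the buffer device-visible before posting the descriptor.
	 */
	paddr = page_pool_get_dma_addr(page) + offset + NET_SKB_PAD;
	dma_sync_single_for_device(dma_dev, paddr, buf_size,
				   page_pool_get_dma_dir(pool));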
@@ -1310,20 +1350,6 @@ static int gem_rx_refill(struct macb_queue *queue)
*/
dma_wmb();
macb_set_addr(bp, desc, paddr);
-
- /* Properly align Ethernet header.
- *
- * Hardware can add dummy bytes if asked using the RBOF
- * field inside the NCFGR register. That feature isn't
- * available if hardware is RSC capable.
- *
- * We cannot fallback to doing the 2-byte shift before
- * DMA mapping because the address field does not allow
- * setting the low 2/3 bits.
- * It is 3 bits if HW_DMA_CAP_PTP, else 2 bits.
- */
- if (!(bp->caps & MACB_CAPS_RSC))
- skb_reserve(skb, NET_IP_ALIGN);
} else {
desc->ctrl = 0;
dma_wmb();
@@ -1364,17 +1390,21 @@ static void discard_partial_frame(struct macb_queue *queue, unsigned int begin,
static int gem_rx(struct macb_queue *queue, struct napi_struct *napi,
int budget)
{
+ struct skb_shared_info *shinfo;
struct macb *bp = queue->bp;
- unsigned int len;
- unsigned int entry;
- struct sk_buff *skb;
- struct macb_dma_desc *desc;
- int count = 0;
+ struct macb_dma_desc *desc;
+ unsigned int entry;
+ struct page *page;
+ void *buff_head;
+ int count = 0;
+ int data_len;
+ int nr_frags;
+
while (count < budget) {
- u32 ctrl;
+ bool rxused, first_frame, last_frame;
dma_addr_t addr;
- bool rxused;
+ u32 ctrl;
entry = macb_rx_ring_wrap(bp, queue->rx_tail);
desc = macb_rx_desc(queue, entry);
@@ -1396,58 +1426,121 @@ static int gem_rx(struct macb_queue *queue, struct napi_struct *napi,
queue->rx_tail++;
count++;
- if (!(ctrl & MACB_BIT(RX_SOF) && ctrl & MACB_BIT(RX_EOF))) {
- netdev_err(bp->dev,
- "not whole frame pointed by descriptor\n");
- bp->dev->stats.rx_dropped++;
- queue->stats.rx_dropped++;
- break;
- }
- skb = queue->rx_buff[entry];
- if (unlikely(!skb)) {
+ buff_head = queue->rx_buff[entry];
+ if (unlikely(!buff_head)) {
netdev_err(bp->dev,
"inconsistent Rx descriptor chain\n");
bp->dev->stats.rx_dropped++;
queue->stats.rx_dropped++;
break;
}
- /* now everything is ready for receiving packet */
- queue->rx_buff[entry] = NULL;
- len = ctrl & bp->rx_frm_len_mask;
- netdev_vdbg(bp->dev, "gem_rx %u (len %u)\n", entry, len);
+ first_frame = ctrl & MACB_BIT(RX_SOF);
+ last_frame = ctrl & MACB_BIT(RX_EOF);
- skb_put(skb, len);
- dma_unmap_single(&bp->pdev->dev, addr,
- bp->rx_buffer_size, DMA_FROM_DEVICE);
+ data_len = gem_rx_data_len(bp, queue, ctrl, first_frame,
+ last_frame);
+ if (data_len < 0)
+ goto free_frags;
- skb->protocol = eth_type_trans(skb, bp->dev);
- skb_checksum_none_assert(skb);
- if (bp->dev->features & NETIF_F_RXCSUM &&
- !(bp->dev->flags & IFF_PROMISC) &&
- GEM_BFEXT(RX_CSUM, ctrl) & GEM_RX_CSUM_CHECKED_MASK)
- skb->ip_summed = CHECKSUM_UNNECESSARY;
+ addr += first_frame ? bp->rx_ip_align : 0;
- bp->dev->stats.rx_packets++;
- queue->stats.rx_packets++;
- bp->dev->stats.rx_bytes += skb->len;
- queue->stats.rx_bytes += skb->len;
+ dma_sync_single_for_cpu(&bp->pdev->dev, addr, data_len,
+ page_pool_get_dma_dir(queue->page_pool));
- gem_ptp_do_rxstamp(bp, skb, desc);
+ if (first_frame) {
+ if (unlikely(queue->skb)) {
+ netdev_warn(bp->dev, "Previous packet incomplete\n");
+ dev_kfree_skb(queue->skb);
+ bp->dev->stats.rx_dropped++;
+ queue->stats.rx_dropped++;
+ }
+
+ queue->skb = napi_build_skb(buff_head, gem_total_rx_buffer_size(bp));
+ if (unlikely(!queue->skb)) {
+ netdev_err(bp->dev,
+ "Unable to allocate sk_buff\n");
+ goto free_frags;
+ }
+ /* Properly align Ethernet header.
+ *
+ * Hardware can add dummy bytes if asked using the RBOF
+ * field inside the NCFGR register. That feature isn't
+ * available if hardware is RSC capable.
+ *
+ * We cannot fallback to doing the 2-byte shift before
+ * DMA mapping because the address field does not allow
+ * setting the low 2/3 bits.
+ * It is 3 bits if HW_DMA_CAP_PTP, else 2 bits.
+ */
+ skb_reserve(queue->skb, bp->rx_headroom);
+ skb_mark_for_recycle(queue->skb);
+ skb_put(queue->skb, data_len);
+ } else {
+ shinfo = skb_shinfo(queue->skb);
+ page = virt_to_head_page(buff_head);
+ nr_frags = shinfo->nr_frags;
+
+ if (unlikely(nr_frags >= ARRAY_SIZE(shinfo->frags)))
+ goto free_frags;
+
+ skb_add_rx_frag(queue->skb, nr_frags, page,
+ buff_head - page_address(page) + NET_SKB_PAD,
+ data_len, gem_total_rx_buffer_size(bp));
+ }
+
+ /* now everything is ready for receiving packet */
+ queue->rx_buff[entry] = NULL;
+
+ netdev_vdbg(bp->dev, "%s %u (len %u)\n", __func__, entry, data_len);
+
+ if (last_frame) {
+ bp->dev->stats.rx_packets++;
+ queue->stats.rx_packets++;
+ bp->dev->stats.rx_bytes += queue->skb->len;
+ queue->stats.rx_bytes += queue->skb->len;
+
+ queue->skb->protocol = eth_type_trans(queue->skb, bp->dev);
+ skb_checksum_none_assert(queue->skb);
+ if (bp->dev->features & NETIF_F_RXCSUM &&
+ !(bp->dev->flags & IFF_PROMISC) &&
+ GEM_BFEXT(RX_CSUM, ctrl) & GEM_RX_CSUM_CHECKED_MASK)
+ queue->skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ gem_ptp_do_rxstamp(bp, queue->skb, desc);
#if defined(DEBUG) && defined(VERBOSE_DEBUG)
- netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
- skb->len, skb->csum);
- print_hex_dump(KERN_DEBUG, " mac: ", DUMP_PREFIX_ADDRESS, 16, 1,
- skb_mac_header(skb), 16, true);
- print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_ADDRESS, 16, 1,
- skb->data, 32, true);
+ netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
+ queue->skb->len, queue->skb->csum);
+ print_hex_dump_debug(" mac: ", DUMP_PREFIX_ADDRESS, 16, 1,
+ skb_mac_header(queue->skb), 16, true);
+ print_hex_dump_debug("data: ", DUMP_PREFIX_ADDRESS, 16, 1,
+ queue->skb->data, 32, true);
#endif
- napi_gro_receive(napi, skb);
+ napi_gro_receive(napi, queue->skb);
+ queue->skb = NULL;
+ }
+
+ continue;
+
+free_frags:
+ if (queue->skb) {
+ dev_kfree_skb(queue->skb);
+ queue->skb = NULL;
+ }
+
+ if (buff_head)
+ page_pool_put_full_page(queue->page_pool,
+ virt_to_head_page(buff_head),
+ false);
+
+ bp->dev->stats.rx_dropped++;
+ queue->stats.rx_dropped++;
+ queue->rx_buff[entry] = NULL;
}
- gem_rx_refill(queue);
+ gem_rx_refill(queue, true);
return count;
}
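The SOF/EOF handling above is the standard build-then-append scheme;
stripped of error handling it reduces to the sketch below (frag_offset
is a placeholder for the buffer's offset within its page):

	if (first_frame) {
		skb = napi_build_skb(buff_head, truesize);
		skb_reserve(skb, bp->rx_headroom); /* pad + RBOF bytes */
		skb_mark_for_recycle(skb);         /* recycle via page pool */
		skb_put(skb, data_len);
	} else {
		/* Continuation buffers become page frags of the same skb */
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
				virt_to_head_page(buff_head),
				frag_offset, data_len, truesize);
	}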
@@ -2381,12 +2474,22 @@ static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
return ret;
}
-static void macb_init_rx_buffer_size(struct macb *bp, size_t size)
+static void macb_init_rx_buffer_size(struct macb *bp, unsigned int mtu)
{
+ unsigned int overhead;
+ size_t size;
+
if (!macb_is_gem(bp)) {
bp->rx_buffer_size = MACB_RX_BUFFER_SIZE;
} else {
- bp->rx_buffer_size = size;
+ size = mtu + ETH_HLEN + ETH_FCS_LEN;
+ bp->rx_buffer_size = SKB_DATA_ALIGN(size + bp->rx_ip_align);
+ if (gem_total_rx_buffer_size(bp) > PAGE_SIZE) {
+ overhead = bp->rx_headroom +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ bp->rx_buffer_size = rounddown(PAGE_SIZE - overhead,
+ RX_BUFFER_MULTIPLE);
+ }
if (bp->rx_buffer_size % RX_BUFFER_MULTIPLE) {
netdev_dbg(bp->dev,
@@ -2397,17 +2500,16 @@ static void macb_init_rx_buffer_size(struct macb *bp, size_t size)
}
}
- netdev_dbg(bp->dev, "mtu [%u] rx_buffer_size [%zu]\n",
- bp->dev->mtu, bp->rx_buffer_size);
+ netdev_dbg(bp->dev, "mtu [%u] rx_buffer_size [%zu] rx_headroom [%zu] total [%u]\n",
+ bp->dev->mtu, bp->rx_buffer_size, bp->rx_headroom,
+ gem_total_rx_buffer_size(bp));
}
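Two worked examples of the sizing above, assuming 64 byte cache lines,
NET_SKB_PAD = 64, rx_headroom = 66 and a 320 byte aligned
skb_shared_info (all architecture dependent):

	/* MTU 1500: size = 1500 + ETH_HLEN + ETH_FCS_LEN = 1518
	 *   rx_buffer_size = SKB_DATA_ALIGN(1518 + 2)  = 1536
	 *   total          = SKB_HEAD_ALIGN(1536 + 64) = 1920  (fits)
	 *
	 * MTU 9000: size = 9018
	 *   rx_buffer_size = SKB_DATA_ALIGN(9018 + 2)  = 9024
	 *   total          = SKB_HEAD_ALIGN(9024 + 64) = 9408  (> 4K page)
	 *   clamped:  rounddown(4096 - (66 + 320), 64) = 3648
	 *   so a 9018 byte frame spans three descriptors.
	 */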
static void gem_free_rx_buffers(struct macb *bp)
{
- struct sk_buff *skb;
- struct macb_dma_desc *desc;
struct macb_queue *queue;
- dma_addr_t addr;
unsigned int q;
+ void *data;
int i;
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
@@ -2415,22 +2517,25 @@ static void gem_free_rx_buffers(struct macb *bp)
continue;
for (i = 0; i < bp->rx_ring_size; i++) {
- skb = queue->rx_buff[i];
-
- if (!skb)
+ data = queue->rx_buff[i];
+ if (!data)
continue;
- desc = macb_rx_desc(queue, i);
- addr = macb_get_addr(bp, desc);
+ page_pool_put_full_page(queue->page_pool,
+ virt_to_head_page(data),
+ false);
+ queue->rx_buff[i] = NULL;
+ }
- dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size,
- DMA_FROM_DEVICE);
- dev_kfree_skb_any(skb);
- skb = NULL;
+ if (queue->skb) {
+ dev_kfree_skb(queue->skb);
+ queue->skb = NULL;
}
kfree(queue->rx_buff);
queue->rx_buff = NULL;
+ page_pool_destroy(queue->page_pool);
+ queue->page_pool = NULL;
}
}
@@ -2492,13 +2597,12 @@ static int gem_alloc_rx_buffers(struct macb *bp)
int size;
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
- size = bp->rx_ring_size * sizeof(struct sk_buff *);
+ size = bp->rx_ring_size * sizeof(*queue->rx_buff);
queue->rx_buff = kzalloc(size, GFP_KERNEL);
if (!queue->rx_buff)
return -ENOMEM;
else
- netdev_dbg(bp->dev,
- "Allocated %d RX buff entries at %p\n",
+ netdev_dbg(bp->dev, "Allocated %d RX buff entries at %p\n",
bp->rx_ring_size, queue->rx_buff);
}
return 0;
@@ -2586,6 +2690,40 @@ static int macb_alloc_consistent(struct macb *bp)
return -ENOMEM;
}
+static int gem_create_page_pool(struct macb_queue *queue)
+{
+ struct page_pool_params pp_params = {
+ .order = 0,
+ .flags = PP_FLAG_DMA_MAP,
+ .pool_size = queue->bp->rx_ring_size,
+ .nid = NUMA_NO_NODE,
+ .dma_dir = DMA_FROM_DEVICE,
+ .dev = &queue->bp->pdev->dev,
+ .netdev = queue->bp->dev,
+ .napi = &queue->napi_rx,
+ .max_len = PAGE_SIZE,
+ };
+ struct page_pool *pool;
+ int err = 0;
+
+ /* This can happen in the case of HRESP error.
+ * Do nothing as the page pool already exists.
+ */
+ if (queue->page_pool)
+ return err;
+
+ pool = page_pool_create(&pp_params);
+ if (IS_ERR(pool)) {
+ netdev_err(queue->bp->dev, "cannot create rx page pool\n");
+ err = PTR_ERR(pool);
+ pool = NULL;
+ }
+
+ queue->page_pool = pool;
+
+ return err;
+}
+
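A note on the parameters above: PP_FLAG_DMA_MAP makes the pool map
each page once on allocation and unmap it only when the page finally
leaves the pool, so the hot path needs just
dma_sync_single_for_{device,cpu}() around descriptor use. The refill
path syncs exactly bp->rx_buffer_size bytes itself rather than
delegating per-buffer syncing to the page pool core.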
static void macb_init_tieoff(struct macb *bp)
{
struct macb_dma_desc *desc = bp->rx_ring_tieoff;
@@ -2621,12 +2759,19 @@ static int gem_init_rings(struct macb *bp, bool fail_early)
queue->rx_tail = 0;
queue->rx_prepared_head = 0;
+ /* This is a hard failure. In case of HRESP error
+ * recovery we always reuse the existing page pool.
+ */
+ last_err = gem_create_page_pool(queue);
+ if (last_err)
+ break;
+
/* We get called in two cases:
* - open: we can propagate alloc errors (so fail early),
* - HRESP error: cannot propagate, we attempt to reinit
* all queues in case of failure.
*/
- err = gem_rx_refill(queue);
+ err = gem_rx_refill(queue, false);
if (err) {
last_err = err;
if (fail_early)
@@ -2770,39 +2915,40 @@ static void macb_configure_dma(struct macb *bp)
unsigned int q;
u32 dmacfg;
- buffer_size = bp->rx_buffer_size / RX_BUFFER_MULTIPLE;
- if (macb_is_gem(bp)) {
- dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L);
- for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
- if (q)
- queue_writel(queue, RBQS, buffer_size);
- else
- dmacfg |= GEM_BF(RXBS, buffer_size);
- }
- if (bp->dma_burst_length)
- dmacfg = GEM_BFINS(FBLDO, bp->dma_burst_length, dmacfg);
- dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L);
- dmacfg &= ~GEM_BIT(ENDIA_PKT);
+ if (!macb_is_gem(bp))
+ return;
- if (bp->native_io)
- dmacfg &= ~GEM_BIT(ENDIA_DESC);
+ buffer_size = bp->rx_buffer_size / RX_BUFFER_MULTIPLE;
+ dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L);
+ for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
+ if (q)
+ queue_writel(queue, RBQS, buffer_size);
else
- dmacfg |= GEM_BIT(ENDIA_DESC); /* CPU in big endian */
+ dmacfg |= GEM_BF(RXBS, buffer_size);
+ }
+ if (bp->dma_burst_length)
+ dmacfg = GEM_BFINS(FBLDO, bp->dma_burst_length, dmacfg);
+ dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L);
+ dmacfg &= ~GEM_BIT(ENDIA_PKT);
- if (bp->dev->features & NETIF_F_HW_CSUM)
- dmacfg |= GEM_BIT(TXCOEN);
- else
- dmacfg &= ~GEM_BIT(TXCOEN);
+ if (bp->native_io)
+ dmacfg &= ~GEM_BIT(ENDIA_DESC);
+ else
+ dmacfg |= GEM_BIT(ENDIA_DESC); /* CPU in big endian */
- dmacfg &= ~GEM_BIT(ADDR64);
- if (macb_dma64(bp))
- dmacfg |= GEM_BIT(ADDR64);
- if (macb_dma_ptp(bp))
- dmacfg |= GEM_BIT(RXEXT) | GEM_BIT(TXEXT);
- netdev_dbg(bp->dev, "Cadence configure DMA with 0x%08x\n",
- dmacfg);
- gem_writel(bp, DMACFG, dmacfg);
- }
+ if (bp->dev->features & NETIF_F_HW_CSUM)
+ dmacfg |= GEM_BIT(TXCOEN);
+ else
+ dmacfg &= ~GEM_BIT(TXCOEN);
+
+ dmacfg &= ~GEM_BIT(ADDR64);
+ if (macb_dma64(bp))
+ dmacfg |= GEM_BIT(ADDR64);
+ if (macb_dma_ptp(bp))
+ dmacfg |= GEM_BIT(RXEXT) | GEM_BIT(TXEXT);
+ netdev_dbg(bp->dev, "Cadence configure DMA with 0x%08x\n",
+ dmacfg);
+ gem_writel(bp, DMACFG, dmacfg);
}
static void macb_init_hw(struct macb *bp)
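For reference, the RXBS/RBQS fields program the buffer size in units
of RX_BUFFER_MULTIPLE (64) bytes; with the MTU 1500 example above:

	buffer_size = bp->rx_buffer_size / RX_BUFFER_MULTIPLE; /* 1536 / 64 = 24 */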
@@ -2965,7 +3111,6 @@ static void macb_set_rx_mode(struct net_device *dev)
static int macb_open(struct net_device *dev)
{
- size_t bufsz = dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN;
struct macb *bp = netdev_priv(dev);
struct macb_queue *queue;
unsigned int q;
@@ -2978,7 +3123,7 @@ static int macb_open(struct net_device *dev)
return err;
/* RX buffers initialization */
- macb_init_rx_buffer_size(bp, bufsz);
+ macb_init_rx_buffer_size(bp, dev->mtu);
err = macb_alloc_consistent(bp);
if (err) {
@@ -5643,6 +5788,14 @@ static int macb_probe(struct platform_device *pdev)
if (err)
goto err_out_phy_exit;
+ if (macb_is_gem(bp)) {
+ bp->rx_headroom = NET_SKB_PAD;
+ if (!(bp->caps & MACB_CAPS_RSC)) {
+ bp->rx_ip_align = NET_IP_ALIGN;
+ bp->rx_headroom += NET_IP_ALIGN;
+ }
+ }
+
netif_carrier_off(dev);
err = register_netdev(dev);
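For a typical 64 byte cache line system the probe-time setup above
yields (values differ where NET_SKB_PAD or NET_IP_ALIGN differ):

	/* RSC-capable GEM: rx_headroom = 64 (NET_SKB_PAD), rx_ip_align = 0
	 * non-RSC GEM:     rx_headroom = 66,               rx_ip_align = 2
	 * plain MACB:      both 0 (the page pool path is GEM only)
	 */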
--
2.52.0
Thread overview: 11+ messages
2026-03-02 11:52 [PATCH net-next v3 0/8] net: macb: Add XDP support and page pool integration Paolo Valerio
2026-03-02 11:52 ` [PATCH net-next v3 1/8] net: macb: move Rx buffers alloc from link up to open Paolo Valerio
2026-03-02 11:52 ` [PATCH net-next v3 2/8] net: macb: rename rx_skbuff into rx_buff Paolo Valerio
2026-03-02 11:52 ` Paolo Valerio [this message]
2026-03-05 10:53 ` [PATCH net-next v3 3/8] net: macb: Add page pool support, handle multi-descriptor frame rx Paolo Abeni
2026-03-02 11:52 ` [PATCH net-next v3 4/8] net: macb: use the current queue number for stats Paolo Valerio
2026-03-02 11:52 ` [PATCH net-next v3 5/8] net: macb: make macb_tx_skb generic Paolo Valerio
2026-03-02 11:52 ` [PATCH net-next v3 6/8] net: macb: generalize tx buffer handling Paolo Valerio
2026-03-02 11:52 ` [PATCH net-next v3 7/8] net: macb: add XDP support for gem Paolo Valerio
2026-03-05 11:01 ` [net-next,v3,7/8] " Paolo Abeni
2026-03-02 11:52 ` [PATCH net-next v3 8/8] net: macb: introduce ndo_xdp_xmit support Paolo Valerio