From: Paolo Valerio <pvalerio@redhat.com>
To: netdev@vger.kernel.org
Cc: "Nicolas Ferre" <nicolas.ferre@microchip.com>,
"Claudiu Beznea" <claudiu.beznea@tuxon.dev>,
"Andrew Lunn" <andrew+netdev@lunn.ch>,
"David S. Miller" <davem@davemloft.net>,
"Eric Dumazet" <edumazet@google.com>,
"Jakub Kicinski" <kuba@kernel.org>,
"Paolo Abeni" <pabeni@redhat.com>,
"Lorenzo Bianconi" <lorenzo@kernel.org>,
"Théo Lebrun" <theo.lebrun@bootlin.com>
Subject: [PATCH net-next v3 5/8] net: macb: make macb_tx_skb generic
Date: Mon, 2 Mar 2026 12:52:29 +0100 [thread overview]
Message-ID: <20260302115232.1430640-6-pvalerio@redhat.com> (raw)
In-Reply-To: <20260302115232.1430640-1-pvalerio@redhat.com>
The macb_tx_skb structure is renamed to macb_tx_buff with
no functional changes.
This is a preparatory step for adding XDP xmit support.
Signed-off-by: Paolo Valerio <pvalerio@redhat.com>
---
drivers/net/ethernet/cadence/macb.h | 8 +-
drivers/net/ethernet/cadence/macb_main.c | 96 ++++++++++++------------
2 files changed, 52 insertions(+), 52 deletions(-)
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index a78ad00f53b1..7228902ae20c 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -958,7 +958,7 @@ struct macb_dma_desc_ptp {
/* Scaled PPM fraction */
#define PPM_FRACTION 16
-/* struct macb_tx_skb - data about an skb which is being transmitted
+/* struct macb_tx_buff - data about an skb which is being transmitted
* @skb: skb currently being transmitted, only set for the last buffer
* of the frame
* @mapping: DMA address of the skb's fragment buffer
@@ -966,7 +966,7 @@ struct macb_dma_desc_ptp {
* @mapped_as_page: true when buffer was mapped with skb_frag_dma_map(),
* false when buffer was mapped with dma_map_single()
*/
-struct macb_tx_skb {
+struct macb_tx_buff {
struct sk_buff *skb;
dma_addr_t mapping;
size_t size;
@@ -1252,7 +1252,7 @@ struct macb_queue {
spinlock_t tx_ptr_lock;
unsigned int tx_head, tx_tail;
struct macb_dma_desc *tx_ring;
- struct macb_tx_skb *tx_skb;
+ struct macb_tx_buff *tx_buff;
dma_addr_t tx_ring_dma;
struct work_struct tx_error_task;
bool txubr_pending;
@@ -1330,7 +1330,7 @@ struct macb {
phy_interface_t phy_interface;
/* AT91RM9200 transmit queue (1 on wire + 1 queued) */
- struct macb_tx_skb rm9200_txq[2];
+ struct macb_tx_buff rm9200_txq[2];
unsigned int max_tx_length;
u64 ethtool_stats[GEM_STATS_LEN + QUEUE_STATS_LEN * MACB_MAX_QUEUES];
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index b8b2b0111b49..2fedbea841ff 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -156,10 +156,10 @@ static struct macb_dma_desc *macb_tx_desc(struct macb_queue *queue,
return &queue->tx_ring[index];
}
-static struct macb_tx_skb *macb_tx_skb(struct macb_queue *queue,
- unsigned int index)
+static struct macb_tx_buff *macb_tx_buff(struct macb_queue *queue,
+ unsigned int index)
{
- return &queue->tx_skb[macb_tx_ring_wrap(queue->bp, index)];
+ return &queue->tx_buff[macb_tx_ring_wrap(queue->bp, index)];
}
static dma_addr_t macb_tx_dma(struct macb_queue *queue, unsigned int index)
@@ -982,21 +982,21 @@ static int macb_halt_tx(struct macb *bp)
bp, TSR);
}
-static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb, int budget)
+static void macb_tx_unmap(struct macb *bp, struct macb_tx_buff *tx_buff, int budget)
{
- if (tx_skb->mapping) {
- if (tx_skb->mapped_as_page)
- dma_unmap_page(&bp->pdev->dev, tx_skb->mapping,
- tx_skb->size, DMA_TO_DEVICE);
+ if (tx_buff->mapping) {
+ if (tx_buff->mapped_as_page)
+ dma_unmap_page(&bp->pdev->dev, tx_buff->mapping,
+ tx_buff->size, DMA_TO_DEVICE);
else
- dma_unmap_single(&bp->pdev->dev, tx_skb->mapping,
- tx_skb->size, DMA_TO_DEVICE);
- tx_skb->mapping = 0;
+ dma_unmap_single(&bp->pdev->dev, tx_buff->mapping,
+ tx_buff->size, DMA_TO_DEVICE);
+ tx_buff->mapping = 0;
}
- if (tx_skb->skb) {
- napi_consume_skb(tx_skb->skb, budget);
- tx_skb->skb = NULL;
+ if (tx_buff->skb) {
+ napi_consume_skb(tx_buff->skb, budget);
+ tx_buff->skb = NULL;
}
}
@@ -1042,7 +1042,7 @@ static void macb_tx_error_task(struct work_struct *work)
u32 queue_index;
u32 packets = 0;
u32 bytes = 0;
- struct macb_tx_skb *tx_skb;
+ struct macb_tx_buff *tx_buff;
struct macb_dma_desc *desc;
struct sk_buff *skb;
unsigned int tail;
@@ -1082,16 +1082,16 @@ static void macb_tx_error_task(struct work_struct *work)
desc = macb_tx_desc(queue, tail);
ctrl = desc->ctrl;
- tx_skb = macb_tx_skb(queue, tail);
- skb = tx_skb->skb;
+ tx_buff = macb_tx_buff(queue, tail);
+ skb = tx_buff->skb;
if (ctrl & MACB_BIT(TX_USED)) {
/* skb is set for the last buffer of the frame */
while (!skb) {
- macb_tx_unmap(bp, tx_skb, 0);
+ macb_tx_unmap(bp, tx_buff, 0);
tail++;
- tx_skb = macb_tx_skb(queue, tail);
- skb = tx_skb->skb;
+ tx_buff = macb_tx_buff(queue, tail);
+ skb = tx_buff->skb;
}
/* ctrl still refers to the first buffer descriptor
@@ -1120,7 +1120,7 @@ static void macb_tx_error_task(struct work_struct *work)
desc->ctrl = ctrl | MACB_BIT(TX_USED);
}
- macb_tx_unmap(bp, tx_skb, 0);
+ macb_tx_unmap(bp, tx_buff, 0);
}
netdev_tx_completed_queue(netdev_get_tx_queue(bp->dev, queue_index),
@@ -1198,7 +1198,7 @@ static int macb_tx_complete(struct macb_queue *queue, int budget)
spin_lock_irqsave(&queue->tx_ptr_lock, flags);
head = queue->tx_head;
for (tail = queue->tx_tail; tail != head && packets < budget; tail++) {
- struct macb_tx_skb *tx_skb;
+ struct macb_tx_buff *tx_buff;
struct sk_buff *skb;
struct macb_dma_desc *desc;
u32 ctrl;
@@ -1218,8 +1218,8 @@ static int macb_tx_complete(struct macb_queue *queue, int budget)
/* Process all buffers of the current transmitted frame */
for (;; tail++) {
- tx_skb = macb_tx_skb(queue, tail);
- skb = tx_skb->skb;
+ tx_buff = macb_tx_buff(queue, tail);
+ skb = tx_buff->skb;
/* First, update TX stats if needed */
if (skb) {
@@ -1239,7 +1239,7 @@ static int macb_tx_complete(struct macb_queue *queue, int budget)
}
/* Now we can safely release resources */
- macb_tx_unmap(bp, tx_skb, budget);
+ macb_tx_unmap(bp, tx_buff, budget);
/* skb is set only for the last buffer of the frame.
* WARNING: at this point skb has been freed by
@@ -2107,8 +2107,8 @@ static unsigned int macb_tx_map(struct macb *bp,
unsigned int f, nr_frags = skb_shinfo(skb)->nr_frags;
unsigned int len, i, tx_head = queue->tx_head;
u32 ctrl, lso_ctrl = 0, seq_ctrl = 0;
+ struct macb_tx_buff *tx_buff = NULL;
unsigned int eof = 1, mss_mfs = 0;
- struct macb_tx_skb *tx_skb = NULL;
struct macb_dma_desc *desc;
unsigned int offset, size;
dma_addr_t mapping;
@@ -2131,7 +2131,7 @@ static unsigned int macb_tx_map(struct macb *bp,
offset = 0;
while (len) {
- tx_skb = macb_tx_skb(queue, tx_head);
+ tx_buff = macb_tx_buff(queue, tx_head);
mapping = dma_map_single(&bp->pdev->dev,
skb->data + offset,
@@ -2140,10 +2140,10 @@ static unsigned int macb_tx_map(struct macb *bp,
goto dma_error;
/* Save info to properly release resources */
- tx_skb->skb = NULL;
- tx_skb->mapping = mapping;
- tx_skb->size = size;
- tx_skb->mapped_as_page = false;
+ tx_buff->skb = NULL;
+ tx_buff->mapping = mapping;
+ tx_buff->size = size;
+ tx_buff->mapped_as_page = false;
len -= size;
offset += size;
@@ -2160,7 +2160,7 @@ static unsigned int macb_tx_map(struct macb *bp,
offset = 0;
while (len) {
size = umin(len, bp->max_tx_length);
- tx_skb = macb_tx_skb(queue, tx_head);
+ tx_buff = macb_tx_buff(queue, tx_head);
mapping = skb_frag_dma_map(&bp->pdev->dev, frag,
offset, size, DMA_TO_DEVICE);
@@ -2168,10 +2168,10 @@ static unsigned int macb_tx_map(struct macb *bp,
goto dma_error;
/* Save info to properly release resources */
- tx_skb->skb = NULL;
- tx_skb->mapping = mapping;
- tx_skb->size = size;
- tx_skb->mapped_as_page = true;
+ tx_buff->skb = NULL;
+ tx_buff->mapping = mapping;
+ tx_buff->size = size;
+ tx_buff->mapped_as_page = true;
len -= size;
offset += size;
@@ -2180,13 +2180,13 @@ static unsigned int macb_tx_map(struct macb *bp,
}
/* Should never happen */
- if (unlikely(!tx_skb)) {
+ if (unlikely(!tx_buff)) {
netdev_err(bp->dev, "BUG! empty skb!\n");
return 0;
}
/* This is the last buffer of the frame: save socket buffer */
- tx_skb->skb = skb;
+ tx_buff->skb = skb;
/* Update TX ring: update buffer descriptors in reverse order
* to avoid race condition
@@ -2217,10 +2217,10 @@ static unsigned int macb_tx_map(struct macb *bp,
do {
i--;
- tx_skb = macb_tx_skb(queue, i);
+ tx_buff = macb_tx_buff(queue, i);
desc = macb_tx_desc(queue, i);
- ctrl = (u32)tx_skb->size;
+ ctrl = (u32)tx_buff->size;
if (eof) {
ctrl |= MACB_BIT(TX_LAST);
eof = 0;
@@ -2243,7 +2243,7 @@ static unsigned int macb_tx_map(struct macb *bp,
ctrl |= MACB_BF(MSS_MFS, mss_mfs);
/* Set TX buffer descriptor */
- macb_set_addr(bp, desc, tx_skb->mapping);
+ macb_set_addr(bp, desc, tx_buff->mapping);
/* desc->addr must be visible to hardware before clearing
* 'TX_USED' bit in desc->ctrl.
*/
@@ -2259,9 +2259,9 @@ static unsigned int macb_tx_map(struct macb *bp,
netdev_err(bp->dev, "TX DMA map failed\n");
for (i = queue->tx_head; i != tx_head; i++) {
- tx_skb = macb_tx_skb(queue, i);
+ tx_buff = macb_tx_buff(queue, i);
- macb_tx_unmap(bp, tx_skb, 0);
+ macb_tx_unmap(bp, tx_buff, 0);
}
return -ENOMEM;
@@ -2583,8 +2583,8 @@ static void macb_free_consistent(struct macb *bp)
dma_free_coherent(dev, size, bp->queues[0].rx_ring, bp->queues[0].rx_ring_dma);
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
- kfree(queue->tx_skb);
- queue->tx_skb = NULL;
+ kfree(queue->tx_buff);
+ queue->tx_buff = NULL;
queue->tx_ring = NULL;
queue->rx_ring = NULL;
}
@@ -2662,9 +2662,9 @@ static int macb_alloc_consistent(struct macb *bp)
queue->rx_ring = rx + macb_rx_ring_size_per_queue(bp) * q;
queue->rx_ring_dma = rx_dma + macb_rx_ring_size_per_queue(bp) * q;
- size = bp->tx_ring_size * sizeof(struct macb_tx_skb);
- queue->tx_skb = kmalloc(size, GFP_KERNEL);
- if (!queue->tx_skb)
+ size = bp->tx_ring_size * sizeof(struct macb_tx_buff);
+ queue->tx_buff = kmalloc(size, GFP_KERNEL);
+ if (!queue->tx_buff)
goto out_err;
}
--
2.52.0
next prev parent reply other threads:[~2026-03-02 11:52 UTC|newest]
Thread overview: 11+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-03-02 11:52 [PATCH net-next v3 0/8] net: macb: Add XDP support and page pool integration Paolo Valerio
2026-03-02 11:52 ` [PATCH net-next v3 1/8] net: macb: move Rx buffers alloc from link up to open Paolo Valerio
2026-03-02 11:52 ` [PATCH net-next v3 2/8] net: macb: rename rx_skbuff into rx_buff Paolo Valerio
2026-03-02 11:52 ` [PATCH net-next v3 3/8] net: macb: Add page pool support handle multi-descriptor frame rx Paolo Valerio
2026-03-05 10:53 ` Paolo Abeni
2026-03-02 11:52 ` [PATCH net-next v3 4/8] net: macb: use the current queue number for stats Paolo Valerio
2026-03-02 11:52 ` Paolo Valerio [this message]
2026-03-02 11:52 ` [PATCH net-next v3 6/8] net: macb: generalize tx buffer handling Paolo Valerio
2026-03-02 11:52 ` [PATCH net-next v3 7/8] net: macb: add XDP support for gem Paolo Valerio
2026-03-05 11:01 ` [net-next,v3,7/8] " Paolo Abeni
2026-03-02 11:52 ` [PATCH net-next v3 8/8] net: macb: introduce ndo_xdp_xmit support Paolo Valerio
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260302115232.1430640-6-pvalerio@redhat.com \
--to=pvalerio@redhat.com \
--cc=andrew+netdev@lunn.ch \
--cc=claudiu.beznea@tuxon.dev \
--cc=davem@davemloft.net \
--cc=edumazet@google.com \
--cc=kuba@kernel.org \
--cc=lorenzo@kernel.org \
--cc=netdev@vger.kernel.org \
--cc=nicolas.ferre@microchip.com \
--cc=pabeni@redhat.com \
--cc=theo.lebrun@bootlin.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.