From: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
To: netdev@vger.kernel.org
Cc: Andrew Lunn <andrew+netdev@lunn.ch>,
"David S. Miller" <davem@davemloft.net>,
Eric Dumazet <edumazet@google.com>,
Jakub Kicinski <kuba@kernel.org>, Paolo Abeni <pabeni@redhat.com>,
Xuan Zhuo <xuanzhuo@linux.alibaba.com>,
Wen Gu <guwen@linux.alibaba.com>,
Philo Lu <lulie@linux.alibaba.com>,
Vadim Fedorenko <vadim.fedorenko@linux.dev>,
Dong Yibo <dong100@mucse.com>,
Ethan Nelson-Moore <enelsonmoore@gmail.com>,
Heiner Kallweit <hkallweit1@gmail.com>,
Vivian Wang <wangruikang@iscas.ac.cn>,
Dust Li <dust.li@linux.alibaba.com>
Subject: [PATCH net-next v35 6/8] eea: implement packet transmit logic
Date: Mon, 23 Mar 2026 15:44:39 +0800 [thread overview]
Message-ID: <20260323074441.91691-7-xuanzhuo@linux.alibaba.com> (raw)
In-Reply-To: <20260323074441.91691-1-xuanzhuo@linux.alibaba.com>
Implement the core logic for transmitting packets in the EEA TX path,
including packet preparation and submission to the underlying transport.
Reviewed-by: Dust Li <dust.li@linux.alibaba.com>
Reviewed-by: Philo Lu <lulie@linux.alibaba.com>
Signed-off-by: Wen Gu <guwen@linux.alibaba.com>
Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
---
drivers/net/ethernet/alibaba/eea/eea_net.c | 8 +
drivers/net/ethernet/alibaba/eea/eea_tx.c | 283 ++++++++++++++++++++-
2 files changed, 288 insertions(+), 3 deletions(-)
diff --git a/drivers/net/ethernet/alibaba/eea/eea_net.c b/drivers/net/ethernet/alibaba/eea/eea_net.c
index 9f9ea0571a43..2217d53ae541 100644
--- a/drivers/net/ethernet/alibaba/eea/eea_net.c
+++ b/drivers/net/ethernet/alibaba/eea/eea_net.c
@@ -681,6 +681,12 @@ int eea_net_probe(struct eea_device *edev)
if (err)
goto err_reset_dev;
+ netif_carrier_off(enet->netdev);
+
+ err = register_netdev(enet->netdev);
+ if (err)
+ goto err_reset_dev;
+
eea_update_ts_off(edev, enet);
netdev_dbg(enet->netdev, "eea probe success.\n");
@@ -732,6 +738,8 @@ void eea_net_remove(struct eea_device *edev, bool ha)
return;
}
+ unregister_netdev(netdev);
+
if (!enet->wait_pci_ready) {
eea_device_reset(edev);
eea_destroy_adminq(enet);
diff --git a/drivers/net/ethernet/alibaba/eea/eea_tx.c b/drivers/net/ethernet/alibaba/eea/eea_tx.c
index d247e658385b..06338af8e3fa 100644
--- a/drivers/net/ethernet/alibaba/eea/eea_tx.c
+++ b/drivers/net/ethernet/alibaba/eea/eea_tx.c
@@ -11,6 +11,11 @@
#include "eea_pci.h"
#include "eea_ring.h"
+/* Accumulator for completion-path statistics: how many packets and
+ * payload bytes were reclaimed in one reaping pass (see eea_clean_tx()
+ * and eea_free_meta()).
+ */
+struct eea_sq_free_stats {
+ u64 packets;
+ u64 bytes;
+};
+
struct eea_tx_meta {
struct eea_tx_meta *next;
@@ -28,21 +33,293 @@ struct eea_tx_meta {
u16 dma_len;
};
+/* Pop one meta entry off the per-queue singly linked free list.
+ *
+ * Returns NULL when the free list is empty. On the xmit path callers
+ * do not check for NULL; presumably the queue-stop logic (see
+ * netif_txq_maybe_stop() in eea_tx_xmit()) guarantees availability --
+ * TODO(review): confirm free-list depth always tracks ering->num_free.
+ */
+static struct eea_tx_meta *eea_tx_meta_get(struct eea_net_tx *tx)
+{
+ struct eea_tx_meta *meta;
+
+ if (!tx->free)
+ return NULL;
+
+ meta = tx->free;
+ tx->free = meta->next;
+
+ return meta;
+}
+
+/* DMA-unmap an entire descriptor chain and return it to the free list.
+ *
+ * The first entry of the chain holds the linearly mapped skb head
+ * (mapped with dma_map_single() in eea_tx_post_skb()); every following
+ * entry holds a page fragment (mapped with skb_frag_dma_map()), hence
+ * the single/page unmap split below.
+ */
+static void eea_tx_meta_put_and_unmap(struct eea_net_tx *tx,
+ struct eea_tx_meta *meta)
+{
+ struct eea_tx_meta *head;
+
+ head = meta;
+
+ while (true) {
+ /* head == meta only on the first iteration, i.e. the skb
+ * head mapping; all later iterations are fragments.
+ */
+ if (head == meta)
+ dma_unmap_single(tx->dma_dev, meta->dma_addr,
+ meta->dma_len, DMA_TO_DEVICE);
+ else
+ dma_unmap_page(tx->dma_dev, meta->dma_addr,
+ meta->dma_len, DMA_TO_DEVICE);
+
+ if (meta->next) {
+ meta = meta->next;
+ continue;
+ }
+
+ break;
+ }
+
+ /* meta now points at the chain tail: splice the whole chain back
+ * onto the free list in one step.
+ */
+ meta->next = tx->free;
+ tx->free = head;
+}
+
+/* Release a completed TX skb and account it in @stats.
+ *
+ * @desc may be NULL: the device-teardown path (eea_free_meta()) frees
+ * in-flight skbs without a completion descriptor, in which case no
+ * hardware timestamp can be reported.
+ */
+static void eea_meta_free_xmit(struct eea_net_tx *tx,
+ struct eea_tx_meta *meta,
+ int budget,
+ struct eea_tx_cdesc *desc,
+ struct eea_sq_free_stats *stats)
+{
+ struct sk_buff *skb = meta->skb;
+
+ if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && desc)) {
+ struct skb_shared_hwtstamps ts = {};
+
+ /* Device timestamps are adjusted to host time by the
+ * per-device offset (set up via eea_update_ts_off()).
+ */
+ ts.hwtstamp = EEA_DESC_TS(desc) + tx->enet->hw_ts_offset;
+ skb_tstamp_tx(skb, &ts);
+ }
+
+ ++stats->packets;
+ stats->bytes += meta->skb->len;
+ napi_consume_skb(meta->skb, budget);
+
+ /* Mark the meta entry as reaped; eea_clean_tx() keys off this. */
+ meta->data = NULL;
+}
+
+/* Reap TX completion descriptors: unmap, free the skbs, ack the ring.
+ *
+ * Returns the number of packets reclaimed.
+ *
+ * NOTE(review): stats.bytes is accumulated but discarded here --
+ * presumably wired up to netdev stats in a later patch of the series;
+ * verify.
+ */
+static u32 eea_clean_tx(struct eea_net_tx *tx, int budget)
+{
+ struct eea_sq_free_stats stats = {0};
+ struct eea_tx_cdesc *desc;
+ struct eea_tx_meta *meta;
+ int desc_n;
+ u16 id;
+
+ while ((desc = ering_cq_get_desc(tx->ering))) {
+ id = le16_to_cpu(desc->id);
+ /* Defend against a malfunctioning device handing back an
+ * out-of-range id: ack one descriptor and keep going.
+ */
+ if (unlikely(id >= tx->ering->num)) {
+ netdev_err(tx->enet->netdev, "tx invalid id %d\n", id);
+ ering_cq_ack_desc(tx->ering, 1);
+ continue;
+ }
+
+ meta = &tx->meta[id];
+
+ if (meta->data) {
+ eea_tx_meta_put_and_unmap(tx, meta);
+ eea_meta_free_xmit(tx, meta, budget, desc, &stats);
+ /* One completion covers the whole chain: head + frags. */
+ desc_n = meta->num;
+ } else {
+ /* Duplicate/spurious completion: entry already reaped. */
+ netdev_err(tx->enet->netdev,
+ "tx meta->data is null. id %d num: %d\n",
+ meta->id, meta->num);
+ desc_n = 1;
+ }
+
+ ering_cq_ack_desc(tx->ering, desc_n);
+ }
+
+ return stats.packets;
+}
+
+/* NAPI TX completion handler: reap finished descriptors under the TX
+ * queue lock and restart the queue once enough ring space (a full
+ * worst-case skb: MAX_SKB_FRAGS + 1 descriptors) is available again.
+ *
+ * Always returns 0 so TX reaping does not consume the NAPI budget --
+ * presumably the RX poll owns the budget; TODO(review) confirm against
+ * the driver's napi setup.
+ */
int eea_poll_tx(struct eea_net_tx *tx, int budget)
{
- /* Empty function; will be implemented in a subsequent commit. */
+ struct eea_net *enet = tx->enet;
+ u32 index = tx - enet->tx;
+ struct netdev_queue *txq;
+
+ txq = netdev_get_tx_queue(enet->netdev, index);
+
+ /* Serialize against the xmit path, which runs under the same
+ * per-queue lock.
+ */
+ __netif_tx_lock(txq, smp_processor_id());
+
+ eea_clean_tx(tx, budget);
+
+ if (netif_tx_queue_stopped(txq) &&
+ tx->ering->num_free >= MAX_SKB_FRAGS + 1)
+ netif_tx_wake_queue(txq);
+
+ __netif_tx_unlock(txq);
+
+ return 0;
+}
+
+/* Translate skb offload metadata (GSO type/size, partial checksum
+ * offsets) into the TX descriptor.
+ *
+ * Returns 0 on success, -EINVAL for a GSO type the device does not
+ * support (caller unwinds the posted descriptors).
+ */
+static int eea_fill_desc_from_skb(const struct sk_buff *skb,
+ struct eea_tx_desc *desc)
+{
+ if (skb_is_gso(skb)) {
+ struct skb_shared_info *sinfo = skb_shinfo(skb);
+
+ desc->gso_size = cpu_to_le16(sinfo->gso_size);
+ if (sinfo->gso_type & SKB_GSO_TCPV4)
+ desc->gso_type = EEA_TX_GSO_TCPV4;
+
+ else if (sinfo->gso_type & SKB_GSO_TCPV6)
+ desc->gso_type = EEA_TX_GSO_TCPV6;
+
+ else if (sinfo->gso_type & SKB_GSO_UDP_L4)
+ desc->gso_type = EEA_TX_GSO_UDP_L4;
+
+ else
+ return -EINVAL;
+
+ /* ECN is a modifier flag on top of the base GSO type. */
+ if (sinfo->gso_type & SKB_GSO_TCP_ECN)
+ desc->gso_type |= EEA_TX_GSO_ECN;
+ } else {
+ desc->gso_type = EEA_TX_GSO_NONE;
+ }
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ desc->csum_start = cpu_to_le16(skb_checksum_start_offset(skb));
+ desc->csum_offset = cpu_to_le16(skb->csum_offset);
+ }
+
+ return 0;
+}
+
+/* Allocate one meta entry plus one SQ descriptor and record the DMA
+ * mapping (@addr/@len) in both.
+ *
+ * NOTE(review): eea_tx_meta_get() can return NULL when the free list
+ * is exhausted, yet meta->id is dereferenced unconditionally below.
+ * This relies on the xmit path stopping the queue while fewer than
+ * MAX_SKB_FRAGS + 1 entries remain free -- confirm that invariant
+ * also bounds the meta free list, or add a NULL check.
+ */
+static struct eea_tx_meta *eea_tx_desc_fill(struct eea_net_tx *tx,
+ dma_addr_t addr, u32 len,
+ bool is_last, void *data, u16 flags)
+{
+ struct eea_tx_meta *meta;
+ struct eea_tx_desc *desc;
+
+ meta = eea_tx_meta_get(tx);
+
+ desc = ering_sq_alloc_desc(tx->ering, meta->id, is_last, flags);
+ desc->addr = cpu_to_le64(addr);
+ desc->len = cpu_to_le16(len);
+
+ meta->next = NULL;
+ meta->dma_len = len;
+ meta->dma_addr = addr;
+ meta->data = data;
+ /* Chain length; the head entry is updated to frags + 1 later. */
+ meta->num = 1;
+ meta->desc = desc;
+
+ return meta;
+}
+
+/* DMA-map one skb page fragment, post a descriptor for it, and link
+ * its meta entry into the chain right after @head_meta.
+ *
+ * Returns 0 on success, -ENOMEM if the DMA mapping fails (caller
+ * unwinds via ering_sq_cancel() + eea_tx_meta_put_and_unmap()).
+ */
+static int eea_tx_add_skb_frag(struct eea_net_tx *tx,
+ struct eea_tx_meta *head_meta,
+ const skb_frag_t *frag, bool is_last)
+{
+ u32 len = skb_frag_size(frag);
+ struct eea_tx_meta *meta;
+ dma_addr_t addr;
+
+ addr = skb_frag_dma_map(tx->dma_dev, frag, 0, len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(tx->dma_dev, addr)))
+ return -ENOMEM;
+
+ meta = eea_tx_desc_fill(tx, addr, len, is_last, NULL, 0);
+
+ /* Insert after the head; chain order need not match frag order
+ * for unmapping purposes.
+ */
+ meta->next = head_meta->next;
+ head_meta->next = meta;
+
 return 0;
}
+/* Post one skb to the send queue: map the linear head, emit one
+ * descriptor per fragment, then commit the whole chain to the ring.
+ *
+ * Returns 0 on success or a negative errno; on failure every posted
+ * descriptor is cancelled and every mapping undone, so the caller only
+ * has to free the skb.
+ */
+static int eea_tx_post_skb(struct eea_net_tx *tx, struct sk_buff *skb)
+{
+ const struct skb_shared_info *shinfo = skb_shinfo(skb);
+ u32 hlen = skb_headlen(skb);
+ struct eea_tx_meta *meta;
+ dma_addr_t addr;
+ int i, err;
+ u16 flags;
+
+ addr = dma_map_single(tx->dma_dev, skb->data, hlen, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(tx->dma_dev, addr)))
+ return -ENOMEM;
+
+ flags = skb->ip_summed == CHECKSUM_PARTIAL ? EEA_DESC_F_DO_CSUM : 0;
+
+ /* Head descriptor carries the skb pointer and the offload flags;
+ * it is also the last descriptor when the skb has no fragments.
+ */
+ meta = eea_tx_desc_fill(tx, addr, hlen, !shinfo->nr_frags, skb, flags);
+
+ err = eea_fill_desc_from_skb(skb, meta->desc);
+ if (err)
+ goto err_cancel;
+
+ for (i = 0; i < shinfo->nr_frags; i++) {
+ const skb_frag_t *frag = &shinfo->frags[i];
+ bool is_last = i == (shinfo->nr_frags - 1);
+
+ err = eea_tx_add_skb_frag(tx, meta, frag, is_last);
+ if (err)
+ goto err_cancel;
+ }
+
+ /* Completion handling acks this many CQ descriptors at once. */
+ meta->num = shinfo->nr_frags + 1;
+ ering_sq_commit_desc(tx->ering);
+
+ return 0;
+
+err_cancel:
+ /* Roll back the uncommitted SQ descriptors, unmap and return the
+ * partial meta chain (head + any frags mapped so far).
+ */
+ ering_sq_cancel(tx->ering);
+ eea_tx_meta_put_and_unmap(tx, meta);
+ meta->data = NULL;
+ return err;
+}
+
+/* Ring the doorbell so the device starts processing posted TX work. */
+static void eea_tx_kick(struct eea_net_tx *tx)
+{
+ ering_kick(tx->ering);
+}
+
+/* .ndo_start_xmit: queue one skb for transmission.
+ *
+ * Runs under the per-queue xmit lock taken by the core. On post
+ * failure the skb is dropped and NETDEV_TX_OK is still returned, as
+ * the ndo contract requires for consumed-and-dropped packets.
+ * NOTE(review): no tx_dropped counter update is visible in this hunk
+ * -- presumably handled by the stats patch later in the series; verify.
+ */
netdev_tx_t eea_tx_xmit(struct sk_buff *skb, struct net_device *netdev)
{
- /* Empty function; will be implemented in a subsequent commit. */
- dev_kfree_skb_any(skb);
+ struct eea_net *enet = netdev_priv(netdev);
+ int qnum = skb_get_queue_mapping(skb);
+ struct eea_net_tx *tx = &enet->tx[qnum];
+ struct netdev_queue *txq;
+ int err, n;
+
+ txq = netdev_get_tx_queue(netdev, qnum);
+
+ /* Software timestamp is taken before posting, per stack convention. */
+ skb_tx_timestamp(skb);
+
+ err = eea_tx_post_skb(tx, skb);
+ if (unlikely(err))
+ dev_kfree_skb_any(skb);
+
+ /* NETDEV_TX_BUSY is expensive. So stop advancing the TX queue. */
+ n = MAX_SKB_FRAGS + 1;
+ netif_txq_maybe_stop(txq, tx->ering->num_free, n, n);
+
+ /* Batch doorbells across xmit_more trains; always kick when the
+ * queue just stopped so posted work is not stranded.
+ */
+ if (!netdev_xmit_more() || netif_xmit_stopped(txq))
+ eea_tx_kick(tx);
+
 return NETDEV_TX_OK;
}
+/* Teardown: release every in-flight TX skb and free the meta array.
+ *
+ * First the free list is drained, clearing each entry's skb pointer so
+ * the sweep below cannot mistake a recycled entry for an in-flight one.
+ * Any entry still holding an skb after that is genuinely outstanding:
+ * unmap its chain and free the skb (desc == NULL: no hw timestamp).
+ */
static void eea_free_meta(struct eea_net_tx *tx, struct eea_net_cfg *cfg)
{
+ struct eea_sq_free_stats stats = {0};
+ struct eea_tx_meta *meta;
+ int i;
+
+ while ((meta = eea_tx_meta_get(tx)))
+ meta->skb = NULL;
+
+ for (i = 0; i < cfg->tx_ring_depth; i++) {
+ meta = &tx->meta[i];
+
+ if (!meta->skb)
+ continue;
+
+ eea_tx_meta_put_and_unmap(tx, meta);
+
+ eea_meta_free_xmit(tx, meta, 0, NULL, &stats);
+ }
+
 kvfree(tx->meta);
 tx->meta = NULL;
}
--
2.32.0.3.g01195cf9f
next prev parent reply other threads:[~2026-03-23 7:44 UTC|newest]
Thread overview: 13+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-03-23 7:44 [PATCH net-next v35 0/8] eea: Add basic driver framework for Alibaba Elastic Ethernet Adaptor Xuan Zhuo
2026-03-23 7:44 ` [PATCH net-next v35 1/8] eea: introduce PCI framework Xuan Zhuo
2026-03-23 7:44 ` [PATCH net-next v35 2/8] eea: introduce ring and descriptor structures Xuan Zhuo
2026-03-23 7:44 ` [PATCH net-next v35 3/8] eea: probe the netdevice and create adminq Xuan Zhuo
2026-03-23 7:44 ` [PATCH net-next v35 4/8] eea: create/destroy rx,tx queues for netdevice open and stop Xuan Zhuo
2026-03-23 7:44 ` [PATCH net-next v35 5/8] eea: implement packet receive logic Xuan Zhuo
2026-03-23 7:44 ` Xuan Zhuo [this message]
2026-03-23 7:44 ` [PATCH net-next v35 7/8] eea: introduce ethtool support Xuan Zhuo
2026-03-23 7:44 ` [PATCH net-next v35 8/8] eea: introduce callback for ndo_get_stats64 Xuan Zhuo
2026-03-26 11:26 ` [PATCH net-next v35 0/8] eea: Add basic driver framework for Alibaba Elastic Ethernet Adaptor Paolo Abeni
2026-03-26 11:38 ` Xuan Zhuo
2026-03-26 12:06 ` Paolo Abeni
2026-03-26 12:11 ` Xuan Zhuo
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260323074441.91691-7-xuanzhuo@linux.alibaba.com \
--to=xuanzhuo@linux.alibaba.com \
--cc=andrew+netdev@lunn.ch \
--cc=davem@davemloft.net \
--cc=dong100@mucse.com \
--cc=dust.li@linux.alibaba.com \
--cc=edumazet@google.com \
--cc=enelsonmoore@gmail.com \
--cc=guwen@linux.alibaba.com \
--cc=hkallweit1@gmail.com \
--cc=kuba@kernel.org \
--cc=lulie@linux.alibaba.com \
--cc=netdev@vger.kernel.org \
--cc=pabeni@redhat.com \
--cc=vadim.fedorenko@linux.dev \
--cc=wangruikang@iscas.ac.cn \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox