From: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
To: netdev@vger.kernel.org
Cc: Andrew Lunn <andrew+netdev@lunn.ch>,
"David S. Miller" <davem@davemloft.net>,
Eric Dumazet <edumazet@google.com>,
Jakub Kicinski <kuba@kernel.org>, Paolo Abeni <pabeni@redhat.com>,
Xuan Zhuo <xuanzhuo@linux.alibaba.com>,
Wen Gu <guwen@linux.alibaba.com>,
Philo Lu <lulie@linux.alibaba.com>,
Vadim Fedorenko <vadim.fedorenko@linux.dev>,
Dong Yibo <dong100@mucse.com>,
Ethan Nelson-Moore <enelsonmoore@gmail.com>,
Heiner Kallweit <hkallweit1@gmail.com>,
Vivian Wang <wangruikang@iscas.ac.cn>,
Dust Li <dust.li@linux.alibaba.com>
Subject: [PATCH net-next v35 5/8] eea: implement packet receive logic
Date: Mon, 23 Mar 2026 15:44:38 +0800 [thread overview]
Message-ID: <20260323074441.91691-6-xuanzhuo@linux.alibaba.com> (raw)
In-Reply-To: <20260323074441.91691-1-xuanzhuo@linux.alibaba.com>
Implement the core logic for receiving packets in the EEA RX path.
This covers posting receive buffers allocated from the page pool,
validating completion descriptors (id, length, and split-header size),
building skbs — including the split-header case and multi-buffer
packets chained via frags/frag_list — and delivering completed packets
to the stack through NAPI GRO.
Reviewed-by: Dust Li <dust.li@linux.alibaba.com>
Reviewed-by: Philo Lu <lulie@linux.alibaba.com>
Signed-off-by: Wen Gu <guwen@linux.alibaba.com>
Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
---
drivers/net/ethernet/alibaba/eea/eea_net.h | 2 +
drivers/net/ethernet/alibaba/eea/eea_rx.c | 500 ++++++++++++++++++++-
2 files changed, 500 insertions(+), 2 deletions(-)
diff --git a/drivers/net/ethernet/alibaba/eea/eea_net.h b/drivers/net/ethernet/alibaba/eea/eea_net.h
index bff349f6747f..98b333871eda 100644
--- a/drivers/net/ethernet/alibaba/eea/eea_net.h
+++ b/drivers/net/ethernet/alibaba/eea/eea_net.h
@@ -62,6 +62,7 @@ struct eea_net_rx_pkt_ctx {
bool data_valid;
bool do_drop;
+ u32 recv_len;
struct sk_buff *head_skb;
struct sk_buff *curr_skb;
};
@@ -167,6 +168,7 @@ void eea_init_ctx(struct eea_net *enet, struct eea_net_init_ctx *ctx);
int eea_queues_check_and_reset(struct eea_device *edev);
/* rx apis */
+
void enet_rx_stop(struct eea_net_rx *rx);
void enet_rx_start(struct eea_net_rx *rx);
diff --git a/drivers/net/ethernet/alibaba/eea/eea_rx.c b/drivers/net/ethernet/alibaba/eea/eea_rx.c
index 9d2bc1ff2120..9241b1d39d48 100644
--- a/drivers/net/ethernet/alibaba/eea/eea_rx.c
+++ b/drivers/net/ethernet/alibaba/eea/eea_rx.c
@@ -17,6 +17,37 @@
#define EEA_PAGE_FRAGS_NUM 1024
+#define EEA_RX_BUF_ALIGN 128
+
+struct eea_rx_ctx {
+ u32 len;
+ u32 hdr_len;
+
+ u16 flags;
+ bool more;
+
+ struct eea_rx_meta *meta;
+};
+
+static struct eea_rx_meta *eea_rx_meta_get(struct eea_net_rx *rx)
+{
+ struct eea_rx_meta *meta;
+
+ if (!rx->free)
+ return NULL;
+
+ meta = rx->free;
+ rx->free = meta->next;
+
+ return meta;
+}
+
+static void eea_rx_meta_put(struct eea_net_rx *rx, struct eea_rx_meta *meta)
+{
+ meta->next = rx->free;
+ rx->free = meta;
+}
+
static void eea_free_rx_buffer(struct eea_net_rx *rx, struct eea_rx_meta *meta)
{
u32 drain_count;
@@ -29,6 +60,63 @@ static void eea_free_rx_buffer(struct eea_net_rx *rx, struct eea_rx_meta *meta)
meta->page = NULL;
}
+static void meta_align_offset(struct eea_net_rx *rx, struct eea_rx_meta *meta)
+{
+ int h, b;
+
+ h = rx->headroom;
+ b = meta->offset + h;
+
+ /* For better performance, we align the buffer address to
+ * EEA_RX_BUF_ALIGN, as required by the device design.
+ */
+ b = ALIGN(b, EEA_RX_BUF_ALIGN);
+
+ meta->offset = b - h;
+}
+
+static int eea_alloc_rx_buffer(struct eea_net_rx *rx, struct eea_rx_meta *meta)
+{
+ struct page *page;
+
+ if (meta->page)
+ return 0;
+
+ page = page_pool_dev_alloc_pages(rx->pp);
+ if (!page)
+ return -ENOMEM;
+
+ page_pool_fragment_page(page, EEA_PAGE_FRAGS_NUM);
+
+ meta->page = page;
+ meta->dma = page_pool_get_dma_addr(page);
+ meta->offset = 0;
+ meta->frags = 0;
+
+ meta_align_offset(rx, meta);
+
+ return 0;
+}
+
+static void eea_consume_rx_buffer(struct eea_net_rx *rx,
+ struct eea_rx_meta *meta,
+ u32 consumed)
+{
+ int min;
+
+ meta->offset += consumed;
+ ++meta->frags;
+
+ min = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ min += rx->headroom;
+ min += ETH_DATA_LEN;
+
+ meta_align_offset(rx, meta);
+
+ if (min + meta->offset > PAGE_SIZE)
+ eea_free_rx_buffer(rx, meta);
+}
+
static void eea_free_rx_hdr(struct eea_net_rx *rx, struct eea_net_cfg *cfg)
{
struct eea_rx_meta *meta;
@@ -94,12 +182,420 @@ static int eea_alloc_rx_hdr(struct eea_net_init_ctx *ctx, struct eea_net_rx *rx)
return -ENOMEM;
}
-static int eea_poll(struct napi_struct *napi, int budget)
+static void eea_rx_meta_dma_sync_for_cpu(struct eea_net_rx *rx,
+ struct eea_rx_meta *meta, u32 len)
+{
+ dma_sync_single_for_cpu(rx->enet->edev->dma_dev,
+ meta->dma + meta->offset + meta->headroom,
+ len, DMA_FROM_DEVICE);
+}
+
+static int eea_harden_check_overflow(struct eea_rx_ctx *ctx,
+ struct eea_net *enet)
+{
+ u32 max_len;
+
+ max_len = ctx->meta->truesize - ctx->meta->headroom -
+ ctx->meta->tailroom;
+
+ if (unlikely(ctx->len > max_len)) {
+ pr_debug("%s: rx error: len %u exceeds truesize %u\n",
+ enet->netdev->name, ctx->len, max_len);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int eea_harden_check_size(struct eea_rx_ctx *ctx, struct eea_net *enet)
+{
+ int err;
+
+ err = eea_harden_check_overflow(ctx, enet);
+ if (err)
+ return err;
+
+ if (ctx->hdr_len) {
+ if (unlikely(ctx->hdr_len < ETH_HLEN)) {
+ pr_debug("%s: short hdr %u\n", enet->netdev->name,
+ ctx->hdr_len);
+ return -EINVAL;
+ }
+
+ if (unlikely(ctx->hdr_len > enet->cfg.split_hdr)) {
+ pr_debug("%s: rx error: hdr len %u exceeds hdr buffer size %u\n",
+ enet->netdev->name, ctx->hdr_len,
+ enet->cfg.split_hdr);
+ return -EINVAL;
+ }
+
+ return 0;
+ }
+
+ if (unlikely(ctx->len < ETH_HLEN)) {
+ pr_debug("%s: short packet %u\n", enet->netdev->name, ctx->len);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static struct sk_buff *eea_build_skb(void *buf, u32 buflen, u32 headroom,
+ u32 len)
+{
+ struct sk_buff *skb;
+
+ skb = build_skb(buf, buflen);
+ if (unlikely(!skb))
+ return NULL;
+
+ skb_reserve(skb, headroom);
+ skb_put(skb, len);
+
+ return skb;
+}
+
+static struct sk_buff *eea_rx_build_split_hdr_skb(struct eea_net_rx *rx,
+ struct eea_rx_ctx *ctx)
+{
+ struct eea_rx_meta *meta = ctx->meta;
+ struct sk_buff *skb;
+ u32 truesize;
+
+ dma_sync_single_for_cpu(rx->enet->edev->dma_dev, meta->hdr_dma,
+ ctx->hdr_len, DMA_FROM_DEVICE);
+
+ skb = napi_alloc_skb(rx->napi, ctx->hdr_len);
+ if (unlikely(!skb))
+ return NULL;
+
+ truesize = meta->headroom + ctx->len;
+
+ skb_put_data(skb, ctx->meta->hdr_addr, ctx->hdr_len);
+
+ if (ctx->len) {
+ skb_add_rx_frag(skb, 0, meta->page,
+ meta->offset + meta->headroom,
+ ctx->len, truesize);
+
+ eea_consume_rx_buffer(rx, meta, truesize);
+ }
+
+ skb_mark_for_recycle(skb);
+
+ return skb;
+}
+
+static struct sk_buff *eea_rx_build_skb(struct eea_net_rx *rx,
+ struct eea_rx_ctx *ctx)
+{
+ struct eea_rx_meta *meta = ctx->meta;
+ u32 len, shinfo_size, truesize;
+ struct sk_buff *skb;
+ struct page *page;
+ void *buf, *pkt;
+
+ page = meta->page;
+ if (!page)
+ return NULL;
+
+ shinfo_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
+ buf = page_address(page) + meta->offset;
+ pkt = buf + meta->headroom;
+ len = ctx->len;
+ truesize = meta->headroom + ctx->len + shinfo_size;
+
+ skb = eea_build_skb(buf, truesize, pkt - buf, len);
+ if (unlikely(!skb))
+ return NULL;
+
+ eea_consume_rx_buffer(rx, meta, truesize);
+ skb_mark_for_recycle(skb);
+
+ return skb;
+}
+
+static int eea_skb_append_buf(struct eea_net_rx *rx, struct eea_rx_ctx *ctx)
+{
+ struct sk_buff *curr_skb = rx->pkt.curr_skb;
+ struct sk_buff *head_skb = rx->pkt.head_skb;
+ int num_skb_frags;
+ int offset;
+
+ if (!curr_skb)
+ curr_skb = head_skb;
+
+ num_skb_frags = skb_shinfo(curr_skb)->nr_frags;
+ if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) {
+ struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC);
+
+ if (unlikely(!nskb))
+ return -ENOMEM;
+
+ skb_mark_for_recycle(nskb);
+
+ if (curr_skb == head_skb)
+ skb_shinfo(curr_skb)->frag_list = nskb;
+ else
+ curr_skb->next = nskb;
+
+ curr_skb = nskb;
+ head_skb->truesize += nskb->truesize;
+ num_skb_frags = 0;
+
+ rx->pkt.curr_skb = curr_skb;
+ }
+
+ if (curr_skb != head_skb) {
+ head_skb->data_len += ctx->len;
+ head_skb->len += ctx->len;
+ head_skb->truesize += ctx->meta->truesize;
+ }
+
+ offset = ctx->meta->offset + ctx->meta->headroom;
+
+ skb_add_rx_frag(curr_skb, num_skb_frags, ctx->meta->page,
+ offset, ctx->len, ctx->meta->truesize);
+
+ eea_consume_rx_buffer(rx, ctx->meta, ctx->meta->headroom + ctx->len);
+
+ return 0;
+}
+
+static int process_remain_buf(struct eea_net_rx *rx, struct eea_rx_ctx *ctx)
{
- /* Empty function; will be implemented in a subsequent commit. */
+ struct eea_net *enet = rx->enet;
+
+ if (eea_harden_check_overflow(ctx, enet))
+ goto err;
+
+ if (eea_skb_append_buf(rx, ctx))
+ goto err;
+
+ return 0;
+
+err:
+ dev_kfree_skb(rx->pkt.head_skb);
+ rx->pkt.do_drop = true;
+ rx->pkt.head_skb = NULL;
+ return 0;
+}
+
+static int process_first_buf(struct eea_net_rx *rx, struct eea_rx_ctx *ctx)
+{
+ struct eea_net *enet = rx->enet;
+ struct sk_buff *skb = NULL;
+
+ if (eea_harden_check_size(ctx, enet))
+ goto err;
+
+ rx->pkt.data_valid = ctx->flags & EEA_DESC_F_DATA_VALID;
+
+ if (ctx->hdr_len)
+ skb = eea_rx_build_split_hdr_skb(rx, ctx);
+ else
+ skb = eea_rx_build_skb(rx, ctx);
+
+ if (unlikely(!skb))
+ goto err;
+
+ rx->pkt.head_skb = skb;
+
+ return 0;
+
+err:
+ rx->pkt.do_drop = true;
+ return 0;
+}
+
+static void eea_submit_skb(struct eea_net_rx *rx, struct sk_buff *skb,
+ struct eea_rx_cdesc *desc)
+{
+ struct eea_net *enet = rx->enet;
+
+ if (rx->pkt.data_valid)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ if (enet->cfg.ts_cfg.rx_filter == HWTSTAMP_FILTER_ALL)
+ skb_hwtstamps(skb)->hwtstamp = EEA_DESC_TS(desc) +
+ enet->hw_ts_offset;
+
+ skb_record_rx_queue(skb, rx->index);
+ skb->protocol = eth_type_trans(skb, enet->netdev);
+
+ napi_gro_receive(rx->napi, skb);
+}
+
+static int eea_rx_desc_to_ctx(struct eea_net_rx *rx,
+ struct eea_rx_ctx *ctx,
+ struct eea_rx_cdesc *desc)
+{
+ u16 id;
+
+ ctx->meta = NULL;
+
+ id = le16_to_cpu(desc->id);
+ if (unlikely(id >= rx->ering->num)) {
+ netdev_err(rx->enet->netdev, "rx invalid id %d\n", id);
+ return -EINVAL;
+ }
+
+ ctx->meta = &rx->meta[id];
+ ctx->len = le16_to_cpu(desc->len);
+ if (unlikely(ctx->len > ctx->meta->len)) {
+ netdev_err(rx->enet->netdev, "rx invalid len(%d) id:%d\n",
+ ctx->len, id);
+ return -EINVAL;
+ }
+
+ ctx->flags = le16_to_cpu(desc->flags);
+
+ ctx->hdr_len = 0;
+ if (ctx->flags & EEA_DESC_F_SPLIT_HDR) {
+ ctx->hdr_len = le16_to_cpu(desc->len_ex) &
+ EEA_RX_CDESC_HDR_LEN_MASK;
+ }
+
+ ctx->more = ctx->flags & EEA_RING_DESC_F_MORE;
+
return 0;
}
+static int eea_cleanrx(struct eea_net_rx *rx, int budget,
+ struct eea_rx_ctx *ctx)
+{
+ struct eea_rx_cdesc *desc;
+ struct eea_rx_meta *meta;
+ int packets, err;
+
+ for (packets = 0; packets < budget; ) {
+ desc = ering_cq_get_desc(rx->ering);
+ if (!desc)
+ break;
+
+ err = eea_rx_desc_to_ctx(rx, ctx, desc);
+ if (unlikely(err)) {
+ if (ctx->meta)
+ eea_rx_meta_put(rx, ctx->meta);
+
+ if (rx->pkt.idx)
+ dev_kfree_skb(rx->pkt.head_skb);
+
+ ctx->more = false;
+ goto ack;
+ }
+
+ meta = ctx->meta;
+
+ if (unlikely(rx->pkt.do_drop))
+ goto skip;
+
+ eea_rx_meta_dma_sync_for_cpu(rx, meta, ctx->len);
+
+ rx->pkt.recv_len += ctx->len;
+ rx->pkt.recv_len += ctx->hdr_len;
+
+ if (!rx->pkt.idx)
+ process_first_buf(rx, ctx);
+ else
+ process_remain_buf(rx, ctx);
+
+ ++rx->pkt.idx;
+
+ if (!ctx->more && rx->pkt.head_skb) {
+ eea_submit_skb(rx, rx->pkt.head_skb, desc);
+ ++packets;
+ }
+
+skip:
+ eea_rx_meta_put(rx, meta);
+ack:
+ ering_cq_ack_desc(rx->ering, 1);
+
+ if (!ctx->more)
+ memset(&rx->pkt, 0, sizeof(rx->pkt));
+ }
+
+ return packets;
+}
+
+static bool eea_rx_post(struct eea_net_rx *rx)
+{
+ u32 tailroom, headroom, room, len;
+ struct eea_rx_meta *meta;
+ struct eea_rx_desc *desc;
+ int err = 0, num = 0;
+ dma_addr_t addr;
+
+ tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ headroom = rx->headroom;
+ room = headroom + tailroom;
+
+ while (true) {
+ meta = eea_rx_meta_get(rx);
+ if (!meta)
+ break;
+
+ err = eea_alloc_rx_buffer(rx, meta);
+ if (err) {
+ eea_rx_meta_put(rx, meta);
+ break;
+ }
+
+ len = PAGE_SIZE - meta->offset - room;
+ addr = meta->dma + meta->offset + headroom;
+
+ desc = ering_sq_alloc_desc(rx->ering, meta->id, true, 0);
+ desc->addr = cpu_to_le64(addr);
+ desc->len = cpu_to_le16(len);
+
+ if (meta->hdr_addr)
+ desc->hdr_addr = cpu_to_le64(meta->hdr_dma);
+
+ ering_sq_commit_desc(rx->ering);
+
+ meta->truesize = len + room;
+ meta->headroom = headroom;
+ meta->tailroom = tailroom;
+ meta->len = len;
+ ++num;
+ }
+
+ if (num)
+ ering_kick(rx->ering);
+
+ /* true means busy, napi should be called again. */
+ return !!err;
+}
+
+static int eea_poll(struct napi_struct *napi, int budget)
+{
+ struct eea_irq_blk *blk = container_of(napi, struct eea_irq_blk, napi);
+ struct eea_net_rx *rx = blk->rx;
+ struct eea_net_tx *tx = &rx->enet->tx[rx->index];
+ struct eea_rx_ctx ctx = {};
+ bool busy = false;
+ u32 received;
+
+ eea_poll_tx(tx, budget);
+
+ received = eea_cleanrx(rx, budget, &ctx);
+
+ if (rx->ering->num_free > budget)
+ busy |= eea_rx_post(rx);
+
+ busy |= received >= budget;
+
+ if (busy)
+ return budget;
+
+ if (napi_complete_done(napi, received))
+ ering_irq_active(rx->ering, tx->ering);
+
+ return received;
+}
+
static void eea_free_rx_buffers(struct eea_net_rx *rx, struct eea_net_cfg *cfg)
{
struct eea_rx_meta *meta;
--
2.32.0.3.g01195cf9f
next prev parent reply other threads:[~2026-03-23 7:44 UTC|newest]
Thread overview: 13+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-03-23 7:44 [PATCH net-next v35 0/8] eea: Add basic driver framework for Alibaba Elastic Ethernet Adaptor Xuan Zhuo
2026-03-23 7:44 ` [PATCH net-next v35 1/8] eea: introduce PCI framework Xuan Zhuo
2026-03-23 7:44 ` [PATCH net-next v35 2/8] eea: introduce ring and descriptor structures Xuan Zhuo
2026-03-23 7:44 ` [PATCH net-next v35 3/8] eea: probe the netdevice and create adminq Xuan Zhuo
2026-03-23 7:44 ` [PATCH net-next v35 4/8] eea: create/destroy rx,tx queues for netdevice open and stop Xuan Zhuo
2026-03-23 7:44 ` Xuan Zhuo [this message]
2026-03-23 7:44 ` [PATCH net-next v35 6/8] eea: implement packet transmit logic Xuan Zhuo
2026-03-23 7:44 ` [PATCH net-next v35 7/8] eea: introduce ethtool support Xuan Zhuo
2026-03-23 7:44 ` [PATCH net-next v35 8/8] eea: introduce callback for ndo_get_stats64 Xuan Zhuo
2026-03-26 11:26 ` [PATCH net-next v35 0/8] eea: Add basic driver framework for Alibaba Elastic Ethernet Adaptor Paolo Abeni
2026-03-26 11:38 ` Xuan Zhuo
2026-03-26 12:06 ` Paolo Abeni
2026-03-26 12:11 ` Xuan Zhuo
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260323074441.91691-6-xuanzhuo@linux.alibaba.com \
--to=xuanzhuo@linux.alibaba.com \
--cc=andrew+netdev@lunn.ch \
--cc=davem@davemloft.net \
--cc=dong100@mucse.com \
--cc=dust.li@linux.alibaba.com \
--cc=edumazet@google.com \
--cc=enelsonmoore@gmail.com \
--cc=guwen@linux.alibaba.com \
--cc=hkallweit1@gmail.com \
--cc=kuba@kernel.org \
--cc=lulie@linux.alibaba.com \
--cc=netdev@vger.kernel.org \
--cc=pabeni@redhat.com \
--cc=vadim.fedorenko@linux.dev \
--cc=wangruikang@iscas.ac.cn \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox