* [PATCH net-next 1/4] net: ethernet: mtk_eth_soc: rely on page_pool for single page buffers
2022-07-09 15:48 [PATCH net-next 0/4] mtk_eth_soc: add xdp support Lorenzo Bianconi
@ 2022-07-09 15:48 ` Lorenzo Bianconi
2022-07-09 15:48 ` [PATCH net-next 2/4] net: ethernet: mtk_eth_soc: add basic XDP support Lorenzo Bianconi
` (2 subsequent siblings)
3 siblings, 0 replies; 9+ messages in thread
From: Lorenzo Bianconi @ 2022-07-09 15:48 UTC (permalink / raw)
To: netdev
Cc: nbd, john, sean.wang, Mark-MC.Lee, davem, edumazet, kuba, pabeni,
matthias.bgg, linux-mediatek, ilias.apalodimas, lorenzo.bianconi,
jbrouer
Rely on the page_pool allocator for single-page buffers in order to keep
them DMA-mapped and add skb recycling support.
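In practice this replaces the per-buffer napi_alloc_frag() +
dma_map_single() pattern with a per-ring pool whose pages stay DMA-mapped
for their whole lifetime. A minimal sketch of that lifecycle, using the
same page_pool APIs the patch relies on (ring_size, dev and the rx glue
around them are placeholders):

	/* sketch: pages come out of the pool already DMA-mapped */
	struct page_pool_params pp_params = {
		.flags	   = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.pool_size = ring_size,
		.nid	   = NUMA_NO_NODE,
		.dev	   = dev,	/* device performing the DMA */
		.dma_dir   = DMA_FROM_DEVICE,
		.offset	   = XDP_PACKET_HEADROOM,
		.max_len   = PAGE_SIZE - XDP_PACKET_HEADROOM,
	};
	struct page_pool *pp = page_pool_create(&pp_params);

	/* refill: no dma_map_single(), the mapping is cached in the page */
	struct page *page = page_pool_alloc_pages(pp, GFP_ATOMIC | __GFP_NOWARN);
	dma_addr_t dma_addr = page_pool_get_dma_addr(page) + XDP_PACKET_HEADROOM;

	/* rx completion: recycled skbs hand the page back to the pool */
	skb_mark_for_recycle(skb);

	/* teardown */
	page_pool_put_full_page(pp, page, false);
	page_pool_destroy(pp);

PP_FLAG_DMA_SYNC_DEV also lets the core sync only the portion of the
buffer the hardware can actually write (.max_len) rather than the whole
page.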
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
---
drivers/net/ethernet/mediatek/Kconfig | 1 +
drivers/net/ethernet/mediatek/mtk_eth_soc.c | 185 +++++++++++++++-----
drivers/net/ethernet/mediatek/mtk_eth_soc.h | 10 ++
3 files changed, 156 insertions(+), 40 deletions(-)
diff --git a/drivers/net/ethernet/mediatek/Kconfig b/drivers/net/ethernet/mediatek/Kconfig
index da4ec235d146..d2422c7b31b0 100644
--- a/drivers/net/ethernet/mediatek/Kconfig
+++ b/drivers/net/ethernet/mediatek/Kconfig
@@ -17,6 +17,7 @@ config NET_MEDIATEK_SOC
select PINCTRL
select PHYLINK
select DIMLIB
+ select PAGE_POOL
help
This driver supports the gigabit ethernet MACs in the
MediaTek SoC family.
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 6beb3d4873a3..9a92d602ebd5 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -1432,6 +1432,68 @@ static void mtk_update_rx_cpu_idx(struct mtk_eth *eth)
}
}
+static struct page_pool *mtk_create_page_pool(struct mtk_eth *eth,
+ struct xdp_rxq_info *xdp_q,
+ int id, int size)
+{
+ struct page_pool_params pp_params = {
+ .order = 0,
+ .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
+ .pool_size = size,
+ .nid = NUMA_NO_NODE,
+ .dev = eth->dma_dev,
+ .dma_dir = DMA_FROM_DEVICE,
+ .offset = MTK_PP_HEADROOM,
+ .max_len = MTK_PP_MAX_BUF_SIZE,
+ };
+ struct page_pool *pp;
+ int err;
+
+ pp = page_pool_create(&pp_params);
+ if (IS_ERR(pp))
+ return pp;
+
+ err = __xdp_rxq_info_reg(xdp_q, &eth->dummy_dev, eth->rx_napi.napi_id,
+ id, PAGE_SIZE);
+ if (err < 0)
+ goto err_free_pp;
+
+ err = xdp_rxq_info_reg_mem_model(xdp_q, MEM_TYPE_PAGE_POOL, pp);
+ if (err)
+ goto err_unregister_rxq;
+
+ return pp;
+
+err_unregister_rxq:
+ xdp_rxq_info_unreg(xdp_q);
+err_free_pp:
+ page_pool_destroy(pp);
+
+ return ERR_PTR(err);
+}
+
+static void *mtk_page_pool_get_buff(struct page_pool *pp, dma_addr_t *dma_addr,
+ gfp_t gfp_mask)
+{
+ struct page *page;
+
+ page = page_pool_alloc_pages(pp, gfp_mask | __GFP_NOWARN);
+ if (!page)
+ return NULL;
+
+ *dma_addr = page_pool_get_dma_addr(page) + MTK_PP_HEADROOM;
+ return page_address(page);
+}
+
+static void mtk_rx_put_buff(struct mtk_rx_ring *ring, void *data, bool napi)
+{
+ if (ring->page_pool)
+ page_pool_put_full_page(ring->page_pool,
+ virt_to_head_page(data), napi);
+ else
+ skb_free_frag(data);
+}
+
static int mtk_poll_rx(struct napi_struct *napi, int budget,
struct mtk_eth *eth)
{
@@ -1445,9 +1507,9 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
while (done < budget) {
unsigned int pktlen, *rxdcsum;
+ u32 hash, reason, reserve_len;
struct net_device *netdev;
dma_addr_t dma_addr;
- u32 hash, reason;
int mac = 0;
ring = mtk_get_rx_ring(eth);
@@ -1478,36 +1540,54 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
goto release_desc;
/* alloc new buffer */
- if (ring->frag_size <= PAGE_SIZE)
- new_data = napi_alloc_frag(ring->frag_size);
- else
- new_data = mtk_max_lro_buf_alloc(GFP_ATOMIC);
- if (unlikely(!new_data)) {
- netdev->stats.rx_dropped++;
- goto release_desc;
- }
- dma_addr = dma_map_single(eth->dma_dev,
- new_data + NET_SKB_PAD +
- eth->ip_align,
- ring->buf_size,
- DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr))) {
- skb_free_frag(new_data);
- netdev->stats.rx_dropped++;
- goto release_desc;
- }
+ if (ring->page_pool) {
+ new_data = mtk_page_pool_get_buff(ring->page_pool,
+ &dma_addr,
+ GFP_ATOMIC);
+ if (unlikely(!new_data)) {
+ netdev->stats.rx_dropped++;
+ goto release_desc;
+ }
+ } else {
+ if (ring->frag_size <= PAGE_SIZE)
+ new_data = napi_alloc_frag(ring->frag_size);
+ else
+ new_data = mtk_max_lro_buf_alloc(GFP_ATOMIC);
+
+ if (unlikely(!new_data)) {
+ netdev->stats.rx_dropped++;
+ goto release_desc;
+ }
- dma_unmap_single(eth->dma_dev, trxd.rxd1,
- ring->buf_size, DMA_FROM_DEVICE);
+ dma_addr = dma_map_single(eth->dma_dev,
+ new_data + NET_SKB_PAD + eth->ip_align,
+ ring->buf_size, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(eth->dma_dev,
+ dma_addr))) {
+ skb_free_frag(new_data);
+ netdev->stats.rx_dropped++;
+ goto release_desc;
+ }
+
+ dma_unmap_single(eth->dma_dev, trxd.rxd1,
+ ring->buf_size, DMA_FROM_DEVICE);
+ }
/* receive data */
skb = build_skb(data, ring->frag_size);
if (unlikely(!skb)) {
- skb_free_frag(data);
+ mtk_rx_put_buff(ring, data, true);
netdev->stats.rx_dropped++;
goto skip_rx;
}
- skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
+
+ if (ring->page_pool) {
+ reserve_len = MTK_PP_HEADROOM;
+ skb_mark_for_recycle(skb);
+ } else {
+ reserve_len = NET_SKB_PAD + NET_IP_ALIGN;
+ }
+ skb_reserve(skb, reserve_len);
pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
skb->dev = netdev;
@@ -1561,7 +1641,6 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
skip_rx:
ring->data[idx] = new_data;
rxd->rxd1 = (unsigned int)dma_addr;
-
release_desc:
if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
rxd->rxd2 = RX_DMA_LSO;
@@ -1569,7 +1648,6 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size);
ring->calc_idx = idx;
-
done++;
}
@@ -1933,13 +2011,15 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
if (!ring->data)
return -ENOMEM;
- for (i = 0; i < rx_dma_size; i++) {
- if (ring->frag_size <= PAGE_SIZE)
- ring->data[i] = netdev_alloc_frag(ring->frag_size);
- else
- ring->data[i] = mtk_max_lro_buf_alloc(GFP_KERNEL);
- if (!ring->data[i])
- return -ENOMEM;
+ if (!eth->hwlro) {
+ struct page_pool *pp;
+
+ pp = mtk_create_page_pool(eth, &ring->xdp_q, ring_no,
+ rx_dma_size);
+ if (IS_ERR(pp))
+ return PTR_ERR(pp);
+
+ ring->page_pool = pp;
}
ring->dma = dma_alloc_coherent(eth->dma_dev,
@@ -1950,16 +2030,33 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
for (i = 0; i < rx_dma_size; i++) {
struct mtk_rx_dma_v2 *rxd;
-
- dma_addr_t dma_addr = dma_map_single(eth->dma_dev,
- ring->data[i] + NET_SKB_PAD + eth->ip_align,
- ring->buf_size,
- DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
- return -ENOMEM;
+ dma_addr_t dma_addr;
+ void *data;
rxd = ring->dma + i * eth->soc->txrx.rxd_size;
+ if (ring->page_pool) {
+ data = mtk_page_pool_get_buff(ring->page_pool,
+ &dma_addr, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+ } else {
+ if (ring->frag_size <= PAGE_SIZE)
+ data = netdev_alloc_frag(ring->frag_size);
+ else
+ data = mtk_max_lro_buf_alloc(GFP_KERNEL);
+
+ if (!data)
+ return -ENOMEM;
+
+ dma_addr = dma_map_single(eth->dma_dev,
+ data + NET_SKB_PAD + eth->ip_align,
+ ring->buf_size, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(eth->dma_dev,
+ dma_addr)))
+ return -ENOMEM;
+ }
rxd->rxd1 = (unsigned int)dma_addr;
+ ring->data[i] = data;
if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
rxd->rxd2 = RX_DMA_LSO;
@@ -1975,6 +2072,7 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
rxd->rxd8 = 0;
}
}
+
ring->dma_size = rx_dma_size;
ring->calc_idx_update = false;
ring->calc_idx = rx_dma_size - 1;
@@ -2026,7 +2124,7 @@ static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring)
dma_unmap_single(eth->dma_dev, rxd->rxd1,
ring->buf_size, DMA_FROM_DEVICE);
- skb_free_frag(ring->data[i]);
+ mtk_rx_put_buff(ring, ring->data[i], false);
}
kfree(ring->data);
ring->data = NULL;
@@ -2038,6 +2136,13 @@ static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring)
ring->dma, ring->phys);
ring->dma = NULL;
}
+
+ if (ring->page_pool) {
+ if (xdp_rxq_info_is_reg(&ring->xdp_q))
+ xdp_rxq_info_unreg(&ring->xdp_q);
+ page_pool_destroy(ring->page_pool);
+ ring->page_pool = NULL;
+ }
}
static int mtk_hwlro_rx_init(struct mtk_eth *eth)
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index 0a632896451a..26c019319055 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -18,6 +18,8 @@
#include <linux/rhashtable.h>
#include <linux/dim.h>
#include <linux/bitfield.h>
+#include <net/page_pool.h>
+#include <linux/bpf_trace.h>
#include "mtk_ppe.h"
#define MTK_QDMA_PAGE_SIZE 2048
@@ -49,6 +51,11 @@
#define MTK_HW_FEATURES_MT7628 (NETIF_F_SG | NETIF_F_RXCSUM)
#define NEXT_DESP_IDX(X, Y) (((X) + 1) & ((Y) - 1))
+#define MTK_PP_HEADROOM XDP_PACKET_HEADROOM
+#define MTK_PP_PAD (MTK_PP_HEADROOM + \
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+#define MTK_PP_MAX_BUF_SIZE (PAGE_SIZE - MTK_PP_PAD)
+
#define MTK_QRX_OFFSET 0x10
#define MTK_MAX_RX_RING_NUM 4
@@ -745,6 +752,9 @@ struct mtk_rx_ring {
bool calc_idx_update;
u16 calc_idx;
u32 crx_idx_reg;
+ /* page_pool */
+ struct page_pool *page_pool;
+ struct xdp_rxq_info xdp_q;
};
enum mkt_eth_capabilities {
--
2.36.1
* [PATCH net-next 2/4] net: ethernet: mtk_eth_soc: add basic XDP support
2022-07-09 15:48 [PATCH net-next 0/4] mtk_eth_soc: add xdp support Lorenzo Bianconi
2022-07-09 15:48 ` [PATCH net-next 1/4] net: ethernet: mtk_eth_soc: rely on page_pool for single page buffers Lorenzo Bianconi
@ 2022-07-09 15:48 ` Lorenzo Bianconi
2022-07-12 10:12 ` Paolo Abeni
2022-07-09 15:48 ` [PATCH net-next 3/4] net: ethernet: mtk_eth_soc: introduce xdp ethtool counters Lorenzo Bianconi
2022-07-09 15:48 ` [PATCH net-next 4/4] net: ethernet: mtk_eth_soc: add xmit XDP support Lorenzo Bianconi
3 siblings, 1 reply; 9+ messages in thread
From: Lorenzo Bianconi @ 2022-07-09 15:48 UTC (permalink / raw)
To: netdev
Cc: nbd, john, sean.wang, Mark-MC.Lee, davem, edumazet, kuba, pabeni,
matthias.bgg, linux-mediatek, ilias.apalodimas, lorenzo.bianconi,
jbrouer
Introduce basic XDP support to mtk_eth_soc driver.
Supported XDP verdicts:
- XDP_PASS
- XDP_DROP
- XDP_REDIRECT
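Any stock XDP program returning one of these verdicts can be attached to
the device. A minimal drop-everything example for testing (the program
itself is illustrative, not part of this series):

/* drops every frame received on the attached interface */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int xdp_drop_all(struct xdp_md *ctx)
{
	return XDP_DROP;
}

char _license[] SEC("license") = "GPL";

loaded with e.g.: ip link set dev <ifname> xdp obj xdp_drop.o sec xdp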
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
---
drivers/net/ethernet/mediatek/mtk_eth_soc.c | 146 +++++++++++++++++---
drivers/net/ethernet/mediatek/mtk_eth_soc.h | 2 +
2 files changed, 130 insertions(+), 18 deletions(-)
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 9a92d602ebd5..3b583abb599d 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -1494,22 +1494,55 @@ static void mtk_rx_put_buff(struct mtk_rx_ring *ring, void *data, bool napi)
skb_free_frag(data);
}
+static u32 mtk_xdp_run(struct mtk_rx_ring *ring, struct bpf_prog *prog,
+ struct xdp_buff *xdp, struct net_device *dev)
+{
+ u32 act = XDP_PASS;
+
+ if (!prog)
+ return XDP_PASS;
+
+ act = bpf_prog_run_xdp(prog, xdp);
+ switch (act) {
+ case XDP_PASS:
+ return XDP_PASS;
+ case XDP_REDIRECT:
+ if (unlikely(xdp_do_redirect(dev, xdp, prog)))
+ break;
+ return XDP_REDIRECT;
+ default:
+ bpf_warn_invalid_xdp_action(dev, prog, act);
+ fallthrough;
+ case XDP_ABORTED:
+ trace_xdp_exception(dev, prog, act);
+ fallthrough;
+ case XDP_DROP:
+ break;
+ }
+
+ page_pool_put_full_page(ring->page_pool,
+ virt_to_head_page(xdp->data), true);
+ return XDP_DROP;
+}
+
static int mtk_poll_rx(struct napi_struct *napi, int budget,
struct mtk_eth *eth)
{
+ struct bpf_prog *prog = READ_ONCE(eth->prog);
struct dim_sample dim_sample = {};
struct mtk_rx_ring *ring;
int idx;
struct sk_buff *skb;
u8 *data, *new_data;
struct mtk_rx_dma_v2 *rxd, trxd;
+ bool xdp_do_redirect = false;
int done = 0, bytes = 0;
while (done < budget) {
unsigned int pktlen, *rxdcsum;
- u32 hash, reason, reserve_len;
struct net_device *netdev;
dma_addr_t dma_addr;
+ u32 hash, reason;
int mac = 0;
ring = mtk_get_rx_ring(eth);
@@ -1539,8 +1572,14 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
goto release_desc;
+ pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
+
/* alloc new buffer */
if (ring->page_pool) {
+ struct page *page = virt_to_head_page(data);
+ struct xdp_buff xdp;
+ u32 ret;
+
new_data = mtk_page_pool_get_buff(ring->page_pool,
&dma_addr,
GFP_ATOMIC);
@@ -1548,6 +1587,34 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
netdev->stats.rx_dropped++;
goto release_desc;
}
+
+ dma_sync_single_for_cpu(eth->dma_dev,
+ page_pool_get_dma_addr(page) + MTK_PP_HEADROOM,
+ pktlen, page_pool_get_dma_dir(ring->page_pool));
+
+ xdp_init_buff(&xdp, PAGE_SIZE, &ring->xdp_q);
+ xdp_prepare_buff(&xdp, data, MTK_PP_HEADROOM, pktlen,
+ false);
+ xdp_buff_clear_frags_flag(&xdp);
+
+ ret = mtk_xdp_run(ring, prog, &xdp, netdev);
+ if (ret != XDP_PASS) {
+ if (ret == XDP_REDIRECT)
+ xdp_do_redirect = true;
+ goto skip_rx;
+ }
+
+ skb = build_skb(data, PAGE_SIZE);
+ if (unlikely(!skb)) {
+ page_pool_put_full_page(ring->page_pool,
+ page, true);
+ netdev->stats.rx_dropped++;
+ goto skip_rx;
+ }
+
+ skb_reserve(skb, xdp.data - xdp.data_hard_start);
+ skb_put(skb, xdp.data_end - xdp.data);
+ skb_mark_for_recycle(skb);
} else {
if (ring->frag_size <= PAGE_SIZE)
new_data = napi_alloc_frag(ring->frag_size);
@@ -1571,27 +1638,20 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
dma_unmap_single(eth->dma_dev, trxd.rxd1,
ring->buf_size, DMA_FROM_DEVICE);
- }
- /* receive data */
- skb = build_skb(data, ring->frag_size);
- if (unlikely(!skb)) {
- mtk_rx_put_buff(ring, data, true);
- netdev->stats.rx_dropped++;
- goto skip_rx;
- }
+ skb = build_skb(data, ring->frag_size);
+ if (unlikely(!skb)) {
+ netdev->stats.rx_dropped++;
+ skb_free_frag(data);
+ goto skip_rx;
+ }
- if (ring->page_pool) {
- reserve_len = MTK_PP_HEADROOM;
- skb_mark_for_recycle(skb);
- } else {
- reserve_len = NET_SKB_PAD + NET_IP_ALIGN;
+ skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
+ skb_put(skb, pktlen);
}
- skb_reserve(skb, reserve_len);
- pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
skb->dev = netdev;
- skb_put(skb, pktlen);
+ bytes += skb->len;
if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
rxdcsum = &trxd.rxd3;
@@ -1603,7 +1663,6 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
else
skb_checksum_none_assert(skb);
skb->protocol = eth_type_trans(skb, netdev);
- bytes += pktlen;
hash = trxd.rxd4 & MTK_RXD4_FOE_ENTRY;
if (hash != MTK_RXD4_FOE_ENTRY) {
@@ -1666,6 +1725,9 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
&dim_sample);
net_dim(&eth->rx_dim, dim_sample);
+ if (prog && xdp_do_redirect)
+ xdp_do_flush_map();
+
return done;
}
@@ -2750,6 +2812,48 @@ static int mtk_stop(struct net_device *dev)
return 0;
}
+static int mtk_xdp_setup(struct net_device *dev, struct bpf_prog *prog,
+ struct netlink_ext_ack *extack)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_eth *eth = mac->hw;
+ struct bpf_prog *old_prog;
+ bool need_update;
+
+ if (eth->hwlro) {
+ NL_SET_ERR_MSG_MOD(extack, "XDP not supported with HWLRO");
+ return -EOPNOTSUPP;
+ }
+
+ if (dev->mtu > MTK_PP_MAX_BUF_SIZE) {
+ NL_SET_ERR_MSG_MOD(extack, "MTU too large for XDP");
+ return -EOPNOTSUPP;
+ }
+
+ need_update = !!eth->prog != !!prog;
+ if (netif_running(dev) && need_update)
+ mtk_stop(dev);
+
+ old_prog = xchg(&eth->prog, prog);
+ if (old_prog)
+ bpf_prog_put(old_prog);
+
+ if (netif_running(dev) && need_update)
+ return mtk_open(dev);
+
+ return 0;
+}
+
+static int mtk_xdp(struct net_device *dev, struct netdev_bpf *xdp)
+{
+ switch (xdp->command) {
+ case XDP_SETUP_PROG:
+ return mtk_xdp_setup(dev, xdp->prog, xdp->extack);
+ default:
+ return -EINVAL;
+ }
+}
+
static void ethsys_reset(struct mtk_eth *eth, u32 reset_bits)
{
regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
@@ -3045,6 +3149,11 @@ static int mtk_change_mtu(struct net_device *dev, int new_mtu)
struct mtk_eth *eth = mac->hw;
u32 mcr_cur, mcr_new;
+ if (eth->prog && length > MTK_PP_MAX_BUF_SIZE) {
+ netdev_err(dev, "Invalid MTU for XDP mode\n");
+ return -EINVAL;
+ }
+
if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
mcr_new = mcr_cur & ~MAC_MCR_MAX_RX_MASK;
@@ -3372,6 +3481,7 @@ static const struct net_device_ops mtk_netdev_ops = {
.ndo_poll_controller = mtk_poll_controller,
#endif
.ndo_setup_tc = mtk_eth_setup_tc,
+ .ndo_bpf = mtk_xdp,
};
static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index 26c019319055..a1cea93300c1 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -1088,6 +1088,8 @@ struct mtk_eth {
struct mtk_ppe *ppe;
struct rhashtable flow_table;
+
+ struct bpf_prog *prog;
};
/* struct mtk_mac - the structure that holds the info about the MACs of the
--
2.36.1
* Re: [PATCH net-next 2/4] net: ethernet: mtk_eth_soc: add basic XDP support
2022-07-09 15:48 ` [PATCH net-next 2/4] net: ethernet: mtk_eth_soc: add basic XDP support Lorenzo Bianconi
@ 2022-07-12 10:12 ` Paolo Abeni
2022-07-12 16:15 ` Lorenzo Bianconi
0 siblings, 1 reply; 9+ messages in thread
From: Paolo Abeni @ 2022-07-12 10:12 UTC (permalink / raw)
To: Lorenzo Bianconi, netdev
Cc: nbd, john, sean.wang, Mark-MC.Lee, davem, edumazet, kuba,
matthias.bgg, linux-mediatek, ilias.apalodimas, lorenzo.bianconi,
jbrouer
On Sat, 2022-07-09 at 17:48 +0200, Lorenzo Bianconi wrote:
> Introduce basic XDP support to mtk_eth_soc driver.
> Supported XDP verdicts:
> - XDP_PASS
> - XDP_DROP
> - XDP_REDIRECT
>
> Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
> ---
> drivers/net/ethernet/mediatek/mtk_eth_soc.c | 146 +++++++++++++++++---
> drivers/net/ethernet/mediatek/mtk_eth_soc.h | 2 +
> 2 files changed, 130 insertions(+), 18 deletions(-)
>
> diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
> index 9a92d602ebd5..3b583abb599d 100644
> --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
> +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
> @@ -1494,22 +1494,55 @@ static void mtk_rx_put_buff(struct mtk_rx_ring *ring, void *data, bool napi)
> skb_free_frag(data);
> }
>
> +static u32 mtk_xdp_run(struct mtk_rx_ring *ring, struct bpf_prog *prog,
> + struct xdp_buff *xdp, struct net_device *dev)
> +{
> + u32 act = XDP_PASS;
> +
> + if (!prog)
> + return XDP_PASS;
> +
> + act = bpf_prog_run_xdp(prog, xdp);
> + switch (act) {
> + case XDP_PASS:
> + return XDP_PASS;
> + case XDP_REDIRECT:
> + if (unlikely(xdp_do_redirect(dev, xdp, prog)))
> + break;
> + return XDP_REDIRECT;
> + default:
> + bpf_warn_invalid_xdp_action(dev, prog, act);
> + fallthrough;
> + case XDP_ABORTED:
> + trace_xdp_exception(dev, prog, act);
> + fallthrough;
> + case XDP_DROP:
> + break;
> + }
> +
> + page_pool_put_full_page(ring->page_pool,
> + virt_to_head_page(xdp->data), true);
> + return XDP_DROP;
> +}
> +
> static int mtk_poll_rx(struct napi_struct *napi, int budget,
> struct mtk_eth *eth)
> {
> + struct bpf_prog *prog = READ_ONCE(eth->prog);
> struct dim_sample dim_sample = {};
> struct mtk_rx_ring *ring;
> int idx;
> struct sk_buff *skb;
> u8 *data, *new_data;
> struct mtk_rx_dma_v2 *rxd, trxd;
> + bool xdp_do_redirect = false;
> int done = 0, bytes = 0;
>
> while (done < budget) {
> unsigned int pktlen, *rxdcsum;
> - u32 hash, reason, reserve_len;
> struct net_device *netdev;
> dma_addr_t dma_addr;
> + u32 hash, reason;
> int mac = 0;
>
> ring = mtk_get_rx_ring(eth);
> @@ -1539,8 +1572,14 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
> if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
> goto release_desc;
>
> + pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
> +
> /* alloc new buffer */
> if (ring->page_pool) {
> + struct page *page = virt_to_head_page(data);
> + struct xdp_buff xdp;
> + u32 ret;
> +
> new_data = mtk_page_pool_get_buff(ring->page_pool,
> &dma_addr,
> GFP_ATOMIC);
> @@ -1548,6 +1587,34 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
> netdev->stats.rx_dropped++;
> goto release_desc;
> }
> +
> + dma_sync_single_for_cpu(eth->dma_dev,
> + page_pool_get_dma_addr(page) + MTK_PP_HEADROOM,
> + pktlen, page_pool_get_dma_dir(ring->page_pool));
> +
> + xdp_init_buff(&xdp, PAGE_SIZE, &ring->xdp_q);
> + xdp_prepare_buff(&xdp, data, MTK_PP_HEADROOM, pktlen,
> + false);
> + xdp_buff_clear_frags_flag(&xdp);
> +
> + ret = mtk_xdp_run(ring, prog, &xdp, netdev);
> + if (ret != XDP_PASS) {
> + if (ret == XDP_REDIRECT)
> + xdp_do_redirect = true;
> + goto skip_rx;
> + }
> +
> + skb = build_skb(data, PAGE_SIZE);
> + if (unlikely(!skb)) {
> + page_pool_put_full_page(ring->page_pool,
> + page, true);
> + netdev->stats.rx_dropped++;
> + goto skip_rx;
> + }
> +
> + skb_reserve(skb, xdp.data - xdp.data_hard_start);
> + skb_put(skb, xdp.data_end - xdp.data);
> + skb_mark_for_recycle(skb);
> } else {
> if (ring->frag_size <= PAGE_SIZE)
> new_data = napi_alloc_frag(ring->frag_size);
> @@ -1571,27 +1638,20 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
>
> dma_unmap_single(eth->dma_dev, trxd.rxd1,
> ring->buf_size, DMA_FROM_DEVICE);
> - }
>
> - /* receive data */
> - skb = build_skb(data, ring->frag_size);
> - if (unlikely(!skb)) {
> - mtk_rx_put_buff(ring, data, true);
> - netdev->stats.rx_dropped++;
> - goto skip_rx;
> - }
> + skb = build_skb(data, ring->frag_size);
> + if (unlikely(!skb)) {
> + netdev->stats.rx_dropped++;
> + skb_free_frag(data);
> + goto skip_rx;
> + }
>
> - if (ring->page_pool) {
> - reserve_len = MTK_PP_HEADROOM;
> - skb_mark_for_recycle(skb);
> - } else {
> - reserve_len = NET_SKB_PAD + NET_IP_ALIGN;
> + skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
> + skb_put(skb, pktlen);
> }
> - skb_reserve(skb, reserve_len);
>
> - pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
> skb->dev = netdev;
> - skb_put(skb, pktlen);
> + bytes += skb->len;
>
> if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
> rxdcsum = &trxd.rxd3;
> @@ -1603,7 +1663,6 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
> else
> skb_checksum_none_assert(skb);
> skb->protocol = eth_type_trans(skb, netdev);
> - bytes += pktlen;
>
> hash = trxd.rxd4 & MTK_RXD4_FOE_ENTRY;
> if (hash != MTK_RXD4_FOE_ENTRY) {
> @@ -1666,6 +1725,9 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
> &dim_sample);
> net_dim(&eth->rx_dim, dim_sample);
>
> + if (prog && xdp_do_redirect)
> + xdp_do_flush_map();
> +
> return done;
> }
>
> @@ -2750,6 +2812,48 @@ static int mtk_stop(struct net_device *dev)
> return 0;
> }
>
> +static int mtk_xdp_setup(struct net_device *dev, struct bpf_prog *prog,
> + struct netlink_ext_ack *extack)
> +{
> + struct mtk_mac *mac = netdev_priv(dev);
> + struct mtk_eth *eth = mac->hw;
> + struct bpf_prog *old_prog;
> + bool need_update;
> +
> + if (eth->hwlro) {
> + NL_SET_ERR_MSG_MOD(extack, "XDP not supported with HWLRO");
> + return -EOPNOTSUPP;
> + }
> +
> + if (dev->mtu > MTK_PP_MAX_BUF_SIZE) {
> + NL_SET_ERR_MSG_MOD(extack, "MTU too large for XDP");
> + return -EOPNOTSUPP;
> + }
> +
> + need_update = !!eth->prog != !!prog;
> + if (netif_running(dev) && need_update)
> + mtk_stop(dev);
> +
> + old_prog = xchg(&eth->prog, prog);
> + if (old_prog)
> + bpf_prog_put(old_prog);
> +
> + if (netif_running(dev) && need_update)
> + return mtk_open(dev);
> +
> + return 0;
> +}
> +
> +static int mtk_xdp(struct net_device *dev, struct netdev_bpf *xdp)
> +{
> + switch (xdp->command) {
> + case XDP_SETUP_PROG:
> + return mtk_xdp_setup(dev, xdp->prog, xdp->extack);
> + default:
> + return -EINVAL;
> + }
> +}
> +
> static void ethsys_reset(struct mtk_eth *eth, u32 reset_bits)
> {
> regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
> @@ -3045,6 +3149,11 @@ static int mtk_change_mtu(struct net_device *dev, int new_mtu)
> struct mtk_eth *eth = mac->hw;
> u32 mcr_cur, mcr_new;
>
> + if (eth->prog && length > MTK_PP_MAX_BUF_SIZE) {
> + netdev_err(dev, "Invalid MTU for XDP mode\n");
> + return -EINVAL;
> + }
> +
> if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
> mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
> mcr_new = mcr_cur & ~MAC_MCR_MAX_RX_MASK;
> @@ -3372,6 +3481,7 @@ static const struct net_device_ops mtk_netdev_ops = {
> .ndo_poll_controller = mtk_poll_controller,
> #endif
> .ndo_setup_tc = mtk_eth_setup_tc,
> + .ndo_bpf = mtk_xdp,
> };
>
> static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
> diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
> index 26c019319055..a1cea93300c1 100644
> --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
> +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
> @@ -1088,6 +1088,8 @@ struct mtk_eth {
>
> struct mtk_ppe *ppe;
> struct rhashtable flow_table;
> +
> + struct bpf_prog *prog;
The XDP program is apparently under an RCU protection scheme (otherwise
you will get UaF when replacing it). Why don't you have explicit RCU
annotations? (here, and where the 'prog' field is touched).
Thanks!
Paolo
* Re: [PATCH net-next 2/4] net: ethernet: mtk_eth_soc: add basic XDP support
2022-07-12 10:12 ` Paolo Abeni
@ 2022-07-12 16:15 ` Lorenzo Bianconi
0 siblings, 0 replies; 9+ messages in thread
From: Lorenzo Bianconi @ 2022-07-12 16:15 UTC (permalink / raw)
To: Paolo Abeni
Cc: netdev, nbd, john, sean.wang, Mark-MC.Lee, davem, edumazet, kuba,
matthias.bgg, linux-mediatek, ilias.apalodimas, lorenzo.bianconi,
jbrouer
> On Sat, 2022-07-09 at 17:48 +0200, Lorenzo Bianconi wrote:
[...]
>
> The XDP program is apparently under an RCU protection scheme (otherwise
> you will get UaF when replacing it). Why don't you have explicit RCU
> annotations? (here, and where the 'prog' field is touched).
ack, I will fix it in v2.
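e.g. something along these lines (a sketch of the annotation you are
asking for, not the actual v2 code):

/* the field gains the __rcu annotation */
struct bpf_prog __rcu *prog;

/* reader, in the NAPI poll loop (softirq, RCU read side) */
struct bpf_prog *prog = rcu_dereference(eth->prog);

/* updater, in mtk_xdp_setup(); ndo_bpf runs under rtnl */
old_prog = rcu_replace_pointer(eth->prog, prog,
			       lockdep_rtnl_is_held());
if (old_prog)
	bpf_prog_put(old_prog);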
Regards,
Lorenzo
>
> Thanks!
>
> Paolo
>
* [PATCH net-next 3/4] net: ethernet: mtk_eth_soc: introduce xdp ethtool counters
2022-07-09 15:48 [PATCH net-next 0/4] mtk_eth_soc: add xdp support Lorenzo Bianconi
2022-07-09 15:48 ` [PATCH net-next 1/4] net: ethernet: mtk_eth_soc: rely on page_pool for single page buffers Lorenzo Bianconi
2022-07-09 15:48 ` [PATCH net-next 2/4] net: ethernet: mtk_eth_soc: add basic XDP support Lorenzo Bianconi
@ 2022-07-09 15:48 ` Lorenzo Bianconi
2022-07-12 10:08 ` Paolo Abeni
2022-07-09 15:48 ` [PATCH net-next 4/4] net: ethernet: mtk_eth_soc: add xmit XDP support Lorenzo Bianconi
3 siblings, 1 reply; 9+ messages in thread
From: Lorenzo Bianconi @ 2022-07-09 15:48 UTC (permalink / raw)
To: netdev
Cc: nbd, john, sean.wang, Mark-MC.Lee, davem, edumazet, kuba, pabeni,
matthias.bgg, linux-mediatek, ilias.apalodimas, lorenzo.bianconi,
jbrouer
Report xdp stats through ethtool
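The counters are folded into the existing mtk_hw_stats block, so the
ethtool read side snapshots them through the usual u64_stats seqcount;
roughly (a reader-side sketch, not the driver's exact code):

	unsigned int start;
	u64 rx_xdp_drop;

	do {
		start = u64_stats_fetch_begin(&hw_stats->syncp);
		rx_xdp_drop = hw_stats->xdp_stats.rx_xdp_drop;
	} while (u64_stats_fetch_retry(&hw_stats->syncp, start));

This keeps 64-bit counter reads consistent on 32-bit SoCs without taking
a lock in the hot path.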
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
---
drivers/net/ethernet/mediatek/mtk_eth_soc.c | 54 +++++++++++++++++----
drivers/net/ethernet/mediatek/mtk_eth_soc.h | 12 +++++
2 files changed, 57 insertions(+), 9 deletions(-)
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 3b583abb599d..ae7ba2e09df8 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -34,6 +34,10 @@ MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");
#define MTK_ETHTOOL_STAT(x) { #x, \
offsetof(struct mtk_hw_stats, x) / sizeof(u64) }
+#define MTK_ETHTOOL_XDP_STAT(x) { #x, \
+ offsetof(struct mtk_hw_stats, xdp_stats.x) / \
+ sizeof(u64) }
+
static const struct mtk_reg_map mtk_reg_map = {
.tx_irq_mask = 0x1a1c,
.tx_irq_status = 0x1a18,
@@ -141,6 +145,13 @@ static const struct mtk_ethtool_stats {
MTK_ETHTOOL_STAT(rx_long_errors),
MTK_ETHTOOL_STAT(rx_checksum_errors),
MTK_ETHTOOL_STAT(rx_flow_control_packets),
+ MTK_ETHTOOL_XDP_STAT(rx_xdp_redirect),
+ MTK_ETHTOOL_XDP_STAT(rx_xdp_pass),
+ MTK_ETHTOOL_XDP_STAT(rx_xdp_drop),
+ MTK_ETHTOOL_XDP_STAT(rx_xdp_tx),
+ MTK_ETHTOOL_XDP_STAT(rx_xdp_tx_errors),
+ MTK_ETHTOOL_XDP_STAT(tx_xdp_xmit),
+ MTK_ETHTOOL_XDP_STAT(tx_xdp_xmit_errors),
};
static const char * const mtk_clks_source_name[] = {
@@ -1495,7 +1506,8 @@ static void mtk_rx_put_buff(struct mtk_rx_ring *ring, void *data, bool napi)
}
static u32 mtk_xdp_run(struct mtk_rx_ring *ring, struct bpf_prog *prog,
- struct xdp_buff *xdp, struct net_device *dev)
+ struct xdp_buff *xdp, struct net_device *dev,
+ struct mtk_xdp_stats *stats)
{
u32 act = XDP_PASS;
@@ -1505,10 +1517,13 @@ static u32 mtk_xdp_run(struct mtk_rx_ring *ring, struct bpf_prog *prog,
act = bpf_prog_run_xdp(prog, xdp);
switch (act) {
case XDP_PASS:
+ stats->rx_xdp_pass++;
return XDP_PASS;
case XDP_REDIRECT:
if (unlikely(xdp_do_redirect(dev, xdp, prog)))
break;
+
+ stats->rx_xdp_redirect++;
return XDP_REDIRECT;
default:
bpf_warn_invalid_xdp_action(dev, prog, act);
@@ -1520,14 +1535,38 @@ static u32 mtk_xdp_run(struct mtk_rx_ring *ring, struct bpf_prog *prog,
break;
}
+ stats->rx_xdp_drop++;
page_pool_put_full_page(ring->page_pool,
virt_to_head_page(xdp->data), true);
return XDP_DROP;
}
+static void mtk_xdp_rx_complete(struct mtk_eth *eth,
+ struct mtk_xdp_stats *stats)
+{
+ int i, xdp_do_redirect = 0;
+
+ /* update xdp ethtool stats */
+ for (i = 0; i < MTK_MAX_DEVS; i++) {
+ struct mtk_hw_stats *hw_stats = eth->mac[i]->hw_stats;
+ struct mtk_xdp_stats *xdp_stats = &hw_stats->xdp_stats;
+
+ u64_stats_update_begin(&hw_stats->syncp);
+ xdp_stats->rx_xdp_redirect += stats[i].rx_xdp_redirect;
+ xdp_do_redirect += stats[i].rx_xdp_pass;
+ xdp_stats->rx_xdp_pass += stats[i].rx_xdp_pass;
+ xdp_stats->rx_xdp_drop += stats[i].rx_xdp_drop;
+ u64_stats_update_end(&hw_stats->syncp);
+ }
+
+ if (xdp_do_redirect)
+ xdp_do_flush_map();
+}
+
static int mtk_poll_rx(struct napi_struct *napi, int budget,
struct mtk_eth *eth)
{
+ struct mtk_xdp_stats xdp_stats[MTK_MAX_DEVS] = {};
struct bpf_prog *prog = READ_ONCE(eth->prog);
struct dim_sample dim_sample = {};
struct mtk_rx_ring *ring;
@@ -1535,7 +1574,6 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
struct sk_buff *skb;
u8 *data, *new_data;
struct mtk_rx_dma_v2 *rxd, trxd;
- bool xdp_do_redirect = false;
int done = 0, bytes = 0;
while (done < budget) {
@@ -1597,12 +1635,10 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
false);
xdp_buff_clear_frags_flag(&xdp);
- ret = mtk_xdp_run(ring, prog, &xdp, netdev);
- if (ret != XDP_PASS) {
- if (ret == XDP_REDIRECT)
- xdp_do_redirect = true;
+ ret = mtk_xdp_run(ring, prog, &xdp, netdev,
+ &xdp_stats[mac]);
+ if (ret != XDP_PASS)
goto skip_rx;
- }
skb = build_skb(data, PAGE_SIZE);
if (unlikely(!skb)) {
@@ -1725,8 +1761,8 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
&dim_sample);
net_dim(&eth->rx_dim, dim_sample);
- if (prog && xdp_do_redirect)
- xdp_do_flush_map();
+ if (prog)
+ mtk_xdp_rx_complete(eth, xdp_stats);
return done;
}
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index a1cea93300c1..629cdcdd632a 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -570,6 +570,16 @@ struct mtk_tx_dma_v2 {
struct mtk_eth;
struct mtk_mac;
+struct mtk_xdp_stats {
+ u64 rx_xdp_redirect;
+ u64 rx_xdp_pass;
+ u64 rx_xdp_drop;
+ u64 rx_xdp_tx;
+ u64 rx_xdp_tx_errors;
+ u64 tx_xdp_xmit;
+ u64 tx_xdp_xmit_errors;
+};
+
/* struct mtk_hw_stats - the structure that holds the traffic statistics.
* @stats_lock: make sure that stats operations are atomic
* @reg_offset: the status register offset of the SoC
@@ -593,6 +603,8 @@ struct mtk_hw_stats {
u64 rx_checksum_errors;
u64 rx_flow_control_packets;
+ struct mtk_xdp_stats xdp_stats;
+
spinlock_t stats_lock;
u32 reg_offset;
struct u64_stats_sync syncp;
--
2.36.1
* Re: [PATCH net-next 3/4] net: ethernet: mtk_eth_soc: introduce xdp ethtool counters
2022-07-09 15:48 ` [PATCH net-next 3/4] net: ethernet: mtk_eth_soc: introduce xdp ethtool counters Lorenzo Bianconi
@ 2022-07-12 10:08 ` Paolo Abeni
2022-07-12 16:17 ` Lorenzo Bianconi
0 siblings, 1 reply; 9+ messages in thread
From: Paolo Abeni @ 2022-07-12 10:08 UTC (permalink / raw)
To: Lorenzo Bianconi, netdev
Cc: nbd, john, sean.wang, Mark-MC.Lee, davem, edumazet, kuba,
matthias.bgg, linux-mediatek, ilias.apalodimas, lorenzo.bianconi,
jbrouer
On Sat, 2022-07-09 at 17:48 +0200, Lorenzo Bianconi wrote:
> Report xdp stats through ethtool
>
> Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
> ---
> drivers/net/ethernet/mediatek/mtk_eth_soc.c | 54 +++++++++++++++++----
> drivers/net/ethernet/mediatek/mtk_eth_soc.h | 12 +++++
> 2 files changed, 57 insertions(+), 9 deletions(-)
>
> diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
> index 3b583abb599d..ae7ba2e09df8 100644
> --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
> +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
> @@ -34,6 +34,10 @@ MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");
> #define MTK_ETHTOOL_STAT(x) { #x, \
> offsetof(struct mtk_hw_stats, x) / sizeof(u64) }
>
> +#define MTK_ETHTOOL_XDP_STAT(x) { #x, \
> + offsetof(struct mtk_hw_stats, xdp_stats.x) / \
> + sizeof(u64) }
> +
> static const struct mtk_reg_map mtk_reg_map = {
> .tx_irq_mask = 0x1a1c,
> .tx_irq_status = 0x1a18,
> @@ -141,6 +145,13 @@ static const struct mtk_ethtool_stats {
> MTK_ETHTOOL_STAT(rx_long_errors),
> MTK_ETHTOOL_STAT(rx_checksum_errors),
> MTK_ETHTOOL_STAT(rx_flow_control_packets),
> + MTK_ETHTOOL_XDP_STAT(rx_xdp_redirect),
> + MTK_ETHTOOL_XDP_STAT(rx_xdp_pass),
> + MTK_ETHTOOL_XDP_STAT(rx_xdp_drop),
> + MTK_ETHTOOL_XDP_STAT(rx_xdp_tx),
> + MTK_ETHTOOL_XDP_STAT(rx_xdp_tx_errors),
> + MTK_ETHTOOL_XDP_STAT(tx_xdp_xmit),
> + MTK_ETHTOOL_XDP_STAT(tx_xdp_xmit_errors),
> };
>
> static const char * const mtk_clks_source_name[] = {
> @@ -1495,7 +1506,8 @@ static void mtk_rx_put_buff(struct mtk_rx_ring *ring, void *data, bool napi)
> }
>
> static u32 mtk_xdp_run(struct mtk_rx_ring *ring, struct bpf_prog *prog,
> - struct xdp_buff *xdp, struct net_device *dev)
> + struct xdp_buff *xdp, struct net_device *dev,
> + struct mtk_xdp_stats *stats)
> {
> u32 act = XDP_PASS;
>
> @@ -1505,10 +1517,13 @@ static u32 mtk_xdp_run(struct mtk_rx_ring *ring, struct bpf_prog *prog,
> act = bpf_prog_run_xdp(prog, xdp);
> switch (act) {
> case XDP_PASS:
> + stats->rx_xdp_pass++;
> return XDP_PASS;
> case XDP_REDIRECT:
> if (unlikely(xdp_do_redirect(dev, xdp, prog)))
> break;
> +
> + stats->rx_xdp_redirect++;
> return XDP_REDIRECT;
> default:
> bpf_warn_invalid_xdp_action(dev, prog, act);
> @@ -1520,14 +1535,38 @@ static u32 mtk_xdp_run(struct mtk_rx_ring *ring, struct bpf_prog *prog,
> break;
> }
>
> + stats->rx_xdp_drop++;
> page_pool_put_full_page(ring->page_pool,
> virt_to_head_page(xdp->data), true);
> return XDP_DROP;
> }
>
> +static void mtk_xdp_rx_complete(struct mtk_eth *eth,
> + struct mtk_xdp_stats *stats)
> +{
> + int i, xdp_do_redirect = 0;
> +
> + /* update xdp ethtool stats */
> + for (i = 0; i < MTK_MAX_DEVS; i++) {
> + struct mtk_hw_stats *hw_stats = eth->mac[i]->hw_stats;
> + struct mtk_xdp_stats *xdp_stats = &hw_stats->xdp_stats;
> +
> + u64_stats_update_begin(&hw_stats->syncp);
> + xdp_stats->rx_xdp_redirect += stats[i].rx_xdp_redirect;
> + xdp_do_redirect += stats[i].rx_xdp_pass;
> + xdp_stats->rx_xdp_pass += stats[i].rx_xdp_pass;
> + xdp_stats->rx_xdp_drop += stats[i].rx_xdp_drop;
> + u64_stats_update_end(&hw_stats->syncp);
> + }
> +
> + if (xdp_do_redirect)
> + xdp_do_flush_map();
> +}
> +
> static int mtk_poll_rx(struct napi_struct *napi, int budget,
> struct mtk_eth *eth)
> {
> + struct mtk_xdp_stats xdp_stats[MTK_MAX_DEVS] = {};
This is allocating on the stack and clearing a relatively large struct
for every poll() call, which is not good.
Why can't you touch the eth->mac[i]->hw_stats.xdp_stats counters
directly, where needed?
> struct bpf_prog *prog = READ_ONCE(eth->prog);
> struct dim_sample dim_sample = {};
> struct mtk_rx_ring *ring;
> @@ -1535,7 +1574,6 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
> struct sk_buff *skb;
> u8 *data, *new_data;
> struct mtk_rx_dma_v2 *rxd, trxd;
> - bool xdp_do_redirect = false;
> int done = 0, bytes = 0;
>
> while (done < budget) {
> @@ -1597,12 +1635,10 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
> false);
> xdp_buff_clear_frags_flag(&xdp);
>
> - ret = mtk_xdp_run(ring, prog, &xdp, netdev);
> - if (ret != XDP_PASS) {
> - if (ret == XDP_REDIRECT)
> - xdp_do_redirect = true;
> + ret = mtk_xdp_run(ring, prog, &xdp, netdev,
> + &xdp_stats[mac]);
> + if (ret != XDP_PASS)
> goto skip_rx;
> - }
>
> skb = build_skb(data, PAGE_SIZE);
> if (unlikely(!skb)) {
> @@ -1725,8 +1761,8 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
> &dim_sample);
> net_dim(&eth->rx_dim, dim_sample);
>
> - if (prog && xdp_do_redirect)
> - xdp_do_flush_map();
> + if (prog)
> + mtk_xdp_rx_complete(eth, xdp_stats);
>
> return done;
> }
> diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
> index a1cea93300c1..629cdcdd632a 100644
> --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
> +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
> @@ -570,6 +570,16 @@ struct mtk_tx_dma_v2 {
> struct mtk_eth;
> struct mtk_mac;
>
> +struct mtk_xdp_stats {
> + u64 rx_xdp_redirect;
> + u64 rx_xdp_pass;
> + u64 rx_xdp_drop;
> + u64 rx_xdp_tx;
> + u64 rx_xdp_tx_errors;
> + u64 tx_xdp_xmit;
> + u64 tx_xdp_xmit_errors;
> +};
> +
> /* struct mtk_hw_stats - the structure that holds the traffic statistics.
> * @stats_lock: make sure that stats operations are atomic
> * @reg_offset: the status register offset of the SoC
> @@ -593,6 +603,8 @@ struct mtk_hw_stats {
> u64 rx_checksum_errors;
> u64 rx_flow_control_packets;
>
> + struct mtk_xdp_stats xdp_stats;
> +
> spinlock_t stats_lock;
> u32 reg_offset;
> struct u64_stats_sync syncp;
* Re: [PATCH net-next 3/4] net: ethernet: mtk_eth_soc: introduce xdp ethtool counters
2022-07-12 10:08 ` Paolo Abeni
@ 2022-07-12 16:17 ` Lorenzo Bianconi
0 siblings, 0 replies; 9+ messages in thread
From: Lorenzo Bianconi @ 2022-07-12 16:17 UTC (permalink / raw)
To: Paolo Abeni
Cc: netdev, nbd, john, sean.wang, Mark-MC.Lee, davem, edumazet, kuba,
matthias.bgg, linux-mediatek, ilias.apalodimas, lorenzo.bianconi,
jbrouer
[...]
>
> This is allocating on the stack and clearing a relatively large struct
> for every poll() call, which is not good.
>
> Why can't you touch directly the eth->mac[i]->hw_stats.xdp_stats
> counters where needed?
I am currently relying on xdp_stats to decide whether to flush the xdp
maps, but I can rework the code a bit to remove this dependency. I will
fix it in v2.
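e.g. the rework could bump the per-mac counters in place at the
mtk_xdp_run() exit points and keep only a flag for the flush, roughly
(illustrative, not the actual v2 code):

	/* update eth->mac[mac]->hw_stats directly instead of filling a
	 * per-poll on-stack array
	 */
	struct mtk_hw_stats *hw_stats = eth->mac[mac]->hw_stats;

	u64_stats_update_begin(&hw_stats->syncp);
	hw_stats->xdp_stats.rx_xdp_drop++;
	u64_stats_update_end(&hw_stats->syncp);

	/* at the end of mtk_poll_rx() */
	if (xdp_flush)		/* set once a redirect succeeded */
		xdp_do_flush_map();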
Regards,
Lorenzo
>
> > struct bpf_prog *prog = READ_ONCE(eth->prog);
> > struct dim_sample dim_sample = {};
> > struct mtk_rx_ring *ring;
> > @@ -1535,7 +1574,6 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
> > struct sk_buff *skb;
> > u8 *data, *new_data;
> > struct mtk_rx_dma_v2 *rxd, trxd;
> > - bool xdp_do_redirect = false;
> > int done = 0, bytes = 0;
> >
> > while (done < budget) {
> > @@ -1597,12 +1635,10 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
> > false);
> > xdp_buff_clear_frags_flag(&xdp);
> >
> > - ret = mtk_xdp_run(ring, prog, &xdp, netdev);
> > - if (ret != XDP_PASS) {
> > - if (ret == XDP_REDIRECT)
> > - xdp_do_redirect = true;
> > + ret = mtk_xdp_run(ring, prog, &xdp, netdev,
> > + &xdp_stats[mac]);
> > + if (ret != XDP_PASS)
> > goto skip_rx;
> > - }
> >
> > skb = build_skb(data, PAGE_SIZE);
> > if (unlikely(!skb)) {
> > @@ -1725,8 +1761,8 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
> > &dim_sample);
> > net_dim(&eth->rx_dim, dim_sample);
> >
> > - if (prog && xdp_do_redirect)
> > - xdp_do_flush_map();
> > + if (prog)
> > + mtk_xdp_rx_complete(eth, xdp_stats);
> >
> > return done;
> > }
> > diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
> > index a1cea93300c1..629cdcdd632a 100644
> > --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
> > +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
> > @@ -570,6 +570,16 @@ struct mtk_tx_dma_v2 {
> > struct mtk_eth;
> > struct mtk_mac;
> >
> > +struct mtk_xdp_stats {
> > + u64 rx_xdp_redirect;
> > + u64 rx_xdp_pass;
> > + u64 rx_xdp_drop;
> > + u64 rx_xdp_tx;
> > + u64 rx_xdp_tx_errors;
> > + u64 tx_xdp_xmit;
> > + u64 tx_xdp_xmit_errors;
> > +};
> > +
> > /* struct mtk_hw_stats - the structure that holds the traffic statistics.
> > * @stats_lock: make sure that stats operations are atomic
> > * @reg_offset: the status register offset of the SoC
> > @@ -593,6 +603,8 @@ struct mtk_hw_stats {
> > u64 rx_checksum_errors;
> > u64 rx_flow_control_packets;
> >
> > + struct mtk_xdp_stats xdp_stats;
> > +
> > spinlock_t stats_lock;
> > u32 reg_offset;
> > struct u64_stats_sync syncp;
>
* [PATCH net-next 4/4] net: ethernet: mtk_eth_soc: add xmit XDP support
2022-07-09 15:48 [PATCH net-next 0/4] mtk_eth_soc: add xdp support Lorenzo Bianconi
` (2 preceding siblings ...)
2022-07-09 15:48 ` [PATCH net-next 3/4] net: ethernet: mtk_eth_soc: introduce xdp ethtool counters Lorenzo Bianconi
@ 2022-07-09 15:48 ` Lorenzo Bianconi
3 siblings, 0 replies; 9+ messages in thread
From: Lorenzo Bianconi @ 2022-07-09 15:48 UTC (permalink / raw)
To: netdev
Cc: nbd, john, sean.wang, Mark-MC.Lee, davem, edumazet, kuba, pabeni,
matthias.bgg, linux-mediatek, ilias.apalodimas, lorenzo.bianconi,
jbrouer
Introduce support for the XDP_TX verdict and the ndo_xdp_xmit function
pointer.
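The dma_map argument added below separates the two tx paths: XDP_TX
frames sit on this device's page_pool pages, which are already mapped,
while frames handed over via ndo_xdp_xmit come from another device and
must be mapped first. As a usage example, a program attached to a
different netdev that redirects into this one exercises the new
.ndo_xdp_xmit (the ifindex is illustrative):

/* on another interface: forward every frame to the mtk port */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int xdp_fwd_to_mtk(struct xdp_md *ctx)
{
	return bpf_redirect(4 /* hypothetical mtk ifindex */, 0);
}

char _license[] SEC("license") = "GPL";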
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
---
drivers/net/ethernet/mediatek/mtk_eth_soc.c | 211 +++++++++++++++++---
drivers/net/ethernet/mediatek/mtk_eth_soc.h | 10 +-
2 files changed, 195 insertions(+), 26 deletions(-)
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index ae7ba2e09df8..f8dcb2c63c8c 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -1031,15 +1031,26 @@ static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
}
}
- tx_buf->flags = 0;
- if (tx_buf->skb &&
- (tx_buf->skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC)) {
- if (napi)
- napi_consume_skb(tx_buf->skb, napi);
+ if (tx_buf->type == MTK_TYPE_SKB) {
+ if (tx_buf->data &&
+ tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
+ struct sk_buff *skb = tx_buf->data;
+
+ if (napi)
+ napi_consume_skb(skb, napi);
+ else
+ dev_kfree_skb_any(skb);
+ }
+ } else if (tx_buf->data) {
+ struct xdp_frame *xdpf = tx_buf->data;
+
+ if (napi && tx_buf->type == MTK_TYPE_XDP_TX)
+ xdp_return_frame_rx_napi(xdpf);
else
- dev_kfree_skb_any(tx_buf->skb);
+ xdp_return_frame(xdpf);
}
- tx_buf->skb = NULL;
+ tx_buf->flags = 0;
+ tx_buf->data = NULL;
}
static void setup_tx_buf(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
@@ -1056,7 +1067,7 @@ static void setup_tx_buf(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
dma_unmap_addr_set(tx_buf, dma_addr1, mapped_addr);
dma_unmap_len_set(tx_buf, dma_len1, size);
} else {
- tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
+ tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
txd->txd1 = mapped_addr;
txd->txd2 = TX_DMA_PLEN0(size);
dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
@@ -1232,7 +1243,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
soc->txrx.txd_size);
if (new_desc)
memset(tx_buf, 0, sizeof(*tx_buf));
- tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
+ tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
tx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
MTK_TX_FLAGS_FPORT1;
@@ -1246,7 +1257,8 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
}
/* store skb to cleanup */
- itx_buf->skb = skb;
+ itx_buf->type = MTK_TYPE_SKB;
+ itx_buf->data = skb;
if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
if (k & 0x1)
@@ -1447,13 +1459,14 @@ static struct page_pool *mtk_create_page_pool(struct mtk_eth *eth,
struct xdp_rxq_info *xdp_q,
int id, int size)
{
+ struct bpf_prog *prog = READ_ONCE(eth->prog);
struct page_pool_params pp_params = {
.order = 0,
.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
.pool_size = size,
.nid = NUMA_NO_NODE,
.dev = eth->dma_dev,
- .dma_dir = DMA_FROM_DEVICE,
+ .dma_dir = prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE,
.offset = MTK_PP_HEADROOM,
.max_len = MTK_PP_MAX_BUF_SIZE,
};
@@ -1505,9 +1518,141 @@ static void mtk_rx_put_buff(struct mtk_rx_ring *ring, void *data, bool napi)
skb_free_frag(data);
}
-static u32 mtk_xdp_run(struct mtk_rx_ring *ring, struct bpf_prog *prog,
- struct xdp_buff *xdp, struct net_device *dev,
- struct mtk_xdp_stats *stats)
+static int mtk_xdp_submit_frame(struct mtk_eth *eth, struct xdp_frame *xdpf,
+ struct net_device *dev, bool dma_map)
+{
+ const struct mtk_soc_data *soc = eth->soc;
+ struct mtk_tx_ring *ring = &eth->tx_ring;
+ struct mtk_tx_dma_desc_info txd_info = {
+ .size = xdpf->len,
+ .first = true,
+ .last = true,
+ };
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_tx_dma *txd, *txd_pdma;
+ int err = 0, index = 0, n_desc = 1;
+ struct mtk_tx_buf *tx_buf;
+
+ if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
+ return -EBUSY;
+
+ if (unlikely(atomic_read(&ring->free_count) <= 1))
+ return -EBUSY;
+
+ spin_lock(&eth->page_lock);
+
+ txd = ring->next_free;
+ if (txd == ring->last_free) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ tx_buf = mtk_desc_to_tx_buf(ring, txd, soc->txrx.txd_size);
+ memset(tx_buf, 0, sizeof(*tx_buf));
+
+ if (dma_map) { /* ndo_xdp_xmit */
+ txd_info.addr = dma_map_single(eth->dma_dev, xdpf->data,
+ txd_info.size, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr))) {
+ err = -ENOMEM;
+ goto out;
+ }
+ tx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
+ } else {
+ struct page *page = virt_to_head_page(xdpf->data);
+
+ txd_info.addr = page_pool_get_dma_addr(page) +
+ sizeof(*xdpf) + xdpf->headroom;
+ dma_sync_single_for_device(eth->dma_dev, txd_info.addr,
+ txd_info.size,
+ DMA_BIDIRECTIONAL);
+ }
+ mtk_tx_set_dma_desc(dev, txd, &txd_info);
+
+ tx_buf->flags |= !mac->id ? MTK_TX_FLAGS_FPORT0 : MTK_TX_FLAGS_FPORT1;
+
+ txd_pdma = qdma_to_pdma(ring, txd);
+ setup_tx_buf(eth, tx_buf, txd_pdma, txd_info.addr, txd_info.size,
+ index++);
+
+ /* store xdpf for cleanup */
+ tx_buf->type = dma_map ? MTK_TYPE_XDP_NDO : MTK_TYPE_XDP_TX;
+ tx_buf->data = xdpf;
+
+ if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
+ if (index & 1)
+ txd_pdma->txd2 |= TX_DMA_LS0;
+ else
+ txd_pdma->txd2 |= TX_DMA_LS1;
+ }
+
+ ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
+ atomic_sub(n_desc, &ring->free_count);
+
+ /* make sure that all changes to the dma ring are flushed before we
+ * continue
+ */
+ wmb();
+
+ if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
+ mtk_w32(eth, txd->txd2, soc->reg_map->qdma.ctx_ptr);
+ } else {
+ int idx;
+
+ idx = txd_to_idx(ring, txd, soc->txrx.txd_size);
+ mtk_w32(eth, NEXT_DESP_IDX(idx, ring->dma_size),
+ MT7628_TX_CTX_IDX0);
+ }
+out:
+ spin_unlock(&eth->page_lock);
+
+ return err;
+}
+
+static u32 mtk_xdp_tx(struct mtk_eth *eth, struct net_device *dev,
+ struct xdp_buff *xdp, struct mtk_xdp_stats *stats)
+{
+ struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
+ int err;
+
+ err = mtk_xdp_submit_frame(eth, xdpf, dev, false);
+ if (err) {
+ stats->rx_xdp_tx_errors++;
+ return XDP_DROP;
+ }
+
+ stats->rx_xdp_tx++;
+ return XDP_TX;
+}
+
+static int mtk_xdp_xmit(struct net_device *dev, int num_frame,
+ struct xdp_frame **frames, u32 flags)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_hw_stats *hw_stats = mac->hw_stats;
+ struct mtk_eth *eth = mac->hw;
+ int i, nxmit = 0;
+
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+ return -EINVAL;
+
+ for (i = 0; i < num_frame; i++) {
+ if (mtk_xdp_submit_frame(eth, frames[i], dev, true))
+ break;
+ nxmit++;
+ }
+
+ u64_stats_update_begin(&hw_stats->syncp);
+ hw_stats->xdp_stats.tx_xdp_xmit += nxmit;
+ hw_stats->xdp_stats.tx_xdp_xmit_errors += num_frame - nxmit;
+ u64_stats_update_end(&hw_stats->syncp);
+
+ return nxmit;
+}
+
+static u32 mtk_xdp_run(struct mtk_eth *eth, struct mtk_rx_ring *ring,
+ struct bpf_prog *prog, struct xdp_buff *xdp,
+ struct net_device *dev, struct mtk_xdp_stats *stats)
{
u32 act = XDP_PASS;
@@ -1525,6 +1670,10 @@ static u32 mtk_xdp_run(struct mtk_rx_ring *ring, struct bpf_prog *prog,
stats->rx_xdp_redirect++;
return XDP_REDIRECT;
+ case XDP_TX:
+ if (mtk_xdp_tx(eth, dev, xdp, stats) == XDP_TX)
+ return XDP_TX;
+ goto out;
default:
bpf_warn_invalid_xdp_action(dev, prog, act);
fallthrough;
@@ -1536,6 +1685,7 @@ static u32 mtk_xdp_run(struct mtk_rx_ring *ring, struct bpf_prog *prog,
}
stats->rx_xdp_drop++;
+out:
page_pool_put_full_page(ring->page_pool,
virt_to_head_page(xdp->data), true);
return XDP_DROP;
@@ -1556,6 +1706,8 @@ static void mtk_xdp_rx_complete(struct mtk_eth *eth,
xdp_do_redirect += stats[i].rx_xdp_pass;
xdp_stats->rx_xdp_pass += stats[i].rx_xdp_pass;
xdp_stats->rx_xdp_drop += stats[i].rx_xdp_drop;
+ xdp_stats->rx_xdp_tx += stats[i].rx_xdp_tx;
+ xdp_stats->rx_xdp_tx_errors += stats[i].rx_xdp_tx_errors;
u64_stats_update_end(&hw_stats->syncp);
}
@@ -1635,7 +1787,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
false);
xdp_buff_clear_frags_flag(&xdp);
- ret = mtk_xdp_run(ring, prog, &xdp, netdev,
+ ret = mtk_xdp_run(eth, ring, prog, &xdp, netdev,
&xdp_stats[mac]);
if (ret != XDP_PASS)
goto skip_rx;
@@ -1772,9 +1924,8 @@ static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
{
const struct mtk_reg_map *reg_map = eth->soc->reg_map;
struct mtk_tx_ring *ring = &eth->tx_ring;
- struct mtk_tx_dma *desc;
- struct sk_buff *skb;
struct mtk_tx_buf *tx_buf;
+ struct mtk_tx_dma *desc;
u32 cpu, dma;
cpu = ring->last_free_ptr;
@@ -1795,15 +1946,21 @@ static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
if (tx_buf->flags & MTK_TX_FLAGS_FPORT1)
mac = 1;
- skb = tx_buf->skb;
- if (!skb)
+ if (!tx_buf->data)
break;
- if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) {
+ if (tx_buf->type == MTK_TYPE_SKB &&
+ tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
+ struct sk_buff *skb = tx_buf->data;
+
bytes[mac] += skb->len;
done[mac]++;
budget--;
+ } else if (tx_buf->type == MTK_TYPE_XDP_TX ||
+ tx_buf->type == MTK_TYPE_XDP_NDO) {
+ budget--;
}
+
mtk_tx_unmap(eth, tx_buf, true);
ring->last_free = desc;
@@ -1822,9 +1979,8 @@ static int mtk_poll_tx_pdma(struct mtk_eth *eth, int budget,
unsigned int *done, unsigned int *bytes)
{
struct mtk_tx_ring *ring = &eth->tx_ring;
- struct mtk_tx_dma *desc;
- struct sk_buff *skb;
struct mtk_tx_buf *tx_buf;
+ struct mtk_tx_dma *desc;
u32 cpu, dma;
cpu = ring->cpu_idx;
@@ -1832,14 +1988,18 @@ static int mtk_poll_tx_pdma(struct mtk_eth *eth, int budget,
while ((cpu != dma) && budget) {
tx_buf = &ring->buf[cpu];
- skb = tx_buf->skb;
- if (!skb)
+ if (!tx_buf->data)
break;
- if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) {
+ if (tx_buf->type == MTK_TYPE_SKB &&
+ tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
+ struct sk_buff *skb = tx_buf->data;
bytes[0] += skb->len;
done[0]++;
budget--;
+ } else if (tx_buf->type == MTK_TYPE_XDP_TX ||
+ tx_buf->type == MTK_TYPE_XDP_NDO) {
+ budget--;
}
mtk_tx_unmap(eth, tx_buf, true);
@@ -3518,6 +3678,7 @@ static const struct net_device_ops mtk_netdev_ops = {
#endif
.ndo_setup_tc = mtk_eth_setup_tc,
.ndo_bpf = mtk_xdp,
+ .ndo_xdp_xmit = mtk_xdp_xmit,
};
static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index 629cdcdd632a..b60301ebe952 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -696,6 +696,12 @@ enum mtk_dev_state {
MTK_RESETTING
};
+enum mtk_tx_buf_type {
+ MTK_TYPE_SKB,
+ MTK_TYPE_XDP_TX,
+ MTK_TYPE_XDP_NDO,
+};
+
/* struct mtk_tx_buf - This struct holds the pointers to the memory pointed at
* by the TX descriptor s
* @skb: The SKB pointer of the packet being sent
@@ -705,7 +711,9 @@ enum mtk_dev_state {
* @dma_len1: The length of the second segment
*/
struct mtk_tx_buf {
- struct sk_buff *skb;
+ enum mtk_tx_buf_type type;
+ void *data;
+
u32 flags;
DEFINE_DMA_UNMAP_ADDR(dma_addr0);
DEFINE_DMA_UNMAP_LEN(dma_len0);
--
2.36.1