* [RFC 1/2] net: xdp: introduce bulking for xdp tx return path
From: Lorenzo Bianconi @ 2020-10-20 9:33 UTC
To: netdev; +Cc: bpf, davem, kuba, lorenzo.bianconi, brouer, ilias.apalodimas
Introduce bulking capability in the xdp tx return path (XDP_TX and
XDP_REDIRECT). xdp_return_frame and xdp_return_frame_rx_napi are
usually run inside the driver NAPI tx completion loop, so it is
possible to batch them.
The current implementation covers only the page_pool memory model.
Convert the mvneta driver to the new xdp_return_frame_bulk APIs.
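For context, a minimal sketch (not part of the patch) of how a driver
NAPI tx completion loop is expected to use the new helpers, modeled on
the mvneta conversion below. my_txq, my_tx_buf and the buffer layout
are hypothetical names used only for illustration; txq->bq is assumed
to be zero-initialized (count == 0, xa == NULL) at queue setup:

static void my_txq_complete(struct my_txq *txq, int num, bool napi)
{
	int i;

	for (i = 0; i < num; i++) {
		struct my_tx_buf *buf = &txq->buf[i];

		/* stage the frame in the per-txq bulk queue; it is
		 * flushed automatically when the queue fills up or
		 * the underlying memory allocator changes
		 */
		xdp_return_frame_bulk(buf->xdpf, &txq->bq, napi);
	}

	/* return whatever is still pending in the bulk queue */
	xdp_flush_frame_bulk(&txq->bq, napi);
}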
Suggested-by: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
---
drivers/net/ethernet/marvell/mvneta.c | 8 ++---
include/net/xdp.h | 11 ++++++
net/core/xdp.c | 50 +++++++++++++++++++++++++++
3 files changed, 65 insertions(+), 4 deletions(-)
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 54b0bf574c05..af33cc62ed4c 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -663,6 +663,8 @@ struct mvneta_tx_queue {
/* Affinity mask for CPUs*/
cpumask_t affinity_mask;
+
+ struct xdp_frame_bulk bq;
};
struct mvneta_rx_queue {
@@ -1854,12 +1856,10 @@ static void mvneta_txq_bufs_free(struct mvneta_port *pp,
dev_kfree_skb_any(buf->skb);
} else if (buf->type == MVNETA_TYPE_XDP_TX ||
buf->type == MVNETA_TYPE_XDP_NDO) {
- if (napi && buf->type == MVNETA_TYPE_XDP_TX)
- xdp_return_frame_rx_napi(buf->xdpf);
- else
- xdp_return_frame(buf->xdpf);
+ xdp_return_frame_bulk(buf->xdpf, &txq->bq, napi);
}
}
+ xdp_flush_frame_bulk(&txq->bq, napi);
netdev_tx_completed_queue(nq, pkts_compl, bytes_compl);
}
diff --git a/include/net/xdp.h b/include/net/xdp.h
index 3814fb631d52..4b79d50afe36 100644
--- a/include/net/xdp.h
+++ b/include/net/xdp.h
@@ -104,6 +104,12 @@ struct xdp_frame {
struct net_device *dev_rx; /* used by cpumap */
};
+#define XDP_BULK_QUEUE_SIZE 16
+struct xdp_frame_bulk {
+ void *q[XDP_BULK_QUEUE_SIZE];
+ int count;
+ void *xa;
+};
static inline struct skb_shared_info *
xdp_get_shared_info_from_frame(struct xdp_frame *frame)
@@ -194,6 +200,11 @@ struct xdp_frame *xdp_convert_buff_to_frame(struct xdp_buff *xdp)
void xdp_return_frame(struct xdp_frame *xdpf);
void xdp_return_frame_rx_napi(struct xdp_frame *xdpf);
void xdp_return_buff(struct xdp_buff *xdp);
+void xdp_flush_frame_bulk(struct xdp_frame_bulk *bq,
+ bool napi_direct);
+void xdp_return_frame_bulk(struct xdp_frame *xdpf,
+ struct xdp_frame_bulk *bq,
+ bool napi_direct);
/* When sending xdp_frame into the network stack, then there is no
* return point callback, which is needed to release e.g. DMA-mapping
diff --git a/net/core/xdp.c b/net/core/xdp.c
index 48aba933a5a8..b05467a916b4 100644
--- a/net/core/xdp.c
+++ b/net/core/xdp.c
@@ -380,6 +380,56 @@ void xdp_return_frame_rx_napi(struct xdp_frame *xdpf)
}
EXPORT_SYMBOL_GPL(xdp_return_frame_rx_napi);
+void xdp_flush_frame_bulk(struct xdp_frame_bulk *bq,
+ bool napi_direct)
+{
+ struct xdp_mem_allocator *xa = bq->xa;
+ int i;
+
+ for (i = 0; i < bq->count; i++) {
+ napi_direct &= !xdp_return_frame_no_direct();
+ page_pool_put_full_page(xa->page_pool,
+ virt_to_head_page(bq->q[i]),
+ napi_direct);
+ }
+ bq->count = 0;
+}
+EXPORT_SYMBOL_GPL(xdp_flush_frame_bulk);
+
+void xdp_return_frame_bulk(struct xdp_frame *xdpf,
+ struct xdp_frame_bulk *bq,
+ bool napi_direct)
+{
+ struct xdp_mem_info *mem = &xdpf->mem;
+ struct xdp_mem_allocator *xa, *nxa;
+
+ if (mem->type != MEM_TYPE_PAGE_POOL) {
+ __xdp_return(xdpf->data, &xdpf->mem, napi_direct);
+ return;
+ }
+
+ rcu_read_lock();
+
+ xa = bq->xa;
+ if (unlikely(!xa || mem->id != xa->mem.id)) {
+ nxa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
+ if (unlikely(!xa)) {
+ bq->xa = nxa;
+ xa = nxa;
+ }
+ }
+
+ if (mem->id != xa->mem.id || bq->count == XDP_BULK_QUEUE_SIZE)
+ xdp_flush_frame_bulk(bq, napi_direct);
+
+ bq->q[bq->count++] = xdpf->data;
+ if (mem->id != xa->mem.id)
+ bq->xa = nxa;
+
+ rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(xdp_return_frame_bulk);
+
void xdp_return_buff(struct xdp_buff *xdp)
{
__xdp_return(xdp->data, &xdp->rxq->mem, true);
--
2.26.2
* [RFC 2/2] net: page_pool: add bulk support for ptr_ring
From: Lorenzo Bianconi @ 2020-10-20 9:33 UTC
To: netdev; +Cc: bpf, davem, kuba, lorenzo.bianconi, brouer, ilias.apalodimas
Introduce the capability to batch page_pool ptr_ring refills, since
the refill is usually run inside the driver NAPI tx completion loop.
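To make the locking change concrete, a sketch of its effect on the xdp
flush path (see the net/core/xdp.c hunk below); pool, data, count and
napi stand in for the corresponding xdp_mem_allocator and
xdp_frame_bulk fields:

/* before: every page that misses the lockless in-softirq cache
 * takes the ptr_ring producer lock on its own
 */
for (i = 0; i < count; i++)
	page_pool_put_full_page(pool, virt_to_head_page(data[i]), napi);

/* after: cache misses are staged in a local array and produced
 * into the ptr_ring under a single producer-lock acquisition
 */
page_pool_put_page_bulk(pool, data, count, napi);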
Suggested-by: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
---
include/net/page_pool.h | 21 +++++++++++++++++++++
net/core/page_pool.c | 37 +++++++++++++++++++++++++++++++++++++
net/core/xdp.c | 13 ++++++-------
3 files changed, 64 insertions(+), 7 deletions(-)
diff --git a/include/net/page_pool.h b/include/net/page_pool.h
index 81d7773f96cd..1330419efec7 100644
--- a/include/net/page_pool.h
+++ b/include/net/page_pool.h
@@ -169,6 +169,8 @@ static inline void page_pool_release_page(struct page_pool *pool,
void page_pool_put_page(struct page_pool *pool, struct page *page,
unsigned int dma_sync_size, bool allow_direct);
+void page_pool_put_page_bulk(struct page_pool *pool, void **data, int count,
+ bool allow_direct);
/* Same as above but will try to sync the entire area pool->max_len */
static inline void page_pool_put_full_page(struct page_pool *pool,
@@ -215,4 +217,23 @@ static inline void page_pool_nid_changed(struct page_pool *pool, int new_nid)
if (unlikely(pool->p.nid != new_nid))
page_pool_update_nid(pool, new_nid);
}
+
+static inline void page_pool_ring_lock(struct page_pool *pool)
+ __acquires(&pool->ring.producer_lock)
+{
+ if (in_serving_softirq())
+ spin_lock(&pool->ring.producer_lock);
+ else
+ spin_lock_bh(&pool->ring.producer_lock);
+}
+
+static inline void page_pool_ring_unlock(struct page_pool *pool)
+ __releases(&pool->ring.producer_lock)
+{
+ if (in_serving_softirq())
+ spin_unlock(&pool->ring.producer_lock);
+ else
+ spin_unlock_bh(&pool->ring.producer_lock);
+}
+
#endif /* _NET_PAGE_POOL_H */
diff --git a/net/core/page_pool.c b/net/core/page_pool.c
index ef98372facf6..03c3a92c9179 100644
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -11,6 +11,8 @@
#include <linux/device.h>
#include <net/page_pool.h>
+#include <net/xdp.h>
+
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/page-flags.h>
@@ -408,6 +410,41 @@ void page_pool_put_page(struct page_pool *pool, struct page *page,
}
EXPORT_SYMBOL(page_pool_put_page);
+void page_pool_put_page_bulk(struct page_pool *pool, void **data, int count,
+ bool allow_direct)
+{
+ struct page *page_ring[XDP_BULK_QUEUE_SIZE];
+ int i, len = 0;
+
+ for (i = 0; i < count; i++) {
+ struct page *page = virt_to_head_page(data[i]);
+
+ if (unlikely(page_ref_count(page) != 1 ||
+ !pool_page_reusable(pool, page))) {
+ page_pool_release_page(pool, page);
+ put_page(page);
+ continue;
+ }
+
+ if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
+ page_pool_dma_sync_for_device(pool, page, -1);
+
+ if (allow_direct && in_serving_softirq() &&
+ page_pool_recycle_in_cache(page, pool))
+ continue;
+
+ page_ring[len++] = page;
+ }
+
+ page_pool_ring_lock(pool);
+ for (i = 0; i < len; i++) {
+ if (__ptr_ring_produce(&pool->ring, page_ring[i]))
+ page_pool_return_page(pool, page_ring[i]);
+ }
+ page_pool_ring_unlock(pool);
+}
+EXPORT_SYMBOL(page_pool_put_page_bulk);
+
static void page_pool_empty_ring(struct page_pool *pool)
{
struct page *page;
diff --git a/net/core/xdp.c b/net/core/xdp.c
index b05467a916b4..7ebe159e3835 100644
--- a/net/core/xdp.c
+++ b/net/core/xdp.c
@@ -384,14 +384,13 @@ void xdp_flush_frame_bulk(struct xdp_frame_bulk *bq,
bool napi_direct)
{
struct xdp_mem_allocator *xa = bq->xa;
- int i;
- for (i = 0; i < bq->count; i++) {
- napi_direct &= !xdp_return_frame_no_direct();
- page_pool_put_full_page(xa->page_pool,
- virt_to_head_page(bq->q[i]),
- napi_direct);
- }
+ if (unlikely(!bq->count))
+ return;
+
+ napi_direct &= !xdp_return_frame_no_direct();
+ page_pool_put_page_bulk(xa->page_pool, bq->q, bq->count,
+ napi_direct);
bq->count = 0;
}
EXPORT_SYMBOL_GPL(xdp_flush_frame_bulk);
--
2.26.2