From: Ciara Loftus <ciara.loftus@intel.com>
To: netdev@vger.kernel.org, bpf@vger.kernel.org
Cc: ast@kernel.org, daniel@iogearbox.net, davem@davemloft.net,
kuba@kernel.org, hawk@kernel.org, john.fastabend@gmail.com,
toke@redhat.com, bjorn@kernel.org, magnus.karlsson@intel.com,
jonathan.lemon@gmail.com, maciej.fijalkowski@intel.com,
Ciara Loftus <ciara.loftus@intel.com>
Subject: [RFC PATCH bpf-next 5/8] xsk: implement a batched version of xsk_rcv
Date: Tue, 16 Nov 2021 07:37:39 +0000
Message-ID: <20211116073742.7941-6-ciara.loftus@intel.com>
In-Reply-To: <20211116073742.7941-1-ciara.loftus@intel.com>
Introduce xsk_rcv_batch(), a batched version of xsk_rcv(), which takes
an array of xdp_buffs, reserves Rx ring descriptors for the whole batch
in one pass and releases the buffers back to the pool. Also introduce
xsk_buff_dma_sync_for_cpu_batch(), a batched version of
xsk_buff_dma_sync_for_cpu().
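To illustrate the intended use (this sketch is not part of the patch),
a zero-copy driver's Rx path could consume the new API roughly as
follows. The function name rx_zc_process_batch and its surrounding
bookkeeping are hypothetical; only xsk_buff_dma_sync_for_cpu_batch(),
xsk_rcv_batch() and xsk_flush() are provided by this series:

  /* Hypothetical driver-side sketch, not part of this patch. */
  static int rx_zc_process_batch(struct xdp_sock *xs,
                                 struct xsk_buff_pool *pool,
                                 struct xdp_buff **bufs, int n)
  {
          int err;

          /* Sync the whole batch for CPU access in one pass. */
          xsk_buff_dma_sync_for_cpu_batch(bufs, pool, n);

          /* Reserve Rx ring descriptors for all n buffs; on success
           * the buffs are also released back to the pool.
           */
          err = xsk_rcv_batch(xs, bufs, n);
          if (err)
                  return err;

          xsk_flush(xs);
          return 0;
  }
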
Signed-off-by: Ciara Loftus <ciara.loftus@intel.com>
---
include/net/xdp_sock_drv.h | 28 ++++++++++++++++++++++++++++
include/net/xsk_buff_pool.h | 22 ++++++++++++++++++++++
net/xdp/xsk.c | 29 +++++++++++++++++++++++++++++
net/xdp/xsk_queue.h | 31 +++++++++++++++++++++++++++++++
4 files changed, 110 insertions(+)
diff --git a/include/net/xdp_sock_drv.h b/include/net/xdp_sock_drv.h
index e923f5d1adb6..0b352d7a34af 100644
--- a/include/net/xdp_sock_drv.h
+++ b/include/net/xdp_sock_drv.h
@@ -23,6 +23,7 @@ void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool);
void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool);
bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool);
int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp);
+int xsk_rcv_batch(struct xdp_sock *xs, struct xdp_buff **bufs, int batch_size);
void xsk_flush(struct xdp_sock *xs);
static inline u32 xsk_pool_get_headroom(struct xsk_buff_pool *pool)
@@ -125,6 +126,22 @@ static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp, struct xsk_bu
xp_dma_sync_for_cpu(xskb);
}
+static inline void xsk_buff_dma_sync_for_cpu_batch(struct xdp_buff **bufs,
+ struct xsk_buff_pool *pool,
+ int batch_size)
+{
+ struct xdp_buff_xsk *xskb;
+ int i;
+
+ if (!pool->dma_need_sync)
+ return;
+
+ for (i = 0; i < batch_size; i++) {
+ xskb = container_of(*(bufs + i), struct xdp_buff_xsk, xdp);
+ xp_dma_sync_for_cpu(xskb);
+ }
+}
+
static inline void xsk_buff_raw_dma_sync_for_device(struct xsk_buff_pool *pool,
dma_addr_t dma,
size_t size)
@@ -191,6 +208,11 @@ static inline int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
return 0;
}
+static inline int xsk_rcv_batch(struct xdp_sock *xs, struct xdp_buff **bufs, int batch_size)
+{
+ return 0;
+}
+
static inline void xsk_flush(struct xdp_sock *xs)
{
}
@@ -274,6 +296,12 @@ static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp, struct xsk_bu
{
}
+static inline void xsk_buff_dma_sync_for_cpu_batch(struct xdp_buff **bufs,
+ struct xsk_buff_pool *pool,
+ int batch_size)
+{
+}
+
static inline void xsk_buff_raw_dma_sync_for_device(struct xsk_buff_pool *pool,
dma_addr_t dma,
size_t size)
diff --git a/include/net/xsk_buff_pool.h b/include/net/xsk_buff_pool.h
index ddeefc4a1040..f6d76c7eaf6b 100644
--- a/include/net/xsk_buff_pool.h
+++ b/include/net/xsk_buff_pool.h
@@ -214,6 +214,28 @@ static inline void xp_release(struct xdp_buff_xsk *xskb)
xskb->pool->free_heads[xskb->pool->free_heads_cnt++] = xskb;
}
+/* Release a batch of xdp_buffs back to an xsk_buff_pool.
+ * The batch of buffs must all come from the same xsk_buff_pool. This way
+ * it is safe to push the batch to the top of the free_heads stack, because
+ * at least the same amount will have been popped from the stack earlier in
+ * the datapath.
+ */
+static inline void xp_release_batch(struct xdp_buff **bufs, int batch_size)
+{
+ struct xdp_buff_xsk *xskb = container_of(*bufs, struct xdp_buff_xsk, xdp);
+ struct xsk_buff_pool *pool = xskb->pool;
+ u32 tail = pool->free_heads_cnt;
+ u32 i;
+
+ if (pool->unaligned) {
+ for (i = 0; i < batch_size; i++) {
+ xskb = container_of(*(bufs + i), struct xdp_buff_xsk, xdp);
+ pool->free_heads[tail + i] = xskb;
+ }
+ pool->free_heads_cnt += batch_size;
+ }
+}
+
static inline u64 xp_get_handle(struct xdp_buff_xsk *xskb)
{
u64 offset = xskb->xdp.data - xskb->xdp.data_hard_start;
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index ce004f5fae64..22d00173a96f 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -151,6 +151,20 @@ static int __xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
return 0;
}
+static int __xsk_rcv_zc_batch(struct xdp_sock *xs, struct xdp_buff **bufs, int batch_size)
+{
+ int err;
+
+ err = xskq_prod_reserve_desc_batch(xs->rx, bufs, batch_size);
+ if (err) {
+ xs->rx_queue_full++;
+ return -1;
+ }
+
+ xp_release_batch(bufs, batch_size);
+ return 0;
+}
+
static void xsk_copy_xdp(struct xdp_buff *to, struct xdp_buff *from, u32 len)
{
void *from_buf, *to_buf;
@@ -269,6 +283,21 @@ int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
}
EXPORT_SYMBOL(xsk_rcv);
+int xsk_rcv_batch(struct xdp_sock *xs, struct xdp_buff **bufs, int batch_size)
+{
+ int err;
+
+ err = xsk_rcv_check(xs, *bufs);
+ if (err)
+ return err;
+
+ if ((*bufs)->rxq->mem.type != MEM_TYPE_XSK_BUFF_POOL)
+ return -1;
+
+ return __xsk_rcv_zc_batch(xs, bufs, batch_size);
+}
+EXPORT_SYMBOL(xsk_rcv_batch);
+
int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
{
struct list_head *flush_list = this_cpu_ptr(&xskmap_flush_list);
diff --git a/net/xdp/xsk_queue.h b/net/xdp/xsk_queue.h
index e9aa2c236356..3be9f4a01d77 100644
--- a/net/xdp/xsk_queue.h
+++ b/net/xdp/xsk_queue.h
@@ -338,6 +338,11 @@ static inline bool xskq_prod_is_full(struct xsk_queue *q)
return xskq_prod_nb_free(q, 1) ? false : true;
}
+static inline bool xskq_prod_is_full_n(struct xsk_queue *q, u32 n)
+{
+ return xskq_prod_nb_free(q, n) ? false : true;
+}
+
static inline void xskq_prod_cancel(struct xsk_queue *q)
{
q->cached_prod--;
@@ -399,6 +404,32 @@ static inline int xskq_prod_reserve_desc(struct xsk_queue *q,
return 0;
}
+static inline int xskq_prod_reserve_desc_batch(struct xsk_queue *q, struct xdp_buff **bufs,
+ int batch_size)
+{
+ struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
+ struct xdp_buff_xsk *xskb;
+ u64 addr;
+ u32 len;
+ u32 i;
+
+ if (xskq_prod_is_full_n(q, batch_size))
+ return -ENOSPC;
+
+ /* A, matches D */
+ for (i = 0; i < batch_size; i++) {
+ len = (*(bufs + i))->data_end - (*(bufs + i))->data;
+ xskb = container_of(*(bufs + i), struct xdp_buff_xsk, xdp);
+ addr = xp_get_handle(xskb);
+ ring->desc[(q->cached_prod + i) & q->ring_mask].addr = addr;
+ ring->desc[(q->cached_prod + i) & q->ring_mask].len = len;
+ }
+
+ q->cached_prod += batch_size;
+
+ return 0;
+}
+
static inline void __xskq_prod_submit(struct xsk_queue *q, u32 idx)
{
smp_store_release(&q->ring->producer, idx); /* B, matches C */
--
2.17.1