From: Magnus Karlsson <magnus.karlsson@gmail.com>
To: magnus.karlsson@intel.com, bjorn@kernel.org, ast@kernel.org,
	daniel@iogearbox.net, netdev@vger.kernel.org,
	maciej.fijalkowski@intel.com, bpf@vger.kernel.org, yhs@fb.com,
	andrii@kernel.org, martin.lau@linux.dev, song@kernel.org,
	john.fastabend@gmail.com, kpsingh@kernel.org, sdf@google.com,
	haoluo@google.com, jolsa@kernel.org, tirthendu.sarkar@intel.com
Subject: [PATCH bpf-next v2 08/10] selftests/xsk: populate fill ring based on frags needed
Date: Tue, 16 May 2023 12:31:07 +0200
Message-ID: <20230516103109.3066-9-magnus.karlsson@gmail.com>
In-Reply-To: <20230516103109.3066-1-magnus.karlsson@gmail.com>

From: Magnus Karlsson <magnus.karlsson@intel.com>

Populate the fill ring based on the number of frags a packet
needs. With multi-buffer support, a packet might require more than a
single fragment/buffer, so the function xsk_populate_fill_ring() needs
to consider how many buffers a packet will consume, and put that many
buffers on the fill ring for each packet it should receive. Since no
multi-buffer packets are sent yet, the function still produces only
one buffer per packet.
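
To illustrate the idea outside the selftest (this snippet is not part
of the patch; the helper name and the numbers are made up for the
example), the buffer count per packet is a ceiling division of the
packet length by the usable bytes per receive buffer, where each
buffer loses XDP_PACKET_HEADROOM bytes to headroom. It mirrors what
the ceil_u32()/pkt_nb_frags() helpers below compute:

	/* Standalone sketch, built separately from the selftests. */
	#include <stdio.h>

	#define XDP_PACKET_HEADROOM 256	/* headroom reserved in every rx buffer */

	static unsigned int frags_needed(unsigned int pkt_len, unsigned int frame_size)
	{
		unsigned int usable = frame_size - XDP_PACKET_HEADROOM;

		/* Ceiling division: every started buffer counts. */
		return (pkt_len + usable - 1) / usable;
	}

	int main(void)
	{
		/* A 4000-byte packet with 2048-byte frames (1792 usable
		 * bytes each) needs 3 buffers on the fill ring.
		 */
		printf("%u\n", frags_needed(4000, 2048));
		return 0;
	}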

Signed-off-by: Magnus Karlsson <magnus.karlsson@intel.com>
---
 tools/testing/selftests/bpf/xsk.h        |  5 +++
 tools/testing/selftests/bpf/xskxceiver.c | 48 ++++++++++++++++++------
 2 files changed, 41 insertions(+), 12 deletions(-)

diff --git a/tools/testing/selftests/bpf/xsk.h b/tools/testing/selftests/bpf/xsk.h
index 04ed8b544712..8da8d557768b 100644
--- a/tools/testing/selftests/bpf/xsk.h
+++ b/tools/testing/selftests/bpf/xsk.h
@@ -134,6 +134,11 @@ static inline void xsk_ring_prod__submit(struct xsk_ring_prod *prod, __u32 nb)
 	__atomic_store_n(prod->producer, *prod->producer + nb, __ATOMIC_RELEASE);
 }
 
+static inline void xsk_ring_prod__cancel(struct xsk_ring_prod *prod, __u32 nb)
+{
+	prod->cached_prod -= nb;
+}
+
 static inline __u32 xsk_ring_cons__peek(struct xsk_ring_cons *cons, __u32 nb, __u32 *idx)
 {
 	__u32 entries = xsk_cons_nb_avail(cons, nb);
diff --git a/tools/testing/selftests/bpf/xskxceiver.c b/tools/testing/selftests/bpf/xskxceiver.c
index f0d929cb730a..c54f25dcf134 100644
--- a/tools/testing/selftests/bpf/xskxceiver.c
+++ b/tools/testing/selftests/bpf/xskxceiver.c
@@ -531,6 +531,18 @@ static struct pkt_stream *__pkt_stream_alloc(u32 nb_pkts)
 	return pkt_stream;
 }
 
+static u32 ceil_u32(u32 a, u32 b)
+{
+	return (a + b - 1) / b;
+}
+
+static u32 pkt_nb_frags(u32 frame_size, struct pkt *pkt)
+{
+	if (!pkt || !pkt->valid)
+		return 1;
+	return ceil_u32(pkt->len, frame_size);
+}
+
 static void pkt_set(struct xsk_umem_info *umem, struct pkt *pkt, int offset, u32 len)
 {
 	pkt->offset = offset;
@@ -1159,9 +1171,11 @@ static void thread_common_ops_tx(struct test_spec *test, struct ifobject *ifobje
 	ifobject->umem->base_addr = 0;
 }
 
-static void xsk_populate_fill_ring(struct xsk_umem_info *umem, struct pkt_stream *pkt_stream)
+static void xsk_populate_fill_ring(struct xsk_umem_info *umem, struct pkt_stream *pkt_stream,
+				   bool fill_up)
 {
-	u32 idx = 0, i, buffers_to_fill, nb_pkts;
+	u32 rx_frame_size = umem->frame_size - XDP_PACKET_HEADROOM;
+	u32 idx = 0, filled = 0, buffers_to_fill, nb_pkts;
 	int ret;
 
 	if (umem->num_frames < XSK_RING_PROD__DEFAULT_NUM_DESCS)
@@ -1173,19 +1187,29 @@ static void xsk_populate_fill_ring(struct xsk_umem_info *umem, struct pkt_stream
 	if (ret != buffers_to_fill)
 		exit_with_error(ENOSPC);
 
-	for (i = 0; i < buffers_to_fill; i++) {
+	while (filled < buffers_to_fill) {
 		struct pkt *pkt = pkt_stream_get_next_rx_pkt(pkt_stream, &nb_pkts);
 		u64 addr;
+		u32 i;
+
+		for (i = 0; i < pkt_nb_frags(rx_frame_size, pkt); i++) {
+			if (!pkt) {
+				if (!fill_up)
+					break;
+				addr = filled * umem->frame_size + umem->base_addr;
+			} else if (pkt->offset >= 0) {
+				addr = pkt->offset % umem->frame_size + umem_alloc_buffer(umem);
+			} else {
+				addr = pkt->offset + umem_alloc_buffer(umem);
+			}
 
-		if (!pkt)
-			addr = i * umem->frame_size + umem->base_addr;
-		else if (pkt->offset >= 0)
-			addr = pkt->offset % umem->frame_size + umem_alloc_buffer(umem);
-		else
-			addr = pkt->offset + umem_alloc_buffer(umem);
-		*xsk_ring_prod__fill_addr(&umem->fq, idx++) = addr;
+			*xsk_ring_prod__fill_addr(&umem->fq, idx++) = addr;
+			if (++filled >= buffers_to_fill)
+				break;
+		}
 	}
-	xsk_ring_prod__submit(&umem->fq, i);
+	xsk_ring_prod__submit(&umem->fq, filled);
+	xsk_ring_prod__cancel(&umem->fq, buffers_to_fill - filled);
 
 	pkt_stream_reset(pkt_stream);
 	umem_reset_alloc(umem);
@@ -1220,7 +1244,7 @@ static void thread_common_ops(struct test_spec *test, struct ifobject *ifobject)
 	if (!ifobject->rx_on)
 		return;
 
-	xsk_populate_fill_ring(ifobject->umem, ifobject->pkt_stream);
+	xsk_populate_fill_ring(ifobject->umem, ifobject->pkt_stream, ifobject->use_fill_ring);
 
 	ret = xsk_update_xskmap(ifobject->xskmap, ifobject->xsk->xsk);
 	if (ret)
-- 
2.34.1


Thread overview: 17+ messages
2023-05-16 10:30 [PATCH bpf-next v2 00/10] seltests/xsk: prepare for AF_XDP multi-buffer testing Magnus Karlsson
2023-05-16 10:31 ` [PATCH bpf-next v2 01/10] selftests/xsk: do not change XDP program when not necessary Magnus Karlsson
2023-05-16 10:31 ` [PATCH bpf-next v2 02/10] selftests/xsk: generate simpler packets with variable length Magnus Karlsson
2023-05-16 10:31 ` [PATCH bpf-next v2 03/10] selftests/xsk: add varying payload pattern within packet Magnus Karlsson
2023-05-16 10:31 ` [PATCH bpf-next v2 04/10] selftests/xsk: dump packet at error Magnus Karlsson
2023-05-16 10:31 ` [PATCH bpf-next v2 05/10] selftests/xsk: add packet iterator for tx to packet stream Magnus Karlsson
2023-05-16 10:31 ` [PATCH bpf-next v2 06/10] selftests/xsk: store offset in pkt instead of addr Magnus Karlsson
2023-05-16 10:31 ` [PATCH bpf-next v2 07/10] selftests/xsx: test for huge pages only once Magnus Karlsson
2023-05-16 12:58   ` Maciej Fijalkowski
2023-05-16 13:06     ` Maciej Fijalkowski
2023-05-16 14:26       ` Magnus Karlsson
2023-05-16 14:25     ` Magnus Karlsson
2023-05-16 14:40       ` Maciej Fijalkowski
2023-05-16 10:31 ` Magnus Karlsson [this message]
2023-05-16 10:31 ` [PATCH bpf-next v2 09/10] selftests/xsk: generate data for multi-buffer packets Magnus Karlsson
2023-05-16 10:31 ` [PATCH bpf-next v2 10/10] selftests/xsk: adjust packet pacing for multi-buffer support Magnus Karlsson
2023-05-17  5:40 ` [PATCH bpf-next v2 00/10] seltests/xsk: prepare for AF_XDP multi-buffer testing patchwork-bot+netdevbpf
