* [PATCH bpf-next v3 1/8] selftests/xsk: move pkt_stream to the xsk_socket_info
From: Tushar Vyavahare @ 2023-09-27 13:52 UTC
To: bpf
Cc: netdev, bjorn, magnus.karlsson, maciej.fijalkowski,
jonathan.lemon, davem, kuba, pabeni, ast, daniel,
tirthendu.sarkar, tushar.vyavahare
Move the packet stream from the ifobject struct to the xsk_socket_info
struct to enable the use of different streams for different sockets. This
will facilitate the sending and receiving of data from multiple sockets
simultaneously using the XDP_SHARED_UMEM feature.
Signed-off-by: Tushar Vyavahare <tushar.vyavahare@intel.com>
---
tools/testing/selftests/bpf/xskxceiver.c | 71 ++++++++++++------------
tools/testing/selftests/bpf/xskxceiver.h | 2 +-
2 files changed, 38 insertions(+), 35 deletions(-)
diff --git a/tools/testing/selftests/bpf/xskxceiver.c b/tools/testing/selftests/bpf/xskxceiver.c
index 43e0a5796929..16a09751b093 100644
--- a/tools/testing/selftests/bpf/xskxceiver.c
+++ b/tools/testing/selftests/bpf/xskxceiver.c
@@ -260,7 +260,7 @@ static int __xsk_configure_socket(struct xsk_socket_info *xsk, struct xsk_umem_i
cfg.bind_flags = ifobject->bind_flags;
if (shared)
cfg.bind_flags |= XDP_SHARED_UMEM;
- if (ifobject->pkt_stream && ifobject->mtu > MAX_ETH_PKT_SIZE)
+ if (ifobject->mtu > MAX_ETH_PKT_SIZE)
cfg.bind_flags |= XDP_USE_SG;
txr = ifobject->tx_on ? &xsk->tx : NULL;
@@ -429,11 +429,9 @@ static void __test_spec_init(struct test_spec *test, struct ifobject *ifobj_tx,
if (i == 0) {
ifobj->rx_on = false;
ifobj->tx_on = true;
- ifobj->pkt_stream = test->tx_pkt_stream_default;
} else {
ifobj->rx_on = true;
ifobj->tx_on = false;
- ifobj->pkt_stream = test->rx_pkt_stream_default;
}
memset(ifobj->umem, 0, sizeof(*ifobj->umem));
@@ -443,6 +441,10 @@ static void __test_spec_init(struct test_spec *test, struct ifobject *ifobj_tx,
for (j = 0; j < MAX_SOCKETS; j++) {
memset(&ifobj->xsk_arr[j], 0, sizeof(ifobj->xsk_arr[j]));
ifobj->xsk_arr[j].rxqsize = XSK_RING_CONS__DEFAULT_NUM_DESCS;
+ if (i == 0)
+ ifobj->xsk_arr[j].pkt_stream = test->tx_pkt_stream_default;
+ else
+ ifobj->xsk_arr[j].pkt_stream = test->rx_pkt_stream_default;
}
}
@@ -557,17 +559,17 @@ static void pkt_stream_delete(struct pkt_stream *pkt_stream)
static void pkt_stream_restore_default(struct test_spec *test)
{
- struct pkt_stream *tx_pkt_stream = test->ifobj_tx->pkt_stream;
- struct pkt_stream *rx_pkt_stream = test->ifobj_rx->pkt_stream;
+ struct pkt_stream *tx_pkt_stream = test->ifobj_tx->xsk->pkt_stream;
+ struct pkt_stream *rx_pkt_stream = test->ifobj_rx->xsk->pkt_stream;
if (tx_pkt_stream != test->tx_pkt_stream_default) {
- pkt_stream_delete(test->ifobj_tx->pkt_stream);
- test->ifobj_tx->pkt_stream = test->tx_pkt_stream_default;
+ pkt_stream_delete(test->ifobj_tx->xsk->pkt_stream);
+ test->ifobj_tx->xsk->pkt_stream = test->tx_pkt_stream_default;
}
if (rx_pkt_stream != test->rx_pkt_stream_default) {
- pkt_stream_delete(test->ifobj_rx->pkt_stream);
- test->ifobj_rx->pkt_stream = test->rx_pkt_stream_default;
+ pkt_stream_delete(test->ifobj_rx->xsk->pkt_stream);
+ test->ifobj_rx->xsk->pkt_stream = test->rx_pkt_stream_default;
}
}
@@ -674,9 +676,9 @@ static void pkt_stream_replace(struct test_spec *test, u32 nb_pkts, u32 pkt_len)
struct pkt_stream *pkt_stream;
pkt_stream = pkt_stream_generate(test->ifobj_tx->umem, nb_pkts, pkt_len);
- test->ifobj_tx->pkt_stream = pkt_stream;
+ test->ifobj_tx->xsk->pkt_stream = pkt_stream;
pkt_stream = pkt_stream_generate(test->ifobj_rx->umem, nb_pkts, pkt_len);
- test->ifobj_rx->pkt_stream = pkt_stream;
+ test->ifobj_rx->xsk->pkt_stream = pkt_stream;
}
static void __pkt_stream_replace_half(struct ifobject *ifobj, u32 pkt_len,
@@ -686,11 +688,11 @@ static void __pkt_stream_replace_half(struct ifobject *ifobj, u32 pkt_len,
struct pkt_stream *pkt_stream;
u32 i;
- pkt_stream = pkt_stream_clone(umem, ifobj->pkt_stream);
- for (i = 1; i < ifobj->pkt_stream->nb_pkts; i += 2)
+ pkt_stream = pkt_stream_clone(umem, ifobj->xsk->pkt_stream);
+ for (i = 1; i < ifobj->xsk->pkt_stream->nb_pkts; i += 2)
pkt_set(umem, &pkt_stream->pkts[i], offset, pkt_len);
- ifobj->pkt_stream = pkt_stream;
+ ifobj->xsk->pkt_stream = pkt_stream;
}
static void pkt_stream_replace_half(struct test_spec *test, u32 pkt_len, int offset)
@@ -702,12 +704,12 @@ static void pkt_stream_replace_half(struct test_spec *test, u32 pkt_len, int off
static void pkt_stream_receive_half(struct test_spec *test)
{
struct xsk_umem_info *umem = test->ifobj_rx->umem;
- struct pkt_stream *pkt_stream = test->ifobj_tx->pkt_stream;
+ struct pkt_stream *pkt_stream = test->ifobj_tx->xsk->pkt_stream;
u32 i;
- test->ifobj_rx->pkt_stream = pkt_stream_generate(umem, pkt_stream->nb_pkts,
- pkt_stream->pkts[0].len);
- pkt_stream = test->ifobj_rx->pkt_stream;
+ test->ifobj_rx->xsk->pkt_stream = pkt_stream_generate(umem, pkt_stream->nb_pkts,
+ pkt_stream->pkts[0].len);
+ pkt_stream = test->ifobj_rx->xsk->pkt_stream;
for (i = 1; i < pkt_stream->nb_pkts; i += 2)
pkt_stream->pkts[i].valid = false;
}
@@ -796,10 +798,10 @@ static void pkt_stream_generate_custom(struct test_spec *test, struct pkt *pkts,
struct pkt_stream *pkt_stream;
pkt_stream = __pkt_stream_generate_custom(test->ifobj_tx, pkts, nb_pkts, true);
- test->ifobj_tx->pkt_stream = pkt_stream;
+ test->ifobj_tx->xsk->pkt_stream = pkt_stream;
pkt_stream = __pkt_stream_generate_custom(test->ifobj_rx, pkts, nb_pkts, false);
- test->ifobj_rx->pkt_stream = pkt_stream;
+ test->ifobj_rx->xsk->pkt_stream = pkt_stream;
}
static void pkt_print_data(u32 *data, u32 cnt)
@@ -1007,7 +1009,7 @@ static int complete_pkts(struct xsk_socket_info *xsk, int batch_size)
static int receive_pkts(struct test_spec *test, struct pollfd *fds)
{
struct timeval tv_end, tv_now, tv_timeout = {THREAD_TMOUT, 0};
- struct pkt_stream *pkt_stream = test->ifobj_rx->pkt_stream;
+ struct pkt_stream *pkt_stream = test->ifobj_rx->xsk->pkt_stream;
struct xsk_socket_info *xsk = test->ifobj_rx->xsk;
u32 idx_rx = 0, idx_fq = 0, rcvd, pkts_sent = 0;
struct ifobject *ifobj = test->ifobj_rx;
@@ -1139,7 +1141,7 @@ static int receive_pkts(struct test_spec *test, struct pollfd *fds)
static int __send_pkts(struct ifobject *ifobject, struct pollfd *fds, bool timeout)
{
u32 i, idx = 0, valid_pkts = 0, valid_frags = 0, buffer_len;
- struct pkt_stream *pkt_stream = ifobject->pkt_stream;
+ struct pkt_stream *pkt_stream = ifobject->xsk->pkt_stream;
struct xsk_socket_info *xsk = ifobject->xsk;
struct xsk_umem_info *umem = ifobject->umem;
bool use_poll = ifobject->use_poll;
@@ -1283,7 +1285,7 @@ static int wait_for_tx_completion(struct xsk_socket_info *xsk)
static int send_pkts(struct test_spec *test, struct ifobject *ifobject)
{
- struct pkt_stream *pkt_stream = ifobject->pkt_stream;
+ struct pkt_stream *pkt_stream = ifobject->xsk->pkt_stream;
bool timeout = !is_umem_valid(test->ifobj_rx);
struct pollfd fds = { };
u32 ret;
@@ -1347,8 +1349,8 @@ static int validate_rx_dropped(struct ifobject *ifobject)
* packet being invalid). Since the last packet may or may not have
* been dropped already, both outcomes must be allowed.
*/
- if (stats.rx_dropped == ifobject->pkt_stream->nb_pkts / 2 ||
- stats.rx_dropped == ifobject->pkt_stream->nb_pkts / 2 - 1)
+ if (stats.rx_dropped == ifobject->xsk->pkt_stream->nb_pkts / 2 ||
+ stats.rx_dropped == ifobject->xsk->pkt_stream->nb_pkts / 2 - 1)
return TEST_PASS;
return TEST_FAILURE;
@@ -1412,9 +1414,10 @@ static int validate_tx_invalid_descs(struct ifobject *ifobject)
return TEST_FAILURE;
}
- if (stats.tx_invalid_descs != ifobject->pkt_stream->nb_pkts / 2) {
+ if (stats.tx_invalid_descs != ifobject->xsk->pkt_stream->nb_pkts / 2) {
ksft_print_msg("[%s] tx_invalid_descs incorrect. Got [%u] expected [%u]\n",
- __func__, stats.tx_invalid_descs, ifobject->pkt_stream->nb_pkts);
+ __func__, stats.tx_invalid_descs,
+ ifobject->xsk->pkt_stream->nb_pkts);
return TEST_FAILURE;
}
@@ -1528,7 +1531,7 @@ static void thread_common_ops(struct test_spec *test, struct ifobject *ifobject)
if (!ifobject->rx_on)
return;
- xsk_populate_fill_ring(ifobject->umem, ifobject->pkt_stream, ifobject->use_fill_ring);
+ xsk_populate_fill_ring(ifobject->umem, ifobject->xsk->pkt_stream, ifobject->use_fill_ring);
ret = xsk_update_xskmap(ifobject->xskmap, ifobject->xsk->xsk);
if (ret)
@@ -1691,11 +1694,11 @@ static int __testapp_validate_traffic(struct test_spec *test, struct ifobject *i
if (ifobj2) {
if (pthread_barrier_init(&barr, NULL, 2))
exit_with_error(errno);
- pkt_stream_reset(ifobj2->pkt_stream);
+ pkt_stream_reset(ifobj2->xsk->pkt_stream);
}
test->current_step++;
- pkt_stream_reset(ifobj1->pkt_stream);
+ pkt_stream_reset(ifobj1->xsk->pkt_stream);
pkts_in_flight = 0;
signal(SIGUSR1, handler);
@@ -1852,8 +1855,8 @@ static int testapp_stats_tx_invalid_descs(struct test_spec *test)
static int testapp_stats_rx_full(struct test_spec *test)
{
pkt_stream_replace(test, DEFAULT_UMEM_BUFFERS + DEFAULT_UMEM_BUFFERS / 2, MIN_PKT_SIZE);
- test->ifobj_rx->pkt_stream = pkt_stream_generate(test->ifobj_rx->umem,
- DEFAULT_UMEM_BUFFERS, MIN_PKT_SIZE);
+ test->ifobj_rx->xsk->pkt_stream = pkt_stream_generate(test->ifobj_rx->umem,
+ DEFAULT_UMEM_BUFFERS, MIN_PKT_SIZE);
test->ifobj_rx->xsk->rxqsize = DEFAULT_UMEM_BUFFERS;
test->ifobj_rx->release_rx = false;
@@ -1864,8 +1867,8 @@ static int testapp_stats_rx_full(struct test_spec *test)
static int testapp_stats_fill_empty(struct test_spec *test)
{
pkt_stream_replace(test, DEFAULT_UMEM_BUFFERS + DEFAULT_UMEM_BUFFERS / 2, MIN_PKT_SIZE);
- test->ifobj_rx->pkt_stream = pkt_stream_generate(test->ifobj_rx->umem,
- DEFAULT_UMEM_BUFFERS, MIN_PKT_SIZE);
+ test->ifobj_rx->xsk->pkt_stream = pkt_stream_generate(test->ifobj_rx->umem,
+ DEFAULT_UMEM_BUFFERS, MIN_PKT_SIZE);
test->ifobj_rx->use_fill_ring = false;
test->ifobj_rx->validation_func = validate_fill_empty;
diff --git a/tools/testing/selftests/bpf/xskxceiver.h b/tools/testing/selftests/bpf/xskxceiver.h
index 8015aeea839d..06b82b39c59c 100644
--- a/tools/testing/selftests/bpf/xskxceiver.h
+++ b/tools/testing/selftests/bpf/xskxceiver.h
@@ -87,6 +87,7 @@ struct xsk_socket_info {
struct xsk_ring_prod tx;
struct xsk_umem_info *umem;
struct xsk_socket *xsk;
+ struct pkt_stream *pkt_stream;
u32 outstanding_tx;
u32 rxqsize;
};
@@ -120,7 +121,6 @@ struct ifobject {
struct xsk_umem_info *umem;
thread_func_t func_ptr;
validation_func_t validation_func;
- struct pkt_stream *pkt_stream;
struct xsk_xdp_progs *xdp_progs;
struct bpf_map *xskmap;
struct bpf_program *xdp_prog;
--
2.34.1
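To make the new layout easier to follow, here is a trimmed sketch of how the structures relate after this patch; the field lists are illustrative only, not the full definitions from xskxceiver.h:

	struct pkt_stream;

	struct xsk_socket_info {
		struct pkt_stream *pkt_stream;   /* moved here from struct ifobject */
		/* ... rings, umem, outstanding_tx, rxqsize ... */
	};

	struct ifobject {
		struct xsk_socket_info *xsk;     /* currently active socket */
		struct xsk_socket_info *xsk_arr; /* all sockets on this interface */
		/* ... */
	};

	/* old access: ifobject->pkt_stream
	 * new access: ifobject->xsk->pkt_stream, or ifobject->xsk_arr[i].pkt_stream
	 */

With the stream hanging off the socket, two sockets that share one umem can carry independent streams, which is what the shared umem test at the end of the series relies on.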
* [PATCH bpf-next v3 2/8] selftests/xsk: rename xsk_xdp_metadata.h to xsk_xdp_common.h
From: Tushar Vyavahare @ 2023-09-27 13:52 UTC
To: bpf
Cc: netdev, bjorn, magnus.karlsson, maciej.fijalkowski,
jonathan.lemon, davem, kuba, pabeni, ast, daniel,
tirthendu.sarkar, tushar.vyavahare
Rename the header file to a generic name so that it can be used by all
future XDP programs. Add include guards to the xsk_xdp_common.h header file.
Signed-off-by: Tushar Vyavahare <tushar.vyavahare@intel.com>
---
tools/testing/selftests/bpf/progs/xsk_xdp_progs.c | 2 +-
.../selftests/bpf/{xsk_xdp_metadata.h => xsk_xdp_common.h} | 5 +++++
tools/testing/selftests/bpf/xskxceiver.c | 2 +-
3 files changed, 7 insertions(+), 2 deletions(-)
rename tools/testing/selftests/bpf/{xsk_xdp_metadata.h => xsk_xdp_common.h} (55%)
diff --git a/tools/testing/selftests/bpf/progs/xsk_xdp_progs.c b/tools/testing/selftests/bpf/progs/xsk_xdp_progs.c
index 24369f242853..734f231a8534 100644
--- a/tools/testing/selftests/bpf/progs/xsk_xdp_progs.c
+++ b/tools/testing/selftests/bpf/progs/xsk_xdp_progs.c
@@ -3,7 +3,7 @@
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
-#include "xsk_xdp_metadata.h"
+#include "xsk_xdp_common.h"
struct {
__uint(type, BPF_MAP_TYPE_XSKMAP);
diff --git a/tools/testing/selftests/bpf/xsk_xdp_metadata.h b/tools/testing/selftests/bpf/xsk_xdp_common.h
similarity index 55%
rename from tools/testing/selftests/bpf/xsk_xdp_metadata.h
rename to tools/testing/selftests/bpf/xsk_xdp_common.h
index 943133da378a..f55d61625336 100644
--- a/tools/testing/selftests/bpf/xsk_xdp_metadata.h
+++ b/tools/testing/selftests/bpf/xsk_xdp_common.h
@@ -1,5 +1,10 @@
/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef XSK_XDP_COMMON_H_
+#define XSK_XDP_COMMON_H_
+
struct xdp_info {
__u64 count;
} __attribute__((aligned(32)));
+
+#endif /* XSK_XDP_COMMON_H_ */
diff --git a/tools/testing/selftests/bpf/xskxceiver.c b/tools/testing/selftests/bpf/xskxceiver.c
index 16a09751b093..ad1f7f078f5f 100644
--- a/tools/testing/selftests/bpf/xskxceiver.c
+++ b/tools/testing/selftests/bpf/xskxceiver.c
@@ -102,7 +102,7 @@
#include <bpf/bpf.h>
#include <linux/filter.h>
#include "../kselftest.h"
-#include "xsk_xdp_metadata.h"
+#include "xsk_xdp_common.h"
static const char *MAC1 = "\x00\x0A\x56\x9E\xEE\x62";
static const char *MAC2 = "\x00\x0A\x56\x9E\xEE\x61";
--
2.34.1
* [PATCH bpf-next v3 3/8] selftests/xsk: move src_mac and dst_mac to the xsk_socket_info
From: Tushar Vyavahare @ 2023-09-27 13:52 UTC
To: bpf
Cc: netdev, bjorn, magnus.karlsson, maciej.fijalkowski,
jonathan.lemon, davem, kuba, pabeni, ast, daniel,
tirthendu.sarkar, tushar.vyavahare
Move the src_mac and dst_mac fields from the ifobject structure to the
xsk_socket_info structure to achieve per-socket MAC address assignment.
This is required in order to steer traffic to the various sockets in
subsequent patches.
Signed-off-by: Tushar Vyavahare <tushar.vyavahare@intel.com>
---
tools/testing/selftests/bpf/xskxceiver.c | 34 +++++++++++-------------
tools/testing/selftests/bpf/xskxceiver.h | 7 +++--
2 files changed, 21 insertions(+), 20 deletions(-)
diff --git a/tools/testing/selftests/bpf/xskxceiver.c b/tools/testing/selftests/bpf/xskxceiver.c
index ad1f7f078f5f..e26f942a7b9e 100644
--- a/tools/testing/selftests/bpf/xskxceiver.c
+++ b/tools/testing/selftests/bpf/xskxceiver.c
@@ -104,9 +104,6 @@
#include "../kselftest.h"
#include "xsk_xdp_common.h"
-static const char *MAC1 = "\x00\x0A\x56\x9E\xEE\x62";
-static const char *MAC2 = "\x00\x0A\x56\x9E\xEE\x61";
-
static bool opt_verbose;
static bool opt_print_tests;
static enum test_mode opt_mode = TEST_MODE_ALL;
@@ -159,10 +156,10 @@ static void write_payload(void *dest, u32 pkt_nb, u32 start, u32 size)
ptr[i] = htonl(pkt_nb << 16 | (i + start));
}
-static void gen_eth_hdr(struct ifobject *ifobject, struct ethhdr *eth_hdr)
+static void gen_eth_hdr(struct xsk_socket_info *xsk, struct ethhdr *eth_hdr)
{
- memcpy(eth_hdr->h_dest, ifobject->dst_mac, ETH_ALEN);
- memcpy(eth_hdr->h_source, ifobject->src_mac, ETH_ALEN);
+ memcpy(eth_hdr->h_dest, xsk->dst_mac, ETH_ALEN);
+ memcpy(eth_hdr->h_source, xsk->src_mac, ETH_ALEN);
eth_hdr->h_proto = htons(ETH_P_LOOPBACK);
}
@@ -445,6 +442,11 @@ static void __test_spec_init(struct test_spec *test, struct ifobject *ifobj_tx,
ifobj->xsk_arr[j].pkt_stream = test->tx_pkt_stream_default;
else
ifobj->xsk_arr[j].pkt_stream = test->rx_pkt_stream_default;
+
+ memcpy(ifobj->xsk_arr[j].src_mac, g_mac, ETH_ALEN);
+ memcpy(ifobj->xsk_arr[j].dst_mac, g_mac, ETH_ALEN);
+ ifobj->xsk_arr[j].src_mac[5] += ((j * 2) + 0);
+ ifobj->xsk_arr[j].dst_mac[5] += ((j * 2) + 1);
}
}
@@ -726,16 +728,16 @@ static void pkt_stream_cancel(struct pkt_stream *pkt_stream)
pkt_stream->current_pkt_nb--;
}
-static void pkt_generate(struct ifobject *ifobject, u64 addr, u32 len, u32 pkt_nb,
- u32 bytes_written)
+static void pkt_generate(struct xsk_socket_info *xsk, struct xsk_umem_info *umem, u64 addr, u32 len,
+ u32 pkt_nb, u32 bytes_written)
{
- void *data = xsk_umem__get_data(ifobject->umem->buffer, addr);
+ void *data = xsk_umem__get_data(umem->buffer, addr);
if (len < MIN_PKT_SIZE)
return;
if (!bytes_written) {
- gen_eth_hdr(ifobject, data);
+ gen_eth_hdr(xsk, data);
len -= PKT_HDR_SIZE;
data += PKT_HDR_SIZE;
@@ -1209,7 +1211,7 @@ static int __send_pkts(struct ifobject *ifobject, struct pollfd *fds, bool timeo
tx_desc->options = 0;
}
if (pkt->valid)
- pkt_generate(ifobject, tx_desc->addr, tx_desc->len, pkt->pkt_nb,
+ pkt_generate(xsk, umem, tx_desc->addr, tx_desc->len, pkt->pkt_nb,
bytes_written);
bytes_written += tx_desc->len;
@@ -2120,15 +2122,11 @@ static bool hugepages_present(void)
return true;
}
-static void init_iface(struct ifobject *ifobj, const char *dst_mac, const char *src_mac,
- thread_func_t func_ptr)
+static void init_iface(struct ifobject *ifobj, thread_func_t func_ptr)
{
LIBBPF_OPTS(bpf_xdp_query_opts, query_opts);
int err;
- memcpy(ifobj->dst_mac, dst_mac, ETH_ALEN);
- memcpy(ifobj->src_mac, src_mac, ETH_ALEN);
-
ifobj->func_ptr = func_ptr;
err = xsk_load_xdp_programs(ifobj);
@@ -2404,8 +2402,8 @@ int main(int argc, char **argv)
modes++;
}
- init_iface(ifobj_rx, MAC1, MAC2, worker_testapp_validate_rx);
- init_iface(ifobj_tx, MAC2, MAC1, worker_testapp_validate_tx);
+ init_iface(ifobj_rx, worker_testapp_validate_rx);
+ init_iface(ifobj_tx, worker_testapp_validate_tx);
test_spec_init(&test, ifobj_tx, ifobj_rx, 0, &tests[0]);
tx_pkt_stream_default = pkt_stream_generate(ifobj_tx->umem, DEFAULT_PKT_CNT, MIN_PKT_SIZE);
diff --git a/tools/testing/selftests/bpf/xskxceiver.h b/tools/testing/selftests/bpf/xskxceiver.h
index 06b82b39c59c..3d494adef792 100644
--- a/tools/testing/selftests/bpf/xskxceiver.h
+++ b/tools/testing/selftests/bpf/xskxceiver.h
@@ -59,6 +59,7 @@
#define HUGEPAGE_SIZE (2 * 1024 * 1024)
#define PKT_DUMP_NB_TO_PRINT 16
#define RUN_ALL_TESTS UINT_MAX
+#define NUM_MAC_ADDRESSES 4
#define print_verbose(x...) do { if (opt_verbose) ksft_print_msg(x); } while (0)
@@ -90,6 +91,8 @@ struct xsk_socket_info {
struct pkt_stream *pkt_stream;
u32 outstanding_tx;
u32 rxqsize;
+ u8 dst_mac[ETH_ALEN];
+ u8 src_mac[ETH_ALEN];
};
struct pkt {
@@ -140,8 +143,6 @@ struct ifobject {
bool unaligned_supp;
bool multi_buff_supp;
bool multi_buff_zc_supp;
- u8 dst_mac[ETH_ALEN];
- u8 src_mac[ETH_ALEN];
};
struct test_spec {
@@ -168,4 +169,6 @@ pthread_mutex_t pacing_mutex = PTHREAD_MUTEX_INITIALIZER;
int pkts_in_flight;
+static const u8 g_mac[ETH_ALEN] = {0x55, 0x44, 0x33, 0x22, 0x11, 0x00};
+
#endif /* XSKXCEIVER_H_ */
--
2.34.1
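As a quick illustration of the address scheme set up above, this small standalone program (illustrative only, not part of the selftest) reproduces the arithmetic with the g_mac base address added to xskxceiver.h:

	#include <stdio.h>
	#include <string.h>

	#define ETH_ALEN 6

	int main(void)
	{
		const unsigned char g_mac[ETH_ALEN] = {0x55, 0x44, 0x33, 0x22, 0x11, 0x00};
		unsigned char src[ETH_ALEN], dst[ETH_ALEN];
		int j;

		for (j = 0; j < 2; j++) {	/* two sockets per interface */
			memcpy(src, g_mac, ETH_ALEN);
			memcpy(dst, g_mac, ETH_ALEN);
			src[5] += (j * 2) + 0;	/* same arithmetic as in __test_spec_init() */
			dst[5] += (j * 2) + 1;
			printf("socket %d: src ...:%02x:%02x dst ...:%02x:%02x\n",
			       j, src[4], src[5], dst[4], dst[5]);
		}
		return 0;
	}

It prints src ...:11:00 / dst ...:11:01 for socket 0 and src ...:11:02 / dst ...:11:03 for socket 1, so every socket ends up with a unique source/destination pair that later patches can key on.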
* [PATCH bpf-next v3 4/8] selftests/xsk: iterate over all the sockets in the receive pkts function
From: Tushar Vyavahare @ 2023-09-27 13:52 UTC
To: bpf
Cc: netdev, bjorn, magnus.karlsson, maciej.fijalkowski,
jonathan.lemon, davem, kuba, pabeni, ast, daniel,
tirthendu.sarkar, tushar.vyavahare
Improve the receive_pkts() function to enable it to receive packets from
multiple sockets. Define a sock_num variable to iterate through all the
sockets in the Rx path. Add nb_valid_entries to check that the expected
number of packets is received.
Revise the __receive_pkts() function to inspect the receive ring only
once, handle any received packets, and promptly return. Implement a
bitmap, sized by the number of sockets, to track which sockets have
received all of their expected packets. Update the Makefile to include
find_bit.c when compiling xskxceiver.
Signed-off-by: Tushar Vyavahare <tushar.vyavahare@intel.com>
---
tools/testing/selftests/bpf/Makefile | 4 +-
tools/testing/selftests/bpf/xskxceiver.c | 285 ++++++++++++++---------
tools/testing/selftests/bpf/xskxceiver.h | 2 +
3 files changed, 178 insertions(+), 113 deletions(-)
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index caede9b574cb..b9a30ff99208 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -640,7 +640,9 @@ $(OUTPUT)/test_verifier: test_verifier.c verifier/tests.h $(BPFOBJ) | $(OUTPUT)
$(call msg,BINARY,,$@)
$(Q)$(CC) $(CFLAGS) $(filter %.a %.o %.c,$^) $(LDLIBS) -o $@
-$(OUTPUT)/xskxceiver: xskxceiver.c xskxceiver.h $(OUTPUT)/xsk.o $(OUTPUT)/xsk_xdp_progs.skel.h $(BPFOBJ) | $(OUTPUT)
+# Include find_bit.c to compile xskxceiver.
+EXTRA_SRC := $(TOOLSDIR)/lib/find_bit.c
+$(OUTPUT)/xskxceiver: $(EXTRA_SRC) xskxceiver.c xskxceiver.h $(OUTPUT)/xsk.o $(OUTPUT)/xsk_xdp_progs.skel.h $(BPFOBJ) | $(OUTPUT)
$(call msg,BINARY,,$@)
$(Q)$(CC) $(CFLAGS) $(filter %.a %.o %.c,$^) $(LDLIBS) -o $@
diff --git a/tools/testing/selftests/bpf/xskxceiver.c b/tools/testing/selftests/bpf/xskxceiver.c
index e26f942a7b9e..6d0cd0cc7b5f 100644
--- a/tools/testing/selftests/bpf/xskxceiver.c
+++ b/tools/testing/selftests/bpf/xskxceiver.c
@@ -80,6 +80,7 @@
#include <linux/if_ether.h>
#include <linux/mman.h>
#include <linux/netdev.h>
+#include <linux/bitmap.h>
#include <arpa/inet.h>
#include <net/if.h>
#include <locale.h>
@@ -530,8 +531,10 @@ static int test_spec_set_mtu(struct test_spec *test, int mtu)
static void pkt_stream_reset(struct pkt_stream *pkt_stream)
{
- if (pkt_stream)
+ if (pkt_stream) {
pkt_stream->current_pkt_nb = 0;
+ pkt_stream->nb_rx_pkts = 0;
+ }
}
static struct pkt *pkt_stream_get_next_tx_pkt(struct pkt_stream *pkt_stream)
@@ -631,14 +634,16 @@ static u32 pkt_nb_frags(u32 frame_size, struct pkt_stream *pkt_stream, struct pk
return nb_frags;
}
-static void pkt_set(struct xsk_umem_info *umem, struct pkt *pkt, int offset, u32 len)
+static void pkt_set(struct pkt_stream *pkt_stream, struct pkt *pkt, int offset, u32 len)
{
pkt->offset = offset;
pkt->len = len;
- if (len > MAX_ETH_JUMBO_SIZE)
+ if (len > MAX_ETH_JUMBO_SIZE) {
pkt->valid = false;
- else
+ } else {
pkt->valid = true;
+ pkt_stream->nb_valid_entries++;
+ }
}
static u32 pkt_get_buffer_len(struct xsk_umem_info *umem, u32 len)
@@ -660,7 +665,7 @@ static struct pkt_stream *pkt_stream_generate(struct xsk_umem_info *umem, u32 nb
for (i = 0; i < nb_pkts; i++) {
struct pkt *pkt = &pkt_stream->pkts[i];
- pkt_set(umem, pkt, 0, pkt_len);
+ pkt_set(pkt_stream, pkt, 0, pkt_len);
pkt->pkt_nb = i;
}
@@ -692,9 +697,10 @@ static void __pkt_stream_replace_half(struct ifobject *ifobj, u32 pkt_len,
pkt_stream = pkt_stream_clone(umem, ifobj->xsk->pkt_stream);
for (i = 1; i < ifobj->xsk->pkt_stream->nb_pkts; i += 2)
- pkt_set(umem, &pkt_stream->pkts[i], offset, pkt_len);
+ pkt_set(pkt_stream, &pkt_stream->pkts[i], offset, pkt_len);
ifobj->xsk->pkt_stream = pkt_stream;
+ pkt_stream->nb_valid_entries /= 2;
}
static void pkt_stream_replace_half(struct test_spec *test, u32 pkt_len, int offset)
@@ -714,6 +720,8 @@ static void pkt_stream_receive_half(struct test_spec *test)
pkt_stream = test->ifobj_rx->xsk->pkt_stream;
for (i = 1; i < pkt_stream->nb_pkts; i += 2)
pkt_stream->pkts[i].valid = false;
+
+ pkt_stream->nb_valid_entries /= 2;
}
static u64 pkt_get_addr(struct pkt *pkt, struct xsk_umem_info *umem)
@@ -787,6 +795,10 @@ static struct pkt_stream *__pkt_stream_generate_custom(struct ifobject *ifobj, s
if (pkt->valid && pkt->len > pkt_stream->max_pkt_len)
pkt_stream->max_pkt_len = pkt->len;
+
+ if (pkt->valid)
+ pkt_stream->nb_valid_entries++;
+
pkt_nb++;
}
@@ -1008,133 +1020,178 @@ static int complete_pkts(struct xsk_socket_info *xsk, int batch_size)
return TEST_PASS;
}
-static int receive_pkts(struct test_spec *test, struct pollfd *fds)
+static int __receive_pkts(struct test_spec *test, struct xsk_socket_info *xsk)
{
- struct timeval tv_end, tv_now, tv_timeout = {THREAD_TMOUT, 0};
- struct pkt_stream *pkt_stream = test->ifobj_rx->xsk->pkt_stream;
- struct xsk_socket_info *xsk = test->ifobj_rx->xsk;
+ u32 frags_processed = 0, nb_frags = 0, pkt_len = 0;
u32 idx_rx = 0, idx_fq = 0, rcvd, pkts_sent = 0;
+ struct pkt_stream *pkt_stream = xsk->pkt_stream;
struct ifobject *ifobj = test->ifobj_rx;
struct xsk_umem_info *umem = xsk->umem;
+ struct pollfd fds = { };
struct pkt *pkt;
+ u64 first_addr;
int ret;
- ret = gettimeofday(&tv_now, NULL);
- if (ret)
- exit_with_error(errno);
- timeradd(&tv_now, &tv_timeout, &tv_end);
-
- pkt = pkt_stream_get_next_rx_pkt(pkt_stream, &pkts_sent);
- while (pkt) {
- u32 frags_processed = 0, nb_frags = 0, pkt_len = 0;
- u64 first_addr;
+ fds.fd = xsk_socket__fd(xsk->xsk);
+ fds.events = POLLIN;
- ret = gettimeofday(&tv_now, NULL);
- if (ret)
- exit_with_error(errno);
- if (timercmp(&tv_now, &tv_end, >)) {
- ksft_print_msg("ERROR: [%s] Receive loop timed out\n", __func__);
- return TEST_FAILURE;
- }
+ ret = kick_rx(xsk);
+ if (ret)
+ return TEST_FAILURE;
- ret = kick_rx(xsk);
- if (ret)
+ if (ifobj->use_poll) {
+ ret = poll(&fds, 1, POLL_TMOUT);
+ if (ret < 0)
return TEST_FAILURE;
- if (ifobj->use_poll) {
- ret = poll(fds, 1, POLL_TMOUT);
- if (ret < 0)
- return TEST_FAILURE;
-
- if (!ret) {
- if (!is_umem_valid(test->ifobj_tx))
- return TEST_PASS;
-
- ksft_print_msg("ERROR: [%s] Poll timed out\n", __func__);
- return TEST_FAILURE;
- }
+ if (!ret) {
+ if (!is_umem_valid(test->ifobj_tx))
+ return TEST_PASS;
- if (!(fds->revents & POLLIN))
- continue;
+ ksft_print_msg("ERROR: [%s] Poll timed out\n", __func__);
+ return TEST_CONTINUE;
}
- rcvd = xsk_ring_cons__peek(&xsk->rx, BATCH_SIZE, &idx_rx);
- if (!rcvd)
- continue;
+ if (!(fds.revents & POLLIN))
+ return TEST_CONTINUE;
+ }
- if (ifobj->use_fill_ring) {
- ret = xsk_ring_prod__reserve(&umem->fq, rcvd, &idx_fq);
- while (ret != rcvd) {
- if (xsk_ring_prod__needs_wakeup(&umem->fq)) {
- ret = poll(fds, 1, POLL_TMOUT);
- if (ret < 0)
- return TEST_FAILURE;
- }
- ret = xsk_ring_prod__reserve(&umem->fq, rcvd, &idx_fq);
+ rcvd = xsk_ring_cons__peek(&xsk->rx, BATCH_SIZE, &idx_rx);
+ if (!rcvd)
+ return TEST_CONTINUE;
+
+ if (ifobj->use_fill_ring) {
+ ret = xsk_ring_prod__reserve(&umem->fq, rcvd, &idx_fq);
+ while (ret != rcvd) {
+ if (xsk_ring_prod__needs_wakeup(&umem->fq)) {
+ ret = poll(&fds, 1, POLL_TMOUT);
+ if (ret < 0)
+ return TEST_FAILURE;
}
+ ret = xsk_ring_prod__reserve(&umem->fq, rcvd, &idx_fq);
}
+ }
- while (frags_processed < rcvd) {
- const struct xdp_desc *desc = xsk_ring_cons__rx_desc(&xsk->rx, idx_rx++);
- u64 addr = desc->addr, orig;
+ while (frags_processed < rcvd) {
+ const struct xdp_desc *desc = xsk_ring_cons__rx_desc(&xsk->rx, idx_rx++);
+ u64 addr = desc->addr, orig;
- orig = xsk_umem__extract_addr(addr);
- addr = xsk_umem__add_offset_to_addr(addr);
+ orig = xsk_umem__extract_addr(addr);
+ addr = xsk_umem__add_offset_to_addr(addr);
+ if (!nb_frags) {
+ pkt = pkt_stream_get_next_rx_pkt(pkt_stream, &pkts_sent);
if (!pkt) {
ksft_print_msg("[%s] received too many packets addr: %lx len %u\n",
__func__, addr, desc->len);
return TEST_FAILURE;
}
+ }
- print_verbose("Rx: addr: %lx len: %u options: %u pkt_nb: %u valid: %u\n",
- addr, desc->len, desc->options, pkt->pkt_nb, pkt->valid);
+ print_verbose("Rx: addr: %lx len: %u options: %u pkt_nb: %u valid: %u\n",
+ addr, desc->len, desc->options, pkt->pkt_nb, pkt->valid);
- if (!is_frag_valid(umem, addr, desc->len, pkt->pkt_nb, pkt_len) ||
- !is_offset_correct(umem, pkt, addr) ||
- (ifobj->use_metadata && !is_metadata_correct(pkt, umem->buffer, addr)))
- return TEST_FAILURE;
+ if (!is_frag_valid(umem, addr, desc->len, pkt->pkt_nb, pkt_len) ||
+ !is_offset_correct(umem, pkt, addr) || (ifobj->use_metadata &&
+ !is_metadata_correct(pkt, umem->buffer, addr)))
+ return TEST_FAILURE;
- if (!nb_frags++)
- first_addr = addr;
- frags_processed++;
- pkt_len += desc->len;
- if (ifobj->use_fill_ring)
- *xsk_ring_prod__fill_addr(&umem->fq, idx_fq++) = orig;
+ if (!nb_frags++)
+ first_addr = addr;
+ frags_processed++;
+ pkt_len += desc->len;
+ if (ifobj->use_fill_ring)
+ *xsk_ring_prod__fill_addr(&umem->fq, idx_fq++) = orig;
- if (pkt_continues(desc->options))
- continue;
+ if (pkt_continues(desc->options))
+ continue;
- /* The complete packet has been received */
- if (!is_pkt_valid(pkt, umem->buffer, first_addr, pkt_len) ||
- !is_offset_correct(umem, pkt, addr))
- return TEST_FAILURE;
+ /* The complete packet has been received */
+ if (!is_pkt_valid(pkt, umem->buffer, first_addr, pkt_len) ||
+ !is_offset_correct(umem, pkt, addr))
+ return TEST_FAILURE;
- pkt = pkt_stream_get_next_rx_pkt(pkt_stream, &pkts_sent);
- nb_frags = 0;
- pkt_len = 0;
- }
+ pkt_stream->nb_rx_pkts++;
+ nb_frags = 0;
+ pkt_len = 0;
+ }
- if (nb_frags) {
- /* In the middle of a packet. Start over from beginning of packet. */
- idx_rx -= nb_frags;
- xsk_ring_cons__cancel(&xsk->rx, nb_frags);
- if (ifobj->use_fill_ring) {
- idx_fq -= nb_frags;
- xsk_ring_prod__cancel(&umem->fq, nb_frags);
- }
- frags_processed -= nb_frags;
+ if (nb_frags) {
+ /* In the middle of a packet. Start over from beginning of packet. */
+ idx_rx -= nb_frags;
+ xsk_ring_cons__cancel(&xsk->rx, nb_frags);
+ if (ifobj->use_fill_ring) {
+ idx_fq -= nb_frags;
+ xsk_ring_prod__cancel(&umem->fq, nb_frags);
}
+ frags_processed -= nb_frags;
+ }
- if (ifobj->use_fill_ring)
- xsk_ring_prod__submit(&umem->fq, frags_processed);
- if (ifobj->release_rx)
- xsk_ring_cons__release(&xsk->rx, frags_processed);
+ if (ifobj->use_fill_ring)
+ xsk_ring_prod__submit(&umem->fq, frags_processed);
+ if (ifobj->release_rx)
+ xsk_ring_cons__release(&xsk->rx, frags_processed);
+
+ pthread_mutex_lock(&pacing_mutex);
+ pkts_in_flight -= pkts_sent;
+ pthread_mutex_unlock(&pacing_mutex);
+ pkts_sent = 0;
+
+return TEST_CONTINUE;
+}
+
+bool all_packets_received(struct test_spec *test, struct xsk_socket_info *xsk, u32 sock_num,
+ unsigned long *bitmap)
+{
+ struct pkt_stream *pkt_stream = xsk->pkt_stream;
+
+ if (!pkt_stream) {
+ __set_bit(sock_num, bitmap);
+ return false;
+ }
+
+ if (pkt_stream->nb_rx_pkts == pkt_stream->nb_valid_entries) {
+ __set_bit(sock_num, bitmap);
+ if (bitmap_full(bitmap, test->nb_sockets))
+ return true;
+ }
+
+ return false;
+}
+
+static int receive_pkts(struct test_spec *test)
+{
+ struct timeval tv_end, tv_now, tv_timeout = {THREAD_TMOUT, 0};
+ DECLARE_BITMAP(bitmap, test->nb_sockets);
+ struct xsk_socket_info *xsk;
+ u32 sock_num = 0;
+ int res, ret;
- pthread_mutex_lock(&pacing_mutex);
- pkts_in_flight -= pkts_sent;
- pthread_mutex_unlock(&pacing_mutex);
- pkts_sent = 0;
+ ret = gettimeofday(&tv_now, NULL);
+ if (ret)
+ exit_with_error(errno);
+
+ timeradd(&tv_now, &tv_timeout, &tv_end);
+
+ while (1) {
+ xsk = &test->ifobj_rx->xsk_arr[sock_num];
+
+ if ((all_packets_received(test, xsk, sock_num, bitmap)))
+ break;
+
+ res = __receive_pkts(test, xsk);
+ if (!(res == TEST_PASS || res == TEST_CONTINUE))
+ return res;
+
+ ret = gettimeofday(&tv_now, NULL);
+ if (ret)
+ exit_with_error(errno);
+
+ if (timercmp(&tv_now, &tv_end, >)) {
+ ksft_print_msg("ERROR: [%s] Receive loop timed out\n", __func__);
+ return TEST_FAILURE;
+ }
+ sock_num = (sock_num + 1) % test->nb_sockets;
}
return TEST_PASS;
@@ -1567,7 +1624,6 @@ static void *worker_testapp_validate_rx(void *arg)
{
struct test_spec *test = (struct test_spec *)arg;
struct ifobject *ifobject = test->ifobj_rx;
- struct pollfd fds = { };
int err;
if (test->current_step == 1) {
@@ -1582,12 +1638,9 @@ static void *worker_testapp_validate_rx(void *arg)
}
}
- fds.fd = xsk_socket__fd(ifobject->xsk->xsk);
- fds.events = POLLIN;
-
pthread_barrier_wait(&barr);
- err = receive_pkts(test, &fds);
+ err = receive_pkts(test);
if (!err && ifobject->validation_func)
err = ifobject->validation_func(ifobject);
@@ -1724,9 +1777,15 @@ static int __testapp_validate_traffic(struct test_spec *test, struct ifobject *i
pthread_join(t0, NULL);
if (test->total_steps == test->current_step || test->fail) {
+ u32 i;
+
if (ifobj2)
- xsk_socket__delete(ifobj2->xsk->xsk);
- xsk_socket__delete(ifobj1->xsk->xsk);
+ for (i = 0; i < test->nb_sockets; i++)
+ xsk_socket__delete(ifobj2->xsk_arr[i].xsk);
+
+ for (i = 0; i < test->nb_sockets; i++)
+ xsk_socket__delete(ifobj1->xsk_arr[i].xsk);
+
testapp_clean_xsk_umem(ifobj1);
if (ifobj2 && !ifobj2->shared_umem)
testapp_clean_xsk_umem(ifobj2);
@@ -1798,16 +1857,18 @@ static int testapp_bidirectional(struct test_spec *test)
return res;
}
-static int swap_xsk_resources(struct ifobject *ifobj_tx, struct ifobject *ifobj_rx)
+static int swap_xsk_resources(struct test_spec *test)
{
int ret;
- xsk_socket__delete(ifobj_tx->xsk->xsk);
- xsk_socket__delete(ifobj_rx->xsk->xsk);
- ifobj_tx->xsk = &ifobj_tx->xsk_arr[1];
- ifobj_rx->xsk = &ifobj_rx->xsk_arr[1];
+ test->ifobj_tx->xsk_arr[0].pkt_stream = NULL;
+ test->ifobj_rx->xsk_arr[0].pkt_stream = NULL;
+ test->ifobj_tx->xsk_arr[1].pkt_stream = test->tx_pkt_stream_default;
+ test->ifobj_rx->xsk_arr[1].pkt_stream = test->rx_pkt_stream_default;
+ test->ifobj_tx->xsk = &test->ifobj_tx->xsk_arr[1];
+ test->ifobj_rx->xsk = &test->ifobj_rx->xsk_arr[1];
- ret = xsk_update_xskmap(ifobj_rx->xskmap, ifobj_rx->xsk->xsk);
+ ret = xsk_update_xskmap(test->ifobj_rx->xskmap, test->ifobj_rx->xsk->xsk);
if (ret)
return TEST_FAILURE;
@@ -1821,7 +1882,7 @@ static int testapp_xdp_prog_cleanup(struct test_spec *test)
if (testapp_validate_traffic(test))
return TEST_FAILURE;
- if (swap_xsk_resources(test->ifobj_tx, test->ifobj_rx))
+ if (swap_xsk_resources(test))
return TEST_FAILURE;
return testapp_validate_traffic(test);
}
diff --git a/tools/testing/selftests/bpf/xskxceiver.h b/tools/testing/selftests/bpf/xskxceiver.h
index 3d494adef792..fa409285eafd 100644
--- a/tools/testing/selftests/bpf/xskxceiver.h
+++ b/tools/testing/selftests/bpf/xskxceiver.h
@@ -108,6 +108,8 @@ struct pkt_stream {
u32 current_pkt_nb;
struct pkt *pkts;
u32 max_pkt_len;
+ u32 nb_rx_pkts;
+ u32 nb_valid_entries;
bool verbatim;
};
--
2.34.1
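The reworked receive path boils down to a round-robin loop with one completion bit per socket. Below is a minimal user-space sketch of that pattern (illustrative only); a plain unsigned long stands in for the DECLARE_BITMAP()/__set_bit()/bitmap_full() helpers and a counter stands in for the real ring processing:

	#include <stdio.h>

	#define NB_SOCKETS 2

	int main(void)
	{
		unsigned long done = 0;					/* one bit per socket */
		const unsigned long all = (1UL << NB_SOCKETS) - 1;
		unsigned int rx[NB_SOCKETS] = {0, 0};			/* packets seen so far */
		const unsigned int expected[NB_SOCKETS] = {3, 3};	/* stand-in for nb_valid_entries */
		unsigned int sock = 0;

		while (done != all) {					/* bitmap_full() equivalent */
			if (rx[sock] >= expected[sock])
				done |= 1UL << sock;			/* __set_bit() equivalent */
			else
				rx[sock]++;				/* stand-in for __receive_pkts() */

			sock = (sock + 1) % NB_SOCKETS;			/* next socket, round robin */
		}

		printf("all %d sockets received their expected packets\n", NB_SOCKETS);
		return 0;
	}

The real receive_pkts() additionally kicks the socket, optionally polls, and enforces an overall timeout, but the completion tracking works the same way.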
* [PATCH bpf-next v3 5/8] selftests/xsk: remove unnecessary parameter from pkt_set() function call
From: Tushar Vyavahare @ 2023-09-27 13:52 UTC
To: bpf
Cc: netdev, bjorn, magnus.karlsson, maciej.fijalkowski,
jonathan.lemon, davem, kuba, pabeni, ast, daniel,
tirthendu.sarkar, tushar.vyavahare
The pkt_set() function no longer needs the umem parameter, so remove it
from the pkt_set() function call.
Signed-off-by: Tushar Vyavahare <tushar.vyavahare@intel.com>
---
tools/testing/selftests/bpf/xskxceiver.c | 27 ++++++++++--------------
1 file changed, 11 insertions(+), 16 deletions(-)
diff --git a/tools/testing/selftests/bpf/xskxceiver.c b/tools/testing/selftests/bpf/xskxceiver.c
index 6d0cd0cc7b5f..57a5126f6182 100644
--- a/tools/testing/selftests/bpf/xskxceiver.c
+++ b/tools/testing/selftests/bpf/xskxceiver.c
@@ -651,7 +651,7 @@ static u32 pkt_get_buffer_len(struct xsk_umem_info *umem, u32 len)
return ceil_u32(len, umem->frame_size) * umem->frame_size;
}
-static struct pkt_stream *pkt_stream_generate(struct xsk_umem_info *umem, u32 nb_pkts, u32 pkt_len)
+static struct pkt_stream *pkt_stream_generate(u32 nb_pkts, u32 pkt_len)
{
struct pkt_stream *pkt_stream;
u32 i;
@@ -672,30 +672,28 @@ static struct pkt_stream *pkt_stream_generate(struct xsk_umem_info *umem, u32 nb
return pkt_stream;
}
-static struct pkt_stream *pkt_stream_clone(struct xsk_umem_info *umem,
- struct pkt_stream *pkt_stream)
+static struct pkt_stream *pkt_stream_clone(struct pkt_stream *pkt_stream)
{
- return pkt_stream_generate(umem, pkt_stream->nb_pkts, pkt_stream->pkts[0].len);
+ return pkt_stream_generate(pkt_stream->nb_pkts, pkt_stream->pkts[0].len);
}
static void pkt_stream_replace(struct test_spec *test, u32 nb_pkts, u32 pkt_len)
{
struct pkt_stream *pkt_stream;
- pkt_stream = pkt_stream_generate(test->ifobj_tx->umem, nb_pkts, pkt_len);
+ pkt_stream = pkt_stream_generate(nb_pkts, pkt_len);
test->ifobj_tx->xsk->pkt_stream = pkt_stream;
- pkt_stream = pkt_stream_generate(test->ifobj_rx->umem, nb_pkts, pkt_len);
+ pkt_stream = pkt_stream_generate(nb_pkts, pkt_len);
test->ifobj_rx->xsk->pkt_stream = pkt_stream;
}
static void __pkt_stream_replace_half(struct ifobject *ifobj, u32 pkt_len,
int offset)
{
- struct xsk_umem_info *umem = ifobj->umem;
struct pkt_stream *pkt_stream;
u32 i;
- pkt_stream = pkt_stream_clone(umem, ifobj->xsk->pkt_stream);
+ pkt_stream = pkt_stream_clone(ifobj->xsk->pkt_stream);
for (i = 1; i < ifobj->xsk->pkt_stream->nb_pkts; i += 2)
pkt_set(pkt_stream, &pkt_stream->pkts[i], offset, pkt_len);
@@ -711,11 +709,10 @@ static void pkt_stream_replace_half(struct test_spec *test, u32 pkt_len, int off
static void pkt_stream_receive_half(struct test_spec *test)
{
- struct xsk_umem_info *umem = test->ifobj_rx->umem;
struct pkt_stream *pkt_stream = test->ifobj_tx->xsk->pkt_stream;
u32 i;
- test->ifobj_rx->xsk->pkt_stream = pkt_stream_generate(umem, pkt_stream->nb_pkts,
+ test->ifobj_rx->xsk->pkt_stream = pkt_stream_generate(pkt_stream->nb_pkts,
pkt_stream->pkts[0].len);
pkt_stream = test->ifobj_rx->xsk->pkt_stream;
for (i = 1; i < pkt_stream->nb_pkts; i += 2)
@@ -1918,8 +1915,7 @@ static int testapp_stats_tx_invalid_descs(struct test_spec *test)
static int testapp_stats_rx_full(struct test_spec *test)
{
pkt_stream_replace(test, DEFAULT_UMEM_BUFFERS + DEFAULT_UMEM_BUFFERS / 2, MIN_PKT_SIZE);
- test->ifobj_rx->xsk->pkt_stream = pkt_stream_generate(test->ifobj_rx->umem,
- DEFAULT_UMEM_BUFFERS, MIN_PKT_SIZE);
+ test->ifobj_rx->xsk->pkt_stream = pkt_stream_generate(DEFAULT_UMEM_BUFFERS, MIN_PKT_SIZE);
test->ifobj_rx->xsk->rxqsize = DEFAULT_UMEM_BUFFERS;
test->ifobj_rx->release_rx = false;
@@ -1930,8 +1926,7 @@ static int testapp_stats_rx_full(struct test_spec *test)
static int testapp_stats_fill_empty(struct test_spec *test)
{
pkt_stream_replace(test, DEFAULT_UMEM_BUFFERS + DEFAULT_UMEM_BUFFERS / 2, MIN_PKT_SIZE);
- test->ifobj_rx->xsk->pkt_stream = pkt_stream_generate(test->ifobj_rx->umem,
- DEFAULT_UMEM_BUFFERS, MIN_PKT_SIZE);
+ test->ifobj_rx->xsk->pkt_stream = pkt_stream_generate(DEFAULT_UMEM_BUFFERS, MIN_PKT_SIZE);
test->ifobj_rx->use_fill_ring = false;
test->ifobj_rx->validation_func = validate_fill_empty;
@@ -2467,8 +2462,8 @@ int main(int argc, char **argv)
init_iface(ifobj_tx, worker_testapp_validate_tx);
test_spec_init(&test, ifobj_tx, ifobj_rx, 0, &tests[0]);
- tx_pkt_stream_default = pkt_stream_generate(ifobj_tx->umem, DEFAULT_PKT_CNT, MIN_PKT_SIZE);
- rx_pkt_stream_default = pkt_stream_generate(ifobj_rx->umem, DEFAULT_PKT_CNT, MIN_PKT_SIZE);
+ tx_pkt_stream_default = pkt_stream_generate(DEFAULT_PKT_CNT, MIN_PKT_SIZE);
+ rx_pkt_stream_default = pkt_stream_generate(DEFAULT_PKT_CNT, MIN_PKT_SIZE);
if (!tx_pkt_stream_default || !rx_pkt_stream_default)
exit_with_error(ENOMEM);
test.tx_pkt_stream_default = tx_pkt_stream_default;
--
2.34.1
* [PATCH bpf-next v3 6/8] selftests/xsk: iterate over all the sockets in the send pkts function
From: Tushar Vyavahare @ 2023-09-27 13:52 UTC
To: bpf
Cc: netdev, bjorn, magnus.karlsson, maciej.fijalkowski,
jonathan.lemon, davem, kuba, pabeni, ast, daniel,
tirthendu.sarkar, tushar.vyavahare
Update send_pkts() to handle multiple sockets for sending packets.
Multiple TX sockets are utilized alternately based on the batch size to
improve packet transmission.
Signed-off-by: Tushar Vyavahare <tushar.vyavahare@intel.com>
---
tools/testing/selftests/bpf/xskxceiver.c | 57 ++++++++++++++++--------
1 file changed, 38 insertions(+), 19 deletions(-)
diff --git a/tools/testing/selftests/bpf/xskxceiver.c b/tools/testing/selftests/bpf/xskxceiver.c
index 57a5126f6182..ce869431556c 100644
--- a/tools/testing/selftests/bpf/xskxceiver.c
+++ b/tools/testing/selftests/bpf/xskxceiver.c
@@ -1194,13 +1194,13 @@ static int receive_pkts(struct test_spec *test)
return TEST_PASS;
}
-static int __send_pkts(struct ifobject *ifobject, struct pollfd *fds, bool timeout)
+static int __send_pkts(struct ifobject *ifobject, struct xsk_socket_info *xsk, bool timeout)
{
u32 i, idx = 0, valid_pkts = 0, valid_frags = 0, buffer_len;
- struct pkt_stream *pkt_stream = ifobject->xsk->pkt_stream;
- struct xsk_socket_info *xsk = ifobject->xsk;
+ struct pkt_stream *pkt_stream = xsk->pkt_stream;
struct xsk_umem_info *umem = ifobject->umem;
bool use_poll = ifobject->use_poll;
+ struct pollfd fds = { };
int ret;
buffer_len = pkt_get_buffer_len(umem, pkt_stream->max_pkt_len);
@@ -1212,9 +1212,12 @@ static int __send_pkts(struct ifobject *ifobject, struct pollfd *fds, bool timeo
return TEST_CONTINUE;
}
+ fds.fd = xsk_socket__fd(xsk->xsk);
+ fds.events = POLLOUT;
+
while (xsk_ring_prod__reserve(&xsk->tx, BATCH_SIZE, &idx) < BATCH_SIZE) {
if (use_poll) {
- ret = poll(fds, 1, POLL_TMOUT);
+ ret = poll(&fds, 1, POLL_TMOUT);
if (timeout) {
if (ret < 0) {
ksft_print_msg("ERROR: [%s] Poll error %d\n",
@@ -1293,7 +1296,7 @@ static int __send_pkts(struct ifobject *ifobject, struct pollfd *fds, bool timeo
xsk->outstanding_tx += valid_frags;
if (use_poll) {
- ret = poll(fds, 1, POLL_TMOUT);
+ ret = poll(&fds, 1, POLL_TMOUT);
if (ret <= 0) {
if (ret == 0 && timeout)
return TEST_PASS;
@@ -1339,27 +1342,43 @@ static int wait_for_tx_completion(struct xsk_socket_info *xsk)
return TEST_PASS;
}
+bool all_packets_sent(struct test_spec *test, unsigned long *bitmap)
+{
+ return bitmap_full(bitmap, test->nb_sockets);
+}
+
static int send_pkts(struct test_spec *test, struct ifobject *ifobject)
{
- struct pkt_stream *pkt_stream = ifobject->xsk->pkt_stream;
bool timeout = !is_umem_valid(test->ifobj_rx);
- struct pollfd fds = { };
- u32 ret;
+ DECLARE_BITMAP(bitmap, test->nb_sockets);
+ u32 i, ret;
- fds.fd = xsk_socket__fd(ifobject->xsk->xsk);
- fds.events = POLLOUT;
+ while (!(all_packets_sent(test, bitmap))) {
+ for (i = 0; i < test->nb_sockets; i++) {
+ struct pkt_stream *pkt_stream;
- while (pkt_stream->current_pkt_nb < pkt_stream->nb_pkts) {
- ret = __send_pkts(ifobject, &fds, timeout);
- if (ret == TEST_CONTINUE && !test->fail)
- continue;
- if ((ret || test->fail) && !timeout)
- return TEST_FAILURE;
- if (ret == TEST_PASS && timeout)
- return ret;
+ pkt_stream = ifobject->xsk_arr[i].pkt_stream;
+ if (!pkt_stream || pkt_stream->current_pkt_nb >= pkt_stream->nb_pkts) {
+ __set_bit(i, bitmap);
+ continue;
+ }
+ ret = __send_pkts(ifobject, &ifobject->xsk_arr[i], timeout);
+ if (ret == TEST_CONTINUE && !test->fail)
+ continue;
+
+ if ((ret || test->fail) && !timeout)
+ return TEST_FAILURE;
+
+ if (ret == TEST_PASS && timeout)
+ return ret;
+
+ ret = wait_for_tx_completion(&ifobject->xsk_arr[i]);
+ if (ret)
+ return TEST_FAILURE;
+ }
}
- return wait_for_tx_completion(ifobject->xsk);
+ return TEST_PASS;
}
static int get_xsk_stats(struct xsk_socket *xsk, struct xdp_statistics *stats)
--
2.34.1
* [PATCH bpf-next v3 7/8] selftests/xsk: modify xsk_update_xskmap() to accept the index as an argument
From: Tushar Vyavahare @ 2023-09-27 13:52 UTC
To: bpf
Cc: netdev, bjorn, magnus.karlsson, maciej.fijalkowski,
jonathan.lemon, davem, kuba, pabeni, ast, daniel,
tirthendu.sarkar, tushar.vyavahare
Modify xsk_update_xskmap() to accept the index as an argument, enabling
the addition of multiple sockets to the xskmap.
Signed-off-by: Tushar Vyavahare <tushar.vyavahare@intel.com>
---
tools/testing/selftests/bpf/xsk.c | 3 +--
tools/testing/selftests/bpf/xsk.h | 2 +-
tools/testing/selftests/bpf/xskxceiver.c | 6 +++---
3 files changed, 5 insertions(+), 6 deletions(-)
diff --git a/tools/testing/selftests/bpf/xsk.c b/tools/testing/selftests/bpf/xsk.c
index d9fb2b730a2c..e574711eeb84 100644
--- a/tools/testing/selftests/bpf/xsk.c
+++ b/tools/testing/selftests/bpf/xsk.c
@@ -442,10 +442,9 @@ void xsk_clear_xskmap(struct bpf_map *map)
bpf_map_delete_elem(map_fd, &index);
}
-int xsk_update_xskmap(struct bpf_map *map, struct xsk_socket *xsk)
+int xsk_update_xskmap(struct bpf_map *map, struct xsk_socket *xsk, u32 index)
{
int map_fd, sock_fd;
- u32 index = 0;
map_fd = bpf_map__fd(map);
sock_fd = xsk_socket__fd(xsk);
diff --git a/tools/testing/selftests/bpf/xsk.h b/tools/testing/selftests/bpf/xsk.h
index d93200fdaa8d..771570bc3731 100644
--- a/tools/testing/selftests/bpf/xsk.h
+++ b/tools/testing/selftests/bpf/xsk.h
@@ -204,7 +204,7 @@ struct xsk_umem_config {
int xsk_attach_xdp_program(struct bpf_program *prog, int ifindex, u32 xdp_flags);
void xsk_detach_xdp_program(int ifindex, u32 xdp_flags);
-int xsk_update_xskmap(struct bpf_map *map, struct xsk_socket *xsk);
+int xsk_update_xskmap(struct bpf_map *map, struct xsk_socket *xsk, u32 index);
void xsk_clear_xskmap(struct bpf_map *map);
bool xsk_is_in_mode(u32 ifindex, int mode);
diff --git a/tools/testing/selftests/bpf/xskxceiver.c b/tools/testing/selftests/bpf/xskxceiver.c
index ce869431556c..0432c516a413 100644
--- a/tools/testing/selftests/bpf/xskxceiver.c
+++ b/tools/testing/selftests/bpf/xskxceiver.c
@@ -1608,7 +1608,7 @@ static void thread_common_ops(struct test_spec *test, struct ifobject *ifobject)
xsk_populate_fill_ring(ifobject->umem, ifobject->xsk->pkt_stream, ifobject->use_fill_ring);
- ret = xsk_update_xskmap(ifobject->xskmap, ifobject->xsk->xsk);
+ ret = xsk_update_xskmap(ifobject->xskmap, ifobject->xsk->xsk, 0);
if (ret)
exit_with_error(errno);
}
@@ -1646,7 +1646,7 @@ static void *worker_testapp_validate_rx(void *arg)
thread_common_ops(test, ifobject);
} else {
xsk_clear_xskmap(ifobject->xskmap);
- err = xsk_update_xskmap(ifobject->xskmap, ifobject->xsk->xsk);
+ err = xsk_update_xskmap(ifobject->xskmap, ifobject->xsk->xsk, 0);
if (err) {
ksft_print_msg("Error: Failed to update xskmap, error %s\n",
strerror(-err));
@@ -1884,7 +1884,7 @@ static int swap_xsk_resources(struct test_spec *test)
test->ifobj_tx->xsk = &test->ifobj_tx->xsk_arr[1];
test->ifobj_rx->xsk = &test->ifobj_rx->xsk_arr[1];
- ret = xsk_update_xskmap(test->ifobj_rx->xskmap, test->ifobj_rx->xsk->xsk);
+ ret = xsk_update_xskmap(test->ifobj_rx->xskmap, test->ifobj_rx->xsk->xsk, 0);
if (ret)
return TEST_FAILURE;
--
2.34.1
* [PATCH bpf-next v3 8/8] selftests/xsk: add a test for shared umem feature
From: Tushar Vyavahare @ 2023-09-27 13:52 UTC
To: bpf
Cc: netdev, bjorn, magnus.karlsson, maciej.fijalkowski,
jonathan.lemon, davem, kuba, pabeni, ast, daniel,
tirthendu.sarkar, tushar.vyavahare
Add a new test for the shared umem feature. This is accomplished by
adding a new XDP program and using multiple sockets.
The new XDP program redirects the packets based on the destination MAC
address.
Signed-off-by: Tushar Vyavahare <tushar.vyavahare@intel.com>
---
.../selftests/bpf/progs/xsk_xdp_progs.c | 20 ++++++-
tools/testing/selftests/bpf/xsk_xdp_common.h | 2 +
tools/testing/selftests/bpf/xskxceiver.c | 55 +++++++++++++++++--
tools/testing/selftests/bpf/xskxceiver.h | 2 +-
4 files changed, 72 insertions(+), 7 deletions(-)
diff --git a/tools/testing/selftests/bpf/progs/xsk_xdp_progs.c b/tools/testing/selftests/bpf/progs/xsk_xdp_progs.c
index 734f231a8534..ccde6a4c6319 100644
--- a/tools/testing/selftests/bpf/progs/xsk_xdp_progs.c
+++ b/tools/testing/selftests/bpf/progs/xsk_xdp_progs.c
@@ -3,11 +3,12 @@
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
+#include <linux/if_ether.h>
#include "xsk_xdp_common.h"
struct {
__uint(type, BPF_MAP_TYPE_XSKMAP);
- __uint(max_entries, 1);
+ __uint(max_entries, 2);
__uint(key_size, sizeof(int));
__uint(value_size, sizeof(int));
} xsk SEC(".maps");
@@ -52,4 +53,21 @@ SEC("xdp.frags") int xsk_xdp_populate_metadata(struct xdp_md *xdp)
return bpf_redirect_map(&xsk, 0, XDP_DROP);
}
+SEC("xdp") int xsk_xdp_shared_umem(struct xdp_md *xdp)
+{
+ void *data = (void *)(long)xdp->data;
+ void *data_end = (void *)(long)xdp->data_end;
+ struct ethhdr *eth = data;
+
+ if (eth + 1 > data_end)
+ return XDP_DROP;
+
+ /* Redirecting packets based on the destination MAC address */
+ idx = ((unsigned int)(eth->h_dest[5])) / 2;
+ if (idx > MAX_SOCKETS)
+ return XDP_DROP;
+
+ return bpf_redirect_map(&xsk, idx, XDP_DROP);
+}
+
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/xsk_xdp_common.h b/tools/testing/selftests/bpf/xsk_xdp_common.h
index f55d61625336..5a6f36f07383 100644
--- a/tools/testing/selftests/bpf/xsk_xdp_common.h
+++ b/tools/testing/selftests/bpf/xsk_xdp_common.h
@@ -3,6 +3,8 @@
#ifndef XSK_XDP_COMMON_H_
#define XSK_XDP_COMMON_H_
+#define MAX_SOCKETS 2
+
struct xdp_info {
__u64 count;
} __attribute__((aligned(32)));
diff --git a/tools/testing/selftests/bpf/xskxceiver.c b/tools/testing/selftests/bpf/xskxceiver.c
index 0432c516a413..837e0ffbdc47 100644
--- a/tools/testing/selftests/bpf/xskxceiver.c
+++ b/tools/testing/selftests/bpf/xskxceiver.c
@@ -651,7 +651,7 @@ static u32 pkt_get_buffer_len(struct xsk_umem_info *umem, u32 len)
return ceil_u32(len, umem->frame_size) * umem->frame_size;
}
-static struct pkt_stream *pkt_stream_generate(u32 nb_pkts, u32 pkt_len)
+static struct pkt_stream *__pkt_stream_generate(u32 nb_pkts, u32 pkt_len, u32 nb_start, u32 nb_off)
{
struct pkt_stream *pkt_stream;
u32 i;
@@ -666,12 +666,17 @@ static struct pkt_stream *pkt_stream_generate(u32 nb_pkts, u32 pkt_len)
struct pkt *pkt = &pkt_stream->pkts[i];
pkt_set(pkt_stream, pkt, 0, pkt_len);
- pkt->pkt_nb = i;
+ pkt->pkt_nb = nb_start + i * nb_off;
}
return pkt_stream;
}
+static struct pkt_stream *pkt_stream_generate(u32 nb_pkts, u32 pkt_len)
+{
+ return __pkt_stream_generate(nb_pkts, pkt_len, 0, 1);
+}
+
static struct pkt_stream *pkt_stream_clone(struct pkt_stream *pkt_stream)
{
return pkt_stream_generate(pkt_stream->nb_pkts, pkt_stream->pkts[0].len);
@@ -721,6 +726,24 @@ static void pkt_stream_receive_half(struct test_spec *test)
pkt_stream->nb_valid_entries /= 2;
}
+static void pkt_stream_even_odd_sequence(struct test_spec *test)
+{
+ struct pkt_stream *pkt_stream;
+ u32 i;
+
+ for (i = 0; i < test->nb_sockets; i++) {
+ pkt_stream = test->ifobj_tx->xsk_arr[i].pkt_stream;
+ pkt_stream = __pkt_stream_generate(pkt_stream->nb_pkts / 2,
+ pkt_stream->pkts[0].len, i, 2);
+ test->ifobj_tx->xsk_arr[i].pkt_stream = pkt_stream;
+
+ pkt_stream = test->ifobj_rx->xsk_arr[i].pkt_stream;
+ pkt_stream = __pkt_stream_generate(pkt_stream->nb_pkts / 2,
+ pkt_stream->pkts[0].len, i, 2);
+ test->ifobj_rx->xsk_arr[i].pkt_stream = pkt_stream;
+ }
+}
+
static u64 pkt_get_addr(struct pkt *pkt, struct xsk_umem_info *umem)
{
if (!pkt->valid)
@@ -1584,6 +1607,7 @@ static void thread_common_ops(struct test_spec *test, struct ifobject *ifobject)
LIBBPF_OPTS(bpf_xdp_query_opts, opts);
void *bufs;
int ret;
+ u32 i;
if (ifobject->umem->unaligned_mode)
mmap_flags |= MAP_HUGETLB | MAP_HUGE_2MB;
@@ -1608,9 +1632,12 @@ static void thread_common_ops(struct test_spec *test, struct ifobject *ifobject)
xsk_populate_fill_ring(ifobject->umem, ifobject->xsk->pkt_stream, ifobject->use_fill_ring);
- ret = xsk_update_xskmap(ifobject->xskmap, ifobject->xsk->xsk, 0);
- if (ret)
- exit_with_error(errno);
+ for (i = 0; i < test->nb_sockets; i++) {
+ ifobject->xsk = &ifobject->xsk_arr[i];
+ ret = xsk_update_xskmap(ifobject->xskmap, ifobject->xsk->xsk, i);
+ if (ret)
+ exit_with_error(errno);
+ }
}
static void *worker_testapp_validate_tx(void *arg)
@@ -2111,6 +2138,23 @@ static int testapp_xdp_metadata_copy(struct test_spec *test)
return testapp_validate_traffic(test);
}
+static int testapp_xdp_shared_umem(struct test_spec *test)
+{
+ struct xsk_xdp_progs *skel_rx = test->ifobj_rx->xdp_progs;
+ struct xsk_xdp_progs *skel_tx = test->ifobj_tx->xdp_progs;
+
+ test->total_steps = 1;
+ test->nb_sockets = 2;
+
+ test_spec_set_xdp_prog(test, skel_rx->progs.xsk_xdp_shared_umem,
+ skel_tx->progs.xsk_xdp_shared_umem,
+ skel_rx->maps.xsk, skel_tx->maps.xsk);
+
+ pkt_stream_even_odd_sequence(test);
+
+ return testapp_validate_traffic(test);
+}
+
static int testapp_poll_txq_tmout(struct test_spec *test)
{
test->ifobj_tx->use_poll = true;
@@ -2412,6 +2456,7 @@ static const struct test_spec tests[] = {
{.name = "STAT_FILL_EMPTY", .test_func = testapp_stats_fill_empty},
{.name = "XDP_PROG_CLEANUP", .test_func = testapp_xdp_prog_cleanup},
{.name = "XDP_DROP_HALF", .test_func = testapp_xdp_drop},
+ {.name = "XDP_SHARED_UMEM", .test_func = testapp_xdp_shared_umem},
{.name = "XDP_METADATA_COPY", .test_func = testapp_xdp_metadata},
{.name = "XDP_METADATA_COPY_MULTI_BUFF", .test_func = testapp_xdp_metadata_mb},
{.name = "SEND_RECEIVE_9K_PACKETS", .test_func = testapp_send_receive_mb},
diff --git a/tools/testing/selftests/bpf/xskxceiver.h b/tools/testing/selftests/bpf/xskxceiver.h
index fa409285eafd..f174df2d693f 100644
--- a/tools/testing/selftests/bpf/xskxceiver.h
+++ b/tools/testing/selftests/bpf/xskxceiver.h
@@ -8,6 +8,7 @@
#include <limits.h>
#include "xsk_xdp_progs.skel.h"
+#include "xsk_xdp_common.h"
#ifndef SOL_XDP
#define SOL_XDP 283
@@ -35,7 +36,6 @@
#define TEST_SKIP 2
#define MAX_INTERFACES 2
#define MAX_INTERFACE_NAME_CHARS 16
-#define MAX_SOCKETS 2
#define MAX_TEST_NAME_SIZE 48
#define MAX_TEARDOWN_ITER 10
#define PKT_HDR_SIZE (sizeof(struct ethhdr) + 2) /* Just to align the data in the packet */
--
2.34.1
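To see how the new XDP program and the per-socket MAC addresses from patch 3 fit together, here is a small standalone illustration (not part of the selftest) of the h_dest[5] / 2 mapping that xsk_xdp_shared_umem() performs:

	#include <stdio.h>

	int main(void)
	{
		/* dst_mac[5] as assigned in __test_spec_init(): 2 * j + 1 */
		const unsigned char last_byte[2] = {0x01, 0x03};
		int j;

		for (j = 0; j < 2; j++)
			printf("socket %d: h_dest[5] = 0x%02x -> xskmap index %d\n",
			       j, last_byte[j], last_byte[j] / 2);
		return 0;
	}

Socket 0's traffic (destination byte 0x01) is redirected to xskmap index 0 and socket 1's traffic (destination byte 0x03) to index 1, so each socket receives only its own packets even though both share one umem.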
* Re: [PATCH bpf-next v3 0/8] Add a test for SHARED_UMEM feature
From: Magnus Karlsson @ 2023-10-04 13:17 UTC
To: Tushar Vyavahare
Cc: bpf, netdev, bjorn, magnus.karlsson, maciej.fijalkowski,
jonathan.lemon, davem, kuba, pabeni, ast, daniel,
tirthendu.sarkar
On Wed, 27 Sept 2023 at 15:31, Tushar Vyavahare
<tushar.vyavahare@intel.com> wrote:
>
> Implement a test for the SHARED_UMEM feature in this patch set and make
> necessary changes/improvements. Ensure that the framework now supports
> different streams for different sockets.
>
> v2->v3
> - Set the sock_num at the end of the while loop.
> - Declare xsk at the top of the while loop.
>
> v1->v2
> - Remove generate_mac_addresses() and generate mac addresses based on
> the number of sockets in __test_spec_init() function. [Magnus]
> - Update Makefile to include find_bit.c for compiling xskxceiver.
> - Add bitmap_full() function to verify all bits are set to break the while loop
> in the receive_pkts() and send_pkts() functions.
> - Replace __test_and_set_bit() function with __set_bit() function.
> - Add single return check for wait_for_tx_completion() function call.
>
> Patch series summary:
>
> 1: Move the packet stream from the ifobject struct to the xsk_socket_info
> struct to enable the use of different streams for different sockets
> This will facilitate the sending and receiving of data from multiple
> sockets simultaneously using the SHARED_XDP_UMEM feature.
>
> It gives the flexibility to send/receive individual traffic on a particular
> socket.
>
> 2: Rename the header file to a generic name so that it can be used by all
> future XDP programs.
>
> 3: Move the src_mac and dst_mac fields from the ifobject structure to the
> xsk_socket_info structure to achieve per-socket MAC address assignment.
> Require this in order to steer traffic to various sockets in subsequent
> patches.
>
> 4: Improve the receive_pkt() function to enable it to receive packets from
> multiple sockets. Define a sock_num variable to iterate through all the
> sockets in the Rx path. Add nb_valid_entries to check that all the
> expected number of packets are received.
>
> 5: The pkt_set() function no longer needs the umem parameter. This commit
> removes the umem parameter from the pkt_set() function.
>
> 6: Iterate over all the sockets in the send pkts function. Update
> send_pkts() to handle multiple sockets for sending packets.
> Multiple TX sockets are utilized alternately based on the batch size
> to improve packet transmission.
>
> 7: Modify xsk_update_xskmap() to accept the index as an argument, enabling
> the addition of multiple sockets to xskmap.
>
> 8: Add a new test for testing shared umem feature. This is accomplished by
> adding a new XDP program and using the multiple sockets. The new XDP
> program redirects the packets based on the destination MAC address.
Thanks Tushar!
For the set:
Acked-by: Magnus Karlsson <magnus.karlsson@intel.com>
> Tushar Vyavahare (8):
> selftests/xsk: move pkt_stream to the xsk_socket_info
> selftests/xsk: rename xsk_xdp_metadata.h to xsk_xdp_common.h
> selftests/xsk: move src_mac and dst_mac to the xsk_socket_info
> selftests/xsk: iterate over all the sockets in the receive pkts
> function
> selftests/xsk: remove unnecessary parameter from pkt_set() function
> call
> selftests/xsk: iterate over all the sockets in the send pkts function
> selftests/xsk: modify xsk_update_xskmap() to accept the index as an
> argument
> selftests/xsk: add a test for shared umem feature
>
> tools/testing/selftests/bpf/Makefile | 4 +-
> .../selftests/bpf/progs/xsk_xdp_progs.c | 22 +-
> tools/testing/selftests/bpf/xsk.c | 3 +-
> tools/testing/selftests/bpf/xsk.h | 2 +-
> tools/testing/selftests/bpf/xsk_xdp_common.h | 12 +
> .../testing/selftests/bpf/xsk_xdp_metadata.h | 5 -
> tools/testing/selftests/bpf/xskxceiver.c | 513 +++++++++++-------
> tools/testing/selftests/bpf/xskxceiver.h | 13 +-
> 8 files changed, 363 insertions(+), 211 deletions(-)
> create mode 100644 tools/testing/selftests/bpf/xsk_xdp_common.h
> delete mode 100644 tools/testing/selftests/bpf/xsk_xdp_metadata.h
>
> --
> 2.34.1
>
>
* Re: [PATCH bpf-next v3 0/8] Add a test for SHARED_UMEM feature
From: patchwork-bot+netdevbpf @ 2023-10-04 13:40 UTC
To: Tushar Vyavahare
Cc: bpf, netdev, bjorn, magnus.karlsson, maciej.fijalkowski,
jonathan.lemon, davem, kuba, pabeni, ast, daniel,
tirthendu.sarkar
Hello:
This series was applied to bpf/bpf-next.git (master)
by Daniel Borkmann <daniel@iogearbox.net>:
On Wed, 27 Sep 2023 19:22:33 +0530 you wrote:
> Implement a test for the SHARED_UMEM feature in this patch set and make
> necessary changes/improvements. Ensure that the framework now supports
> different streams for different sockets.
>
> v2->v3
> - Set the sock_num at the end of the while loop.
> - Declare xsk at the top of the while loop.
>
> [...]
Here is the summary with links:
- [bpf-next,v3,1/8] selftests/xsk: move pkt_stream to the xsk_socket_info
https://git.kernel.org/bpf/bpf-next/c/8367eb954e24
- [bpf-next,v3,2/8] selftests/xsk: rename xsk_xdp_metadata.h to xsk_xdp_common.h
https://git.kernel.org/bpf/bpf-next/c/93ba11247907
- [bpf-next,v3,3/8] selftests/xsk: move src_mac and dst_mac to the xsk_socket_info
https://git.kernel.org/bpf/bpf-next/c/985fd2145a29
- [bpf-next,v3,4/8] selftests/xsk: iterate over all the sockets in the receive pkts function
https://git.kernel.org/bpf/bpf-next/c/8913e653e9b8
- [bpf-next,v3,5/8] selftests/xsk: remove unnecessary parameter from pkt_set() function call
https://git.kernel.org/bpf/bpf-next/c/46e43786cc60
- [bpf-next,v3,6/8] selftests/xsk: iterate over all the sockets in the send pkts function
https://git.kernel.org/bpf/bpf-next/c/fd0815ae9b8a
- [bpf-next,v3,7/8] selftests/xsk: modify xsk_update_xskmap() to accept the index as an argument
https://git.kernel.org/bpf/bpf-next/c/fc2cb86495da
- [bpf-next,v3,8/8] selftests/xsk: add a test for shared umem feature
https://git.kernel.org/bpf/bpf-next/c/6d198a89c004
You are awesome, thank you!
--
Deet-doot-dot, I am a bot.
https://korg.docs.kernel.org/patchwork/pwbot.html