* [PATCH 0/5] selftests/xsk: foundational UMEM refactoring
@ 2026-04-28 8:03 Tushar Vyavahare
2026-04-28 8:03 ` [PATCH 1/5] selftests/xsk: Introduce helpers for setting UMEM properties Tushar Vyavahare
` (4 more replies)
0 siblings, 5 replies; 8+ messages in thread
From: Tushar Vyavahare @ 2026-04-28 8:03 UTC (permalink / raw)
To: netdev, magnus.karlsson, maciej.fijalkowski, stfomichev,
kernelxing, davem, kuba, pabeni, ast, daniel, tirthendu.sarkar,
tushar.vyavahare
Cc: bpf
This series refactors foundational UMEM handling in selftests/xsk.
It centralizes UMEM property setup through helpers, removes ifobject-level
UMEM ownership state, and normalizes umem_size/mmap_size usage across the
code paths touched here.
No functional behavior change is intended; the focus is internal
consistency and maintainability.
Signed-off-by: Magnus Karlsson <magnus.karlsson@intel.com>
Signed-off-by: Tushar Vyavahare <tushar.vyavahare@intel.com>
Tushar Vyavahare (5):
selftests/xsk: Introduce helpers for setting UMEM properties
selftests/xsk: Eliminate umem reference from ifobject
selftests/xsk: Remove umem from pkt_generate parameters
selftests/xsk: Use umem_size() helper consistently
selftests/xsk: Introduce mmap_size in umem struct
.../selftests/bpf/prog_tests/test_xsk.c | 195 +++++++++---------
.../selftests/bpf/prog_tests/test_xsk.h | 33 +--
2 files changed, 119 insertions(+), 109 deletions(-)
--
2.43.0
^ permalink raw reply [flat|nested] 8+ messages in thread
* [PATCH 1/5] selftests/xsk: Introduce helpers for setting UMEM properties
2026-04-28 8:03 [PATCH 0/5] selftests/xsk: foundational UMEM refactoring Tushar Vyavahare
@ 2026-04-28 8:03 ` Tushar Vyavahare
2026-04-28 10:28 ` bot+bpf-ci
2026-04-28 8:03 ` [PATCH 2/5] selftests/xsk: Eliminate umem reference from ifobject Tushar Vyavahare
` (3 subsequent siblings)
4 siblings, 1 reply; 8+ messages in thread
From: Tushar Vyavahare @ 2026-04-28 8:03 UTC (permalink / raw)
To: netdev, magnus.karlsson, maciej.fijalkowski, stfomichev,
kernelxing, davem, kuba, pabeni, ast, daniel, tirthendu.sarkar,
tushar.vyavahare
Cc: bpf
UMEM properties are set via open-coded field assignments in multiple test
paths, which makes updates noisy and error-prone.
Introduce three helpers to set UMEM properties through a single interface.
This keeps setup logic consistent across tests and makes future refactoring
simpler.
No functional behavior change is intended.
Signed-off-by: Magnus Karlsson <magnus.karlsson@intel.com>
Signed-off-by: Tushar Vyavahare <tushar.vyavahare@intel.com>
---
.../selftests/bpf/prog_tests/test_xsk.c | 48 +++++++++++--------
1 file changed, 28 insertions(+), 20 deletions(-)
diff --git a/tools/testing/selftests/bpf/prog_tests/test_xsk.c b/tools/testing/selftests/bpf/prog_tests/test_xsk.c
index 7950c504ed28..d5cd8f590b25 100644
--- a/tools/testing/selftests/bpf/prog_tests/test_xsk.c
+++ b/tools/testing/selftests/bpf/prog_tests/test_xsk.c
@@ -303,6 +303,22 @@ static void test_spec_reset(struct test_spec *test)
__test_spec_init(test, test->ifobj_tx, test->ifobj_rx);
}
+static void test_spec_set_unaligned(struct test_spec *test)
+{
+ test_spec_set_unaligned(test);
+}
+
+static void test_spec_set_frame_size(struct test_spec *test, u32 size)
+{
+ test->ifobj_tx->umem->frame_size = size;
+ test->ifobj_rx->umem->frame_size = size;
+}
+
+static void test_spec_set_frame_headroom(struct test_spec *test, u32 size)
+{
+ test->ifobj_rx->umem->frame_headroom = size;
+}
+
static void test_spec_set_xdp_prog(struct test_spec *test, struct bpf_program *xdp_prog_rx,
struct bpf_program *xdp_prog_tx, struct bpf_map *xskmap_rx,
struct bpf_map *xskmap_tx)
@@ -1953,7 +1969,7 @@ int testapp_xdp_prog_cleanup(struct test_spec *test)
int testapp_headroom(struct test_spec *test)
{
- test->ifobj_rx->umem->frame_headroom = UMEM_HEADROOM_TEST_SIZE;
+ test_spec_set_frame_headroom(test, UMEM_HEADROOM_TEST_SIZE);
return testapp_validate_traffic(test);
}
@@ -1968,8 +1984,8 @@ int testapp_stats_rx_dropped(struct test_spec *test)
if (pkt_stream_replace_half(test, (MIN_PKT_SIZE * 3) + umem_tr, 0))
return TEST_FAILURE;
- test->ifobj_rx->umem->frame_headroom = test->ifobj_rx->umem->frame_size -
- XDP_PACKET_HEADROOM - (MIN_PKT_SIZE * 2) - umem_tr;
+ test_spec_set_frame_headroom(test, test->ifobj_rx->umem->frame_size -
+ XDP_PACKET_HEADROOM - (MIN_PKT_SIZE * 2) - umem_tr);
if (pkt_stream_receive_half(test))
return TEST_FAILURE;
test->ifobj_rx->validation_func = validate_rx_dropped;
@@ -2025,8 +2041,7 @@ int testapp_stats_fill_empty(struct test_spec *test)
int testapp_send_receive_unaligned(struct test_spec *test)
{
- test->ifobj_tx->umem->unaligned_mode = true;
- test->ifobj_rx->umem->unaligned_mode = true;
+ test_spec_set_unaligned(test);
/* Let half of the packets straddle a 4K buffer boundary */
if (pkt_stream_replace_half(test, MIN_PKT_SIZE, -MIN_PKT_SIZE / 2))
return TEST_FAILURE;
@@ -2037,8 +2052,7 @@ int testapp_send_receive_unaligned(struct test_spec *test)
int testapp_send_receive_unaligned_mb(struct test_spec *test)
{
test->mtu = MAX_ETH_JUMBO_SIZE;
- test->ifobj_tx->umem->unaligned_mode = true;
- test->ifobj_rx->umem->unaligned_mode = true;
+ test_spec_set_unaligned(test);
if (pkt_stream_replace(test, DEFAULT_PKT_CNT, MAX_ETH_JUMBO_SIZE))
return TEST_FAILURE;
return testapp_validate_traffic(test);
@@ -2204,7 +2218,7 @@ int testapp_poll_txq_tmout(struct test_spec *test)
{
test->ifobj_tx->use_poll = true;
/* create invalid frame by setting umem frame_size and pkt length equal to 2048 */
- test->ifobj_tx->umem->frame_size = 2048;
+ test_spec_set_frame_size(test, 2048);
if (pkt_stream_replace(test, 2 * DEFAULT_PKT_CNT, 2048))
return TEST_FAILURE;
return testapp_validate_traffic_single_thread(test, test->ifobj_tx);
@@ -2337,8 +2351,7 @@ int testapp_send_receive(struct test_spec *test)
int testapp_send_receive_2k_frame(struct test_spec *test)
{
- test->ifobj_tx->umem->frame_size = 2048;
- test->ifobj_rx->umem->frame_size = 2048;
+ test_spec_set_frame_size(test, 2048);
if (pkt_stream_replace(test, DEFAULT_PKT_CNT, MIN_PKT_SIZE))
return TEST_FAILURE;
return testapp_validate_traffic(test);
@@ -2363,15 +2376,13 @@ int testapp_aligned_inv_desc(struct test_spec *test)
int testapp_aligned_inv_desc_2k_frame(struct test_spec *test)
{
- test->ifobj_tx->umem->frame_size = 2048;
- test->ifobj_rx->umem->frame_size = 2048;
+ test_spec_set_frame_size(test, 2048);
return testapp_invalid_desc(test);
}
int testapp_unaligned_inv_desc(struct test_spec *test)
{
- test->ifobj_tx->umem->unaligned_mode = true;
- test->ifobj_rx->umem->unaligned_mode = true;
+ test_spec_set_unaligned(test);
return testapp_invalid_desc(test);
}
@@ -2380,10 +2391,8 @@ int testapp_unaligned_inv_desc_4001_frame(struct test_spec *test)
u64 page_size, umem_size;
/* Odd frame size so the UMEM doesn't end near a page boundary. */
- test->ifobj_tx->umem->frame_size = 4001;
- test->ifobj_rx->umem->frame_size = 4001;
- test->ifobj_tx->umem->unaligned_mode = true;
- test->ifobj_rx->umem->unaligned_mode = true;
+ test_spec_set_frame_size(test, 4001);
+ test_spec_set_unaligned(test);
/* This test exists to test descriptors that straddle the end of
* the UMEM but not a page.
*/
@@ -2402,8 +2411,7 @@ int testapp_aligned_inv_desc_mb(struct test_spec *test)
int testapp_unaligned_inv_desc_mb(struct test_spec *test)
{
- test->ifobj_tx->umem->unaligned_mode = true;
- test->ifobj_rx->umem->unaligned_mode = true;
+ test_spec_set_unaligned(test);
return testapp_invalid_desc_mb(test);
}
--
2.43.0
^ permalink raw reply related [flat|nested] 8+ messages in thread
* [PATCH 2/5] selftests/xsk: Eliminate umem reference from ifobject
2026-04-28 8:03 [PATCH 0/5] selftests/xsk: foundational UMEM refactoring Tushar Vyavahare
2026-04-28 8:03 ` [PATCH 1/5] selftests/xsk: Introduce helpers for setting UMEM properties Tushar Vyavahare
@ 2026-04-28 8:03 ` Tushar Vyavahare
2026-04-28 8:03 ` [PATCH 3/5] selftests/xsk: Remove umem from pkt_generate parameters Tushar Vyavahare
` (2 subsequent siblings)
4 siblings, 0 replies; 8+ messages in thread
From: Tushar Vyavahare @ 2026-04-28 8:03 UTC (permalink / raw)
To: netdev, magnus.karlsson, maciej.fijalkowski, stfomichev,
kernelxing, davem, kuba, pabeni, ast, daniel, tirthendu.sarkar,
tushar.vyavahare
Cc: bpf
Each AF_XDP socket is associated with a UMEM, but ifobject currently
keeps only a single ifobject-level UMEM reference. That makes it awkward
to model cases where multiple sockets on the same interface eventually
need distinct UMEM instances.
Move UMEM storage into xsk_socket_info and keep xsk_socket_info::umem as
the active pointer used by the code paths. For now, all sockets on an
interface still point at the first socket's UMEM, which preserves
existing behaviour while removing the ifobject-level ownership.
Signed-off-by: Magnus Karlsson <magnus.karlsson@intel.com>
Signed-off-by: Tushar Vyavahare <tushar.vyavahare@intel.com>
---
.../selftests/bpf/prog_tests/test_xsk.c | 124 +++++++++---------
.../selftests/bpf/prog_tests/test_xsk.h | 32 ++---
2 files changed, 80 insertions(+), 76 deletions(-)
diff --git a/tools/testing/selftests/bpf/prog_tests/test_xsk.c b/tools/testing/selftests/bpf/prog_tests/test_xsk.c
index d5cd8f590b25..ad5ada9216fc 100644
--- a/tools/testing/selftests/bpf/prog_tests/test_xsk.c
+++ b/tools/testing/selftests/bpf/prog_tests/test_xsk.c
@@ -65,9 +65,9 @@ static void gen_eth_hdr(struct xsk_socket_info *xsk, struct ethhdr *eth_hdr)
eth_hdr->h_proto = htons(ETH_P_LOOPBACK);
}
-static bool is_umem_valid(struct ifobject *ifobj)
+static bool is_umem_valid(struct xsk_umem_info *umem)
{
- return !!ifobj->umem->umem;
+ return !!umem->umem;
}
static u32 mode_to_xdp_flags(enum test_mode mode)
@@ -229,24 +229,26 @@ static void __test_spec_init(struct test_spec *test, struct ifobject *ifobj_tx,
ifobj->tx_on = false;
}
- memset(ifobj->umem, 0, sizeof(*ifobj->umem));
- ifobj->umem->num_frames = DEFAULT_UMEM_BUFFERS;
- ifobj->umem->frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE;
-
for (j = 0; j < MAX_SOCKETS; j++) {
- memset(&ifobj->xsk_arr[j], 0, sizeof(ifobj->xsk_arr[j]));
- ifobj->xsk_arr[j].rxqsize = XSK_RING_CONS__DEFAULT_NUM_DESCS;
+ struct xsk_socket_info *xsk = &ifobj->xsk_arr[j];
+
+ memset(xsk, 0, sizeof(*xsk));
+ xsk->rxqsize = XSK_RING_CONS__DEFAULT_NUM_DESCS;
+ xsk->umem = &ifobj->xsk_arr[0].umem_real;
ifobj->xsk_arr[j].batch_size = DEFAULT_BATCH_SIZE;
if (i == 0)
- ifobj->xsk_arr[j].pkt_stream = test->tx_pkt_stream_default;
+ xsk->pkt_stream = test->tx_pkt_stream_default;
else
- ifobj->xsk_arr[j].pkt_stream = test->rx_pkt_stream_default;
+ xsk->pkt_stream = test->rx_pkt_stream_default;
- memcpy(ifobj->xsk_arr[j].src_mac, g_mac, ETH_ALEN);
- memcpy(ifobj->xsk_arr[j].dst_mac, g_mac, ETH_ALEN);
- ifobj->xsk_arr[j].src_mac[5] += ((j * 2) + 0);
- ifobj->xsk_arr[j].dst_mac[5] += ((j * 2) + 1);
+ memcpy(xsk->src_mac, g_mac, ETH_ALEN);
+ memcpy(xsk->dst_mac, g_mac, ETH_ALEN);
+ xsk->src_mac[5] += ((j * 2) + 0);
+ xsk->dst_mac[5] += ((j * 2) + 1);
}
+
+ ifobj->xsk->umem->num_frames = DEFAULT_UMEM_BUFFERS;
+ ifobj->xsk->umem->frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE;
}
if (ifobj_tx->hw_ring_size_supp)
@@ -305,18 +307,19 @@ static void test_spec_reset(struct test_spec *test)
static void test_spec_set_unaligned(struct test_spec *test)
{
- test_spec_set_unaligned(test);
+ test->ifobj_tx->xsk->umem->unaligned_mode = true;
+ test->ifobj_rx->xsk->umem->unaligned_mode = true;
}
static void test_spec_set_frame_size(struct test_spec *test, u32 size)
{
- test->ifobj_tx->umem->frame_size = size;
- test->ifobj_rx->umem->frame_size = size;
+ test->ifobj_tx->xsk->umem->frame_size = size;
+ test->ifobj_rx->xsk->umem->frame_size = size;
}
static void test_spec_set_frame_headroom(struct test_spec *test, u32 size)
{
- test->ifobj_rx->umem->frame_headroom = size;
+ test->ifobj_rx->xsk->umem->frame_headroom = size;
}
static void test_spec_set_xdp_prog(struct test_spec *test, struct bpf_program *xdp_prog_rx,
@@ -1007,7 +1010,7 @@ static int __receive_pkts(struct test_spec *test, struct xsk_socket_info *xsk)
return TEST_FAILURE;
if (!ret) {
- if (!is_umem_valid(test->ifobj_tx))
+ if (!is_umem_valid(umem))
return TEST_PASS;
ksft_print_msg("ERROR: [%s] Poll timed out\n", __func__);
@@ -1167,7 +1170,7 @@ static int __send_pkts(struct ifobject *ifobject, struct xsk_socket_info *xsk, b
{
u32 i, idx = 0, valid_pkts = 0, valid_frags = 0, buffer_len;
struct pkt_stream *pkt_stream = xsk->pkt_stream;
- struct xsk_umem_info *umem = ifobject->umem;
+ struct xsk_umem_info *umem = xsk->umem;
bool use_poll = ifobject->use_poll;
struct pollfd fds = { };
int ret;
@@ -1226,7 +1229,7 @@ static int __send_pkts(struct ifobject *ifobject, struct xsk_socket_info *xsk, b
while (nb_frags_left--) {
struct xdp_desc *tx_desc = xsk_ring_prod__tx_desc(&xsk->tx, idx + i);
- tx_desc->addr = pkt_get_addr(pkt, ifobject->umem);
+ tx_desc->addr = pkt_get_addr(pkt, xsk->umem);
if (pkt_stream->verbatim) {
tx_desc->len = pkt->len;
tx_desc->options = pkt->options;
@@ -1319,7 +1322,7 @@ bool all_packets_sent(struct test_spec *test, unsigned long *bitmap)
static int send_pkts(struct test_spec *test, struct ifobject *ifobject)
{
- bool timeout = !is_umem_valid(test->ifobj_rx);
+ bool timeout = !is_umem_valid(test->ifobj_rx->xsk->umem);
DECLARE_BITMAP(bitmap, test->nb_sockets);
u32 i, ret;
@@ -1504,14 +1507,20 @@ static int xsk_configure(struct test_spec *test, struct ifobject *ifobject,
static int thread_common_ops_tx(struct test_spec *test, struct ifobject *ifobject)
{
- int ret = xsk_configure(test, ifobject, test->ifobj_rx->umem, true);
+ struct xsk_umem_info *umem_rx;
+ int ret;
+ if (!test->ifobj_rx || !test->ifobj_rx->xsk_arr[0].umem ||
+ !test->ifobj_rx->xsk_arr[0].umem->umem) {
+ ksft_print_msg("Error: RX UMEM is not initialized before shared-UMEM TX setup\n");
+ return -EINVAL;
+ }
+
+ umem_rx = test->ifobj_rx->xsk_arr[0].umem;
+ ret = xsk_configure(test, ifobject, umem_rx, true);
if (ret)
return ret;
- ifobject->xsk = &ifobject->xsk_arr[0];
ifobject->xskmap = test->ifobj_rx->xskmap;
- memcpy(ifobject->umem, test->ifobj_rx->umem, sizeof(struct xsk_umem_info));
- ifobject->umem->base_addr = 0;
return 0;
}
@@ -1564,6 +1573,7 @@ static int xsk_populate_fill_ring(struct xsk_umem_info *umem, struct pkt_stream
static int thread_common_ops(struct test_spec *test, struct ifobject *ifobject)
{
+ struct xsk_umem_info *umem = ifobject->xsk->umem;
LIBBPF_OPTS(bpf_xdp_query_opts, opts);
int mmap_flags;
u64 umem_sz;
@@ -1571,10 +1581,10 @@ static int thread_common_ops(struct test_spec *test, struct ifobject *ifobject)
int ret;
u32 i;
- umem_sz = ifobject->umem->num_frames * ifobject->umem->frame_size;
+ umem_sz = umem->num_frames * umem->frame_size;
mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE;
- if (ifobject->umem->unaligned_mode)
+ if (umem->unaligned_mode)
mmap_flags |= MAP_HUGETLB | MAP_HUGE_2MB;
if (ifobject->shared_umem)
@@ -1584,11 +1594,11 @@ static int thread_common_ops(struct test_spec *test, struct ifobject *ifobject)
if (bufs == MAP_FAILED)
return -errno;
- ret = xsk_configure_umem(ifobject, ifobject->umem, bufs, umem_sz);
+ ret = xsk_configure_umem(ifobject, umem, bufs, umem_sz);
if (ret)
return ret;
- ret = xsk_configure(test, ifobject, ifobject->umem, false);
+ ret = xsk_configure(test, ifobject, umem, false);
if (ret)
return ret;
@@ -1597,21 +1607,19 @@ static int thread_common_ops(struct test_spec *test, struct ifobject *ifobject)
if (!ifobject->rx_on)
return 0;
- ret = xsk_populate_fill_ring(ifobject->umem, ifobject->xsk->pkt_stream,
+ ret = xsk_populate_fill_ring(umem, ifobject->xsk->pkt_stream,
ifobject->use_fill_ring);
if (ret)
return ret;
for (i = 0; i < test->nb_sockets; i++) {
- ifobject->xsk = &ifobject->xsk_arr[i];
- ret = xsk_update_xskmap(ifobject->xskmap, ifobject->xsk->xsk, i);
+ ret = xsk_update_xskmap(ifobject->xskmap, ifobject->xsk_arr[i].xsk, i);
if (ret)
return ret;
}
return 0;
}
-
void *worker_testapp_validate_tx(void *arg)
{
struct test_spec *test = (struct test_spec *)arg;
@@ -1691,14 +1699,15 @@ void *worker_testapp_validate_rx(void *arg)
static void testapp_clean_xsk_umem(struct ifobject *ifobj)
{
- u64 umem_sz = ifobj->umem->num_frames * ifobj->umem->frame_size;
+ struct xsk_umem_info *umem = ifobj->xsk->umem;
+ u64 umem_sz = umem->num_frames * umem->frame_size;
if (ifobj->shared_umem)
umem_sz *= 2;
umem_sz = ceil_u64(umem_sz, HUGEPAGE_SIZE) * HUGEPAGE_SIZE;
- xsk_umem__delete(ifobj->umem->umem);
- munmap(ifobj->umem->buffer, umem_sz);
+ xsk_umem__delete(umem->umem);
+ munmap(umem->buffer, umem_sz);
}
static void handler(int signum)
@@ -1860,9 +1869,11 @@ static int testapp_validate_traffic(struct test_spec *test)
{
struct ifobject *ifobj_rx = test->ifobj_rx;
struct ifobject *ifobj_tx = test->ifobj_tx;
+ struct xsk_umem_info *umem_rx = ifobj_rx->xsk->umem;
+ struct xsk_umem_info *umem_tx = ifobj_tx->xsk->umem;
- if ((ifobj_rx->umem->unaligned_mode && !ifobj_rx->unaligned_supp) ||
- (ifobj_tx->umem->unaligned_mode && !ifobj_tx->unaligned_supp)) {
+ if ((umem_rx->unaligned_mode && !ifobj_rx->unaligned_supp) ||
+ (umem_tx->unaligned_mode && !ifobj_tx->unaligned_supp)) {
ksft_print_msg("No huge pages present.\n");
return TEST_SKIP;
}
@@ -1975,6 +1986,7 @@ int testapp_headroom(struct test_spec *test)
int testapp_stats_rx_dropped(struct test_spec *test)
{
+ struct xsk_umem_info *umem = test->ifobj_rx->xsk->umem;
u32 umem_tr = test->ifobj_tx->umem_tailroom;
if (test->mode == TEST_MODE_ZC) {
@@ -1984,8 +1996,8 @@ int testapp_stats_rx_dropped(struct test_spec *test)
if (pkt_stream_replace_half(test, (MIN_PKT_SIZE * 3) + umem_tr, 0))
return TEST_FAILURE;
- test_spec_set_frame_headroom(test, test->ifobj_rx->umem->frame_size -
- XDP_PACKET_HEADROOM - (MIN_PKT_SIZE * 2) - umem_tr);
+ test_spec_set_frame_headroom(test, umem->frame_size - XDP_PACKET_HEADROOM -
+ (MIN_PKT_SIZE * 2) - umem_tr);
if (pkt_stream_receive_half(test))
return TEST_FAILURE;
test->ifobj_rx->validation_func = validate_rx_dropped;
@@ -2078,7 +2090,7 @@ int testapp_send_receive_mb(struct test_spec *test)
int testapp_invalid_desc_mb(struct test_spec *test)
{
- struct xsk_umem_info *umem = test->ifobj_tx->umem;
+ struct xsk_umem_info *umem = test->ifobj_tx->xsk->umem;
u64 umem_size = umem->num_frames * umem->frame_size;
struct pkt pkts[] = {
/* Valid packet for synch to start with */
@@ -2119,7 +2131,7 @@ int testapp_invalid_desc_mb(struct test_spec *test)
int testapp_invalid_desc(struct test_spec *test)
{
- struct xsk_umem_info *umem = test->ifobj_tx->umem;
+ struct xsk_umem_info *umem = test->ifobj_tx->xsk->umem;
u64 umem_size = umem->num_frames * umem->frame_size;
struct pkt pkts[] = {
/* Zero packet address allowed */
@@ -2397,7 +2409,8 @@ int testapp_unaligned_inv_desc_4001_frame(struct test_spec *test)
* the UMEM but not a page.
*/
page_size = sysconf(_SC_PAGESIZE);
- umem_size = test->ifobj_tx->umem->num_frames * test->ifobj_tx->umem->frame_size;
+ umem_size = test->ifobj_tx->xsk->umem->num_frames *
+ test->ifobj_tx->xsk->umem->frame_size;
assert(umem_size % page_size > MIN_PKT_SIZE);
assert(umem_size % page_size < page_size - MIN_PKT_SIZE);
@@ -2455,9 +2468,9 @@ int testapp_hw_sw_max_ring_size(struct test_spec *test)
test->total_steps = 2;
test->ifobj_tx->ring.tx_pending = test->ifobj_tx->ring.tx_max_pending;
test->ifobj_tx->ring.rx_pending = test->ifobj_tx->ring.rx_max_pending;
- test->ifobj_rx->umem->num_frames = max_descs;
- test->ifobj_rx->umem->fill_size = max_descs;
- test->ifobj_rx->umem->comp_size = max_descs;
+ test->ifobj_rx->xsk->umem->num_frames = max_descs;
+ test->ifobj_rx->xsk->umem->fill_size = max_descs;
+ test->ifobj_rx->xsk->umem->comp_size = max_descs;
test->ifobj_tx->xsk->batch_size = XSK_RING_PROD__DEFAULT_NUM_DESCS;
test->ifobj_rx->xsk->batch_size = XSK_RING_PROD__DEFAULT_NUM_DESCS;
@@ -2595,25 +2608,16 @@ struct ifobject *ifobject_create(void)
return NULL;
ifobj->xsk_arr = calloc(MAX_SOCKETS, sizeof(*ifobj->xsk_arr));
- if (!ifobj->xsk_arr)
- goto out_xsk_arr;
-
- ifobj->umem = calloc(1, sizeof(*ifobj->umem));
- if (!ifobj->umem)
- goto out_umem;
+ if (!ifobj->xsk_arr) {
+ free(ifobj);
+ return NULL;
+ }
return ifobj;
-
-out_umem:
- free(ifobj->xsk_arr);
-out_xsk_arr:
- free(ifobj);
- return NULL;
}
void ifobject_delete(struct ifobject *ifobj)
{
- free(ifobj->umem);
free(ifobj->xsk_arr);
free(ifobj);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/test_xsk.h b/tools/testing/selftests/bpf/prog_tests/test_xsk.h
index 1ab8aee4ce56..accda2456fe6 100644
--- a/tools/testing/selftests/bpf/prog_tests/test_xsk.h
+++ b/tools/testing/selftests/bpf/prog_tests/test_xsk.h
@@ -80,9 +80,25 @@ typedef int (*validation_func_t)(struct ifobject *ifobj);
typedef void *(*thread_func_t)(void *arg);
typedef int (*test_func_t)(struct test_spec *test);
+struct xsk_umem_info {
+ struct xsk_ring_prod fq;
+ struct xsk_ring_cons cq;
+ struct xsk_umem *umem;
+ u64 next_buffer;
+ u32 num_frames;
+ u32 frame_headroom;
+ void *buffer;
+ u32 frame_size;
+ u32 base_addr;
+ u32 fill_size;
+ u32 comp_size;
+ bool unaligned_mode;
+};
+
struct xsk_socket_info {
struct xsk_ring_cons rx;
struct xsk_ring_prod tx;
+ struct xsk_umem_info umem_real;
struct xsk_umem_info *umem;
struct xsk_socket *xsk;
struct pkt_stream *pkt_stream;
@@ -97,21 +113,6 @@ struct xsk_socket_info {
int kick_rx(struct xsk_socket_info *xsk);
int kick_tx(struct xsk_socket_info *xsk);
-struct xsk_umem_info {
- struct xsk_ring_prod fq;
- struct xsk_ring_cons cq;
- struct xsk_umem *umem;
- u64 next_buffer;
- u32 num_frames;
- u32 frame_headroom;
- void *buffer;
- u32 frame_size;
- u32 base_addr;
- u32 fill_size;
- u32 comp_size;
- bool unaligned_mode;
-};
-
struct set_hw_ring {
u32 default_tx;
u32 default_rx;
@@ -123,7 +124,6 @@ struct ifobject {
char ifname[MAX_INTERFACE_NAME_CHARS];
struct xsk_socket_info *xsk;
struct xsk_socket_info *xsk_arr;
- struct xsk_umem_info *umem;
thread_func_t func_ptr;
validation_func_t validation_func;
struct xsk_xdp_progs *xdp_progs;
--
2.43.0
^ permalink raw reply related [flat|nested] 8+ messages in thread
* [PATCH 3/5] selftests/xsk: Remove umem from pkt_generate parameters
2026-04-28 8:03 [PATCH 0/5] selftests/xsk: foundational UMEM refactoring Tushar Vyavahare
2026-04-28 8:03 ` [PATCH 1/5] selftests/xsk: Introduce helpers for setting UMEM properties Tushar Vyavahare
2026-04-28 8:03 ` [PATCH 2/5] selftests/xsk: Eliminate umem reference from ifobject Tushar Vyavahare
@ 2026-04-28 8:03 ` Tushar Vyavahare
2026-04-28 8:03 ` [PATCH 4/5] selftests/xsk: Use umem_size() helper consistently Tushar Vyavahare
2026-04-28 8:03 ` [PATCH 5/5] selftests/xsk: Introduce mmap_size in umem struct Tushar Vyavahare
4 siblings, 0 replies; 8+ messages in thread
From: Tushar Vyavahare @ 2026-04-28 8:03 UTC (permalink / raw)
To: netdev, magnus.karlsson, maciej.fijalkowski, stfomichev,
kernelxing, davem, kuba, pabeni, ast, daniel, tirthendu.sarkar,
tushar.vyavahare
Cc: bpf
After moving UMEM ownership to socket-level state, passing UMEM
explicitly into pkt_generate() is redundant and can drift from the
active socket state.
Drop the UMEM parameter and use the socket-attached UMEM directly. This
simplifies call sites and keeps packet generation aligned with the
configured socket/UMEM pairing.
Signed-off-by: Magnus Karlsson <magnus.karlsson@intel.com>
Signed-off-by: Tushar Vyavahare <tushar.vyavahare@intel.com>
---
tools/testing/selftests/bpf/prog_tests/test_xsk.c | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/tools/testing/selftests/bpf/prog_tests/test_xsk.c b/tools/testing/selftests/bpf/prog_tests/test_xsk.c
index ad5ada9216fc..ea229ea446ca 100644
--- a/tools/testing/selftests/bpf/prog_tests/test_xsk.c
+++ b/tools/testing/selftests/bpf/prog_tests/test_xsk.c
@@ -630,10 +630,10 @@ static void pkt_stream_cancel(struct pkt_stream *pkt_stream)
pkt_stream->current_pkt_nb--;
}
-static void pkt_generate(struct xsk_socket_info *xsk, struct xsk_umem_info *umem, u64 addr, u32 len,
- u32 pkt_nb, u32 bytes_written)
+static void pkt_generate(struct xsk_socket_info *xsk, u64 addr, u32 len, u32 pkt_nb,
+ u32 bytes_written)
{
- void *data = xsk_umem__get_data(umem->buffer, addr);
+ void *data = xsk_umem__get_data(xsk->umem->buffer, addr);
if (len < MIN_PKT_SIZE)
return;
@@ -1241,7 +1241,7 @@ static int __send_pkts(struct ifobject *ifobject, struct xsk_socket_info *xsk, b
tx_desc->options = 0;
}
if (pkt->valid)
- pkt_generate(xsk, umem, tx_desc->addr, tx_desc->len, pkt->pkt_nb,
+ pkt_generate(xsk, tx_desc->addr, tx_desc->len, pkt->pkt_nb,
bytes_written);
bytes_written += tx_desc->len;
--
2.43.0
^ permalink raw reply related [flat|nested] 8+ messages in thread
* [PATCH 4/5] selftests/xsk: Use umem_size() helper consistently
2026-04-28 8:03 [PATCH 0/5] selftests/xsk: foundational UMEM refactoring Tushar Vyavahare
` (2 preceding siblings ...)
2026-04-28 8:03 ` [PATCH 3/5] selftests/xsk: Remove umem from pkt_generate parameters Tushar Vyavahare
@ 2026-04-28 8:03 ` Tushar Vyavahare
2026-04-28 8:03 ` [PATCH 5/5] selftests/xsk: Introduce mmap_size in umem struct Tushar Vyavahare
4 siblings, 0 replies; 8+ messages in thread
From: Tushar Vyavahare @ 2026-04-28 8:03 UTC (permalink / raw)
To: netdev, magnus.karlsson, maciej.fijalkowski, stfomichev,
kernelxing, davem, kuba, pabeni, ast, daniel, tirthendu.sarkar,
tushar.vyavahare
Cc: bpf
Replace remaining open-coded `umem->num_frames * umem->frame_size`
calculations in test_xsk.c with the existing `umem_size()` helper.
This keeps UMEM size computation centralized, avoids duplicated arithmetic,
and improves readability with no intended behavior change.
Signed-off-by: Tushar Vyavahare <tushar.vyavahare@intel.com>
---
.../selftests/bpf/prog_tests/test_xsk.c | 26 +++++++++----------
1 file changed, 13 insertions(+), 13 deletions(-)
diff --git a/tools/testing/selftests/bpf/prog_tests/test_xsk.c b/tools/testing/selftests/bpf/prog_tests/test_xsk.c
index ea229ea446ca..ba4768f83eb6 100644
--- a/tools/testing/selftests/bpf/prog_tests/test_xsk.c
+++ b/tools/testing/selftests/bpf/prog_tests/test_xsk.c
@@ -828,12 +828,12 @@ static bool is_frag_valid(struct xsk_umem_info *umem, u64 addr, u32 len, u32 exp
u32 bytes_processed)
{
u32 seqnum, pkt_nb, *pkt_data, words_to_end, expected_seqnum;
+ u64 umem_sz = umem_size(umem);
void *data = xsk_umem__get_data(umem->buffer, addr);
addr -= umem->base_addr;
- if (addr >= umem->num_frames * umem->frame_size ||
- addr + len > umem->num_frames * umem->frame_size) {
+ if (addr >= umem_sz || addr + len > umem_sz) {
ksft_print_msg("Frag invalid addr: %llx len: %u\n",
(unsigned long long)addr, len);
return false;
@@ -1581,7 +1581,7 @@ static int thread_common_ops(struct test_spec *test, struct ifobject *ifobject)
int ret;
u32 i;
- umem_sz = umem->num_frames * umem->frame_size;
+ umem_sz = umem_size(umem);
mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE;
if (umem->unaligned_mode)
@@ -1700,7 +1700,7 @@ void *worker_testapp_validate_rx(void *arg)
static void testapp_clean_xsk_umem(struct ifobject *ifobj)
{
struct xsk_umem_info *umem = ifobj->xsk->umem;
- u64 umem_sz = umem->num_frames * umem->frame_size;
+ u64 umem_sz = umem_size(umem);
if (ifobj->shared_umem)
umem_sz *= 2;
@@ -2091,7 +2091,7 @@ int testapp_send_receive_mb(struct test_spec *test)
int testapp_invalid_desc_mb(struct test_spec *test)
{
struct xsk_umem_info *umem = test->ifobj_tx->xsk->umem;
- u64 umem_size = umem->num_frames * umem->frame_size;
+ u64 umem_sz = umem_size(umem);
struct pkt pkts[] = {
/* Valid packet for synch to start with */
{0, MIN_PKT_SIZE, 0, true, 0},
@@ -2101,7 +2101,7 @@ int testapp_invalid_desc_mb(struct test_spec *test)
{0, 0, 0, false, 0},
/* Invalid address in the second frame */
{0, XSK_UMEM__LARGE_FRAME_SIZE, 0, false, XDP_PKT_CONTD},
- {umem_size, XSK_UMEM__LARGE_FRAME_SIZE, 0, false, XDP_PKT_CONTD},
+ {umem_sz, XSK_UMEM__LARGE_FRAME_SIZE, 0, false, XDP_PKT_CONTD},
/* Invalid len in the middle */
{0, XSK_UMEM__LARGE_FRAME_SIZE, 0, false, XDP_PKT_CONTD},
{0, XSK_UMEM__INVALID_FRAME_SIZE, 0, false, XDP_PKT_CONTD},
@@ -2132,7 +2132,7 @@ int testapp_invalid_desc_mb(struct test_spec *test)
int testapp_invalid_desc(struct test_spec *test)
{
struct xsk_umem_info *umem = test->ifobj_tx->xsk->umem;
- u64 umem_size = umem->num_frames * umem->frame_size;
+ u64 umem_sz = umem_size(umem);
struct pkt pkts[] = {
/* Zero packet address allowed */
{0, MIN_PKT_SIZE, 0, true},
@@ -2143,11 +2143,11 @@ int testapp_invalid_desc(struct test_spec *test)
/* Packet too large */
{0, XSK_UMEM__INVALID_FRAME_SIZE, 0, false},
/* Up to end of umem allowed */
- {umem_size - MIN_PKT_SIZE - 2 * umem->frame_size, MIN_PKT_SIZE, 0, true},
+ {umem_sz - MIN_PKT_SIZE - 2 * umem->frame_size, MIN_PKT_SIZE, 0, true},
/* After umem ends */
- {umem_size, MIN_PKT_SIZE, 0, false},
+ {umem_sz, MIN_PKT_SIZE, 0, false},
/* Straddle the end of umem */
- {umem_size - MIN_PKT_SIZE / 2, MIN_PKT_SIZE, 0, false},
+ {umem_sz - MIN_PKT_SIZE / 2, MIN_PKT_SIZE, 0, false},
/* Straddle a 4K boundary */
{0x1000 - MIN_PKT_SIZE / 2, MIN_PKT_SIZE, 0, false},
/* Straddle a 2K boundary */
@@ -2165,9 +2165,9 @@ int testapp_invalid_desc(struct test_spec *test)
}
if (test->ifobj_tx->shared_umem) {
- pkts[4].offset += umem_size;
- pkts[5].offset += umem_size;
- pkts[6].offset += umem_size;
+ pkts[4].offset += umem_sz;
+ pkts[5].offset += umem_sz;
+ pkts[6].offset += umem_sz;
}
if (pkt_stream_generate_custom(test, pkts, ARRAY_SIZE(pkts)))
--
2.43.0
^ permalink raw reply related [flat|nested] 8+ messages in thread
* [PATCH 5/5] selftests/xsk: Introduce mmap_size in umem struct
2026-04-28 8:03 [PATCH 0/5] selftests/xsk: foundational UMEM refactoring Tushar Vyavahare
` (3 preceding siblings ...)
2026-04-28 8:03 ` [PATCH 4/5] selftests/xsk: Use umem_size() helper consistently Tushar Vyavahare
@ 2026-04-28 8:03 ` Tushar Vyavahare
4 siblings, 0 replies; 8+ messages in thread
From: Tushar Vyavahare @ 2026-04-28 8:03 UTC (permalink / raw)
To: netdev, magnus.karlsson, maciej.fijalkowski, stfomichev,
kernelxing, davem, kuba, pabeni, ast, daniel, tirthendu.sarkar,
tushar.vyavahare
Cc: bpf
UMEM teardown currently recomputes the `munmap()` length
from frame geometry (`num_frames * frame_size`, shared-UMEM
multiplier, and hugepage rounding).
That duplicates setup logic in cleanup and makes teardown depend on
re-deriving values instead of using the exact mapping metadata.
Store the finalized mapping length in `xsk_umem_info` as `mmap_size` when
the UMEM mapping is created, and use that value during teardown.
This removes duplicated size arithmetic in cleanup and guarantees that
`munmap()` uses the same canonical size that was established at setup time.
Signed-off-by: Magnus Karlsson <magnus.karlsson@intel.com>
Signed-off-by: Tushar Vyavahare <tushar.vyavahare@intel.com>
---
tools/testing/selftests/bpf/prog_tests/test_xsk.c | 9 +++------
tools/testing/selftests/bpf/prog_tests/test_xsk.h | 1 +
2 files changed, 4 insertions(+), 6 deletions(-)
diff --git a/tools/testing/selftests/bpf/prog_tests/test_xsk.c b/tools/testing/selftests/bpf/prog_tests/test_xsk.c
index ba4768f83eb6..ba1bcfc6b74b 100644
--- a/tools/testing/selftests/bpf/prog_tests/test_xsk.c
+++ b/tools/testing/selftests/bpf/prog_tests/test_xsk.c
@@ -1594,6 +1594,8 @@ static int thread_common_ops(struct test_spec *test, struct ifobject *ifobject)
if (bufs == MAP_FAILED)
return -errno;
+ umem->mmap_size = ceil_u64(umem_sz, HUGEPAGE_SIZE) * HUGEPAGE_SIZE;
+
ret = xsk_configure_umem(ifobject, umem, bufs, umem_sz);
if (ret)
return ret;
@@ -1700,14 +1702,9 @@ void *worker_testapp_validate_rx(void *arg)
static void testapp_clean_xsk_umem(struct ifobject *ifobj)
{
struct xsk_umem_info *umem = ifobj->xsk->umem;
- u64 umem_sz = umem_size(umem);
-
- if (ifobj->shared_umem)
- umem_sz *= 2;
- umem_sz = ceil_u64(umem_sz, HUGEPAGE_SIZE) * HUGEPAGE_SIZE;
xsk_umem__delete(umem->umem);
- munmap(umem->buffer, umem_sz);
+ munmap(umem->buffer, umem->mmap_size);
}
static void handler(int signum)
diff --git a/tools/testing/selftests/bpf/prog_tests/test_xsk.h b/tools/testing/selftests/bpf/prog_tests/test_xsk.h
index accda2456fe6..0355a9d06b4e 100644
--- a/tools/testing/selftests/bpf/prog_tests/test_xsk.h
+++ b/tools/testing/selftests/bpf/prog_tests/test_xsk.h
@@ -85,6 +85,7 @@ struct xsk_umem_info {
struct xsk_ring_cons cq;
struct xsk_umem *umem;
u64 next_buffer;
+ u64 mmap_size;
u32 num_frames;
u32 frame_headroom;
void *buffer;
--
2.43.0
^ permalink raw reply related [flat|nested] 8+ messages in thread
* Re: [PATCH 1/5] selftests/xsk: Introduce helpers for setting UMEM properties
2026-04-28 8:03 ` [PATCH 1/5] selftests/xsk: Introduce helpers for setting UMEM properties Tushar Vyavahare
@ 2026-04-28 10:28 ` bot+bpf-ci
2026-04-28 12:39 ` Vyavahare, Tushar
0 siblings, 1 reply; 8+ messages in thread
From: bot+bpf-ci @ 2026-04-28 10:28 UTC (permalink / raw)
To: tushar.vyavahare, netdev, magnus.karlsson, maciej.fijalkowski,
stfomichev, kernelxing, davem, kuba, pabeni, ast, daniel,
tirthendu.sarkar, tushar.vyavahare
Cc: bpf, ast, andrii, daniel, martin.lau, eddyz87, yonghong.song, clm,
ihor.solodrai
[-- Attachment #1: Type: text/plain, Size: 3703 bytes --]
> diff --git a/tools/testing/selftests/bpf/prog_tests/test_xsk.c b/tools/testing/selftests/bpf/prog_tests/test_xsk.c
> index 7950c504ed28..d5cd8f590b25 100644
> --- a/tools/testing/selftests/bpf/prog_tests/test_xsk.c
> +++ b/tools/testing/selftests/bpf/prog_tests/test_xsk.c
> @@ -303,6 +303,22 @@ static void test_spec_reset(struct test_spec *test)
> __test_spec_init(test, test->ifobj_tx, test->ifobj_rx);
> }
>
> +static void test_spec_set_unaligned(struct test_spec *test)
> +{
> + test_spec_set_unaligned(test);
> +}
Does this function recurse unconditionally? Looking at the functions
this helper replaces, the body should set unaligned_mode = true on
both ifobj_tx->umem and ifobj_rx->umem. Instead, every call to
test_spec_set_unaligned() recurses into itself with the same argument
until the stack is exhausted.
Five tests introduced in this commit call the broken helper:
testapp_send_receive_unaligned()
testapp_send_receive_unaligned_mb()
testapp_unaligned_inv_desc()
testapp_unaligned_inv_desc_4001_frame()
testapp_unaligned_inv_desc_mb()
Each previously worked by directly setting the unaligned_mode fields.
After this patch, those direct assignments are replaced with calls to
test_spec_set_unaligned(), which crashes via stack overflow instead
of running the test.
> +
> +static void test_spec_set_frame_size(struct test_spec *test, u32 size)
> +{
> + test->ifobj_tx->umem->frame_size = size;
> + test->ifobj_rx->umem->frame_size = size;
> +}
> +
> +static void test_spec_set_frame_headroom(struct test_spec *test, u32 size)
> +{
> + test->ifobj_rx->umem->frame_headroom = size;
> +}
> +
[ ... ]
> @@ -2025,8 +2041,7 @@ int testapp_stats_fill_empty(struct test_spec *test)
>
> int testapp_send_receive_unaligned(struct test_spec *test)
> {
> - test->ifobj_tx->umem->unaligned_mode = true;
> - test->ifobj_rx->umem->unaligned_mode = true;
> + test_spec_set_unaligned(test);
> /* Let half of the packets straddle a 4K buffer boundary */
> if (pkt_stream_replace_half(test, MIN_PKT_SIZE, -MIN_PKT_SIZE / 2))
> return TEST_FAILURE;
> @@ -2037,8 +2052,7 @@ int testapp_send_receive_unaligned(struct test_spec *test)
> int testapp_send_receive_unaligned_mb(struct test_spec *test)
> {
> test->mtu = MAX_ETH_JUMBO_SIZE;
> - test->ifobj_tx->umem->unaligned_mode = true;
> - test->ifobj_rx->umem->unaligned_mode = true;
> + test_spec_set_unaligned(test);
> if (pkt_stream_replace(test, DEFAULT_PKT_CNT, MAX_ETH_JUMBO_SIZE))
> return TEST_FAILURE;
> return testapp_validate_traffic(test);
[ ... ]
> @@ -2380,10 +2391,8 @@ int testapp_unaligned_inv_desc_4001_frame(struct test_spec *test)
> u64 page_size, umem_size;
>
> /* Odd frame size so the UMEM doesn't end near a page boundary. */
> - test->ifobj_tx->umem->frame_size = 4001;
> - test->ifobj_rx->umem->frame_size = 4001;
> - test->ifobj_tx->umem->unaligned_mode = true;
> - test->ifobj_rx->umem->unaligned_mode = true;
> + test_spec_set_frame_size(test, 4001);
> + test_spec_set_unaligned(test);
> /* This test exists to test descriptors that straddle the end of
> * the UMEM but not a page.
> */
[ ... ]
> @@ -2411,8 +2422,7 @@ int testapp_aligned_inv_desc_mb(struct test_spec *test)
>
> int testapp_unaligned_inv_desc_mb(struct test_spec *test)
> {
> - test->ifobj_tx->umem->unaligned_mode = true;
> - test->ifobj_rx->umem->unaligned_mode = true;
> + test_spec_set_unaligned(test);
> return testapp_invalid_desc_mb(test);
> }
---
AI reviewed your patch. Please fix the bug or email reply why it's not a bug.
See: https://github.com/kernel-patches/vmtest/blob/master/ci/claude/README.md
CI run summary: https://github.com/kernel-patches/bpf/actions/runs/25045852068
^ permalink raw reply [flat|nested] 8+ messages in thread
* RE: [PATCH 1/5] selftests/xsk: Introduce helpers for setting UMEM properties
2026-04-28 10:28 ` bot+bpf-ci
@ 2026-04-28 12:39 ` Vyavahare, Tushar
0 siblings, 0 replies; 8+ messages in thread
From: Vyavahare, Tushar @ 2026-04-28 12:39 UTC (permalink / raw)
To: bot+bpf-ci@kernel.org, netdev@vger.kernel.org, Karlsson, Magnus,
Fijalkowski, Maciej, stfomichev@gmail.com, kernelxing@tencent.com,
davem@davemloft.net, kuba@kernel.org, pabeni@redhat.com,
ast@kernel.org, daniel@iogearbox.net, Sarkar, Tirthendu
Cc: bpf@vger.kernel.org, ast@kernel.org, andrii@kernel.org,
daniel@iogearbox.net, martin.lau@kernel.org, eddyz87@gmail.com,
yonghong.song@linux.dev, clm@meta.com, ihor.solodrai@linux.dev
Somehow, I messed up my patch set while sending it.
I have a v2 that includes this fix. I'll wait a couple of days for any
additional review comments, and then I'll send out v2.
-Tushar
> -----Original Message-----
> From: bot+bpf-ci@kernel.org <bot+bpf-ci@kernel.org>
> Sent: Tuesday, April 28, 2026 3:58 PM
> To: Vyavahare, Tushar <tushar.vyavahare@intel.com>;
> netdev@vger.kernel.org; Karlsson, Magnus <magnus.karlsson@intel.com>;
> Fijalkowski, Maciej <maciej.fijalkowski@intel.com>; stfomichev@gmail.com;
> kernelxing@tencent.com; davem@davemloft.net; kuba@kernel.org;
> pabeni@redhat.com; ast@kernel.org; daniel@iogearbox.net; Sarkar,
> Tirthendu <tirthendu.sarkar@intel.com>; Vyavahare, Tushar
> <tushar.vyavahare@intel.com>
> Cc: bpf@vger.kernel.org; ast@kernel.org; andrii@kernel.org;
> daniel@iogearbox.net; martin.lau@kernel.org; eddyz87@gmail.com;
> yonghong.song@linux.dev; clm@meta.com; ihor.solodrai@linux.dev
> Subject: Re: [PATCH 1/5] selftests/xsk: Introduce helpers for setting UMEM
> properties
>
> > diff --git a/tools/testing/selftests/bpf/prog_tests/test_xsk.c
> > b/tools/testing/selftests/bpf/prog_tests/test_xsk.c
> > index 7950c504ed28..d5cd8f590b25 100644
> > --- a/tools/testing/selftests/bpf/prog_tests/test_xsk.c
> > +++ b/tools/testing/selftests/bpf/prog_tests/test_xsk.c
> > @@ -303,6 +303,22 @@ static void test_spec_reset(struct test_spec *test)
> > __test_spec_init(test, test->ifobj_tx, test->ifobj_rx); }
> >
> > +static void test_spec_set_unaligned(struct test_spec *test) {
> > + test_spec_set_unaligned(test);
> > +}
>
> Does this function recurse unconditionally? Looking at the functions this
> helper replaces, the body should set unaligned_mode = true on both ifobj_tx-
> >umem and ifobj_rx->umem. Instead, every call to
> test_spec_set_unaligned() recurses into itself with the same argument until
> the stack is exhausted.
>
> Five tests introduced in this commit call the broken helper:
>
> testapp_send_receive_unaligned()
> testapp_send_receive_unaligned_mb()
> testapp_unaligned_inv_desc()
> testapp_unaligned_inv_desc_4001_frame()
> testapp_unaligned_inv_desc_mb()
>
> Each previously worked by directly setting the unaligned_mode fields.
> After this patch, those direct assignments are replaced with calls to
> test_spec_set_unaligned(), which crashes via stack overflow instead of
> running the test.
>
> > +
> > +static void test_spec_set_frame_size(struct test_spec *test, u32
> > +size) {
> > + test->ifobj_tx->umem->frame_size = size;
> > + test->ifobj_rx->umem->frame_size = size; }
> > +
> > +static void test_spec_set_frame_headroom(struct test_spec *test, u32
> > +size) {
> > + test->ifobj_rx->umem->frame_headroom = size; }
> > +
>
> [ ... ]
>
> > @@ -2025,8 +2041,7 @@ int testapp_stats_fill_empty(struct test_spec
> > *test)
> >
> > int testapp_send_receive_unaligned(struct test_spec *test) {
> > - test->ifobj_tx->umem->unaligned_mode = true;
> > - test->ifobj_rx->umem->unaligned_mode = true;
> > + test_spec_set_unaligned(test);
> > /* Let half of the packets straddle a 4K buffer boundary */
> > if (pkt_stream_replace_half(test, MIN_PKT_SIZE, -MIN_PKT_SIZE / 2))
> > return TEST_FAILURE;
> > @@ -2037,8 +2052,7 @@ int testapp_send_receive_unaligned(struct
> > test_spec *test) int testapp_send_receive_unaligned_mb(struct
> > test_spec *test) {
> > test->mtu = MAX_ETH_JUMBO_SIZE;
> > - test->ifobj_tx->umem->unaligned_mode = true;
> > - test->ifobj_rx->umem->unaligned_mode = true;
> > + test_spec_set_unaligned(test);
> > if (pkt_stream_replace(test, DEFAULT_PKT_CNT,
> MAX_ETH_JUMBO_SIZE))
> > return TEST_FAILURE;
> > return testapp_validate_traffic(test);
>
> [ ... ]
>
> > @@ -2380,10 +2391,8 @@ int
> testapp_unaligned_inv_desc_4001_frame(struct test_spec *test)
> > u64 page_size, umem_size;
> >
> > /* Odd frame size so the UMEM doesn't end near a page boundary. */
> > - test->ifobj_tx->umem->frame_size = 4001;
> > - test->ifobj_rx->umem->frame_size = 4001;
> > - test->ifobj_tx->umem->unaligned_mode = true;
> > - test->ifobj_rx->umem->unaligned_mode = true;
> > + test_spec_set_frame_size(test, 4001);
> > + test_spec_set_unaligned(test);
> > /* This test exists to test descriptors that straddle the end of
> > * the UMEM but not a page.
> > */
>
> [ ... ]
>
> > @@ -2411,8 +2422,7 @@ int testapp_aligned_inv_desc_mb(struct
> test_spec
> > *test)
> >
> > int testapp_unaligned_inv_desc_mb(struct test_spec *test) {
> > - test->ifobj_tx->umem->unaligned_mode = true;
> > - test->ifobj_rx->umem->unaligned_mode = true;
> > + test_spec_set_unaligned(test);
> > return testapp_invalid_desc_mb(test); }
>
>
> ---
> AI reviewed your patch. Please fix the bug or email reply why it's not a bug.
> See: https://github.com/kernel-
> patches/vmtest/blob/master/ci/claude/README.md
>
> CI run summary: https://github.com/kernel-
> patches/bpf/actions/runs/25045852068
^ permalink raw reply [flat|nested] 8+ messages in thread
end of thread, other threads:[~2026-04-28 12:39 UTC | newest]
Thread overview: 8+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2026-04-28 8:03 [PATCH 0/5] selftests/xsk: foundational UMEM refactoring Tushar Vyavahare
2026-04-28 8:03 ` [PATCH 1/5] selftests/xsk: Introduce helpers for setting UMEM properties Tushar Vyavahare
2026-04-28 10:28 ` bot+bpf-ci
2026-04-28 12:39 ` Vyavahare, Tushar
2026-04-28 8:03 ` [PATCH 2/5] selftests/xsk: Eliminate umem reference from ifobject Tushar Vyavahare
2026-04-28 8:03 ` [PATCH 3/5] selftests/xsk: Remove umem from pkt_generate parameters Tushar Vyavahare
2026-04-28 8:03 ` [PATCH 4/5] selftests/xsk: Use umem_size() helper consistently Tushar Vyavahare
2026-04-28 8:03 ` [PATCH 5/5] selftests/xsk: Introduce mmap_size in umem struct Tushar Vyavahare
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox