From: Mykyta Yatsenko <mykyta.yatsenko5@gmail.com>
To: bpf@vger.kernel.org, ast@kernel.org, andrii@kernel.org,
daniel@iogearbox.net, kafai@meta.com, kernel-team@meta.com,
eddyz87@gmail.com, memxor@gmail.com,
herbert@gondor.apana.org.au
Cc: Mykyta Yatsenko <yatsenko@meta.com>
Subject: [PATCH RFC bpf-next v2 16/18] selftests/bpf: Add BPF iterator tests for resizable hash map
Date: Wed, 08 Apr 2026 08:10:21 -0700 [thread overview]
Message-ID: <20260408-rhash-v2-16-3b3675da1f6e@meta.com> (raw)
In-Reply-To: <20260408-rhash-v2-0-3b3675da1f6e@meta.com>
From: Mykyta Yatsenko <yatsenko@meta.com>
Test BPF iterator functionality for BPF_MAP_TYPE_RHASH:
* Basic iteration verifying all elements are visited
* Overflow test triggering seq_file restart, validating correct
resume behavior via skip_elems tracking
Signed-off-by: Mykyta Yatsenko <yatsenko@meta.com>
---
tools/testing/selftests/bpf/prog_tests/rhash.c | 160 ++++++++++++++++++++-
.../selftests/bpf/progs/bpf_iter_bpf_rhash_map.c | 75 ++++++++++
2 files changed, 234 insertions(+), 1 deletion(-)
diff --git a/tools/testing/selftests/bpf/prog_tests/rhash.c b/tools/testing/selftests/bpf/prog_tests/rhash.c
index 53ccc9366b5a..f28296b16593 100644
--- a/tools/testing/selftests/bpf/prog_tests/rhash.c
+++ b/tools/testing/selftests/bpf/prog_tests/rhash.c
@@ -4,6 +4,7 @@
#include <string.h>
#include <stdio.h>
#include "rhash.skel.h"
+#include "bpf_iter_bpf_rhash_map.skel.h"
#include <linux/bpf.h>
#include <linux/perf_event.h>
#include <sys/syscall.h>
@@ -309,6 +310,158 @@ static void rhash_get_next_key_resize_test(void)
rhash__destroy(skel);
}
+/*
+ * Basic BPF iterator traversal over BPF_MAP_TYPE_RHASH: populate 64
+ * elements, drain the iterator, and verify the BPF program visited every
+ * element exactly once via the key_sum/elem_count counters it exports.
+ */
+static void rhash_iter_test(void)
+{
+	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
+	struct bpf_iter_bpf_rhash_map *skel;
+	int err, i, len, map_fd, iter_fd;
+	union bpf_iter_link_info linfo;
+	u32 expected_key_sum = 0, key;
+	struct bpf_link *link;
+	u64 val = 0;
+	char buf[64];
+
+	skel = bpf_iter_bpf_rhash_map__open();
+	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_rhash_map__open"))
+		return;
+
+	err = bpf_iter_bpf_rhash_map__load(skel);
+	if (!ASSERT_OK(err, "bpf_iter_bpf_rhash_map__load"))
+		goto out;
+
+	map_fd = bpf_map__fd(skel->maps.rhashmap);
+
+	/* Populate map with test data; keys 1..64 make the expected key
+	 * sum non-zero, so a silently empty iteration cannot pass.
+	 */
+	for (i = 0; i < 64; i++) {
+		key = i + 1;
+		expected_key_sum += key;
+
+		err = bpf_map_update_elem(map_fd, &key, &val, BPF_NOEXIST);
+		if (!ASSERT_OK(err, "map_update"))
+			goto out;
+	}
+
+	memset(&linfo, 0, sizeof(linfo));
+	linfo.map.map_fd = map_fd;
+	opts.link_info = &linfo;
+	opts.link_info_len = sizeof(linfo);
+
+	link = bpf_program__attach_iter(skel->progs.dump_bpf_rhash_map, &opts);
+	if (!ASSERT_OK_PTR(link, "attach_iter"))
+		goto out;
+
+	iter_fd = bpf_iter_create(bpf_link__fd(link));
+	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
+		goto free_link;
+
+	/* Drain the iterator; the BPF program accumulates into .bss. */
+	do {
+		len = read(iter_fd, buf, sizeof(buf));
+	} while (len > 0);
+	/* Distinguish normal EOF (0) from a read(2) error (-1), matching
+	 * the error check done in rhash_iter_overflow_test().
+	 */
+	ASSERT_GE(len, 0, "read");
+
+	ASSERT_EQ(skel->bss->key_sum, expected_key_sum, "key_sum");
+	ASSERT_EQ(skel->bss->elem_count, 64, "elem_count");
+
+	close(iter_fd);
+
+free_link:
+	bpf_link__destroy(link);
+out:
+	bpf_iter_bpf_rhash_map__destroy(skel);
+}
+
+/*
+ * Test seq_file overflow handling for BPF iterator over resizable hashmap.
+ *
+ * The BPF program writes print_count * 8 bytes per element, configured so
+ * that a single element's output nearly fills the seq_file buffer (8 pages).
+ * With multiple elements, the buffer overflows mid-element, triggering
+ * seq_file's restart mechanism: it discards the partial output, enlarges or
+ * flushes the buffer, and re-invokes the BPF program starting from the
+ * element that caused the overflow.
+ *
+ * Insert a few elements to avoid triggering rhashtable resize, then verify:
+ * - All elements are seen (unique_elem_count == num_elems)
+ * - Overflow occurred (total_visits > unique_elem_count)
+ * - Output is consistent (each chunk of print_count u64s has the same value)
+ */
+static void rhash_iter_overflow_test(void)
+{
+	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
+	struct bpf_iter_bpf_rhash_map *skel;
+	u32 total_read_len, expected_read_len, write_len, num_elems = 4;
+	int err, i, j, len, map_fd, iter_fd;
+	union bpf_iter_link_info linfo;
+	struct bpf_link *link;
+	char *buf;
+
+	skel = bpf_iter_bpf_rhash_map__open();
+	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_rhash_map__open"))
+		return;
+
+	/* Size per-element output just under the seq_file buffer (8 pages)
+	 * so that the second element overflows it mid-element.  The -8
+	 * slack leaves room for one u64 so a single element still fits.
+	 * print_count must be set before load so the BPF program sees it.
+	 */
+	write_len = sysconf(_SC_PAGE_SIZE) * 8;
+	skel->bss->print_count = (write_len - 8) / 8;
+	expected_read_len = num_elems * (write_len - 8);
+
+	err = bpf_iter_bpf_rhash_map__load(skel);
+	if (!ASSERT_OK(err, "bpf_iter_bpf_rhash_map__load"))
+		goto out;
+
+	map_fd = bpf_map__fd(skel->maps.rhashmap);
+
+	/* key == value for each element; the BPF program validates this. */
+	for (i = 0; i < num_elems; i++) {
+		__u64 val = i;
+
+		err = bpf_map_update_elem(map_fd, &i, &val, BPF_NOEXIST);
+		if (!ASSERT_OK(err, "map_update"))
+			goto out;
+	}
+
+	memset(&linfo, 0, sizeof(linfo));
+	linfo.map.map_fd = map_fd;
+	opts.link_info = &linfo;
+	opts.link_info_len = sizeof(linfo);
+
+	link = bpf_program__attach_iter(skel->progs.dump_bpf_rhash_map_overflow, &opts);
+	if (!ASSERT_OK_PTR(link, "attach_iter"))
+		goto out;
+
+	iter_fd = bpf_iter_create(bpf_link__fd(link));
+	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
+		goto free_link;
+
+	buf = malloc(expected_read_len);
+	if (!ASSERT_OK_PTR(buf, "malloc"))
+		goto close_iter;
+
+	/* Collect the full iterator output; read() may return short chunks
+	 * as seq_file flushes, so accumulate until EOF (0) or error (-1).
+	 */
+	total_read_len = 0;
+	while ((len = read(iter_fd, buf + total_read_len,
+			   expected_read_len - total_read_len)) > 0)
+		total_read_len += len;
+
+	ASSERT_OK(len, "len");
+	ASSERT_EQ(total_read_len, expected_read_len, "total_read_len");
+	ASSERT_EQ(skel->bss->unique_elem_count, num_elems, "unique_elem_count");
+	/* total_visits > unique_elem_count proves at least one element was
+	 * re-invoked after a seq_file restart, i.e. overflow did happen.
+	 */
+	ASSERT_GT(skel->bss->total_visits, skel->bss->unique_elem_count,
+		  "overflow_occurred");
+
+	/* Verify each output chunk is internally consistent: every u64 in
+	 * a chunk repeats the element's value, so a discarded partial
+	 * write that leaked into the output would break the pattern.
+	 * Chunk order is hash order, hence only a range check on val[0].
+	 */
+	for (i = 0; i < num_elems; i++) {
+		__u64 *val = ((__u64 *)buf) + i * skel->bss->print_count;
+
+		ASSERT_LT(val[0], num_elems, "value_in_range");
+		for (j = 1; j < skel->bss->print_count; j++)
+			ASSERT_EQ(val[j], val[0], "consistent_value");
+	}
+
+	free(buf);
+close_iter:
+	close(iter_fd);
+free_link:
+	bpf_link__destroy(link);
+out:
+	bpf_iter_bpf_rhash_map__destroy(skel);
+}
+
void test_rhash(void)
{
if (test__start_subtest("test_rhash_lookup_update"))
@@ -340,5 +493,10 @@ void test_rhash(void)
if (test__start_subtest("test_rhash_get_next_key_stress"))
rhash_get_next_key_stress_test();
-}
+ if (test__start_subtest("test_rhash_iter"))
+ rhash_iter_test();
+
+ if (test__start_subtest("test_rhash_iter_overflow"))
+ rhash_iter_overflow_test();
+}
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_rhash_map.c b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_rhash_map.c
new file mode 100644
index 000000000000..30c270f12b61
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_rhash_map.c
@@ -0,0 +1,75 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+
+char _license[] SEC("license") = "GPL";
+
+/* Resizable hash map under test; BPF_F_NO_PREALLOC per RHASH requirements
+ * shown in the other rhash selftests — TODO confirm flag is mandatory.
+ */
+struct {
+	__uint(type, BPF_MAP_TYPE_RHASH);
+	__uint(map_flags, BPF_F_NO_PREALLOC);
+	__uint(max_entries, 64);
+	__type(key, __u32);
+	__type(value, __u64);
+} rhashmap SEC(".maps");
+
+/* Results accumulated by dump_bpf_rhash_map(), read back by userspace. */
+__u32 key_sum = 0;
+__u64 val_sum = 0;
+__u32 elem_count = 0;
+__u32 err = 0;
+
+/* Basic iterator program: fold every visited element into the global
+ * counters so userspace can verify full traversal.
+ */
+SEC("iter/bpf_map_elem")
+int dump_bpf_rhash_map(struct bpf_iter__bpf_map_elem *ctx)
+{
+	__u64 *value = ctx->value;
+	__u32 *k = ctx->key;
+
+	/* key/value are NULL on the final end-of-iteration callback */
+	if (!k || !value)
+		return 0;
+
+	elem_count++;
+	key_sum += *k;
+	val_sum += *value;
+	return 0;
+}
+
+/* For overflow test: number of u64 writes per element, set by userspace
+ * before load to size the per-element seq_file output.
+ */
+__u32 print_count = 0;
+
+/* Bitmask of keys seen so far (one bit per key, so keys must be < 64) */
+__u64 seen_keys = 0;
+__u32 unique_elem_count = 0;
+/* Total program invocations; exceeds unique_elem_count on seq restarts */
+__u32 total_visits = 0;
+
+/* Overflow-test iterator program: track unique vs. total visits (to detect
+ * seq_file restarts) and emit print_count copies of the element value so
+ * userspace can check that discarded partial output never leaks through.
+ */
+SEC("iter/bpf_map_elem")
+int dump_bpf_rhash_map_overflow(struct bpf_iter__bpf_map_elem *ctx)
+{
+	struct seq_file *seq = ctx->meta->seq;
+	__u32 *key = ctx->key;
+	__u64 *val = ctx->value;
+	__u64 bit;
+	__u32 i;
+
+	if (!key || !val)
+		return 0; /* The end of iteration */
+
+	total_visits++;
+
+	/* Validate that key and value are as expected.  seen_keys has only
+	 * 64 bits, so keys must be in [0, 63]; a key of 64 would shift by
+	 * the full width of the mask below, which is undefined.
+	 */
+	if (*key != *val || *key >= 64) {
+		err = 1;
+		return 0;
+	}
+
+	bit = 1ULL << *key;
+	if (!(seen_keys & bit))
+		unique_elem_count++;
+	seen_keys |= bit;
+
+	/* Write print_count * 8 bytes to potentially overflow the buffer;
+	 * stop early if seq_file reports overflow for this chunk.
+	 */
+	bpf_for(i, 0, print_count) {
+		if (bpf_seq_write(seq, val, sizeof(__u64)))
+			return 0;
+	}
+
+	return 0;
+}
--
2.52.0
next prev parent reply other threads:[~2026-04-08 15:11 UTC|newest]
Thread overview: 69+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-04-08 15:10 [PATCH RFC bpf-next v2 00/18] bpf: Introduce resizable hash map Mykyta Yatsenko
2026-04-08 15:10 ` [PATCH RFC bpf-next v2 01/18] bpf: Register rhash map Mykyta Yatsenko
2026-04-10 22:31 ` Emil Tsalapatis
2026-04-13 8:10 ` Mykyta Yatsenko
2026-04-14 17:50 ` Emil Tsalapatis
2026-04-08 15:10 ` [PATCH RFC bpf-next v2 02/18] bpf: Add resizable hashtab skeleton Mykyta Yatsenko
2026-04-08 15:10 ` [PATCH RFC bpf-next v2 03/18] bpf: Implement lookup, delete, update for resizable hashtab Mykyta Yatsenko
2026-04-12 23:10 ` Alexei Starovoitov
2026-04-13 10:52 ` Mykyta Yatsenko
2026-04-13 16:24 ` Alexei Starovoitov
2026-04-13 16:27 ` Daniel Borkmann
2026-04-13 19:43 ` Mykyta Yatsenko
2026-04-13 20:37 ` Emil Tsalapatis
2026-04-14 8:34 ` Mykyta Yatsenko
2026-04-14 10:25 ` Leon Hwang
2026-04-14 10:28 ` Mykyta Yatsenko
2026-04-08 15:10 ` [PATCH RFC bpf-next v2 04/18] rhashtable: Add rhashtable_walk_enter_from() Mykyta Yatsenko
2026-04-12 23:13 ` Alexei Starovoitov
2026-04-13 12:22 ` Mykyta Yatsenko
2026-04-13 22:22 ` Emil Tsalapatis
2026-04-08 15:10 ` [PATCH RFC bpf-next v2 05/18] bpf: Implement get_next_key and free_internal_structs for resizable hashtab Mykyta Yatsenko
2026-04-13 22:44 ` Emil Tsalapatis
2026-04-14 8:11 ` Mykyta Yatsenko
2026-04-08 15:10 ` [PATCH RFC bpf-next v2 06/18] bpf: Implement bpf_each_rhash_elem() using walk API Mykyta Yatsenko
2026-04-13 23:02 ` Emil Tsalapatis
2026-04-08 15:10 ` [PATCH RFC bpf-next v2 07/18] bpf: Implement batch ops for resizable hashtab Mykyta Yatsenko
2026-04-13 23:25 ` Emil Tsalapatis
2026-04-14 8:08 ` Mykyta Yatsenko
2026-04-14 17:47 ` Emil Tsalapatis
2026-04-08 15:10 ` [PATCH RFC bpf-next v2 08/18] bpf: Implement iterator APIs " Mykyta Yatsenko
2026-04-14 17:49 ` Emil Tsalapatis
2026-04-15 11:15 ` Mykyta Yatsenko
2026-04-08 15:10 ` [PATCH RFC bpf-next v2 09/18] bpf: Implement alloc and free " Mykyta Yatsenko
2026-04-12 23:15 ` Alexei Starovoitov
2026-04-08 15:10 ` [PATCH RFC bpf-next v2 10/18] bpf: Allow timers, workqueues and task_work in " Mykyta Yatsenko
2026-04-08 15:10 ` [PATCH RFC bpf-next v2 11/18] libbpf: Support resizable hashtable Mykyta Yatsenko
2026-04-14 17:46 ` Emil Tsalapatis
2026-04-08 15:10 ` [PATCH RFC bpf-next v2 12/18] selftests/bpf: Add basic tests for resizable hash map Mykyta Yatsenko
2026-04-12 23:16 ` Alexei Starovoitov
2026-04-08 15:10 ` [PATCH RFC bpf-next v2 13/18] selftests/bpf: Support resizable hashtab in test_maps Mykyta Yatsenko
2026-04-12 23:17 ` Alexei Starovoitov
2026-04-08 15:10 ` [PATCH RFC bpf-next v2 14/18] selftests/bpf: Resizable hashtab BPF_F_LOCK tests Mykyta Yatsenko
2026-04-12 23:18 ` Alexei Starovoitov
2026-04-08 15:10 ` [PATCH RFC bpf-next v2 15/18] selftests/bpf: Add stress tests for resizable hash get_next_key Mykyta Yatsenko
2026-04-12 23:19 ` Alexei Starovoitov
2026-04-08 15:10 ` Mykyta Yatsenko [this message]
2026-04-12 23:20 ` [PATCH RFC bpf-next v2 16/18] selftests/bpf: Add BPF iterator tests for resizable hash map Alexei Starovoitov
2026-04-08 15:10 ` [PATCH RFC bpf-next v2 17/18] bpftool: Add rhash map documentation Mykyta Yatsenko
2026-04-14 17:51 ` Emil Tsalapatis
2026-04-08 15:10 ` [PATCH RFC bpf-next v2 18/18] selftests/bpf: Add resizable hashmap to benchmarks Mykyta Yatsenko
2026-04-12 23:25 ` Alexei Starovoitov
2026-04-12 23:11 ` [PATCH RFC bpf-next v2 00/18] bpf: Introduce resizable hash map Alexei Starovoitov
2026-04-13 8:28 ` Mykyta Yatsenko
2026-04-15 3:27 ` Herbert Xu
2026-04-15 5:13 ` Alexei Starovoitov
2026-04-16 5:18 ` Herbert Xu
2026-04-16 14:11 ` Alexei Starovoitov
2026-04-16 15:10 ` Mykyta Yatsenko
2026-04-16 15:36 ` Alexei Starovoitov
2026-04-16 16:30 ` Mykyta Yatsenko
2026-04-17 6:54 ` Herbert Xu
2026-04-17 15:16 ` Mykyta Yatsenko
2026-04-18 0:43 ` Herbert Xu
2026-04-20 11:45 ` Mykyta Yatsenko
2026-04-20 15:41 ` Alexei Starovoitov
2026-04-20 15:50 ` Mykyta Yatsenko
2026-04-20 16:06 ` Alexei Starovoitov
2026-04-20 16:37 ` Mykyta Yatsenko
2026-04-20 18:00 ` Alexei Starovoitov
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260408-rhash-v2-16-3b3675da1f6e@meta.com \
--to=mykyta.yatsenko5@gmail.com \
--cc=andrii@kernel.org \
--cc=ast@kernel.org \
--cc=bpf@vger.kernel.org \
--cc=daniel@iogearbox.net \
--cc=eddyz87@gmail.com \
--cc=herbert@gondor.apana.org.au \
--cc=kafai@meta.com \
--cc=kernel-team@meta.com \
--cc=memxor@gmail.com \
--cc=yatsenko@meta.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox