From: Mykyta Yatsenko <mykyta.yatsenko5@gmail.com>
To: bpf@vger.kernel.org, ast@kernel.org, andrii@kernel.org,
daniel@iogearbox.net, kafai@meta.com, kernel-team@meta.com,
eddyz87@gmail.com, memxor@gmail.com,
herbert@gondor.apana.org.au
Cc: Mykyta Yatsenko <yatsenko@meta.com>
Subject: [PATCH bpf-next v3 02/10] rhashtable: Add rhashtable_walk_enter_from()
Date: Fri, 24 Apr 2026 12:50:44 -0700 [thread overview]
Message-ID: <20260424-rhash-v3-2-d0fa0ce4379b@meta.com> (raw)
In-Reply-To: <20260424-rhash-v3-0-d0fa0ce4379b@meta.com>
From: Mykyta Yatsenko <yatsenko@meta.com>
BPF resizable hashmap needs efficient iteration resume for
get_next_key and seq_file iterators. rhashtable_walk_enter()
always starts from bucket 0, forcing linear skip of already-seen
elements.
Add rhashtable_walk_enter_from() that looks up the key's bucket
and positions the walker there, so walk_next returns the successor
directly. If a resize moved the key to the future table, the
walker resets to the beginning.
Refactor __rhashtable_lookup into __rhashtable_lookup_one to reuse
the single-table lookup in both the two-table search and the new
enter_from positioning.
Signed-off-by: Mykyta Yatsenko <yatsenko@meta.com>
---
include/linux/rhashtable.h | 31 +++++++++--
lib/rhashtable.c | 41 ++++++++++++++
lib/test_rhashtable.c | 131 +++++++++++++++++++++++++++++++++++++++++++++
3 files changed, 198 insertions(+), 5 deletions(-)
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index 0480509a6339..6ac547d6c619 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -246,6 +246,11 @@ static inline void rhashtable_walk_start(struct rhashtable_iter *iter)
(void)rhashtable_walk_start_check(iter);
}
+void rhashtable_walk_enter_from(struct rhashtable *ht,
+ struct rhashtable_iter *iter,
+ const void *key,
+ const struct rhashtable_params params);
+
void *rhashtable_walk_next(struct rhashtable_iter *iter);
void *rhashtable_walk_peek(struct rhashtable_iter *iter);
void rhashtable_walk_stop(struct rhashtable_iter *iter) __releases_shared(RCU);
@@ -606,8 +611,8 @@ static inline int rhashtable_compare(struct rhashtable_compare_arg *arg,
}
/* Internal function, do not use. */
-static __always_inline struct rhash_head *__rhashtable_lookup(
- struct rhashtable *ht, const void *key,
+static __always_inline struct rhash_head *__rhashtable_lookup_one(
+ struct rhashtable *ht, struct bucket_table *tbl, const void *key,
const struct rhashtable_params params,
const enum rht_lookup_freq freq)
__must_hold_shared(RCU)
@@ -617,13 +622,10 @@ static __always_inline struct rhash_head *__rhashtable_lookup(
.key = key,
};
struct rhash_lock_head __rcu *const *bkt;
- struct bucket_table *tbl;
struct rhash_head *he;
unsigned int hash;
BUILD_BUG_ON(!__builtin_constant_p(freq));
- tbl = rht_dereference_rcu(ht->tbl, ht);
-restart:
hash = rht_key_hashfn(ht, tbl, key, params);
bkt = rht_bucket(tbl, hash);
do {
@@ -639,6 +641,25 @@ static __always_inline struct rhash_head *__rhashtable_lookup(
*/
} while (he != RHT_NULLS_MARKER(bkt));
+ return NULL;
+}
+
+/* Internal function, do not use. */
+static __always_inline struct rhash_head *__rhashtable_lookup(
+ struct rhashtable *ht, const void *key,
+ const struct rhashtable_params params,
+ const enum rht_lookup_freq freq)
+ __must_hold_shared(RCU)
+{
+ struct bucket_table *tbl;
+ struct rhash_head *he;
+
+ tbl = rht_dereference_rcu(ht->tbl, ht);
+restart:
+ he = __rhashtable_lookup_one(ht, tbl, key, params, freq);
+ if (he)
+ return he;
+
/* Ensure we see any new tables. */
smp_rmb();
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 6074ed5f66f3..2a6098edf737 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -692,6 +692,47 @@ void rhashtable_walk_enter(struct rhashtable *ht, struct rhashtable_iter *iter)
}
EXPORT_SYMBOL_GPL(rhashtable_walk_enter);
+/**
+ * rhashtable_walk_enter_from - Initialise a walk starting at a key's bucket
+ * @ht: Table to walk over
+ * @iter: Hash table iterator
+ * @key: Key whose bucket to start from
+ * @params: Hash table parameters
+ *
+ * Like rhashtable_walk_enter(), but positions the iterator at @key's
+ * bucket so that a subsequent rhashtable_walk_next() returns the element
+ * following @key. If @key is not present in the main table (e.g. it was
+ * moved to the future table by a concurrent resize), the iterator is
+ * positioned at the beginning. Must be called under the RCU read lock.
+ *
+ * Same constraints as rhashtable_walk_enter() apply.
+ */
+void rhashtable_walk_enter_from(struct rhashtable *ht,
+ struct rhashtable_iter *iter,
+ const void *key,
+ const struct rhashtable_params params)
+ __must_hold(RCU)
+{
+ struct bucket_table *tbl;
+ struct rhash_head *he;
+
+ /* Initialise a conventional walk first (starts from bucket 0). */
+ rhashtable_walk_enter(ht, iter);
+
+ /* A NULL key means "start from the beginning", like plain enter. */
+ if (!key)
+ return;
+
+ tbl = iter->walker.tbl;
+ if (!tbl)
+ return;
+
+ /*
+ * Search only the current table: a key already migrated to the
+ * future table by a concurrent resize is treated as absent, so the
+ * walker stays at the beginning (see commit message).
+ */
+ he = __rhashtable_lookup_one(ht, tbl, key, params,
+ RHT_LOOKUP_NORMAL);
+ if (!he)
+ return;
+
+ /*
+ * Record the key's bucket and element so the walk resumes here.
+ * NOTE(review): iter->skip is left as set by rhashtable_walk_enter();
+ * presumably rhashtable_walk_start() re-derives it from iter->p —
+ * confirm against the walker internals before merging.
+ */
+ iter->slot = rht_key_hashfn(ht, tbl, key, params);
+ iter->p = he;
+}
+EXPORT_SYMBOL_GPL(rhashtable_walk_enter_from);
+
/**
* rhashtable_walk_exit - Free an iterator
* @iter: Hash table Iterator
diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c
index 0b33559a910b..0c4f489bf655 100644
--- a/lib/test_rhashtable.c
+++ b/lib/test_rhashtable.c
@@ -23,6 +23,7 @@
#include <linux/random.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
+#include <linux/cleanup.h>
#define MAX_ENTRIES 1000000
#define TEST_INSERT_FAIL INT_MAX
@@ -679,6 +680,133 @@ static int threadfunc(void *data)
return err;
}
+/*
+ * Exercise rhashtable_walk_enter_from():
+ *  1) walk_next after enter_from(key) must return key's successor,
+ *     never the key itself;
+ *  2) enter_from() with an absent key falls back to a full walk and
+ *     still yields an element;
+ *  3) resuming from each of the 4 keys yields 4 distinct tail lengths
+ *     (a permutation of 1..4), i.e. every resume point is distinct.
+ * Returns 0 on success, negative errno on failure.
+ */
+static int __init test_walk_enter_from(void)
+{
+	struct rhashtable ht;
+	struct test_obj objs[4];
+	struct rhashtable_iter iter;
+	struct test_obj *obj;
+	struct test_obj_val key = { };
+	int err, i, count;
+	int len_freq[4] = { 0, 0, 0, 0 };
+
+	err = rhashtable_init(&ht, &test_rht_params);
+	if (err)
+		return err;
+
+	/* Insert 4 elements with keys 0, 2, 4, 6 */
+	for (i = 0; i < 4; i++) {
+		objs[i].value.id = i * 2;
+		objs[i].value.tid = 0;
+		err = rhashtable_insert_fast(&ht, &objs[i].node,
+					     test_rht_params);
+		if (err) {
+			pr_warn("walk_enter_from: insert %d failed: %d\n",
+				i, err);
+			goto out;
+		}
+	}
+
+	/*
+	 * Test 1: walk_enter_from positions at key, walk_next returns
+	 * the successor (not the key itself).
+	 */
+	for (i = 0; i < 4; i++) {
+		key.id = i * 2;
+		scoped_guard(rcu) {
+			rhashtable_walk_enter_from(&ht, &iter, &key,
+						   test_rht_params);
+			rhashtable_walk_start(&iter);
+		}
+
+		/* -EAGAIN means a resize interrupted the walk: retry. */
+		do {
+			obj = rhashtable_walk_next(&iter);
+		} while (PTR_ERR(obj) == -EAGAIN);
+
+		/* Successor must not be the key itself */
+		if (IS_ERR(obj) || (obj && obj->value.id == i * 2)) {
+			pr_warn("walk_enter_from: returned err %ld or key %d instead of successor\n",
+				PTR_ERR(obj), i * 2);
+			err = -EINVAL;
+		}
+
+		rhashtable_walk_stop(&iter);
+		rhashtable_walk_exit(&iter);
+
+		if (err)
+			goto out;
+	}
+
+	/* Test 2: walk_enter_from with non-existent key starts from the beginning */
+	key.id = 99;
+	scoped_guard(rcu) {
+		rhashtable_walk_enter_from(&ht, &iter, &key, test_rht_params);
+		rhashtable_walk_start(&iter);
+	}
+
+	do {
+		obj = rhashtable_walk_next(&iter);
+	} while (PTR_ERR(obj) == -EAGAIN);
+
+	/*
+	 * Must still return some element (iteration from the table start);
+	 * a NULL result would mean the fallback walk saw an empty table.
+	 */
+	if (IS_ERR_OR_NULL(obj)) {
+		pr_warn("walk_enter_from: returned err %ld\n", PTR_ERR(obj));
+		err = -EINVAL;
+	}
+
+	rhashtable_walk_stop(&iter);
+	rhashtable_walk_exit(&iter);
+
+	if (err)
+		goto out;
+
+	/* Test 3: verify walk_enter_from + walk_next can iterate remaining elements */
+	for (i = 0; i < 4; i++) {
+		key.id = i * 2;
+		count = 0;
+
+		scoped_guard(rcu) {
+			rhashtable_walk_enter_from(&ht, &iter, &key, test_rht_params);
+			rhashtable_walk_start(&iter);
+		}
+
+		/*
+		 * Count walk_next results after this key, including the
+		 * terminating NULL; -EAGAIN restarts are not real elements
+		 * and must not be counted.
+		 */
+		do {
+			obj = rhashtable_walk_next(&iter);
+			if (PTR_ERR(obj) != -EAGAIN)
+				count++;
+		} while (obj && (!IS_ERR(obj) || (PTR_ERR(obj) == -EAGAIN)));
+
+		rhashtable_walk_stop(&iter);
+		rhashtable_walk_exit(&iter);
+		if (count > 4) {
+			pr_warn("walk_enter_from: walked %d elements instead of max 4\n", count);
+			err = -EINVAL;
+			goto out;
+		}
+		/*
+		 * Collect the lengths of every walked tail; each of the
+		 * four keys sits at a distinct position in iteration
+		 * order, so the lengths must be a permutation of 1..4.
+		 */
+		len_freq[count - 1]++;
+	}
+
+	for (i = 0; i < 4; i++) {
+		/*
+		 * Each tail length 1..4 must occur exactly once; a
+		 * duplicate means two keys resumed from the same spot.
+		 */
+		if (len_freq[i] != 1) {
+			pr_warn("walk_enter_from: duplicate tail length\n");
+			err = -EINVAL;
+			goto out;
+		}
+	}
+
+	pr_info("walk_enter_from: all tests passed\n");
+out:
+	rhashtable_destroy(&ht);
+	return err;
+}
+
static int __init test_rht_init(void)
{
unsigned int entries;
@@ -738,6 +866,9 @@ static int __init test_rht_init(void)
test_insert_duplicates_run();
+ pr_info("Testing walk_enter_from: %s\n",
+ test_walk_enter_from() == 0 ? "pass" : "FAIL");
+
if (!tcount)
return 0;
--
2.52.0
next prev parent reply other threads:[~2026-04-24 19:51 UTC|newest]
Thread overview: 27+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-04-24 19:50 [PATCH bpf-next v3 00/10] bpf: Introduce resizable hash map Mykyta Yatsenko
2026-04-24 19:50 ` [PATCH bpf-next v3 01/10] bpf: Implement resizable hashmap basic functions Mykyta Yatsenko
2026-04-24 20:40 ` sashiko-bot
2026-04-25 20:41 ` Mykyta Yatsenko
2026-04-24 20:45 ` bot+bpf-ci
2026-04-25 20:50 ` Mykyta Yatsenko
2026-04-24 19:50 ` Mykyta Yatsenko [this message]
2026-04-24 20:15 ` [PATCH bpf-next v3 02/10] rhashtable: Add rhashtable_walk_enter_from() sashiko-bot
2026-04-24 20:45 ` bot+bpf-ci
2026-04-28 10:35 ` Herbert Xu
2026-04-24 19:50 ` [PATCH bpf-next v3 03/10] bpf: Implement get_next_key() resizable hashtab Mykyta Yatsenko
2026-04-28 10:33 ` Herbert Xu
2026-04-28 13:20 ` Mykyta Yatsenko
2026-04-24 19:50 ` [PATCH bpf-next v3 04/10] bpf: Implement batch ops and iterators for " Mykyta Yatsenko
2026-04-24 20:28 ` sashiko-bot
2026-04-25 21:24 ` Mykyta Yatsenko
2026-04-27 13:36 ` Mykyta Yatsenko
2026-04-24 19:50 ` [PATCH bpf-next v3 05/10] bpf: Allow timers, workqueues and task_work in " Mykyta Yatsenko
2026-04-24 21:05 ` sashiko-bot
2026-04-25 21:29 ` Mykyta Yatsenko
2026-04-24 19:50 ` [PATCH bpf-next v3 06/10] libbpf: Support resizable hashtable Mykyta Yatsenko
2026-04-24 19:50 ` [PATCH bpf-next v3 07/10] selftests/bpf: Add basic tests for resizable hash map Mykyta Yatsenko
2026-04-24 20:02 ` sashiko-bot
2026-04-24 20:32 ` bot+bpf-ci
2026-04-24 19:50 ` [PATCH bpf-next v3 08/10] selftests/bpf: Add BPF iterator " Mykyta Yatsenko
2026-04-24 19:50 ` [PATCH bpf-next v3 09/10] bpftool: Add rhash map documentation Mykyta Yatsenko
2026-04-24 19:50 ` [PATCH bpf-next v3 10/10] selftests/bpf: Add resizable hashmap to benchmarks Mykyta Yatsenko
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260424-rhash-v3-2-d0fa0ce4379b@meta.com \
--to=mykyta.yatsenko5@gmail.com \
--cc=andrii@kernel.org \
--cc=ast@kernel.org \
--cc=bpf@vger.kernel.org \
--cc=daniel@iogearbox.net \
--cc=eddyz87@gmail.com \
--cc=herbert@gondor.apana.org.au \
--cc=kafai@meta.com \
--cc=kernel-team@meta.com \
--cc=memxor@gmail.com \
--cc=yatsenko@meta.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox