From: Mykyta Yatsenko <mykyta.yatsenko5@gmail.com>
To: bpf@vger.kernel.org, ast@kernel.org, andrii@kernel.org,
daniel@iogearbox.net, kafai@meta.com, kernel-team@meta.com,
eddyz87@gmail.com, memxor@gmail.com,
herbert@gondor.apana.org.au
Cc: Mykyta Yatsenko <yatsenko@meta.com>
Subject: [PATCH bpf-next v4 06/11] bpf: Optimize word-sized keys for resizable hashtable
Date: Wed, 13 May 2026 15:36:09 -0700 [thread overview]
Message-ID: <20260513-rhash-v4-6-dd3d541ccb0b@meta.com> (raw)
In-Reply-To: <20260513-rhash-v4-0-dd3d541ccb0b@meta.com>
From: Mykyta Yatsenko <yatsenko@meta.com>
Specialize the lookup/update/delete/get_next_key/batch/iter paths for
keys whose size matches sizeof(long) (4 bytes on 32-bit, 8 bytes on
64-bit). A static-const rhashtable_params lets the compiler inline a
custom XOR-fold hashfn and a single-word equality cmpfn, eliminating
the indirect jhash dispatch. The same hashfn and cmpfn are installed
into rhashtable's stored params so the rehash worker and slow-path
inserts agree with the inlined fast paths.
Dispatch lives in a single rhtab_next_key() helper; the other callsites
branch inline on map->key_size.
Signed-off-by: Mykyta Yatsenko <yatsenko@meta.com>
---
kernel/bpf/hashtab.c | 74 ++++++++++++++++++++++++++++++++++++++++++----------
1 file changed, 60 insertions(+), 14 deletions(-)
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 9cc41850dc79..b3ac6af11b2a 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -2763,6 +2763,31 @@ static inline void *rhtab_elem_value(struct rhtab_elem *l, u32 key_size)
return l->data + round_up(key_size, 8);
}
+/* Specialize hash function and objcmp for long sized key */
+static __always_inline int rhtab_key_cmp_long(struct rhashtable_compare_arg *arg,
+ const void *ptr)
+{
+ const unsigned long key1 = *(const unsigned long *)arg->key;
+ const struct rhtab_elem *key2 = ptr;
+
+ return key1 != *(const unsigned long *)key2->data;
+}
+
+static __always_inline u32 rhtab_hashfn_long(const void *data, u32 len, u32 seed)
+{
+ u64 k = *(const unsigned long *)data;
+
+ return (u32)(k ^ (k >> 32)) ^ seed;
+}
+
+static const struct rhashtable_params rhtab_params_long = {
+ .head_offset = offsetof(struct rhtab_elem, node),
+ .key_offset = offsetof(struct rhtab_elem, data),
+ .key_len = sizeof(long),
+ .hashfn = rhtab_hashfn_long,
+ .obj_cmpfn = rhtab_key_cmp_long,
+};
+
static struct bpf_map *rhtab_map_alloc(union bpf_attr *attr)
{
struct rhashtable_params params;
@@ -2788,6 +2813,11 @@ static struct bpf_map *rhtab_map_alloc(union bpf_attr *attr)
params.nelem_hint = (u32)attr->map_extra;
params.automatic_shrinking = true;
+ if (rhtab->map.key_size == sizeof(long)) {
+ params.hashfn = rhtab_hashfn_long;
+ params.obj_cmpfn = rhtab_key_cmp_long;
+ }
+
err = rhashtable_init(&rhtab->ht, &params);
if (err)
goto free_rhtab;
@@ -2871,6 +2901,14 @@ static void rhtab_map_free(struct bpf_map *map)
bpf_map_area_free(rhtab);
}
+static __always_inline struct rhtab_elem *
+rhtab_next_key(struct bpf_rhtab *rhtab, const void *prev_key)
+{
+ if (rhtab->map.key_size == sizeof(long))
+ return rhashtable_next_key(&rhtab->ht, prev_key, rhtab_params_long);
+ return rhashtable_next_key(&rhtab->ht, prev_key, rhtab_params);
+}
+
static void *rhtab_lookup_elem(struct bpf_map *map, void *key)
{
struct bpf_rhtab *rhtab = container_of(map, struct bpf_rhtab, map);
@@ -2878,6 +2916,9 @@ static void *rhtab_lookup_elem(struct bpf_map *map, void *key)
/* Hold RCU lock in case sleepable program calls via gen_lookup */
guard(rcu)();
+ if (map->key_size == sizeof(long))
+ return rhashtable_lookup_likely(&rhtab->ht, key, rhtab_params_long);
+
return rhashtable_lookup_likely(&rhtab->ht, key, rhtab_params);
}
@@ -2912,7 +2953,12 @@ static int rhtab_delete_elem(struct bpf_rhtab *rhtab, struct rhtab_elem *elem, v
* raw tracepoints, which we don't have in rhashtable.
*/
bpf_disable_instrumentation();
- err = rhashtable_remove_fast(&rhtab->ht, &elem->node, rhtab_params);
+
+ if (rhtab->map.key_size == sizeof(long))
+ err = rhashtable_remove_fast(&rhtab->ht, &elem->node, rhtab_params_long);
+ else
+ err = rhashtable_remove_fast(&rhtab->ht, &elem->node, rhtab_params);
+
bpf_enable_instrumentation();
if (err)
@@ -3026,7 +3072,12 @@ static long rhtab_map_update_elem(struct bpf_map *map, void *key, void *value, u
/* Prevent deadlock for NMI programs attempting to take bucket lock */
bpf_disable_instrumentation();
- tmp = rhashtable_lookup_get_insert_fast(&rhtab->ht, &elem->node, rhtab_params);
+
+ if (map->key_size == sizeof(long))
+ tmp = rhashtable_lookup_get_insert_fast(&rhtab->ht, &elem->node, rhtab_params_long);
+ else
+ tmp = rhashtable_lookup_get_insert_fast(&rhtab->ht, &elem->node, rhtab_params);
+
bpf_enable_instrumentation();
if (tmp) {
@@ -3111,11 +3162,9 @@ static int rhtab_map_get_next_key(struct bpf_map *map, void *key, void *next_key
struct bpf_rhtab *rhtab = container_of(map, struct bpf_rhtab, map);
struct rhtab_elem *elem;
- elem = rhashtable_next_key(&rhtab->ht, key, rhtab_params);
-
- /* if not found, return the first key */
+ elem = rhtab_next_key(rhtab, key);
if (PTR_ERR(elem) == -ENOENT)
- elem = rhashtable_next_key(&rhtab->ht, NULL, rhtab_params);
+ elem = rhtab_next_key(rhtab, NULL);
if (!elem)
return -ENOENT;
@@ -3168,7 +3217,7 @@ static long bpf_each_rhash_elem(struct bpf_map *map, bpf_callback_t callback_fn,
* elements are deleted/inserted, there may be missed or duplicate
* elements visited.
*/
- while ((elem = rhashtable_next_key(&rhtab->ht, prev_key, rhtab_params))) {
+ while ((elem = rhtab_next_key(rhtab, prev_key))) {
if (IS_ERR(elem))
break;
num_elems++;
@@ -3269,8 +3318,7 @@ static int __rhtab_map_lookup_and_delete_batch(struct bpf_map *map,
* returns ERR_PTR(-ENOENT); the batch terminates with no elements
* and userspace must restart from a NULL cursor.
*/
- elem = rhashtable_next_key(&rhtab->ht, ubatch ? cursor : NULL,
- rhtab_params);
+ elem = rhtab_next_key(rhtab, ubatch ? cursor : NULL);
while (elem && !IS_ERR(elem) && total < max_count) {
memcpy(dst_key, elem->data, key_size);
rhtab_read_elem_value(map, dst_val, elem, elem_map_flags);
@@ -3279,7 +3327,7 @@ static int __rhtab_map_lookup_and_delete_batch(struct bpf_map *map,
if (do_delete)
del_elems[total] = elem;
- elem = rhashtable_next_key(&rhtab->ht, dst_key, rhtab_params);
+ elem = rhtab_next_key(rhtab, dst_key);
dst_key += key_size;
dst_val += value_size;
total++;
@@ -3356,8 +3404,7 @@ static void *bpf_rhash_map_seq_start(struct seq_file *seq, loff_t *pos)
struct rhtab_elem *elem;
rcu_read_lock();
- elem = rhashtable_next_key(&info->rhtab->ht, key,
- rhtab_params);
+ elem = rhtab_next_key(info->rhtab, key);
if (IS_ERR_OR_NULL(elem))
return NULL;
if (*pos == 0)
@@ -3375,8 +3422,7 @@ static void *bpf_rhash_map_seq_next(struct seq_file *seq, void *v, loff_t *pos)
info->last_key_valid = true;
++*pos;
- next = rhashtable_next_key(&info->rhtab->ht, info->last_key,
- rhtab_params);
+ next = rhtab_next_key(info->rhtab, info->last_key);
if (IS_ERR(next))
return NULL;
return next;
--
2.53.0-Meta
next prev parent reply other threads:[~2026-05-13 22:37 UTC|newest]
Thread overview: 17+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-05-13 22:36 [PATCH bpf-next v4 00/11] bpf: Introduce resizable hash map Mykyta Yatsenko
2026-05-13 22:36 ` [PATCH bpf-next v4 01/11] rhashtable: Add rhashtable_next_key() API Mykyta Yatsenko
2026-05-13 23:21 ` bot+bpf-ci
2026-05-13 22:36 ` [PATCH bpf-next v4 02/11] rhashtable: Add selftest for rhashtable_next_key() Mykyta Yatsenko
2026-05-13 22:36 ` [PATCH bpf-next v4 03/11] bpf: Implement resizable hashmap basic functions Mykyta Yatsenko
2026-05-13 23:21 ` bot+bpf-ci
2026-05-14 10:59 ` Mykyta Yatsenko
2026-05-13 22:36 ` [PATCH bpf-next v4 04/11] bpf: Implement iteration ops for resizable hashtab Mykyta Yatsenko
2026-05-13 22:36 ` [PATCH bpf-next v4 05/11] bpf: Allow special fields in " Mykyta Yatsenko
2026-05-13 23:21 ` bot+bpf-ci
2026-05-14 11:00 ` Mykyta Yatsenko
2026-05-13 22:36 ` Mykyta Yatsenko [this message]
2026-05-13 22:36 ` [PATCH bpf-next v4 07/11] libbpf: Support resizable hashtable Mykyta Yatsenko
2026-05-13 22:36 ` [PATCH bpf-next v4 08/11] selftests/bpf: Add basic tests for resizable hash map Mykyta Yatsenko
2026-05-13 22:36 ` [PATCH bpf-next v4 09/11] selftests/bpf: Add BPF iterator " Mykyta Yatsenko
2026-05-13 22:36 ` [PATCH bpf-next v4 10/11] bpftool: Add rhash map documentation Mykyta Yatsenko
2026-05-13 22:36 ` [PATCH bpf-next v4 11/11] selftests/bpf: Add resizable hashmap to benchmarks Mykyta Yatsenko
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260513-rhash-v4-6-dd3d541ccb0b@meta.com \
--to=mykyta.yatsenko5@gmail.com \
--cc=andrii@kernel.org \
--cc=ast@kernel.org \
--cc=bpf@vger.kernel.org \
--cc=daniel@iogearbox.net \
--cc=eddyz87@gmail.com \
--cc=herbert@gondor.apana.org.au \
--cc=kafai@meta.com \
--cc=kernel-team@meta.com \
--cc=memxor@gmail.com \
--cc=yatsenko@meta.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.