From: Leon Hwang <leon.hwang@linux.dev>
To: bpf@vger.kernel.org
Cc: Alexei Starovoitov <ast@kernel.org>,
Daniel Borkmann <daniel@iogearbox.net>,
Andrii Nakryiko <andrii@kernel.org>,
Martin KaFai Lau <martin.lau@linux.dev>,
Eduard Zingerman <eddyz87@gmail.com>, Song Liu <song@kernel.org>,
Yonghong Song <yonghong.song@linux.dev>,
John Fastabend <john.fastabend@gmail.com>,
KP Singh <kpsingh@kernel.org>,
Stanislav Fomichev <sdf@fomichev.me>, Hao Luo <haoluo@google.com>,
Jiri Olsa <jolsa@kernel.org>,
Puranjay Mohan <puranjay@kernel.org>,
Xu Kuohai <xukuohai@huaweicloud.com>,
Catalin Marinas <catalin.marinas@arm.com>,
Will Deacon <will@kernel.org>, Thomas Gleixner <tglx@kernel.org>,
Ingo Molnar <mingo@redhat.com>, Borislav Petkov <bp@alien8.de>,
Dave Hansen <dave.hansen@linux.intel.com>,
x86@kernel.org, "H. Peter Anvin" <hpa@zytor.com>,
Shuah Khan <shuah@kernel.org>, Leon Hwang <leon.hwang@linux.dev>,
Peilin Ye <yepeilin@google.com>,
Luis Gerhorst <luis.gerhorst@fau.de>,
Viktor Malik <vmalik@redhat.com>,
linux-arm-kernel@lists.infradead.org,
linux-kernel@vger.kernel.org, netdev@vger.kernel.org,
linux-kselftest@vger.kernel.org, kernel-patches-bot@fb.com
Subject: [PATCH bpf-next v2 1/6] bpf: Introduce 64-bit bitops kfuncs
Date: Thu, 19 Feb 2026 22:29:23 +0800
Message-ID: <20260219142933.13904-2-leon.hwang@linux.dev>
In-Reply-To: <20260219142933.13904-1-leon.hwang@linux.dev>

Add the following generic 64-bit bitops kfuncs:

* bpf_clz64(): Count leading zeros.
* bpf_ctz64(): Count trailing zeros.
* bpf_ffs64(): Find first set bit, 1-based index; returns 0 when the
  input is 0.
* bpf_fls64(): Find last set bit, 1-based index.
* bpf_bitrev64(): Reverse bits.
* bpf_popcnt64(): Population count (number of set bits).
* bpf_rol64(): Rotate left.
* bpf_ror64(): Rotate right.

Defined zero-input behavior:

* bpf_clz64(0) = 64
* bpf_ctz64(0) = 64
* bpf_ffs64(0) = 0
* bpf_fls64(0) = 0
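
For illustration only (not part of the patch), the semantics above can
be sanity-checked in userspace with compiler builtins; names here are
local to this example:

  #include <assert.h>
  #include <stdint.h>

  /* Mirror the kfunc definitions; the builtins are undefined for 0,
   * hence the explicit guards.
   */
  static uint64_t clz64(uint64_t x) { return x ? __builtin_clzll(x) : 64; }
  static uint64_t ctz64(uint64_t x) { return x ? __builtin_ctzll(x) : 64; }
  static uint64_t ffs64(uint64_t x) { return x ? __builtin_ctzll(x) + 1 : 0; }
  static uint64_t fls64(uint64_t x) { return x ? 64 - __builtin_clzll(x) : 0; }

  int main(void)
  {
          assert(clz64(1) == 63);          /* only bit 0 set: 63 leading zeros */
          assert(ctz64(1ULL << 63) == 63); /* only bit 63 set */
          assert(ffs64(0x8) == 4);         /* lowest set bit is bit 3, 1-based */
          assert(fls64(0x8) == 4);         /* highest set bit is bit 3, 1-based */
          assert(clz64(0) == 64 && ctz64(0) == 64);
          assert(ffs64(0) == 0 && fls64(0) == 0);
          return 0;
  }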

These kfuncs are inlined by JIT backends when the required CPU features
are available; otherwise, they fall back to regular function calls.
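
From the BPF side, callers declare the kfuncs as kernel symbols. A
minimal usage sketch (program name, section, and the use of skb->mark
as a bit mask are illustrative, not part of the patch; the kfuncs are
registered in the generic kfunc set):

  #include <linux/bpf.h>
  #include <bpf/bpf_helpers.h>

  extern __u64 bpf_popcnt64(__u64 x) __ksym;
  extern __u64 bpf_ffs64(__u64 x) __ksym;

  SEC("tc")
  int count_bits(struct __sk_buff *skb)
  {
          __u64 mask = skb->mark;

          /* Inlined by the JIT when the CPU supports the needed
           * instructions; otherwise emitted as a regular kfunc call.
           */
          if (bpf_popcnt64(mask) > 1)
                  return (int)bpf_ffs64(mask);
          return 0;
  }

  char _license[] SEC("license") = "GPL";
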
Signed-off-by: Leon Hwang <leon.hwang@linux.dev>
---
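
Note for reviewers: when the JIT reports that it inlines bpf_ffs64()
(presumably as a 0-based trailing-zero count; the exact instruction
choice is JIT-specific and outside this patch), fixup_kfunc_call()
below wraps the call so the 1-based, zero-input semantics still hold.
Roughly, in BPF pseudo-assembly:

  r0 = 0;              /* result for x == 0 */
  if r1 == 0 goto +2;  /* skip the call and the increment */
  r0 = call bpf_ffs64; /* original insn, inlined by the JIT (0-based) */
  r0 += 1;             /* convert to a 1-based index */
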
 include/linux/filter.h | 10 ++++++++
 kernel/bpf/core.c      |  6 +++++
 kernel/bpf/helpers.c   | 50 +++++++++++++++++++++++++++++++++++++++
 kernel/bpf/verifier.c  | 53 +++++++++++++++++++++++++++++++++++++++++-
 4 files changed, 118 insertions(+), 1 deletion(-)

diff --git a/include/linux/filter.h b/include/linux/filter.h
index 44d7ae95ddbc..b8a538bec5c6 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -1157,6 +1157,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog);
 void bpf_jit_compile(struct bpf_prog *prog);
 bool bpf_jit_needs_zext(void);
 bool bpf_jit_inlines_helper_call(s32 imm);
+bool bpf_jit_inlines_kfunc_call(void *func_addr);
 bool bpf_jit_supports_subprog_tailcalls(void);
 bool bpf_jit_supports_percpu_insn(void);
 bool bpf_jit_supports_kfunc_call(void);
@@ -1837,4 +1838,13 @@ static inline void *bpf_skb_meta_pointer(struct sk_buff *skb, u32 offset)
 }
 #endif /* CONFIG_NET */
 
+u64 bpf_clz64(u64 x);
+u64 bpf_ctz64(u64 x);
+u64 bpf_ffs64(u64 x);
+u64 bpf_fls64(u64 x);
+u64 bpf_popcnt64(u64 x);
+u64 bpf_bitrev64(u64 x);
+u64 bpf_rol64(u64 x, u64 s);
+u64 bpf_ror64(u64 x, u64 s);
+
 #endif /* __LINUX_FILTER_H__ */
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 5ab6bace7d0d..5f37309d83fc 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -3114,6 +3114,12 @@ bool __weak bpf_jit_inlines_helper_call(s32 imm)
 	return false;
 }
 
+/* Return TRUE if the JIT backend inlines the kfunc. */
+bool __weak bpf_jit_inlines_kfunc_call(void *func_addr)
+{
+	return false;
+}
+
 /* Return TRUE if the JIT backend supports mixing bpf2bpf and tailcalls. */
 bool __weak bpf_jit_supports_subprog_tailcalls(void)
 {
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index 7ac32798eb04..6bf73c46af72 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -29,6 +29,8 @@
 #include <linux/task_work.h>
 #include <linux/irq_work.h>
 #include <linux/buildid.h>
+#include <linux/bitops.h>
+#include <linux/bitrev.h>
 
 #include "../../lib/kstrtox.h"
 
@@ -4501,6 +4503,46 @@ __bpf_kfunc int bpf_timer_cancel_async(struct bpf_timer *timer)
 	}
 }
 
+__bpf_kfunc u64 bpf_clz64(u64 x)
+{
+	return x ? 64 - fls64(x) : 64;
+}
+
+__bpf_kfunc u64 bpf_ctz64(u64 x)
+{
+	return x ? __ffs64(x) : 64;
+}
+
+__bpf_kfunc u64 bpf_ffs64(u64 x)
+{
+	return x ? __ffs64(x) + 1 : 0;
+}
+
+__bpf_kfunc u64 bpf_fls64(u64 x)
+{
+	return fls64(x);
+}
+
+__bpf_kfunc u64 bpf_popcnt64(u64 x)
+{
+	return hweight64(x);
+}
+
+__bpf_kfunc u64 bpf_bitrev64(u64 x)
+{
+	return ((u64)bitrev32(x & 0xFFFFFFFF) << 32) | bitrev32(x >> 32);
+}
+
+__bpf_kfunc u64 bpf_rol64(u64 x, u64 s)
+{
+	return rol64(x, s);
+}
+
+__bpf_kfunc u64 bpf_ror64(u64 x, u64 s)
+{
+	return ror64(x, s);
+}
+
 __bpf_kfunc_end_defs();
 
 static void bpf_task_work_cancel_scheduled(struct irq_work *irq_work)
@@ -4578,6 +4620,14 @@ BTF_ID_FLAGS(func, bpf_key_put, KF_RELEASE)
 BTF_ID_FLAGS(func, bpf_verify_pkcs7_signature, KF_SLEEPABLE)
 #endif
 #endif
+BTF_ID_FLAGS(func, bpf_clz64, KF_FASTCALL)
+BTF_ID_FLAGS(func, bpf_ctz64, KF_FASTCALL)
+BTF_ID_FLAGS(func, bpf_ffs64, KF_FASTCALL)
+BTF_ID_FLAGS(func, bpf_fls64, KF_FASTCALL)
+BTF_ID_FLAGS(func, bpf_popcnt64, KF_FASTCALL)
+BTF_ID_FLAGS(func, bpf_bitrev64, KF_FASTCALL)
+BTF_ID_FLAGS(func, bpf_rol64, KF_FASTCALL)
+BTF_ID_FLAGS(func, bpf_ror64, KF_FASTCALL)
 BTF_KFUNCS_END(generic_btf_ids)
 
 static const struct btf_kfunc_id_set generic_kfunc_set = {
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 0162f946032f..2cb29bc1b3c3 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -12461,6 +12461,14 @@ enum special_kfunc_type {
 	KF_bpf_session_is_return,
 	KF_bpf_stream_vprintk,
 	KF_bpf_stream_print_stack,
+	KF_bpf_clz64,
+	KF_bpf_ctz64,
+	KF_bpf_ffs64,
+	KF_bpf_fls64,
+	KF_bpf_bitrev64,
+	KF_bpf_popcnt64,
+	KF_bpf_rol64,
+	KF_bpf_ror64,
 };
 
 BTF_ID_LIST(special_kfunc_list)
@@ -12541,6 +12549,14 @@ BTF_ID(func, bpf_arena_reserve_pages)
 BTF_ID(func, bpf_session_is_return)
 BTF_ID(func, bpf_stream_vprintk)
 BTF_ID(func, bpf_stream_print_stack)
+BTF_ID(func, bpf_clz64)
+BTF_ID(func, bpf_ctz64)
+BTF_ID(func, bpf_ffs64)
+BTF_ID(func, bpf_fls64)
+BTF_ID(func, bpf_bitrev64)
+BTF_ID(func, bpf_popcnt64)
+BTF_ID(func, bpf_rol64)
+BTF_ID(func, bpf_ror64)
 
 static bool is_task_work_add_kfunc(u32 func_id)
 {
@@ -18204,6 +18220,34 @@ static bool verifier_inlines_helper_call(struct bpf_verifier_env *env, s32 imm)
 	}
 }
 
+static bool bpf_kfunc_is_fastcall(struct bpf_verifier_env *env, u32 func_id, u32 flags)
+{
+	if (!(flags & KF_FASTCALL))
+		return false;
+
+	if (!env->prog->jit_requested)
+		return true;
+
+	if (func_id == special_kfunc_list[KF_bpf_clz64])
+		return bpf_jit_inlines_kfunc_call(bpf_clz64);
+	if (func_id == special_kfunc_list[KF_bpf_ctz64])
+		return bpf_jit_inlines_kfunc_call(bpf_ctz64);
+	if (func_id == special_kfunc_list[KF_bpf_ffs64])
+		return bpf_jit_inlines_kfunc_call(bpf_ffs64);
+	if (func_id == special_kfunc_list[KF_bpf_fls64])
+		return bpf_jit_inlines_kfunc_call(bpf_fls64);
+	if (func_id == special_kfunc_list[KF_bpf_bitrev64])
+		return bpf_jit_inlines_kfunc_call(bpf_bitrev64);
+	if (func_id == special_kfunc_list[KF_bpf_popcnt64])
+		return bpf_jit_inlines_kfunc_call(bpf_popcnt64);
+	if (func_id == special_kfunc_list[KF_bpf_rol64])
+		return bpf_jit_inlines_kfunc_call(bpf_rol64);
+	if (func_id == special_kfunc_list[KF_bpf_ror64])
+		return bpf_jit_inlines_kfunc_call(bpf_ror64);
+
+	return true;
+}
+
 struct call_summary {
 	u8 num_params;
 	bool is_void;
@@ -18246,7 +18290,7 @@ static bool get_call_summary(struct bpf_verifier_env *env, struct bpf_insn *call
 			/* error would be reported later */
 			return false;
 		cs->num_params = btf_type_vlen(meta.func_proto);
-		cs->fastcall = meta.kfunc_flags & KF_FASTCALL;
+		cs->fastcall = bpf_kfunc_is_fastcall(env, meta.func_id, meta.kfunc_flags);
 		cs->is_void = btf_type_is_void(btf_type_by_id(meta.btf, meta.func_proto->type));
 		return true;
 	}
@@ -23186,6 +23230,13 @@ static int fixup_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
 		insn_buf[4] = BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1);
 		insn_buf[5] = BPF_ALU64_IMM(BPF_NEG, BPF_REG_0, 0);
 		*cnt = 6;
+	} else if (desc->func_id == special_kfunc_list[KF_bpf_ffs64] &&
+		   bpf_jit_inlines_kfunc_call(bpf_ffs64)) {
+		insn_buf[0] = BPF_MOV64_IMM(BPF_REG_0, 0);
+		insn_buf[1] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2);
+		insn_buf[2] = *insn;
+		insn_buf[3] = BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1);
+		*cnt = 4;
 	}
 
 	if (env->insn_aux_data[insn_idx].arg_prog) {
--
2.52.0