From: Mykyta Yatsenko <mykyta.yatsenko5@gmail.com>
To: bpf@vger.kernel.org, ast@kernel.org, andrii@kernel.org,
daniel@iogearbox.net, kafai@meta.com, kernel-team@meta.com,
eddyz87@gmail.com, memxor@gmail.com
Cc: Mykyta Yatsenko <yatsenko@meta.com>
Subject: [PATCH bpf-next v4 08/10] bpf: verifier: refactor kfunc specialization
Date: Tue, 21 Oct 2025 21:03:32 +0100 [thread overview]
Message-ID: <20251021200334.220542-9-mykyta.yatsenko5@gmail.com> (raw)
In-Reply-To: <20251021200334.220542-1-mykyta.yatsenko5@gmail.com>
From: Mykyta Yatsenko <yatsenko@meta.com>
Move kfunc specialization (function address substitution) to a later stage
of verification to support a new use case, where we need to take into
consideration whether a kfunc is called in a sleepable context.
Minor refactoring in add_kfunc_call(), making sure that if the function
fails, the kfunc desc is not added to tab->descs (previously it could be
added or not, depending on what failed).
Signed-off-by: Mykyta Yatsenko <yatsenko@meta.com>
---
kernel/bpf/verifier.c | 105 +++++++++++++++++++++++-------------------
1 file changed, 58 insertions(+), 47 deletions(-)
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 4c8fd298b99a..ea20ab1b00d8 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -209,8 +209,6 @@ static void invalidate_non_owning_refs(struct bpf_verifier_env *env);
static bool in_rbtree_lock_required_cb(struct bpf_verifier_env *env);
static int ref_set_non_owning(struct bpf_verifier_env *env,
struct bpf_reg_state *reg);
-static void specialize_kfunc(struct bpf_verifier_env *env,
- u32 func_id, u16 offset, unsigned long *addr);
static bool is_trusted_reg(const struct bpf_reg_state *reg);
static bool bpf_map_ptr_poisoned(const struct bpf_insn_aux_data *aux)
@@ -3126,6 +3124,10 @@ struct bpf_kfunc_btf_tab {
u32 nr_descs;
};
+static unsigned long kfunc_call_imm(unsigned long func_addr, u32 func_id);
+
+static int specialize_kfunc(struct bpf_verifier_env *env, struct bpf_kfunc_desc *desc);
+
static int kfunc_desc_cmp_by_id_off(const void *a, const void *b)
{
const struct bpf_kfunc_desc *d0 = a;
@@ -3143,7 +3145,7 @@ static int kfunc_btf_cmp_by_off(const void *a, const void *b)
return d0->offset - d1->offset;
}
-static const struct bpf_kfunc_desc *
+static struct bpf_kfunc_desc *
find_kfunc_desc(const struct bpf_prog *prog, u32 func_id, u16 offset)
{
struct bpf_kfunc_desc desc = {
@@ -3266,12 +3268,12 @@ static int add_kfunc_call(struct bpf_verifier_env *env, u32 func_id, s16 offset)
{
const struct btf_type *func, *func_proto;
struct bpf_kfunc_btf_tab *btf_tab;
+ struct btf_func_model func_model;
struct bpf_kfunc_desc_tab *tab;
struct bpf_prog_aux *prog_aux;
struct bpf_kfunc_desc *desc;
const char *func_name;
struct btf *desc_btf;
- unsigned long call_imm;
unsigned long addr;
int err;
@@ -3355,19 +3357,6 @@ static int add_kfunc_call(struct bpf_verifier_env *env, u32 func_id, s16 offset)
func_name);
return -EINVAL;
}
- specialize_kfunc(env, func_id, offset, &addr);
-
- if (bpf_jit_supports_far_kfunc_call()) {
- call_imm = func_id;
- } else {
- call_imm = BPF_CALL_IMM(addr);
- /* Check whether the relative offset overflows desc->imm */
- if ((unsigned long)(s32)call_imm != call_imm) {
- verbose(env, "address of kernel function %s is out of range\n",
- func_name);
- return -EINVAL;
- }
- }
if (bpf_dev_bound_kfunc_id(func_id)) {
err = bpf_dev_bound_kfunc_check(&env->log, prog_aux);
@@ -3375,18 +3364,20 @@ static int add_kfunc_call(struct bpf_verifier_env *env, u32 func_id, s16 offset)
return err;
}
+ err = btf_distill_func_proto(&env->log, desc_btf,
+ func_proto, func_name,
+ &func_model);
+ if (err)
+ return err;
+
desc = &tab->descs[tab->nr_descs++];
desc->func_id = func_id;
- desc->imm = call_imm;
desc->offset = offset;
desc->addr = addr;
- err = btf_distill_func_proto(&env->log, desc_btf,
- func_proto, func_name,
- &desc->func_model);
- if (!err)
- sort(tab->descs, tab->nr_descs, sizeof(tab->descs[0]),
- kfunc_desc_cmp_by_id_off, NULL);
- return err;
+ desc->func_model = func_model;
+ sort(tab->descs, tab->nr_descs, sizeof(tab->descs[0]),
+ kfunc_desc_cmp_by_id_off, NULL);
+ return 0;
}
static int kfunc_desc_cmp_by_imm_off(const void *a, const void *b)
@@ -21861,47 +21852,62 @@ static int fixup_call_args(struct bpf_verifier_env *env)
return err;
}
+static unsigned long kfunc_call_imm(unsigned long func_addr, u32 func_id)
+{
+ if (bpf_jit_supports_far_kfunc_call())
+ return func_id;
+
+ return BPF_CALL_IMM(func_addr);
+}
+
/* replace a generic kfunc with a specialized version if necessary */
-static void specialize_kfunc(struct bpf_verifier_env *env,
- u32 func_id, u16 offset, unsigned long *addr)
+static int specialize_kfunc(struct bpf_verifier_env *env, struct bpf_kfunc_desc *desc)
{
struct bpf_prog *prog = env->prog;
bool seen_direct_write;
void *xdp_kfunc;
bool is_rdonly;
+ u32 func_id = desc->func_id;
+ u16 offset = desc->offset;
+ unsigned long addr = desc->addr, call_imm;
+
+ if (offset) /* return if module BTF is used */
+ goto set_imm;
if (bpf_dev_bound_kfunc_id(func_id)) {
xdp_kfunc = bpf_dev_bound_resolve_kfunc(prog, func_id);
- if (xdp_kfunc) {
- *addr = (unsigned long)xdp_kfunc;
- return;
- }
+ if (xdp_kfunc)
+ addr = (unsigned long)xdp_kfunc;
/* fallback to default kfunc when not supported by netdev */
- }
-
- if (offset)
- return;
-
- if (func_id == special_kfunc_list[KF_bpf_dynptr_from_skb]) {
+ } else if (func_id == special_kfunc_list[KF_bpf_dynptr_from_skb]) {
seen_direct_write = env->seen_direct_write;
is_rdonly = !may_access_direct_pkt_data(env, NULL, BPF_WRITE);
if (is_rdonly)
- *addr = (unsigned long)bpf_dynptr_from_skb_rdonly;
+ addr = (unsigned long)bpf_dynptr_from_skb_rdonly;
/* restore env->seen_direct_write to its original value, since
* may_access_direct_pkt_data mutates it
*/
env->seen_direct_write = seen_direct_write;
+ } else if (func_id == special_kfunc_list[KF_bpf_set_dentry_xattr]) {
+ if (bpf_lsm_has_d_inode_locked(prog))
+ addr = (unsigned long)bpf_set_dentry_xattr_locked;
+ } else if (func_id == special_kfunc_list[KF_bpf_remove_dentry_xattr]) {
+ if (bpf_lsm_has_d_inode_locked(prog))
+ addr = (unsigned long)bpf_remove_dentry_xattr_locked;
+ }
+
+set_imm:
+ call_imm = kfunc_call_imm(addr, func_id);
+ /* Check whether the relative offset overflows desc->imm */
+ if ((unsigned long)(s32)call_imm != call_imm) {
+ verbose(env, "address of kernel func_id %u is out of range\n", func_id);
+ return -EINVAL;
}
-
- if (func_id == special_kfunc_list[KF_bpf_set_dentry_xattr] &&
- bpf_lsm_has_d_inode_locked(prog))
- *addr = (unsigned long)bpf_set_dentry_xattr_locked;
-
- if (func_id == special_kfunc_list[KF_bpf_remove_dentry_xattr] &&
- bpf_lsm_has_d_inode_locked(prog))
- *addr = (unsigned long)bpf_remove_dentry_xattr_locked;
+ desc->imm = call_imm;
+ desc->addr = addr;
+ return 0;
}
static void __fixup_collection_insert_kfunc(struct bpf_insn_aux_data *insn_aux,
@@ -21924,7 +21930,8 @@ static void __fixup_collection_insert_kfunc(struct bpf_insn_aux_data *insn_aux,
static int fixup_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
struct bpf_insn *insn_buf, int insn_idx, int *cnt)
{
- const struct bpf_kfunc_desc *desc;
+ struct bpf_kfunc_desc *desc;
+ int err;
if (!insn->imm) {
verbose(env, "invalid kernel function call not eliminated in verifier pass\n");
@@ -21944,6 +21951,10 @@ static int fixup_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
return -EFAULT;
}
+ err = specialize_kfunc(env, desc);
+ if (err)
+ return err;
+
if (!bpf_jit_supports_far_kfunc_call())
insn->imm = BPF_CALL_IMM(desc->addr);
if (insn->off)
--
2.51.0
next prev parent reply other threads:[~2025-10-21 20:03 UTC|newest]
Thread overview: 14+ messages / expand[flat|nested] mbox.gz Atom feed top
2025-10-21 20:03 [PATCH bpf-next v4 00/10] bpf: Introduce file dynptr Mykyta Yatsenko
2025-10-21 20:03 ` [PATCH bpf-next v4 01/10] selftests/bpf: remove unnecessary kfunc prototypes Mykyta Yatsenko
2025-10-21 20:03 ` [PATCH bpf-next v4 02/10] bpf: widen dynptr size/offset to 64 bit Mykyta Yatsenko
2025-10-21 20:03 ` [PATCH bpf-next v4 03/10] lib: move freader into buildid.h Mykyta Yatsenko
2025-10-21 20:03 ` [PATCH bpf-next v4 04/10] lib/freader: support reading more than 2 folios Mykyta Yatsenko
2025-10-21 20:03 ` [PATCH bpf-next v4 05/10] bpf: verifier: centralize const dynptr check in unmark_stack_slots_dynptr() Mykyta Yatsenko
2025-10-21 20:03 ` [PATCH bpf-next v4 06/10] bpf: add plumbing for file-backed dynptr Mykyta Yatsenko
2025-10-21 20:03 ` [PATCH bpf-next v4 07/10] bpf: add kfuncs and helpers support for file dynptrs Mykyta Yatsenko
2025-10-21 20:03 ` Mykyta Yatsenko [this message]
2025-10-22 0:42 ` [PATCH bpf-next v4 08/10] bpf: verifier: refactor kfunc specialization Eduard Zingerman
2025-10-24 22:12 ` Alexei Starovoitov
2025-10-21 20:03 ` [PATCH bpf-next v4 09/10] bpf: dispatch to sleepable file dynptr Mykyta Yatsenko
2025-10-21 20:03 ` [PATCH bpf-next v4 10/10] selftests/bpf: add file dynptr tests Mykyta Yatsenko
2025-10-22 0:46 ` Eduard Zingerman
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20251021200334.220542-9-mykyta.yatsenko5@gmail.com \
--to=mykyta.yatsenko5@gmail.com \
--cc=andrii@kernel.org \
--cc=ast@kernel.org \
--cc=bpf@vger.kernel.org \
--cc=daniel@iogearbox.net \
--cc=eddyz87@gmail.com \
--cc=kafai@meta.com \
--cc=kernel-team@meta.com \
--cc=memxor@gmail.com \
--cc=yatsenko@meta.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox