* [PATCH bpf-next 1/3] bpf: Teach resolve_btfids about the setsc type
2026-04-03 17:08 [PATCH bpf-next 0/3] bpf: Refactor how the verifier matches kfunc checks Chengkaitao
@ 2026-04-03 17:08 ` Chengkaitao
2026-04-03 17:08 ` [PATCH bpf-next 2/3] bpf: Introduce BTF_SET/ID_SUB and BPF_VERIF_KFUNC_DEF Chengkaitao
` (2 subsequent siblings)
3 siblings, 0 replies; 5+ messages in thread
From: Chengkaitao @ 2026-04-03 17:08 UTC (permalink / raw)
To: arnd, ast, ihor.solodrai, daniel, andrii, martin.lau, eddyz87,
memxor, song, yonghong.song, jolsa, john.fastabend, pengdonglin
Cc: linux-kernel, linux-arch, bpf, Kaitao Cheng
From: Kaitao Cheng <chengkaitao@kylinos.cn>
Add .BTF_ids.##sfx subsections under the .BTF_ids output section. The
number of entries in each .BTF_ids.##sfx subsection is derived from
(__BTF_ids_seg_end_##sfx - sym.st_value). With this, resolve_btfids
no longer relies on BTF_SET_END(). That allows kernel code to avoid
forcing symbols with similar properties to sit strictly between
BTF_SET_START and BTF_SET_END, and it sets the stage for refactoring
the BTF_SET* and BTF_ID* macros later.
Signed-off-by: Kaitao Cheng <chengkaitao@kylinos.cn>
---
include/asm-generic/btf_ids.lds.h | 23 ++++++++
include/asm-generic/vmlinux.lds.h | 2 +
tools/bpf/resolve_btfids/main.c | 89 +++++++++++++++++++++++++++++--
3 files changed, 111 insertions(+), 3 deletions(-)
create mode 100644 include/asm-generic/btf_ids.lds.h
diff --git a/include/asm-generic/btf_ids.lds.h b/include/asm-generic/btf_ids.lds.h
new file mode 100644
index 000000000000..7579ba58f5ff
--- /dev/null
+++ b/include/asm-generic/btf_ids.lds.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2026 KylinSoft Corporation.
+ * Copyright (c) 2026 Kaitao Cheng <chengkaitao@kylinos.cn>
+ */
+#ifndef __ASM_GENERIC_BTF_IDS_LDS_H
+#define __ASM_GENERIC_BTF_IDS_LDS_H
+
+/*
+ * Linker script helpers for CONFIG_DEBUG_INFO_BTF .BTF_ids subsections.
+ * Input section .BTF_ids.##sfx must match __BTF_IDS_SUBSEC(sfx) in btf_ids.h.
+ */
+#ifdef CONFIG_DEBUG_INFO_BTF
+
+#define BTF_IDS_SUBSEG(sfx) \
+ KEEP(*(.BTF_ids.##sfx)) \
+ __BTF_ids_seg_end_##sfx = .;
+
+#define BTF_IDS_VERIFIER_SUBSEGS
+
+#endif /* CONFIG_DEBUG_INFO_BTF */
+
+#endif /* __ASM_GENERIC_BTF_IDS_LDS_H */
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 1e1580febe4b..96407498629e 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -51,6 +51,7 @@
*/
#include <asm-generic/codetag.lds.h>
+#include <asm-generic/btf_ids.lds.h>
#ifndef LOAD_OFFSET
#define LOAD_OFFSET 0
@@ -691,6 +692,7 @@
. = ALIGN(PAGE_SIZE); \
.BTF_ids : AT(ADDR(.BTF_ids) - LOAD_OFFSET) { \
*(.BTF_ids) \
+ BTF_IDS_VERIFIER_SUBSEGS \
}
#else
#define BTF
diff --git a/tools/bpf/resolve_btfids/main.c b/tools/bpf/resolve_btfids/main.c
index f8a91fa7584f..d9b0c9c25eda 100644
--- a/tools/bpf/resolve_btfids/main.c
+++ b/tools/bpf/resolve_btfids/main.c
@@ -89,6 +89,7 @@
#define BTF_FUNC "func"
#define BTF_SET "set"
#define BTF_SET8 "set8"
+#define BTF_SETSC "setsc"
#define ADDR_CNT 100
@@ -104,7 +105,8 @@ enum btf_id_kind {
BTF_ID_KIND_NONE,
BTF_ID_KIND_SYM,
BTF_ID_KIND_SET,
- BTF_ID_KIND_SET8
+ BTF_ID_KIND_SET8,
+ BTF_ID_KIND_SETSC,
};
struct btf_id {
@@ -309,6 +311,77 @@ static int get_id(const char *prefix_end, char *buf, size_t buf_sz)
return 0;
}
+static int elf_sym_value_by_name(struct object *obj, const char *sym_name,
+ GElf_Addr *val)
+{
+ Elf_Scn *scn = NULL;
+ GElf_Shdr sh;
+ int n, i;
+
+ scn = elf_getscn(obj->efile.elf, obj->efile.symbols_shndx);
+ if (!scn || gelf_getshdr(scn, &sh) != &sh)
+ return -1;
+
+ n = sh.sh_size / sh.sh_entsize;
+ for (i = 0; i < n; i++) {
+ GElf_Sym sym;
+ char *name;
+
+ if (!gelf_getsym(obj->efile.symbols, i, &sym))
+ return -1;
+ if (sym.st_shndx == SHN_UNDEF)
+ continue;
+ name = elf_strptr(obj->efile.elf, obj->efile.strtabidx,
+ sym.st_name);
+ if (name && !strcmp(name, sym_name)) {
+ *val = sym.st_value;
+ return 0;
+ }
+ }
+ return -1;
+}
+
+/*
+ * BTF_ID_KIND_SETSC: begin is __BTF_ID__setsc__<name> (sym.st_value); end is
+ * __BTF_ids_seg_end_<name> from the linker script (btf_ids.lds.h).
+ */
+static int scatter_set_cnt_from_seg(struct object *obj, struct btf_id *id,
+ GElf_Addr begin)
+{
+ GElf_Addr end;
+ char sym_end[KSYM_NAME_LEN];
+ int cnt;
+
+ if (snprintf(sym_end, sizeof(sym_end), "__BTF_ids_seg_end_%s",
+ id->name) >= (int)sizeof(sym_end)) {
+ pr_err("FAILED scatter set symbol name overflow: %s\n", id->name);
+ return -1;
+ }
+
+ if (elf_sym_value_by_name(obj, sym_end, &end)) {
+ pr_err("FAILED scatter set %s: missing %s in %s\n",
+ id->name, sym_end, obj->path);
+ return -1;
+ }
+
+ if (end <= begin || (end - begin) % sizeof(int)) {
+ pr_err("FAILED scatter set %s: bad span (begin %#lx .. end %s %#lx)\n",
+ id->name, (unsigned long)begin, sym_end, (unsigned long)end);
+ return -1;
+ }
+
+ cnt = (int)((unsigned long)(end - begin) / sizeof(int) - 1);
+ if (cnt < 0) {
+ pr_err("FAILED scatter set %s: negative cnt\n", id->name);
+ return -1;
+ }
+ id->cnt = cnt;
+ pr_debug("scatter set %s cnt %d span %lu\n", id->name, cnt,
+ (unsigned long)(end - begin));
+
+ return 0;
+}
+
static struct btf_id *add_set(struct object *obj, char *name, enum btf_id_kind kind)
{
int len = strlen(name);
@@ -327,6 +400,9 @@ static struct btf_id *add_set(struct object *obj, char *name, enum btf_id_kind k
case BTF_ID_KIND_SET8:
prefixlen = sizeof(BTF_SET8 "__") - 1;
break;
+ case BTF_ID_KIND_SETSC:
+ prefixlen = sizeof(BTF_SETSC "__") - 1;
+ break;
default:
pr_err("Unexpected kind %d passed to %s() for symbol %s\n", kind, __func__, name);
return NULL;
@@ -549,6 +625,11 @@ static int symbols_collect(struct object *obj)
*/
if (id)
id->cnt = sym.st_size / sizeof(uint64_t) - 1;
+ /* setsc */
+ } else if (!strncmp(prefix, BTF_SETSC, sizeof(BTF_SETSC) - 1)) {
+ id = add_set(obj, prefix, BTF_ID_KIND_SETSC);
+ if (id && scatter_set_cnt_from_seg(obj, id, sym.st_value))
+ return -1;
/* set */
} else if (!strncmp(prefix, BTF_SET, sizeof(BTF_SET) - 1)) {
id = add_set(obj, prefix, BTF_ID_KIND_SET);
@@ -690,8 +771,9 @@ static int id_patch(struct object *obj, struct btf_id *id)
int *ptr = data->d_buf;
int i;
- /* For set, set8, id->id may be 0 */
- if (!id->id && id->kind != BTF_ID_KIND_SET && id->kind != BTF_ID_KIND_SET8) {
+ /* For set, set8, setsc, id->id may be 0 */
+ if (!id->id && id->kind != BTF_ID_KIND_SET &&
+ id->kind != BTF_ID_KIND_SET8 && id->kind != BTF_ID_KIND_SETSC) {
pr_err("WARN: resolve_btfids: unresolved symbol %s\n", id->name);
warnings++;
}
@@ -766,6 +848,7 @@ static int sets_patch(struct object *obj)
switch (id->kind) {
case BTF_ID_KIND_SET:
+ case BTF_ID_KIND_SETSC:
set = data->d_buf + off;
cnt = set->cnt;
qsort(set->ids, set->cnt, sizeof(set->ids[0]), cmp_id);
--
2.43.0
^ permalink raw reply related [flat|nested] 5+ messages in thread

* [PATCH bpf-next 2/3] bpf: Introduce BTF_SET/ID_SUB and BPF_VERIF_KFUNC_DEF
2026-04-03 17:08 [PATCH bpf-next 0/3] bpf: Refactor how the verifier matches kfunc checks Chengkaitao
2026-04-03 17:08 ` [PATCH bpf-next 1/3] bpf: Teach resolve_btfids about the setsc type Chengkaitao
@ 2026-04-03 17:08 ` Chengkaitao
2026-04-03 17:08 ` [PATCH bpf-next 3/3] bpf: classify rbtree kfuncs with BPF_VERIF_KFUNC_DEF sets Chengkaitao
2026-04-10 2:22 ` [PATCH bpf-next 0/3] bpf: Refactor how the verifier matches kfunc checks Chengkaitao
3 siblings, 0 replies; 5+ messages in thread
From: Chengkaitao @ 2026-04-03 17:08 UTC (permalink / raw)
To: arnd, ast, ihor.solodrai, daniel, andrii, martin.lau, eddyz87,
memxor, song, yonghong.song, jolsa, john.fastabend, pengdonglin
Cc: linux-kernel, linux-arch, bpf, Kaitao Cheng
From: Kaitao Cheng <chengkaitao@kylinos.cn>
Use BTF_SET_SUB to provide the same kind of grouping as BTF_SET_START/END,
so BTF_ID_SUB no longer has to follow the strict sequence (BTF_SET_START,
BTF_ID, BTF_SET_END). That lets us scatter BTF_ID_SUB definitions across
the file and collect entries with similar properties together, which
reduces maintenance overhead.
Signed-off-by: Kaitao Cheng <chengkaitao@kylinos.cn>
---
include/linux/btf_ids.h | 92 +++++++++++++++++++++++++++++++----
tools/include/linux/btf_ids.h | 83 +++++++++++++++++++++++++++----
2 files changed, 156 insertions(+), 19 deletions(-)
diff --git a/include/linux/btf_ids.h b/include/linux/btf_ids.h
index af011db39ab3..c45275c88649 100644
--- a/include/linux/btf_ids.h
+++ b/include/linux/btf_ids.h
@@ -40,9 +40,9 @@ struct btf_id_set8 {
#define BTF_IDS_SECTION ".BTF_ids"
-#define ____BTF_ID(symbol, word) \
+#define ____BTF_ID(symbol, word, sec) \
asm( \
-".pushsection " BTF_IDS_SECTION ",\"a\"; \n" \
+".pushsection " sec ",\"a\"; \n" \
".local " #symbol " ; \n" \
".type " #symbol ", STT_OBJECT; \n" \
".size " #symbol ", 4; \n" \
@@ -52,7 +52,7 @@ word \
".popsection; \n");
#define __BTF_ID(symbol, word) \
- ____BTF_ID(symbol, word)
+ ____BTF_ID(symbol, word, BTF_IDS_SECTION)
#define __ID(prefix) \
__PASTE(__PASTE(prefix, __COUNTER__), __LINE__)
@@ -86,19 +86,19 @@ word \
* .zero 4
*
*/
-#define __BTF_ID_LIST(name, scope) \
+#define __BTF_ID_LIST(name, scope, sec) \
asm( \
-".pushsection " BTF_IDS_SECTION ",\"a\"; \n" \
+".pushsection " sec ",\"a\"; \n" \
"." #scope " " #name "; \n" \
#name ":; \n" \
".popsection; \n");
#define BTF_ID_LIST(name) \
-__BTF_ID_LIST(name, local) \
+__BTF_ID_LIST(name, local, BTF_IDS_SECTION) \
extern u32 name[];
#define BTF_ID_LIST_GLOBAL(name, n) \
-__BTF_ID_LIST(name, globl)
+__BTF_ID_LIST(name, globl, BTF_IDS_SECTION)
/* The BTF_ID_LIST_SINGLE macro defines a BTF_ID_LIST with
* a single entry.
@@ -154,11 +154,11 @@ asm( \
".popsection; \n");
#define BTF_SET_START(name) \
-__BTF_ID_LIST(name, local) \
+__BTF_ID_LIST(name, local, BTF_IDS_SECTION) \
__BTF_SET_START(name, local)
#define BTF_SET_START_GLOBAL(name) \
-__BTF_ID_LIST(name, globl) \
+__BTF_ID_LIST(name, globl, BTF_IDS_SECTION) \
__BTF_SET_START(name, globl)
#define BTF_SET_END(name) \
@@ -168,6 +168,75 @@ asm( \
".popsection; \n"); \
extern struct btf_id_set name;
+/*
+ * BTF_SET_SUB — place a set in .BTF_ids.<name> so vmlinux.lds.h can merge
+ * multiple input sections into one output .BTF_ids in a fixed order.
+ * <name> must be a single preprocessor token (e.g. bpf_verif_kfunc_arena).
+ *
+ * Member count: begin is __BTF_ID__setsc__<name> (first in .BTF_ids.<name>);
+ * end is linker symbol __BTF_ids_seg_end_<name> (see btf_ids.lds.h). resolve_btfids
+ * uses cnt = (end - begin) / 4 - 1. <name> must not contain "__seg__".
+ *
+ * extern struct btf_id_set name is emitted by BTF_SET_SUB. BTF_ID_SUB(name, ...)
+ * must use the same <name> as the subsection token.
+ */
+#define __BTF_IDS_SUBSEC(sub) ".BTF_ids." #sub
+
+/* Indirection so __ID() expands before ____BTF_ID() stringifies its symbol arg. */
+#define __BTF_ID_SUB(sym, sec) ____BTF_ID(sym, "", sec)
+
+#define BTF_ID_SUB(sub, prefix, name) \
+ __BTF_ID_SUB(__ID(__BTF_ID__##prefix##__##name##__), __BTF_IDS_SUBSEC(sub))
+
+#define __BTF_ID_LIST_SUB(name, scope) \
+ __BTF_ID_LIST(name, scope, __BTF_IDS_SUBSEC(name))
+
+#define __BTF_SET_SUB(name, scope) \
+asm( \
+".pushsection " __BTF_IDS_SUBSEC(name) ",\"a\"; \n" \
+"." #scope " __BTF_ID__setsc__" #name "; \n" \
+"__BTF_ID__setsc__" #name ":; \n" \
+".zero 4 \n" \
+".popsection; \n");
+
+#define BTF_SET_SUB(name) \
+extern struct btf_id_set name; \
+__BTF_ID_LIST_SUB(name, local) \
+__BTF_SET_SUB(name, local)
+
+#include <linux/args.h> /* CONCATENATE, COUNT_ARGS */
+
+/* bpf_verif_kfunc_<sub> (e.g. rbtree_add → bpf_verif_kfunc_rbtree_add) */
+#define __BPF_VERIF_KFUNC_SUB(sub) bpf_verif_kfunc_##sub
+
+/* Cascade: emit first subsection, recurse on the rest (same kfunc @name). Up to 6 subs. */
+#define __BPF_VERIF_KFUNC_DEF_1(name, s1) \
+ BTF_ID_SUB(__BPF_VERIF_KFUNC_SUB(s1), func, name)
+
+#define __BPF_VERIF_KFUNC_DEF_2(name, s1, s2) \
+ BTF_ID_SUB(__BPF_VERIF_KFUNC_SUB(s1), func, name) \
+ __BPF_VERIF_KFUNC_DEF_1(name, s2)
+
+#define __BPF_VERIF_KFUNC_DEF_3(name, s1, s2, s3) \
+ BTF_ID_SUB(__BPF_VERIF_KFUNC_SUB(s1), func, name) \
+ __BPF_VERIF_KFUNC_DEF_2(name, s2, s3)
+
+#define __BPF_VERIF_KFUNC_DEF_4(name, s1, s2, s3, s4) \
+ BTF_ID_SUB(__BPF_VERIF_KFUNC_SUB(s1), func, name) \
+ __BPF_VERIF_KFUNC_DEF_3(name, s2, s3, s4)
+
+#define __BPF_VERIF_KFUNC_DEF_5(name, s1, s2, s3, s4, s5) \
+ BTF_ID_SUB(__BPF_VERIF_KFUNC_SUB(s1), func, name) \
+ __BPF_VERIF_KFUNC_DEF_4(name, s2, s3, s4, s5)
+
+#define __BPF_VERIF_KFUNC_DEF_6(name, s1, s2, s3, s4, s5, s6) \
+ BTF_ID_SUB(__BPF_VERIF_KFUNC_SUB(s1), func, name) \
+ __BPF_VERIF_KFUNC_DEF_5(name, s2, s3, s4, s5, s6)
+
+/* First arg: kfunc symbol; rest: subsection suffix tokens matching bpf_verif_kfunc_<s>. */
+#define BPF_VERIF_KFUNC_DEF(name, ...) \
+ CONCATENATE(__BPF_VERIF_KFUNC_DEF_, COUNT_ARGS(__VA_ARGS__))(name, __VA_ARGS__)
+
/*
* The BTF_SET8_START/END macros pair defines sorted list of
* BTF IDs and their flags plus its members count, with the
@@ -190,7 +259,7 @@ extern struct btf_id_set name;
*
*/
#define __BTF_SET8_START(name, scope, flags) \
-__BTF_ID_LIST(name, local) \
+__BTF_ID_LIST(name, local, BTF_IDS_SECTION) \
asm( \
".pushsection " BTF_IDS_SECTION ",\"a\"; \n" \
"." #scope " __BTF_ID__set8__" #name "; \n" \
@@ -227,6 +296,9 @@ BTF_SET8_END(name)
#define BTF_SET_START(name) static struct btf_id_set __maybe_unused name = { 0 };
#define BTF_SET_START_GLOBAL(name) static struct btf_id_set __maybe_unused name = { 0 };
#define BTF_SET_END(name)
+#define BTF_SET_SUB(name) static struct btf_id_set __maybe_unused name = { 0 };
+#define BTF_ID_SUB(sub, prefix, name)
+#define BPF_VERIF_KFUNC_DEF(name, ...)
#define BTF_SET8_START(name) static struct btf_id_set8 __maybe_unused name = { 0 };
#define BTF_SET8_END(name)
#define BTF_KFUNCS_START(name) static struct btf_id_set8 __maybe_unused name = { .flags = BTF_SET8_KFUNCS };
diff --git a/tools/include/linux/btf_ids.h b/tools/include/linux/btf_ids.h
index 72ea363d434d..026fd5bfc6d5 100644
--- a/tools/include/linux/btf_ids.h
+++ b/tools/include/linux/btf_ids.h
@@ -35,9 +35,9 @@ struct btf_id_set8 {
#define BTF_IDS_SECTION ".BTF_ids"
-#define ____BTF_ID(symbol) \
+#define ____BTF_ID(symbol, sec) \
asm( \
-".pushsection " BTF_IDS_SECTION ",\"a\"; \n" \
+".pushsection " sec ",\"a\"; \n" \
".local " #symbol " ; \n" \
".type " #symbol ", STT_OBJECT; \n" \
".size " #symbol ", 4; \n" \
@@ -46,7 +46,7 @@ asm( \
".popsection; \n");
#define __BTF_ID(symbol) \
- ____BTF_ID(symbol)
+ ____BTF_ID(symbol, BTF_IDS_SECTION)
#define __ID(prefix) \
__PASTE(__PASTE(prefix, __COUNTER__), __LINE__)
@@ -73,19 +73,19 @@ asm( \
* .zero 4
*
*/
-#define __BTF_ID_LIST(name, scope) \
+#define __BTF_ID_LIST(name, scope, sec) \
asm( \
-".pushsection " BTF_IDS_SECTION ",\"a\"; \n" \
+".pushsection " sec ",\"a\"; \n" \
"." #scope " " #name "; \n" \
#name ":; \n" \
".popsection; \n");
#define BTF_ID_LIST(name) \
-__BTF_ID_LIST(name, local) \
+__BTF_ID_LIST(name, local, BTF_IDS_SECTION) \
extern u32 name[];
#define BTF_ID_LIST_GLOBAL(name, n) \
-__BTF_ID_LIST(name, globl)
+__BTF_ID_LIST(name, globl, BTF_IDS_SECTION)
/* The BTF_ID_LIST_SINGLE macro defines a BTF_ID_LIST with
* a single entry.
@@ -141,11 +141,11 @@ asm( \
".popsection; \n");
#define BTF_SET_START(name) \
-__BTF_ID_LIST(name, local) \
+__BTF_ID_LIST(name, local, BTF_IDS_SECTION) \
__BTF_SET_START(name, local)
#define BTF_SET_START_GLOBAL(name) \
-__BTF_ID_LIST(name, globl) \
+__BTF_ID_LIST(name, globl, BTF_IDS_SECTION) \
__BTF_SET_START(name, globl)
#define BTF_SET_END(name) \
@@ -155,6 +155,68 @@ asm( \
".popsection; \n"); \
extern struct btf_id_set name;
+/*
+ * See include/linux/btf_ids.h: BTF_SET_SUB uses __BTF_ids_seg_end_<name> in
+ * btf_ids.lds.h; begin is __BTF_ID__setsc__<name>. extern struct btf_id_set name
+ * is in BTF_SET_SUB.
+ */
+#define __BTF_IDS_SUBSEC(sub) ".BTF_ids." #sub
+
+/* Indirection so __ID() expands before ____BTF_ID() stringifies its symbol arg. */
+#define __BTF_ID_SUB(sym, sec) ____BTF_ID(sym, sec)
+
+#define BTF_ID_SUB(sub, prefix, name) \
+ __BTF_ID_SUB(__ID(__BTF_ID__##prefix##__##name##__), __BTF_IDS_SUBSEC(sub))
+
+#define __BTF_ID_LIST_SUB(name, scope) \
+ __BTF_ID_LIST(name, scope, __BTF_IDS_SUBSEC(name))
+
+#define __BTF_SET_SUB(name, scope) \
+asm( \
+".pushsection " __BTF_IDS_SUBSEC(name) ",\"a\"; \n" \
+"." #scope " __BTF_ID__setsc__" #name "; \n" \
+"__BTF_ID__setsc__" #name ":; \n" \
+".zero 4 \n" \
+".popsection; \n");
+
+#define BTF_SET_SUB(name) \
+extern struct btf_id_set name; \
+__BTF_ID_LIST_SUB(name, local) \
+__BTF_SET_SUB(name, local)
+
+#include <linux/args.h> /* CONCATENATE, COUNT_ARGS */
+
+/* bpf_verif_kfunc_<sub> (e.g. rbtree_add → bpf_verif_kfunc_rbtree_add) */
+#define __BPF_VERIF_KFUNC_SUB(sub) bpf_verif_kfunc_##sub
+
+/* Cascade: emit first subsection, recurse on the rest (same kfunc @name). Up to 6 subs. */
+#define __BPF_VERIF_KFUNC_DEF_1(name, s1) \
+ BTF_ID_SUB(__BPF_VERIF_KFUNC_SUB(s1), func, name)
+
+#define __BPF_VERIF_KFUNC_DEF_2(name, s1, s2) \
+ BTF_ID_SUB(__BPF_VERIF_KFUNC_SUB(s1), func, name) \
+ __BPF_VERIF_KFUNC_DEF_1(name, s2)
+
+#define __BPF_VERIF_KFUNC_DEF_3(name, s1, s2, s3) \
+ BTF_ID_SUB(__BPF_VERIF_KFUNC_SUB(s1), func, name) \
+ __BPF_VERIF_KFUNC_DEF_2(name, s2, s3)
+
+#define __BPF_VERIF_KFUNC_DEF_4(name, s1, s2, s3, s4) \
+ BTF_ID_SUB(__BPF_VERIF_KFUNC_SUB(s1), func, name) \
+ __BPF_VERIF_KFUNC_DEF_3(name, s2, s3, s4)
+
+#define __BPF_VERIF_KFUNC_DEF_5(name, s1, s2, s3, s4, s5) \
+ BTF_ID_SUB(__BPF_VERIF_KFUNC_SUB(s1), func, name) \
+ __BPF_VERIF_KFUNC_DEF_4(name, s2, s3, s4, s5)
+
+#define __BPF_VERIF_KFUNC_DEF_6(name, s1, s2, s3, s4, s5, s6) \
+ BTF_ID_SUB(__BPF_VERIF_KFUNC_SUB(s1), func, name) \
+ __BPF_VERIF_KFUNC_DEF_5(name, s2, s3, s4, s5, s6)
+
+/* First arg: kfunc symbol; rest: subsection suffix tokens matching bpf_verif_kfunc_<s>. */
+#define BPF_VERIF_KFUNC_DEF(name, ...) \
+ CONCATENATE(__BPF_VERIF_KFUNC_DEF_, COUNT_ARGS(__VA_ARGS__))(name, __VA_ARGS__)
+
#else
#define BTF_ID_LIST(name) static u32 __maybe_unused name[5];
@@ -166,6 +228,9 @@ extern struct btf_id_set name;
#define BTF_SET_START(name) static struct btf_id_set __maybe_unused name = { 0 };
#define BTF_SET_START_GLOBAL(name) static struct btf_id_set __maybe_unused name = { 0 };
#define BTF_SET_END(name)
+#define BTF_SET_SUB(name) static struct btf_id_set __maybe_unused name = { 0 };
+#define BTF_ID_SUB(sub, prefix, name)
+#define BPF_VERIF_KFUNC_DEF(name, ...)
#endif /* CONFIG_DEBUG_INFO_BTF */
--
2.43.0
^ permalink raw reply related [flat|nested] 5+ messages in thread

* [PATCH bpf-next 3/3] bpf: classify rbtree kfuncs with BPF_VERIF_KFUNC_DEF sets
2026-04-03 17:08 [PATCH bpf-next 0/3] bpf: Refactor how the verifier matches kfunc checks Chengkaitao
2026-04-03 17:08 ` [PATCH bpf-next 1/3] bpf: Teach resolve_btfids about the setsc type Chengkaitao
2026-04-03 17:08 ` [PATCH bpf-next 2/3] bpf: Introduce BTF_SET/ID_SUB and BPF_VERIF_KFUNC_DEF Chengkaitao
@ 2026-04-03 17:08 ` Chengkaitao
2026-04-10 2:22 ` [PATCH bpf-next 0/3] bpf: Refactor how the verifier matches kfunc checks Chengkaitao
3 siblings, 0 replies; 5+ messages in thread
From: Chengkaitao @ 2026-04-03 17:08 UTC (permalink / raw)
To: arnd, ast, ihor.solodrai, daniel, andrii, martin.lau, eddyz87,
memxor, song, yonghong.song, jolsa, john.fastabend, pengdonglin
Cc: linux-kernel, linux-arch, bpf, Kaitao Cheng
From: Kaitao Cheng <chengkaitao@kylinos.cn>
Remove verifier logic that enumerated special_kfunc_list[KF_bpf_rbtree_*]
and compared against it. Use BPF_VERIF_KFUNC_DEF so each kfunc's
verifier-facing safety classification is declared in one place next
to the implementation.
When adding new kfuncs later, you no longer need to thread ad-hoc
allowlists through scattered verifier branches—tagging the kfunc once
with BPF_VERIF_KFUNC_DEF is enough.
This patch only migrates the rbtree kfuncs; other kfunc families can
be converted the same way in follow-up work.
Signed-off-by: Kaitao Cheng <chengkaitao@kylinos.cn>
---
include/asm-generic/btf_ids.lds.h | 5 +++-
kernel/bpf/helpers.c | 7 +++++
kernel/bpf/verifier.c | 44 +++++++++++--------------------
3 files changed, 26 insertions(+), 30 deletions(-)
diff --git a/include/asm-generic/btf_ids.lds.h b/include/asm-generic/btf_ids.lds.h
index 7579ba58f5ff..879391725d97 100644
--- a/include/asm-generic/btf_ids.lds.h
+++ b/include/asm-generic/btf_ids.lds.h
@@ -16,7 +16,10 @@
KEEP(*(.BTF_ids.##sfx)) \
__BTF_ids_seg_end_##sfx = .;
-#define BTF_IDS_VERIFIER_SUBSEGS
+#define BTF_IDS_VERIFIER_SUBSEGS \
+ BTF_IDS_SUBSEG(bpf_verif_kfunc_rbtree_add) \
+ BTF_IDS_SUBSEG(bpf_verif_kfunc_rbtree_graph_node) \
+ BTF_IDS_SUBSEG(bpf_verif_kfunc_rbtree_api)
#endif /* CONFIG_DEBUG_INFO_BTF */
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index 2d8538bf4cfa..d4a8b0b98210 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -2606,6 +2606,7 @@ __bpf_kfunc struct bpf_rb_node *bpf_rbtree_remove(struct bpf_rb_root *root,
WRITE_ONCE(node_internal->owner, NULL);
return (struct bpf_rb_node *)n;
}
+BPF_VERIF_KFUNC_DEF(bpf_rbtree_remove, rbtree_api, rbtree_graph_node)
/* Need to copy rbtree_add_cached's logic here because our 'less' is a BPF
* program
@@ -2667,6 +2668,7 @@ __bpf_kfunc int bpf_rbtree_add(struct bpf_rb_root *root,
return __bpf_rbtree_add(root, n, (void *)less, meta ? meta->record : NULL, off);
}
+BPF_VERIF_KFUNC_DEF(bpf_rbtree_add, rbtree_api, rbtree_add, rbtree_graph_node)
__bpf_kfunc int bpf_rbtree_add_impl(struct bpf_rb_root *root, struct bpf_rb_node *node,
bool (less)(struct bpf_rb_node *a, const struct bpf_rb_node *b),
@@ -2674,6 +2676,7 @@ __bpf_kfunc int bpf_rbtree_add_impl(struct bpf_rb_root *root, struct bpf_rb_node
{
return bpf_rbtree_add(root, node, less, meta__ign, off);
}
+BPF_VERIF_KFUNC_DEF(bpf_rbtree_add_impl, rbtree_api, rbtree_add, rbtree_graph_node)
__bpf_kfunc struct bpf_rb_node *bpf_rbtree_first(struct bpf_rb_root *root)
{
@@ -2681,6 +2684,7 @@ __bpf_kfunc struct bpf_rb_node *bpf_rbtree_first(struct bpf_rb_root *root)
return (struct bpf_rb_node *)rb_first_cached(r);
}
+BPF_VERIF_KFUNC_DEF(bpf_rbtree_first, rbtree_api)
__bpf_kfunc struct bpf_rb_node *bpf_rbtree_root(struct bpf_rb_root *root)
{
@@ -2688,6 +2692,7 @@ __bpf_kfunc struct bpf_rb_node *bpf_rbtree_root(struct bpf_rb_root *root)
return (struct bpf_rb_node *)r->rb_root.rb_node;
}
+BPF_VERIF_KFUNC_DEF(bpf_rbtree_root, rbtree_api)
__bpf_kfunc struct bpf_rb_node *bpf_rbtree_left(struct bpf_rb_root *root, struct bpf_rb_node *node)
{
@@ -2698,6 +2703,7 @@ __bpf_kfunc struct bpf_rb_node *bpf_rbtree_left(struct bpf_rb_root *root, struct
return (struct bpf_rb_node *)node_internal->rb_node.rb_left;
}
+BPF_VERIF_KFUNC_DEF(bpf_rbtree_left, rbtree_api, rbtree_graph_node)
__bpf_kfunc struct bpf_rb_node *bpf_rbtree_right(struct bpf_rb_root *root, struct bpf_rb_node *node)
{
@@ -2708,6 +2714,7 @@ __bpf_kfunc struct bpf_rb_node *bpf_rbtree_right(struct bpf_rb_root *root, struc
return (struct bpf_rb_node *)node_internal->rb_node.rb_right;
}
+BPF_VERIF_KFUNC_DEF(bpf_rbtree_right, rbtree_api, rbtree_graph_node)
/**
* bpf_task_acquire - Acquire a reference to a task. A task acquired by this
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 8c1cf2eb6cbb..db9ba47903ef 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -12714,6 +12714,10 @@ BTF_ID(func, bpf_session_is_return)
BTF_ID(func, bpf_stream_vprintk)
BTF_ID(func, bpf_stream_print_stack)
+BTF_SET_SUB(bpf_verif_kfunc_rbtree_add)
+BTF_SET_SUB(bpf_verif_kfunc_rbtree_graph_node)
+BTF_SET_SUB(bpf_verif_kfunc_rbtree_api)
+
static bool is_bpf_obj_new_kfunc(u32 func_id)
{
return func_id == special_kfunc_list[KF_bpf_obj_new] ||
@@ -12752,12 +12756,6 @@ static bool is_bpf_list_push_kfunc(u32 func_id)
func_id == special_kfunc_list[KF_bpf_list_push_back_impl];
}
-static bool is_bpf_rbtree_add_kfunc(u32 func_id)
-{
- return func_id == special_kfunc_list[KF_bpf_rbtree_add] ||
- func_id == special_kfunc_list[KF_bpf_rbtree_add_impl];
-}
-
static bool is_task_work_add_kfunc(u32 func_id)
{
return func_id == special_kfunc_list[KF_bpf_task_work_schedule_signal] ||
@@ -13162,16 +13160,6 @@ static bool is_bpf_list_api_kfunc(u32 btf_id)
btf_id == special_kfunc_list[KF_bpf_list_back];
}
-static bool is_bpf_rbtree_api_kfunc(u32 btf_id)
-{
- return is_bpf_rbtree_add_kfunc(btf_id) ||
- btf_id == special_kfunc_list[KF_bpf_rbtree_remove] ||
- btf_id == special_kfunc_list[KF_bpf_rbtree_first] ||
- btf_id == special_kfunc_list[KF_bpf_rbtree_root] ||
- btf_id == special_kfunc_list[KF_bpf_rbtree_left] ||
- btf_id == special_kfunc_list[KF_bpf_rbtree_right];
-}
-
static bool is_bpf_iter_num_api_kfunc(u32 btf_id)
{
return btf_id == special_kfunc_list[KF_bpf_iter_num_new] ||
@@ -13182,7 +13170,7 @@ static bool is_bpf_iter_num_api_kfunc(u32 btf_id)
static bool is_bpf_graph_api_kfunc(u32 btf_id)
{
return is_bpf_list_api_kfunc(btf_id) ||
- is_bpf_rbtree_api_kfunc(btf_id) ||
+ btf_id_set_contains(&bpf_verif_kfunc_rbtree_api, btf_id) ||
is_bpf_refcount_acquire_kfunc(btf_id);
}
@@ -13216,7 +13204,7 @@ static bool kfunc_spin_allowed(u32 btf_id)
static bool is_sync_callback_calling_kfunc(u32 btf_id)
{
- return is_bpf_rbtree_add_kfunc(btf_id);
+ return btf_id_set_contains(&bpf_verif_kfunc_rbtree_add, btf_id);
}
static bool is_async_callback_calling_kfunc(u32 btf_id)
@@ -13244,7 +13232,7 @@ static bool is_callback_calling_kfunc(u32 btf_id)
static bool is_rbtree_lock_required_kfunc(u32 btf_id)
{
- return is_bpf_rbtree_api_kfunc(btf_id);
+ return btf_id_set_contains(&bpf_verif_kfunc_rbtree_api, btf_id);
}
static bool check_kfunc_is_graph_root_api(struct bpf_verifier_env *env,
@@ -13258,7 +13246,7 @@ static bool check_kfunc_is_graph_root_api(struct bpf_verifier_env *env,
ret = is_bpf_list_api_kfunc(kfunc_btf_id);
break;
case BPF_RB_ROOT:
- ret = is_bpf_rbtree_api_kfunc(kfunc_btf_id);
+ ret = btf_id_set_contains(&bpf_verif_kfunc_rbtree_api, kfunc_btf_id);
break;
default:
verbose(env, "verifier internal error: unexpected graph root argument type %s\n",
@@ -13283,10 +13271,7 @@ static bool check_kfunc_is_graph_node_api(struct bpf_verifier_env *env,
ret = is_bpf_list_push_kfunc(kfunc_btf_id);
break;
case BPF_RB_NODE:
- ret = (is_bpf_rbtree_add_kfunc(kfunc_btf_id) ||
- kfunc_btf_id == special_kfunc_list[KF_bpf_rbtree_remove] ||
- kfunc_btf_id == special_kfunc_list[KF_bpf_rbtree_left] ||
- kfunc_btf_id == special_kfunc_list[KF_bpf_rbtree_right]);
+ ret = btf_id_set_contains(&bpf_verif_kfunc_rbtree_graph_node, kfunc_btf_id);
break;
default:
verbose(env, "verifier internal error: unexpected graph node argument type %s\n",
@@ -13823,7 +13808,7 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
return ret;
break;
case KF_ARG_PTR_TO_RB_NODE:
- if (is_bpf_rbtree_add_kfunc(meta->func_id)) {
+ if (btf_id_set_contains(&bpf_verif_kfunc_rbtree_add, meta->func_id)) {
if (reg->type != (PTR_TO_BTF_ID | MEM_ALLOC)) {
verbose(env, "arg#%d expected pointer to allocated object\n", i);
return -EINVAL;
@@ -14305,7 +14290,7 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
if (err < 0)
return err;
- if (is_bpf_rbtree_add_kfunc(meta.func_id)) {
+ if (btf_id_set_contains(&bpf_verif_kfunc_rbtree_add, meta.func_id)) {
err = push_callback_call(env, insn, insn_idx, meta.subprogno,
set_rbtree_add_callback_state);
if (err) {
@@ -14409,7 +14394,8 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
return err;
}
- if (is_bpf_list_push_kfunc(meta.func_id) || is_bpf_rbtree_add_kfunc(meta.func_id)) {
+ if (is_bpf_list_push_kfunc(meta.func_id) ||
+ btf_id_set_contains(&bpf_verif_kfunc_rbtree_add, meta.func_id)) {
release_ref_obj_id = regs[BPF_REG_2].ref_obj_id;
insn_aux->insert_off = regs[BPF_REG_2].var_off.value;
insn_aux->kptr_struct_meta = btf_find_struct_meta(meta.arg_btf, meta.arg_btf_id);
@@ -23438,13 +23424,13 @@ static int fixup_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
insn_buf[2] = *insn;
*cnt = 3;
} else if (is_bpf_list_push_kfunc(desc->func_id) ||
- is_bpf_rbtree_add_kfunc(desc->func_id)) {
+ btf_id_set_contains(&bpf_verif_kfunc_rbtree_add, desc->func_id)) {
struct btf_struct_meta *kptr_struct_meta = env->insn_aux_data[insn_idx].kptr_struct_meta;
int struct_meta_reg = BPF_REG_3;
int node_offset_reg = BPF_REG_4;
/* rbtree_add has extra 'less' arg, so args-to-fixup are in diff regs */
- if (is_bpf_rbtree_add_kfunc(desc->func_id)) {
+ if (btf_id_set_contains(&bpf_verif_kfunc_rbtree_add, desc->func_id)) {
struct_meta_reg = BPF_REG_4;
node_offset_reg = BPF_REG_5;
}
--
2.43.0
^ permalink raw reply related [flat|nested] 5+ messages in thread

* Re: [PATCH bpf-next 0/3] bpf: Refactor how the verifier matches kfunc checks
2026-04-03 17:08 [PATCH bpf-next 0/3] bpf: Refactor how the verifier matches kfunc checks Chengkaitao
` (2 preceding siblings ...)
2026-04-03 17:08 ` [PATCH bpf-next 3/3] bpf: classify rbtree kfuncs with BPF_VERIF_KFUNC_DEF sets Chengkaitao
@ 2026-04-10 2:22 ` Chengkaitao
3 siblings, 0 replies; 5+ messages in thread
From: Chengkaitao @ 2026-04-10 2:22 UTC (permalink / raw)
To: Ihor Solodrai
Cc: linux-kernel, linux-arch, bpf, Kaitao Cheng, pengdonglin, song,
Eduard, Martin KaFai Lau, John Fastabend, Andrii Nakryiko, ast,
Daniel Borkmann, Jiri Olsa, memxor, Yonghong Song, arnd
On Wed, Apr 8, 2026 at 2:41 AM Ihor Solodrai <ihor.solodrai@linux.dev> wrote:
>
> On 4/4/26 3:38 AM, Chengkaitao wrote:
> > On Sat, Apr 4, 2026 at 12:49 PM Ihor Solodrai <ihor.solodrai@linux.dev> wrote:
> >> On 4/3/26 10:41 AM, Chengkaitao wrote:
> >>> On Tue, Mar 31, 2026 at 1:05 AM Alexei Starovoitov
> >>> <alexei.starovoitov@gmail.com> wrote:
> >>>>
> >>>> On Sun, Mar 29, 2026 at 7:05 AM Chengkaitao <pilgrimtao@gmail.com> wrote:
> >>>>>
> >>>>> From: Kaitao Cheng <chengkaitao@kylinos.cn>
> >>>>>
> >>>>> [...]
> >>>>> +
> >>>>> +/* Kfunc family related to spin_lock. */
> >>>>> +static const enum special_kfunc_type bpf_res_spin_lock_api_kfuncs[] = {
> >>>>> + KF_bpf_res_spin_lock,
> >>>>> + KF_bpf_res_spin_unlock,
> >>>>> + KF_bpf_res_spin_lock_irqsave,
> >>>>> + KF_bpf_res_spin_unlock_irqrestore,
> >>>>> +};
> >>>>
> >>>> I think it's a step in the wrong direction.
> >>>> I'd wait for Ihor's BTF_ID_NAMED cleanup.
> >>>
> >>> After reading Ihor's messages on the list, if I understand correctly,
> >>> our two approaches seem to target different problems. What Ihor's
> >>> work appears to achieve is the ability to remove the entire enum
> >>> special_kfunc_type. My goal, on the other hand, is to replace many
> >>> scattered func_id == special_kfunc_list[...] comparisons with a
> >>> table-driven approach.
> >>
> >> Hi Kaitao,
> >>
> >> I appreciate your efforts, however after a quick pass over the changes
> >> you propose (both here and in the new series) with respect to BTF_ID
> >> macros and special_kfuncs_list, I don't understand what problem you're
> >> trying to solve.
> >>
> >> The inherent complexity is in the fact that the verifier must know
> >> when a particular BTF id identifies a specific kfunc, or whether it
> >> belongs to some pre-defined set of ids. This is why
> >> special_kfuncs_list and other BTF_ID_SET/LIST-s exist.
> >>
> >> And so there is no way around defining those ids and sets *somewhere*,
> >> and so far BTF_ID_* macros did a fine job of that, all things
> >> considered.
> >>
> >> AFAICT your changes simply move around the same definitions from
> >> functions with if statements to constant arrays with a runtime search
> >> on them (which is slower by the way). What is the benefit of that vs
> >> the current implementation? We still have to maintain those arrays in
> >> the same way we have to maintain the is_foo_kfunc helpers.
> >>
> >> Your newer proposal [1] takes the same idea to the next level, by
> >> introducing an entire new BTF kind, new ELF sections and a bunch of
> >> macros that are no less complicated than existing. And all of that
> >> just moves the same arrays "upstream" to the .BTF_ids section. Again,
> >> I fail to see any benefits to that complexity. Having differentiation
> >> between LIST and SET, and having to mark START and END is not a
> >> problem that needs solving IMO.
> >
> > Your analysis of the code implementation for the new proposal is correct.
> > Let me elaborate on the purpose behind my approach.
> >
> > ****** Purpose 1 ******
> >
> > As described in this patch:
> > https://lore.kernel.org/bpf/20260303135219.33726-4-pilgrimtao@gmail.com/
> >
> > If we want to add a new kfunc, bpf_list_add_impl, we would today have
> > to add "btf_id == special_kfunc_list[KF_bpf_list_back]" (or similar)
> > five times in verifier.c. ...
>
> Kaitao,
>
> I think the maintainability pain in the current verifier-side kfunc
> handling is clear, I don't think anyone would argue that it's not a
> problem. Your bpf_list_* work is a good demonstration of that. I agree
> it can be improved.
Hi Ihor,
There is already discussion of this patch series at the link below.
Please keep follow-up replies on this thread so the conversation
stays easy to track.
https://lore.kernel.org/all/20260329140506.9595-2-pilgrimtao@gmail.com/
Adding more logic in verifier.c is not necessarily a good thing—it
is already over 26000 lines. The .BTF_ids approach does not reduce
the verifier's logical complexity, but it can cut down redundant
verifier code and hide the intricate logic from kfunc developers.
To address what you raised (for example, lowering verifier complexity
and making the verifier more modular), I need to think about whether
there is a better approach that balances these goals.
> That said, I don't think extending .BTF_ids and/or BTF is the right
> way to solve it.
>
> > ... Under the newer proposal, that is no longer
> > necessary: defining the kfunc and its verifier metadata in one place
> > is enough, for example:
> >
> > __bpf_kfunc int bpf_list_add_impl(struct bpf_list_head *head,
> > struct bpf_list_node *new,
> > struct bpf_list_node *prev,
> > void *meta__ign, u64 off)
> > {
> > /* kfunc implementation */
> > .......
> > }
> > BPF_VERIF_KFUNC_DEF(bpf_list_add_impl, list_api, graph_node_api, ... )
> >
> > If BPF_VERIF_KFUNC_DEF is extended further, BTF_ID(func, bpf_list_add_impl)
> > and BTF_ID_FLAGS(func, bpf_list_add_impl) might also become unnecessary,
> > so the snippet above could eventually be close to all the code required
> > to add a new kfunc.
>
> Defining a kfunc metadata in one place is a reasonable direction in
> principle, I agree.
>
> Where I disagree is the mechanism. The complexity does not disappear,
> it just moves into .BTF_ids, new macros and resolve_btfids. The
> primary consumer of this metadata is the verifier, and I think it is
> better to keep that metadata in verifier-local C definitions instead
> of encoding it into .BTF_ids. Adding more indirection to this will
> only hurt maintainability IMO.
>
> >
> > ****** Purpose 2 ******
> >
> > The kernel no longer needs enum special_kfunc_type to list every KF_bpf_*
> > entry. That information is folded into the .BTF_ids.##sfx section instead,
> > so kfunc authors do not have to touch or think about special_kfunc_type.
>
> This is not an argument for the .BTF_ids extension. Removing
> `special_kfunc_type` mirroring is useful, but that will be addressed
> directly by the targeted cleanup already under discussion.
>
> >
> > ****** Purpose 3 ******
> >
> > As described in this patch:
> > https://lore.kernel.org/bpf/20260303135219.33726-6-pilgrimtao@gmail.com/
> >
> > In is_bpf_list_api_kfunc(u32 btf_id) there are on the order of eleven
> > "btf_id == special_kfunc_list[*]" comparisons. As more kfuncs are added,
> > every is_bpf_* helper will only grow longer and the verifier will get
> > more repetitive. With the new design, those is_bpf_* helpers can be
> > removed entirely, including the awkward scattered "btf_id == *" checks.
>
> The repetition is real. But removing is_bpf_*() helpers is not
> automatically a simplification if the same information is now encoded
> indirectly through section layout or local const arrays.
>
> The same metadata is being maintained somewhere else, with more
> indirection than before. The complexity just moved around, and was not
> reduced.
>
> The scattered "btf_id == *" will remain as long as it is necessary for
> the verifier to do certain checks or other work depending on the
> kfunc. Whether it's an array lookup, helper call or anything else in
> the condition, the condition must be there. Refactoring can't reduce
> this complexity, without changes in the verifier control flow.
>
> >
> > ****** Purpose 4 ******
> >
> > It pushes us to untangle messy verifier safety cases and make them modular,
> > so they can be expressed as parameters to BPF_VERIF_KFUNC_DEF
>
> Again, I agree with the premise that verifier safety checks could
> become more modular where possible. But I think we should first
> separate two questions:
>
> 1. What kfunc properties should be declared centrally?
> 2. Where that declaration should live?
>
> While I'd like to answer (1) with "all of them", I am not convinced
> the answer to (2) is .BTF_ids or BTF. A better C side declarative
> representation would give us most of the benefit here without making
> the BTF tooling more complex.
>
> Here is how I think we should move forward:
>
> 1. Your bpf_list_* work is orthogonal to BTF_ID refactoring, so it's
> reasonable to first focus on landing it without changes to generic
> kfunc handling.
>
> 2. I plan to send patches (soon) for resolve_btfids, and then for
> BTF_ID macrology to eliminate the enum + array pattern. You are
> welcome to join the discussion and review / test the patches.
>
> 3. After all of the above lands, we can come back to the general
> BTF_ID / kfunc handling discussion. If you are interested in
> developing this further, I suggest to re-think the approach and come
> up with a "single kfunc metadata definition" that doesn't require
> significant changes in .BTF_ids section layout.
>
> A slightly off-topic comment: the usage of `_impl` pattern for kfuncs
> should be considered deprecated. Any new kfuncs that work with
> verifier-supplied arguments should use KF_IMPLICIT_ARGS mechanism. So
> the `bpf_list_add_impl` in your series should only have one version:
> `bpf_list_add` marked with KF_IMPLICIT_ARGS flag.
>
--
Yours,
Chengkaitao
^ permalink raw reply [flat|nested] 5+ messages in thread