* [PATCH bpf-next v2] libbpf: Fix BTF handling in bpf_program__clone()
@ 2026-04-01 15:16 Mykyta Yatsenko
2026-04-02 20:10 ` patchwork-bot+netdevbpf
0 siblings, 1 reply; 2+ messages in thread
From: Mykyta Yatsenko @ 2026-04-01 15:16 UTC (permalink / raw)
To: bpf, ast, andrii, daniel, kafai, kernel-team; +Cc: Mykyta Yatsenko
From: Mykyta Yatsenko <yatsenko@meta.com>
Align bpf_program__clone() with bpf_object_load_prog() by gating
BTF func/line info on FEAT_BTF_FUNC kernel support, and resolve
caller-provided prog_btf_fd before checking obj->btf so that callers
with their own BTF can use clone() even when the object has no BTF
loaded.
While at it, treat func_info and line_info fields as atomic groups
to prevent mismatches between pointer and count from different sources.
Move bpf_program__clone() to libbpf 1.8.
Fixes: 970bd2dced35 ("libbpf: Introduce bpf_program__clone()")
Signed-off-by: Mykyta Yatsenko <yatsenko@meta.com>
---
v1 -> v2:
* All-or-nothing logic for func_info/line_info triples
v1: https://lore.kernel.org/all/20260331172634.57402-1-mykyta.yatsenko5@gmail.com/
---
tools/lib/bpf/libbpf.c | 59 +++++++++++++++++++++++++++++-----------
tools/lib/bpf/libbpf.map | 2 +-
2 files changed, 44 insertions(+), 17 deletions(-)
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 9ea41f40dc82..589085466903 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -9852,7 +9852,9 @@ int bpf_program__clone(struct bpf_program *prog, const struct bpf_prog_load_opts
{
LIBBPF_OPTS(bpf_prog_load_opts, attr);
struct bpf_object *obj;
- int err, fd;
+ const void *info;
+ __u32 info_cnt, info_rec_size;
+ int err, fd, prog_btf_fd;
if (!prog)
return libbpf_err(-EINVAL);
@@ -9878,19 +9880,41 @@ int bpf_program__clone(struct bpf_program *prog, const struct bpf_prog_load_opts
if (attr.token_fd)
attr.prog_flags |= BPF_F_TOKEN_FD;
- /* BTF func/line info */
- if (obj->btf && btf__fd(obj->btf) >= 0) {
- attr.prog_btf_fd = OPTS_GET(opts, prog_btf_fd, 0) ?: btf__fd(obj->btf);
- attr.func_info = OPTS_GET(opts, func_info, NULL) ?: prog->func_info;
- attr.func_info_cnt = OPTS_GET(opts, func_info_cnt, 0) ?: prog->func_info_cnt;
- attr.func_info_rec_size =
- OPTS_GET(opts, func_info_rec_size, 0) ?: prog->func_info_rec_size;
- attr.line_info = OPTS_GET(opts, line_info, NULL) ?: prog->line_info;
- attr.line_info_cnt = OPTS_GET(opts, line_info_cnt, 0) ?: prog->line_info_cnt;
- attr.line_info_rec_size =
- OPTS_GET(opts, line_info_rec_size, 0) ?: prog->line_info_rec_size;
+ prog_btf_fd = OPTS_GET(opts, prog_btf_fd, 0);
+ if (!prog_btf_fd && obj->btf)
+ prog_btf_fd = btf__fd(obj->btf);
+
+ /* BTF func/line info: only pass if kernel supports it */
+ if (kernel_supports(obj, FEAT_BTF_FUNC) && prog_btf_fd > 0) {
+ attr.prog_btf_fd = prog_btf_fd;
+
+ /* func_info/line_info triples: all-or-nothing from caller */
+ info = OPTS_GET(opts, func_info, NULL);
+ info_cnt = OPTS_GET(opts, func_info_cnt, 0);
+ info_rec_size = OPTS_GET(opts, func_info_rec_size, 0);
+ if (!!info != !!info_cnt || !!info != !!info_rec_size) {
+ pr_warn("prog '%s': func_info, func_info_cnt, and func_info_rec_size must all be specified or all omitted\n",
+ prog->name);
+ return libbpf_err(-EINVAL);
+ }
+ attr.func_info = info ?: prog->func_info;
+ attr.func_info_cnt = info ? info_cnt : prog->func_info_cnt;
+ attr.func_info_rec_size = info ? info_rec_size : prog->func_info_rec_size;
+
+ info = OPTS_GET(opts, line_info, NULL);
+ info_cnt = OPTS_GET(opts, line_info_cnt, 0);
+ info_rec_size = OPTS_GET(opts, line_info_rec_size, 0);
+ if (!!info != !!info_cnt || !!info != !!info_rec_size) {
+ pr_warn("prog '%s': line_info, line_info_cnt, and line_info_rec_size must all be specified or all omitted\n",
+ prog->name);
+ return libbpf_err(-EINVAL);
+ }
+ attr.line_info = info ?: prog->line_info;
+ attr.line_info_cnt = info ? info_cnt : prog->line_info_cnt;
+ attr.line_info_rec_size = info ? info_rec_size : prog->line_info_rec_size;
}
+ /* Logging is caller-controlled; no fallback to prog/obj log settings */
attr.log_buf = OPTS_GET(opts, log_buf, NULL);
attr.log_size = OPTS_GET(opts, log_size, 0);
attr.log_level = OPTS_GET(opts, log_level, 0);
@@ -9912,14 +9936,17 @@ int bpf_program__clone(struct bpf_program *prog, const struct bpf_prog_load_opts
/* Re-apply caller overrides for output fields */
if (OPTS_GET(opts, expected_attach_type, 0))
- attr.expected_attach_type =
- OPTS_GET(opts, expected_attach_type, 0);
+ attr.expected_attach_type = OPTS_GET(opts, expected_attach_type, 0);
if (OPTS_GET(opts, attach_btf_id, 0))
attr.attach_btf_id = OPTS_GET(opts, attach_btf_id, 0);
if (OPTS_GET(opts, attach_btf_obj_fd, 0))
- attr.attach_btf_obj_fd =
- OPTS_GET(opts, attach_btf_obj_fd, 0);
+ attr.attach_btf_obj_fd = OPTS_GET(opts, attach_btf_obj_fd, 0);
+ /*
+ * Unlike bpf_object_load_prog(), we intentionally do not call bpf_prog_bind_map()
+ * for RODATA maps here to avoid mutating the object's state. Callers can bind the
+ * required maps themselves using bpf_prog_bind_map().
+ */
fd = bpf_prog_load(prog->type, prog->name, obj->license, prog->insns, prog->insns_cnt,
&attr);
diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map
index 346fd346666b..dfed8d60af05 100644
--- a/tools/lib/bpf/libbpf.map
+++ b/tools/lib/bpf/libbpf.map
@@ -452,12 +452,12 @@ LIBBPF_1.7.0 {
bpf_map__set_exclusive_program;
bpf_map__exclusive_program;
bpf_prog_assoc_struct_ops;
- bpf_program__clone;
bpf_program__assoc_struct_ops;
btf__permute;
} LIBBPF_1.6.0;
LIBBPF_1.8.0 {
global:
+ bpf_program__clone;
btf__new_empty_opts;
} LIBBPF_1.7.0;
--
2.52.0
^ permalink raw reply related [flat|nested] 2+ messages in thread

* Re: [PATCH bpf-next v2] libbpf: Fix BTF handling in bpf_program__clone()
2026-04-01 15:16 [PATCH bpf-next v2] libbpf: Fix BTF handling in bpf_program__clone() Mykyta Yatsenko
@ 2026-04-02 20:10 ` patchwork-bot+netdevbpf
0 siblings, 0 replies; 2+ messages in thread
From: patchwork-bot+netdevbpf @ 2026-04-02 20:10 UTC (permalink / raw)
To: Mykyta Yatsenko; +Cc: bpf, ast, andrii, daniel, kafai, kernel-team, yatsenko
Hello:
This patch was applied to bpf/bpf-next.git (master)
by Andrii Nakryiko <andrii@kernel.org>:
On Wed, 1 Apr 2026 16:16:40 +0100 you wrote:
> From: Mykyta Yatsenko <yatsenko@meta.com>
>
> Align bpf_program__clone() with bpf_object_load_prog() by gating
> BTF func/line info on FEAT_BTF_FUNC kernel support, and resolve
> caller-provided prog_btf_fd before checking obj->btf so that callers
> with their own BTF can use clone() even when the object has no BTF
> loaded.
>
> [...]
Here is the summary with links:
- [bpf-next,v2] libbpf: Fix BTF handling in bpf_program__clone()
https://git.kernel.org/bpf/bpf-next/c/1cc96e0e2048
You are awesome, thank you!
--
Deet-doot-dot, I am a bot.
https://korg.docs.kernel.org/patchwork/pwbot.html
^ permalink raw reply [flat|nested] 2+ messages in thread
end of thread, other threads:[~2026-04-02 20:10 UTC | newest]
Thread overview: 2+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2026-04-01 15:16 [PATCH bpf-next v2] libbpf: Fix BTF handling in bpf_program__clone() Mykyta Yatsenko
2026-04-02 20:10 ` patchwork-bot+netdevbpf
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox