* [PATCH bpf-next v2 1/6] bpf: Add fsession to verbose log in check_get_func_ip()
2026-03-02 15:03 [PATCH bpf-next v2 0/6] bpf: Enhance __bpf_prog_map_compatible() Leon Hwang
@ 2026-03-02 15:03 ` Leon Hwang
2026-03-02 15:03 ` [PATCH bpf-next v2 2/6] bpf: Factor out bpf_map_owner_[init,matches]() helpers Leon Hwang
` (4 subsequent siblings)
5 siblings, 0 replies; 11+ messages in thread
From: Leon Hwang @ 2026-03-02 15:03 UTC (permalink / raw)
To: bpf
Cc: Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko,
Martin KaFai Lau, Eduard Zingerman, Song Liu, Yonghong Song,
John Fastabend, KP Singh, Stanislav Fomichev, Hao Luo, Jiri Olsa,
Shuah Khan, Feng Yang, Leon Hwang, Menglong Dong, Puranjay Mohan,
Björn Töpel, Pu Lehui, linux-kernel, linux-kselftest,
netdev, kernel-patches-bot
Since bpf_get_func_ip() is supported for fsession, add fsession to the
verbose log message in check_get_func_ip().
No functional change intended.
Signed-off-by: Leon Hwang <leon.hwang@linux.dev>
---
kernel/bpf/verifier.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index fc4ccd1de569..636836a315b7 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -11493,7 +11493,7 @@ static int check_get_func_ip(struct bpf_verifier_env *env)
if (type == BPF_PROG_TYPE_TRACING) {
if (!bpf_prog_has_trampoline(env->prog)) {
- verbose(env, "func %s#%d supported only for fentry/fexit/fmod_ret programs\n",
+ verbose(env, "func %s#%d supported only for fentry/fexit/fsession/fmod_ret programs\n",
func_id_name(func_id), func_id);
return -ENOTSUPP;
}
--
2.52.0
^ permalink raw reply related [flat|nested] 11+ messages in thread

* [PATCH bpf-next v2 2/6] bpf: Factor out bpf_map_owner_[init,matches]() helpers
2026-03-02 15:03 [PATCH bpf-next v2 0/6] bpf: Enhance __bpf_prog_map_compatible() Leon Hwang
2026-03-02 15:03 ` [PATCH bpf-next v2 1/6] bpf: Add fsession to verbose log in check_get_func_ip() Leon Hwang
@ 2026-03-02 15:03 ` Leon Hwang
2026-03-02 15:03 ` [PATCH bpf-next v2 3/6] bpf: Disallow !kprobe_write_ctx progs tail-calling kprobe_write_ctx progs Leon Hwang
` (3 subsequent siblings)
5 siblings, 0 replies; 11+ messages in thread
From: Leon Hwang @ 2026-03-02 15:03 UTC (permalink / raw)
To: bpf
Cc: Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko,
Martin KaFai Lau, Eduard Zingerman, Song Liu, Yonghong Song,
John Fastabend, KP Singh, Stanislav Fomichev, Hao Luo, Jiri Olsa,
Shuah Khan, Feng Yang, Leon Hwang, Menglong Dong, Puranjay Mohan,
Björn Töpel, Pu Lehui, linux-kernel, linux-kselftest,
netdev, kernel-patches-bot
When adding more attributes to validate in __bpf_prog_map_compatible(),
both the if and else code blocks become harder to read.
To improve readability, factor out bpf_map_owner_init() and
bpf_map_owner_matches() helpers.
No functional changes intended.
Signed-off-by: Leon Hwang <leon.hwang@linux.dev>
---
kernel/bpf/core.c | 100 ++++++++++++++++++++++++++--------------------
1 file changed, 57 insertions(+), 43 deletions(-)
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 229c74f3d6ae..b24a613d99f2 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -2380,14 +2380,66 @@ static unsigned int __bpf_prog_ret0_warn(const void *ctx,
return 0;
}
+static void bpf_map_owner_init(struct bpf_map_owner *owner, const struct bpf_prog *fp,
+ enum bpf_prog_type prog_type)
+{
+ struct bpf_prog_aux *aux = fp->aux;
+ enum bpf_cgroup_storage_type i;
+
+ owner->type = prog_type;
+ owner->jited = fp->jited;
+ owner->xdp_has_frags = aux->xdp_has_frags;
+ owner->sleepable = fp->sleepable;
+ owner->expected_attach_type = fp->expected_attach_type;
+ owner->attach_func_proto = aux->attach_func_proto;
+ for_each_cgroup_storage_type(i)
+ owner->storage_cookie[i] = aux->cgroup_storage[i] ?
+ aux->cgroup_storage[i]->cookie : 0;
+}
+
+static bool bpf_map_owner_matches(const struct bpf_map *map, const struct bpf_prog *fp,
+ enum bpf_prog_type prog_type)
+{
+ struct bpf_map_owner *owner = map->owner;
+ struct bpf_prog_aux *aux = fp->aux;
+ enum bpf_cgroup_storage_type i;
+ u64 cookie;
+
+ if (owner->type != prog_type ||
+ owner->jited != fp->jited ||
+ owner->xdp_has_frags != aux->xdp_has_frags ||
+ owner->sleepable != fp->sleepable)
+ return false;
+
+ if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY &&
+ owner->expected_attach_type != fp->expected_attach_type)
+ return false;
+
+ for_each_cgroup_storage_type(i) {
+ cookie = aux->cgroup_storage[i] ? aux->cgroup_storage[i]->cookie : 0;
+ if (cookie && cookie != owner->storage_cookie[i])
+ return false;
+ }
+
+ if (owner->attach_func_proto != aux->attach_func_proto) {
+ switch (prog_type) {
+ case BPF_PROG_TYPE_TRACING:
+ case BPF_PROG_TYPE_LSM:
+ case BPF_PROG_TYPE_EXT:
+ case BPF_PROG_TYPE_STRUCT_OPS:
+ return false;
+ default:
+ break;
+ }
+ }
+ return true;
+}
+
static bool __bpf_prog_map_compatible(struct bpf_map *map,
const struct bpf_prog *fp)
{
enum bpf_prog_type prog_type = resolve_prog_type(fp);
- struct bpf_prog_aux *aux = fp->aux;
- enum bpf_cgroup_storage_type i;
bool ret = false;
- u64 cookie;
if (fp->kprobe_override)
return ret;
@@ -2398,48 +2450,10 @@ static bool __bpf_prog_map_compatible(struct bpf_map *map,
map->owner = bpf_map_owner_alloc(map);
if (!map->owner)
goto err;
- map->owner->type = prog_type;
- map->owner->jited = fp->jited;
- map->owner->xdp_has_frags = aux->xdp_has_frags;
- map->owner->sleepable = fp->sleepable;
- map->owner->expected_attach_type = fp->expected_attach_type;
- map->owner->attach_func_proto = aux->attach_func_proto;
- for_each_cgroup_storage_type(i) {
- map->owner->storage_cookie[i] =
- aux->cgroup_storage[i] ?
- aux->cgroup_storage[i]->cookie : 0;
- }
+ bpf_map_owner_init(map->owner, fp, prog_type);
ret = true;
} else {
- ret = map->owner->type == prog_type &&
- map->owner->jited == fp->jited &&
- map->owner->xdp_has_frags == aux->xdp_has_frags &&
- map->owner->sleepable == fp->sleepable;
- if (ret &&
- map->map_type == BPF_MAP_TYPE_PROG_ARRAY &&
- map->owner->expected_attach_type != fp->expected_attach_type)
- ret = false;
- for_each_cgroup_storage_type(i) {
- if (!ret)
- break;
- cookie = aux->cgroup_storage[i] ?
- aux->cgroup_storage[i]->cookie : 0;
- ret = map->owner->storage_cookie[i] == cookie ||
- !cookie;
- }
- if (ret &&
- map->owner->attach_func_proto != aux->attach_func_proto) {
- switch (prog_type) {
- case BPF_PROG_TYPE_TRACING:
- case BPF_PROG_TYPE_LSM:
- case BPF_PROG_TYPE_EXT:
- case BPF_PROG_TYPE_STRUCT_OPS:
- ret = false;
- break;
- default:
- break;
- }
- }
+ ret = bpf_map_owner_matches(map, fp, prog_type);
}
err:
spin_unlock(&map->owner_lock);
--
2.52.0
^ permalink raw reply related [flat|nested] 11+ messages in thread

* [PATCH bpf-next v2 3/6] bpf: Disallow !kprobe_write_ctx progs tail-calling kprobe_write_ctx progs
2026-03-02 15:03 [PATCH bpf-next v2 0/6] bpf: Enhance __bpf_prog_map_compatible() Leon Hwang
2026-03-02 15:03 ` [PATCH bpf-next v2 1/6] bpf: Add fsession to verbose log in check_get_func_ip() Leon Hwang
2026-03-02 15:03 ` [PATCH bpf-next v2 2/6] bpf: Factor out bpf_map_owner_[init,matches]() helpers Leon Hwang
@ 2026-03-02 15:03 ` Leon Hwang
2026-03-02 15:53 ` bot+bpf-ci
2026-03-02 15:03 ` [PATCH bpf-next v2 4/6] bpf: Disallow !call_get_func_ip progs tail-calling call_get_func_ip progs Leon Hwang
` (2 subsequent siblings)
5 siblings, 1 reply; 11+ messages in thread
From: Leon Hwang @ 2026-03-02 15:03 UTC (permalink / raw)
To: bpf
Cc: Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko,
Martin KaFai Lau, Eduard Zingerman, Song Liu, Yonghong Song,
John Fastabend, KP Singh, Stanislav Fomichev, Hao Luo, Jiri Olsa,
Shuah Khan, Feng Yang, Leon Hwang, Menglong Dong, Puranjay Mohan,
Björn Töpel, Pu Lehui, linux-kernel, linux-kselftest,
netdev, kernel-patches-bot
Uprobe programs that modify regs require different runtime assumptions
than those that do not. Mixing !kprobe_write_ctx progs with
kprobe_write_ctx progs via tail calls could break these assumptions.
To address this, reject the combination of !kprobe_write_ctx progs with
kprobe_write_ctx progs in bpf_map_owner_matches(), which prevents the
tail callee from modifying regs unexpectedly.
Also reject kprobe_write_ctx mismatches during initialization to
prevent bypassing the above restriction.
Without this check, the above restriction can be bypassed as follows.
struct {
__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
__uint(max_entries, 1);
__uint(key_size, 4);
__uint(value_size, 4);
} jmp_table SEC(".maps");
SEC("?kprobe")
int prog_a(struct pt_regs *regs)
{
regs->ax = 0;
bpf_tail_call_static(regs, &jmp_table, 0);
return 0;
}
SEC("?kprobe")
int prog_b(struct pt_regs *regs)
{
bpf_tail_call_static(regs, &jmp_table, 0);
return 0;
}
The jmp_table is shared between prog_a and prog_b.
* Load prog_a.
At this point, owner->kprobe_write_ctx=true.
* Load prog_b.
At this point, prog_b passes the compatibility check.
* Add prog_a to jmp_table.
* Attach prog_b to a kernel function.
When the kernel function runs, prog_a will unexpectedly modify regs.
Fixes: 7384893d970e ("bpf: Allow uprobe program to change context registers")
Signed-off-by: Leon Hwang <leon.hwang@linux.dev>
---
include/linux/bpf.h | 7 ++++---
kernel/bpf/core.c | 30 +++++++++++++++++++++++++-----
2 files changed, 29 insertions(+), 8 deletions(-)
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 05b34a6355b0..dbafed52b2ba 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -285,9 +285,10 @@ struct bpf_list_node_kern {
*/
struct bpf_map_owner {
enum bpf_prog_type type;
- bool jited;
- bool xdp_has_frags;
- bool sleepable;
+ u32 jited:1;
+ u32 xdp_has_frags:1;
+ u32 sleepable:1;
+ u32 kprobe_write_ctx:1;
u64 storage_cookie[MAX_BPF_CGROUP_STORAGE_TYPE];
const struct btf_type *attach_func_proto;
enum bpf_attach_type expected_attach_type;
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index b24a613d99f2..121a697d4da5 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -2390,6 +2390,7 @@ static void bpf_map_owner_init(struct bpf_map_owner *owner, const struct bpf_pro
owner->jited = fp->jited;
owner->xdp_has_frags = aux->xdp_has_frags;
owner->sleepable = fp->sleepable;
+ owner->kprobe_write_ctx = aux->kprobe_write_ctx;
owner->expected_attach_type = fp->expected_attach_type;
owner->attach_func_proto = aux->attach_func_proto;
for_each_cgroup_storage_type(i)
@@ -2397,8 +2398,14 @@ static void bpf_map_owner_init(struct bpf_map_owner *owner, const struct bpf_pro
aux->cgroup_storage[i]->cookie : 0;
}
+enum bpf_map_owner_match_type {
+ BPF_MAP_OWNER_MATCH_FOR_INIT,
+ BPF_MAP_OWNER_MATCH_FOR_UPDATE,
+};
+
static bool bpf_map_owner_matches(const struct bpf_map *map, const struct bpf_prog *fp,
- enum bpf_prog_type prog_type)
+ enum bpf_prog_type prog_type,
+ enum bpf_map_owner_match_type match)
{
struct bpf_map_owner *owner = map->owner;
struct bpf_prog_aux *aux = fp->aux;
@@ -2411,6 +2418,18 @@ static bool bpf_map_owner_matches(const struct bpf_map *map, const struct bpf_pr
owner->sleepable != fp->sleepable)
return false;
+ switch (match) {
+ case BPF_MAP_OWNER_MATCH_FOR_INIT:
+ if (owner->kprobe_write_ctx != aux->kprobe_write_ctx)
+ return false;
+ break;
+
+ case BPF_MAP_OWNER_MATCH_FOR_UPDATE:
+ if (!owner->kprobe_write_ctx && aux->kprobe_write_ctx)
+ return false;
+ break;
+ }
+
if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY &&
owner->expected_attach_type != fp->expected_attach_type)
return false;
@@ -2436,7 +2455,8 @@ static bool bpf_map_owner_matches(const struct bpf_map *map, const struct bpf_pr
}
static bool __bpf_prog_map_compatible(struct bpf_map *map,
- const struct bpf_prog *fp)
+ const struct bpf_prog *fp,
+ enum bpf_map_owner_match_type match)
{
enum bpf_prog_type prog_type = resolve_prog_type(fp);
bool ret = false;
@@ -2453,7 +2473,7 @@ static bool __bpf_prog_map_compatible(struct bpf_map *map,
bpf_map_owner_init(map->owner, fp, prog_type);
ret = true;
} else {
- ret = bpf_map_owner_matches(map, fp, prog_type);
+ ret = bpf_map_owner_matches(map, fp, prog_type, match);
}
err:
spin_unlock(&map->owner_lock);
@@ -2470,7 +2490,7 @@ bool bpf_prog_map_compatible(struct bpf_map *map, const struct bpf_prog *fp)
if (bpf_prog_is_dev_bound(fp->aux))
return false;
- return __bpf_prog_map_compatible(map, fp);
+ return __bpf_prog_map_compatible(map, fp, BPF_MAP_OWNER_MATCH_FOR_UPDATE);
}
static int bpf_check_tail_call(const struct bpf_prog *fp)
@@ -2485,7 +2505,7 @@ static int bpf_check_tail_call(const struct bpf_prog *fp)
if (!map_type_contains_progs(map))
continue;
- if (!__bpf_prog_map_compatible(map, fp)) {
+ if (!__bpf_prog_map_compatible(map, fp, BPF_MAP_OWNER_MATCH_FOR_INIT)) {
ret = -EINVAL;
goto out;
}
--
2.52.0
^ permalink raw reply related [flat|nested] 11+ messages in thread

* Re: [PATCH bpf-next v2 3/6] bpf: Disallow !kprobe_write_ctx progs tail-calling kprobe_write_ctx progs
2026-03-02 15:03 ` [PATCH bpf-next v2 3/6] bpf: Disallow !kprobe_write_ctx progs tail-calling kprobe_write_ctx progs Leon Hwang
@ 2026-03-02 15:53 ` bot+bpf-ci
2026-03-03 1:44 ` Leon Hwang
0 siblings, 1 reply; 11+ messages in thread
From: bot+bpf-ci @ 2026-03-02 15:53 UTC (permalink / raw)
To: leon.hwang, bpf
Cc: ast, daniel, andrii, martin.lau, eddyz87, song, yonghong.song,
john.fastabend, kpsingh, sdf, haoluo, jolsa, shuah, yangfeng,
leon.hwang, menglong8.dong, puranjay, bjorn, pulehui,
linux-kernel, linux-kselftest, netdev, kernel-patches-bot, ast,
andrii, daniel, martin.lau, eddyz87, yonghong.song, clm,
ihor.solodrai
[-- Attachment #1: Type: text/plain, Size: 2076 bytes --]
> diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
> --- a/kernel/bpf/core.c
> +++ b/kernel/bpf/core.c
[ ... ]
> +enum bpf_map_owner_match_type {
> + BPF_MAP_OWNER_MATCH_FOR_INIT,
> + BPF_MAP_OWNER_MATCH_FOR_UPDATE,
> +};
> +
> static bool bpf_map_owner_matches(const struct bpf_map *map, const struct bpf_prog *fp,
> - enum bpf_prog_type prog_type)
> + enum bpf_prog_type prog_type,
> + enum bpf_map_owner_match_type match)
> {
[ ... ]
> + switch (match) {
> + case BPF_MAP_OWNER_MATCH_FOR_INIT:
> + if (owner->kprobe_write_ctx != aux->kprobe_write_ctx)
> + return false;
> + break;
> +
> + case BPF_MAP_OWNER_MATCH_FOR_UPDATE:
> + if (!owner->kprobe_write_ctx && aux->kprobe_write_ctx)
> + return false;
> + break;
> + }
In the v1 review, Alexei Starovoitov asked for strict conformance
both ways rather than one-directional validation:
"Don't be fancy. Require strict conformance both ways
in *all* patches."
https://lore.kernel.org/bpf/CAADnVQ+-V0-1i8_+CYpK7G0CnV-2n8e9Szv43yM3Az197eL_0A@mail.gmail.com/
The BPF_MAP_OWNER_MATCH_FOR_UPDATE case still uses one-directional
matching, rejecting only !owner->kprobe_write_ctx && aux->kprobe_write_ctx
but allowing the reverse. The v2 changelog lists other Alexei comments
as addressed but does not mention this one.
Was this an intentional design choice, and if so, should the changelog
note the disagreement?
---
AI reviewed your patch. Please fix the bug or email reply why it's not a bug.
See: https://github.com/kernel-patches/vmtest/blob/master/ci/claude/README.md
CI run summary: https://github.com/kernel-patches/bpf/actions/runs/22582690876
AI-authorship-score: medium
AI-authorship-explanation: Confirmed LLM involvement in v1 development process (selftests called out by reviewer), but core code changes and v2 revisions appear human-authored with genuine design engagement.
issues-found: 1
issue-severity-score: low
issue-severity-explanation: Unaddressed maintainer review comment requesting symmetric matching in the UPDATE path; not a code bug or security concern.
^ permalink raw reply [flat|nested] 11+ messages in thread

* Re: [PATCH bpf-next v2 3/6] bpf: Disallow !kprobe_write_ctx progs tail-calling kprobe_write_ctx progs
2026-03-02 15:53 ` bot+bpf-ci
@ 2026-03-03 1:44 ` Leon Hwang
0 siblings, 0 replies; 11+ messages in thread
From: Leon Hwang @ 2026-03-03 1:44 UTC (permalink / raw)
To: bot+bpf-ci, bpf
Cc: ast, daniel, andrii, martin.lau, eddyz87, song, yonghong.song,
john.fastabend, kpsingh, sdf, haoluo, jolsa, shuah, yangfeng,
menglong8.dong, puranjay, bjorn, pulehui, linux-kernel,
linux-kselftest, netdev, kernel-patches-bot, martin.lau, clm,
ihor.solodrai
On 2/3/26 23:53, bot+bpf-ci@kernel.org wrote:
>> diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
>> --- a/kernel/bpf/core.c
>> +++ b/kernel/bpf/core.c
>
> [ ... ]
>
>> +enum bpf_map_owner_match_type {
>> + BPF_MAP_OWNER_MATCH_FOR_INIT,
>> + BPF_MAP_OWNER_MATCH_FOR_UPDATE,
>> +};
>> +
>> static bool bpf_map_owner_matches(const struct bpf_map *map, const struct bpf_prog *fp,
>> - enum bpf_prog_type prog_type)
>> + enum bpf_prog_type prog_type,
>> + enum bpf_map_owner_match_type match)
>> {
>
> [ ... ]
>
>> + switch (match) {
>> + case BPF_MAP_OWNER_MATCH_FOR_INIT:
>> + if (owner->kprobe_write_ctx != aux->kprobe_write_ctx)
>> + return false;
>> + break;
>> +
>> + case BPF_MAP_OWNER_MATCH_FOR_UPDATE:
>> + if (!owner->kprobe_write_ctx && aux->kprobe_write_ctx)
>> + return false;
>> + break;
>> + }
>
> In the v1 review, Alexei Starovoitov asked for strict conformance
> both ways rather than one-directional validation:
>
> "Don't be fancy. Require strict conformance both ways
> in *all* patches."
>
> https://lore.kernel.org/bpf/CAADnVQ+-V0-1i8_+CYpK7G0CnV-2n8e9Szv43yM3Az197eL_0A@mail.gmail.com/
>
> The BPF_MAP_OWNER_MATCH_FOR_UPDATE case still uses one-directional
> matching, rejecting only !owner->kprobe_write_ctx && aux->kprobe_write_ctx
> but allowing the reverse. The v2 changelog lists other Alexei comments
> as addressed but does not mention this one.
>
> Was this an intentional design choice, and if so, should the changelog
> note the disagreement?
>
This was an intentional design choice.
My explanation in v1 should address his concern.
As his disagreement was not part of the patch series, should I note it
in the changelog?
Thanks,
Leon
>
> ---
> AI reviewed your patch. Please fix the bug or email reply why it's not a bug.
> See: https://github.com/kernel-patches/vmtest/blob/master/ci/claude/README.md
>
> CI run summary: https://github.com/kernel-patches/bpf/actions/runs/22582690876
>
> AI-authorship-score: medium
> AI-authorship-explanation: Confirmed LLM involvement in v1 development process (selftests called out by reviewer), but core code changes and v2 revisions appear human-authored with genuine design engagement.
> issues-found: 1
> issue-severity-score: low
> issue-severity-explanation: Unaddressed maintainer review comment requesting symmetric matching in the UPDATE path; not a code bug or security concern.
^ permalink raw reply [flat|nested] 11+ messages in thread
* [PATCH bpf-next v2 4/6] bpf: Disallow !call_get_func_ip progs tail-calling call_get_func_ip progs
2026-03-02 15:03 [PATCH bpf-next v2 0/6] bpf: Enhance __bpf_prog_map_compatible() Leon Hwang
` (2 preceding siblings ...)
2026-03-02 15:03 ` [PATCH bpf-next v2 3/6] bpf: Disallow !kprobe_write_ctx progs tail-calling kprobe_write_ctx progs Leon Hwang
@ 2026-03-02 15:03 ` Leon Hwang
2026-03-02 15:53 ` bot+bpf-ci
2026-03-02 15:03 ` [PATCH bpf-next v2 5/6] bpf: Disallow !call_session_cookie progs tail-calling call_session_cookie progs Leon Hwang
2026-03-02 15:03 ` [PATCH bpf-next v2 6/6] selftests/bpf: Add tests to verify prog_array map compatibility Leon Hwang
5 siblings, 1 reply; 11+ messages in thread
From: Leon Hwang @ 2026-03-02 15:03 UTC (permalink / raw)
To: bpf
Cc: Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko,
Martin KaFai Lau, Eduard Zingerman, Song Liu, Yonghong Song,
John Fastabend, KP Singh, Stanislav Fomichev, Hao Luo, Jiri Olsa,
Shuah Khan, Feng Yang, Leon Hwang, Menglong Dong, Puranjay Mohan,
Björn Töpel, Pu Lehui, linux-kernel, linux-kselftest,
netdev, kernel-patches-bot
Trampoline-based tracing programs that call bpf_get_func_ip() rely on
the func IP stored on the stack. Mixing !call_get_func_ip progs with
call_get_func_ip progs via tail calls could break this assumption.
To address this, reject the combination of !call_get_func_ip progs with
call_get_func_ip progs in bpf_map_owner_matches(), which prevents the
tail callee from getting a bogus func IP.
Also reject call_get_func_ip mismatches during initialization to
prevent bypassing the above restriction.
Without this check, the above restriction can be bypassed as follows.
struct {
__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
__uint(max_entries, 1);
__uint(key_size, sizeof(__u32));
__uint(value_size, sizeof(__u32));
} jmp_table SEC(".maps");
SEC("?fentry")
int BPF_PROG(prog_a)
{
bpf_printk("FUNC IP: 0x%llx\n", bpf_get_func_ip());
bpf_tail_call_static(ctx, &jmp_table, 0);
return 0;
}
SEC("?fentry")
int BPF_PROG(prog_b)
{
bpf_tail_call_static(ctx, &jmp_table, 0);
return 0;
}
The jmp_table is shared between prog_a and prog_b.
* Load prog_a first.
At this point, owner->call_get_func_ip=true.
* Load prog_b next.
At this point, prog_b passes the compatibility check.
* Add prog_a to jmp_table.
* Attach prog_b to a kernel function.
When the kernel function runs, prog_a will get a bogus func IP because
no func IP is prepared on the trampoline stack.
Fixes: 1e37392cccde ("bpf: Enable BPF_TRAMP_F_IP_ARG for trampolines with call_get_func_ip")
Signed-off-by: Leon Hwang <leon.hwang@linux.dev>
---
include/linux/bpf.h | 1 +
kernel/bpf/core.c | 7 +++++++
2 files changed, 8 insertions(+)
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index dbafed52b2ba..fb978650b169 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -289,6 +289,7 @@ struct bpf_map_owner {
u32 xdp_has_frags:1;
u32 sleepable:1;
u32 kprobe_write_ctx:1;
+ u32 call_get_func_ip:1;
u64 storage_cookie[MAX_BPF_CGROUP_STORAGE_TYPE];
const struct btf_type *attach_func_proto;
enum bpf_attach_type expected_attach_type;
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 121a697d4da5..1b88878fe5c5 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -2391,6 +2391,7 @@ static void bpf_map_owner_init(struct bpf_map_owner *owner, const struct bpf_pro
owner->xdp_has_frags = aux->xdp_has_frags;
owner->sleepable = fp->sleepable;
owner->kprobe_write_ctx = aux->kprobe_write_ctx;
+ owner->call_get_func_ip = fp->call_get_func_ip;
owner->expected_attach_type = fp->expected_attach_type;
owner->attach_func_proto = aux->attach_func_proto;
for_each_cgroup_storage_type(i)
@@ -2422,11 +2423,17 @@ static bool bpf_map_owner_matches(const struct bpf_map *map, const struct bpf_pr
case BPF_MAP_OWNER_MATCH_FOR_INIT:
if (owner->kprobe_write_ctx != aux->kprobe_write_ctx)
return false;
+ if (owner->call_get_func_ip != fp->call_get_func_ip)
+ return false;
break;
case BPF_MAP_OWNER_MATCH_FOR_UPDATE:
if (!owner->kprobe_write_ctx && aux->kprobe_write_ctx)
return false;
+ if (bpf_prog_has_trampoline(fp)) {
+ if (!owner->call_get_func_ip && fp->call_get_func_ip)
+ return false;
+ }
break;
}
--
2.52.0
^ permalink raw reply related [flat|nested] 11+ messages in thread

* Re: [PATCH bpf-next v2 4/6] bpf: Disallow !call_get_func_ip progs tail-calling call_get_func_ip progs
2026-03-02 15:03 ` [PATCH bpf-next v2 4/6] bpf: Disallow !call_get_func_ip progs tail-calling call_get_func_ip progs Leon Hwang
@ 2026-03-02 15:53 ` bot+bpf-ci
2026-03-03 1:47 ` Leon Hwang
0 siblings, 1 reply; 11+ messages in thread
From: bot+bpf-ci @ 2026-03-02 15:53 UTC (permalink / raw)
To: leon.hwang, bpf
Cc: ast, daniel, andrii, martin.lau, eddyz87, song, yonghong.song,
john.fastabend, kpsingh, sdf, haoluo, jolsa, shuah, yangfeng,
leon.hwang, menglong8.dong, puranjay, bjorn, pulehui,
linux-kernel, linux-kselftest, netdev, kernel-patches-bot, ast,
andrii, daniel, martin.lau, eddyz87, yonghong.song, clm,
ihor.solodrai
[-- Attachment #1: Type: text/plain, Size: 2801 bytes --]
> diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
> --- a/kernel/bpf/core.c
> +++ b/kernel/bpf/core.c
[ ... ]
> @@ -2422,11 +2423,17 @@ static bool bpf_map_owner_matches(const struct bpf_map *map, const struct bpf_prog *fp,
> case BPF_MAP_OWNER_MATCH_FOR_INIT:
> if (owner->kprobe_write_ctx != aux->kprobe_write_ctx)
> return false;
> + if (owner->call_get_func_ip != fp->call_get_func_ip)
> + return false;
> break;
>
> case BPF_MAP_OWNER_MATCH_FOR_UPDATE:
> if (!owner->kprobe_write_ctx && aux->kprobe_write_ctx)
> return false;
> + if (bpf_prog_has_trampoline(fp)) {
> + if (!owner->call_get_func_ip && fp->call_get_func_ip)
> + return false;
> + }
> break;
The FOR_UPDATE path correctly guards the call_get_func_ip check with
bpf_prog_has_trampoline(fp), since only trampoline-based programs
rely on the func IP stored on the trampoline stack. Kprobe programs
get func_ip from kprobe_running()->addr via bpf_get_func_ip_kprobe(),
which is independent of the trampoline.
Should the FOR_INIT path have the same bpf_prog_has_trampoline()
guard? Without it, two kprobe programs that share a prog_array will
be rejected if one calls bpf_get_func_ip() and the other does not,
even though mixing is safe for kprobes.
bpf_check_tail_call() calls this with BPF_MAP_OWNER_MATCH_FOR_INIT:
kprobe prog_a (call_get_func_ip=1) loads, references jmp_table
-> owner->call_get_func_ip = 1
kprobe prog_b (call_get_func_ip=0) loads, references same jmp_table
-> owner->call_get_func_ip != fp->call_get_func_ip
-> returns false, prog_b fails to load
Something like this would match the FOR_UPDATE scoping:
case BPF_MAP_OWNER_MATCH_FOR_INIT:
if (owner->kprobe_write_ctx != aux->kprobe_write_ctx)
return false;
if (bpf_prog_has_trampoline(fp)) {
if (owner->call_get_func_ip != fp->call_get_func_ip)
return false;
}
break;
---
AI reviewed your patch. Please fix the bug or email reply why it's not a bug.
See: https://github.com/kernel-patches/vmtest/blob/master/ci/claude/README.md
CI run summary: https://github.com/kernel-patches/bpf/actions/runs/22582690876
AI-authorship-score: medium
AI-authorship-explanation: The v1 selftests carried Assisted-by Codex tags, and the kernel code follows a mechanical copy-paste pattern across commits; the v2 selftests were rewritten but the overall series structure suggests AI-assisted development.
issues-found: 1
issue-severity-score: low
issue-severity-explanation: The FOR_INIT strict equality check on call_get_func_ip is missing a bpf_prog_has_trampoline() guard, causing unnecessary rejection of kprobe programs that share a prog_array with different call_get_func_ip values.
^ permalink raw reply [flat|nested] 11+ messages in thread

* Re: [PATCH bpf-next v2 4/6] bpf: Disallow !call_get_func_ip progs tail-calling call_get_func_ip progs
2026-03-02 15:53 ` bot+bpf-ci
@ 2026-03-03 1:47 ` Leon Hwang
0 siblings, 0 replies; 11+ messages in thread
From: Leon Hwang @ 2026-03-03 1:47 UTC (permalink / raw)
To: bot+bpf-ci, bpf
Cc: ast, daniel, andrii, martin.lau, eddyz87, song, yonghong.song,
john.fastabend, kpsingh, sdf, haoluo, jolsa, shuah, yangfeng,
menglong8.dong, puranjay, bjorn, pulehui, linux-kernel,
linux-kselftest, netdev, kernel-patches-bot, martin.lau, clm,
ihor.solodrai
On 2/3/26 23:53, bot+bpf-ci@kernel.org wrote:
>> diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
>> --- a/kernel/bpf/core.c
>> +++ b/kernel/bpf/core.c
>
> [ ... ]
>
>> @@ -2422,11 +2423,17 @@ static bool bpf_map_owner_matches(const struct bpf_map *map, const struct bpf_prog *fp,
>> case BPF_MAP_OWNER_MATCH_FOR_INIT:
>> if (owner->kprobe_write_ctx != aux->kprobe_write_ctx)
>> return false;
>> + if (owner->call_get_func_ip != fp->call_get_func_ip)
>> + return false;
>> break;
>>
>> case BPF_MAP_OWNER_MATCH_FOR_UPDATE:
>> if (!owner->kprobe_write_ctx && aux->kprobe_write_ctx)
>> return false;
>> + if (bpf_prog_has_trampoline(fp)) {
>> + if (!owner->call_get_func_ip && fp->call_get_func_ip)
>> + return false;
>> + }
>> break;
>
> The FOR_UPDATE path correctly guards the call_get_func_ip check with
> bpf_prog_has_trampoline(fp), since only trampoline-based programs
> rely on the func IP stored on the trampoline stack. Kprobe programs
> get func_ip from kprobe_running()->addr via bpf_get_func_ip_kprobe(),
> which is independent of the trampoline.
>
> Should the FOR_INIT path have the same bpf_prog_has_trampoline()
> guard? Without it, two kprobe programs that share a prog_array will
> be rejected if one calls bpf_get_func_ip() and the other does not,
> even though mixing is safe for kprobes.
>
> bpf_check_tail_call() calls this with BPF_MAP_OWNER_MATCH_FOR_INIT:
>
> kprobe prog_a (call_get_func_ip=1) loads, references jmp_table
> -> owner->call_get_func_ip = 1
> kprobe prog_b (call_get_func_ip=0) loads, references same jmp_table
> -> owner->call_get_func_ip != fp->call_get_func_ip
> -> returns false, prog_b fails to load
>
> Something like this would match the FOR_UPDATE scoping:
>
> case BPF_MAP_OWNER_MATCH_FOR_INIT:
> if (owner->kprobe_write_ctx != aux->kprobe_write_ctx)
> return false;
> if (bpf_prog_has_trampoline(fp)) {
> if (owner->call_get_func_ip != fp->call_get_func_ip)
> return false;
> }
> break;
>
Correct.
The FOR_INIT path should have the same bpf_prog_has_trampoline() guard.
I'll do it in the next revision.
Thanks,
Leon
>
> ---
> AI reviewed your patch. Please fix the bug or email reply why it's not a bug.
> See: https://github.com/kernel-patches/vmtest/blob/master/ci/claude/README.md
>
> CI run summary: https://github.com/kernel-patches/bpf/actions/runs/22582690876
>
> AI-authorship-score: medium
> AI-authorship-explanation: The v1 selftests carried Assisted-by Codex tags, and the kernel code follows a mechanical copy-paste pattern across commits; the v2 selftests were rewritten but the overall series structure suggests AI-assisted development.
> issues-found: 1
> issue-severity-score: low
> issue-severity-explanation: The FOR_INIT strict equality check on call_get_func_ip is missing a bpf_prog_has_trampoline() guard, causing unnecessary rejection of kprobe programs that share a prog_array with different call_get_func_ip values.
^ permalink raw reply [flat|nested] 11+ messages in thread
* [PATCH bpf-next v2 5/6] bpf: Disallow !call_session_cookie progs tail-calling call_session_cookie progs
2026-03-02 15:03 [PATCH bpf-next v2 0/6] bpf: Enhance __bpf_prog_map_compatible() Leon Hwang
` (3 preceding siblings ...)
2026-03-02 15:03 ` [PATCH bpf-next v2 4/6] bpf: Disallow !call_get_func_ip progs tail-calling call_get_func_ip progs Leon Hwang
@ 2026-03-02 15:03 ` Leon Hwang
2026-03-02 15:03 ` [PATCH bpf-next v2 6/6] selftests/bpf: Add tests to verify prog_array map compatibility Leon Hwang
5 siblings, 0 replies; 11+ messages in thread
From: Leon Hwang @ 2026-03-02 15:03 UTC (permalink / raw)
To: bpf
Cc: Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko,
Martin KaFai Lau, Eduard Zingerman, Song Liu, Yonghong Song,
John Fastabend, KP Singh, Stanislav Fomichev, Hao Luo, Jiri Olsa,
Shuah Khan, Feng Yang, Leon Hwang, Menglong Dong, Puranjay Mohan,
Björn Töpel, Pu Lehui, linux-kernel, linux-kselftest,
netdev, kernel-patches-bot
Fsession progs that call bpf_session_cookie() kfunc depend on
consistent session metadata stored on the stack. Mixing
!call_session_cookie progs with call_session_cookie progs via tail
calls could break this assumption.
To address this, reject the combination of !call_session_cookie progs
with call_session_cookie progs in bpf_map_owner_matches(), which
prevents the tail callee from accessing a bogus session cookie.
Also reject call_session_cookie mismatches during initialization to
prevent bypassing the above restriction.
Without this check, the above restriction can be bypassed as follows.
struct {
__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
__uint(max_entries, 1);
__uint(key_size, sizeof(__u32));
__uint(value_size, sizeof(__u32));
} jmp_table SEC(".maps");
SEC("?fsession")
int BPF_PROG(prog_a)
{
u64 *cookie = bpf_session_cookie(ctx);
*cookie = 42;
bpf_tail_call_static(ctx, &jmp_table, 0);
return 0;
}
SEC("?fsession")
int BPF_PROG(prog_b)
{
bpf_tail_call_static(ctx, &jmp_table, 0);
return 0;
}
The jmp_table is shared between prog_a and prog_b.
* Load prog_a first.
At this point, owner->call_session_cookie=true.
* Load prog_b next.
At this point, prog_b passes the compatibility check.
* Add prog_a to jmp_table.
* Attach prog_b to a kernel function.
When the kernel function runs, prog_a will get a u64 pointer to the
first arg slot on the trampoline stack, and will modify the arg via this
pointer.
Fixes: eeee4239dbb1 ("bpf: support fsession for bpf_session_cookie")
Signed-off-by: Leon Hwang <leon.hwang@linux.dev>
---
include/linux/bpf.h | 1 +
kernel/bpf/core.c | 5 +++++
2 files changed, 6 insertions(+)
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index fb978650b169..3931fdbca3a7 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -290,6 +290,7 @@ struct bpf_map_owner {
u32 sleepable:1;
u32 kprobe_write_ctx:1;
u32 call_get_func_ip:1;
+ u32 call_session_cookie:1;
u64 storage_cookie[MAX_BPF_CGROUP_STORAGE_TYPE];
const struct btf_type *attach_func_proto;
enum bpf_attach_type expected_attach_type;
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 1b88878fe5c5..03d2d8f244c8 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -2392,6 +2392,7 @@ static void bpf_map_owner_init(struct bpf_map_owner *owner, const struct bpf_pro
owner->sleepable = fp->sleepable;
owner->kprobe_write_ctx = aux->kprobe_write_ctx;
owner->call_get_func_ip = fp->call_get_func_ip;
+ owner->call_session_cookie = fp->call_session_cookie;
owner->expected_attach_type = fp->expected_attach_type;
owner->attach_func_proto = aux->attach_func_proto;
for_each_cgroup_storage_type(i)
@@ -2425,6 +2426,8 @@ static bool bpf_map_owner_matches(const struct bpf_map *map, const struct bpf_pr
return false;
if (owner->call_get_func_ip != fp->call_get_func_ip)
return false;
+ if (owner->call_session_cookie != fp->call_session_cookie)
+ return false;
break;
case BPF_MAP_OWNER_MATCH_FOR_UPDATE:
@@ -2433,6 +2436,8 @@ static bool bpf_map_owner_matches(const struct bpf_map *map, const struct bpf_pr
if (bpf_prog_has_trampoline(fp)) {
if (!owner->call_get_func_ip && fp->call_get_func_ip)
return false;
+ if (!owner->call_session_cookie && fp->call_session_cookie)
+ return false;
}
break;
}
--
2.52.0
^ permalink raw reply related [flat|nested] 11+ messages in thread* [PATCH bpf-next v2 6/6] selftests/bpf: Add tests to verify prog_array map compatibility
2026-03-02 15:03 [PATCH bpf-next v2 0/6] bpf: Enhance __bpf_prog_map_compatible() Leon Hwang
` (4 preceding siblings ...)
2026-03-02 15:03 ` [PATCH bpf-next v2 5/6] bpf: Disallow !call_session_cookie progs tail-calling call_session_cookie progs Leon Hwang
@ 2026-03-02 15:03 ` Leon Hwang
5 siblings, 0 replies; 11+ messages in thread
From: Leon Hwang @ 2026-03-02 15:03 UTC (permalink / raw)
To: bpf
Cc: Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko,
Martin KaFai Lau, Eduard Zingerman, Song Liu, Yonghong Song,
John Fastabend, KP Singh, Stanislav Fomichev, Hao Luo, Jiri Olsa,
Shuah Khan, Feng Yang, Leon Hwang, Menglong Dong, Puranjay Mohan,
Björn Töpel, Pu Lehui, linux-kernel, linux-kselftest,
netdev, kernel-patches-bot
Add tests to verify the following tail call restrictions:
* !kprobe_write_ctx progs are not compatible with kprobe_write_ctx progs.
* !call_get_func_ip progs are not compatible with call_get_func_ip progs.
* !call_session_cookie progs are not compatible with call_session_cookie
progs.
For kprobe_write_ctx, call_get_func_ip, and call_session_cookie, a
prog_array map cannot be shared between progs that differ in any of
these flags.
Signed-off-by: Leon Hwang <leon.hwang@linux.dev>
---
.../selftests/bpf/prog_tests/tailcalls.c | 319 ++++++++++++++++++
.../bpf/progs/tailcall_map_compatible.c | 103 ++++++
2 files changed, 422 insertions(+)
create mode 100644 tools/testing/selftests/bpf/progs/tailcall_map_compatible.c
diff --git a/tools/testing/selftests/bpf/prog_tests/tailcalls.c b/tools/testing/selftests/bpf/prog_tests/tailcalls.c
index 7d534fde0af9..1063e73ecffa 100644
--- a/tools/testing/selftests/bpf/prog_tests/tailcalls.c
+++ b/tools/testing/selftests/bpf/prog_tests/tailcalls.c
@@ -9,6 +9,7 @@
#include "tc_bpf2bpf.skel.h"
#include "tailcall_fail.skel.h"
#include "tailcall_sleepable.skel.h"
+#include "tailcall_map_compatible.skel.h"
/* test_tailcall_1 checks basic functionality by patching multiple locations
* in a single program for a single tail call slot with nop->jmp, jmp->nop
@@ -1725,6 +1726,312 @@ static void test_tailcall_sleepable(void)
tailcall_sleepable__destroy(skel);
}
+#ifdef __x86_64__
+/* uprobe attach point */
+static noinline int trigger_uprobe_fn(int a)
+{
+ asm volatile ("" : "+r"(a));
+ return a;
+}
+
+static void test_map_compatible_update_kprobe_write_ctx(void)
+{
+ struct bpf_program *dummy, *kprobe, *fsession;
+ struct tailcall_map_compatible *skel;
+ struct bpf_link *link = NULL;
+ int err, prog_fd, key = 0;
+ struct bpf_map *map;
+ LIBBPF_OPTS(bpf_kprobe_opts, kprobe_opts);
+ LIBBPF_OPTS(bpf_uprobe_opts, uprobe_opts);
+ LIBBPF_OPTS(bpf_test_run_opts, topts);
+
+ skel = tailcall_map_compatible__open();
+ if (!ASSERT_OK_PTR(skel, "tailcall_map_compatible__open"))
+ return;
+
+ dummy = skel->progs.dummy_kprobe;
+ bpf_program__set_autoload(dummy, true);
+
+ kprobe = skel->progs.kprobe;
+ bpf_program__set_autoload(kprobe, true);
+
+ fsession = skel->progs.fsession_tailcall;
+ bpf_program__set_autoload(fsession, true);
+
+ skel->bss->data = 0xdeadbeef;
+
+ err = tailcall_map_compatible__load(skel);
+ if (!ASSERT_OK(err, "tailcall_map_compatible__load"))
+ goto out;
+
+ prog_fd = bpf_program__fd(kprobe);
+ map = skel->maps.prog_array_dummy;
+ err = bpf_map_update_elem(bpf_map__fd(map), &key, &prog_fd, BPF_ANY);
+ ASSERT_ERR(err, "bpf_map_update_elem kprobe");
+
+ skel->links.dummy_kprobe = bpf_program__attach_kprobe_opts(dummy, "bpf_fentry_test1",
+ &kprobe_opts);
+ if (!ASSERT_OK_PTR(skel->links.dummy_kprobe, "bpf_program__attach_kprobe_opts"))
+ goto out;
+
+ skel->links.fsession_tailcall = bpf_program__attach_trace(fsession);
+ if (!ASSERT_OK_PTR(skel->links.fsession_tailcall, "bpf_program__attach_trace"))
+ goto out;
+
+ err = bpf_prog_test_run_opts(bpf_program__fd(fsession), &topts);
+ ASSERT_OK(err, "bpf_prog_test_run_opts fsession");
+
+ ASSERT_EQ(topts.retval, 0, "dummy retval");
+ ASSERT_EQ(skel->bss->dummy_run, 1, "dummy_run");
+ ASSERT_EQ(skel->bss->data, 0xdeadbeef, "data");
+
+ err = bpf_map_delete_elem(bpf_map__fd(map), &key);
+ ASSERT_TRUE(!err || err == -ENOENT, "bpf_map_delete_elem");
+
+ uprobe_opts.func_name = "trigger_uprobe_fn";
+ link = bpf_program__attach_uprobe_opts(kprobe, 0, "/proc/self/exe", 0, &uprobe_opts);
+ if (!ASSERT_OK_PTR(link, "bpf_program__attach_uprobe_opts"))
+ goto out;
+
+ prog_fd = bpf_program__fd(dummy);
+ map = skel->maps.prog_array_kprobe;
+ err = bpf_map_update_elem(bpf_map__fd(map), &key, &prog_fd, BPF_ANY);
+ ASSERT_OK(err, "bpf_map_update_elem dummy");
+
+ ASSERT_EQ(trigger_uprobe_fn(1), 0, "trigger_uprobe_fn retval"); /* modified by uprobe */
+
+ ASSERT_EQ(topts.retval, 0, "dummy retval");
+ ASSERT_EQ(skel->bss->dummy_run, 2, "dummy_run");
+ ASSERT_EQ(skel->bss->data, 0, "data");
+
+out:
+ bpf_link__destroy(link);
+ tailcall_map_compatible__destroy(skel);
+}
+#else
+static void test_map_compatible_update_kprobe_write_ctx(void)
+{
+ test__skip();
+}
+#endif
+
+static void test_map_compatible_update_get_func_ip(void)
+{
+ struct tailcall_map_compatible *skel;
+ struct bpf_program *dummy, *fentry;
+ struct bpf_link *link = NULL;
+ int err, prog_fd, key = 0;
+ struct bpf_map *map;
+ __u64 func_ip;
+ LIBBPF_OPTS(bpf_test_run_opts, topts);
+
+ skel = tailcall_map_compatible__open();
+ if (!ASSERT_OK_PTR(skel, "tailcall_map_compatible__open"))
+ return;
+
+ dummy = skel->progs.dummy_fentry;
+ bpf_program__set_autoload(dummy, true);
+
+ fentry = skel->progs.fentry;
+ bpf_program__set_autoload(fentry, true);
+
+ err = tailcall_map_compatible__load(skel);
+ if (!ASSERT_OK(err, "tailcall_map_compatible__load"))
+ goto out;
+
+ link = bpf_program__attach_trace(fentry);
+ if (!ASSERT_OK_PTR(link, "bpf_program__attach_trace fentry"))
+ goto out;
+
+ err = bpf_prog_test_run_opts(bpf_program__fd(fentry), &topts);
+ if (!ASSERT_OK(err, "bpf_prog_test_run_opts fentry"))
+ goto out;
+
+ ASSERT_EQ(topts.retval, 0, "fentry retval");
+ ASSERT_EQ(skel->bss->dummy_run, 0, "dummy_run");
+ ASSERT_NEQ(skel->bss->data, 0, "data");
+ func_ip = skel->bss->data;
+
+ skel->bss->data = 0xdeadbeef;
+
+ err = bpf_link__destroy(link);
+ link = NULL;
+ if (!ASSERT_OK(err, "bpf_link__destroy"))
+ goto out;
+
+ prog_fd = bpf_program__fd(fentry);
+ map = skel->maps.prog_array_dummy;
+ err = bpf_map_update_elem(bpf_map__fd(map), &key, &prog_fd, BPF_ANY);
+ ASSERT_ERR(err, "bpf_map_update_elem fentry");
+
+ link = bpf_program__attach_trace(dummy);
+ if (!ASSERT_OK_PTR(link, "bpf_program__attach_trace dummy"))
+ goto out;
+
+ err = bpf_prog_test_run_opts(bpf_program__fd(dummy), &topts);
+ if (!ASSERT_OK(err, "bpf_prog_test_run_opts dummy"))
+ goto out;
+
+ ASSERT_EQ(topts.retval, 0, "dummy retval");
+ ASSERT_EQ(skel->bss->dummy_run, 1, "dummy_run");
+ ASSERT_EQ(skel->bss->data, 0xdeadbeef, "data");
+ ASSERT_NEQ(skel->bss->data, func_ip, "data func_ip");
+
+ err = bpf_link__destroy(link);
+ link = NULL;
+ if (!ASSERT_OK(err, "bpf_link__destroy"))
+ goto out;
+
+ err = bpf_map_delete_elem(bpf_map__fd(map), &key);
+ ASSERT_TRUE(!err || err == -ENOENT, "bpf_map_delete_elem");
+
+ prog_fd = bpf_program__fd(dummy);
+ map = skel->maps.prog_array_tracing;
+ err = bpf_map_update_elem(bpf_map__fd(map), &key, &prog_fd, BPF_ANY);
+ ASSERT_OK(err, "bpf_map_update_elem dummy");
+
+ link = bpf_program__attach_trace(fentry);
+ if (!ASSERT_OK_PTR(link, "bpf_program__attach_trace fentry"))
+ goto out;
+
+ err = bpf_prog_test_run_opts(bpf_program__fd(fentry), &topts);
+ if (!ASSERT_OK(err, "bpf_prog_test_run_opts fentry"))
+ goto out;
+
+ ASSERT_EQ(topts.retval, 0, "fentry retval");
+ ASSERT_EQ(skel->bss->dummy_run, 2, "dummy_run");
+ ASSERT_EQ(skel->bss->data, func_ip, "data");
+
+out:
+ bpf_link__destroy(link);
+ tailcall_map_compatible__destroy(skel);
+}
+
+static void test_map_compatible_update_session_cookie(void)
+{
+ struct tailcall_map_compatible *skel;
+ struct bpf_program *dummy, *fsession;
+ struct bpf_link *link = NULL;
+ int err, prog_fd, key = 0;
+ struct bpf_map *map;
+ LIBBPF_OPTS(bpf_test_run_opts, topts);
+
+ skel = tailcall_map_compatible__open();
+ if (!ASSERT_OK_PTR(skel, "tailcall_map_compatible__open"))
+ return;
+
+ dummy = skel->progs.dummy_fsession;
+ bpf_program__set_autoload(dummy, true);
+
+ fsession = skel->progs.fsession_cookie;
+ bpf_program__set_autoload(fsession, true);
+
+ skel->bss->data = 0xdeadbeef;
+
+ err = tailcall_map_compatible__load(skel);
+ if (err == -EOPNOTSUPP) {
+ test__skip();
+ goto out;
+ }
+ if (!ASSERT_OK(err, "tailcall_map_compatible__load"))
+ goto out;
+
+ prog_fd = bpf_program__fd(fsession);
+ map = skel->maps.prog_array_dummy;
+ err = bpf_map_update_elem(bpf_map__fd(map), &key, &prog_fd, BPF_ANY);
+ ASSERT_ERR(err, "bpf_map_update_elem fsession");
+
+ link = bpf_program__attach_trace(dummy);
+ if (!ASSERT_OK_PTR(link, "bpf_program__attach_trace dummy"))
+ goto out;
+
+ err = bpf_prog_test_run_opts(bpf_program__fd(dummy), &topts);
+ ASSERT_OK(err, "bpf_prog_test_run_opts dummy");
+
+ ASSERT_EQ(topts.retval, 0, "dummy retval");
+ ASSERT_EQ(skel->bss->dummy_run, 2, "dummy_run");
+ ASSERT_EQ(skel->bss->data, 0xdeadbeef, "data");
+
+ err = bpf_link__destroy(link);
+ link = NULL;
+ if (!ASSERT_OK(err, "bpf_link__destroy"))
+ goto out;
+
+ err = bpf_map_delete_elem(bpf_map__fd(map), &key);
+ ASSERT_TRUE(!err || err == -ENOENT, "bpf_map_delete_elem");
+
+ prog_fd = bpf_program__fd(dummy);
+ map = skel->maps.prog_array_tracing;
+ err = bpf_map_update_elem(bpf_map__fd(map), &key, &prog_fd, BPF_ANY);
+ ASSERT_OK(err, "bpf_map_update_elem dummy");
+
+ link = bpf_program__attach_trace(fsession);
+ if (!ASSERT_OK_PTR(link, "bpf_program__attach_trace fsession"))
+ goto out;
+
+ err = bpf_prog_test_run_opts(bpf_program__fd(fsession), &topts);
+ if (!ASSERT_OK(err, "bpf_prog_test_run_opts fsession"))
+ goto out;
+
+ ASSERT_EQ(topts.retval, 0, "fsession retval");
+ ASSERT_EQ(skel->bss->dummy_run, 4, "dummy_run");
+ ASSERT_EQ(skel->bss->data, 0, "data");
+
+out:
+ bpf_link__destroy(link);
+ tailcall_map_compatible__destroy(skel);
+}
+
+static void test_map_compatible_init(const char *prog1, const char *prog2)
+{
+ struct tailcall_map_compatible *skel;
+ struct bpf_program *p1, *p2;
+ int err;
+
+ skel = tailcall_map_compatible__open();
+ if (!ASSERT_OK_PTR(skel, "tailcall_map_compatible__open"))
+ return;
+
+ p1 = bpf_object__find_program_by_name(skel->obj, prog1);
+ if (!ASSERT_OK_PTR(p1, "bpf_object__find_program_by_name prog1"))
+ goto out;
+ bpf_program__set_autoload(p1, true);
+
+ p2 = bpf_object__find_program_by_name(skel->obj, prog2);
+ if (!ASSERT_OK_PTR(p2, "bpf_object__find_program_by_name prog2"))
+ goto out;
+ bpf_program__set_autoload(p2, true);
+
+ err = tailcall_map_compatible__load(skel);
+ if (err == -EOPNOTSUPP) {
+ test__skip();
+ goto out;
+ }
+ ASSERT_ERR(err, "tailcall_map_compatible__load");
+
+out:
+ tailcall_map_compatible__destroy(skel);
+}
+
+static void test_map_compatible_init_kprobe_write_ctx(void)
+{
+#ifdef __x86_64__
+ test_map_compatible_init("kprobe", "kprobe_tailcall");
+#else
+ test__skip();
+#endif
+}
+
+static void test_map_compatible_init_call_get_func_ip(void)
+{
+ test_map_compatible_init("fentry", "fentry_tailcall");
+}
+
+static void test_map_compatible_init_call_session_cookie(void)
+{
+ test_map_compatible_init("fsession_cookie", "fsession_tailcall");
+}
+
void test_tailcalls(void)
{
if (test__start_subtest("tailcall_1"))
@@ -1781,4 +2088,16 @@ void test_tailcalls(void)
test_tailcall_failure();
if (test__start_subtest("tailcall_sleepable"))
test_tailcall_sleepable();
+ if (test__start_subtest("map_compatible/update/kprobe_write_ctx"))
+ test_map_compatible_update_kprobe_write_ctx();
+ if (test__start_subtest("map_compatible/update/get_func_ip"))
+ test_map_compatible_update_get_func_ip();
+ if (test__start_subtest("map_compatible/update/session_cookie"))
+ test_map_compatible_update_session_cookie();
+ if (test__start_subtest("map_compatible/init/kprobe_write_ctx"))
+ test_map_compatible_init_kprobe_write_ctx();
+ if (test__start_subtest("map_compatible/init/call_get_func_ip"))
+ test_map_compatible_init_call_get_func_ip();
+ if (test__start_subtest("map_compatible/init/call_session_cookie"))
+ test_map_compatible_init_call_session_cookie();
}
diff --git a/tools/testing/selftests/bpf/progs/tailcall_map_compatible.c b/tools/testing/selftests/bpf/progs/tailcall_map_compatible.c
new file mode 100644
index 000000000000..991b799c89ac
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/tailcall_map_compatible.c
@@ -0,0 +1,103 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+
+char _license[] SEC("license") = "GPL";
+
+int dummy_run;
+u64 data;
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
+ __uint(max_entries, 1);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, sizeof(__u32));
+} prog_array_dummy SEC(".maps");
+
+#if defined(__TARGET_ARCH_x86)
+SEC("?kprobe")
+int dummy_kprobe(void *ctx)
+{
+ dummy_run++;
+ bpf_tail_call_static(ctx, &prog_array_dummy, 0);
+ return 0;
+}
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
+ __uint(max_entries, 1);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, sizeof(__u32));
+} prog_array_kprobe SEC(".maps");
+
+SEC("?kprobe")
+int kprobe(struct pt_regs *regs)
+{
+ data = regs->di = 0;
+ bpf_tail_call_static(regs, &prog_array_kprobe, 0);
+ return 0;
+}
+
+SEC("?kprobe")
+int kprobe_tailcall(struct pt_regs *regs)
+{
+ bpf_tail_call_static(regs, &prog_array_kprobe, 0);
+ return 0;
+}
+#endif
+
+SEC("?fentry/bpf_fentry_test1")
+int dummy_fentry(void *ctx)
+{
+ dummy_run++;
+ bpf_tail_call_static(ctx, &prog_array_dummy, 0);
+ return 0;
+}
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
+ __uint(max_entries, 1);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, sizeof(__u32));
+} prog_array_tracing SEC(".maps");
+
+SEC("?fentry/bpf_fentry_test1")
+int BPF_PROG(fentry)
+{
+ data = bpf_get_func_ip(ctx);
+ bpf_tail_call_static(ctx, &prog_array_tracing, 0);
+ return 0;
+}
+
+SEC("?fentry/bpf_fentry_test1")
+int BPF_PROG(fentry_tailcall)
+{
+ bpf_tail_call_static(ctx, &prog_array_tracing, 0);
+ return 0;
+}
+
+SEC("?fsession/bpf_fentry_test2")
+int dummy_fsession(void *ctx)
+{
+ dummy_run++;
+ bpf_tail_call_static(ctx, &prog_array_dummy, 0);
+ return 0;
+}
+
+SEC("?fsession/bpf_fentry_test2")
+int BPF_PROG(fsession_cookie)
+{
+ u64 *cookie = bpf_session_cookie(ctx);
+
+ data = *cookie = 0;
+ bpf_tail_call_static(ctx, &prog_array_tracing, 0);
+ return 0;
+}
+
+SEC("?fsession/bpf_fentry_test2")
+int BPF_PROG(fsession_tailcall)
+{
+ bpf_tail_call_static(ctx, &prog_array_tracing, 0);
+ return 0;
+}
--
2.52.0
^ permalink raw reply related [flat|nested] 11+ messages in thread