BPF List
 help / color / mirror / Atom feed
* [PATCH bpf] bpf: Fix invalid mem access when update_effective_progs fails in __cgroup_bpf_detach
@ 2025-11-05 10:03 Pu Lehui
  2025-11-05 23:33 ` Eduard Zingerman
  0 siblings, 1 reply; 4+ messages in thread
From: Pu Lehui @ 2025-11-05 10:03 UTC (permalink / raw)
  To: bpf
  Cc: Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko,
	Martin KaFai Lau, Eduard Zingerman, Song Liu, Yonghong Song,
	John Fastabend, KP Singh, Stanislav Fomichev, Hao Luo, Jiri Olsa,
	Alan Maguire, Pu Lehui, Pu Lehui

From: Pu Lehui <pulehui@huawei.com>

Syzkaller triggers an invalid memory access issue following fault
injection in update_effective_progs. The issue can be described as
follows:

__cgroup_bpf_detach
  update_effective_progs
    compute_effective_progs
      bpf_prog_array_alloc <-- fault inject
  purge_effective_progs
    /* change to dummy_bpf_prog */
    array->items[index] = &dummy_bpf_prog.prog

---softirq start---
__do_softirq
  ...
    __cgroup_bpf_run_filter_skb
      __bpf_prog_run_save_cb
        bpf_prog_run
          stats = this_cpu_ptr(prog->stats)
          /* invalid memory access */
          flags = u64_stats_update_begin_irqsave(&stats->syncp)
---softirq end---

  static_branch_dec(&cgroup_bpf_enabled_key[atype])

The reason is that fault injection caused update_effective_progs to fail
and then changed the original prog into dummy_bpf_prog.prog in
purge_effective_progs. Then a softirq came, and accessing the members of
dummy_bpf_prog.prog in the softirq triggers invalid mem access.

To fix it, we can skip executing the prog when it's dummy_bpf_prog.prog.

Fixes: 4c46091ee985 ("bpf: Fix KASAN use-after-free Read in compute_effective_progs")
Signed-off-by: Pu Lehui <pulehui@huawei.com>
---
 include/linux/bpf.h | 6 ++++++
 kernel/bpf/cgroup.c | 5 +++--
 kernel/bpf/core.c   | 5 ++---
 3 files changed, 11 insertions(+), 5 deletions(-)

diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index d808253f2e94..923687c47111 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -2212,6 +2212,12 @@ struct bpf_prog_array {
 	struct bpf_prog_array_item items[];
 };
 
+struct bpf_prog_dummy {
+	struct bpf_prog prog;
+};
+
+extern struct bpf_prog_dummy dummy_bpf_prog;
+
 struct bpf_empty_prog_array {
 	struct bpf_prog_array hdr;
 	struct bpf_prog *null_prog;
diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c
index 248f517d66d0..baad33b34cef 100644
--- a/kernel/bpf/cgroup.c
+++ b/kernel/bpf/cgroup.c
@@ -77,7 +77,9 @@ bpf_prog_run_array_cg(const struct cgroup_bpf *cgrp,
 	item = &array->items[0];
 	old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
 	while ((prog = READ_ONCE(item->prog))) {
-		run_ctx.prog_item = item;
+		run_ctx.prog_item = item++;
+		if (prog == &dummy_bpf_prog.prog)
+			continue;
 		func_ret = run_prog(prog, ctx);
 		if (ret_flags) {
 			*(ret_flags) |= (func_ret >> 1);
@@ -85,7 +87,6 @@ bpf_prog_run_array_cg(const struct cgroup_bpf *cgrp,
 		}
 		if (!func_ret && !IS_ERR_VALUE((long)run_ctx.retval))
 			run_ctx.retval = -EPERM;
-		item++;
 	}
 	bpf_reset_run_ctx(old_run_ctx);
 	rcu_read_unlock_migrate();
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index d595fe512498..eac8cc341725 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -2536,13 +2536,12 @@ static unsigned int __bpf_prog_ret1(const void *ctx,
 	return 1;
 }
 
-static struct bpf_prog_dummy {
-	struct bpf_prog prog;
-} dummy_bpf_prog = {
+struct bpf_prog_dummy dummy_bpf_prog = {
 	.prog = {
 		.bpf_func = __bpf_prog_ret1,
 	},
 };
+EXPORT_SYMBOL(dummy_bpf_prog);
 
 struct bpf_empty_prog_array bpf_empty_prog_array = {
 	.null_prog = NULL,
-- 
2.34.1


^ permalink raw reply related	[flat|nested] 4+ messages in thread

* Re: [PATCH bpf] bpf: Fix invalid mem access when update_effective_progs fails in __cgroup_bpf_detach
  2025-11-05 10:03 [PATCH bpf] bpf: Fix invalid mem access when update_effective_progs fails in __cgroup_bpf_detach Pu Lehui
@ 2025-11-05 23:33 ` Eduard Zingerman
  2025-11-06  2:14   ` Pu Lehui
  0 siblings, 1 reply; 4+ messages in thread
From: Eduard Zingerman @ 2025-11-05 23:33 UTC (permalink / raw)
  To: Pu Lehui, bpf
  Cc: Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko,
	Martin KaFai Lau, Song Liu, Yonghong Song, John Fastabend,
	KP Singh, Stanislav Fomichev, Hao Luo, Jiri Olsa, Alan Maguire,
	Pu Lehui

On Wed, 2025-11-05 at 10:03 +0000, Pu Lehui wrote:
> From: Pu Lehui <pulehui@huawei.com>
>
> Syzkaller triggers an invalid memory access issue following fault
> injection in update_effective_progs. The issue can be described as
> follows:
>
> __cgroup_bpf_detach
>   update_effective_progs
>     compute_effective_progs
>       bpf_prog_array_alloc <-- fault inject
>   purge_effective_progs
>     /* change to dummy_bpf_prog */
>     array->items[index] = &dummy_bpf_prog.prog
>
> ---softirq start---
> __do_softirq
>   ...
>     __cgroup_bpf_run_filter_skb
>       __bpf_prog_run_save_cb
>         bpf_prog_run
>           stats = this_cpu_ptr(prog->stats)
>           /* invalid memory access */
>           flags = u64_stats_update_begin_irqsave(&stats->syncp)
> ---softirq end---
>
>   static_branch_dec(&cgroup_bpf_enabled_key[atype])
>
> The reason is that fault injection caused update_effective_progs to fail
> and then changed the original prog into dummy_bpf_prog.prog in
> purge_effective_progs. Then a softirq came, and accessing the members of
> dummy_bpf_prog.prog in the softirq triggers invalid mem access.
>
> To fix it, we can skip executing the prog when it's dummy_bpf_prog.prog.
>
> Fixes: 4c46091ee985 ("bpf: Fix KASAN use-after-free Read in compute_effective_progs")
> Signed-off-by: Pu Lehui <pulehui@huawei.com>

Is there a link for syzkaller report?

[...]

> diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c
> index 248f517d66d0..baad33b34cef 100644
> --- a/kernel/bpf/cgroup.c
> +++ b/kernel/bpf/cgroup.c
> @@ -77,7 +77,9 @@ bpf_prog_run_array_cg(const struct cgroup_bpf *cgrp,
>  	item = &array->items[0];
>  	old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
>  	while ((prog = READ_ONCE(item->prog))) {
> -		run_ctx.prog_item = item;
> +		run_ctx.prog_item = item++;
> +		if (prog == &dummy_bpf_prog.prog)
> +			continue;

Will the following fix the issue?

    diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
    index d595fe512498..c7c9c78f171a 100644
    --- a/kernel/bpf/core.c
    +++ b/kernel/bpf/core.c
    @@ -2536,11 +2536,14 @@ static unsigned int __bpf_prog_ret1(const void *ctx,
            return 1;
     }

    +DEFINE_PER_CPU(struct bpf_prog_stats, __dummy_stats);
    +
     static struct bpf_prog_dummy {
            struct bpf_prog prog;
     } dummy_bpf_prog = {
            .prog = {
                    .bpf_func = __bpf_prog_ret1,
    +               .stats = &__dummy_stats,
            },
     };

Or that's too much memory wasted?

[...]

^ permalink raw reply	[flat|nested] 4+ messages in thread

* Re: [PATCH bpf] bpf: Fix invalid mem access when update_effective_progs fails in __cgroup_bpf_detach
  2025-11-05 23:33 ` Eduard Zingerman
@ 2025-11-06  2:14   ` Pu Lehui
  2025-11-10  7:21     ` Pu Lehui
  0 siblings, 1 reply; 4+ messages in thread
From: Pu Lehui @ 2025-11-06  2:14 UTC (permalink / raw)
  To: Eduard Zingerman, bpf
  Cc: Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko,
	Martin KaFai Lau, Song Liu, Yonghong Song, John Fastabend,
	KP Singh, Stanislav Fomichev, Hao Luo, Jiri Olsa, Alan Maguire,
	Pu Lehui



On 2025/11/6 7:33, Eduard Zingerman wrote:
> On Wed, 2025-11-05 at 10:03 +0000, Pu Lehui wrote:
>> From: Pu Lehui <pulehui@huawei.com>
>>
>> Syzkaller triggers an invalid memory access issue following fault
>> injection in update_effective_progs. The issue can be described as
>> follows:
>>
>> __cgroup_bpf_detach
>>    update_effective_progs
>>      compute_effective_progs
>>        bpf_prog_array_alloc <-- fault inject
>>    purge_effective_progs
>>      /* change to dummy_bpf_prog */
>>      array->items[index] = &dummy_bpf_prog.prog
>>
>> ---softirq start---
>> __do_softirq
>>    ...
>>      __cgroup_bpf_run_filter_skb
>>        __bpf_prog_run_save_cb
>>          bpf_prog_run
>>            stats = this_cpu_ptr(prog->stats)
>>            /* invalid memory access */
>>            flags = u64_stats_update_begin_irqsave(&stats->syncp)
>> ---softirq end---
>>
>>    static_branch_dec(&cgroup_bpf_enabled_key[atype])
>>
>> The reason is that fault injection caused update_effective_progs to fail
>> and then changed the original prog into dummy_bpf_prog.prog in
>> purge_effective_progs. Then a softirq came, and accessing the members of
>> dummy_bpf_prog.prog in the softirq triggers invalid mem access.
>>
>> To fix it, we can skip executing the prog when it's dummy_bpf_prog.prog.
>>
>> Fixes: 4c46091ee985 ("bpf: Fix KASAN use-after-free Read in compute_effective_progs")
>> Signed-off-by: Pu Lehui <pulehui@huawei.com>
> 
> Is there a link for syzkaller report?


Hi Eduard,

This is a local syzkaller test, and I have attached the report at the 
end of the email.

> 
> [...]
> 
>> diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c
>> index 248f517d66d0..baad33b34cef 100644
>> --- a/kernel/bpf/cgroup.c
>> +++ b/kernel/bpf/cgroup.c
>> @@ -77,7 +77,9 @@ bpf_prog_run_array_cg(const struct cgroup_bpf *cgrp,
>>   	item = &array->items[0];
>>   	old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
>>   	while ((prog = READ_ONCE(item->prog))) {
>> -		run_ctx.prog_item = item;
>> +		run_ctx.prog_item = item++;
>> +		if (prog == &dummy_bpf_prog.prog)
>> +			continue;
> 
> Will the following fix the issue?
> 
>      diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
>      index d595fe512498..c7c9c78f171a 100644
>      --- a/kernel/bpf/core.c
>      +++ b/kernel/bpf/core.c
>      @@ -2536,11 +2536,14 @@ static unsigned int __bpf_prog_ret1(const void *ctx,
>              return 1;
>       }
> 
>      +DEFINE_PER_CPU(struct bpf_prog_stats, __dummy_stats);
>      +
>       static struct bpf_prog_dummy {
>              struct bpf_prog prog;
>       } dummy_bpf_prog = {
>              .prog = {
>                      .bpf_func = __bpf_prog_ret1,
>      +               .stats = &__dummy_stats,
>              },
>       };
> 
> Or that's too much memory wasted?

In a 160-core system, it will waste 5K bytes for this dummy.

Also, this solution will not work for 5.10.0 or lower LTS versions, 
as bpf_prog_stats is embedded in struct bpf_prog_aux, and prog->aux 
is empty at this time, which will trigger a null pointer access.

> 
> [...]
Report:

[  120.618153][ T3281] FAULT_INJECTION: forcing a failure.
[  120.618153][ T3281] name failslab, interval 1, probability 0, space 
0, times 0
[  120.619946][ T3281] CPU: 1 UID: 0 PID: 3281 Comm: syz.3.476 Not 
tainted 6.18.0-rc4+ #48 PREEMPT(voluntary)
[  120.619967][ T3281] Hardware name: QEMU Standard PC (i440FX + PIIX, 
1996), BIOS rel-1.16.3-0-ga6ed6b701f0a-prebuilt.qemu.org 04/01/2014
[  120.619979][ T3281] Call Trace:
[  120.619984][ T3281]  <TASK>
[  120.619993][ T3281]  dump_stack_lvl+0xfa/0x120
[  120.620017][ T3281]  should_fail_ex+0x162/0x170
[  120.620049][ T3281]  should_failslab+0x49/0x70
[  120.620071][ T3281]  __kmalloc_noprof+0xcd/0x870
[  120.620092][ T3281]  ? bpf_prog_array_alloc+0x4b/0x60
[  120.620117][ T3281]  ? bpf_prog_array_alloc+0x4b/0x60
[  120.620133][ T3281]  ? prog_list_length.isra.0+0x71/0xa0
[  120.620155][ T3281]  bpf_prog_array_alloc+0x4b/0x60
[  120.620174][ T3281]  compute_effective_progs+0xc1/0x350
[  120.620212][ T3281]  update_effective_progs+0x61/0x1a0
[  120.620239][ T3281]  __cgroup_bpf_detach+0x147/0x340
[  120.620269][ T3281]  bpf_cgroup_link_release.part.0+0x44/0x2d0
[  120.620297][ T3281]  bpf_cgroup_link_release+0x26/0x30
[  120.620322][ T3281]  bpf_link_free+0x6e/0x120
[  120.620351][ T3281]  ? __pfx_bpf_link_release+0x10/0x10
[  120.620379][ T3281]  bpf_link_release+0x39/0x50
[  120.620416][ T3281]  __fput+0x1e3/0x510
[  120.620450][ T3281]  task_work_run+0x9e/0x100
[  120.620481][ T3281]  do_exit+0x2f9/0x820
[  120.620505][ T3281]  ? get_signal+0x4fc/0xf50
[  120.620525][ T3281]  ? __lock_release.isra.0+0x5d/0x170
[  120.620553][ T3281]  do_group_exit+0x59/0xf0
[  120.620582][ T3281]  get_signal+0xf1d/0xf50
[  120.620619][ T3281]  arch_do_signal_or_restart+0x34/0x1b0
[  120.620651][ T3281]  ? __x64_sys_futex+0xbe/0x300
[  120.620680][ T3281]  ? __x64_sys_futex+0xc7/0x300
[  120.620712][ T3281]  ? fput+0x5a/0xf0
[  120.620742][ T3281]  exit_to_user_mode_loop+0xa4/0x160
[  120.620773][ T3281]  do_syscall_64+0x1f2/0x5a0
[  120.620804][ T3281]  entry_SYSCALL_64_after_hwframe+0x76/0x7e
[  120.620823][ T3281] RIP: 0033:0x7f494a7b772d
[  120.620835][ T3281] Code: Unable to access opcode bytes at 
0x7f494a7b7703.
[  120.620843][ T3281] RSP: 002b:00007f494b61cc48 EFLAGS: 00000246 
ORIG_RAX: 00000000000000ca
[  120.620858][ T3281] RAX: fffffffffffffe00 RBX: 00007f494a9e5fa0 RCX: 
00007f494a7b772d
[  120.620870][ T3281] RDX: 0000000000000000 RSI: 0000000000000080 RDI: 
00007f494a9e5fa8
[  120.620880][ T3281] RBP: 0000000000000000 R08: 0000000000000000 R09: 
0000000000000000
[  120.620890][ T3281] R10: 0000000000000000 R11: 0000000000000246 R12: 
00007f494a9e5fa8
[  120.620901][ T3281] R13: 00007f494a9e5fac R14: 00007f494a9e6038 R15: 
00007f494b61cd40
...
[  120.653922][ T2249] BUG: unable to handle page fault for address: 
ffff8882b2cf2000
[  120.654996][ T2249] #PF: supervisor write access in kernel mode
[  120.655843][ T2249] #PF: error_code(0x0002) - not-present page
[  120.656678][ T2249] PGD e201067 P4D e201067 PUD 0
[  120.657380][ T2249] Oops: Oops: 0002 [#1] SMP PTI
[  120.658069][ T2249] CPU: 1 UID: 0 PID: 2249 Comm: kworker/1:5 Not 
tainted 6.18.0-rc4+ #48 PREEMPT(voluntary)
[  120.659466][ T2249] Hardware name: QEMU Standard PC (i440FX + PIIX, 
1996), BIOS rel-1.16.3-0-ga6ed6b701f0a-prebuilt.qemu.org 04/01/2014
[  120.661153][ T2249] Workqueue: mld mld_ifc_work
[  120.661821][ T2249] RIP: 0010:__bpf_prog_run_save_cb+0xe8/0x160
[  120.662682][ T2249] Code: 6a b3 ff 48 8d 73 60 48 89 ef 49 89 c5 48 
8b 43 48 e8 9c 81 6a 05 41 89 c4 e8 a4 6a b3 ff 48 8b 53 38 65 48 03 15 
b0 38 78 0b <48> ff 02 4c 29 e8 48 01 42 08 e9 79 ff ff ff e8 54 0db
[  120.665354][ T2249] RSP: 0018:ffffc90003f5fb38 EFLAGS: 00010286
[  120.666204][ T2249] RAX: 0000001c19e24793 RBX: ffffffff89710ee0 RCX: 
ffffffff813d923e
[  120.667309][ T2249] RDX: ffff8882b2cf2000 RSI: ffffffff813d9247 RDI: 
0000000000000001
[  120.668417][ T2249] RBP: ffff8881072b3c00 R08: 0000000000000001 R09: 
0000000000000000
[  120.669522][ T2249] R10: 0000000000000000 R11: 0000000000000000 R12: 
0000000000000001
[  120.670627][ T2249] R13: 0000001c19e246f0 R14: ffffffff89710ee0 R15: 
0000000000000000
[  120.671734][ T2249] FS:  0000000000000000(0000) 
GS:ffff8882b2cf2000(0000) knlGS:0000000000000000
[  120.672974][ T2249] CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
[  120.673899][ T2249] CR2: ffff8882b2cf2000 CR3: 000000010adce000 CR4: 
00000000000006f0
[  120.675001][ T2249] Call Trace:
[  120.675468][ T2249]  <TASK>
[  120.675882][ T2249]  ? lock_is_held_type+0x9e/0x120
[  120.676609][ T2249]  __cgroup_bpf_run_filter_skb+0x488/0xab0
[  120.677444][ T2249]  ip6_finish_output+0x37c/0x8b0
[  120.678148][ T2249]  ip6_output+0x135/0x4b0
[  120.678770][ T2249]  NF_HOOK.constprop.0+0x7f/0x580
[  120.679489][ T2249]  mld_sendpack+0x214/0x500
[  120.680138][ T2249]  mld_send_cr+0x38e/0x630
[  120.680780][ T2249]  mld_ifc_work+0x37/0x150
[  120.681416][ T2249]  process_one_work+0x341/0xa80
[  120.682121][ T2249]  worker_thread+0x2b0/0x560
[  120.682791][ T2249]  ? __pfx_worker_thread+0x10/0x10
[  120.683527][ T2249]  kthread+0x18f/0x370
[  120.684115][ T2249]  ? ret_from_fork+0x2c/0x340
[  120.684788][ T2249]  ? __pfx_kthread+0x10/0x10
[  120.685449][ T2249]  ret_from_fork+0x2d3/0x340
[  120.686105][ T2249]  ? __pfx_kthread+0x10/0x10
[  120.686765][ T2249]  ret_from_fork_asm+0x1a/0x30
[  120.687459][ T2249]  </TASK>
[  120.687888][ T2249] Modules linked in:
[  120.688447][ T2249] CR2: ffff8882b2cf2000
[  120.689031][ T2249] ---[ end trace 0000000000000000 ]---


^ permalink raw reply	[flat|nested] 4+ messages in thread

* Re: [PATCH bpf] bpf: Fix invalid mem access when update_effective_progs fails in __cgroup_bpf_detach
  2025-11-06  2:14   ` Pu Lehui
@ 2025-11-10  7:21     ` Pu Lehui
  0 siblings, 0 replies; 4+ messages in thread
From: Pu Lehui @ 2025-11-10  7:21 UTC (permalink / raw)
  To: Eduard Zingerman, Pu Lehui, bpf
  Cc: Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko,
	Martin KaFai Lau, Song Liu, Yonghong Song, John Fastabend,
	KP Singh, Stanislav Fomichev, Hao Luo, Jiri Olsa, Alan Maguire



On 2025/11/6 10:14, Pu Lehui wrote:
> 
> 
> On 2025/11/6 7:33, Eduard Zingerman wrote:
>> On Wed, 2025-11-05 at 10:03 +0000, Pu Lehui wrote:
>>> From: Pu Lehui <pulehui@huawei.com>
>>>
>>> Syzkaller triggers an invalid memory access issue following fault
>>> injection in update_effective_progs. The issue can be described as
>>> follows:
>>>
>>> __cgroup_bpf_detach
>>>    update_effective_progs
>>>      compute_effective_progs
>>>        bpf_prog_array_alloc <-- fault inject
>>>    purge_effective_progs
>>>      /* change to dummy_bpf_prog */
>>>      array->items[index] = &dummy_bpf_prog.prog
>>>
>>> ---softirq start---
>>> __do_softirq
>>>    ...
>>>      __cgroup_bpf_run_filter_skb
>>>        __bpf_prog_run_save_cb
>>>          bpf_prog_run
>>>            stats = this_cpu_ptr(prog->stats)
>>>            /* invalid memory access */
>>>            flags = u64_stats_update_begin_irqsave(&stats->syncp)
>>> ---softirq end---
>>>
>>>    static_branch_dec(&cgroup_bpf_enabled_key[atype])
>>>
>>> The reason is that fault injection caused update_effective_progs to fail
>>> and then changed the original prog into dummy_bpf_prog.prog in
>>> purge_effective_progs. Then a softirq came, and accessing the members of
>>> dummy_bpf_prog.prog in the softirq triggers invalid mem access.
>>>
>>> To fix it, we can skip executing the prog when it's dummy_bpf_prog.prog.
>>>
>>> Fixes: 4c46091ee985 ("bpf: Fix KASAN use-after-free Read in 
>>> compute_effective_progs")
>>> Signed-off-by: Pu Lehui <pulehui@huawei.com>
>>
>> Is there a link for syzkaller report?
> 
> 
> Hi Eduard,
> 
> This is a local syzkaller test, and I have attached the report at the 
> end of the email.
> 
>>
>> [...]
>>
>>> diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c
>>> index 248f517d66d0..baad33b34cef 100644
>>> --- a/kernel/bpf/cgroup.c
>>> +++ b/kernel/bpf/cgroup.c
>>> @@ -77,7 +77,9 @@ bpf_prog_run_array_cg(const struct cgroup_bpf *cgrp,
>>>       item = &array->items[0];
>>>       old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
>>>       while ((prog = READ_ONCE(item->prog))) {
>>> -        run_ctx.prog_item = item;
>>> +        run_ctx.prog_item = item++;
>>> +        if (prog == &dummy_bpf_prog.prog)
>>> +            continue;
>>
>> Will the following fix the issue?
>>
>>      diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
>>      index d595fe512498..c7c9c78f171a 100644
>>      --- a/kernel/bpf/core.c
>>      +++ b/kernel/bpf/core.c
>>      @@ -2536,11 +2536,14 @@ static unsigned int __bpf_prog_ret1(const 
>> void *ctx,
>>              return 1;
>>       }
>>
>>      +DEFINE_PER_CPU(struct bpf_prog_stats, __dummy_stats);
>>      +
>>       static struct bpf_prog_dummy {
>>              struct bpf_prog prog;
>>       } dummy_bpf_prog = {
>>              .prog = {
>>                      .bpf_func = __bpf_prog_ret1,
>>      +               .stats = &__dummy_stats,
>>              },
>>       };
>>
>> Or that's too much memory wasted?
> 
> In a 160-core system, it will waste 5K bytes for this dummy.
> 
> Also, this solution will not work for 5.10.0 or lower LTS versions, 
> as bpf_prog_stats is embedded in struct bpf_prog_aux, and prog->aux 
> is empty at this time, which will trigger a null pointer access.

Hi Eduard,

I've reviewed the kernel's usage of static per-CPU variables and believe 
that 32 bytes per core is not a significant overhead. Moreover, a similar 
approach can be applied to older versions. I've submitted v2 based 
on your suggestions.

https://lore.kernel.org/bpf/20251110071714.4069712-1-pulehui@huaweicloud.com/

Thanks.

^ permalink raw reply	[flat|nested] 4+ messages in thread

end of thread, other threads:[~2025-11-10  7:21 UTC | newest]

Thread overview: 4+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2025-11-05 10:03 [PATCH bpf] bpf: Fix invalid mem access when update_effective_progs fails in __cgroup_bpf_detach Pu Lehui
2025-11-05 23:33 ` Eduard Zingerman
2025-11-06  2:14   ` Pu Lehui
2025-11-10  7:21     ` Pu Lehui

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox