bpf.vger.kernel.org archive mirror
* [PATCH bpf-next v1 1/4] bpf: Save struct_ops instance pointer in bpf_prog_aux
@ 2025-06-09 23:27 Amery Hung
  2025-06-09 23:27 ` [PATCH bpf-next v1 2/4] bpf: Allow verifier to fixup kernel module kfuncs Amery Hung
                   ` (5 more replies)
  0 siblings, 6 replies; 12+ messages in thread
From: Amery Hung @ 2025-06-09 23:27 UTC (permalink / raw)
  To: bpf
  Cc: alexei.starovoitov, andrii, daniel, tj, martin.lau, ameryhung,
	kernel-team

Allow struct_ops implementors to infer the calling struct_ops instance
inside a kfunc through prog->aux->this_st_ops. A new field, flags, is
added to bpf_struct_ops. If BPF_STRUCT_OPS_F_THIS_PTR is set in flags,
a pointer to the struct_ops structure registered to the kernel (i.e.,
kvalue->data) will be saved to prog->aux->this_st_ops. To access it in
a kfunc, declare an argument with the __prog suffix [0]; the verifier
will fix up the argument with a pointer to prog->aux. this_st_ops is
protected by RCU and is valid until the struct_ops map is unregistered
or updated.

For a struct_ops map with BPF_STRUCT_OPS_F_THIS_PTR, cmpxchg is used to
make sure all programs in the map share the same this_st_ops. A program
can only be assigned to the current struct_ops map if it is not already
used in another struct_ops map that also sets BPF_STRUCT_OPS_F_THIS_PTR.
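
A minimal sketch of a kfunc reading the instance (it mirrors the
bpf_testmod kfunc added in patch 3 of this series, so bpf_testmod_ops3
and the kfunc name come from there):

__bpf_kfunc int bpf_kfunc_st_ops_test_this_ptr_impl(void *aux__prog)
{
	struct bpf_prog_aux *aux = aux__prog;
	struct bpf_testmod_ops3 *ops;
	int data = -1;

	rcu_read_lock();
	ops = rcu_dereference(aux->this_st_ops);
	if (ops)
		data = ops->data;
	rcu_read_unlock();

	return data;
}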

[0]
commit bc049387b41f ("bpf: Add support for __prog argument suffix to
pass in prog->aux")
https://lore.kernel.org/r/20250513142812.1021591-1-memxor@gmail.com

Signed-off-by: Amery Hung <ameryhung@gmail.com>
---
 include/linux/bpf.h         | 10 ++++++++++
 kernel/bpf/bpf_struct_ops.c | 38 +++++++++++++++++++++++++++++++++++++
 2 files changed, 48 insertions(+)

diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 83c56f40842b..25c3488ab926 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -1640,6 +1640,11 @@ struct bpf_prog_aux {
 		struct work_struct work;
 		struct rcu_head	rcu;
 	};
+	/* pointer to struct_ops struct registered to the kernel.
+	 * Only valid when BPF_STRUCT_OPS_F_THIS_PTR is set in
+	 * bpf_struct_ops.flags
+	 */
+	void __rcu *this_st_ops;
 };
 
 struct bpf_prog {
@@ -1788,6 +1793,10 @@ struct bpf_token {
 struct bpf_struct_ops_value;
 struct btf_member;
 
+#define BPF_STRUCT_OPS_F_THIS_PTR BIT(0)
+
+#define BPF_STRUCT_OPS_FLAG_MASK BPF_STRUCT_OPS_F_THIS_PTR
+
 #define BPF_STRUCT_OPS_MAX_NR_MEMBERS 64
 /**
  * struct bpf_struct_ops - A structure of callbacks allowing a subsystem to
@@ -1853,6 +1862,7 @@ struct bpf_struct_ops {
 	struct module *owner;
 	const char *name;
 	struct btf_func_model func_models[BPF_STRUCT_OPS_MAX_NR_MEMBERS];
+	u32 flags;
 };
 
 /* Every member of a struct_ops type has an instance even a member is not
diff --git a/kernel/bpf/bpf_struct_ops.c b/kernel/bpf/bpf_struct_ops.c
index 96113633e391..717a2cec0b0f 100644
--- a/kernel/bpf/bpf_struct_ops.c
+++ b/kernel/bpf/bpf_struct_ops.c
@@ -533,6 +533,17 @@ static void bpf_struct_ops_map_put_progs(struct bpf_struct_ops_map *st_map)
 	}
 }
 
+static void bpf_struct_ops_map_clear_this_ptr(struct bpf_struct_ops_map *st_map)
+{
+	u32 i;
+
+	for (i = 0; i < st_map->funcs_cnt; i++) {
+		if (!st_map->links[i])
+			break;
+		RCU_INIT_POINTER(st_map->links[i]->prog->aux->this_st_ops, NULL);
+	}
+}
+
 static void bpf_struct_ops_map_free_image(struct bpf_struct_ops_map *st_map)
 {
 	int i;
@@ -695,6 +706,9 @@ static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
 	if (flags)
 		return -EINVAL;
 
+	if (st_ops->flags & ~BPF_STRUCT_OPS_FLAG_MASK)
+		return -EINVAL;
+
 	if (*(u32 *)key != 0)
 		return -E2BIG;
 
@@ -801,6 +815,19 @@ static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
 			goto reset_unlock;
 		}
 
+		if (st_ops->flags & BPF_STRUCT_OPS_F_THIS_PTR) {
+			/* Make sure a struct_ops map will not have programs with
+			 * different this_st_ops. Once a program is associated with
+			 * a struct_ops map, it cannot be used in another struct_ops
+			 * map also with BPF_STRUCT_OPS_F_THIS_PTR
+			 */
+			if (cmpxchg(&prog->aux->this_st_ops, NULL, kdata)) {
+				bpf_prog_put(prog);
+				err = -EINVAL;
+				goto reset_unlock;
+			}
+		}
+
 		link = kzalloc(sizeof(*link), GFP_USER);
 		if (!link) {
 			bpf_prog_put(prog);
@@ -894,6 +921,8 @@ static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
 	bpf_struct_ops_map_free_ksyms(st_map);
 	bpf_struct_ops_map_free_image(st_map);
 	bpf_struct_ops_map_put_progs(st_map);
+	if (st_ops->flags & BPF_STRUCT_OPS_F_THIS_PTR)
+		bpf_struct_ops_map_clear_this_ptr(st_map);
 	memset(uvalue, 0, map->value_size);
 	memset(kvalue, 0, map->value_size);
 unlock:
@@ -919,6 +948,8 @@ static long bpf_struct_ops_map_delete_elem(struct bpf_map *map, void *key)
 	switch (prev_state) {
 	case BPF_STRUCT_OPS_STATE_INUSE:
 		st_map->st_ops_desc->st_ops->unreg(&st_map->kvalue.data, NULL);
+		if (st_map->st_ops_desc->st_ops->flags & BPF_STRUCT_OPS_F_THIS_PTR)
+			bpf_struct_ops_map_clear_this_ptr(st_map);
 		bpf_map_put(map);
 		return 0;
 	case BPF_STRUCT_OPS_STATE_TOBEFREE:
@@ -1194,6 +1225,8 @@ static void bpf_struct_ops_map_link_dealloc(struct bpf_link *link)
 		rcu_dereference_protected(st_link->map, true);
 	if (st_map) {
 		st_map->st_ops_desc->st_ops->unreg(&st_map->kvalue.data, link);
+		if (st_map->st_ops_desc->st_ops->flags & BPF_STRUCT_OPS_F_THIS_PTR)
+			bpf_struct_ops_map_clear_this_ptr(st_map);
 		bpf_map_put(&st_map->map);
 	}
 	kfree(st_link);
@@ -1268,6 +1301,9 @@ static int bpf_struct_ops_map_link_update(struct bpf_link *link, struct bpf_map
 	if (err)
 		goto err_out;
 
+	if (st_map->st_ops_desc->st_ops->flags & BPF_STRUCT_OPS_F_THIS_PTR)
+		bpf_struct_ops_map_clear_this_ptr(st_map);
+
 	bpf_map_inc(new_map);
 	rcu_assign_pointer(st_link->map, new_map);
 	bpf_map_put(old_map);
@@ -1294,6 +1330,8 @@ static int bpf_struct_ops_map_link_detach(struct bpf_link *link)
 	st_map = container_of(map, struct bpf_struct_ops_map, map);
 
 	st_map->st_ops_desc->st_ops->unreg(&st_map->kvalue.data, link);
+	if (st_map->st_ops_desc->st_ops->flags & BPF_STRUCT_OPS_F_THIS_PTR)
+		bpf_struct_ops_map_clear_this_ptr(st_map);
 
 	RCU_INIT_POINTER(st_link->map, NULL);
 	/* Pair with bpf_map_get() in bpf_struct_ops_link_create() or
-- 
2.47.1



* [PATCH bpf-next v1 2/4] bpf: Allow verifier to fixup kernel module kfuncs
  2025-06-09 23:27 [PATCH bpf-next v1 1/4] bpf: Save struct_ops instance pointer in bpf_prog_aux Amery Hung
@ 2025-06-09 23:27 ` Amery Hung
  2025-06-09 23:27 ` [PATCH bpf-next v1 3/4] selftests/bpf: Test accessing struct_ops this pointer Amery Hung
                   ` (4 subsequent siblings)
  5 siblings, 0 replies; 12+ messages in thread
From: Amery Hung @ 2025-06-09 23:27 UTC (permalink / raw)
  To: bpf
  Cc: alexei.starovoitov, andrii, daniel, tj, martin.lau, ameryhung,
	kernel-team

Allow the verifier to fix up kfuncs in kernel modules to support kfuncs
with __prog arguments. Currently, special kfuncs and kfuncs with __prog
arguments must be kernel kfuncs. As there is no safety reason that
prevents a kernel module kfunc from accessing prog->aux, allow it by
removing the kernel BTF check.
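
Patch 3 exercises this with a module kfunc that takes a __prog-suffixed
argument and is registered in bpf_testmod's kfunc id set (sketch, other
entries in the set omitted):

__bpf_kfunc int bpf_kfunc_st_ops_test_this_ptr_impl(void *aux__prog);

BTF_KFUNCS_START(bpf_testmod_check_kfunc_ids)
BTF_ID_FLAGS(func, bpf_kfunc_st_ops_test_this_ptr_impl)
BTF_KFUNCS_END(bpf_testmod_check_kfunc_ids)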

Signed-off-by: Amery Hung <ameryhung@gmail.com>
---
 kernel/bpf/verifier.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index f6d3655b3a7a..54adb479315b 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -21467,8 +21467,7 @@ static int fixup_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
 
 	if (!bpf_jit_supports_far_kfunc_call())
 		insn->imm = BPF_CALL_IMM(desc->addr);
-	if (insn->off)
-		return 0;
+
 	if (desc->func_id == special_kfunc_list[KF_bpf_obj_new_impl] ||
 	    desc->func_id == special_kfunc_list[KF_bpf_percpu_obj_new_impl]) {
 		struct btf_struct_meta *kptr_struct_meta = env->insn_aux_data[insn_idx].kptr_struct_meta;
-- 
2.47.1



* [PATCH bpf-next v1 3/4] selftests/bpf: Test accessing struct_ops this pointer
  2025-06-09 23:27 [PATCH bpf-next v1 1/4] bpf: Save struct_ops instance pointer in bpf_prog_aux Amery Hung
  2025-06-09 23:27 ` [PATCH bpf-next v1 2/4] bpf: Allow verifier to fixup kernel module kfuncs Amery Hung
@ 2025-06-09 23:27 ` Amery Hung
  2025-06-09 23:27 ` [PATCH bpf-next v1 4/4] selftests/bpf: Test accessing struct_ops this pointer in timer callback Amery Hung
                   ` (3 subsequent siblings)
  5 siblings, 0 replies; 12+ messages in thread
From: Amery Hung @ 2025-06-09 23:27 UTC (permalink / raw)
  To: bpf
  Cc: alexei.starovoitov, andrii, daniel, tj, martin.lau, ameryhung,
	kernel-team

Test the struct_ops this pointer using bpf_testmod_ops3. Add an integer
field, data, to bpf_testmod_ops3 and a kfunc that reads data through
aux->this_st_ops. Check that the kfunc reads the correct value of data.

Signed-off-by: Amery Hung <ameryhung@gmail.com>
---
 .../bpf/prog_tests/test_struct_ops_this_ptr.c | 10 ++++++
 .../progs/struct_ops_private_stack_recur.c    |  3 +-
 .../selftests/bpf/progs/struct_ops_this_ptr.c | 30 ++++++++++++++++
 .../selftests/bpf/test_kmods/bpf_testmod.c    | 36 +++++++++++++++++--
 .../selftests/bpf/test_kmods/bpf_testmod.h    |  1 +
 .../bpf/test_kmods/bpf_testmod_kfunc.h        |  3 ++
 6 files changed, 78 insertions(+), 5 deletions(-)
 create mode 100644 tools/testing/selftests/bpf/prog_tests/test_struct_ops_this_ptr.c
 create mode 100644 tools/testing/selftests/bpf/progs/struct_ops_this_ptr.c

diff --git a/tools/testing/selftests/bpf/prog_tests/test_struct_ops_this_ptr.c b/tools/testing/selftests/bpf/prog_tests/test_struct_ops_this_ptr.c
new file mode 100644
index 000000000000..6ef238a2050a
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/test_struct_ops_this_ptr.c
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <test_progs.h>
+
+#include "struct_ops_this_ptr.skel.h"
+
+void serial_test_struct_ops_this_ptr(void)
+{
+	RUN_TESTS(struct_ops_this_ptr);
+}
diff --git a/tools/testing/selftests/bpf/progs/struct_ops_private_stack_recur.c b/tools/testing/selftests/bpf/progs/struct_ops_private_stack_recur.c
index 31e58389bb8b..215b675ddf94 100644
--- a/tools/testing/selftests/bpf/progs/struct_ops_private_stack_recur.c
+++ b/tools/testing/selftests/bpf/progs/struct_ops_private_stack_recur.c
@@ -4,6 +4,7 @@
 #include <bpf/bpf_helpers.h>
 #include <bpf/bpf_tracing.h>
 #include "../test_kmods/bpf_testmod.h"
+#include "../test_kmods/bpf_testmod_kfunc.h"
 
 char _license[] SEC("license") = "GPL";
 
@@ -13,8 +14,6 @@ bool skip __attribute((__section__(".data"))) = false;
 bool skip = true;
 #endif
 
-void bpf_testmod_ops3_call_test_1(void) __ksym;
-
 int val_i, val_j;
 
 __noinline static int subprog2(int *a, int *b)
diff --git a/tools/testing/selftests/bpf/progs/struct_ops_this_ptr.c b/tools/testing/selftests/bpf/progs/struct_ops_this_ptr.c
new file mode 100644
index 000000000000..e5a6463c27ad
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/struct_ops_this_ptr.c
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include "../test_kmods/bpf_testmod.h"
+#include "../test_kmods/bpf_testmod_kfunc.h"
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+SEC("struct_ops")
+int BPF_PROG(test1)
+{
+	return bpf_kfunc_st_ops_test_this_ptr_impl(NULL);
+}
+
+SEC("syscall")
+__success __retval(1234)
+int syscall_this_ptr(void *ctx)
+{
+	return bpf_testmod_ops3_call_test_1();
+}
+
+SEC(".struct_ops.link")
+struct bpf_testmod_ops3 testmod_this_ptr = {
+	.test_1 = (void *)test1,
+	.data = 1234,
+};
+
+
diff --git a/tools/testing/selftests/bpf/test_kmods/bpf_testmod.c b/tools/testing/selftests/bpf/test_kmods/bpf_testmod.c
index 2e54b95ad898..f692ee43d25c 100644
--- a/tools/testing/selftests/bpf/test_kmods/bpf_testmod.c
+++ b/tools/testing/selftests/bpf/test_kmods/bpf_testmod.c
@@ -272,9 +272,9 @@ static void bpf_testmod_test_struct_ops3(void)
 		st_ops3->test_1();
 }
 
-__bpf_kfunc void bpf_testmod_ops3_call_test_1(void)
+__bpf_kfunc int bpf_testmod_ops3_call_test_1(void)
 {
-	st_ops3->test_1();
+	return st_ops3->test_1();
 }
 
 __bpf_kfunc void bpf_testmod_ops3_call_test_2(void)
@@ -1057,6 +1057,23 @@ __bpf_kfunc int bpf_kfunc_st_ops_inc10(struct st_ops_args *args)
 	return args->a;
 }
 
+__bpf_kfunc int bpf_kfunc_st_ops_test_this_ptr_impl(void *aux__prog)
+{
+	struct bpf_prog_aux *aux = (struct bpf_prog_aux *)aux__prog;
+	struct bpf_testmod_ops3 *ops;
+	int data = -1;
+
+	rcu_read_lock();
+	ops = rcu_dereference(aux->this_st_ops);
+	if (!ops)
+		goto out;
+
+	data = ops->data;
+out:
+	rcu_read_unlock();
+	return data;
+}
+
 BTF_KFUNCS_START(bpf_testmod_check_kfunc_ids)
 BTF_ID_FLAGS(func, bpf_testmod_test_mod_kfunc)
 BTF_ID_FLAGS(func, bpf_kfunc_call_test1)
@@ -1097,6 +1114,7 @@ BTF_ID_FLAGS(func, bpf_kfunc_st_ops_test_prologue, KF_TRUSTED_ARGS | KF_SLEEPABL
 BTF_ID_FLAGS(func, bpf_kfunc_st_ops_test_epilogue, KF_TRUSTED_ARGS | KF_SLEEPABLE)
 BTF_ID_FLAGS(func, bpf_kfunc_st_ops_test_pro_epilogue, KF_TRUSTED_ARGS | KF_SLEEPABLE)
 BTF_ID_FLAGS(func, bpf_kfunc_st_ops_inc10, KF_TRUSTED_ARGS)
+BTF_ID_FLAGS(func, bpf_kfunc_st_ops_test_this_ptr_impl)
 BTF_KFUNCS_END(bpf_testmod_check_kfunc_ids)
 
 static int bpf_testmod_ops_init(struct btf *btf)
@@ -1269,6 +1287,17 @@ static void test_1_recursion_detected(struct bpf_prog *prog)
 	       u64_stats_read(&stats->misses));
 }
 
+static int st_ops3_init_member(const struct btf_type *t,
+			       const struct btf_member *member,
+			       void *kdata, const void *udata)
+{
+	if (member->offset == offsetof(struct bpf_testmod_ops3, data) * 8) {
+		((struct bpf_testmod_ops3 *)kdata)->data = ((struct bpf_testmod_ops3 *)udata)->data;
+		return 1;
+	}
+	return 0;
+}
+
 static int st_ops3_check_member(const struct btf_type *t,
 				const struct btf_member *member,
 				const struct bpf_prog *prog)
@@ -1289,13 +1318,14 @@ static int st_ops3_check_member(const struct btf_type *t,
 struct bpf_struct_ops bpf_testmod_ops3 = {
 	.verifier_ops = &bpf_testmod_verifier_ops3,
 	.init = bpf_testmod_ops_init,
-	.init_member = bpf_testmod_ops_init_member,
+	.init_member = st_ops3_init_member,
 	.reg = st_ops3_reg,
 	.unreg = st_ops3_unreg,
 	.check_member = st_ops3_check_member,
 	.cfi_stubs = &__bpf_testmod_ops3,
 	.name = "bpf_testmod_ops3",
 	.owner = THIS_MODULE,
+	.flags = BPF_STRUCT_OPS_F_THIS_PTR,
 };
 
 static int bpf_test_mod_st_ops__test_prologue(struct st_ops_args *args)
diff --git a/tools/testing/selftests/bpf/test_kmods/bpf_testmod.h b/tools/testing/selftests/bpf/test_kmods/bpf_testmod.h
index c9fab51f16e2..13581657fe35 100644
--- a/tools/testing/selftests/bpf/test_kmods/bpf_testmod.h
+++ b/tools/testing/selftests/bpf/test_kmods/bpf_testmod.h
@@ -103,6 +103,7 @@ struct bpf_testmod_ops2 {
 struct bpf_testmod_ops3 {
 	int (*test_1)(void);
 	int (*test_2)(void);
+	int data;
 };
 
 struct st_ops_args {
diff --git a/tools/testing/selftests/bpf/test_kmods/bpf_testmod_kfunc.h b/tools/testing/selftests/bpf/test_kmods/bpf_testmod_kfunc.h
index b58817938deb..625b3f4b03f6 100644
--- a/tools/testing/selftests/bpf/test_kmods/bpf_testmod_kfunc.h
+++ b/tools/testing/selftests/bpf/test_kmods/bpf_testmod_kfunc.h
@@ -159,4 +159,7 @@ void bpf_kfunc_trusted_task_test(struct task_struct *ptr) __ksym;
 void bpf_kfunc_trusted_num_test(int *ptr) __ksym;
 void bpf_kfunc_rcu_task_test(struct task_struct *ptr) __ksym;
 
+int bpf_testmod_ops3_call_test_1(void) __ksym;
+int bpf_kfunc_st_ops_test_this_ptr_impl(void *aux__prog) __ksym;
+
 #endif /* _BPF_TESTMOD_KFUNC_H */
-- 
2.47.1



* [PATCH bpf-next v1 4/4] selftests/bpf: Test accessing struct_ops this pointer in timer callback
  2025-06-09 23:27 [PATCH bpf-next v1 1/4] bpf: Save struct_ops instance pointer in bpf_prog_aux Amery Hung
  2025-06-09 23:27 ` [PATCH bpf-next v1 2/4] bpf: Allow verifier to fixup kernel module kfuncs Amery Hung
  2025-06-09 23:27 ` [PATCH bpf-next v1 3/4] selftests/bpf: Test accessing struct_ops this pointer Amery Hung
@ 2025-06-09 23:27 ` Amery Hung
       [not found]   ` <8c4943cb40967d152abe032b4208a7cdd89b539da26783afaa61e37eb6663cbf@mail.kernel.org>
  2025-06-10 22:15 ` [PATCH bpf-next v1 1/4] bpf: Save struct_ops instance pointer in bpf_prog_aux Tejun Heo
                   ` (2 subsequent siblings)
  5 siblings, 1 reply; 12+ messages in thread
From: Amery Hung @ 2025-06-09 23:27 UTC (permalink / raw)
  To: bpf
  Cc: alexei.starovoitov, andrii, daniel, tj, martin.lau, ameryhung,
	kernel-team

Check accessing aux->this_st_ops in a timer callback. Make sure a kfunc
in a timer callback can get the correct this pointer while a struct_ops
map is attached, and NULL after the map is detached.

Signed-off-by: Amery Hung <ameryhung@gmail.com>
---
 .../bpf/prog_tests/test_struct_ops_this_ptr.c | 51 ++++++++++++++++
 .../bpf/progs/struct_ops_this_ptr_in_timer.c  | 60 +++++++++++++++++++
 .../selftests/bpf/test_kmods/bpf_testmod.c    |  1 +
 3 files changed, 112 insertions(+)
 create mode 100644 tools/testing/selftests/bpf/progs/struct_ops_this_ptr_in_timer.c

diff --git a/tools/testing/selftests/bpf/prog_tests/test_struct_ops_this_ptr.c b/tools/testing/selftests/bpf/prog_tests/test_struct_ops_this_ptr.c
index 6ef238a2050a..933f9310f462 100644
--- a/tools/testing/selftests/bpf/prog_tests/test_struct_ops_this_ptr.c
+++ b/tools/testing/selftests/bpf/prog_tests/test_struct_ops_this_ptr.c
@@ -3,8 +3,59 @@
 #include <test_progs.h>
 
 #include "struct_ops_this_ptr.skel.h"
+#include "struct_ops_this_ptr_in_timer.skel.h"
+
+static void test_struct_ops_this_ptr_in_timer_common(int timer_nsec, int expected_data)
+{
+	struct struct_ops_this_ptr_in_timer *skel;
+	LIBBPF_OPTS(bpf_test_run_opts, topts);
+	struct bpf_link *link;
+	int err, prog_fd;
+
+	skel = struct_ops_this_ptr_in_timer__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
+		return;
+
+	skel->bss->timer_nsec = timer_nsec;
+
+	link = bpf_map__attach_struct_ops(skel->maps.testmod_this_ptr);
+	if (!ASSERT_OK_PTR(link, "bpf_map__attach_struct_ops"))
+		goto out;
+
+	prog_fd = bpf_program__fd(skel->progs.syscall_this_ptr_in_timer);
+	err = bpf_prog_test_run_opts(prog_fd, &topts);
+	ASSERT_OK(err, "bpf_prog_test_run_opts");
+
+	bpf_link__destroy(link);
+
+	/* Check st_ops3_data after timer_cb runs */
+	while (!READ_ONCE(skel->bss->st_ops3_data))
+		sched_yield();
+	ASSERT_EQ(skel->bss->st_ops3_data, expected_data, "st_ops->data");
+out:
+	struct_ops_this_ptr_in_timer__destroy(skel);
+}
+
+static void test_struct_ops_this_ptr_in_timer(void)
+{
+	/* Run timer callback immediately */
+	test_struct_ops_this_ptr_in_timer_common(0, 1234);
+}
+
+static void test_struct_ops_this_ptr_in_timer_after_detach(void)
+{
+	/*
+	 * Run timer callback 0.1s after test run. By then the struct_ops map
+	 * should have been detached.
+	 */
+	test_struct_ops_this_ptr_in_timer_common(100000000, -1);
+}
 
 void serial_test_struct_ops_this_ptr(void)
 {
 	RUN_TESTS(struct_ops_this_ptr);
+	if (test__start_subtest("struct_ops_this_ptr_in_timer"))
+		test_struct_ops_this_ptr_in_timer();
+	if (test__start_subtest("struct_ops_this_ptr_in_timer_after_detach"))
+		test_struct_ops_this_ptr_in_timer_after_detach();
 }
diff --git a/tools/testing/selftests/bpf/progs/struct_ops_this_ptr_in_timer.c b/tools/testing/selftests/bpf/progs/struct_ops_this_ptr_in_timer.c
new file mode 100644
index 000000000000..e5f76945e967
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/struct_ops_this_ptr_in_timer.c
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include "../test_kmods/bpf_testmod.h"
+#include "../test_kmods/bpf_testmod_kfunc.h"
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+struct elem {
+	struct bpf_timer timer;
+};
+
+struct {
+	__uint(type, BPF_MAP_TYPE_ARRAY);
+	__uint(max_entries, 1);
+	__type(key, int);
+	__type(value, struct elem);
+} array_map SEC(".maps");
+
+int st_ops3_data;
+int timer_nsec;
+
+__noinline static int timer_cb(void *map, int *key, struct bpf_timer *timer)
+{
+	st_ops3_data = bpf_kfunc_st_ops_test_this_ptr_impl(NULL);
+	return 0;
+}
+
+SEC("struct_ops")
+int BPF_PROG(test1)
+{
+	struct bpf_timer *timer;
+	int key = 0;
+
+	timer = bpf_map_lookup_elem(&array_map, &key);
+	if (!timer)
+		return 0;
+
+	bpf_timer_init(timer, &array_map, 1);
+	bpf_timer_set_callback(timer, timer_cb);
+	bpf_timer_start(timer, timer_nsec, 0);
+	return 1;
+}
+
+SEC("syscall")
+__success __retval(1)
+int syscall_this_ptr_in_timer(void *ctx)
+{
+	return bpf_testmod_ops3_call_test_1();
+}
+
+SEC(".struct_ops.link")
+struct bpf_testmod_ops3 testmod_this_ptr = {
+	.test_1 = (void *)test1,
+	.data = 1234,
+};
+
+
diff --git a/tools/testing/selftests/bpf/test_kmods/bpf_testmod.c b/tools/testing/selftests/bpf/test_kmods/bpf_testmod.c
index f692ee43d25c..65953cd63b7c 100644
--- a/tools/testing/selftests/bpf/test_kmods/bpf_testmod.c
+++ b/tools/testing/selftests/bpf/test_kmods/bpf_testmod.c
@@ -1157,6 +1157,7 @@ static const struct bpf_verifier_ops bpf_testmod_verifier_ops = {
 };
 
 static const struct bpf_verifier_ops bpf_testmod_verifier_ops3 = {
+	.get_func_proto	 = bpf_base_func_proto,
 	.is_valid_access = bpf_testmod_ops_is_valid_access,
 };
 
-- 
2.47.1



* Re: [PATCH bpf-next v1 4/4] selftests/bpf: Test accessing struct_ops this pointer in timer callback
       [not found]   ` <8c4943cb40967d152abe032b4208a7cdd89b539da26783afaa61e37eb6663cbf@mail.kernel.org>
@ 2025-06-10 16:45     ` Amery Hung
  0 siblings, 0 replies; 12+ messages in thread
From: Amery Hung @ 2025-06-10 16:45 UTC (permalink / raw)
  To: bot+bpf-ci; +Cc: kernel-ci, andrii, daniel, martin.lau, bpf

On Mon, Jun 9, 2025 at 5:42 PM <bot+bpf-ci@kernel.org> wrote:
>
> Dear patch submitter,
>
> CI has tested the following submission:
> Status:     FAILURE
> Name:       [bpf-next,v1,4/4] selftests/bpf: Test accessing struct_ops this pointer in timer callback
> Patchwork:  https://patchwork.kernel.org/project/netdevbpf/list/?series=970053&state=*
> Matrix:     https://github.com/kernel-patches/bpf/actions/runs/15547147788
>
> Failed jobs:
> test_progs-aarch64-gcc-14: https://github.com/kernel-patches/bpf/actions/runs/15547147788/job/43771213307
> test_progs_cpuv4-aarch64-gcc-14: https://github.com/kernel-patches/bpf/actions/runs/15547147788/job/43771213280
> test_progs_no_alu32-aarch64-gcc-14: https://github.com/kernel-patches/bpf/actions/runs/15547147788/job/43771213268
> test_progs-s390x-gcc-14: https://github.com/kernel-patches/bpf/actions/runs/15547147788/job/43771045529
> test_progs_cpuv4-s390x-gcc-14: https://github.com/kernel-patches/bpf/actions/runs/15547147788/job/43771045522
> test_progs_no_alu32-s390x-gcc-14: https://github.com/kernel-patches/bpf/actions/runs/15547147788/job/43771045519
>
> First test_progs failure (test_progs-aarch64-gcc-14):
> #409 struct_ops_this_ptr
> tester_init:PASS:tester_log_buf 0 nsec
> process_subtest:PASS:obj_open_mem 0 nsec
> process_subtest:PASS:specs_alloc 0 nsec
> #409/1 struct_ops_this_ptr/test1
> run_subtest:PASS:obj_open_mem 0 nsec
> libbpf: prog 'test1': BPF program load failed: -EACCES
> libbpf: prog 'test1': failed to load: -EACCES
> libbpf: failed to load object 'struct_ops_this_ptr'
> run_subtest:FAIL:unexpected_load_failure unexpected error: -13 (errno 13)
> VERIFIER LOG:
> =============
> Private stack not supported by jit

The selftests failed to pass CI since bpf_testmod_ops3 is using a
private stack. I will change the selftests to be based on another
struct_ops in bpf_testmod.c.

> processed 0 insns (limit 1000000) max_states_per_insn 0 total_states 0 peak_states 0 mark_read 0
> =============
> #409/2 struct_ops_this_ptr/syscall_this_ptr
> run_subtest:PASS:obj_open_mem 0 nsec
> libbpf: prog 'test1': BPF program load failed: -EACCES
> libbpf: prog 'test1': -- BEGIN PROG LOAD LOG --
> Private stack not supported by jit
> processed 0 insns (limit 1000000) max_states_per_insn 0 total_states 0 peak_states 0 mark_read 0
> -- END PROG LOAD LOG --
> libbpf: prog 'test1': failed to load: -EACCES
> libbpf: failed to load object 'struct_ops_this_ptr'
> run_subtest:FAIL:unexpected_load_failure unexpected error: -13 (errno 13)
> VERIFIER LOG:
> =============
> =============
> #409/3 struct_ops_this_ptr/struct_ops_this_ptr_in_timer
> libbpf: prog 'test1': BPF program load failed: -EACCES
> libbpf: prog 'test1': -- BEGIN PROG LOAD LOG --
> Private stack not supported by jit
> processed 0 insns (limit 1000000) max_states_per_insn 0 total_states 0 peak_states 0 mark_read 0
> -- END PROG LOAD LOG --
> libbpf: prog 'test1': failed to load: -EACCES
> libbpf: failed to load object 'struct_ops_this_ptr_in_timer'
> libbpf: failed to load BPF skeleton 'struct_ops_this_ptr_in_timer': -EACCES
> test_struct_ops_this_ptr_in_timer_common:FAIL:skel_open_and_load unexpected error: -13
> #409/4 struct_ops_this_ptr/struct_ops_this_ptr_in_timer_after_detach
> libbpf: prog 'test1': BPF program load failed: -EACCES
> libbpf: prog 'test1': -- BEGIN PROG LOAD LOG --
> Private stack not supported by jit
> processed 0 insns (limit 1000000) max_states_per_insn 0 total_states 0 peak_states 0 mark_read 0
> -- END PROG LOAD LOG --
> libbpf: prog 'test1': failed to load: -EACCES
> libbpf: failed to load object 'struct_ops_this_ptr_in_timer'
> libbpf: failed to load BPF skeleton 'struct_ops_this_ptr_in_timer': -EACCES
> test_struct_ops_this_ptr_in_timer_common:FAIL:skel_open_and_load unexpected error: -13
>
>
> Please note: this email is coming from an unmonitored mailbox. If you have
> questions or feedback, please reach out to the Meta Kernel CI team at
> kernel-ci@meta.com.


* Re: [PATCH bpf-next v1 1/4] bpf: Save struct_ops instance pointer in bpf_prog_aux
  2025-06-09 23:27 [PATCH bpf-next v1 1/4] bpf: Save struct_ops instance pointer in bpf_prog_aux Amery Hung
                   ` (2 preceding siblings ...)
  2025-06-09 23:27 ` [PATCH bpf-next v1 4/4] selftests/bpf: Test accessing struct_ops this pointer in timer callback Amery Hung
@ 2025-06-10 22:15 ` Tejun Heo
  2025-06-11  7:16 ` kernel test robot
  2025-06-12 23:08 ` Andrii Nakryiko
  5 siblings, 0 replies; 12+ messages in thread
From: Tejun Heo @ 2025-06-10 22:15 UTC (permalink / raw)
  To: Amery Hung
  Cc: bpf, alexei.starovoitov, andrii, daniel, martin.lau, kernel-team

On Mon, Jun 09, 2025 at 04:27:43PM -0700, Amery Hung wrote:
> Allows struct_ops implementors to infer the calling struct_ops instance
> inside a kfunc through prog->aux->this_st_ops. A new field, flags, is
> added to bpf_struct_ops. If BPF_STRUCT_OPS_F_THIS_PTR is set in flags,
> a pointer to the struct_ops structure registered to the kernel (i.e.,
> kvalue->data) will be saved to prog->aux->this_st_ops. To access it in
> a kfunc, use BPF_STRUCT_OPS_F_THIS_PTR with __prog argument [0]. The
> verifier will fixup the argument with a pointer to prog->aux. this_st_ops
> is protected by rcu and is valid until a struct_ops map is unregistered
> updated.
> 
> For a struct_ops map with BPF_STRUCT_OPS_F_THIS_PTR, to make sure all
> programs in it have the same this_st_ops, cmpxchg is used. Only if a
> program is not already used in another struct_ops map also with
> BPF_STRUCT_OPS_F_THIS_PTR can it be assigned to the current struct_ops
> map.
> 
> [0]
> commit bc049387b41f ("bpf: Add support for __prog argument suffix to
> pass in prog->aux")
> https://lore.kernel.org/r/20250513142812.1021591-1-memxor@gmail.com
> 
> Signed-off-by: Amery Hung <ameryhung@gmail.com>

FWIW, this looks great from sched_ext POV.

Thanks.

-- 
tejun


* Re: [PATCH bpf-next v1 1/4] bpf: Save struct_ops instance pointer in bpf_prog_aux
  2025-06-09 23:27 [PATCH bpf-next v1 1/4] bpf: Save struct_ops instance pointer in bpf_prog_aux Amery Hung
                   ` (3 preceding siblings ...)
  2025-06-10 22:15 ` [PATCH bpf-next v1 1/4] bpf: Save struct_ops instance pointer in bpf_prog_aux Tejun Heo
@ 2025-06-11  7:16 ` kernel test robot
  2025-06-12 23:08 ` Andrii Nakryiko
  5 siblings, 0 replies; 12+ messages in thread
From: kernel test robot @ 2025-06-11  7:16 UTC (permalink / raw)
  To: Amery Hung, bpf
  Cc: oe-kbuild-all, alexei.starovoitov, andrii, daniel, tj, martin.lau,
	ameryhung, kernel-team

Hi Amery,

kernel test robot noticed the following build warnings:

[auto build test WARNING on bpf-next/master]

url:    https://github.com/intel-lab-lkp/linux/commits/Amery-Hung/bpf-Allow-verifier-to-fixup-kernel-module-kfuncs/20250610-072957
base:   https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next.git master
patch link:    https://lore.kernel.org/r/20250609232746.1030044-1-ameryhung%40gmail.com
patch subject: [PATCH bpf-next v1 1/4] bpf: Save struct_ops instance pointer in bpf_prog_aux
config: x86_64-randconfig-121-20250611 (https://download.01.org/0day-ci/archive/20250611/202506111427.2VNV9Biy-lkp@intel.com/config)
compiler: gcc-12 (Debian 12.2.0-14) 12.2.0
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20250611/202506111427.2VNV9Biy-lkp@intel.com/reproduce)

If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202506111427.2VNV9Biy-lkp@intel.com/

sparse warnings: (new ones prefixed by >>)
>> kernel/bpf/bpf_struct_ops.c:824:29: sparse: sparse: incorrect type in initializer (different address spaces) @@     expected void [noderef] __rcu *__new @@     got void *[assigned] kdata @@
   kernel/bpf/bpf_struct_ops.c:824:29: sparse:     expected void [noderef] __rcu *__new
   kernel/bpf/bpf_struct_ops.c:824:29: sparse:     got void *[assigned] kdata

vim +824 kernel/bpf/bpf_struct_ops.c

   686	
   687	static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
   688						   void *value, u64 flags)
   689	{
   690		struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
   691		const struct bpf_struct_ops_desc *st_ops_desc = st_map->st_ops_desc;
   692		const struct bpf_struct_ops *st_ops = st_ops_desc->st_ops;
   693		struct bpf_struct_ops_value *uvalue, *kvalue;
   694		const struct btf_type *module_type;
   695		const struct btf_member *member;
   696		const struct btf_type *t = st_ops_desc->type;
   697		struct bpf_tramp_links *tlinks;
   698		void *udata, *kdata;
   699		int prog_fd, err;
   700		u32 i, trampoline_start, image_off = 0;
   701		void *cur_image = NULL, *image = NULL;
   702		struct bpf_link **plink;
   703		struct bpf_ksym **pksym;
   704		const char *tname, *mname;
   705	
   706		if (flags)
   707			return -EINVAL;
   708	
   709		if (st_ops->flags & ~BPF_STRUCT_OPS_FLAG_MASK)
   710			return -EINVAL;
   711	
   712		if (*(u32 *)key != 0)
   713			return -E2BIG;
   714	
   715		err = check_zero_holes(st_map->btf, st_ops_desc->value_type, value);
   716		if (err)
   717			return err;
   718	
   719		uvalue = value;
   720		err = check_zero_holes(st_map->btf, t, uvalue->data);
   721		if (err)
   722			return err;
   723	
   724		if (uvalue->common.state || refcount_read(&uvalue->common.refcnt))
   725			return -EINVAL;
   726	
   727		tlinks = kcalloc(BPF_TRAMP_MAX, sizeof(*tlinks), GFP_KERNEL);
   728		if (!tlinks)
   729			return -ENOMEM;
   730	
   731		uvalue = (struct bpf_struct_ops_value *)st_map->uvalue;
   732		kvalue = (struct bpf_struct_ops_value *)&st_map->kvalue;
   733	
   734		mutex_lock(&st_map->lock);
   735	
   736		if (kvalue->common.state != BPF_STRUCT_OPS_STATE_INIT) {
   737			err = -EBUSY;
   738			goto unlock;
   739		}
   740	
   741		memcpy(uvalue, value, map->value_size);
   742	
   743		udata = &uvalue->data;
   744		kdata = &kvalue->data;
   745	
   746		plink = st_map->links;
   747		pksym = st_map->ksyms;
   748		tname = btf_name_by_offset(st_map->btf, t->name_off);
   749		module_type = btf_type_by_id(btf_vmlinux, st_ops_ids[IDX_MODULE_ID]);
   750		for_each_member(i, t, member) {
   751			const struct btf_type *mtype, *ptype;
   752			struct bpf_prog *prog;
   753			struct bpf_tramp_link *link;
   754			struct bpf_ksym *ksym;
   755			u32 moff;
   756	
   757			moff = __btf_member_bit_offset(t, member) / 8;
   758			mname = btf_name_by_offset(st_map->btf, member->name_off);
   759			ptype = btf_type_resolve_ptr(st_map->btf, member->type, NULL);
   760			if (ptype == module_type) {
   761				if (*(void **)(udata + moff))
   762					goto reset_unlock;
   763				*(void **)(kdata + moff) = BPF_MODULE_OWNER;
   764				continue;
   765			}
   766	
   767			err = st_ops->init_member(t, member, kdata, udata);
   768			if (err < 0)
   769				goto reset_unlock;
   770	
   771			/* The ->init_member() has handled this member */
   772			if (err > 0)
   773				continue;
   774	
   775			/* If st_ops->init_member does not handle it,
   776			 * we will only handle func ptrs and zero-ed members
   777			 * here.  Reject everything else.
   778			 */
   779	
   780			/* All non func ptr member must be 0 */
   781			if (!ptype || !btf_type_is_func_proto(ptype)) {
   782				u32 msize;
   783	
   784				mtype = btf_type_by_id(st_map->btf, member->type);
   785				mtype = btf_resolve_size(st_map->btf, mtype, &msize);
   786				if (IS_ERR(mtype)) {
   787					err = PTR_ERR(mtype);
   788					goto reset_unlock;
   789				}
   790	
   791				if (memchr_inv(udata + moff, 0, msize)) {
   792					err = -EINVAL;
   793					goto reset_unlock;
   794				}
   795	
   796				continue;
   797			}
   798	
   799			prog_fd = (int)(*(unsigned long *)(udata + moff));
   800			/* Similar check as the attr->attach_prog_fd */
   801			if (!prog_fd)
   802				continue;
   803	
   804			prog = bpf_prog_get(prog_fd);
   805			if (IS_ERR(prog)) {
   806				err = PTR_ERR(prog);
   807				goto reset_unlock;
   808			}
   809	
   810			if (prog->type != BPF_PROG_TYPE_STRUCT_OPS ||
   811			    prog->aux->attach_btf_id != st_ops_desc->type_id ||
   812			    prog->expected_attach_type != i) {
   813				bpf_prog_put(prog);
   814				err = -EINVAL;
   815				goto reset_unlock;
   816			}
   817	
   818			if (st_ops->flags & BPF_STRUCT_OPS_F_THIS_PTR) {
   819				/* Make sure a struct_ops map will not have programs with
   820				 * different this_st_ops. Once a program is associated with
   821				 * a struct_ops map, it cannot be used in another struct_ops
   822				 * map also with BPF_STRUCT_OPS_F_THIS_PTR
   823				 */
 > 824				if (cmpxchg(&prog->aux->this_st_ops, NULL, kdata)) {
   825					bpf_prog_put(prog);
   826					err = -EINVAL;
   827					goto reset_unlock;
   828				}
   829			}
   830	
   831			link = kzalloc(sizeof(*link), GFP_USER);
   832			if (!link) {
   833				bpf_prog_put(prog);
   834				err = -ENOMEM;
   835				goto reset_unlock;
   836			}
   837			bpf_link_init(&link->link, BPF_LINK_TYPE_STRUCT_OPS,
   838				      &bpf_struct_ops_link_lops, prog);
   839			*plink++ = &link->link;
   840	
   841			ksym = kzalloc(sizeof(*ksym), GFP_USER);
   842			if (!ksym) {
   843				err = -ENOMEM;
   844				goto reset_unlock;
   845			}
   846			*pksym++ = ksym;
   847	
   848			trampoline_start = image_off;
   849			err = bpf_struct_ops_prepare_trampoline(tlinks, link,
   850							&st_ops->func_models[i],
   851							*(void **)(st_ops->cfi_stubs + moff),
   852							&image, &image_off,
   853							st_map->image_pages_cnt < MAX_TRAMP_IMAGE_PAGES);
   854			if (err)
   855				goto reset_unlock;
   856	
   857			if (cur_image != image) {
   858				st_map->image_pages[st_map->image_pages_cnt++] = image;
   859				cur_image = image;
   860				trampoline_start = 0;
   861			}
   862	
   863			*(void **)(kdata + moff) = image + trampoline_start + cfi_get_offset();
   864	
   865			/* put prog_id to udata */
   866			*(unsigned long *)(udata + moff) = prog->aux->id;
   867	
   868			/* init ksym for this trampoline */
   869			bpf_struct_ops_ksym_init(tname, mname,
   870						 image + trampoline_start,
   871						 image_off - trampoline_start,
   872						 ksym);
   873		}
   874	
   875		if (st_ops->validate) {
   876			err = st_ops->validate(kdata);
   877			if (err)
   878				goto reset_unlock;
   879		}
   880		for (i = 0; i < st_map->image_pages_cnt; i++) {
   881			err = arch_protect_bpf_trampoline(st_map->image_pages[i],
   882							  PAGE_SIZE);
   883			if (err)
   884				goto reset_unlock;
   885		}
   886	
   887		if (st_map->map.map_flags & BPF_F_LINK) {
   888			err = 0;
   889			/* Let bpf_link handle registration & unregistration.
   890			 *
   891			 * Pair with smp_load_acquire() during lookup_elem().
   892			 */
   893			smp_store_release(&kvalue->common.state, BPF_STRUCT_OPS_STATE_READY);
   894			goto unlock;
   895		}
   896	
   897		err = st_ops->reg(kdata, NULL);
   898		if (likely(!err)) {
   899			/* This refcnt increment on the map here after
   900			 * 'st_ops->reg()' is secure since the state of the
   901			 * map must be set to INIT at this moment, and thus
   902			 * bpf_struct_ops_map_delete_elem() can't unregister
   903			 * or transition it to TOBEFREE concurrently.
   904			 */
   905			bpf_map_inc(map);
   906			/* Pair with smp_load_acquire() during lookup_elem().
   907			 * It ensures the above udata updates (e.g. prog->aux->id)
   908			 * can be seen once BPF_STRUCT_OPS_STATE_INUSE is set.
   909			 */
   910			smp_store_release(&kvalue->common.state, BPF_STRUCT_OPS_STATE_INUSE);
   911			goto unlock;
   912		}
   913	
   914		/* Error during st_ops->reg(). Can happen if this struct_ops needs to be
   915		 * verified as a whole, after all init_member() calls. Can also happen if
   916		 * there was a race in registering the struct_ops (under the same name) to
   917		 * a sub-system through different struct_ops's maps.
   918		 */
   919	
   920	reset_unlock:
   921		bpf_struct_ops_map_free_ksyms(st_map);
   922		bpf_struct_ops_map_free_image(st_map);
   923		bpf_struct_ops_map_put_progs(st_map);
   924		if (st_ops->flags & BPF_STRUCT_OPS_F_THIS_PTR)
   925			bpf_struct_ops_map_clear_this_ptr(st_map);
   926		memset(uvalue, 0, map->value_size);
   927		memset(kvalue, 0, map->value_size);
   928	unlock:
   929		kfree(tlinks);
   930		mutex_unlock(&st_map->lock);
   931		if (!err)
   932			bpf_struct_ops_map_add_ksyms(st_map);
   933		return err;
   934	}
   935	

-- 
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki


* Re: [PATCH bpf-next v1 1/4] bpf: Save struct_ops instance pointer in bpf_prog_aux
  2025-06-09 23:27 [PATCH bpf-next v1 1/4] bpf: Save struct_ops instance pointer in bpf_prog_aux Amery Hung
                   ` (4 preceding siblings ...)
  2025-06-11  7:16 ` kernel test robot
@ 2025-06-12 23:08 ` Andrii Nakryiko
  2025-06-18 22:18   ` Amery Hung
  5 siblings, 1 reply; 12+ messages in thread
From: Andrii Nakryiko @ 2025-06-12 23:08 UTC (permalink / raw)
  To: Amery Hung
  Cc: bpf, alexei.starovoitov, andrii, daniel, tj, martin.lau,
	kernel-team

On Mon, Jun 9, 2025 at 4:27 PM Amery Hung <ameryhung@gmail.com> wrote:
>
> Allows struct_ops implementors to infer the calling struct_ops instance
> inside a kfunc through prog->aux->this_st_ops. A new field, flags, is
> added to bpf_struct_ops. If BPF_STRUCT_OPS_F_THIS_PTR is set in flags,
> a pointer to the struct_ops structure registered to the kernel (i.e.,
> kvalue->data) will be saved to prog->aux->this_st_ops. To access it in
> a kfunc, use BPF_STRUCT_OPS_F_THIS_PTR with __prog argument [0]. The
> verifier will fixup the argument with a pointer to prog->aux. this_st_ops
> is protected by rcu and is valid until a struct_ops map is unregistered
> updated.
>
> For a struct_ops map with BPF_STRUCT_OPS_F_THIS_PTR, to make sure all
> programs in it have the same this_st_ops, cmpxchg is used. Only if a
> program is not already used in another struct_ops map also with
> BPF_STRUCT_OPS_F_THIS_PTR can it be assigned to the current struct_ops
> map.
>

Have you considered an alternative to storing this_st_ops in
bpf_prog_aux by setting it at runtime (in struct_ops trampoline) into
bpf_run_ctx (which I think all struct ops programs have set), and then
letting any struct_ops kfunc just access it through current (see other
uses of bpf_run_ctx, if you are unfamiliar). This would avoid all this
business with extra flags and passing bpf_prog_aux as an extra
argument.

There will be no "only one struct_ops for this BPF program" limitation
either: technically, you could have the same BPF program used from two
struct_ops maps just fine (even at the same time). Then depending on
which struct_ops is currently active, you'd have a corresponding
bpf_run_ctx's struct_ops pointer. It feels like a cleaner approach to
me.

And in the trampoline itself it would be a hard-coded single word
assignment on the stack, so should be basically a no-op from
performance point of view.
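
Roughly (a sketch; st_ops here is a hypothetical new field of struct
bpf_tramp_run_ctx that the struct_ops trampoline would set):

struct bpf_tramp_run_ctx *run_ctx;
void *ops;

run_ctx = container_of(current->bpf_ctx, struct bpf_tramp_run_ctx, run_ctx);
/* the struct_ops instance the running program was called through */
ops = run_ctx->st_ops;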

> [0]
> commit bc049387b41f ("bpf: Add support for __prog argument suffix to
> pass in prog->aux")
> https://lore.kernel.org/r/20250513142812.1021591-1-memxor@gmail.com
>
> Signed-off-by: Amery Hung <ameryhung@gmail.com>
> ---
>  include/linux/bpf.h         | 10 ++++++++++
>  kernel/bpf/bpf_struct_ops.c | 38 +++++++++++++++++++++++++++++++++++++
>  2 files changed, 48 insertions(+)
>

[...]


* Re: [PATCH bpf-next v1 1/4] bpf: Save struct_ops instance pointer in bpf_prog_aux
  2025-06-12 23:08 ` Andrii Nakryiko
@ 2025-06-18 22:18   ` Amery Hung
  2025-06-24 15:38     ` Andrii Nakryiko
  0 siblings, 1 reply; 12+ messages in thread
From: Amery Hung @ 2025-06-18 22:18 UTC (permalink / raw)
  To: Andrii Nakryiko
  Cc: bpf, alexei.starovoitov, andrii, daniel, tj, martin.lau,
	kernel-team

On Thu, Jun 12, 2025 at 4:08 PM Andrii Nakryiko
<andrii.nakryiko@gmail.com> wrote:
>
> On Mon, Jun 9, 2025 at 4:27 PM Amery Hung <ameryhung@gmail.com> wrote:
> >
> > Allows struct_ops implementors to infer the calling struct_ops instance
> > inside a kfunc through prog->aux->this_st_ops. A new field, flags, is
> > added to bpf_struct_ops. If BPF_STRUCT_OPS_F_THIS_PTR is set in flags,
> > a pointer to the struct_ops structure registered to the kernel (i.e.,
> > kvalue->data) will be saved to prog->aux->this_st_ops. To access it in
> > a kfunc, use BPF_STRUCT_OPS_F_THIS_PTR with __prog argument [0]. The
> > verifier will fixup the argument with a pointer to prog->aux. this_st_ops
> > is protected by rcu and is valid until a struct_ops map is unregistered
> > updated.
> >
> > For a struct_ops map with BPF_STRUCT_OPS_F_THIS_PTR, to make sure all
> > programs in it have the same this_st_ops, cmpxchg is used. Only if a
> > program is not already used in another struct_ops map also with
> > BPF_STRUCT_OPS_F_THIS_PTR can it be assigned to the current struct_ops
> > map.
> >
>
> Have you considered an alternative to storing this_st_ops in
> bpf_prog_aux by setting it at runtime (in struct_ops trampoline) into
> bpf_run_ctx (which I think all struct ops programs have set), and then
> letting any struct_ops kfunc just access it through current (see other
> uses of bpf_run_ctx, if you are unfamiliar). This would avoid all this
> business with extra flags and passing bpf_prog_aux as an extra
> argument.
>

I didn't know this. Thanks for suggesting an alternative!

> There will be no "only one struct_ops for this BPF program" limitation
> either: technically, you could have the same BPF program used from two
> struct_ops maps just fine (even at the same time). Then depending on
> which struct_ops is currently active, you'd have a corresponding
> bpf_run_ctx's struct_ops pointer. It feels like a cleaner approach to
> me.
>

This is a cleaner approach for struct_ops implementors. To make it work
for kfuncs called in a timer callback, I think prog->aux->this_st_ops is
still needed, but at least we can unify how to get this_st_ops in
kfuncs, in a way that does not require adding __prog to every kfunc.

+enum bpf_run_ctx_type {
+        BPF_CG_RUN_CTX = 0,
+        BPF_TRACE_RUN_CTX,
+        BPF_TRAMP_RUN_CTX,
+        BPF_TIMER_RUN_CTX,
+};

struct bpf_run_ctx {
+        enum bpf_run_ctx_type type;
};

+struct bpf_timer_run_ctx {
+        struct bpf_run_ctx run_ctx;
+        struct bpf_prog_aux *aux;
+};

struct bpf_tramp_run_ctx {
        ...
+        void *st_ops;
};

In bpf_struct_ops_prepare_trampoline(), the st_ops assignment will be
emitted to the trampoline.

In bpf_timer_cb(), prepare a bpf_timer_run_ctx, where aux is the
callback prog's aux (so st_ops can later be read from aux->this_st_ops),
and set current->bpf_ctx.

Finally, in kfuncs that want to know the current struct_ops, call this
new function below:

+void *bpf_struct_ops_current_st_ops(void)
+{
+        struct bpf_prog_aux *aux;
+
+        if (!current->bpf_ctx)
+                return NULL;
+
+        switch (current->bpf_ctx->type) {
+        case BPF_TRAMP_RUN_CTX:
+                return ((struct bpf_tramp_run_ctx *)current->bpf_ctx)->st_ops;
+        case BPF_TIMER_RUN_CTX:
+                aux = ((struct bpf_timer_run_ctx *)current->bpf_ctx)->aux;
+                return rcu_dereference(aux->this_st_ops);
+        }
+        return NULL;
+}
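
A kfunc like the one in bpf_testmod (patch 3) could then drop the __prog
argument, e.g. (sketch; the kfunc name here is made up):

__bpf_kfunc int bpf_kfunc_st_ops_test_this_ptr(void)
{
	struct bpf_testmod_ops3 *ops;
	int data = -1;

	rcu_read_lock();
	ops = bpf_struct_ops_current_st_ops();
	if (ops)
		data = ops->data;
	rcu_read_unlock();

	return data;
}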

What do you think?

> And in the trampoline itself it would be a hard-coded single word
> assignment on the stack, so should be basically a no-op from
> performance point of view.
>
> > [0]
> > commit bc049387b41f ("bpf: Add support for __prog argument suffix to
> > pass in prog->aux")
> > https://lore.kernel.org/r/20250513142812.1021591-1-memxor@gmail.com
> >
> > Signed-off-by: Amery Hung <ameryhung@gmail.com>
> > ---
> >  include/linux/bpf.h         | 10 ++++++++++
> >  kernel/bpf/bpf_struct_ops.c | 38 +++++++++++++++++++++++++++++++++++++
> >  2 files changed, 48 insertions(+)
> >
>
> [...]


* Re: [PATCH bpf-next v1 1/4] bpf: Save struct_ops instance pointer in bpf_prog_aux
  2025-06-18 22:18   ` Amery Hung
@ 2025-06-24 15:38     ` Andrii Nakryiko
  2025-06-24 15:59       ` Alexei Starovoitov
  0 siblings, 1 reply; 12+ messages in thread
From: Andrii Nakryiko @ 2025-06-24 15:38 UTC (permalink / raw)
  To: Amery Hung
  Cc: bpf, alexei.starovoitov, andrii, daniel, tj, martin.lau,
	kernel-team

On Wed, Jun 18, 2025 at 3:19 PM Amery Hung <ameryhung@gmail.com> wrote:
>
> On Thu, Jun 12, 2025 at 4:08 PM Andrii Nakryiko
> <andrii.nakryiko@gmail.com> wrote:
> >
> > On Mon, Jun 9, 2025 at 4:27 PM Amery Hung <ameryhung@gmail.com> wrote:
> > >
> > > Allows struct_ops implementors to infer the calling struct_ops instance
> > > inside a kfunc through prog->aux->this_st_ops. A new field, flags, is
> > > added to bpf_struct_ops. If BPF_STRUCT_OPS_F_THIS_PTR is set in flags,
> > > a pointer to the struct_ops structure registered to the kernel (i.e.,
> > > kvalue->data) will be saved to prog->aux->this_st_ops. To access it in
> > > a kfunc, use BPF_STRUCT_OPS_F_THIS_PTR with __prog argument [0]. The
> > > verifier will fixup the argument with a pointer to prog->aux. this_st_ops
> > > is protected by rcu and is valid until a struct_ops map is unregistered
> > > updated.
> > >
> > > For a struct_ops map with BPF_STRUCT_OPS_F_THIS_PTR, to make sure all
> > > programs in it have the same this_st_ops, cmpxchg is used. Only if a
> > > program is not already used in another struct_ops map also with
> > > BPF_STRUCT_OPS_F_THIS_PTR can it be assigned to the current struct_ops
> > > map.
> > >
> >
> > Have you considered an alternative to storing this_st_ops in
> > bpf_prog_aux by setting it at runtime (in struct_ops trampoline) into
> > bpf_run_ctx (which I think all struct ops programs have set), and then
> > letting any struct_ops kfunc just access it through current (see other
> > uses of bpf_run_ctx, if you are unfamiliar). This would avoid all this
> > business with extra flags and passing bpf_prog_aux as an extra
> > argument.
> >
>
> I didn't know this. Thanks for suggesting an alternative!
>
> > There will be no "only one struct_ops for this BPF program" limitation
> > either: technically, you could have the same BPF program used from two
> > struct_ops maps just fine (even at the same time). Then depending on
> > which struct_ops is currently active, you'd have a corresponding
> > bpf_run_ctx's struct_ops pointer. It feels like a cleaner approach to
> > me.
> >
>
> This is a cleaner approach for struct_ops operators. To make it work
> for kfuncs called in timer callback, I think prog->aux->st_ops is
> still needed, but at least we can unify how to get this_st_ops in
> kfunc, in a way that does not requires adding __prog to every kfuncs.
>
> +enum bpf_run_ctx_type {
> +        BPF_CG_RUN_CTX = 0,
> +        BPF_TRACE_RUN_CTX,
> +        BPF_TRAMP_RUN_CTX,
> +        BPF_TIMER_RUN_CTX,
> +};
>
> struct bpf_run_ctx {
> +        enum bpf_run_ctx_type type;
> };
>
> +struct bpf_timer_run_ctx {
> +        struct bpf_prog_aux *aux;
> +};
>
> struct bpf_tramp_run_ctx {
>         ...
> +        void *st_ops;
> };
>
> In bpf_struct_ops_prepare_trampoline(), the st_ops assignment will be
> emitted to the trampoline.
>
> In bpf_timer_cb(), prepare bpf_timer_run_ctx, where st_ops comes from
> prog->aux->this_st_ops and set current->bpf_ctx.
>
> Finally, in kfuncs that want to know the current struct_ops, call this
> new function below:
>
> +void *bpf_struct_ops_current_st_ops(void)
> +{
> +        struct bpf_prog_aux aux;
> +
> +        if (!current->bpf_ctx)
> +                return NULL;
> +
> +        switch(current->bpf_ctx->type) {
> +        case BPF_TRAMP_RUN_CTX:
> +                return (struct bpf_tramp_run_ctx *)(current->bpf_ctx)->st_ops;
> +        case BPF_TIMER_RUN_CTX:
> +                aux = (struct bpf_timer_run_ctx *)(current->bpf_ctx)->aux;
> +                return rcu_dereference(aux->this_st_ops);
> +        }
> +        return NULL;
> +}
>
> What do you think?

I'm not sure I particularly like different ways to get this st_ops
pointer depending on whether we are in an async callback or not. So if
bpf_prog_aux is inevitable, I'd just stick to that. As far as
bpf_run_ctx goes, though, instead of bpf_run_ctx_type, shall we just put
a `struct bpf_prog_aux *` pointer into the common struct bpf_run_ctx and
always set it for all program types that support run_ctx?
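
I.e., something like (sketch):

struct bpf_run_ctx {
	struct bpf_prog_aux *aux;
};

with every entry point that installs a run_ctx also filling in aux for
the program it is about to run.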

>
> > And in the trampoline itself it would be a hard-coded single word
> > assignment on the stack, so should be basically a no-op from
> > performance point of view.
> >
> > > [0]
> > > commit bc049387b41f ("bpf: Add support for __prog argument suffix to
> > > pass in prog->aux")
> > > https://lore.kernel.org/r/20250513142812.1021591-1-memxor@gmail.com
> > >
> > > Signed-off-by: Amery Hung <ameryhung@gmail.com>
> > > ---
> > >  include/linux/bpf.h         | 10 ++++++++++
> > >  kernel/bpf/bpf_struct_ops.c | 38 +++++++++++++++++++++++++++++++++++++
> > >  2 files changed, 48 insertions(+)
> > >
> >
> > [...]


* Re: [PATCH bpf-next v1 1/4] bpf: Save struct_ops instance pointer in bpf_prog_aux
  2025-06-24 15:38     ` Andrii Nakryiko
@ 2025-06-24 15:59       ` Alexei Starovoitov
  2025-06-24 19:41         ` Andrii Nakryiko
  0 siblings, 1 reply; 12+ messages in thread
From: Alexei Starovoitov @ 2025-06-24 15:59 UTC (permalink / raw)
  To: Andrii Nakryiko
  Cc: Amery Hung, bpf, Andrii Nakryiko, Daniel Borkmann, Tejun Heo,
	Martin KaFai Lau, Kernel Team

On Tue, Jun 24, 2025 at 8:38 AM Andrii Nakryiko
<andrii.nakryiko@gmail.com> wrote:
>
> On Wed, Jun 18, 2025 at 3:19 PM Amery Hung <ameryhung@gmail.com> wrote:
> >
> > On Thu, Jun 12, 2025 at 4:08 PM Andrii Nakryiko
> > <andrii.nakryiko@gmail.com> wrote:
> > >
> > > On Mon, Jun 9, 2025 at 4:27 PM Amery Hung <ameryhung@gmail.com> wrote:
> > > >
> > > > Allows struct_ops implementors to infer the calling struct_ops instance
> > > > inside a kfunc through prog->aux->this_st_ops. A new field, flags, is
> > > > added to bpf_struct_ops. If BPF_STRUCT_OPS_F_THIS_PTR is set in flags,
> > > > a pointer to the struct_ops structure registered to the kernel (i.e.,
> > > > kvalue->data) will be saved to prog->aux->this_st_ops. To access it in
> > > > a kfunc, use BPF_STRUCT_OPS_F_THIS_PTR with __prog argument [0]. The
> > > > verifier will fixup the argument with a pointer to prog->aux. this_st_ops
> > > > is protected by rcu and is valid until a struct_ops map is unregistered
> > > > updated.
> > > >
> > > > For a struct_ops map with BPF_STRUCT_OPS_F_THIS_PTR, to make sure all
> > > > programs in it have the same this_st_ops, cmpxchg is used. Only if a
> > > > program is not already used in another struct_ops map also with
> > > > BPF_STRUCT_OPS_F_THIS_PTR can it be assigned to the current struct_ops
> > > > map.
> > > >
> > >
> > > Have you considered an alternative to storing this_st_ops in
> > > bpf_prog_aux by setting it at runtime (in struct_ops trampoline) into
> > > bpf_run_ctx (which I think all struct ops programs have set), and then
> > > letting any struct_ops kfunc just access it through current (see other
> > > uses of bpf_run_ctx, if you are unfamiliar). This would avoid all this
> > > business with extra flags and passing bpf_prog_aux as an extra
> > > argument.
> > >
> >
> > I didn't know this. Thanks for suggesting an alternative!
> >
> > > There will be no "only one struct_ops for this BPF program" limitation
> > > either: technically, you could have the same BPF program used from two
> > > struct_ops maps just fine (even at the same time). Then depending on
> > > which struct_ops is currently active, you'd have a corresponding
> > > bpf_run_ctx's struct_ops pointer. It feels like a cleaner approach to
> > > me.
> > >
> >
> > This is a cleaner approach for struct_ops operators. To make it work
> > for kfuncs called in timer callback, I think prog->aux->st_ops is
> > still needed, but at least we can unify how to get this_st_ops in
> > kfunc, in a way that does not requires adding __prog to every kfuncs.
> >
> > +enum bpf_run_ctx_type {
> > +        BPF_CG_RUN_CTX = 0,
> > +        BPF_TRACE_RUN_CTX,
> > +        BPF_TRAMP_RUN_CTX,
> > +        BPF_TIMER_RUN_CTX,
> > +};
> >
> > struct bpf_run_ctx {
> > +        enum bpf_run_ctx_type type;
> > };
> >
> > +struct bpf_timer_run_ctx {
> > +        struct bpf_prog_aux *aux;
> > +};
> >
> > struct bpf_tramp_run_ctx {
> >         ...
> > +        void *st_ops;
> > };
> >
> > In bpf_struct_ops_prepare_trampoline(), the st_ops assignment will be
> > emitted to the trampoline.
> >
> > In bpf_timer_cb(), prepare bpf_timer_run_ctx, where st_ops comes from
> > prog->aux->this_st_ops and set current->bpf_ctx.
> >
> > Finally, in kfuncs that want to know the current struct_ops, call this
> > new function below:
> >
> > +void *bpf_struct_ops_current_st_ops(void)
> > +{
> > +        struct bpf_prog_aux aux;
> > +
> > +        if (!current->bpf_ctx)
> > +                return NULL;
> > +
> > +        switch(current->bpf_ctx->type) {
> > +        case BPF_TRAMP_RUN_CTX:
> > +                return (struct bpf_tramp_run_ctx *)(current->bpf_ctx)->st_ops;
> > +        case BPF_TIMER_RUN_CTX:
> > +                aux = (struct bpf_timer_run_ctx *)(current->bpf_ctx)->aux;
> > +                return rcu_dereference(aux->this_st_ops);
> > +        }
> > +        return NULL;
> > +}
> >
> > What do you think?
>
> I'm not sure I particularly like different ways to get this st_ops
> pointer depending whether we are in an async callback or not. So if
> bpf_prog_aux is inevitable, I'd just stick to that. As far as
> bpf_run_ctx, though, instead of bpf_run_ctx_type, shall we just put
> `struct bpf_prog_aux *` pointer into common struct bpf_run_ctx and
> always set it for all program types that support run_ctx?

No. This is death by a thousand cuts.
bpf trampoline is getting slower and slower.
scx's 'this' case is not a common pattern.


* Re: [PATCH bpf-next v1 1/4] bpf: Save struct_ops instance pointer in bpf_prog_aux
  2025-06-24 15:59       ` Alexei Starovoitov
@ 2025-06-24 19:41         ` Andrii Nakryiko
  0 siblings, 0 replies; 12+ messages in thread
From: Andrii Nakryiko @ 2025-06-24 19:41 UTC (permalink / raw)
  To: Alexei Starovoitov
  Cc: Amery Hung, bpf, Andrii Nakryiko, Daniel Borkmann, Tejun Heo,
	Martin KaFai Lau, Kernel Team

On Tue, Jun 24, 2025 at 8:59 AM Alexei Starovoitov
<alexei.starovoitov@gmail.com> wrote:
>
> On Tue, Jun 24, 2025 at 8:38 AM Andrii Nakryiko
> <andrii.nakryiko@gmail.com> wrote:
> >
> > On Wed, Jun 18, 2025 at 3:19 PM Amery Hung <ameryhung@gmail.com> wrote:
> > >
> > > On Thu, Jun 12, 2025 at 4:08 PM Andrii Nakryiko
> > > <andrii.nakryiko@gmail.com> wrote:
> > > >
> > > > On Mon, Jun 9, 2025 at 4:27 PM Amery Hung <ameryhung@gmail.com> wrote:
> > > > >
> > > > > Allows struct_ops implementors to infer the calling struct_ops instance
> > > > > inside a kfunc through prog->aux->this_st_ops. A new field, flags, is
> > > > > added to bpf_struct_ops. If BPF_STRUCT_OPS_F_THIS_PTR is set in flags,
> > > > > a pointer to the struct_ops structure registered to the kernel (i.e.,
> > > > > kvalue->data) will be saved to prog->aux->this_st_ops. To access it in
> > > > > a kfunc, use BPF_STRUCT_OPS_F_THIS_PTR with __prog argument [0]. The
> > > > > verifier will fixup the argument with a pointer to prog->aux. this_st_ops
> > > > > is protected by rcu and is valid until a struct_ops map is unregistered
> > > > > updated.
> > > > >
> > > > > For a struct_ops map with BPF_STRUCT_OPS_F_THIS_PTR, to make sure all
> > > > > programs in it have the same this_st_ops, cmpxchg is used. Only if a
> > > > > program is not already used in another struct_ops map also with
> > > > > BPF_STRUCT_OPS_F_THIS_PTR can it be assigned to the current struct_ops
> > > > > map.
> > > > >
> > > >
> > > > Have you considered an alternative to storing this_st_ops in
> > > > bpf_prog_aux by setting it at runtime (in struct_ops trampoline) into
> > > > bpf_run_ctx (which I think all struct ops programs have set), and then
> > > > letting any struct_ops kfunc just access it through current (see other
> > > > uses of bpf_run_ctx, if you are unfamiliar). This would avoid all this
> > > > business with extra flags and passing bpf_prog_aux as an extra
> > > > argument.
> > > >
> > >
> > > I didn't know this. Thanks for suggesting an alternative!
> > >
> > > > There will be no "only one struct_ops for this BPF program" limitation
> > > > either: technically, you could have the same BPF program used from two
> > > > struct_ops maps just fine (even at the same time). Then depending on
> > > > which struct_ops is currently active, you'd have a corresponding
> > > > bpf_run_ctx's struct_ops pointer. It feels like a cleaner approach to
> > > > me.
> > > >
> > >
> > > This is a cleaner approach for struct_ops operators. To make it work
> > > for kfuncs called in timer callback, I think prog->aux->st_ops is
> > > still needed, but at least we can unify how to get this_st_ops in
> > > kfunc, in a way that does not requires adding __prog to every kfuncs.
> > >
> > > +enum bpf_run_ctx_type {
> > > +        BPF_CG_RUN_CTX = 0,
> > > +        BPF_TRACE_RUN_CTX,
> > > +        BPF_TRAMP_RUN_CTX,
> > > +        BPF_TIMER_RUN_CTX,
> > > +};
> > >
> > > struct bpf_run_ctx {
> > > +        enum bpf_run_ctx_type type;
> > > };
> > >
> > > +struct bpf_timer_run_ctx {
> > > +        struct bpf_prog_aux *aux;
> > > +};
> > >
> > > struct bpf_tramp_run_ctx {
> > >         ...
> > > +        void *st_ops;
> > > };
> > >
> > > In bpf_struct_ops_prepare_trampoline(), the st_ops assignment will be
> > > emitted to the trampoline.
> > >
> > > In bpf_timer_cb(), prepare bpf_timer_run_ctx, where st_ops comes from
> > > prog->aux->this_st_ops and set current->bpf_ctx.
> > >
> > > Finally, in kfuncs that want to know the current struct_ops, call this
> > > new function below:
> > >
> > > +void *bpf_struct_ops_current_st_ops(void)
> > > +{
> > > +        struct bpf_prog_aux aux;
> > > +
> > > +        if (!current->bpf_ctx)
> > > +                return NULL;
> > > +
> > > +        switch(current->bpf_ctx->type) {
> > > +        case BPF_TRAMP_RUN_CTX:
> > > +                return (struct bpf_tramp_run_ctx *)(current->bpf_ctx)->st_ops;
> > > +        case BPF_TIMER_RUN_CTX:
> > > +                aux = (struct bpf_timer_run_ctx *)(current->bpf_ctx)->aux;
> > > +                return rcu_dereference(aux->this_st_ops);
> > > +        }
> > > +        return NULL;
> > > +}
> > >
> > > What do you think?
> >
> > I'm not sure I particularly like different ways to get this st_ops
> > pointer depending whether we are in an async callback or not. So if
> > bpf_prog_aux is inevitable, I'd just stick to that. As far as
> > bpf_run_ctx, though, instead of bpf_run_ctx_type, shall we just put
> > `struct bpf_prog_aux *` pointer into common struct bpf_run_ctx and
> > always set it for all program types that support run_ctx?
>
> No. This is death by a thousand cuts.
> bpf trampoline is getting slower and slower.
> scx's 'this' case is not a common pattern.

ok, no big deal


Thread overview: 12+ messages
2025-06-09 23:27 [PATCH bpf-next v1 1/4] bpf: Save struct_ops instance pointer in bpf_prog_aux Amery Hung
2025-06-09 23:27 ` [PATCH bpf-next v1 2/4] bpf: Allow verifier to fixup kernel module kfuncs Amery Hung
2025-06-09 23:27 ` [PATCH bpf-next v1 3/4] selftests/bpf: Test accessing struct_ops this pointer Amery Hung
2025-06-09 23:27 ` [PATCH bpf-next v1 4/4] selftests/bpf: Test accessing struct_ops this pointer in timer callback Amery Hung
     [not found]   ` <8c4943cb40967d152abe032b4208a7cdd89b539da26783afaa61e37eb6663cbf@mail.kernel.org>
2025-06-10 16:45     ` Amery Hung
2025-06-10 22:15 ` [PATCH bpf-next v1 1/4] bpf: Save struct_ops instance pointer in bpf_prog_aux Tejun Heo
2025-06-11  7:16 ` kernel test robot
2025-06-12 23:08 ` Andrii Nakryiko
2025-06-18 22:18   ` Amery Hung
2025-06-24 15:38     ` Andrii Nakryiko
2025-06-24 15:59       ` Alexei Starovoitov
2025-06-24 19:41         ` Andrii Nakryiko
