* [PATCH bpf-next v4 1/7] bpf: pass bpf_struct_ops_link to callbacks in bpf_struct_ops.
2024-05-21 22:51 [PATCH bpf-next v4 0/7] Notify user space when a struct_ops object is detached/unregistered Kui-Feng Lee
@ 2024-05-21 22:51 ` Kui-Feng Lee
2024-05-21 22:51 ` [PATCH bpf-next v4 2/7] bpf: enable detaching links of struct_ops objects Kui-Feng Lee
` (5 subsequent siblings)
6 siblings, 0 replies; 16+ messages in thread
From: Kui-Feng Lee @ 2024-05-21 22:51 UTC (permalink / raw)
To: bpf, ast, martin.lau, song, kernel-team, andrii
Cc: sinquersw, kuifeng, Kui-Feng Lee
Pass an additional pointer of bpf_struct_ops_link to callback function reg,
unreg, and update provided by subsystems defined in bpf_struct_ops. A
bpf_struct_ops_map can be registered for multiple links. Passing a pointer
of bpf_struct_ops_link helps subsystems to distinguish them.
This pointer will be used in the later patches to let the subsystem
initiate a detachment on a link that was registered to it previously.
Signed-off-by: Kui-Feng Lee <thinker.li@gmail.com>
---
include/linux/bpf.h | 6 +++---
kernel/bpf/bpf_struct_ops.c | 10 +++++-----
net/bpf/bpf_dummy_struct_ops.c | 4 ++--
net/ipv4/bpf_tcp_ca.c | 6 +++---
.../selftests/bpf/bpf_test_no_cfi/bpf_test_no_cfi.c | 4 ++--
tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c | 6 +++---
6 files changed, 18 insertions(+), 18 deletions(-)
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 90094400cc63..b600767ebe02 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -1730,9 +1730,9 @@ struct bpf_struct_ops {
int (*init_member)(const struct btf_type *t,
const struct btf_member *member,
void *kdata, const void *udata);
- int (*reg)(void *kdata);
- void (*unreg)(void *kdata);
- int (*update)(void *kdata, void *old_kdata);
+ int (*reg)(void *kdata, struct bpf_link *link);
+ void (*unreg)(void *kdata, struct bpf_link *link);
+ int (*update)(void *kdata, void *old_kdata, struct bpf_link *link);
int (*validate)(void *kdata);
void *cfi_stubs;
struct module *owner;
diff --git a/kernel/bpf/bpf_struct_ops.c b/kernel/bpf/bpf_struct_ops.c
index 86c7884abaf8..1542dded7489 100644
--- a/kernel/bpf/bpf_struct_ops.c
+++ b/kernel/bpf/bpf_struct_ops.c
@@ -757,7 +757,7 @@ static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
goto unlock;
}
- err = st_ops->reg(kdata);
+ err = st_ops->reg(kdata, NULL);
if (likely(!err)) {
/* This refcnt increment on the map here after
* 'st_ops->reg()' is secure since the state of the
@@ -805,7 +805,7 @@ static long bpf_struct_ops_map_delete_elem(struct bpf_map *map, void *key)
BPF_STRUCT_OPS_STATE_TOBEFREE);
switch (prev_state) {
case BPF_STRUCT_OPS_STATE_INUSE:
- st_map->st_ops_desc->st_ops->unreg(&st_map->kvalue.data);
+ st_map->st_ops_desc->st_ops->unreg(&st_map->kvalue.data, NULL);
bpf_map_put(map);
return 0;
case BPF_STRUCT_OPS_STATE_TOBEFREE:
@@ -1060,7 +1060,7 @@ static void bpf_struct_ops_map_link_dealloc(struct bpf_link *link)
/* st_link->map can be NULL if
* bpf_struct_ops_link_create() fails to register.
*/
- st_map->st_ops_desc->st_ops->unreg(&st_map->kvalue.data);
+ st_map->st_ops_desc->st_ops->unreg(&st_map->kvalue.data, link);
bpf_map_put(&st_map->map);
}
kfree(st_link);
@@ -1125,7 +1125,7 @@ static int bpf_struct_ops_map_link_update(struct bpf_link *link, struct bpf_map
goto err_out;
}
- err = st_map->st_ops_desc->st_ops->update(st_map->kvalue.data, old_st_map->kvalue.data);
+ err = st_map->st_ops_desc->st_ops->update(st_map->kvalue.data, old_st_map->kvalue.data, link);
if (err)
goto err_out;
@@ -1176,7 +1176,7 @@ int bpf_struct_ops_link_create(union bpf_attr *attr)
if (err)
goto err_out;
- err = st_map->st_ops_desc->st_ops->reg(st_map->kvalue.data);
+ err = st_map->st_ops_desc->st_ops->reg(st_map->kvalue.data, &link->link);
if (err) {
bpf_link_cleanup(&link_primer);
link = NULL;
diff --git a/net/bpf/bpf_dummy_struct_ops.c b/net/bpf/bpf_dummy_struct_ops.c
index 891cdf61c65a..3ea52b05adfb 100644
--- a/net/bpf/bpf_dummy_struct_ops.c
+++ b/net/bpf/bpf_dummy_struct_ops.c
@@ -272,12 +272,12 @@ static int bpf_dummy_init_member(const struct btf_type *t,
return -EOPNOTSUPP;
}
-static int bpf_dummy_reg(void *kdata)
+static int bpf_dummy_reg(void *kdata, struct bpf_link *link)
{
return -EOPNOTSUPP;
}
-static void bpf_dummy_unreg(void *kdata)
+static void bpf_dummy_unreg(void *kdata, struct bpf_link *link)
{
}
diff --git a/net/ipv4/bpf_tcp_ca.c b/net/ipv4/bpf_tcp_ca.c
index 18227757ec0c..3f88d0961e5b 100644
--- a/net/ipv4/bpf_tcp_ca.c
+++ b/net/ipv4/bpf_tcp_ca.c
@@ -260,17 +260,17 @@ static int bpf_tcp_ca_check_member(const struct btf_type *t,
return 0;
}
-static int bpf_tcp_ca_reg(void *kdata)
+static int bpf_tcp_ca_reg(void *kdata, struct bpf_link *link)
{
return tcp_register_congestion_control(kdata);
}
-static void bpf_tcp_ca_unreg(void *kdata)
+static void bpf_tcp_ca_unreg(void *kdata, struct bpf_link *link)
{
tcp_unregister_congestion_control(kdata);
}
-static int bpf_tcp_ca_update(void *kdata, void *old_kdata)
+static int bpf_tcp_ca_update(void *kdata, void *old_kdata, struct bpf_link *link)
{
return tcp_update_congestion_control(kdata, old_kdata);
}
diff --git a/tools/testing/selftests/bpf/bpf_test_no_cfi/bpf_test_no_cfi.c b/tools/testing/selftests/bpf/bpf_test_no_cfi/bpf_test_no_cfi.c
index b1dd889d5d7d..948eb3962732 100644
--- a/tools/testing/selftests/bpf/bpf_test_no_cfi/bpf_test_no_cfi.c
+++ b/tools/testing/selftests/bpf/bpf_test_no_cfi/bpf_test_no_cfi.c
@@ -22,12 +22,12 @@ static int dummy_init_member(const struct btf_type *t,
return 0;
}
-static int dummy_reg(void *kdata)
+static int dummy_reg(void *kdata, struct bpf_link *link)
{
return 0;
}
-static void dummy_unreg(void *kdata)
+static void dummy_unreg(void *kdata, struct bpf_link *link)
{
}
diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
index 2a18bd320e92..0a09732cde4b 100644
--- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
+++ b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
@@ -820,7 +820,7 @@ static const struct bpf_verifier_ops bpf_testmod_verifier_ops = {
.is_valid_access = bpf_testmod_ops_is_valid_access,
};
-static int bpf_dummy_reg(void *kdata)
+static int bpf_dummy_reg(void *kdata, struct bpf_link *link)
{
struct bpf_testmod_ops *ops = kdata;
@@ -835,7 +835,7 @@ static int bpf_dummy_reg(void *kdata)
return 0;
}
-static void bpf_dummy_unreg(void *kdata)
+static void bpf_dummy_unreg(void *kdata, struct bpf_link *link)
{
}
@@ -871,7 +871,7 @@ struct bpf_struct_ops bpf_bpf_testmod_ops = {
.owner = THIS_MODULE,
};
-static int bpf_dummy_reg2(void *kdata)
+static int bpf_dummy_reg2(void *kdata, struct bpf_link *link)
{
struct bpf_testmod_ops2 *ops = kdata;
--
2.34.1
^ permalink raw reply related [flat|nested] 16+ messages in thread* [PATCH bpf-next v4 2/7] bpf: enable detaching links of struct_ops objects.
2024-05-21 22:51 [PATCH bpf-next v4 0/7] Notify user space when a struct_ops object is detached/unregistered Kui-Feng Lee
2024-05-21 22:51 ` [PATCH bpf-next v4 1/7] bpf: pass bpf_struct_ops_link to callbacks in bpf_struct_ops Kui-Feng Lee
@ 2024-05-21 22:51 ` Kui-Feng Lee
2024-05-23 18:09 ` Martin KaFai Lau
2024-05-21 22:51 ` [PATCH bpf-next v4 3/7] bpf: support epoll from bpf struct_ops links Kui-Feng Lee
` (4 subsequent siblings)
6 siblings, 1 reply; 16+ messages in thread
From: Kui-Feng Lee @ 2024-05-21 22:51 UTC (permalink / raw)
To: bpf, ast, martin.lau, song, kernel-team, andrii
Cc: sinquersw, kuifeng, Kui-Feng Lee
Implement the detach callback in bpf_link_ops for struct_ops so that user
programs can detach a struct_ops link. The subsystems that struct_ops
objects are registered to can also use this callback to detach the links
being passed to them.
Signed-off-by: Kui-Feng Lee <thinker.li@gmail.com>
---
kernel/bpf/bpf_struct_ops.c | 63 +++++++++++++++++++++++++++++++++----
1 file changed, 57 insertions(+), 6 deletions(-)
diff --git a/kernel/bpf/bpf_struct_ops.c b/kernel/bpf/bpf_struct_ops.c
index 1542dded7489..fb6e8a3190ef 100644
--- a/kernel/bpf/bpf_struct_ops.c
+++ b/kernel/bpf/bpf_struct_ops.c
@@ -1057,9 +1057,6 @@ static void bpf_struct_ops_map_link_dealloc(struct bpf_link *link)
st_map = (struct bpf_struct_ops_map *)
rcu_dereference_protected(st_link->map, true);
if (st_map) {
- /* st_link->map can be NULL if
- * bpf_struct_ops_link_create() fails to register.
- */
st_map->st_ops_desc->st_ops->unreg(&st_map->kvalue.data, link);
bpf_map_put(&st_map->map);
}
@@ -1075,7 +1072,8 @@ static void bpf_struct_ops_map_link_show_fdinfo(const struct bpf_link *link,
st_link = container_of(link, struct bpf_struct_ops_link, link);
rcu_read_lock();
map = rcu_dereference(st_link->map);
- seq_printf(seq, "map_id:\t%d\n", map->id);
+ if (map)
+ seq_printf(seq, "map_id:\t%d\n", map->id);
rcu_read_unlock();
}
@@ -1088,7 +1086,8 @@ static int bpf_struct_ops_map_link_fill_link_info(const struct bpf_link *link,
st_link = container_of(link, struct bpf_struct_ops_link, link);
rcu_read_lock();
map = rcu_dereference(st_link->map);
- info->struct_ops.map_id = map->id;
+ if (map)
+ info->struct_ops.map_id = map->id;
rcu_read_unlock();
return 0;
}
@@ -1113,6 +1112,10 @@ static int bpf_struct_ops_map_link_update(struct bpf_link *link, struct bpf_map
mutex_lock(&update_mutex);
old_map = rcu_dereference_protected(st_link->map, lockdep_is_held(&update_mutex));
+ if (!old_map) {
+ err = -EINVAL;
+ goto err_out;
+ }
if (expected_old_map && old_map != expected_old_map) {
err = -EPERM;
goto err_out;
@@ -1139,8 +1142,37 @@ static int bpf_struct_ops_map_link_update(struct bpf_link *link, struct bpf_map
return err;
}
+static int bpf_struct_ops_map_link_detach(struct bpf_link *link)
+{
+ struct bpf_struct_ops_link *st_link = container_of(link, struct bpf_struct_ops_link, link);
+ struct bpf_struct_ops_map *st_map;
+ struct bpf_map *map;
+
+ mutex_lock(&update_mutex);
+
+ map = rcu_dereference_protected(st_link->map, lockdep_is_held(&update_mutex));
+ if (!map) {
+ mutex_unlock(&update_mutex);
+ return -EINVAL;
+ }
+ st_map = container_of(map, struct bpf_struct_ops_map, map);
+
+ st_map->st_ops_desc->st_ops->unreg(&st_map->kvalue.data, link);
+
+ rcu_assign_pointer(st_link->map, NULL);
+ /* Pair with bpf_map_get() in bpf_struct_ops_link_create() or
+ * bpf_map_inc() in bpf_struct_ops_map_link_update().
+ */
+ bpf_map_put(&st_map->map);
+
+ mutex_unlock(&update_mutex);
+
+ return 0;
+}
+
static const struct bpf_link_ops bpf_struct_ops_map_lops = {
.dealloc = bpf_struct_ops_map_link_dealloc,
+ .detach = bpf_struct_ops_map_link_detach,
.show_fdinfo = bpf_struct_ops_map_link_show_fdinfo,
.fill_link_info = bpf_struct_ops_map_link_fill_link_info,
.update_map = bpf_struct_ops_map_link_update,
@@ -1176,13 +1208,32 @@ int bpf_struct_ops_link_create(union bpf_attr *attr)
if (err)
goto err_out;
+ /* Init link->map before calling reg() in case being detached
+ * immediately.
+ */
+ RCU_INIT_POINTER(link->map, map);
+
+ /* Once reg() is called, the object and link is already available
+ * to the subsystem, and it can call
+ * bpf_struct_ops_map_link_detach() to unreg() it. However, it is
+ * safe not holding update_mutex here.
+ *
+ * In the case of failure in reg(), the subsystem has no reason to
+ * call bpf_struct_ops_map_link_detach() since the object is not
+ * accepted by it. In the case of success, the subsystem may call
+ * bpf_struct_ops_map_link_detach() to unreg() it, but we don't
+ * change the content of the link anymore except changing link->id
+ * in bpf_link_settle(). So, it is safe to not hold update_mutex
+ * here.
+ */
err = st_map->st_ops_desc->st_ops->reg(st_map->kvalue.data, &link->link);
if (err) {
+ RCU_INIT_POINTER(link->map, NULL);
bpf_link_cleanup(&link_primer);
+ /* The link has been freed by bpf_link_cleanup() */
link = NULL;
goto err_out;
}
- RCU_INIT_POINTER(link->map, map);
return bpf_link_settle(&link_primer);
--
2.34.1
^ permalink raw reply related [flat|nested] 16+ messages in thread* Re: [PATCH bpf-next v4 2/7] bpf: enable detaching links of struct_ops objects.
2024-05-21 22:51 ` [PATCH bpf-next v4 2/7] bpf: enable detaching links of struct_ops objects Kui-Feng Lee
@ 2024-05-23 18:09 ` Martin KaFai Lau
2024-05-23 18:28 ` Kui-Feng Lee
0 siblings, 1 reply; 16+ messages in thread
From: Martin KaFai Lau @ 2024-05-23 18:09 UTC (permalink / raw)
To: Kui-Feng Lee; +Cc: bpf, ast, song, kernel-team, andrii, sinquersw, kuifeng
On 5/21/24 3:51 PM, Kui-Feng Lee wrote:
> Implement the detach callback in bpf_link_ops for struct_ops so that user
> programs can detach a struct_ops link. The subsystems that struct_ops
> objects are registered to can also use this callback to detach the links
> being passed to them.
>
> Signed-off-by: Kui-Feng Lee <thinker.li@gmail.com>
> ---
> kernel/bpf/bpf_struct_ops.c | 63 +++++++++++++++++++++++++++++++++----
> 1 file changed, 57 insertions(+), 6 deletions(-)
>
> diff --git a/kernel/bpf/bpf_struct_ops.c b/kernel/bpf/bpf_struct_ops.c
> index 1542dded7489..fb6e8a3190ef 100644
> --- a/kernel/bpf/bpf_struct_ops.c
> +++ b/kernel/bpf/bpf_struct_ops.c
> @@ -1057,9 +1057,6 @@ static void bpf_struct_ops_map_link_dealloc(struct bpf_link *link)
> st_map = (struct bpf_struct_ops_map *)
> rcu_dereference_protected(st_link->map, true);
> if (st_map) {
> - /* st_link->map can be NULL if
> - * bpf_struct_ops_link_create() fails to register.
> - */
> st_map->st_ops_desc->st_ops->unreg(&st_map->kvalue.data, link);
> bpf_map_put(&st_map->map);
> }
> @@ -1075,7 +1072,8 @@ static void bpf_struct_ops_map_link_show_fdinfo(const struct bpf_link *link,
> st_link = container_of(link, struct bpf_struct_ops_link, link);
> rcu_read_lock();
> map = rcu_dereference(st_link->map);
> - seq_printf(seq, "map_id:\t%d\n", map->id);
> + if (map)
> + seq_printf(seq, "map_id:\t%d\n", map->id);
> rcu_read_unlock();
> }
>
> @@ -1088,7 +1086,8 @@ static int bpf_struct_ops_map_link_fill_link_info(const struct bpf_link *link,
> st_link = container_of(link, struct bpf_struct_ops_link, link);
> rcu_read_lock();
> map = rcu_dereference(st_link->map);
> - info->struct_ops.map_id = map->id;
> + if (map)
> + info->struct_ops.map_id = map->id;
> rcu_read_unlock();
> return 0;
> }
> @@ -1113,6 +1112,10 @@ static int bpf_struct_ops_map_link_update(struct bpf_link *link, struct bpf_map
> mutex_lock(&update_mutex);
>
> old_map = rcu_dereference_protected(st_link->map, lockdep_is_held(&update_mutex));
> + if (!old_map) {
> + err = -EINVAL;
Just noticed this while checking the return value in patch 3.
This should be -ENOLINK such that it is consistent to the other links'
.update_prog (e.g. cgroup, tcx, net_namespace...).
> + goto err_out;
> + }
> if (expected_old_map && old_map != expected_old_map) {
> err = -EPERM;
> goto err_out;
> @@ -1139,8 +1142,37 @@ static int bpf_struct_ops_map_link_update(struct bpf_link *link, struct bpf_map
> return err;
> }
>
> +static int bpf_struct_ops_map_link_detach(struct bpf_link *link)
> +{
> + struct bpf_struct_ops_link *st_link = container_of(link, struct bpf_struct_ops_link, link);
> + struct bpf_struct_ops_map *st_map;
> + struct bpf_map *map;
> +
> + mutex_lock(&update_mutex);
> +
> + map = rcu_dereference_protected(st_link->map, lockdep_is_held(&update_mutex));
> + if (!map) {
> + mutex_unlock(&update_mutex);
> + return -EINVAL;
Same here but should be always 0 (detach always succeeds).
> + }
> + st_map = container_of(map, struct bpf_struct_ops_map, map);
> +
> + st_map->st_ops_desc->st_ops->unreg(&st_map->kvalue.data, link);
> +
> + rcu_assign_pointer(st_link->map, NULL);
> + /* Pair with bpf_map_get() in bpf_struct_ops_link_create() or
> + * bpf_map_inc() in bpf_struct_ops_map_link_update().
> + */
> + bpf_map_put(&st_map->map);
> +
> + mutex_unlock(&update_mutex);
> +
> + return 0;
> +}
> +
> static const struct bpf_link_ops bpf_struct_ops_map_lops = {
> .dealloc = bpf_struct_ops_map_link_dealloc,
> + .detach = bpf_struct_ops_map_link_detach,
> .show_fdinfo = bpf_struct_ops_map_link_show_fdinfo,
> .fill_link_info = bpf_struct_ops_map_link_fill_link_info,
> .update_map = bpf_struct_ops_map_link_update,
> @@ -1176,13 +1208,32 @@ int bpf_struct_ops_link_create(union bpf_attr *attr)
> if (err)
> goto err_out;
>
> + /* Init link->map before calling reg() in case being detached
> + * immediately.
> + */
> + RCU_INIT_POINTER(link->map, map);
> +
> + /* Once reg() is called, the object and link is already available
> + * to the subsystem, and it can call
> + * bpf_struct_ops_map_link_detach() to unreg() it. However, it is
> + * safe not holding update_mutex here.
> + *
> + * In the case of failure in reg(), the subsystem has no reason to
> + * call bpf_struct_ops_map_link_detach() since the object is not
> + * accepted by it. In the case of success, the subsystem may call
> + * bpf_struct_ops_map_link_detach() to unreg() it, but we don't
> + * change the content of the link anymore except changing link->id
> + * in bpf_link_settle(). So, it is safe to not hold update_mutex
> + * here.
After sleeping on the RCU_INIT_POINTER dance and re-reading this comment, I need
to walk back my early reply.
Instead of having comment to explain the RCU_INIT_POINTER dance (resetting it to
NULL on reg() err because bpf_struct_ops_map_link_dealloc is not supposed to
unreg when the reg did fail), how about simplifying it and just take the
update_mutex here such that the subsystem cannot detach until the
RCU_INIT_POINTER(link->map, map) is done. Performance is not a concern here, so
I would prefer simplicity.
> + */
> err = st_map->st_ops_desc->st_ops->reg(st_map->kvalue.data, &link->link);
> if (err) {
> + RCU_INIT_POINTER(link->map, NULL);
> bpf_link_cleanup(&link_primer);
> + /* The link has been freed by bpf_link_cleanup() */
> link = NULL;
> goto err_out;
> }
> - RCU_INIT_POINTER(link->map, map);
>
> return bpf_link_settle(&link_primer);
>
^ permalink raw reply [flat|nested] 16+ messages in thread* Re: [PATCH bpf-next v4 2/7] bpf: enable detaching links of struct_ops objects.
2024-05-23 18:09 ` Martin KaFai Lau
@ 2024-05-23 18:28 ` Kui-Feng Lee
0 siblings, 0 replies; 16+ messages in thread
From: Kui-Feng Lee @ 2024-05-23 18:28 UTC (permalink / raw)
To: Martin KaFai Lau, Kui-Feng Lee
Cc: bpf, ast, song, kernel-team, andrii, kuifeng
On 5/23/24 11:09, Martin KaFai Lau wrote:
> On 5/21/24 3:51 PM, Kui-Feng Lee wrote:
>> Implement the detach callback in bpf_link_ops for struct_ops so that user
>> programs can detach a struct_ops link. The subsystems that struct_ops
>> objects are registered to can also use this callback to detach the links
>> being passed to them.
>>
>> Signed-off-by: Kui-Feng Lee <thinker.li@gmail.com>
>> ---
>> kernel/bpf/bpf_struct_ops.c | 63 +++++++++++++++++++++++++++++++++----
>> 1 file changed, 57 insertions(+), 6 deletions(-)
>>
>> diff --git a/kernel/bpf/bpf_struct_ops.c b/kernel/bpf/bpf_struct_ops.c
>> index 1542dded7489..fb6e8a3190ef 100644
>> --- a/kernel/bpf/bpf_struct_ops.c
>> +++ b/kernel/bpf/bpf_struct_ops.c
>> @@ -1057,9 +1057,6 @@ static void
>> bpf_struct_ops_map_link_dealloc(struct bpf_link *link)
>> st_map = (struct bpf_struct_ops_map *)
>> rcu_dereference_protected(st_link->map, true);
>> if (st_map) {
>> - /* st_link->map can be NULL if
>> - * bpf_struct_ops_link_create() fails to register.
>> - */
>> st_map->st_ops_desc->st_ops->unreg(&st_map->kvalue.data, link);
>> bpf_map_put(&st_map->map);
>> }
>> @@ -1075,7 +1072,8 @@ static void
>> bpf_struct_ops_map_link_show_fdinfo(const struct bpf_link *link,
>> st_link = container_of(link, struct bpf_struct_ops_link, link);
>> rcu_read_lock();
>> map = rcu_dereference(st_link->map);
>> - seq_printf(seq, "map_id:\t%d\n", map->id);
>> + if (map)
>> + seq_printf(seq, "map_id:\t%d\n", map->id);
>> rcu_read_unlock();
>> }
>> @@ -1088,7 +1086,8 @@ static int
>> bpf_struct_ops_map_link_fill_link_info(const struct bpf_link *link,
>> st_link = container_of(link, struct bpf_struct_ops_link, link);
>> rcu_read_lock();
>> map = rcu_dereference(st_link->map);
>> - info->struct_ops.map_id = map->id;
>> + if (map)
>> + info->struct_ops.map_id = map->id;
>> rcu_read_unlock();
>> return 0;
>> }
>> @@ -1113,6 +1112,10 @@ static int
>> bpf_struct_ops_map_link_update(struct bpf_link *link, struct bpf_map
>> mutex_lock(&update_mutex);
>> old_map = rcu_dereference_protected(st_link->map,
>> lockdep_is_held(&update_mutex));
>> + if (!old_map) {
>> + err = -EINVAL;
>
> Just noticed this while checking the return value in patch 3.
>
> This should be -ENOLINK such that it is consistent to the other links'
> .update_prog (e.g. cgroup, tcx, net_namespace...).
Sure
>
>> + goto err_out;
>> + }
>> if (expected_old_map && old_map != expected_old_map) {
>> err = -EPERM;
>> goto err_out;
>> @@ -1139,8 +1142,37 @@ static int
>> bpf_struct_ops_map_link_update(struct bpf_link *link, struct bpf_map
>> return err;
>> }
>> +static int bpf_struct_ops_map_link_detach(struct bpf_link *link)
>> +{
>> + struct bpf_struct_ops_link *st_link = container_of(link, struct
>> bpf_struct_ops_link, link);
>> + struct bpf_struct_ops_map *st_map;
>> + struct bpf_map *map;
>> +
>> + mutex_lock(&update_mutex);
>> +
>> + map = rcu_dereference_protected(st_link->map,
>> lockdep_is_held(&update_mutex));
>> + if (!map) {
>> + mutex_unlock(&update_mutex);
>> + return -EINVAL;
>
> Same here but should be always 0 (detach always succeeds).
Got it.
>
>> + }
>> + st_map = container_of(map, struct bpf_struct_ops_map, map);
>> +
>> + st_map->st_ops_desc->st_ops->unreg(&st_map->kvalue.data, link);
>> +
>> + rcu_assign_pointer(st_link->map, NULL);
>> + /* Pair with bpf_map_get() in bpf_struct_ops_link_create() or
>> + * bpf_map_inc() in bpf_struct_ops_map_link_update().
>> + */
>> + bpf_map_put(&st_map->map);
>> +
>> + mutex_unlock(&update_mutex);
>> +
>> + return 0;
>> +}
>> +
>> static const struct bpf_link_ops bpf_struct_ops_map_lops = {
>> .dealloc = bpf_struct_ops_map_link_dealloc,
>> + .detach = bpf_struct_ops_map_link_detach,
>> .show_fdinfo = bpf_struct_ops_map_link_show_fdinfo,
>> .fill_link_info = bpf_struct_ops_map_link_fill_link_info,
>> .update_map = bpf_struct_ops_map_link_update,
>> @@ -1176,13 +1208,32 @@ int bpf_struct_ops_link_create(union bpf_attr
>> *attr)
>> if (err)
>> goto err_out;
>> + /* Init link->map before calling reg() in case being detached
>> + * immediately.
>> + */
>> + RCU_INIT_POINTER(link->map, map);
>> +
>> + /* Once reg() is called, the object and link is already available
>> + * to the subsystem, and it can call
>> + * bpf_struct_ops_map_link_detach() to unreg() it. However, it is
>> + * safe not holding update_mutex here.
>> + *
>> + * In the case of failure in reg(), the subsystem has no reason to
>> + * call bpf_struct_ops_map_link_detach() since the object is not
>> + * accepted by it. In the case of success, the subsystem may call
>> + * bpf_struct_ops_map_link_detach() to unreg() it, but we don't
>> + * change the content of the link anymore except changing link->id
>> + * in bpf_link_settle(). So, it is safe to not hold update_mutex
>> + * here.
>
> After sleeping on the RCU_INIT_POINTER dance and re-reading this
> comment, I need to walk back my early reply.
>
> Instead of having comment to explain the RCU_INIT_POINTER dance
> (resetting it to NULL on reg() err because
> bpf_struct_ops_map_link_dealloc is not supposed to unreg when the reg
> did fail), how about simplifying it and just take the update_mutex here
> such that the subsystem cannot detach until the
> RCU_INIT_POINTER(link->map, map) is done. Performance is not a concern
> here, so I would prefer simplicity.
sure!
>
>> + */
>> err = st_map->st_ops_desc->st_ops->reg(st_map->kvalue.data,
>> &link->link);
>> if (err) {
>> + RCU_INIT_POINTER(link->map, NULL);
>> bpf_link_cleanup(&link_primer);
>> + /* The link has been freed by bpf_link_cleanup() */
>> link = NULL;
>> goto err_out;
>> }
>> - RCU_INIT_POINTER(link->map, map);
>> return bpf_link_settle(&link_primer);
>
^ permalink raw reply [flat|nested] 16+ messages in thread
* [PATCH bpf-next v4 3/7] bpf: support epoll from bpf struct_ops links.
2024-05-21 22:51 [PATCH bpf-next v4 0/7] Notify user space when a struct_ops object is detached/unregistered Kui-Feng Lee
2024-05-21 22:51 ` [PATCH bpf-next v4 1/7] bpf: pass bpf_struct_ops_link to callbacks in bpf_struct_ops Kui-Feng Lee
2024-05-21 22:51 ` [PATCH bpf-next v4 2/7] bpf: enable detaching links of struct_ops objects Kui-Feng Lee
@ 2024-05-21 22:51 ` Kui-Feng Lee
2024-05-23 17:23 ` Martin KaFai Lau
2024-05-21 22:51 ` [PATCH bpf-next v4 4/7] bpf: export bpf_link_inc_not_zero Kui-Feng Lee
` (3 subsequent siblings)
6 siblings, 1 reply; 16+ messages in thread
From: Kui-Feng Lee @ 2024-05-21 22:51 UTC (permalink / raw)
To: bpf, ast, martin.lau, song, kernel-team, andrii
Cc: sinquersw, kuifeng, Kui-Feng Lee
Add epoll support to bpf struct_ops links to trigger EPOLLHUP event upon
detachment.
This patch implements the "poll" of the "struct file_operations" for BPF
links and introduces a new "poll" operator in the "struct bpf_link_ops". By
implementing "poll" of "struct bpf_link_ops" for the links of struct_ops,
the file descriptor of a struct_ops link can be added to an epoll file
descriptor to receive EPOLLHUP events.
Signed-off-by: Kui-Feng Lee <thinker.li@gmail.com>
---
include/linux/bpf.h | 1 +
kernel/bpf/bpf_struct_ops.c | 17 +++++++++++++++++
kernel/bpf/syscall.c | 11 +++++++++++
3 files changed, 29 insertions(+)
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index b600767ebe02..5f7496ef8b7c 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -1612,6 +1612,7 @@ struct bpf_link_ops {
struct bpf_link_info *info);
int (*update_map)(struct bpf_link *link, struct bpf_map *new_map,
struct bpf_map *old_map);
+ __poll_t (*poll)(struct file *file, struct poll_table_struct *pts);
};
struct bpf_tramp_link {
diff --git a/kernel/bpf/bpf_struct_ops.c b/kernel/bpf/bpf_struct_ops.c
index fb6e8a3190ef..794549dc9f4b 100644
--- a/kernel/bpf/bpf_struct_ops.c
+++ b/kernel/bpf/bpf_struct_ops.c
@@ -12,6 +12,7 @@
#include <linux/mutex.h>
#include <linux/btf_ids.h>
#include <linux/rcupdate_wait.h>
+#include <linux/poll.h>
struct bpf_struct_ops_value {
struct bpf_struct_ops_common_value common;
@@ -56,6 +57,7 @@ struct bpf_struct_ops_map {
struct bpf_struct_ops_link {
struct bpf_link link;
struct bpf_map __rcu *map;
+ wait_queue_head_t wait_hup;
};
static DEFINE_MUTEX(update_mutex);
@@ -1167,15 +1169,28 @@ static int bpf_struct_ops_map_link_detach(struct bpf_link *link)
mutex_unlock(&update_mutex);
+ wake_up_interruptible_poll(&st_link->wait_hup, EPOLLHUP);
+
return 0;
}
+static __poll_t bpf_struct_ops_map_link_poll(struct file *file,
+ struct poll_table_struct *pts)
+{
+ struct bpf_struct_ops_link *st_link = file->private_data;
+
+ poll_wait(file, &st_link->wait_hup, pts);
+
+ return rcu_access_pointer(st_link->map) ? 0 : EPOLLHUP;
+}
+
static const struct bpf_link_ops bpf_struct_ops_map_lops = {
.dealloc = bpf_struct_ops_map_link_dealloc,
.detach = bpf_struct_ops_map_link_detach,
.show_fdinfo = bpf_struct_ops_map_link_show_fdinfo,
.fill_link_info = bpf_struct_ops_map_link_fill_link_info,
.update_map = bpf_struct_ops_map_link_update,
+ .poll = bpf_struct_ops_map_link_poll,
};
int bpf_struct_ops_link_create(union bpf_attr *attr)
@@ -1213,6 +1228,8 @@ int bpf_struct_ops_link_create(union bpf_attr *attr)
*/
RCU_INIT_POINTER(link->map, map);
+ init_waitqueue_head(&link->wait_hup);
+
/* Once reg() is called, the object and link is already available
* to the subsystem, and it can call
* bpf_struct_ops_map_link_detach() to unreg() it. However, it is
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 13ad74ecf2cd..ad4f81ed27f0 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -3150,6 +3150,16 @@ static void bpf_link_show_fdinfo(struct seq_file *m, struct file *filp)
}
#endif
+static __poll_t bpf_link_poll(struct file *file, struct poll_table_struct *pts)
+{
+ struct bpf_link *link = file->private_data;
+
+ if (link->ops->poll)
+ return link->ops->poll(file, pts);
+
+ return 0;
+}
+
static const struct file_operations bpf_link_fops = {
#ifdef CONFIG_PROC_FS
.show_fdinfo = bpf_link_show_fdinfo,
@@ -3157,6 +3167,7 @@ static const struct file_operations bpf_link_fops = {
.release = bpf_link_release,
.read = bpf_dummy_read,
.write = bpf_dummy_write,
+ .poll = bpf_link_poll,
};
static int bpf_link_alloc_id(struct bpf_link *link)
--
2.34.1
^ permalink raw reply related [flat|nested] 16+ messages in thread* Re: [PATCH bpf-next v4 3/7] bpf: support epoll from bpf struct_ops links.
2024-05-21 22:51 ` [PATCH bpf-next v4 3/7] bpf: support epoll from bpf struct_ops links Kui-Feng Lee
@ 2024-05-23 17:23 ` Martin KaFai Lau
2024-05-23 18:24 ` Kui-Feng Lee
0 siblings, 1 reply; 16+ messages in thread
From: Martin KaFai Lau @ 2024-05-23 17:23 UTC (permalink / raw)
To: Kui-Feng Lee; +Cc: bpf, ast, song, kernel-team, andrii, sinquersw, kuifeng
On 5/21/24 3:51 PM, Kui-Feng Lee wrote:
> +static __poll_t bpf_link_poll(struct file *file, struct poll_table_struct *pts)
> +{
> + struct bpf_link *link = file->private_data;
> +
> + if (link->ops->poll)
> + return link->ops->poll(file, pts);
> +
> + return 0;
The current bpf_link_fops.poll is NULL before this patch. From vfs_poll, it
seems to be DEFAULT_POLLMASK for this case. Please double check.
> +}
> +
> static const struct file_operations bpf_link_fops = {
> #ifdef CONFIG_PROC_FS
> .show_fdinfo = bpf_link_show_fdinfo,
> @@ -3157,6 +3167,7 @@ static const struct file_operations bpf_link_fops = {
> .release = bpf_link_release,
> .read = bpf_dummy_read,
> .write = bpf_dummy_write,
> + .poll = bpf_link_poll,
Same here. What does the epoll_ctl(EPOLL_CTL_ADD) currently expect for link
(e.g. cgroup) that does not support poll?
> };
>
^ permalink raw reply [flat|nested] 16+ messages in thread* Re: [PATCH bpf-next v4 3/7] bpf: support epoll from bpf struct_ops links.
2024-05-23 17:23 ` Martin KaFai Lau
@ 2024-05-23 18:24 ` Kui-Feng Lee
2024-05-23 18:34 ` Martin KaFai Lau
0 siblings, 1 reply; 16+ messages in thread
From: Kui-Feng Lee @ 2024-05-23 18:24 UTC (permalink / raw)
To: Martin KaFai Lau, Kui-Feng Lee
Cc: bpf, ast, song, kernel-team, andrii, kuifeng
On 5/23/24 10:23, Martin KaFai Lau wrote:
> On 5/21/24 3:51 PM, Kui-Feng Lee wrote:
>> +static __poll_t bpf_link_poll(struct file *file, struct
>> poll_table_struct *pts)
>> +{
>> + struct bpf_link *link = file->private_data;
>> +
>> + if (link->ops->poll)
>> + return link->ops->poll(file, pts);
>> +
>> + return 0;
>
> The current bpf_link_fops.poll is NULL before this patch. From vfs_poll,
> it seems to be DEFAULT_POLLMASK for this case. Please double check.
Yes, it returns DEFAULT_POLLMASK if file->f_op->poll is NULL. But,
before this patch, link can not be added to an epoll. See the
explanation below.
>
>> +}
>> +
>> static const struct file_operations bpf_link_fops = {
>> #ifdef CONFIG_PROC_FS
>> .show_fdinfo = bpf_link_show_fdinfo,
>> @@ -3157,6 +3167,7 @@ static const struct file_operations
>> bpf_link_fops = {
>> .release = bpf_link_release,
>> .read = bpf_dummy_read,
>> .write = bpf_dummy_write,
>> + .poll = bpf_link_poll,
>
> Same here. What does the epoll_ctl(EPOLL_CTL_ADD) currently expect for
> link (e.g. cgroup) that does not support poll?
>
epoll_ctl() always returns -EPERM for files not supporting poll.
Should I add another instance of struct file_operations to keep the
consistency for other types of links?
>> };
>
^ permalink raw reply [flat|nested] 16+ messages in thread* Re: [PATCH bpf-next v4 3/7] bpf: support epoll from bpf struct_ops links.
2024-05-23 18:24 ` Kui-Feng Lee
@ 2024-05-23 18:34 ` Martin KaFai Lau
2024-05-23 19:03 ` Kui-Feng Lee
0 siblings, 1 reply; 16+ messages in thread
From: Martin KaFai Lau @ 2024-05-23 18:34 UTC (permalink / raw)
To: Kui-Feng Lee, Kui-Feng Lee; +Cc: bpf, ast, song, kernel-team, andrii, kuifeng
On 5/23/24 11:24 AM, Kui-Feng Lee wrote:
>
>
> On 5/23/24 10:23, Martin KaFai Lau wrote:
>> On 5/21/24 3:51 PM, Kui-Feng Lee wrote:
>>> +static __poll_t bpf_link_poll(struct file *file, struct poll_table_struct *pts)
>>> +{
>>> + struct bpf_link *link = file->private_data;
>>> +
>>> + if (link->ops->poll)
>>> + return link->ops->poll(file, pts);
>>> +
>>> + return 0;
>>
>> The current bpf_link_fops.poll is NULL before this patch. From vfs_poll, it
>> seems to be DEFAULT_POLLMASK for this case. Please double check.
>
>
> Yes, it returns DEFAULT_POLLMASK if file->f_op->poll is NULL. But,
> before this patch, link can not be added to an epoll. See the
> explanation below.
How about select() and poll() that do not need epoll_ctl() setup?
>
>>
>>> +}
>>> +
>>> static const struct file_operations bpf_link_fops = {
>>> #ifdef CONFIG_PROC_FS
>>> .show_fdinfo = bpf_link_show_fdinfo,
>>> @@ -3157,6 +3167,7 @@ static const struct file_operations bpf_link_fops = {
>>> .release = bpf_link_release,
>>> .read = bpf_dummy_read,
>>> .write = bpf_dummy_write,
>>> + .poll = bpf_link_poll,
>>
>> Same here. What does the epoll_ctl(EPOLL_CTL_ADD) currently expect for link
>> (e.g. cgroup) that does not support poll?
>>
>
> epoll_ctl() always returns -EPERM for files not supporting poll.
> Should I add another instance of struct file_operations to keep the
> consistency for other types of links?
imo, it makes sense to have another instance for link that supports poll such
that epoll_ctl(EPOLL_CTL_ADD) can fail early for the unsupported links.
>
>>> };
>>
^ permalink raw reply [flat|nested] 16+ messages in thread* Re: [PATCH bpf-next v4 3/7] bpf: support epoll from bpf struct_ops links.
2024-05-23 18:34 ` Martin KaFai Lau
@ 2024-05-23 19:03 ` Kui-Feng Lee
2024-05-23 19:10 ` Martin KaFai Lau
0 siblings, 1 reply; 16+ messages in thread
From: Kui-Feng Lee @ 2024-05-23 19:03 UTC (permalink / raw)
To: Martin KaFai Lau, Kui-Feng Lee
Cc: bpf, ast, song, kernel-team, andrii, kuifeng
On 5/23/24 11:34, Martin KaFai Lau wrote:
> On 5/23/24 11:24 AM, Kui-Feng Lee wrote:
>>
>>
>> On 5/23/24 10:23, Martin KaFai Lau wrote:
>>> On 5/21/24 3:51 PM, Kui-Feng Lee wrote:
>>>> +static __poll_t bpf_link_poll(struct file *file, struct
>>>> poll_table_struct *pts)
>>>> +{
>>>> + struct bpf_link *link = file->private_data;
>>>> +
>>>> + if (link->ops->poll)
>>>> + return link->ops->poll(file, pts);
>>>> +
>>>> + return 0;
>>>
>>> The current bpf_link_fops.poll is NULL before this patch. From
>>> vfs_poll, it seems to be DEFAULT_POLLMASK for this case. Please
>>> double check.
>>
>>
>> Yes, it returns DEFAULT_POLLMASK if file->f_op->poll is NULL. But,
>> before this patch, link can not be added to an epoll. See the
>> explanation below.
>
> How about select() and poll() that do not need epoll_ctl() setup?
AFAIK, they just don't check it at all, calling vfs_poll() directly.
>
>>
>>>
>>>> +}
>>>> +
>>>> static const struct file_operations bpf_link_fops = {
>>>> #ifdef CONFIG_PROC_FS
>>>> .show_fdinfo = bpf_link_show_fdinfo,
>>>> @@ -3157,6 +3167,7 @@ static const struct file_operations
>>>> bpf_link_fops = {
>>>> .release = bpf_link_release,
>>>> .read = bpf_dummy_read,
>>>> .write = bpf_dummy_write,
>>>> + .poll = bpf_link_poll,
>>>
>>> Same here. What does the epoll_ctl(EPOLL_CTL_ADD) currently expect
>>> for link (e.g. cgroup) that does not support poll?
>>>
>>
>> epoll_ctl() always returns -EPERM for files not supporting poll.
>> Should I add another instance of struct file_operations to keep the
>> consistency for other types of links?
>
> imo, it makes sense to have another instance for link that supports poll
> such that epoll_ctl(EPOLL_CTL_ADD) can fail early for the unsupported
> links.
Ok! I will add another instance.
>
>>
>>>> };
>>>
>
^ permalink raw reply [flat|nested] 16+ messages in thread* Re: [PATCH bpf-next v4 3/7] bpf: support epoll from bpf struct_ops links.
2024-05-23 19:03 ` Kui-Feng Lee
@ 2024-05-23 19:10 ` Martin KaFai Lau
2024-05-23 19:28 ` Kui-Feng Lee
0 siblings, 1 reply; 16+ messages in thread
From: Martin KaFai Lau @ 2024-05-23 19:10 UTC (permalink / raw)
To: Kui-Feng Lee, Kui-Feng Lee; +Cc: bpf, ast, song, kernel-team, andrii, kuifeng
On 5/23/24 12:03 PM, Kui-Feng Lee wrote:
>
>
> On 5/23/24 11:34, Martin KaFai Lau wrote:
>> On 5/23/24 11:24 AM, Kui-Feng Lee wrote:
>>>
>>>
>>> On 5/23/24 10:23, Martin KaFai Lau wrote:
>>>> On 5/21/24 3:51 PM, Kui-Feng Lee wrote:
>>>>> +static __poll_t bpf_link_poll(struct file *file, struct poll_table_struct
>>>>> *pts)
>>>>> +{
>>>>> + struct bpf_link *link = file->private_data;
>>>>> +
>>>>> + if (link->ops->poll)
>>>>> + return link->ops->poll(file, pts);
>>>>> +
>>>>> + return 0;
>>>>
>>>> The current bpf_link_fops.poll is NULL before this patch. From vfs_poll, it
>>>> seems to be DEFAULT_POLLMASK for this case. Please double check.
>>>
>>>
>>> Yes, it returns DEFAULT_POLLMASK if file->f_op->poll is NULL. But,
>>> before this patch, link can not be added to an epoll. See the
>>> explanation below.
>>
>> How about select() and poll() that do not need epoll_ctl() setup?
>
> AFAIK, they just don't check it at all, calling vfs_poll() directly.
right, vfs_poll returns DEFAULT_POLLMASK which is not 0.
#define DEFAULT_POLLMASK (EPOLLIN | EPOLLOUT | EPOLLRDNORM | EPOLLWRNORM)
static inline __poll_t vfs_poll(struct file *file, struct poll_table_struct *pt)
{
if (unlikely(!file->f_op->poll))
return DEFAULT_POLLMASK;
return file->f_op->poll(file, pt);
}
but this discussion is moot if another file_operations instance is used.
>
>>
>>>
>>>>
>>>>> +}
>>>>> +
>>>>> static const struct file_operations bpf_link_fops = {
>>>>> #ifdef CONFIG_PROC_FS
>>>>> .show_fdinfo = bpf_link_show_fdinfo,
>>>>> @@ -3157,6 +3167,7 @@ static const struct file_operations bpf_link_fops = {
>>>>> .release = bpf_link_release,
>>>>> .read = bpf_dummy_read,
>>>>> .write = bpf_dummy_write,
>>>>> + .poll = bpf_link_poll,
>>>>
>>>> Same here. What does the epoll_ctl(EPOLL_CTL_ADD) currently expect for link
>>>> (e.g. cgroup) that does not support poll?
>>>>
>>>
>>> epoll_ctl() always returns -EPERM for files not supporting poll.
>>> Should I add another instance of struct file_operations to keep the
>>> consistency for other types of links?
>>
>> imo, it makes sense to have another instance for link that supports poll such
>> that epoll_ctl(EPOLL_CTL_ADD) can fail early for the unsupported links.
>
> Ok! I will add another instance.
^ permalink raw reply [flat|nested] 16+ messages in thread* Re: [PATCH bpf-next v4 3/7] bpf: support epoll from bpf struct_ops links.
2024-05-23 19:10 ` Martin KaFai Lau
@ 2024-05-23 19:28 ` Kui-Feng Lee
0 siblings, 0 replies; 16+ messages in thread
From: Kui-Feng Lee @ 2024-05-23 19:28 UTC (permalink / raw)
To: Martin KaFai Lau, Kui-Feng Lee
Cc: bpf, ast, song, kernel-team, andrii, kuifeng
On 5/23/24 12:10, Martin KaFai Lau wrote:
> On 5/23/24 12:03 PM, Kui-Feng Lee wrote:
>>
>>
>> On 5/23/24 11:34, Martin KaFai Lau wrote:
>>> On 5/23/24 11:24 AM, Kui-Feng Lee wrote:
>>>>
>>>>
>>>> On 5/23/24 10:23, Martin KaFai Lau wrote:
>>>>> On 5/21/24 3:51 PM, Kui-Feng Lee wrote:
>>>>>> +static __poll_t bpf_link_poll(struct file *file, struct
>>>>>> poll_table_struct *pts)
>>>>>> +{
>>>>>> + struct bpf_link *link = file->private_data;
>>>>>> +
>>>>>> + if (link->ops->poll)
>>>>>> + return link->ops->poll(file, pts);
>>>>>> +
>>>>>> + return 0;
>>>>>
>>>>> The current bpf_link_fops.poll is NULL before this patch. From
>>>>> vfs_poll, it seems to be DEFAULT_POLLMASK for this case. Please
>>>>> double check.
>>>>
>>>>
>>>> Yes, it returns DEFAULT_POLLMASK if file->f_op->poll is NULL. But,
>>>> before this patch, link can not be added to an epoll. See the
>>>> explanation below.
>>>
>>> How about select() and poll() that do not need epoll_ctl() setup?
>>
>> AFAIK, they just don't check it at all, calling vfs_poll() directly.
>
> right, vfs_poll returns DEFAULT_POLLMASK which is not 0.
>
> #define DEFAULT_POLLMASK (EPOLLIN | EPOLLOUT | EPOLLRDNORM | EPOLLWRNORM)
>
> static inline __poll_t vfs_poll(struct file *file, struct
> poll_table_struct *pt)
> {
> if (unlikely(!file->f_op->poll))
> return DEFAULT_POLLMASK;
> return file->f_op->poll(file, pt);
> }
>
> but this discussion is moot if another file_operations instance is used.
Sure! I am adding another instance.
>
>>
>>>
>>>>
>>>>>
>>>>>> +}
>>>>>> +
>>>>>> static const struct file_operations bpf_link_fops = {
>>>>>> #ifdef CONFIG_PROC_FS
>>>>>> .show_fdinfo = bpf_link_show_fdinfo,
>>>>>> @@ -3157,6 +3167,7 @@ static const struct file_operations
>>>>>> bpf_link_fops = {
>>>>>> .release = bpf_link_release,
>>>>>> .read = bpf_dummy_read,
>>>>>> .write = bpf_dummy_write,
>>>>>> + .poll = bpf_link_poll,
>>>>>
>>>>> Same here. What does the epoll_ctl(EPOLL_CTL_ADD) currently expect
>>>>> for link (e.g. cgroup) that does not support poll?
>>>>>
>>>>
>>>> epoll_ctl() always returns -EPERM for files not supporting poll.
>>>> Should I add another instance of struct file_operations to keep the
>>>> consistency for other types of links?
>>>
>>> imo, it makes sense to have another instance for link that supports
>>> poll such that epoll_ctl(EPOLL_CTL_ADD) can fail early for the
>>> unsupported links.
>>
>> Ok! I will add another instance.
>
^ permalink raw reply [flat|nested] 16+ messages in thread
* [PATCH bpf-next v4 4/7] bpf: export bpf_link_inc_not_zero.
2024-05-21 22:51 [PATCH bpf-next v4 0/7] Notify user space when a struct_ops object is detached/unregistered Kui-Feng Lee
` (2 preceding siblings ...)
2024-05-21 22:51 ` [PATCH bpf-next v4 3/7] bpf: support epoll from bpf struct_ops links Kui-Feng Lee
@ 2024-05-21 22:51 ` Kui-Feng Lee
2024-05-21 22:51 ` [PATCH bpf-next v4 5/7] selftests/bpf: test struct_ops with epoll Kui-Feng Lee
` (2 subsequent siblings)
6 siblings, 0 replies; 16+ messages in thread
From: Kui-Feng Lee @ 2024-05-21 22:51 UTC (permalink / raw)
To: bpf, ast, martin.lau, song, kernel-team, andrii
Cc: sinquersw, kuifeng, Kui-Feng Lee
bpf_link_inc_not_zero() will be used by kernel modules. We will use it in
bpf_testmod.c later.
Signed-off-by: Kui-Feng Lee <thinker.li@gmail.com>
---
include/linux/bpf.h | 6 ++++++
kernel/bpf/syscall.c | 3 ++-
2 files changed, 8 insertions(+), 1 deletion(-)
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 5f7496ef8b7c..6b592094f9b4 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -2351,6 +2351,7 @@ int bpf_link_prime(struct bpf_link *link, struct bpf_link_primer *primer);
int bpf_link_settle(struct bpf_link_primer *primer);
void bpf_link_cleanup(struct bpf_link_primer *primer);
void bpf_link_inc(struct bpf_link *link);
+struct bpf_link *bpf_link_inc_not_zero(struct bpf_link *link);
void bpf_link_put(struct bpf_link *link);
int bpf_link_new_fd(struct bpf_link *link);
struct bpf_link *bpf_link_get_from_fd(u32 ufd);
@@ -2722,6 +2723,11 @@ static inline void bpf_link_inc(struct bpf_link *link)
{
}
+static inline struct bpf_link *bpf_link_inc_not_zero(struct bpf_link *link)
+{
+ return NULL;
+}
+
static inline void bpf_link_put(struct bpf_link *link)
{
}
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index ad4f81ed27f0..31fabe26371d 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -5422,10 +5422,11 @@ static int link_detach(union bpf_attr *attr)
return ret;
}
-static struct bpf_link *bpf_link_inc_not_zero(struct bpf_link *link)
+struct bpf_link *bpf_link_inc_not_zero(struct bpf_link *link)
{
return atomic64_fetch_add_unless(&link->refcnt, 1, 0) ? link : ERR_PTR(-ENOENT);
}
+EXPORT_SYMBOL(bpf_link_inc_not_zero);
struct bpf_link *bpf_link_by_id(u32 id)
{
--
2.34.1
^ permalink raw reply related [flat|nested] 16+ messages in thread* [PATCH bpf-next v4 5/7] selftests/bpf: test struct_ops with epoll
2024-05-21 22:51 [PATCH bpf-next v4 0/7] Notify user space when a struct_ops object is detached/unregistered Kui-Feng Lee
` (3 preceding siblings ...)
2024-05-21 22:51 ` [PATCH bpf-next v4 4/7] bpf: export bpf_link_inc_not_zero Kui-Feng Lee
@ 2024-05-21 22:51 ` Kui-Feng Lee
2024-05-21 22:51 ` [PATCH bpf-next v4 6/7] selftests/bpf: detach a struct_ops link from the subsystem managing it Kui-Feng Lee
2024-05-21 22:51 ` [PATCH bpf-next v4 7/7] selftests/bpf: make sure bpf_testmod handling racing link destroying well Kui-Feng Lee
6 siblings, 0 replies; 16+ messages in thread
From: Kui-Feng Lee @ 2024-05-21 22:51 UTC (permalink / raw)
To: bpf, ast, martin.lau, song, kernel-team, andrii
Cc: sinquersw, kuifeng, Kui-Feng Lee
Verify whether a user space program is informed through epoll with EPOLLHUP
when a struct_ops object is detached.
The BPF code in selftests/bpf/progs/struct_ops_module.c has become
complex. Therefore, struct_ops_detach.c has been added to segregate the BPF
code for detachment tests from the BPF code for other tests based on the
recommendation of Andrii Nakryiko.
Suggested-by: Andrii Nakryiko <andrii@kernel.org>
Signed-off-by: Kui-Feng Lee <thinker.li@gmail.com>
---
.../bpf/prog_tests/test_struct_ops_module.c | 57 +++++++++++++++++++
.../selftests/bpf/progs/struct_ops_detach.c | 9 +++
2 files changed, 66 insertions(+)
create mode 100644 tools/testing/selftests/bpf/progs/struct_ops_detach.c
diff --git a/tools/testing/selftests/bpf/prog_tests/test_struct_ops_module.c b/tools/testing/selftests/bpf/prog_tests/test_struct_ops_module.c
index 29e183a80f49..bbcf12696a6b 100644
--- a/tools/testing/selftests/bpf/prog_tests/test_struct_ops_module.c
+++ b/tools/testing/selftests/bpf/prog_tests/test_struct_ops_module.c
@@ -3,9 +3,12 @@
#include <test_progs.h>
#include <time.h>
+#include <sys/epoll.h>
+
#include "struct_ops_module.skel.h"
#include "struct_ops_nulled_out_cb.skel.h"
#include "struct_ops_forgotten_cb.skel.h"
+#include "struct_ops_detach.skel.h"
static void check_map_info(struct bpf_map_info *info)
{
@@ -242,6 +245,58 @@ static void test_struct_ops_forgotten_cb(void)
struct_ops_forgotten_cb__destroy(skel);
}
+/* Detach a link from a user space program */
+static void test_detach_link(void)
+{
+ struct epoll_event ev, events[2];
+ struct struct_ops_detach *skel;
+ struct bpf_link *link = NULL;
+ int fd, epollfd = -1, nfds;
+ int err;
+
+ skel = struct_ops_detach__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "struct_ops_detach__open_and_load"))
+ return;
+
+ link = bpf_map__attach_struct_ops(skel->maps.testmod_do_detach);
+ if (!ASSERT_OK_PTR(link, "attach_struct_ops"))
+ goto cleanup;
+
+ fd = bpf_link__fd(link);
+ if (!ASSERT_GE(fd, 0, "link_fd"))
+ goto cleanup;
+
+ epollfd = epoll_create1(0);
+ if (!ASSERT_GE(epollfd, 0, "epoll_create1"))
+ goto cleanup;
+
+ ev.events = EPOLLHUP;
+ ev.data.fd = fd;
+ err = epoll_ctl(epollfd, EPOLL_CTL_ADD, fd, &ev);
+ if (!ASSERT_OK(err, "epoll_ctl"))
+ goto cleanup;
+
+ err = bpf_link__detach(link);
+ if (!ASSERT_OK(err, "detach_link"))
+ goto cleanup;
+
+ /* Wait for EPOLLHUP */
+ nfds = epoll_wait(epollfd, events, 2, 500);
+ if (!ASSERT_EQ(nfds, 1, "epoll_wait"))
+ goto cleanup;
+
+ if (!ASSERT_EQ(events[0].data.fd, fd, "epoll_wait_fd"))
+ goto cleanup;
+ if (!ASSERT_TRUE(events[0].events & EPOLLHUP, "events[0].events"))
+ goto cleanup;
+
+cleanup:
+ if (epollfd >= 0)
+ close(epollfd);
+ bpf_link__destroy(link);
+ struct_ops_detach__destroy(skel);
+}
+
void serial_test_struct_ops_module(void)
{
if (test__start_subtest("struct_ops_load"))
@@ -254,5 +309,7 @@ void serial_test_struct_ops_module(void)
test_struct_ops_nulled_out_cb();
if (test__start_subtest("struct_ops_forgotten_cb"))
test_struct_ops_forgotten_cb();
+ if (test__start_subtest("test_detach_link"))
+ test_detach_link();
}
diff --git a/tools/testing/selftests/bpf/progs/struct_ops_detach.c b/tools/testing/selftests/bpf/progs/struct_ops_detach.c
new file mode 100644
index 000000000000..45eacc2ca657
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/struct_ops_detach.c
@@ -0,0 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+#include <vmlinux.h>
+#include "../bpf_testmod/bpf_testmod.h"
+
+char _license[] SEC("license") = "GPL";
+
+SEC(".struct_ops.link")
+struct bpf_testmod_ops testmod_do_detach;
--
2.34.1
^ permalink raw reply related [flat|nested] 16+ messages in thread* [PATCH bpf-next v4 6/7] selftests/bpf: detach a struct_ops link from the subsystem managing it.
2024-05-21 22:51 [PATCH bpf-next v4 0/7] Notify user space when a struct_ops object is detached/unregistered Kui-Feng Lee
` (4 preceding siblings ...)
2024-05-21 22:51 ` [PATCH bpf-next v4 5/7] selftests/bpf: test struct_ops with epoll Kui-Feng Lee
@ 2024-05-21 22:51 ` Kui-Feng Lee
2024-05-21 22:51 ` [PATCH bpf-next v4 7/7] selftests/bpf: make sure bpf_testmod handling racing link destroying well Kui-Feng Lee
6 siblings, 0 replies; 16+ messages in thread
From: Kui-Feng Lee @ 2024-05-21 22:51 UTC (permalink / raw)
To: bpf, ast, martin.lau, song, kernel-team, andrii
Cc: sinquersw, kuifeng, Kui-Feng Lee
Not only a user space program can detach a struct_ops link, the subsystem
managing a link can also detach the link. This patch adds a kfunc to
simulate detaching a link by the subsystem managing it and makes sure user
space programs get notified through epoll.
Signed-off-by: Kui-Feng Lee <thinker.li@gmail.com>
---
.../selftests/bpf/bpf_testmod/bpf_testmod.c | 42 ++++++++++++
.../bpf/bpf_testmod/bpf_testmod_kfunc.h | 1 +
.../bpf/prog_tests/test_struct_ops_module.c | 67 +++++++++++++++++++
.../selftests/bpf/progs/struct_ops_detach.c | 7 ++
4 files changed, 117 insertions(+)
diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
index 0a09732cde4b..2b3a89609b7e 100644
--- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
+++ b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
@@ -744,6 +744,38 @@ __bpf_kfunc int bpf_kfunc_call_kernel_getpeername(struct addr_args *args)
return err;
}
+static DEFINE_SPINLOCK(detach_lock);
+static struct bpf_link *link_to_detach;
+
+__bpf_kfunc int bpf_dummy_do_link_detach(void)
+{
+ struct bpf_link *link;
+ int ret = -ENOENT;
+
+ /* A subsystem must ensure that a link is valid when detaching the
+ * link. In order to achieve that, the subsystem may need to obtain
+ * a lock to safeguard a table that holds the pointer to the link
+ * being detached. However, the subsystem cannot invoke
+ * link->ops->detach() while holding the lock because other tasks
+ * may be in the process of unregistering, which could lead to
+ * acquiring the same lock and causing a deadlock. This is why
+ * bpf_link_inc_not_zero() is used to maintain the link's validity.
+ */
+ spin_lock(&detach_lock);
+ link = link_to_detach;
+ /* Make sure the link is still valid by increasing its refcnt */
+ if (link && IS_ERR(bpf_link_inc_not_zero(link)))
+ link = NULL;
+ spin_unlock(&detach_lock);
+
+ if (link) {
+ ret = link->ops->detach(link);
+ bpf_link_put(link);
+ }
+
+ return ret;
+}
+
BTF_KFUNCS_START(bpf_testmod_check_kfunc_ids)
BTF_ID_FLAGS(func, bpf_testmod_test_mod_kfunc)
BTF_ID_FLAGS(func, bpf_kfunc_call_test1)
@@ -780,6 +812,7 @@ BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_sendmsg, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_call_sock_sendmsg, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_getsockname, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_getpeername, KF_SLEEPABLE)
+BTF_ID_FLAGS(func, bpf_dummy_do_link_detach)
BTF_KFUNCS_END(bpf_testmod_check_kfunc_ids)
static int bpf_testmod_ops_init(struct btf *btf)
@@ -832,11 +865,20 @@ static int bpf_dummy_reg(void *kdata, struct bpf_link *link)
if (ops->test_2)
ops->test_2(4, ops->data);
+ spin_lock(&detach_lock);
+ if (!link_to_detach)
+ link_to_detach = link;
+ spin_unlock(&detach_lock);
+
return 0;
}
static void bpf_dummy_unreg(void *kdata, struct bpf_link *link)
{
+ spin_lock(&detach_lock);
+ if (link == link_to_detach)
+ link_to_detach = NULL;
+ spin_unlock(&detach_lock);
}
static int bpf_testmod_test_1(void)
diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod_kfunc.h b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod_kfunc.h
index b0d586a6751f..19131baf4a9e 100644
--- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod_kfunc.h
+++ b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod_kfunc.h
@@ -121,6 +121,7 @@ void bpf_kfunc_call_test_fail1(struct prog_test_fail1 *p);
void bpf_kfunc_call_test_fail2(struct prog_test_fail2 *p);
void bpf_kfunc_call_test_fail3(struct prog_test_fail3 *p);
void bpf_kfunc_call_test_mem_len_fail1(void *mem, int len);
+int bpf_dummy_do_link_detach(void) __ksym;
void bpf_kfunc_common_test(void) __ksym;
diff --git a/tools/testing/selftests/bpf/prog_tests/test_struct_ops_module.c b/tools/testing/selftests/bpf/prog_tests/test_struct_ops_module.c
index bbcf12696a6b..f4000bf04752 100644
--- a/tools/testing/selftests/bpf/prog_tests/test_struct_ops_module.c
+++ b/tools/testing/selftests/bpf/prog_tests/test_struct_ops_module.c
@@ -2,6 +2,7 @@
/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
#include <test_progs.h>
#include <time.h>
+#include <network_helpers.h>
#include <sys/epoll.h>
@@ -297,6 +298,70 @@ static void test_detach_link(void)
struct_ops_detach__destroy(skel);
}
+/* Detach a link from the subsystem that the link was registered to */
+static void test_subsystem_detach(void)
+{
+ LIBBPF_OPTS(bpf_test_run_opts, topts,
+ .data_in = &pkt_v4,
+ .data_size_in = sizeof(pkt_v4));
+ struct epoll_event ev, events[2];
+ struct struct_ops_detach *skel;
+ struct bpf_link *link = NULL;
+ int fd, epollfd = -1, nfds;
+ int prog_fd;
+ int err;
+
+ skel = struct_ops_detach__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "struct_ops_detach_open_and_load"))
+ return;
+
+ link = bpf_map__attach_struct_ops(skel->maps.testmod_do_detach);
+ if (!ASSERT_OK_PTR(link, "attach_struct_ops"))
+ goto cleanup;
+
+ fd = bpf_link__fd(link);
+ if (!ASSERT_GE(fd, 0, "link_fd"))
+ goto cleanup;
+
+ prog_fd = bpf_program__fd(skel->progs.start_detach);
+ if (!ASSERT_GE(prog_fd, 0, "start_detach_fd"))
+ goto cleanup;
+
+ /* Do detachment from the registered subsystem */
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ if (!ASSERT_OK(err, "start_detach_run"))
+ goto cleanup;
+
+ if (!ASSERT_EQ(topts.retval, 0, "start_detach_run_retval"))
+ goto cleanup;
+
+ epollfd = epoll_create1(0);
+ if (!ASSERT_GE(epollfd, 0, "epoll_create1"))
+ goto cleanup;
+
+ ev.events = EPOLLHUP;
+ ev.data.fd = fd;
+ err = epoll_ctl(epollfd, EPOLL_CTL_ADD, fd, &ev);
+ if (!ASSERT_OK(err, "epoll_ctl"))
+ goto cleanup;
+
+ /* Wait for EPOLLHUP */
+ nfds = epoll_wait(epollfd, events, 2, 5000);
+ if (!ASSERT_EQ(nfds, 1, "epoll_wait"))
+ goto cleanup;
+
+ if (!ASSERT_EQ(events[0].data.fd, fd, "epoll_wait_fd"))
+ goto cleanup;
+ if (!ASSERT_TRUE(events[0].events & EPOLLHUP, "events[0].events"))
+ goto cleanup;
+
+cleanup:
+ if (epollfd >= 0)
+ close(epollfd);
+ bpf_link__destroy(link);
+ struct_ops_detach__destroy(skel);
+}
+
void serial_test_struct_ops_module(void)
{
if (test__start_subtest("struct_ops_load"))
@@ -311,5 +376,7 @@ void serial_test_struct_ops_module(void)
test_struct_ops_forgotten_cb();
if (test__start_subtest("test_detach_link"))
test_detach_link();
+ if (test__start_subtest("test_subsystem_detach"))
+ test_subsystem_detach();
}
diff --git a/tools/testing/selftests/bpf/progs/struct_ops_detach.c b/tools/testing/selftests/bpf/progs/struct_ops_detach.c
index 45eacc2ca657..5c742b0df04d 100644
--- a/tools/testing/selftests/bpf/progs/struct_ops_detach.c
+++ b/tools/testing/selftests/bpf/progs/struct_ops_detach.c
@@ -2,8 +2,15 @@
/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
#include <vmlinux.h>
#include "../bpf_testmod/bpf_testmod.h"
+#include "../bpf_testmod/bpf_testmod_kfunc.h"
char _license[] SEC("license") = "GPL";
SEC(".struct_ops.link")
struct bpf_testmod_ops testmod_do_detach;
+
+SEC("tc")
+int start_detach(void *skb)
+{
+ return bpf_dummy_do_link_detach();
+}
--
2.34.1
^ permalink raw reply related [flat|nested] 16+ messages in thread* [PATCH bpf-next v4 7/7] selftests/bpf: make sure bpf_testmod handling racing link destroying well.
2024-05-21 22:51 [PATCH bpf-next v4 0/7] Notify user space when a struct_ops object is detached/unregistered Kui-Feng Lee
` (5 preceding siblings ...)
2024-05-21 22:51 ` [PATCH bpf-next v4 6/7] selftests/bpf: detach a struct_ops link from the subsystem managing it Kui-Feng Lee
@ 2024-05-21 22:51 ` Kui-Feng Lee
6 siblings, 0 replies; 16+ messages in thread
From: Kui-Feng Lee @ 2024-05-21 22:51 UTC (permalink / raw)
To: bpf, ast, martin.lau, song, kernel-team, andrii
Cc: sinquersw, kuifeng, Kui-Feng Lee
Do detachment from the subsystem after a link has been closed/freed. This
test makes sure the pattern implemented by bpf_dummy_do_link_detach() works
correctly.
Refer to bpf_dummy_do_link_detach() in bpf_testmod.c for more details.
Signed-off-by: Kui-Feng Lee <thinker.li@gmail.com>
---
.../bpf/prog_tests/test_struct_ops_module.c | 44 +++++++++++++++++++
1 file changed, 44 insertions(+)
diff --git a/tools/testing/selftests/bpf/prog_tests/test_struct_ops_module.c b/tools/testing/selftests/bpf/prog_tests/test_struct_ops_module.c
index f4000bf04752..3a8cdf440edd 100644
--- a/tools/testing/selftests/bpf/prog_tests/test_struct_ops_module.c
+++ b/tools/testing/selftests/bpf/prog_tests/test_struct_ops_module.c
@@ -362,6 +362,48 @@ static void test_subsystem_detach(void)
struct_ops_detach__destroy(skel);
}
+/* A subsystem detaches a link while the link is going to be free. */
+static void test_subsystem_detach_free(void)
+{
+ LIBBPF_OPTS(bpf_test_run_opts, topts,
+ .data_in = &pkt_v4,
+ .data_size_in = sizeof(pkt_v4));
+ struct struct_ops_detach *skel;
+ struct bpf_link *link = NULL;
+ int prog_fd;
+ int err;
+
+ skel = struct_ops_detach__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "struct_ops_detach_open_and_load"))
+ return;
+
+ link = bpf_map__attach_struct_ops(skel->maps.testmod_do_detach);
+ if (!ASSERT_OK_PTR(link, "attach_struct_ops"))
+ goto cleanup;
+
+ bpf_link__destroy(link);
+
+ prog_fd = bpf_program__fd(skel->progs.start_detach);
+ if (!ASSERT_GE(prog_fd, 0, "start_detach_fd"))
+ goto cleanup;
+
+ /* Do detachment from the registered subsystem */
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ if (!ASSERT_OK(err, "start_detach_run"))
+ goto cleanup;
+
+ /* The link has zeroed refcount value or even has been
+ * unregistered, so the detachment from the subsystem should fail.
+ */
+ ASSERT_EQ(topts.retval, (u32)-ENOENT, "start_detach_run_retval");
+
+ /* Sync RCU to make sure the link is freed without any crash */
+ ASSERT_OK(kern_sync_rcu(), "kern_sync_rcu");
+
+cleanup:
+ struct_ops_detach__destroy(skel);
+}
+
void serial_test_struct_ops_module(void)
{
if (test__start_subtest("struct_ops_load"))
@@ -378,5 +420,7 @@ void serial_test_struct_ops_module(void)
test_detach_link();
if (test__start_subtest("test_subsystem_detach"))
test_subsystem_detach();
+ if (test__start_subtest("test_subsystem_detach_free"))
+ test_subsystem_detach_free();
}
--
2.34.1
^ permalink raw reply related [flat|nested] 16+ messages in thread