* [PATCH bpf-next] bpf: support deferring bpf_link dealloc to after RCU grace period
@ 2024-03-26 21:14 Andrii Nakryiko
2024-03-28 10:10 ` Jiri Olsa
0 siblings, 1 reply; 2+ messages in thread
From: Andrii Nakryiko @ 2024-03-26 21:14 UTC (permalink / raw)
To: bpf, ast, daniel, martin.lau
Cc: andrii, kernel-team, syzbot+981935d9485a560bfbcb,
syzbot+2cb5a6c573e98db598cc, syzbot+62d8b26793e8a2bd0516
BPF link for some program types is passed as a "context" which can be
used by those BPF programs to look up additional information. E.g., for
BPF raw tracepoints, link is used to fetch BPF cookie value, similarly
for BPF multi-kprobes and multi-uprobes.
Because of this runtime dependency, when bpf_link refcnt drops to zero
there could still be active BPF programs running that access link data
(cookie, program pointer, etc).
This patch adds generic support to defer bpf_link dealloc callback to
after RCU GP, if requested. This is done by exposing two different
deallocation callbacks, one synchronous and one deferred. If deferred
one is provided, bpf_link_free() will schedule dealloc_deferred()
callback to happen after RCU GP.
BPF is using two flavors of RCU: "classic" non-sleepable one and RCU
tasks trace one. The latter is used when sleepable BPF programs are
used. bpf_link_free() accommodates that by checking underlying BPF
program's sleepable flag, and goes either through normal RCU GP only for
non-sleepable, or through RCU tasks trace GP *and* then normal RCU GP
(taking into account rcu_trace_implies_rcu_gp() optimization), if BPF
program is sleepable.
We use this for raw tracepoint, multi-kprobe, and multi-uprobe links,
all of which dereference link during program run.
Fixes: d4dfc5700e86 ("bpf: pass whole link instead of prog when triggering raw tracepoint")
Fixes: 0dcac2725406 ("bpf: Add multi kprobe link")
Fixes: 89ae89f53d20 ("bpf: Add multi uprobe link")
Reported-by: syzbot+981935d9485a560bfbcb@syzkaller.appspotmail.com
Reported-by: syzbot+2cb5a6c573e98db598cc@syzkaller.appspotmail.com
Reported-by: syzbot+62d8b26793e8a2bd0516@syzkaller.appspotmail.com
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
---
include/linux/bpf.h | 16 +++++++++++++++-
kernel/bpf/syscall.c | 35 ++++++++++++++++++++++++++++++++---
kernel/trace/bpf_trace.c | 4 ++--
3 files changed, 49 insertions(+), 6 deletions(-)
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 62762390c93d..e52d5b3ee45e 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -1573,12 +1573,26 @@ struct bpf_link {
enum bpf_link_type type;
const struct bpf_link_ops *ops;
struct bpf_prog *prog;
- struct work_struct work;
+ /* rcu is used before freeing, work can be used to schedule that
+ * RCU-based freeing before that, so they never overlap
+ */
+ union {
+ struct rcu_head rcu;
+ struct work_struct work;
+ };
};
struct bpf_link_ops {
void (*release)(struct bpf_link *link);
+ /* deallocate link resources callback, called without RCU grace period
+ * waiting
+ */
void (*dealloc)(struct bpf_link *link);
+ /* deallocate link resources callback, called after RCU grace period;
+ * if underlying BPF program is sleepable we go through tasks trace
+ * RCU GP and then "classic" RCU GP
+ */
+ void (*dealloc_deferred)(struct bpf_link *link);
int (*detach)(struct bpf_link *link);
int (*update_prog)(struct bpf_link *link, struct bpf_prog *new_prog,
struct bpf_prog *old_prog);
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index e44c276e8617..c0f2f052a02c 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -3024,17 +3024,46 @@ void bpf_link_inc(struct bpf_link *link)
atomic64_inc(&link->refcnt);
}
+static void bpf_link_defer_dealloc_rcu_gp(struct rcu_head *rcu)
+{
+ struct bpf_link *link = container_of(rcu, struct bpf_link, rcu);
+
+ /* free bpf_link and its containing memory */
+ link->ops->dealloc_deferred(link);
+}
+
+static void bpf_link_defer_dealloc_mult_rcu_gp(struct rcu_head *rcu)
+{
+ if (rcu_trace_implies_rcu_gp())
+ bpf_link_defer_dealloc_rcu_gp(rcu);
+ else
+ call_rcu(rcu, bpf_link_defer_dealloc_rcu_gp);
+}
+
/* bpf_link_free is guaranteed to be called from process context */
static void bpf_link_free(struct bpf_link *link)
{
+ bool sleepable = false;
+
bpf_link_free_id(link->id);
if (link->prog) {
+ sleepable = link->prog->sleepable;
/* detach BPF program, clean up used resources */
link->ops->release(link);
bpf_prog_put(link->prog);
}
- /* free bpf_link and its containing memory */
- link->ops->dealloc(link);
+ if (link->ops->dealloc_deferred) {
+ /* schedule BPF link deallocation; if underlying BPF program
+ * is sleepable, we need to first wait for RCU tasks trace
+ * sync, then go through "classic" RCU grace period
+ */
+ if (sleepable)
+ call_rcu_tasks_trace(&link->rcu, bpf_link_defer_dealloc_mult_rcu_gp);
+ else
+ call_rcu(&link->rcu, bpf_link_defer_dealloc_rcu_gp);
+ }
+ if (link->ops->dealloc)
+ link->ops->dealloc(link);
}
static void bpf_link_put_deferred(struct work_struct *work)
@@ -3539,7 +3568,7 @@ static int bpf_raw_tp_link_fill_link_info(const struct bpf_link *link,
static const struct bpf_link_ops bpf_raw_tp_link_lops = {
.release = bpf_raw_tp_link_release,
- .dealloc = bpf_raw_tp_link_dealloc,
+ .dealloc_deferred = bpf_raw_tp_link_dealloc,
.show_fdinfo = bpf_raw_tp_link_show_fdinfo,
.fill_link_info = bpf_raw_tp_link_fill_link_info,
};
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 6d0c95638e1b..98eacfacb73a 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -2740,7 +2740,7 @@ static int bpf_kprobe_multi_link_fill_link_info(const struct bpf_link *link,
static const struct bpf_link_ops bpf_kprobe_multi_link_lops = {
.release = bpf_kprobe_multi_link_release,
- .dealloc = bpf_kprobe_multi_link_dealloc,
+ .dealloc_deferred = bpf_kprobe_multi_link_dealloc,
.fill_link_info = bpf_kprobe_multi_link_fill_link_info,
};
@@ -3254,7 +3254,7 @@ static int bpf_uprobe_multi_link_fill_link_info(const struct bpf_link *link,
static const struct bpf_link_ops bpf_uprobe_multi_link_lops = {
.release = bpf_uprobe_multi_link_release,
- .dealloc = bpf_uprobe_multi_link_dealloc,
+ .dealloc_deferred = bpf_uprobe_multi_link_dealloc,
.fill_link_info = bpf_uprobe_multi_link_fill_link_info,
};
--
2.43.0
^ permalink raw reply related [flat|nested] 2+ messages in thread* Re: [PATCH bpf-next] bpf: support deferring bpf_link dealloc to after RCU grace period
2024-03-26 21:14 [PATCH bpf-next] bpf: support deferring bpf_link dealloc to after RCU grace period Andrii Nakryiko
@ 2024-03-28 10:10 ` Jiri Olsa
0 siblings, 0 replies; 2+ messages in thread
From: Jiri Olsa @ 2024-03-28 10:10 UTC (permalink / raw)
To: Andrii Nakryiko
Cc: bpf, ast, daniel, martin.lau, kernel-team,
syzbot+981935d9485a560bfbcb, syzbot+2cb5a6c573e98db598cc,
syzbot+62d8b26793e8a2bd0516
On Tue, Mar 26, 2024 at 02:14:27PM -0700, Andrii Nakryiko wrote:
> BPF link for some program types is passed as a "context" which can be
> used by those BPF programs to look up additional information. E.g., for
> BPF raw tracepoints, link is used to fetch BPF cookie value, similarly
> for BPF multi-kprobes and multi-uprobes.
>
> Because of this runtime dependency, when bpf_link refcnt drops to zero
> there could still be active BPF programs running that access link data
> (cookie, program pointer, etc).
>
> This patch adds generic support to defer bpf_link dealloc callback to
> after RCU GP, if requested. This is done by exposing two different
> deallocation callbacks, one synchronous and one deferred. If deferred
> one is provided, bpf_link_free() will schedule dealloc_deferred()
> callback to happen after RCU GP.
>
> BPF is using two flavors of RCU: "classic" non-sleepable one and RCU
> tasks trace one. The latter is used when sleepable BPF programs are
> used. bpf_link_free() accommodates that by checking underlying BPF
> program's sleepable flag, and goes either through normal RCU GP only for
> non-sleepable, or through RCU tasks trace GP *and* then normal RCU GP
> (taking into account rcu_trace_implies_rcu_gp() optimization), if BPF
> program is sleepable.
>
> We use this for raw tracepoint, multi-kprobe, and multi-uprobe links,
> all of which dereference link during program run.
>
> Fixes: d4dfc5700e86 ("bpf: pass whole link instead of prog when triggering raw tracepoint")
> Fixes: 0dcac2725406 ("bpf: Add multi kprobe link")
> Fixes: 89ae89f53d20 ("bpf: Add multi uprobe link")
> Reported-by: syzbot+981935d9485a560bfbcb@syzkaller.appspotmail.com
> Reported-by: syzbot+2cb5a6c573e98db598cc@syzkaller.appspotmail.com
> Reported-by: syzbot+62d8b26793e8a2bd0516@syzkaller.appspotmail.com
> Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Acked-by: Jiri Olsa <jolsa@kernel.org>
jirka
> ---
> include/linux/bpf.h | 16 +++++++++++++++-
> kernel/bpf/syscall.c | 35 ++++++++++++++++++++++++++++++++---
> kernel/trace/bpf_trace.c | 4 ++--
> 3 files changed, 49 insertions(+), 6 deletions(-)
>
> diff --git a/include/linux/bpf.h b/include/linux/bpf.h
> index 62762390c93d..e52d5b3ee45e 100644
> --- a/include/linux/bpf.h
> +++ b/include/linux/bpf.h
> @@ -1573,12 +1573,26 @@ struct bpf_link {
> enum bpf_link_type type;
> const struct bpf_link_ops *ops;
> struct bpf_prog *prog;
> - struct work_struct work;
> + /* rcu is used before freeing, work can be used to schedule that
> + * RCU-based freeing before that, so they never overlap
> + */
> + union {
> + struct rcu_head rcu;
> + struct work_struct work;
> + };
> };
>
> struct bpf_link_ops {
> void (*release)(struct bpf_link *link);
> + /* deallocate link resources callback, called without RCU grace period
> + * waiting
> + */
> void (*dealloc)(struct bpf_link *link);
> + /* deallocate link resources callback, called after RCU grace period;
> + * if underlying BPF program is sleepable we go through tasks trace
> + * RCU GP and then "classic" RCU GP
> + */
> + void (*dealloc_deferred)(struct bpf_link *link);
> int (*detach)(struct bpf_link *link);
> int (*update_prog)(struct bpf_link *link, struct bpf_prog *new_prog,
> struct bpf_prog *old_prog);
> diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
> index e44c276e8617..c0f2f052a02c 100644
> --- a/kernel/bpf/syscall.c
> +++ b/kernel/bpf/syscall.c
> @@ -3024,17 +3024,46 @@ void bpf_link_inc(struct bpf_link *link)
> atomic64_inc(&link->refcnt);
> }
>
> +static void bpf_link_defer_dealloc_rcu_gp(struct rcu_head *rcu)
> +{
> + struct bpf_link *link = container_of(rcu, struct bpf_link, rcu);
> +
> + /* free bpf_link and its containing memory */
> + link->ops->dealloc_deferred(link);
> +}
> +
> +static void bpf_link_defer_dealloc_mult_rcu_gp(struct rcu_head *rcu)
> +{
> + if (rcu_trace_implies_rcu_gp())
> + bpf_link_defer_dealloc_rcu_gp(rcu);
> + else
> + call_rcu(rcu, bpf_link_defer_dealloc_rcu_gp);
> +}
> +
> /* bpf_link_free is guaranteed to be called from process context */
> static void bpf_link_free(struct bpf_link *link)
> {
> + bool sleepable = false;
> +
> bpf_link_free_id(link->id);
> if (link->prog) {
> + sleepable = link->prog->sleepable;
> /* detach BPF program, clean up used resources */
> link->ops->release(link);
> bpf_prog_put(link->prog);
> }
> - /* free bpf_link and its containing memory */
> - link->ops->dealloc(link);
> + if (link->ops->dealloc_deferred) {
> + /* schedule BPF link deallocation; if underlying BPF program
> + * is sleepable, we need to first wait for RCU tasks trace
> + * sync, then go through "classic" RCU grace period
> + */
> + if (sleepable)
> + call_rcu_tasks_trace(&link->rcu, bpf_link_defer_dealloc_mult_rcu_gp);
> + else
> + call_rcu(&link->rcu, bpf_link_defer_dealloc_rcu_gp);
> + }
> + if (link->ops->dealloc)
> + link->ops->dealloc(link);
> }
>
> static void bpf_link_put_deferred(struct work_struct *work)
> @@ -3539,7 +3568,7 @@ static int bpf_raw_tp_link_fill_link_info(const struct bpf_link *link,
>
> static const struct bpf_link_ops bpf_raw_tp_link_lops = {
> .release = bpf_raw_tp_link_release,
> - .dealloc = bpf_raw_tp_link_dealloc,
> + .dealloc_deferred = bpf_raw_tp_link_dealloc,
> .show_fdinfo = bpf_raw_tp_link_show_fdinfo,
> .fill_link_info = bpf_raw_tp_link_fill_link_info,
> };
> diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
> index 6d0c95638e1b..98eacfacb73a 100644
> --- a/kernel/trace/bpf_trace.c
> +++ b/kernel/trace/bpf_trace.c
> @@ -2740,7 +2740,7 @@ static int bpf_kprobe_multi_link_fill_link_info(const struct bpf_link *link,
>
> static const struct bpf_link_ops bpf_kprobe_multi_link_lops = {
> .release = bpf_kprobe_multi_link_release,
> - .dealloc = bpf_kprobe_multi_link_dealloc,
> + .dealloc_deferred = bpf_kprobe_multi_link_dealloc,
> .fill_link_info = bpf_kprobe_multi_link_fill_link_info,
> };
>
> @@ -3254,7 +3254,7 @@ static int bpf_uprobe_multi_link_fill_link_info(const struct bpf_link *link,
>
> static const struct bpf_link_ops bpf_uprobe_multi_link_lops = {
> .release = bpf_uprobe_multi_link_release,
> - .dealloc = bpf_uprobe_multi_link_dealloc,
> + .dealloc_deferred = bpf_uprobe_multi_link_dealloc,
> .fill_link_info = bpf_uprobe_multi_link_fill_link_info,
> };
>
> --
> 2.43.0
>
>
^ permalink raw reply [flat|nested] 2+ messages in thread
end of thread, other threads:[~2024-03-28 10:10 UTC | newest]
Thread overview: 2+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2024-03-26 21:14 [PATCH bpf-next] bpf: support deferring bpf_link dealloc to after RCU grace period Andrii Nakryiko
2024-03-28 10:10 ` Jiri Olsa
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox