* [PATCHv5 bpf-next 01/13] uprobe: Add data pointer to consumer handlers
2024-09-29 20:57 [PATCHv5 bpf-next 00/13] uprobe, bpf: Add session support Jiri Olsa
@ 2024-09-29 20:57 ` Jiri Olsa
2024-09-30 9:34 ` Oleg Nesterov
2024-09-30 21:35 ` Andrii Nakryiko
2024-09-29 20:57 ` [PATCHv5 bpf-next 02/13] uprobe: Add support for session consumer Jiri Olsa
` (11 subsequent siblings)
12 siblings, 2 replies; 29+ messages in thread
From: Jiri Olsa @ 2024-09-29 20:57 UTC (permalink / raw)
To: Oleg Nesterov, Peter Zijlstra, Alexei Starovoitov,
Daniel Borkmann, Andrii Nakryiko
Cc: bpf, Martin KaFai Lau, Song Liu, Yonghong Song, John Fastabend,
KP Singh, Stanislav Fomichev, Hao Luo, Steven Rostedt,
Masami Hiramatsu, linux-kernel, linux-trace-kernel
Adding a data pointer to both entry and exit consumer handlers and to
all their users. The functionality itself is coming in a following change.
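For illustration only (not part of this patch), an out-of-tree consumer
using the extended callback signatures would look roughly like the
hypothetical sketch below:

  static int my_handler(struct uprobe_consumer *self, struct pt_regs *regs,
                        __u64 *data)
  {
          /* 'data' stays NULL until the session support in the next patch */
          return 0;
  }

  static int my_ret_handler(struct uprobe_consumer *self, unsigned long func,
                            struct pt_regs *regs, __u64 *data)
  {
          return 0;
  }

  static struct uprobe_consumer my_consumer = {
          .handler     = my_handler,
          .ret_handler = my_ret_handler,
  };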
Signed-off-by: Jiri Olsa <jolsa@kernel.org>
---
include/linux/uprobes.h | 4 ++--
kernel/events/uprobes.c | 4 ++--
kernel/trace/bpf_trace.c | 6 ++++--
kernel/trace/trace_uprobe.c | 12 ++++++++----
.../testing/selftests/bpf/bpf_testmod/bpf_testmod.c | 2 +-
5 files changed, 17 insertions(+), 11 deletions(-)
diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h
index 2b294bf1881f..bb265a632b91 100644
--- a/include/linux/uprobes.h
+++ b/include/linux/uprobes.h
@@ -37,10 +37,10 @@ struct uprobe_consumer {
* for the current process. If filter() is omitted or returns true,
* UPROBE_HANDLER_REMOVE is effectively ignored.
*/
- int (*handler)(struct uprobe_consumer *self, struct pt_regs *regs);
+ int (*handler)(struct uprobe_consumer *self, struct pt_regs *regs, __u64 *data);
int (*ret_handler)(struct uprobe_consumer *self,
unsigned long func,
- struct pt_regs *regs);
+ struct pt_regs *regs, __u64 *data);
bool (*filter)(struct uprobe_consumer *self, struct mm_struct *mm);
struct list_head cons_node;
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 2ec796e2f055..2ba93f8a31aa 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -2139,7 +2139,7 @@ static void handler_chain(struct uprobe *uprobe, struct pt_regs *regs)
int rc = 0;
if (uc->handler) {
- rc = uc->handler(uc, regs);
+ rc = uc->handler(uc, regs, NULL);
WARN(rc & ~UPROBE_HANDLER_MASK,
"bad rc=0x%x from %ps()\n", rc, uc->handler);
}
@@ -2179,7 +2179,7 @@ handle_uretprobe_chain(struct return_instance *ri, struct pt_regs *regs)
list_for_each_entry_srcu(uc, &uprobe->consumers, cons_node,
srcu_read_lock_held(&uprobes_srcu)) {
if (uc->ret_handler)
- uc->ret_handler(uc, ri->func, regs);
+ uc->ret_handler(uc, ri->func, regs, NULL);
}
srcu_read_unlock(&uprobes_srcu, srcu_idx);
}
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index a582cd25ca87..fdab7ecd8dfa 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -3244,7 +3244,8 @@ uprobe_multi_link_filter(struct uprobe_consumer *con, struct mm_struct *mm)
}
static int
-uprobe_multi_link_handler(struct uprobe_consumer *con, struct pt_regs *regs)
+uprobe_multi_link_handler(struct uprobe_consumer *con, struct pt_regs *regs,
+ __u64 *data)
{
struct bpf_uprobe *uprobe;
@@ -3253,7 +3254,8 @@ uprobe_multi_link_handler(struct uprobe_consumer *con, struct pt_regs *regs)
}
static int
-uprobe_multi_link_ret_handler(struct uprobe_consumer *con, unsigned long func, struct pt_regs *regs)
+uprobe_multi_link_ret_handler(struct uprobe_consumer *con, unsigned long func, struct pt_regs *regs,
+ __u64 *data)
{
struct bpf_uprobe *uprobe;
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index f7443e996b1b..11103dde897b 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -88,9 +88,11 @@ static struct trace_uprobe *to_trace_uprobe(struct dyn_event *ev)
static int register_uprobe_event(struct trace_uprobe *tu);
static int unregister_uprobe_event(struct trace_uprobe *tu);
-static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs);
+static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs,
+ __u64 *data);
static int uretprobe_dispatcher(struct uprobe_consumer *con,
- unsigned long func, struct pt_regs *regs);
+ unsigned long func, struct pt_regs *regs,
+ __u64 *data);
#ifdef CONFIG_STACK_GROWSUP
static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
@@ -1500,7 +1502,8 @@ trace_uprobe_register(struct trace_event_call *event, enum trace_reg type,
}
}
-static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs)
+static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs,
+ __u64 *data)
{
struct trace_uprobe *tu;
struct uprobe_dispatch_data udd;
@@ -1530,7 +1533,8 @@ static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs)
}
static int uretprobe_dispatcher(struct uprobe_consumer *con,
- unsigned long func, struct pt_regs *regs)
+ unsigned long func, struct pt_regs *regs,
+ __u64 *data)
{
struct trace_uprobe *tu;
struct uprobe_dispatch_data udd;
diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
index 8835761d9a12..12005e3dc3e4 100644
--- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
+++ b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
@@ -461,7 +461,7 @@ static struct bin_attribute bin_attr_bpf_testmod_file __ro_after_init = {
static int
uprobe_ret_handler(struct uprobe_consumer *self, unsigned long func,
- struct pt_regs *regs)
+ struct pt_regs *regs, __u64 *data)
{
regs->ax = 0x12345678deadbeef;
--
2.46.1
* Re: [PATCHv5 bpf-next 01/13] uprobe: Add data pointer to consumer handlers
2024-09-29 20:57 ` [PATCHv5 bpf-next 01/13] uprobe: Add data pointer to consumer handlers Jiri Olsa
@ 2024-09-30 9:34 ` Oleg Nesterov
2024-09-30 21:35 ` Andrii Nakryiko
1 sibling, 0 replies; 29+ messages in thread
From: Oleg Nesterov @ 2024-09-30 9:34 UTC (permalink / raw)
To: Jiri Olsa
Cc: Peter Zijlstra, Alexei Starovoitov, Daniel Borkmann,
Andrii Nakryiko, bpf, Martin KaFai Lau, Song Liu, Yonghong Song,
John Fastabend, KP Singh, Stanislav Fomichev, Hao Luo,
Steven Rostedt, Masami Hiramatsu, linux-kernel,
linux-trace-kernel
On 09/29, Jiri Olsa wrote:
>
> Adding a data pointer to both entry and exit consumer handlers and to
> all their users. The functionality itself is coming in a following change.
>
> Signed-off-by: Jiri Olsa <jolsa@kernel.org>
Acked-by: Oleg Nesterov <oleg@redhat.com>
* Re: [PATCHv5 bpf-next 01/13] uprobe: Add data pointer to consumer handlers
2024-09-29 20:57 ` [PATCHv5 bpf-next 01/13] uprobe: Add data pointer to consumer handlers Jiri Olsa
2024-09-30 9:34 ` Oleg Nesterov
@ 2024-09-30 21:35 ` Andrii Nakryiko
1 sibling, 0 replies; 29+ messages in thread
From: Andrii Nakryiko @ 2024-09-30 21:35 UTC (permalink / raw)
To: Jiri Olsa
Cc: Oleg Nesterov, Peter Zijlstra, Alexei Starovoitov,
Daniel Borkmann, Andrii Nakryiko, bpf, Martin KaFai Lau, Song Liu,
Yonghong Song, John Fastabend, KP Singh, Stanislav Fomichev,
Hao Luo, Steven Rostedt, Masami Hiramatsu, linux-kernel,
linux-trace-kernel
On Sun, Sep 29, 2024 at 1:57 PM Jiri Olsa <jolsa@kernel.org> wrote:
>
> Adding a data pointer to both entry and exit consumer handlers and to
> all their users. The functionality itself is coming in a following change.
>
> Signed-off-by: Jiri Olsa <jolsa@kernel.org>
> ---
> include/linux/uprobes.h | 4 ++--
> kernel/events/uprobes.c | 4 ++--
> kernel/trace/bpf_trace.c | 6 ++++--
> kernel/trace/trace_uprobe.c | 12 ++++++++----
> .../testing/selftests/bpf/bpf_testmod/bpf_testmod.c | 2 +-
> 5 files changed, 17 insertions(+), 11 deletions(-)
>
LGTM
Acked-by: Andrii Nakryiko <andrii@kernel.org>
[...]
* [PATCHv5 bpf-next 02/13] uprobe: Add support for session consumer
2024-09-29 20:57 [PATCHv5 bpf-next 00/13] uprobe, bpf: Add session support Jiri Olsa
2024-09-29 20:57 ` [PATCHv5 bpf-next 01/13] uprobe: Add data pointer to consumer handlers Jiri Olsa
@ 2024-09-29 20:57 ` Jiri Olsa
2024-09-30 9:40 ` Oleg Nesterov
2024-09-30 21:36 ` Andrii Nakryiko
2024-09-29 20:57 ` [PATCHv5 bpf-next 03/13] bpf: Add support for uprobe multi session attach Jiri Olsa
` (10 subsequent siblings)
12 siblings, 2 replies; 29+ messages in thread
From: Jiri Olsa @ 2024-09-29 20:57 UTC (permalink / raw)
To: Oleg Nesterov, Peter Zijlstra, Alexei Starovoitov,
Daniel Borkmann, Andrii Nakryiko
Cc: bpf, Martin KaFai Lau, Song Liu, Yonghong Song, John Fastabend,
KP Singh, Stanislav Fomichev, Hao Luo, Steven Rostedt,
Masami Hiramatsu, linux-kernel, linux-trace-kernel
This change allows the uprobe consumer to behave as a session, which
means that the 'handler' and 'ret_handler' callbacks are connected in
a way that allows us to:
- control execution of 'ret_handler' from the 'handler' callback
- share data between the 'handler' and 'ret_handler' callbacks
The session concept fits our common use case, where we do filtering
in the entry uprobe and based on the result decide whether to run the
return uprobe (or not).
It's also convenient to share data between the session callbacks.
To achieve this we add a new return value the uprobe consumer can
return from the 'handler' callback:
UPROBE_HANDLER_IGNORE
- Ignore the 'ret_handler' callback for this consumer.
We also store the cookie and pass it to 'ret_handler' when the consumer
has both 'handler' and 'ret_handler' callbacks defined.
The shared data is stored in the return_consumer object array that is
part of the return_instance object. This way handle_uretprobe_chain can
find the related return_consumer and its shared data.
We also store the entry handler return value for cases when there are
multiple consumers on a single uprobe and some of them are ignored and
some are not, in which case the return probe gets installed and we need
a way to find out which consumer needs to be ignored.
The tricky part is when a consumer is registered 'after' the uprobe
entry handler is hit. In such a case this consumer's 'ret_handler' gets
executed as well, but it won't have the proper data pointer set, so we
can filter it out.
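As an illustration (not part of this patch), a session consumer could use
the new contract roughly as in the sketch below; interesting(), get_cookie()
and consume_cookie() are made-up placeholders:

  static int entry_handler(struct uprobe_consumer *self, struct pt_regs *regs,
                           __u64 *data)
  {
          if (!interesting(regs))
                  return UPROBE_HANDLER_IGNORE;   /* skip ret_handler for this hit */
          *data = get_cookie(regs);               /* shared with ret_handler */
          return 0;
  }

  static int ret_handler(struct uprobe_consumer *self, unsigned long func,
                         struct pt_regs *regs, __u64 *data)
  {
          /* data is NULL if this consumer was registered after the entry hit */
          if (data)
                  consume_cookie(*data);
          return 0;
  }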
Suggested-by: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Jiri Olsa <jolsa@kernel.org>
---
include/linux/uprobes.h | 21 +++++-
kernel/events/uprobes.c | 148 +++++++++++++++++++++++++++++++---------
2 files changed, 137 insertions(+), 32 deletions(-)
diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h
index bb265a632b91..dbaf04189548 100644
--- a/include/linux/uprobes.h
+++ b/include/linux/uprobes.h
@@ -23,8 +23,17 @@ struct inode;
struct notifier_block;
struct page;
+/*
+ * Allowed return values from uprobe consumer's handler callback
+ * with following meaning:
+ *
+ * UPROBE_HANDLER_REMOVE
+ * - Remove the uprobe breakpoint from current->mm.
+ * UPROBE_HANDLER_IGNORE
+ * - Ignore ret_handler callback for this consumer.
+ */
#define UPROBE_HANDLER_REMOVE 1
-#define UPROBE_HANDLER_MASK 1
+#define UPROBE_HANDLER_IGNORE 2
#define MAX_URETPROBE_DEPTH 64
@@ -44,6 +53,8 @@ struct uprobe_consumer {
bool (*filter)(struct uprobe_consumer *self, struct mm_struct *mm);
struct list_head cons_node;
+
+ __u64 id; /* set when uprobe_consumer is registered */
};
#ifdef CONFIG_UPROBES
@@ -83,14 +94,22 @@ struct uprobe_task {
unsigned int depth;
};
+struct return_consumer {
+ __u64 cookie;
+ __u64 id;
+};
+
struct return_instance {
struct uprobe *uprobe;
unsigned long func;
unsigned long stack; /* stack pointer */
unsigned long orig_ret_vaddr; /* original return address */
bool chained; /* true, if instance is nested */
+ int consumers_cnt;
struct return_instance *next; /* keep as stack */
+
+ struct return_consumer consumers[] __counted_by(consumers_cnt);
};
enum rp_check {
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 2ba93f8a31aa..76fe535c9b3c 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -65,7 +65,7 @@ struct uprobe {
struct rcu_head rcu;
loff_t offset;
loff_t ref_ctr_offset;
- unsigned long flags;
+ unsigned long flags; /* "unsigned long" so bitops work */
/*
* The generic code assumes that it has two members of unknown type
@@ -825,8 +825,11 @@ static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset,
static void consumer_add(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
+ static atomic64_t id;
+
down_write(&uprobe->consumer_rwsem);
list_add_rcu(&uc->cons_node, &uprobe->consumers);
+ uc->id = (__u64) atomic64_inc_return(&id);
up_write(&uprobe->consumer_rwsem);
}
@@ -1797,6 +1800,34 @@ static struct uprobe_task *get_utask(void)
return current->utask;
}
+static size_t ri_size(int consumers_cnt)
+{
+ struct return_instance *ri;
+
+ return sizeof(*ri) + sizeof(ri->consumers[0]) * consumers_cnt;
+}
+
+#define DEF_CNT 4
+
+static struct return_instance *alloc_return_instance(void)
+{
+ struct return_instance *ri;
+
+ ri = kzalloc(ri_size(DEF_CNT), GFP_KERNEL);
+ if (!ri)
+ return ZERO_SIZE_PTR;
+
+ ri->consumers_cnt = DEF_CNT;
+ return ri;
+}
+
+static struct return_instance *dup_return_instance(struct return_instance *old)
+{
+ size_t size = ri_size(old->consumers_cnt);
+
+ return kmemdup(old, size, GFP_KERNEL);
+}
+
static int dup_utask(struct task_struct *t, struct uprobe_task *o_utask)
{
struct uprobe_task *n_utask;
@@ -1809,11 +1840,10 @@ static int dup_utask(struct task_struct *t, struct uprobe_task *o_utask)
p = &n_utask->return_instances;
for (o = o_utask->return_instances; o; o = o->next) {
- n = kmalloc(sizeof(struct return_instance), GFP_KERNEL);
+ n = dup_return_instance(o);
if (!n)
return -ENOMEM;
- *n = *o;
/*
* uprobe's refcnt has to be positive at this point, kept by
* utask->return_instances items; return_instances can't be
@@ -1906,39 +1936,35 @@ static void cleanup_return_instances(struct uprobe_task *utask, bool chained,
utask->return_instances = ri;
}
-static void prepare_uretprobe(struct uprobe *uprobe, struct pt_regs *regs)
+static void prepare_uretprobe(struct uprobe *uprobe, struct pt_regs *regs,
+ struct return_instance *ri)
{
- struct return_instance *ri;
struct uprobe_task *utask;
unsigned long orig_ret_vaddr, trampoline_vaddr;
bool chained;
if (!get_xol_area())
- return;
+ goto free;
utask = get_utask();
if (!utask)
- return;
+ goto free;
if (utask->depth >= MAX_URETPROBE_DEPTH) {
printk_ratelimited(KERN_INFO "uprobe: omit uretprobe due to"
" nestedness limit pid/tgid=%d/%d\n",
current->pid, current->tgid);
- return;
+ goto free;
}
/* we need to bump refcount to store uprobe in utask */
if (!try_get_uprobe(uprobe))
- return;
-
- ri = kmalloc(sizeof(struct return_instance), GFP_KERNEL);
- if (!ri)
- goto fail;
+ goto free;
trampoline_vaddr = uprobe_get_trampoline_vaddr();
orig_ret_vaddr = arch_uretprobe_hijack_return_addr(trampoline_vaddr, regs);
if (orig_ret_vaddr == -1)
- goto fail;
+ goto put;
/* drop the entries invalidated by longjmp() */
chained = (orig_ret_vaddr == trampoline_vaddr);
@@ -1956,7 +1982,7 @@ static void prepare_uretprobe(struct uprobe *uprobe, struct pt_regs *regs)
* attack from user-space.
*/
uprobe_warn(current, "handle tail call");
- goto fail;
+ goto put;
}
orig_ret_vaddr = utask->return_instances->orig_ret_vaddr;
}
@@ -1971,9 +1997,10 @@ static void prepare_uretprobe(struct uprobe *uprobe, struct pt_regs *regs)
utask->return_instances = ri;
return;
-fail:
- kfree(ri);
+put:
put_uprobe(uprobe);
+free:
+ kfree(ri);
}
/* Prepare to single-step probed instruction out of line. */
@@ -2125,35 +2152,91 @@ static struct uprobe *find_active_uprobe_rcu(unsigned long bp_vaddr, int *is_swb
return uprobe;
}
+static struct return_instance*
+push_consumer(struct return_instance *ri, int idx, __u64 id, __u64 cookie)
+{
+ if (unlikely(ri == ZERO_SIZE_PTR))
+ return ri;
+
+ if (unlikely(idx >= ri->consumers_cnt)) {
+ struct return_instance *old_ri = ri;
+
+ ri->consumers_cnt += DEF_CNT;
+ ri = krealloc(old_ri, ri_size(old_ri->consumers_cnt), GFP_KERNEL);
+ if (!ri) {
+ kfree(old_ri);
+ return ZERO_SIZE_PTR;
+ }
+ }
+
+ ri->consumers[idx].id = id;
+ ri->consumers[idx].cookie = cookie;
+ return ri;
+}
+
+static struct return_consumer *
+return_consumer_find(struct return_instance *ri, int *iter, int id)
+{
+ struct return_consumer *ric;
+ int idx = *iter;
+
+ for (ric = &ri->consumers[idx]; idx < ri->consumers_cnt; idx++, ric++) {
+ if (ric->id == id) {
+ *iter = idx + 1;
+ return ric;
+ }
+ }
+ return NULL;
+}
+
+static bool ignore_ret_handler(int rc)
+{
+ return rc == UPROBE_HANDLER_REMOVE || rc == UPROBE_HANDLER_IGNORE;
+}
+
static void handler_chain(struct uprobe *uprobe, struct pt_regs *regs)
{
struct uprobe_consumer *uc;
- int remove = UPROBE_HANDLER_REMOVE;
- bool need_prep = false; /* prepare return uprobe, when needed */
- bool has_consumers = false;
+ bool has_consumers = false, remove = true;
+ struct return_instance *ri = NULL;
+ int push_idx = 0;
current->utask->auprobe = &uprobe->arch;
list_for_each_entry_srcu(uc, &uprobe->consumers, cons_node,
srcu_read_lock_held(&uprobes_srcu)) {
+ bool session = uc->handler && uc->ret_handler;
+ __u64 cookie = 0;
int rc = 0;
if (uc->handler) {
- rc = uc->handler(uc, regs, NULL);
- WARN(rc & ~UPROBE_HANDLER_MASK,
+ rc = uc->handler(uc, regs, &cookie);
+ WARN(rc < 0 || rc > 2,
"bad rc=0x%x from %ps()\n", rc, uc->handler);
}
- if (uc->ret_handler)
- need_prep = true;
-
- remove &= rc;
+ remove &= rc == UPROBE_HANDLER_REMOVE;
has_consumers = true;
+
+ if (!uc->ret_handler || ignore_ret_handler(rc))
+ continue;
+
+ if (!ri)
+ ri = alloc_return_instance();
+
+ if (session)
+ ri = push_consumer(ri, push_idx++, uc->id, cookie);
}
current->utask->auprobe = NULL;
- if (need_prep && !remove)
- prepare_uretprobe(uprobe, regs); /* put bp at return */
+ if (!ZERO_OR_NULL_PTR(ri)) {
+ /*
+ * The push_idx value has the final number of return consumers,
+ * and ri->consumers_cnt has number of allocated consumers.
+ */
+ ri->consumers_cnt = push_idx;
+ prepare_uretprobe(uprobe, regs, ri);
+ }
if (remove && has_consumers) {
down_read(&uprobe->register_rwsem);
@@ -2172,14 +2255,17 @@ static void
handle_uretprobe_chain(struct return_instance *ri, struct pt_regs *regs)
{
struct uprobe *uprobe = ri->uprobe;
+ struct return_consumer *ric;
struct uprobe_consumer *uc;
- int srcu_idx;
+ int srcu_idx, ric_idx = 0;
srcu_idx = srcu_read_lock(&uprobes_srcu);
list_for_each_entry_srcu(uc, &uprobe->consumers, cons_node,
srcu_read_lock_held(&uprobes_srcu)) {
- if (uc->ret_handler)
- uc->ret_handler(uc, ri->func, regs, NULL);
+ if (uc->ret_handler) {
+ ric = return_consumer_find(ri, &ric_idx, uc->id);
+ uc->ret_handler(uc, ri->func, regs, ric ? &ric->cookie : NULL);
+ }
}
srcu_read_unlock(&uprobes_srcu, srcu_idx);
}
--
2.46.1
* Re: [PATCHv5 bpf-next 02/13] uprobe: Add support for session consumer
2024-09-29 20:57 ` [PATCHv5 bpf-next 02/13] uprobe: Add support for session consumer Jiri Olsa
@ 2024-09-30 9:40 ` Oleg Nesterov
2024-09-30 11:42 ` Jiri Olsa
2024-09-30 21:36 ` Andrii Nakryiko
1 sibling, 1 reply; 29+ messages in thread
From: Oleg Nesterov @ 2024-09-30 9:40 UTC (permalink / raw)
To: Jiri Olsa
Cc: Peter Zijlstra, Alexei Starovoitov, Daniel Borkmann,
Andrii Nakryiko, bpf, Martin KaFai Lau, Song Liu, Yonghong Song,
John Fastabend, KP Singh, Stanislav Fomichev, Hao Luo,
Steven Rostedt, Masami Hiramatsu, linux-kernel,
linux-trace-kernel
Jiri,
LGTM. But I'm afraid you need to send v6, sorry ;)
This change has some (trivial) conflicts in prepare_uretprobe() with the
cleanups I sent yesterday, and Peter is going to queue them.
See https://lore.kernel.org/all/20240929144201.GA9429@redhat.com/
Oleg.
On 09/29, Jiri Olsa wrote:
[...]
* Re: [PATCHv5 bpf-next 02/13] uprobe: Add support for session consumer
2024-09-30 9:40 ` Oleg Nesterov
@ 2024-09-30 11:42 ` Jiri Olsa
0 siblings, 0 replies; 29+ messages in thread
From: Jiri Olsa @ 2024-09-30 11:42 UTC (permalink / raw)
To: Oleg Nesterov
Cc: Peter Zijlstra, Alexei Starovoitov, Daniel Borkmann,
Andrii Nakryiko, bpf, Martin KaFai Lau, Song Liu, Yonghong Song,
John Fastabend, KP Singh, Stanislav Fomichev, Hao Luo,
Steven Rostedt, Masami Hiramatsu, linux-kernel,
linux-trace-kernel
On Mon, Sep 30, 2024 at 11:40:15AM +0200, Oleg Nesterov wrote:
> Jiri,
>
> LGTM. But I'm afraid you need to send v6, sorry ;)
>
> This change has some (trivial) conflicts in prepare_uretprobe() with the
> cleanups I sent yesterday, and Peter is going to queue them.
sure, np.. will wait for any review comments and rebase/resend
thanks,
jirka
>
> See https://lore.kernel.org/all/20240929144201.GA9429@redhat.com/
>
> Oleg.
>
[...]
* Re: [PATCHv5 bpf-next 02/13] uprobe: Add support for session consumer
2024-09-29 20:57 ` [PATCHv5 bpf-next 02/13] uprobe: Add support for session consumer Jiri Olsa
2024-09-30 9:40 ` Oleg Nesterov
@ 2024-09-30 21:36 ` Andrii Nakryiko
2024-10-01 13:17 ` Jiri Olsa
1 sibling, 1 reply; 29+ messages in thread
From: Andrii Nakryiko @ 2024-09-30 21:36 UTC (permalink / raw)
To: Jiri Olsa
Cc: Oleg Nesterov, Peter Zijlstra, Alexei Starovoitov,
Daniel Borkmann, Andrii Nakryiko, bpf, Martin KaFai Lau, Song Liu,
Yonghong Song, John Fastabend, KP Singh, Stanislav Fomichev,
Hao Luo, Steven Rostedt, Masami Hiramatsu, linux-kernel,
linux-trace-kernel
On Sun, Sep 29, 2024 at 1:57 PM Jiri Olsa <jolsa@kernel.org> wrote:
>
[...]
LGTM,
Acked-by: Andrii Nakryiko <andrii@kernel.org>
Note also that I just resent the last patch from my patch set ([0]),
hopefully it will get applied, in which case you'd need to do a tiny
rebase.
[0] https://lore.kernel.org/linux-trace-kernel/20240930212246.1829395-1-andrii@kernel.org/
[...]
* Re: [PATCHv5 bpf-next 02/13] uprobe: Add support for session consumer
2024-09-30 21:36 ` Andrii Nakryiko
@ 2024-10-01 13:17 ` Jiri Olsa
2024-10-01 17:09 ` Andrii Nakryiko
0 siblings, 1 reply; 29+ messages in thread
From: Jiri Olsa @ 2024-10-01 13:17 UTC (permalink / raw)
To: Andrii Nakryiko
Cc: Oleg Nesterov, Peter Zijlstra, Alexei Starovoitov,
Daniel Borkmann, Andrii Nakryiko, bpf, Martin KaFai Lau, Song Liu,
Yonghong Song, John Fastabend, KP Singh, Stanislav Fomichev,
Hao Luo, Steven Rostedt, Masami Hiramatsu, linux-kernel,
linux-trace-kernel
On Mon, Sep 30, 2024 at 02:36:03PM -0700, Andrii Nakryiko wrote:
> On Sun, Sep 29, 2024 at 1:57 PM Jiri Olsa <jolsa@kernel.org> wrote:
> >
[...]
>
> LGTM,
>
> Acked-by: Andrii Nakryiko <andrii@kernel.org>
>
>
> Note also that I just resent the last patch from my patch set ([0]),
> hopefully it will get applied, in which case you'd need to do a tiny
> rebase.
>
> [0] https://lore.kernel.org/linux-trace-kernel/20240930212246.1829395-1-andrii@kernel.org/
the rebase is fine, but what I'm not clear about is that after yours and
Oleg's changes get in, my kernel changes will depend on Peter's perf/core,
while the bpf selftests changes will need bpf-next/master.
jirka
* Re: [PATCHv5 bpf-next 02/13] uprobe: Add support for session consumer
2024-10-01 13:17 ` Jiri Olsa
@ 2024-10-01 17:09 ` Andrii Nakryiko
0 siblings, 0 replies; 29+ messages in thread
From: Andrii Nakryiko @ 2024-10-01 17:09 UTC (permalink / raw)
To: Jiri Olsa
Cc: Oleg Nesterov, Peter Zijlstra, Alexei Starovoitov,
Daniel Borkmann, Andrii Nakryiko, bpf, Martin KaFai Lau, Song Liu,
Yonghong Song, John Fastabend, KP Singh, Stanislav Fomichev,
Hao Luo, Steven Rostedt, Masami Hiramatsu, linux-kernel,
linux-trace-kernel
On Tue, Oct 1, 2024 at 6:17 AM Jiri Olsa <olsajiri@gmail.com> wrote:
>
> On Mon, Sep 30, 2024 at 02:36:03PM -0700, Andrii Nakryiko wrote:
> > On Sun, Sep 29, 2024 at 1:57 PM Jiri Olsa <jolsa@kernel.org> wrote:
> > >
[...]
> >
> > LGTM,
> >
> > Acked-by: Andrii Nakryiko <andrii@kernel.org>
> >
> >
> > Note also that I just resent the last patch from my patch set ([0]),
> > hopefully it will get applied, in which case you'd need to do a tiny
> > rebase.
> >
> > [0] https://lore.kernel.org/linux-trace-kernel/20240930212246.1829395-1-andrii@kernel.org/
>
> the rebase is fine, but what I'm not clear about is that after yours and
> Oleg's changes get in, my kernel changes will depend on peter's perf/core,
> but bpf selftests changes will need bpf-next/master
Yep, and I was waiting for your next revision to discuss logistics,
but perhaps we could do it right here.
I think the uprobe parts should stay in tip/perf/core (if that's where all
uprobe code goes), as we have a bunch of ongoing work that will all
conflict a bit with each other if it lands across multiple trees.
So that means that patches #1 and #2 ideally land in tip/perf/core.
But you have a lot of BPF-specific things that would be inconvenient
to route through tip, so I'd say those should go through bpf-next.
What we can do, if Ingo and Peter are OK with that, is to create a
stable (non-rebaseable) branch off of your first two patches (applied
in tip/perf/core), which we'll merge into bpf-next/master and land the
rest of your patch set there. We've done that with recent struct fd
changes, and there were a few other similar cases in the past, and that
all worked well.
Peter, Ingo, are you guys OK with that approach?
>
> jirka
* [PATCHv5 bpf-next 03/13] bpf: Add support for uprobe multi session attach
2024-09-29 20:57 [PATCHv5 bpf-next 00/13] uprobe, bpf: Add session support Jiri Olsa
2024-09-29 20:57 ` [PATCHv5 bpf-next 01/13] uprobe: Add data pointer to consumer handlers Jiri Olsa
2024-09-29 20:57 ` [PATCHv5 bpf-next 02/13] uprobe: Add support for session consumer Jiri Olsa
@ 2024-09-29 20:57 ` Jiri Olsa
2024-09-30 21:36 ` Andrii Nakryiko
2024-09-29 20:57 ` [PATCHv5 bpf-next 04/13] bpf: Add support for uprobe multi session context Jiri Olsa
` (9 subsequent siblings)
12 siblings, 1 reply; 29+ messages in thread
From: Jiri Olsa @ 2024-09-29 20:57 UTC (permalink / raw)
To: Oleg Nesterov, Peter Zijlstra, Alexei Starovoitov,
Daniel Borkmann, Andrii Nakryiko
Cc: bpf, Martin KaFai Lau, Song Liu, Yonghong Song, John Fastabend,
KP Singh, Stanislav Fomichev, Hao Luo, Steven Rostedt,
Masami Hiramatsu, linux-kernel, linux-trace-kernel
Adding support to attach a BPF program for both the entry and return
probe of the same function. This is a common use case which at the
moment requires creating two uprobe multi links.
Adding a new BPF_TRACE_UPROBE_SESSION attach type that instructs the
kernel to attach a single link program to both the entry and exit probe.
It's possible to control execution of the BPF program on the return
probe simply through the entry BPF program's return value: returning
zero runs the return probe program, returning non-zero skips it.
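As an illustration (not part of this patch; the SEC() convention comes
from the libbpf patch later in the series and the symbols are selftest
placeholders), a session program driving this could look roughly like:
  int pid = 0;    /* set from user space */
  SEC("uprobe.session//proc/self/exe:uprobe_multi_func_1")
  int session_prog(struct pt_regs *ctx)
  {
          /* entry run: 0 executes the return probe, 1 skips it */
          return (bpf_get_current_pid_tgid() >> 32 == pid) ? 0 : 1;
  }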
Acked-by: Andrii Nakryiko <andrii@kernel.org>
Signed-off-by: Jiri Olsa <jolsa@kernel.org>
---
include/uapi/linux/bpf.h | 1 +
kernel/bpf/syscall.c | 9 ++++++--
kernel/trace/bpf_trace.c | 39 +++++++++++++++++++++++++++-------
tools/include/uapi/linux/bpf.h | 1 +
tools/lib/bpf/libbpf.c | 1 +
5 files changed, 41 insertions(+), 10 deletions(-)
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 8ab4d8184b9d..77d0bc5fa986 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -1116,6 +1116,7 @@ enum bpf_attach_type {
BPF_NETKIT_PRIMARY,
BPF_NETKIT_PEER,
BPF_TRACE_KPROBE_SESSION,
+ BPF_TRACE_UPROBE_SESSION,
__MAX_BPF_ATTACH_TYPE
};
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index a8f1808a1ca5..0cf7617e6cb6 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -3983,10 +3983,14 @@ static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog,
if (prog->expected_attach_type == BPF_TRACE_UPROBE_MULTI &&
attach_type != BPF_TRACE_UPROBE_MULTI)
return -EINVAL;
+ if (prog->expected_attach_type == BPF_TRACE_UPROBE_SESSION &&
+ attach_type != BPF_TRACE_UPROBE_SESSION)
+ return -EINVAL;
if (attach_type != BPF_PERF_EVENT &&
attach_type != BPF_TRACE_KPROBE_MULTI &&
attach_type != BPF_TRACE_KPROBE_SESSION &&
- attach_type != BPF_TRACE_UPROBE_MULTI)
+ attach_type != BPF_TRACE_UPROBE_MULTI &&
+ attach_type != BPF_TRACE_UPROBE_SESSION)
return -EINVAL;
return 0;
case BPF_PROG_TYPE_SCHED_CLS:
@@ -5239,7 +5243,8 @@ static int link_create(union bpf_attr *attr, bpfptr_t uattr)
else if (attr->link_create.attach_type == BPF_TRACE_KPROBE_MULTI ||
attr->link_create.attach_type == BPF_TRACE_KPROBE_SESSION)
ret = bpf_kprobe_multi_link_attach(attr, prog);
- else if (attr->link_create.attach_type == BPF_TRACE_UPROBE_MULTI)
+ else if (attr->link_create.attach_type == BPF_TRACE_UPROBE_MULTI ||
+ attr->link_create.attach_type == BPF_TRACE_UPROBE_SESSION)
ret = bpf_uprobe_multi_link_attach(attr, prog);
break;
default:
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index fdab7ecd8dfa..98e940ec184d 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -1557,6 +1557,17 @@ static inline bool is_kprobe_session(const struct bpf_prog *prog)
return prog->expected_attach_type == BPF_TRACE_KPROBE_SESSION;
}
+static inline bool is_uprobe_multi(const struct bpf_prog *prog)
+{
+ return prog->expected_attach_type == BPF_TRACE_UPROBE_MULTI ||
+ prog->expected_attach_type == BPF_TRACE_UPROBE_SESSION;
+}
+
+static inline bool is_uprobe_session(const struct bpf_prog *prog)
+{
+ return prog->expected_attach_type == BPF_TRACE_UPROBE_SESSION;
+}
+
static const struct bpf_func_proto *
kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
@@ -1574,13 +1585,13 @@ kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
case BPF_FUNC_get_func_ip:
if (is_kprobe_multi(prog))
return &bpf_get_func_ip_proto_kprobe_multi;
- if (prog->expected_attach_type == BPF_TRACE_UPROBE_MULTI)
+ if (is_uprobe_multi(prog))
return &bpf_get_func_ip_proto_uprobe_multi;
return &bpf_get_func_ip_proto_kprobe;
case BPF_FUNC_get_attach_cookie:
if (is_kprobe_multi(prog))
return &bpf_get_attach_cookie_proto_kmulti;
- if (prog->expected_attach_type == BPF_TRACE_UPROBE_MULTI)
+ if (is_uprobe_multi(prog))
return &bpf_get_attach_cookie_proto_umulti;
return &bpf_get_attach_cookie_proto_trace;
default:
@@ -3074,6 +3085,7 @@ struct bpf_uprobe {
u64 cookie;
struct uprobe *uprobe;
struct uprobe_consumer consumer;
+ bool session;
};
struct bpf_uprobe_multi_link {
@@ -3248,9 +3260,13 @@ uprobe_multi_link_handler(struct uprobe_consumer *con, struct pt_regs *regs,
__u64 *data)
{
struct bpf_uprobe *uprobe;
+ int ret;
uprobe = container_of(con, struct bpf_uprobe, consumer);
- return uprobe_prog_run(uprobe, instruction_pointer(regs), regs);
+ ret = uprobe_prog_run(uprobe, instruction_pointer(regs), regs);
+ if (uprobe->session)
+ return ret ? UPROBE_HANDLER_IGNORE : 0;
+ return ret;
}
static int
@@ -3260,6 +3276,12 @@ uprobe_multi_link_ret_handler(struct uprobe_consumer *con, unsigned long func, s
struct bpf_uprobe *uprobe;
uprobe = container_of(con, struct bpf_uprobe, consumer);
+ /*
+ * There's chance we could get called with NULL data if we registered uprobe
+ * after it hit entry but before it hit return probe, just ignore it.
+ */
+ if (uprobe->session && !data)
+ return 0;
return uprobe_prog_run(uprobe, func, regs);
}
@@ -3299,7 +3321,7 @@ int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr
if (sizeof(u64) != sizeof(void *))
return -EOPNOTSUPP;
- if (prog->expected_attach_type != BPF_TRACE_UPROBE_MULTI)
+ if (!is_uprobe_multi(prog))
return -EINVAL;
flags = attr->link_create.uprobe_multi.flags;
@@ -3375,11 +3397,12 @@ int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr
uprobes[i].link = link;
- if (flags & BPF_F_UPROBE_MULTI_RETURN)
- uprobes[i].consumer.ret_handler = uprobe_multi_link_ret_handler;
- else
+ if (!(flags & BPF_F_UPROBE_MULTI_RETURN))
uprobes[i].consumer.handler = uprobe_multi_link_handler;
-
+ if (flags & BPF_F_UPROBE_MULTI_RETURN || is_uprobe_session(prog))
+ uprobes[i].consumer.ret_handler = uprobe_multi_link_ret_handler;
+ if (is_uprobe_session(prog))
+ uprobes[i].session = true;
if (pid)
uprobes[i].consumer.filter = uprobe_multi_link_filter;
}
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index 7610883c8191..09bdb1867d4a 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -1116,6 +1116,7 @@ enum bpf_attach_type {
BPF_NETKIT_PRIMARY,
BPF_NETKIT_PEER,
BPF_TRACE_KPROBE_SESSION,
+ BPF_TRACE_UPROBE_SESSION,
__MAX_BPF_ATTACH_TYPE
};
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 712b95e8891b..3587ed7ec359 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -133,6 +133,7 @@ static const char * const attach_type_name[] = {
[BPF_NETKIT_PRIMARY] = "netkit_primary",
[BPF_NETKIT_PEER] = "netkit_peer",
[BPF_TRACE_KPROBE_SESSION] = "trace_kprobe_session",
+ [BPF_TRACE_UPROBE_SESSION] = "trace_uprobe_session",
};
static const char * const link_type_name[] = {
--
2.46.1
* Re: [PATCHv5 bpf-next 03/13] bpf: Add support for uprobe multi session attach
2024-09-29 20:57 ` [PATCHv5 bpf-next 03/13] bpf: Add support for uprobe multi session attach Jiri Olsa
@ 2024-09-30 21:36 ` Andrii Nakryiko
2024-10-01 13:17 ` Jiri Olsa
0 siblings, 1 reply; 29+ messages in thread
From: Andrii Nakryiko @ 2024-09-30 21:36 UTC (permalink / raw)
To: Jiri Olsa
Cc: Oleg Nesterov, Peter Zijlstra, Alexei Starovoitov,
Daniel Borkmann, Andrii Nakryiko, bpf, Martin KaFai Lau, Song Liu,
Yonghong Song, John Fastabend, KP Singh, Stanislav Fomichev,
Hao Luo, Steven Rostedt, Masami Hiramatsu, linux-kernel,
linux-trace-kernel
On Sun, Sep 29, 2024 at 1:58 PM Jiri Olsa <jolsa@kernel.org> wrote:
>
> Adding support to attach BPF program for entry and return probe
> of the same function. This is common use case which at the moment
> requires to create two uprobe multi links.
>
> Adding new BPF_TRACE_UPROBE_SESSION attach type that instructs
> kernel to attach single link program to both entry and exit probe.
>
> It's possible to control execution of the BPF program on return
> probe simply by returning zero or non zero from the entry BPF
> program execution to execute or not the BPF program on return
> probe respectively.
>
> Acked-by: Andrii Nakryiko <andrii@kernel.org>
> Signed-off-by: Jiri Olsa <jolsa@kernel.org>
> ---
> include/uapi/linux/bpf.h | 1 +
> kernel/bpf/syscall.c | 9 ++++++--
> kernel/trace/bpf_trace.c | 39 +++++++++++++++++++++++++++-------
> tools/include/uapi/linux/bpf.h | 1 +
> tools/lib/bpf/libbpf.c | 1 +
> 5 files changed, 41 insertions(+), 10 deletions(-)
>
> diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
> index 8ab4d8184b9d..77d0bc5fa986 100644
> --- a/include/uapi/linux/bpf.h
> +++ b/include/uapi/linux/bpf.h
> @@ -1116,6 +1116,7 @@ enum bpf_attach_type {
> BPF_NETKIT_PRIMARY,
> BPF_NETKIT_PEER,
> BPF_TRACE_KPROBE_SESSION,
> + BPF_TRACE_UPROBE_SESSION,
> __MAX_BPF_ATTACH_TYPE
> };
>
> diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
> index a8f1808a1ca5..0cf7617e6cb6 100644
> --- a/kernel/bpf/syscall.c
> +++ b/kernel/bpf/syscall.c
> @@ -3983,10 +3983,14 @@ static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog,
> if (prog->expected_attach_type == BPF_TRACE_UPROBE_MULTI &&
> attach_type != BPF_TRACE_UPROBE_MULTI)
> return -EINVAL;
> + if (prog->expected_attach_type == BPF_TRACE_UPROBE_SESSION &&
> + attach_type != BPF_TRACE_UPROBE_SESSION)
> + return -EINVAL;
> if (attach_type != BPF_PERF_EVENT &&
> attach_type != BPF_TRACE_KPROBE_MULTI &&
> attach_type != BPF_TRACE_KPROBE_SESSION &&
> - attach_type != BPF_TRACE_UPROBE_MULTI)
> + attach_type != BPF_TRACE_UPROBE_MULTI &&
> + attach_type != BPF_TRACE_UPROBE_SESSION)
> return -EINVAL;
> return 0;
> case BPF_PROG_TYPE_SCHED_CLS:
> @@ -5239,7 +5243,8 @@ static int link_create(union bpf_attr *attr, bpfptr_t uattr)
> else if (attr->link_create.attach_type == BPF_TRACE_KPROBE_MULTI ||
> attr->link_create.attach_type == BPF_TRACE_KPROBE_SESSION)
> ret = bpf_kprobe_multi_link_attach(attr, prog);
> - else if (attr->link_create.attach_type == BPF_TRACE_UPROBE_MULTI)
> + else if (attr->link_create.attach_type == BPF_TRACE_UPROBE_MULTI ||
> + attr->link_create.attach_type == BPF_TRACE_UPROBE_SESSION)
> ret = bpf_uprobe_multi_link_attach(attr, prog);
> break;
> default:
> diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
> index fdab7ecd8dfa..98e940ec184d 100644
> --- a/kernel/trace/bpf_trace.c
> +++ b/kernel/trace/bpf_trace.c
> @@ -1557,6 +1557,17 @@ static inline bool is_kprobe_session(const struct bpf_prog *prog)
> return prog->expected_attach_type == BPF_TRACE_KPROBE_SESSION;
> }
>
> +static inline bool is_uprobe_multi(const struct bpf_prog *prog)
> +{
> + return prog->expected_attach_type == BPF_TRACE_UPROBE_MULTI ||
> + prog->expected_attach_type == BPF_TRACE_UPROBE_SESSION;
> +}
> +
> +static inline bool is_uprobe_session(const struct bpf_prog *prog)
> +{
> + return prog->expected_attach_type == BPF_TRACE_UPROBE_SESSION;
> +}
> +
> static const struct bpf_func_proto *
> kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
> {
> @@ -1574,13 +1585,13 @@ kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
> case BPF_FUNC_get_func_ip:
> if (is_kprobe_multi(prog))
> return &bpf_get_func_ip_proto_kprobe_multi;
> - if (prog->expected_attach_type == BPF_TRACE_UPROBE_MULTI)
> + if (is_uprobe_multi(prog))
> return &bpf_get_func_ip_proto_uprobe_multi;
> return &bpf_get_func_ip_proto_kprobe;
> case BPF_FUNC_get_attach_cookie:
> if (is_kprobe_multi(prog))
> return &bpf_get_attach_cookie_proto_kmulti;
> - if (prog->expected_attach_type == BPF_TRACE_UPROBE_MULTI)
> + if (is_uprobe_multi(prog))
> return &bpf_get_attach_cookie_proto_umulti;
> return &bpf_get_attach_cookie_proto_trace;
> default:
> @@ -3074,6 +3085,7 @@ struct bpf_uprobe {
> u64 cookie;
> struct uprobe *uprobe;
> struct uprobe_consumer consumer;
> + bool session;
> };
>
> struct bpf_uprobe_multi_link {
> @@ -3248,9 +3260,13 @@ uprobe_multi_link_handler(struct uprobe_consumer *con, struct pt_regs *regs,
> __u64 *data)
> {
> struct bpf_uprobe *uprobe;
> + int ret;
>
> uprobe = container_of(con, struct bpf_uprobe, consumer);
> - return uprobe_prog_run(uprobe, instruction_pointer(regs), regs);
> + ret = uprobe_prog_run(uprobe, instruction_pointer(regs), regs);
> + if (uprobe->session)
> + return ret ? UPROBE_HANDLER_IGNORE : 0;
> + return ret;
isn't this a bug that the BPF program can return an arbitrary value here and,
e.g., request uprobe unregistration?
Let's return 0, unless uprobe->session? (it would be good to move that
into a separate patch with Fixes)
> }
>
> static int
> @@ -3260,6 +3276,12 @@ uprobe_multi_link_ret_handler(struct uprobe_consumer *con, unsigned long func, s
> struct bpf_uprobe *uprobe;
>
> uprobe = container_of(con, struct bpf_uprobe, consumer);
> + /*
> + * There's chance we could get called with NULL data if we registered uprobe
> + * after it hit entry but before it hit return probe, just ignore it.
> + */
> + if (uprobe->session && !data)
> + return 0;
why can't handle_uretprobe_chain() do this check instead? We know when
we are dealing with session uprobe/uretprobe, so we can filter out
these spurious calls, no?
> return uprobe_prog_run(uprobe, func, regs);
> }
>
> @@ -3299,7 +3321,7 @@ int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr
> if (sizeof(u64) != sizeof(void *))
> return -EOPNOTSUPP;
>
> - if (prog->expected_attach_type != BPF_TRACE_UPROBE_MULTI)
> + if (!is_uprobe_multi(prog))
> return -EINVAL;
>
> flags = attr->link_create.uprobe_multi.flags;
> @@ -3375,11 +3397,12 @@ int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr
>
> uprobes[i].link = link;
>
> - if (flags & BPF_F_UPROBE_MULTI_RETURN)
> - uprobes[i].consumer.ret_handler = uprobe_multi_link_ret_handler;
> - else
> + if (!(flags & BPF_F_UPROBE_MULTI_RETURN))
> uprobes[i].consumer.handler = uprobe_multi_link_handler;
> -
> + if (flags & BPF_F_UPROBE_MULTI_RETURN || is_uprobe_session(prog))
> + uprobes[i].consumer.ret_handler = uprobe_multi_link_ret_handler;
> + if (is_uprobe_session(prog))
> + uprobes[i].session = true;
> if (pid)
> uprobes[i].consumer.filter = uprobe_multi_link_filter;
> }
> diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
> index 7610883c8191..09bdb1867d4a 100644
> --- a/tools/include/uapi/linux/bpf.h
> +++ b/tools/include/uapi/linux/bpf.h
> @@ -1116,6 +1116,7 @@ enum bpf_attach_type {
> BPF_NETKIT_PRIMARY,
> BPF_NETKIT_PEER,
> BPF_TRACE_KPROBE_SESSION,
> + BPF_TRACE_UPROBE_SESSION,
> __MAX_BPF_ATTACH_TYPE
> };
>
> diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
> index 712b95e8891b..3587ed7ec359 100644
> --- a/tools/lib/bpf/libbpf.c
> +++ b/tools/lib/bpf/libbpf.c
> @@ -133,6 +133,7 @@ static const char * const attach_type_name[] = {
> [BPF_NETKIT_PRIMARY] = "netkit_primary",
> [BPF_NETKIT_PEER] = "netkit_peer",
> [BPF_TRACE_KPROBE_SESSION] = "trace_kprobe_session",
> + [BPF_TRACE_UPROBE_SESSION] = "trace_uprobe_session",
> };
>
> static const char * const link_type_name[] = {
> --
> 2.46.1
>
* Re: [PATCHv5 bpf-next 03/13] bpf: Add support for uprobe multi session attach
2024-09-30 21:36 ` Andrii Nakryiko
@ 2024-10-01 13:17 ` Jiri Olsa
2024-10-01 17:11 ` Andrii Nakryiko
0 siblings, 1 reply; 29+ messages in thread
From: Jiri Olsa @ 2024-10-01 13:17 UTC (permalink / raw)
To: Andrii Nakryiko
Cc: Oleg Nesterov, Peter Zijlstra, Alexei Starovoitov,
Daniel Borkmann, Andrii Nakryiko, bpf, Martin KaFai Lau, Song Liu,
Yonghong Song, John Fastabend, KP Singh, Stanislav Fomichev,
Hao Luo, Steven Rostedt, Masami Hiramatsu, linux-kernel,
linux-trace-kernel
On Mon, Sep 30, 2024 at 02:36:08PM -0700, Andrii Nakryiko wrote:
SNIP
> > struct bpf_uprobe_multi_link {
> > @@ -3248,9 +3260,13 @@ uprobe_multi_link_handler(struct uprobe_consumer *con, struct pt_regs *regs,
> > __u64 *data)
> > {
> > struct bpf_uprobe *uprobe;
> > + int ret;
> >
> > uprobe = container_of(con, struct bpf_uprobe, consumer);
> > - return uprobe_prog_run(uprobe, instruction_pointer(regs), regs);
> > + ret = uprobe_prog_run(uprobe, instruction_pointer(regs), regs);
> > + if (uprobe->session)
> > + return ret ? UPROBE_HANDLER_IGNORE : 0;
> > + return ret;
>
> isn't this a bug that BPF program can return arbitrary value here and,
> e.g., request uprobe unregistration?
>
> Let's return 0, unless uprobe->session? (it would be good to move that
> into a separate patch with Fixes)
yea, there's no use case for the uprobe multi user to return anything
else, so let's return 0 as you suggest
>
> > }
> >
> > static int
> > @@ -3260,6 +3276,12 @@ uprobe_multi_link_ret_handler(struct uprobe_consumer *con, unsigned long func, s
> > struct bpf_uprobe *uprobe;
> >
> > uprobe = container_of(con, struct bpf_uprobe, consumer);
> > + /*
> > + * There's chance we could get called with NULL data if we registered uprobe
> > + * after it hit entry but before it hit return probe, just ignore it.
> > + */
> > + if (uprobe->session && !data)
> > + return 0;
>
> why can't handle_uretprobe_chain() do this check instead? We know when
> we are dealing with session uprobe/uretprobe, so we can filter out
> these spurious calls, no?
right, now that we decide session based on the presence of both callbacks
we have that info in handle_uretprobe_chain.. but let's still check
it for sanity and warn? like
if (WARN_ON_ONCE(uprobe->session && !data))
return 0;
jirka
* Re: [PATCHv5 bpf-next 03/13] bpf: Add support for uprobe multi session attach
2024-10-01 13:17 ` Jiri Olsa
@ 2024-10-01 17:11 ` Andrii Nakryiko
2024-10-02 13:07 ` Jiri Olsa
0 siblings, 1 reply; 29+ messages in thread
From: Andrii Nakryiko @ 2024-10-01 17:11 UTC (permalink / raw)
To: Jiri Olsa
Cc: Oleg Nesterov, Peter Zijlstra, Alexei Starovoitov,
Daniel Borkmann, Andrii Nakryiko, bpf, Martin KaFai Lau, Song Liu,
Yonghong Song, John Fastabend, KP Singh, Stanislav Fomichev,
Hao Luo, Steven Rostedt, Masami Hiramatsu, linux-kernel,
linux-trace-kernel
On Tue, Oct 1, 2024 at 6:17 AM Jiri Olsa <olsajiri@gmail.com> wrote:
>
> On Mon, Sep 30, 2024 at 02:36:08PM -0700, Andrii Nakryiko wrote:
>
> SNIP
>
> > > struct bpf_uprobe_multi_link {
> > > @@ -3248,9 +3260,13 @@ uprobe_multi_link_handler(struct uprobe_consumer *con, struct pt_regs *regs,
> > > __u64 *data)
> > > {
> > > struct bpf_uprobe *uprobe;
> > > + int ret;
> > >
> > > uprobe = container_of(con, struct bpf_uprobe, consumer);
> > > - return uprobe_prog_run(uprobe, instruction_pointer(regs), regs);
> > > + ret = uprobe_prog_run(uprobe, instruction_pointer(regs), regs);
> > > + if (uprobe->session)
> > > + return ret ? UPROBE_HANDLER_IGNORE : 0;
> > > + return ret;
> >
> > isn't this a bug that BPF program can return arbitrary value here and,
> > e.g., request uprobe unregistration?
> >
> > Let's return 0, unless uprobe->session? (it would be good to move that
> > into a separate patch with Fixes)
>
> yea there's no use case for uprobe multi user, so let's return
> 0 as you suggest
>
> >
> > > }
> > >
> > > static int
> > > @@ -3260,6 +3276,12 @@ uprobe_multi_link_ret_handler(struct uprobe_consumer *con, unsigned long func, s
> > > struct bpf_uprobe *uprobe;
> > >
> > > uprobe = container_of(con, struct bpf_uprobe, consumer);
> > > + /*
> > > + * There's chance we could get called with NULL data if we registered uprobe
> > > + * after it hit entry but before it hit return probe, just ignore it.
> > > + */
> > > + if (uprobe->session && !data)
> > > + return 0;
> >
> > why can't handle_uretprobe_chain() do this check instead? We know when
> > we are dealing with session uprobe/uretprobe, so we can filter out
> > these spurious calls, no?
>
> right, now that we decide session based on presence of both callbacks
> we have that info in here handle_uretprobe_chain.. but let's still check
> it for sanity and warn? like
>
> if (WARN_ON_ONCE(uprobe->session && !data))
You mean to check this *additionally* in uprobe_multi_link_handler(),
after the core uprobe code has already filtered that condition out? It won't
hurt, but I'm not sure I see the point?
> return 0;
>
> jirka
* Re: [PATCHv5 bpf-next 03/13] bpf: Add support for uprobe multi session attach
2024-10-01 17:11 ` Andrii Nakryiko
@ 2024-10-02 13:07 ` Jiri Olsa
0 siblings, 0 replies; 29+ messages in thread
From: Jiri Olsa @ 2024-10-02 13:07 UTC (permalink / raw)
To: Andrii Nakryiko
Cc: Jiri Olsa, Oleg Nesterov, Peter Zijlstra, Alexei Starovoitov,
Daniel Borkmann, Andrii Nakryiko, bpf, Martin KaFai Lau, Song Liu,
Yonghong Song, John Fastabend, KP Singh, Stanislav Fomichev,
Hao Luo, Steven Rostedt, Masami Hiramatsu, linux-kernel,
linux-trace-kernel
On Tue, Oct 01, 2024 at 10:11:13AM -0700, Andrii Nakryiko wrote:
> On Tue, Oct 1, 2024 at 6:17 AM Jiri Olsa <olsajiri@gmail.com> wrote:
> >
> > On Mon, Sep 30, 2024 at 02:36:08PM -0700, Andrii Nakryiko wrote:
> >
> > SNIP
> >
> > > > struct bpf_uprobe_multi_link {
> > > > @@ -3248,9 +3260,13 @@ uprobe_multi_link_handler(struct uprobe_consumer *con, struct pt_regs *regs,
> > > > __u64 *data)
> > > > {
> > > > struct bpf_uprobe *uprobe;
> > > > + int ret;
> > > >
> > > > uprobe = container_of(con, struct bpf_uprobe, consumer);
> > > > - return uprobe_prog_run(uprobe, instruction_pointer(regs), regs);
> > > > + ret = uprobe_prog_run(uprobe, instruction_pointer(regs), regs);
> > > > + if (uprobe->session)
> > > > + return ret ? UPROBE_HANDLER_IGNORE : 0;
> > > > + return ret;
> > >
> > > isn't this a bug that BPF program can return arbitrary value here and,
> > > e.g., request uprobe unregistration?
> > >
> > > Let's return 0, unless uprobe->session? (it would be good to move that
> > > into a separate patch with Fixes)
> >
> > yea there's no use case for uprobe multi user, so let's return
> > 0 as you suggest
> >
> > >
> > > > }
> > > >
> > > > static int
> > > > @@ -3260,6 +3276,12 @@ uprobe_multi_link_ret_handler(struct uprobe_consumer *con, unsigned long func, s
> > > > struct bpf_uprobe *uprobe;
> > > >
> > > > uprobe = container_of(con, struct bpf_uprobe, consumer);
> > > > + /*
> > > > + * There's chance we could get called with NULL data if we registered uprobe
> > > > + * after it hit entry but before it hit return probe, just ignore it.
> > > > + */
> > > > + if (uprobe->session && !data)
> > > > + return 0;
> > >
> > > why can't handle_uretprobe_chain() do this check instead? We know when
> > > we are dealing with session uprobe/uretprobe, so we can filter out
> > > these spurious calls, no?
> >
> > right, now that we decide session based on presence of both callbacks
> > we have that info in here handle_uretprobe_chain.. but let's still check
> > it for sanity and warn? like
> >
> > if (WARN_ON_ONCE(uprobe->session && !data))
>
> You mean to check this *additionally* in uprobe_multi_link_handler(),
> after core uprobe code already filtered that condition out? It won't
> hurt, but I'm not sure I see the point?
yes, it's a cross-subsystem call so just to be on the safe side for the future,
but I don't insist
jirka
* [PATCHv5 bpf-next 04/13] bpf: Add support for uprobe multi session context
2024-09-29 20:57 [PATCHv5 bpf-next 00/13] uprobe, bpf: Add session support Jiri Olsa
` (2 preceding siblings ...)
2024-09-29 20:57 ` [PATCHv5 bpf-next 03/13] bpf: Add support for uprobe multi session attach Jiri Olsa
@ 2024-09-29 20:57 ` Jiri Olsa
2024-09-29 20:57 ` [PATCHv5 bpf-next 05/13] bpf: Allow return values 0 and 1 for uprobe/kprobe session Jiri Olsa
` (8 subsequent siblings)
12 siblings, 0 replies; 29+ messages in thread
From: Jiri Olsa @ 2024-09-29 20:57 UTC (permalink / raw)
To: Oleg Nesterov, Peter Zijlstra, Alexei Starovoitov,
Daniel Borkmann, Andrii Nakryiko
Cc: bpf, Martin KaFai Lau, Song Liu, Yonghong Song, John Fastabend,
KP Singh, Stanislav Fomichev, Hao Luo, Steven Rostedt,
Masami Hiramatsu, linux-kernel, linux-trace-kernel
Placing the bpf_session_run_ctx layer in between bpf_run_ctx and
bpf_uprobe_multi_run_ctx, so the session data can be retrieved
from the uprobe_multi link.
Plus allowing uprobe session programs to access the session kfuncs.
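The nesting matters because the generic session kfuncs only need the
common bpf_session_run_ctx part; roughly (a sketch of the existing
kprobe-session kfunc, shown here just to illustrate why the embedded
layer is enough):
  __u64 *bpf_session_cookie(void)
  {
          struct bpf_session_run_ctx *session_ctx;
          session_ctx = container_of(current->bpf_ctx,
                                     struct bpf_session_run_ctx, run_ctx);
          return session_ctx->data;
  }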
Acked-by: Andrii Nakryiko <andrii@kernel.org>
Signed-off-by: Jiri Olsa <jolsa@kernel.org>
---
kernel/trace/bpf_trace.c | 23 +++++++++++++++--------
1 file changed, 15 insertions(+), 8 deletions(-)
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 98e940ec184d..41f83d504bf6 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -3098,7 +3098,7 @@ struct bpf_uprobe_multi_link {
};
struct bpf_uprobe_multi_run_ctx {
- struct bpf_run_ctx run_ctx;
+ struct bpf_session_run_ctx session_ctx;
unsigned long entry_ip;
struct bpf_uprobe *uprobe;
};
@@ -3211,10 +3211,15 @@ static const struct bpf_link_ops bpf_uprobe_multi_link_lops = {
static int uprobe_prog_run(struct bpf_uprobe *uprobe,
unsigned long entry_ip,
- struct pt_regs *regs)
+ struct pt_regs *regs,
+ bool is_return, void *data)
{
struct bpf_uprobe_multi_link *link = uprobe->link;
struct bpf_uprobe_multi_run_ctx run_ctx = {
+ .session_ctx = {
+ .is_return = is_return,
+ .data = data,
+ },
.entry_ip = entry_ip,
.uprobe = uprobe,
};
@@ -3233,7 +3238,7 @@ static int uprobe_prog_run(struct bpf_uprobe *uprobe,
migrate_disable();
- old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
+ old_run_ctx = bpf_set_run_ctx(&run_ctx.session_ctx.run_ctx);
err = bpf_prog_run(link->link.prog, regs);
bpf_reset_run_ctx(old_run_ctx);
@@ -3263,7 +3268,7 @@ uprobe_multi_link_handler(struct uprobe_consumer *con, struct pt_regs *regs,
int ret;
uprobe = container_of(con, struct bpf_uprobe, consumer);
- ret = uprobe_prog_run(uprobe, instruction_pointer(regs), regs);
+ ret = uprobe_prog_run(uprobe, instruction_pointer(regs), regs, false, data);
if (uprobe->session)
return ret ? UPROBE_HANDLER_IGNORE : 0;
return ret;
@@ -3282,14 +3287,15 @@ uprobe_multi_link_ret_handler(struct uprobe_consumer *con, unsigned long func, s
*/
if (uprobe->session && !data)
return 0;
- return uprobe_prog_run(uprobe, func, regs);
+ return uprobe_prog_run(uprobe, func, regs, true, data);
}
static u64 bpf_uprobe_multi_entry_ip(struct bpf_run_ctx *ctx)
{
struct bpf_uprobe_multi_run_ctx *run_ctx;
- run_ctx = container_of(current->bpf_ctx, struct bpf_uprobe_multi_run_ctx, run_ctx);
+ run_ctx = container_of(current->bpf_ctx, struct bpf_uprobe_multi_run_ctx,
+ session_ctx.run_ctx);
return run_ctx->entry_ip;
}
@@ -3297,7 +3303,8 @@ static u64 bpf_uprobe_multi_cookie(struct bpf_run_ctx *ctx)
{
struct bpf_uprobe_multi_run_ctx *run_ctx;
- run_ctx = container_of(current->bpf_ctx, struct bpf_uprobe_multi_run_ctx, run_ctx);
+ run_ctx = container_of(current->bpf_ctx, struct bpf_uprobe_multi_run_ctx,
+ session_ctx.run_ctx);
return run_ctx->uprobe->cookie;
}
@@ -3491,7 +3498,7 @@ static int bpf_kprobe_multi_filter(const struct bpf_prog *prog, u32 kfunc_id)
if (!btf_id_set8_contains(&kprobe_multi_kfunc_set_ids, kfunc_id))
return 0;
- if (!is_kprobe_session(prog))
+ if (!is_kprobe_session(prog) && !is_uprobe_session(prog))
return -EACCES;
return 0;
--
2.46.1
* [PATCHv5 bpf-next 05/13] bpf: Allow return values 0 and 1 for uprobe/kprobe session
2024-09-29 20:57 [PATCHv5 bpf-next 00/13] uprobe, bpf: Add session support Jiri Olsa
` (3 preceding siblings ...)
2024-09-29 20:57 ` [PATCHv5 bpf-next 04/13] bpf: Add support for uprobe multi session context Jiri Olsa
@ 2024-09-29 20:57 ` Jiri Olsa
2024-09-30 21:36 ` Andrii Nakryiko
2024-09-29 20:57 ` [PATCHv5 bpf-next 06/13] libbpf: Add support for uprobe multi session attach Jiri Olsa
` (7 subsequent siblings)
12 siblings, 1 reply; 29+ messages in thread
From: Jiri Olsa @ 2024-09-29 20:57 UTC (permalink / raw)
To: Oleg Nesterov, Peter Zijlstra, Alexei Starovoitov,
Daniel Borkmann, Andrii Nakryiko
Cc: bpf, Martin KaFai Lau, Song Liu, Yonghong Song, John Fastabend,
KP Singh, Stanislav Fomichev, Hao Luo, Steven Rostedt,
Masami Hiramatsu, linux-kernel, linux-trace-kernel
The uprobe and kprobe session programs can return only 0 or 1,
so instruct the verifier to check for that.
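As an example (not part of the patch; the section name and symbol are
placeholders), a session program like the one below would now be
rejected at load time because its return value is outside [0, 1]:
  SEC("uprobe.session//proc/self/exe:uprobe_multi_func_1")
  int bad_session(struct pt_regs *ctx)
  {
          return 2;   /* rejected: session programs must return 0 or 1 */
  }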
Signed-off-by: Jiri Olsa <jolsa@kernel.org>
---
kernel/bpf/verifier.c | 10 ++++++++++
1 file changed, 10 insertions(+)
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 7d9b38ffd220..c4d7b7369259 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -15910,6 +15910,16 @@ static int check_return_code(struct bpf_verifier_env *env, int regno, const char
return -ENOTSUPP;
}
break;
+ case BPF_PROG_TYPE_KPROBE:
+ switch (env->prog->expected_attach_type) {
+ case BPF_TRACE_KPROBE_SESSION:
+ case BPF_TRACE_UPROBE_SESSION:
+ range = retval_range(0, 1);
+ break;
+ default:
+ return 0;
+ }
+ break;
case BPF_PROG_TYPE_SK_LOOKUP:
range = retval_range(SK_DROP, SK_PASS);
break;
--
2.46.1
* Re: [PATCHv5 bpf-next 05/13] bpf: Allow return values 0 and 1 for uprobe/kprobe session
2024-09-29 20:57 ` [PATCHv5 bpf-next 05/13] bpf: Allow return values 0 and 1 for uprobe/kprobe session Jiri Olsa
@ 2024-09-30 21:36 ` Andrii Nakryiko
2024-10-01 13:18 ` Jiri Olsa
0 siblings, 1 reply; 29+ messages in thread
From: Andrii Nakryiko @ 2024-09-30 21:36 UTC (permalink / raw)
To: Jiri Olsa
Cc: Oleg Nesterov, Peter Zijlstra, Alexei Starovoitov,
Daniel Borkmann, Andrii Nakryiko, bpf, Martin KaFai Lau, Song Liu,
Yonghong Song, John Fastabend, KP Singh, Stanislav Fomichev,
Hao Luo, Steven Rostedt, Masami Hiramatsu, linux-kernel,
linux-trace-kernel
On Sun, Sep 29, 2024 at 1:58 PM Jiri Olsa <jolsa@kernel.org> wrote:
>
> The uprobe and kprobe session program can return only 0 or 1,
> instruct verifier to check for that.
>
> Signed-off-by: Jiri Olsa <jolsa@kernel.org>
> ---
> kernel/bpf/verifier.c | 10 ++++++++++
> 1 file changed, 10 insertions(+)
>
Do we need a Fixes: tag?
Acked-by: Andrii Nakryiko <andrii@kernel.org>
> diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
> index 7d9b38ffd220..c4d7b7369259 100644
> --- a/kernel/bpf/verifier.c
> +++ b/kernel/bpf/verifier.c
> @@ -15910,6 +15910,16 @@ static int check_return_code(struct bpf_verifier_env *env, int regno, const char
> return -ENOTSUPP;
> }
> break;
> + case BPF_PROG_TYPE_KPROBE:
> + switch (env->prog->expected_attach_type) {
> + case BPF_TRACE_KPROBE_SESSION:
> + case BPF_TRACE_UPROBE_SESSION:
> + range = retval_range(0, 1);
> + break;
> + default:
> + return 0;
> + }
> + break;
> case BPF_PROG_TYPE_SK_LOOKUP:
> range = retval_range(SK_DROP, SK_PASS);
> break;
> --
> 2.46.1
>
* Re: [PATCHv5 bpf-next 05/13] bpf: Allow return values 0 and 1 for uprobe/kprobe session
2024-09-30 21:36 ` Andrii Nakryiko
@ 2024-10-01 13:18 ` Jiri Olsa
0 siblings, 0 replies; 29+ messages in thread
From: Jiri Olsa @ 2024-10-01 13:18 UTC (permalink / raw)
To: Andrii Nakryiko
Cc: Oleg Nesterov, Peter Zijlstra, Alexei Starovoitov,
Daniel Borkmann, Andrii Nakryiko, bpf, Martin KaFai Lau, Song Liu,
Yonghong Song, John Fastabend, KP Singh, Stanislav Fomichev,
Hao Luo, Steven Rostedt, Masami Hiramatsu, linux-kernel,
linux-trace-kernel
On Mon, Sep 30, 2024 at 02:36:28PM -0700, Andrii Nakryiko wrote:
> On Sun, Sep 29, 2024 at 1:58 PM Jiri Olsa <jolsa@kernel.org> wrote:
> >
> > The uprobe and kprobe session program can return only 0 or 1,
> > instruct verifier to check for that.
> >
> > Signed-off-by: Jiri Olsa <jolsa@kernel.org>
> > ---
> > kernel/bpf/verifier.c | 10 ++++++++++
> > 1 file changed, 10 insertions(+)
> >
>
> Do we need Fixes: tag?
right, for kprobe session, will add
thanks,
jirka
>
> Acked-by: Andrii Nakryiko <andrii@kernel.org>
>
>
> > diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
> > index 7d9b38ffd220..c4d7b7369259 100644
> > --- a/kernel/bpf/verifier.c
> > +++ b/kernel/bpf/verifier.c
> > @@ -15910,6 +15910,16 @@ static int check_return_code(struct bpf_verifier_env *env, int regno, const char
> > return -ENOTSUPP;
> > }
> > break;
> > + case BPF_PROG_TYPE_KPROBE:
> > + switch (env->prog->expected_attach_type) {
> > + case BPF_TRACE_KPROBE_SESSION:
> > + case BPF_TRACE_UPROBE_SESSION:
> > + range = retval_range(0, 1);
> > + break;
> > + default:
> > + return 0;
> > + }
> > + break;
> > case BPF_PROG_TYPE_SK_LOOKUP:
> > range = retval_range(SK_DROP, SK_PASS);
> > break;
> > --
> > 2.46.1
> >
* [PATCHv5 bpf-next 06/13] libbpf: Add support for uprobe multi session attach
2024-09-29 20:57 [PATCHv5 bpf-next 00/13] uprobe, bpf: Add session support Jiri Olsa
` (4 preceding siblings ...)
2024-09-29 20:57 ` [PATCHv5 bpf-next 05/13] bpf: Allow return values 0 and 1 for uprobe/kprobe session Jiri Olsa
@ 2024-09-29 20:57 ` Jiri Olsa
2024-09-30 21:36 ` Andrii Nakryiko
2024-09-29 20:57 ` [PATCHv5 bpf-next 07/13] selftests/bpf: Add uprobe session test Jiri Olsa
` (6 subsequent siblings)
12 siblings, 1 reply; 29+ messages in thread
From: Jiri Olsa @ 2024-09-29 20:57 UTC (permalink / raw)
To: Oleg Nesterov, Peter Zijlstra, Alexei Starovoitov,
Daniel Borkmann, Andrii Nakryiko
Cc: bpf, Martin KaFai Lau, Song Liu, Yonghong Song, John Fastabend,
KP Singh, Stanislav Fomichev, Hao Luo, Steven Rostedt,
Masami Hiramatsu, linux-kernel, linux-trace-kernel
Adding support to attach a program in uprobe session mode
with the bpf_program__attach_uprobe_multi function.
Adding a session bool to the bpf_uprobe_multi_opts struct that allows
loading and attaching the bpf program via a uprobe session link.
Also adding a new program loader section that allows:
SEC("uprobe.session/bpf_fentry_test*")
and loads/attaches the uprobe program as a uprobe session.
Adding a sleepable hook (uprobe.session.s) as well.
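For reference, attaching through the opts-based API would look roughly
like this (a sketch; 'prog', the binary path and the function pattern
are placeholders taken from the selftests):
  LIBBPF_OPTS(bpf_uprobe_multi_opts, opts, .session = true);
  struct bpf_link *link;
  link = bpf_program__attach_uprobe_multi(prog, -1 /* all processes */,
                                          "/proc/self/exe",
                                          "uprobe_multi_func_*", &opts);
  if (!link)
          return -errno;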
Signed-off-by: Jiri Olsa <jolsa@kernel.org>
---
tools/lib/bpf/bpf.c | 1 +
tools/lib/bpf/libbpf.c | 21 ++++++++++++++++++---
tools/lib/bpf/libbpf.h | 4 +++-
3 files changed, 22 insertions(+), 4 deletions(-)
diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c
index 2a4c71501a17..becdfa701c75 100644
--- a/tools/lib/bpf/bpf.c
+++ b/tools/lib/bpf/bpf.c
@@ -776,6 +776,7 @@ int bpf_link_create(int prog_fd, int target_fd,
return libbpf_err(-EINVAL);
break;
case BPF_TRACE_UPROBE_MULTI:
+ case BPF_TRACE_UPROBE_SESSION:
attr.link_create.uprobe_multi.flags = OPTS_GET(opts, uprobe_multi.flags, 0);
attr.link_create.uprobe_multi.cnt = OPTS_GET(opts, uprobe_multi.cnt, 0);
attr.link_create.uprobe_multi.path = ptr_to_u64(OPTS_GET(opts, uprobe_multi.path, 0));
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 3587ed7ec359..563ff5e64269 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -9410,8 +9410,10 @@ static const struct bpf_sec_def section_defs[] = {
SEC_DEF("kprobe.session+", KPROBE, BPF_TRACE_KPROBE_SESSION, SEC_NONE, attach_kprobe_session),
SEC_DEF("uprobe.multi+", KPROBE, BPF_TRACE_UPROBE_MULTI, SEC_NONE, attach_uprobe_multi),
SEC_DEF("uretprobe.multi+", KPROBE, BPF_TRACE_UPROBE_MULTI, SEC_NONE, attach_uprobe_multi),
+ SEC_DEF("uprobe.session+", KPROBE, BPF_TRACE_UPROBE_SESSION, SEC_NONE, attach_uprobe_multi),
SEC_DEF("uprobe.multi.s+", KPROBE, BPF_TRACE_UPROBE_MULTI, SEC_SLEEPABLE, attach_uprobe_multi),
SEC_DEF("uretprobe.multi.s+", KPROBE, BPF_TRACE_UPROBE_MULTI, SEC_SLEEPABLE, attach_uprobe_multi),
+ SEC_DEF("uprobe.session.s+", KPROBE, BPF_TRACE_UPROBE_SESSION, SEC_SLEEPABLE, attach_uprobe_multi),
SEC_DEF("ksyscall+", KPROBE, 0, SEC_NONE, attach_ksyscall),
SEC_DEF("kretsyscall+", KPROBE, 0, SEC_NONE, attach_ksyscall),
SEC_DEF("usdt+", KPROBE, 0, SEC_USDT, attach_usdt),
@@ -11733,7 +11735,10 @@ static int attach_uprobe_multi(const struct bpf_program *prog, long cookie, stru
ret = 0;
break;
case 3:
- opts.retprobe = str_has_pfx(probe_type, "uretprobe.multi");
+ if (str_has_pfx(probe_type, "uprobe.session"))
+ opts.session = true;
+ else
+ opts.retprobe = str_has_pfx(probe_type, "uretprobe.multi");
*link = bpf_program__attach_uprobe_multi(prog, -1, binary_path, func_name, &opts);
ret = libbpf_get_error(*link);
break;
@@ -11982,10 +11987,12 @@ bpf_program__attach_uprobe_multi(const struct bpf_program *prog,
const unsigned long *ref_ctr_offsets = NULL, *offsets = NULL;
LIBBPF_OPTS(bpf_link_create_opts, lopts);
unsigned long *resolved_offsets = NULL;
+ enum bpf_attach_type attach_type;
int err = 0, link_fd, prog_fd;
struct bpf_link *link = NULL;
char errmsg[STRERR_BUFSIZE];
char full_path[PATH_MAX];
+ bool retprobe, session;
const __u64 *cookies;
const char **syms;
size_t cnt;
@@ -12056,12 +12063,20 @@ bpf_program__attach_uprobe_multi(const struct bpf_program *prog,
offsets = resolved_offsets;
}
+ retprobe = OPTS_GET(opts, retprobe, false);
+ session = OPTS_GET(opts, session, false);
+
+ if (retprobe && session)
+ return libbpf_err_ptr(-EINVAL);
+
+ attach_type = session ? BPF_TRACE_UPROBE_SESSION : BPF_TRACE_UPROBE_MULTI;
+
lopts.uprobe_multi.path = path;
lopts.uprobe_multi.offsets = offsets;
lopts.uprobe_multi.ref_ctr_offsets = ref_ctr_offsets;
lopts.uprobe_multi.cookies = cookies;
lopts.uprobe_multi.cnt = cnt;
- lopts.uprobe_multi.flags = OPTS_GET(opts, retprobe, false) ? BPF_F_UPROBE_MULTI_RETURN : 0;
+ lopts.uprobe_multi.flags = retprobe ? BPF_F_UPROBE_MULTI_RETURN : 0;
if (pid == 0)
pid = getpid();
@@ -12075,7 +12090,7 @@ bpf_program__attach_uprobe_multi(const struct bpf_program *prog,
}
link->detach = &bpf_link__detach_fd;
- link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &lopts);
+ link_fd = bpf_link_create(prog_fd, 0, attach_type, &lopts);
if (link_fd < 0) {
err = -errno;
pr_warn("prog '%s': failed to attach multi-uprobe: %s\n",
diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h
index 91484303849c..b2ce3a72b11d 100644
--- a/tools/lib/bpf/libbpf.h
+++ b/tools/lib/bpf/libbpf.h
@@ -577,10 +577,12 @@ struct bpf_uprobe_multi_opts {
size_t cnt;
/* create return uprobes */
bool retprobe;
+ /* create session kprobes */
+ bool session;
size_t :0;
};
-#define bpf_uprobe_multi_opts__last_field retprobe
+#define bpf_uprobe_multi_opts__last_field session
/**
* @brief **bpf_program__attach_uprobe_multi()** attaches a BPF program
--
2.46.1
* Re: [PATCHv5 bpf-next 06/13] libbpf: Add support for uprobe multi session attach
2024-09-29 20:57 ` [PATCHv5 bpf-next 06/13] libbpf: Add support for uprobe multi session attach Jiri Olsa
@ 2024-09-30 21:36 ` Andrii Nakryiko
2024-10-01 13:18 ` Jiri Olsa
0 siblings, 1 reply; 29+ messages in thread
From: Andrii Nakryiko @ 2024-09-30 21:36 UTC (permalink / raw)
To: Jiri Olsa
Cc: Oleg Nesterov, Peter Zijlstra, Alexei Starovoitov,
Daniel Borkmann, Andrii Nakryiko, bpf, Martin KaFai Lau, Song Liu,
Yonghong Song, John Fastabend, KP Singh, Stanislav Fomichev,
Hao Luo, Steven Rostedt, Masami Hiramatsu, linux-kernel,
linux-trace-kernel
On Sun, Sep 29, 2024 at 1:58 PM Jiri Olsa <jolsa@kernel.org> wrote:
>
> Adding support to attach program in uprobe session mode
> with bpf_program__attach_uprobe_multi function.
>
> Adding session bool to bpf_uprobe_multi_opts struct that allows
> to load and attach the bpf program via uprobe session.
> the attachment to create uprobe multi session.
>
> Also adding new program loader section that allows:
> SEC("uprobe.session/bpf_fentry_test*")
>
> and loads/attaches uprobe program as uprobe session.
>
> Adding sleepable hook (uprobe.session.s) as well.
>
> Signed-off-by: Jiri Olsa <jolsa@kernel.org>
> ---
> tools/lib/bpf/bpf.c | 1 +
> tools/lib/bpf/libbpf.c | 21 ++++++++++++++++++---
> tools/lib/bpf/libbpf.h | 4 +++-
> 3 files changed, 22 insertions(+), 4 deletions(-)
>
LGTM, though see the nit below
Acked-by: Andrii Nakryiko <andrii@kernel.org>
> diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c
> index 2a4c71501a17..becdfa701c75 100644
> --- a/tools/lib/bpf/bpf.c
> +++ b/tools/lib/bpf/bpf.c
> @@ -776,6 +776,7 @@ int bpf_link_create(int prog_fd, int target_fd,
> return libbpf_err(-EINVAL);
> break;
> case BPF_TRACE_UPROBE_MULTI:
> + case BPF_TRACE_UPROBE_SESSION:
> attr.link_create.uprobe_multi.flags = OPTS_GET(opts, uprobe_multi.flags, 0);
> attr.link_create.uprobe_multi.cnt = OPTS_GET(opts, uprobe_multi.cnt, 0);
> attr.link_create.uprobe_multi.path = ptr_to_u64(OPTS_GET(opts, uprobe_multi.path, 0));
> diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
> index 3587ed7ec359..563ff5e64269 100644
> --- a/tools/lib/bpf/libbpf.c
> +++ b/tools/lib/bpf/libbpf.c
> @@ -9410,8 +9410,10 @@ static const struct bpf_sec_def section_defs[] = {
> SEC_DEF("kprobe.session+", KPROBE, BPF_TRACE_KPROBE_SESSION, SEC_NONE, attach_kprobe_session),
> SEC_DEF("uprobe.multi+", KPROBE, BPF_TRACE_UPROBE_MULTI, SEC_NONE, attach_uprobe_multi),
> SEC_DEF("uretprobe.multi+", KPROBE, BPF_TRACE_UPROBE_MULTI, SEC_NONE, attach_uprobe_multi),
> + SEC_DEF("uprobe.session+", KPROBE, BPF_TRACE_UPROBE_SESSION, SEC_NONE, attach_uprobe_multi),
> SEC_DEF("uprobe.multi.s+", KPROBE, BPF_TRACE_UPROBE_MULTI, SEC_SLEEPABLE, attach_uprobe_multi),
> SEC_DEF("uretprobe.multi.s+", KPROBE, BPF_TRACE_UPROBE_MULTI, SEC_SLEEPABLE, attach_uprobe_multi),
> + SEC_DEF("uprobe.session.s+", KPROBE, BPF_TRACE_UPROBE_SESSION, SEC_SLEEPABLE, attach_uprobe_multi),
> SEC_DEF("ksyscall+", KPROBE, 0, SEC_NONE, attach_ksyscall),
> SEC_DEF("kretsyscall+", KPROBE, 0, SEC_NONE, attach_ksyscall),
> SEC_DEF("usdt+", KPROBE, 0, SEC_USDT, attach_usdt),
> @@ -11733,7 +11735,10 @@ static int attach_uprobe_multi(const struct bpf_program *prog, long cookie, stru
> ret = 0;
> break;
> case 3:
> - opts.retprobe = str_has_pfx(probe_type, "uretprobe.multi");
> + if (str_has_pfx(probe_type, "uprobe.session"))
> + opts.session = true;
> + else
> + opts.retprobe = str_has_pfx(probe_type, "uretprobe.multi");
nit: this is very non-uniform, can you please just do:
opts.session = str_has_pfx(probe_type, "uprobe.session");
opts.retprobe = str_has_pfx(probe_type, "uretprobe.multi");
There is no need to micro-optimize str_has_pfx() calls, IMO.
> *link = bpf_program__attach_uprobe_multi(prog, -1, binary_path, func_name, &opts);
> ret = libbpf_get_error(*link);
> break;
[...]
* Re: [PATCHv5 bpf-next 06/13] libbpf: Add support for uprobe multi session attach
2024-09-30 21:36 ` Andrii Nakryiko
@ 2024-10-01 13:18 ` Jiri Olsa
0 siblings, 0 replies; 29+ messages in thread
From: Jiri Olsa @ 2024-10-01 13:18 UTC (permalink / raw)
To: Andrii Nakryiko
Cc: Oleg Nesterov, Peter Zijlstra, Alexei Starovoitov,
Daniel Borkmann, Andrii Nakryiko, bpf, Martin KaFai Lau, Song Liu,
Yonghong Song, John Fastabend, KP Singh, Stanislav Fomichev,
Hao Luo, Steven Rostedt, Masami Hiramatsu, linux-kernel,
linux-trace-kernel
On Mon, Sep 30, 2024 at 02:36:35PM -0700, Andrii Nakryiko wrote:
> On Sun, Sep 29, 2024 at 1:58 PM Jiri Olsa <jolsa@kernel.org> wrote:
> >
> > Adding support to attach program in uprobe session mode
> > with bpf_program__attach_uprobe_multi function.
> >
> > Adding session bool to bpf_uprobe_multi_opts struct that allows
> > to load and attach the bpf program via uprobe session.
> > the attachment to create uprobe multi session.
> >
> > Also adding new program loader section that allows:
> > SEC("uprobe.session/bpf_fentry_test*")
> >
> > and loads/attaches uprobe program as uprobe session.
> >
> > Adding sleepable hook (uprobe.session.s) as well.
> >
> > Signed-off-by: Jiri Olsa <jolsa@kernel.org>
> > ---
> > tools/lib/bpf/bpf.c | 1 +
> > tools/lib/bpf/libbpf.c | 21 ++++++++++++++++++---
> > tools/lib/bpf/libbpf.h | 4 +++-
> > 3 files changed, 22 insertions(+), 4 deletions(-)
> >
>
> LGTM, though see the nit below
>
> Acked-by: Andrii Nakryiko <andrii@kernel.org>
>
> > diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c
> > index 2a4c71501a17..becdfa701c75 100644
> > --- a/tools/lib/bpf/bpf.c
> > +++ b/tools/lib/bpf/bpf.c
> > @@ -776,6 +776,7 @@ int bpf_link_create(int prog_fd, int target_fd,
> > return libbpf_err(-EINVAL);
> > break;
> > case BPF_TRACE_UPROBE_MULTI:
> > + case BPF_TRACE_UPROBE_SESSION:
> > attr.link_create.uprobe_multi.flags = OPTS_GET(opts, uprobe_multi.flags, 0);
> > attr.link_create.uprobe_multi.cnt = OPTS_GET(opts, uprobe_multi.cnt, 0);
> > attr.link_create.uprobe_multi.path = ptr_to_u64(OPTS_GET(opts, uprobe_multi.path, 0));
> > diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
> > index 3587ed7ec359..563ff5e64269 100644
> > --- a/tools/lib/bpf/libbpf.c
> > +++ b/tools/lib/bpf/libbpf.c
> > @@ -9410,8 +9410,10 @@ static const struct bpf_sec_def section_defs[] = {
> > SEC_DEF("kprobe.session+", KPROBE, BPF_TRACE_KPROBE_SESSION, SEC_NONE, attach_kprobe_session),
> > SEC_DEF("uprobe.multi+", KPROBE, BPF_TRACE_UPROBE_MULTI, SEC_NONE, attach_uprobe_multi),
> > SEC_DEF("uretprobe.multi+", KPROBE, BPF_TRACE_UPROBE_MULTI, SEC_NONE, attach_uprobe_multi),
> > + SEC_DEF("uprobe.session+", KPROBE, BPF_TRACE_UPROBE_SESSION, SEC_NONE, attach_uprobe_multi),
> > SEC_DEF("uprobe.multi.s+", KPROBE, BPF_TRACE_UPROBE_MULTI, SEC_SLEEPABLE, attach_uprobe_multi),
> > SEC_DEF("uretprobe.multi.s+", KPROBE, BPF_TRACE_UPROBE_MULTI, SEC_SLEEPABLE, attach_uprobe_multi),
> > + SEC_DEF("uprobe.session.s+", KPROBE, BPF_TRACE_UPROBE_SESSION, SEC_SLEEPABLE, attach_uprobe_multi),
> > SEC_DEF("ksyscall+", KPROBE, 0, SEC_NONE, attach_ksyscall),
> > SEC_DEF("kretsyscall+", KPROBE, 0, SEC_NONE, attach_ksyscall),
> > SEC_DEF("usdt+", KPROBE, 0, SEC_USDT, attach_usdt),
> > @@ -11733,7 +11735,10 @@ static int attach_uprobe_multi(const struct bpf_program *prog, long cookie, stru
> > ret = 0;
> > break;
> > case 3:
> > - opts.retprobe = str_has_pfx(probe_type, "uretprobe.multi");
> > + if (str_has_pfx(probe_type, "uprobe.session"))
> > + opts.session = true;
> > + else
> > + opts.retprobe = str_has_pfx(probe_type, "uretprobe.multi");
>
> nit: this is very non-uniform, can you please just do:
>
> opts.session = str_has_pfx(probe_type, "uprobe.session");
> opts.retprobe = str_has_pfx(probe_type, "uretprobe.multi");
>
> There is no need to micro-optimize str_has_pfx() calls, IMO.
sure, will change
thanks,
jirka
* [PATCHv5 bpf-next 07/13] selftests/bpf: Add uprobe session test
2024-09-29 20:57 [PATCHv5 bpf-next 00/13] uprobe, bpf: Add session support Jiri Olsa
` (5 preceding siblings ...)
2024-09-29 20:57 ` [PATCHv5 bpf-next 06/13] libbpf: Add support for uprobe multi session attach Jiri Olsa
@ 2024-09-29 20:57 ` Jiri Olsa
2024-09-29 20:57 ` [PATCHv5 bpf-next 08/13] selftests/bpf: Add uprobe session cookie test Jiri Olsa
` (5 subsequent siblings)
12 siblings, 0 replies; 29+ messages in thread
From: Jiri Olsa @ 2024-09-29 20:57 UTC (permalink / raw)
To: Oleg Nesterov, Peter Zijlstra, Alexei Starovoitov,
Daniel Borkmann, Andrii Nakryiko
Cc: bpf, Martin KaFai Lau, Song Liu, Yonghong Song, John Fastabend,
KP Singh, Stanislav Fomichev, Hao Luo, Steven Rostedt,
Masami Hiramatsu, linux-kernel, linux-trace-kernel
Adding a uprobe session test, verifying that the entry program's
return value controls execution of the return probe program.
Signed-off-by: Jiri Olsa <jolsa@kernel.org>
---
.../bpf/prog_tests/uprobe_multi_test.c | 47 ++++++++++++
.../bpf/progs/uprobe_multi_session.c | 71 +++++++++++++++++++
2 files changed, 118 insertions(+)
create mode 100644 tools/testing/selftests/bpf/progs/uprobe_multi_session.c
diff --git a/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c b/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c
index 2c39902b8a09..b10d2dadb462 100644
--- a/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c
+++ b/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c
@@ -8,6 +8,7 @@
#include "uprobe_multi_usdt.skel.h"
#include "uprobe_multi_consumers.skel.h"
#include "uprobe_multi_pid_filter.skel.h"
+#include "uprobe_multi_session.skel.h"
#include "bpf/libbpf_internal.h"
#include "testing_helpers.h"
#include "../sdt.h"
@@ -1015,6 +1016,50 @@ static void test_pid_filter_process(bool clone_vm)
uprobe_multi_pid_filter__destroy(skel);
}
+static void test_session_skel_api(void)
+{
+ struct uprobe_multi_session *skel = NULL;
+ LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);
+ struct bpf_link *link = NULL;
+ int err;
+
+ skel = uprobe_multi_session__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "uprobe_multi_session__open_and_load"))
+ goto cleanup;
+
+ skel->bss->pid = getpid();
+ skel->bss->user_ptr = test_data;
+
+ err = uprobe_multi_session__attach(skel);
+ if (!ASSERT_OK(err, "uprobe_multi_session__attach"))
+ goto cleanup;
+
+ /* trigger all probes */
+ skel->bss->uprobe_multi_func_1_addr = (__u64) uprobe_multi_func_1;
+ skel->bss->uprobe_multi_func_2_addr = (__u64) uprobe_multi_func_2;
+ skel->bss->uprobe_multi_func_3_addr = (__u64) uprobe_multi_func_3;
+
+ uprobe_multi_func_1();
+ uprobe_multi_func_2();
+ uprobe_multi_func_3();
+
+ /*
+ * We expect 2 for uprobe_multi_func_2 because it runs both entry/return probe,
+ * uprobe_multi_func_[13] run just the entry probe. All expected numbers are
+ * doubled, because we run extra test for sleepable session.
+ */
+ ASSERT_EQ(skel->bss->uprobe_session_result[0], 2, "uprobe_multi_func_1_result");
+ ASSERT_EQ(skel->bss->uprobe_session_result[1], 4, "uprobe_multi_func_2_result");
+ ASSERT_EQ(skel->bss->uprobe_session_result[2], 2, "uprobe_multi_func_3_result");
+
+ /* We expect increase in 3 entry and 1 return session calls -> 4 */
+ ASSERT_EQ(skel->bss->uprobe_multi_sleep_result, 4, "uprobe_multi_sleep_result");
+
+cleanup:
+ bpf_link__destroy(link);
+ uprobe_multi_session__destroy(skel);
+}
+
static void test_bench_attach_uprobe(void)
{
long attach_start_ns = 0, attach_end_ns = 0;
@@ -1111,4 +1156,6 @@ void test_uprobe_multi_test(void)
test_pid_filter_process(false);
if (test__start_subtest("filter_clone_vm"))
test_pid_filter_process(true);
+ if (test__start_subtest("session"))
+ test_session_skel_api();
}
diff --git a/tools/testing/selftests/bpf/progs/uprobe_multi_session.c b/tools/testing/selftests/bpf/progs/uprobe_multi_session.c
new file mode 100644
index 000000000000..30bff90b68dc
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/uprobe_multi_session.c
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <stdbool.h>
+#include "bpf_kfuncs.h"
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+__u64 uprobe_multi_func_1_addr = 0;
+__u64 uprobe_multi_func_2_addr = 0;
+__u64 uprobe_multi_func_3_addr = 0;
+
+__u64 uprobe_session_result[3] = {};
+__u64 uprobe_multi_sleep_result = 0;
+
+void *user_ptr = 0;
+int pid = 0;
+
+static int uprobe_multi_check(void *ctx, bool is_return)
+{
+ const __u64 funcs[] = {
+ uprobe_multi_func_1_addr,
+ uprobe_multi_func_2_addr,
+ uprobe_multi_func_3_addr,
+ };
+ unsigned int i;
+ __u64 addr;
+
+ if (bpf_get_current_pid_tgid() >> 32 != pid)
+ return 1;
+
+ addr = bpf_get_func_ip(ctx);
+
+ for (i = 0; i < ARRAY_SIZE(funcs); i++) {
+ if (funcs[i] == addr) {
+ uprobe_session_result[i]++;
+ break;
+ }
+ }
+
+ /* only uprobe_multi_func_2 executes return probe */
+ if ((addr == uprobe_multi_func_1_addr) ||
+ (addr == uprobe_multi_func_3_addr))
+ return 1;
+
+ return 0;
+}
+
+SEC("uprobe.session//proc/self/exe:uprobe_multi_func_*")
+int uprobe(struct pt_regs *ctx)
+{
+ return uprobe_multi_check(ctx, bpf_session_is_return());
+}
+
+static __always_inline bool verify_sleepable_user_copy(void)
+{
+ char data[9];
+
+ bpf_copy_from_user(data, sizeof(data), user_ptr);
+ return bpf_strncmp(data, sizeof(data), "test_data") == 0;
+}
+
+SEC("uprobe.session.s//proc/self/exe:uprobe_multi_func_*")
+int uprobe_sleepable(struct pt_regs *ctx)
+{
+ if (verify_sleepable_user_copy())
+ uprobe_multi_sleep_result++;
+ return uprobe_multi_check(ctx, bpf_session_is_return());
+}
--
2.46.1
* [PATCHv5 bpf-next 08/13] selftests/bpf: Add uprobe session cookie test
2024-09-29 20:57 [PATCHv5 bpf-next 00/13] uprobe, bpf: Add session support Jiri Olsa
` (6 preceding siblings ...)
2024-09-29 20:57 ` [PATCHv5 bpf-next 07/13] selftests/bpf: Add uprobe session test Jiri Olsa
@ 2024-09-29 20:57 ` Jiri Olsa
2024-09-29 20:57 ` [PATCHv5 bpf-next 09/13] selftests/bpf: Add uprobe session recursive test Jiri Olsa
` (4 subsequent siblings)
12 siblings, 0 replies; 29+ messages in thread
From: Jiri Olsa @ 2024-09-29 20:57 UTC (permalink / raw)
To: Oleg Nesterov, Peter Zijlstra, Alexei Starovoitov,
Daniel Borkmann, Andrii Nakryiko
Cc: bpf, Martin KaFai Lau, Song Liu, Yonghong Song, John Fastabend,
KP Singh, Stanislav Fomichev, Hao Luo, Steven Rostedt,
Masami Hiramatsu, linux-kernel, linux-trace-kernel
Adding a uprobe session test that verifies the cookie value
gets properly propagated from the entry to the return program.
Acked-by: Andrii Nakryiko <andrii@kernel.org>
Signed-off-by: Jiri Olsa <jolsa@kernel.org>
---
.../bpf/prog_tests/uprobe_multi_test.c | 31 ++++++++++++
.../bpf/progs/uprobe_multi_session_cookie.c | 48 +++++++++++++++++++
2 files changed, 79 insertions(+)
create mode 100644 tools/testing/selftests/bpf/progs/uprobe_multi_session_cookie.c
diff --git a/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c b/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c
index b10d2dadb462..cc9030e86821 100644
--- a/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c
+++ b/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c
@@ -9,6 +9,7 @@
#include "uprobe_multi_consumers.skel.h"
#include "uprobe_multi_pid_filter.skel.h"
#include "uprobe_multi_session.skel.h"
+#include "uprobe_multi_session_cookie.skel.h"
#include "bpf/libbpf_internal.h"
#include "testing_helpers.h"
#include "../sdt.h"
@@ -1060,6 +1061,34 @@ static void test_session_skel_api(void)
uprobe_multi_session__destroy(skel);
}
+static void test_session_cookie_skel_api(void)
+{
+ struct uprobe_multi_session_cookie *skel = NULL;
+ int err;
+
+ skel = uprobe_multi_session_cookie__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "uprobe_multi_session_cookie__open_and_load"))
+ goto cleanup;
+
+ skel->bss->pid = getpid();
+
+ err = uprobe_multi_session_cookie__attach(skel);
+ if (!ASSERT_OK(err, "uprobe_multi_session_cookie__attach"))
+ goto cleanup;
+
+ /* trigger all probes */
+ uprobe_multi_func_1();
+ uprobe_multi_func_2();
+ uprobe_multi_func_3();
+
+ ASSERT_EQ(skel->bss->test_uprobe_1_result, 1, "test_uprobe_1_result");
+ ASSERT_EQ(skel->bss->test_uprobe_2_result, 2, "test_uprobe_2_result");
+ ASSERT_EQ(skel->bss->test_uprobe_3_result, 3, "test_uprobe_3_result");
+
+cleanup:
+ uprobe_multi_session_cookie__destroy(skel);
+}
+
static void test_bench_attach_uprobe(void)
{
long attach_start_ns = 0, attach_end_ns = 0;
@@ -1158,4 +1187,6 @@ void test_uprobe_multi_test(void)
test_pid_filter_process(true);
if (test__start_subtest("session"))
test_session_skel_api();
+ if (test__start_subtest("session_cookie"))
+ test_session_cookie_skel_api();
}
diff --git a/tools/testing/selftests/bpf/progs/uprobe_multi_session_cookie.c b/tools/testing/selftests/bpf/progs/uprobe_multi_session_cookie.c
new file mode 100644
index 000000000000..5befdf944dc6
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/uprobe_multi_session_cookie.c
@@ -0,0 +1,48 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <stdbool.h>
+#include "bpf_kfuncs.h"
+
+char _license[] SEC("license") = "GPL";
+
+int pid = 0;
+
+__u64 test_uprobe_1_result = 0;
+__u64 test_uprobe_2_result = 0;
+__u64 test_uprobe_3_result = 0;
+
+static int check_cookie(__u64 val, __u64 *result)
+{
+ __u64 *cookie;
+
+ if (bpf_get_current_pid_tgid() >> 32 != pid)
+ return 1;
+
+ cookie = bpf_session_cookie();
+
+ if (bpf_session_is_return())
+ *result = *cookie == val ? val : 0;
+ else
+ *cookie = val;
+ return 0;
+}
+
+SEC("uprobe.session//proc/self/exe:uprobe_multi_func_1")
+int uprobe_1(struct pt_regs *ctx)
+{
+ return check_cookie(1, &test_uprobe_1_result);
+}
+
+SEC("uprobe.session//proc/self/exe:uprobe_multi_func_2")
+int uprobe_2(struct pt_regs *ctx)
+{
+ return check_cookie(2, &test_uprobe_2_result);
+}
+
+SEC("uprobe.session//proc/self/exe:uprobe_multi_func_3")
+int uprobe_3(struct pt_regs *ctx)
+{
+ return check_cookie(3, &test_uprobe_3_result);
+}
--
2.46.1
* [PATCHv5 bpf-next 09/13] selftests/bpf: Add uprobe session recursive test
2024-09-29 20:57 [PATCHv5 bpf-next 00/13] uprobe, bpf: Add session support Jiri Olsa
` (7 preceding siblings ...)
2024-09-29 20:57 ` [PATCHv5 bpf-next 08/13] selftests/bpf: Add uprobe session cookie test Jiri Olsa
@ 2024-09-29 20:57 ` Jiri Olsa
2024-09-29 20:57 ` [PATCHv5 bpf-next 10/13] selftests/bpf: Add uprobe session verifier test for return value Jiri Olsa
` (3 subsequent siblings)
12 siblings, 0 replies; 29+ messages in thread
From: Jiri Olsa @ 2024-09-29 20:57 UTC (permalink / raw)
To: Oleg Nesterov, Peter Zijlstra, Alexei Starovoitov,
Daniel Borkmann, Andrii Nakryiko
Cc: bpf, Martin KaFai Lau, Song Liu, Yonghong Song, John Fastabend,
KP Singh, Stanislav Fomichev, Hao Luo, Steven Rostedt,
Masami Hiramatsu, linux-kernel, linux-trace-kernel
Adding uprobe session test that verifies the cookie value is stored
properly when a single uprobed function is executed recursively.
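
Each recursion level gets its own session instance, so the cookie written
at a given entry is the one seen by the matching return. An illustrative
sketch (separate from the test added below) tracking the nesting depth:

  // SPDX-License-Identifier: GPL-2.0
  /* Illustrative sketch only: each nesting level stores its depth in its
   * own session cookie and reads it back when that level returns.
   */
  #include <linux/bpf.h>
  #include <bpf/bpf_helpers.h>
  #include <bpf/bpf_tracing.h>
  #include "bpf_kfuncs.h"

  char _license[] SEC("license") = "GPL";

  __u64 depth;

  SEC("uprobe.session//proc/self/exe:uprobe_session_recursive")
  int track_depth(struct pt_regs *ctx)
  {
          __u64 *cookie = bpf_session_cookie();

          if (!bpf_session_is_return()) {
                  *cookie = ++depth;
                  return 0;
          }

          /* returns unwind deepest-first, so cookies come back in reverse */
          bpf_printk("leaving depth %llu", *cookie);
          depth--;
          return 0;
  }
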
Acked-by: Andrii Nakryiko <andrii@kernel.org>
Signed-off-by: Jiri Olsa <jolsa@kernel.org>
---
.../bpf/prog_tests/uprobe_multi_test.c | 57 +++++++++++++++++++
.../progs/uprobe_multi_session_recursive.c | 44 ++++++++++++++
2 files changed, 101 insertions(+)
create mode 100644 tools/testing/selftests/bpf/progs/uprobe_multi_session_recursive.c
diff --git a/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c b/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c
index cc9030e86821..284cd7fce576 100644
--- a/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c
+++ b/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c
@@ -10,6 +10,7 @@
#include "uprobe_multi_pid_filter.skel.h"
#include "uprobe_multi_session.skel.h"
#include "uprobe_multi_session_cookie.skel.h"
+#include "uprobe_multi_session_recursive.skel.h"
#include "bpf/libbpf_internal.h"
#include "testing_helpers.h"
#include "../sdt.h"
@@ -36,6 +37,12 @@ noinline void usdt_trigger(void)
STAP_PROBE(test, pid_filter_usdt);
}
+noinline void uprobe_session_recursive(int i)
+{
+ if (i)
+ uprobe_session_recursive(i - 1);
+}
+
struct child {
int go[2];
int c2p[2]; /* child -> parent channel */
@@ -1089,6 +1096,54 @@ static void test_session_cookie_skel_api(void)
uprobe_multi_session_cookie__destroy(skel);
}
+static void test_session_recursive_skel_api(void)
+{
+ struct uprobe_multi_session_recursive *skel = NULL;
+ int i, err;
+
+ skel = uprobe_multi_session_recursive__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "uprobe_multi_session_recursive__open_and_load"))
+ goto cleanup;
+
+ skel->bss->pid = getpid();
+
+ err = uprobe_multi_session_recursive__attach(skel);
+ if (!ASSERT_OK(err, "uprobe_multi_session_recursive__attach"))
+ goto cleanup;
+
+ for (i = 0; i < ARRAY_SIZE(skel->bss->test_uprobe_cookie_entry); i++)
+ skel->bss->test_uprobe_cookie_entry[i] = i + 1;
+
+ uprobe_session_recursive(5);
+
+ /*
+ * entry uprobe:
+ * uprobe_session_recursive(5) { *cookie = 1, return 0
+ * uprobe_session_recursive(4) { *cookie = 2, return 1
+ * uprobe_session_recursive(3) { *cookie = 3, return 0
+ * uprobe_session_recursive(2) { *cookie = 4, return 1
+ * uprobe_session_recursive(1) { *cookie = 5, return 0
+ * uprobe_session_recursive(0) { *cookie = 6, return 1
+ * return uprobe:
+ * } i = 0 not executed
+ * } i = 1 test_uprobe_cookie_return[0] = 5
+ * } i = 2 not executed
+ * } i = 3 test_uprobe_cookie_return[1] = 3
+ * } i = 4 not executed
+ * } i = 5 test_uprobe_cookie_return[2] = 1
+ */
+
+ ASSERT_EQ(skel->bss->idx_entry, 6, "idx_entry");
+ ASSERT_EQ(skel->bss->idx_return, 3, "idx_return");
+
+ ASSERT_EQ(skel->bss->test_uprobe_cookie_return[0], 5, "test_uprobe_cookie_return[0]");
+ ASSERT_EQ(skel->bss->test_uprobe_cookie_return[1], 3, "test_uprobe_cookie_return[1]");
+ ASSERT_EQ(skel->bss->test_uprobe_cookie_return[2], 1, "test_uprobe_cookie_return[2]");
+
+cleanup:
+ uprobe_multi_session_recursive__destroy(skel);
+}
+
static void test_bench_attach_uprobe(void)
{
long attach_start_ns = 0, attach_end_ns = 0;
@@ -1189,4 +1244,6 @@ void test_uprobe_multi_test(void)
test_session_skel_api();
if (test__start_subtest("session_cookie"))
test_session_cookie_skel_api();
+ if (test__start_subtest("session_cookie_recursive"))
+ test_session_recursive_skel_api();
}
diff --git a/tools/testing/selftests/bpf/progs/uprobe_multi_session_recursive.c b/tools/testing/selftests/bpf/progs/uprobe_multi_session_recursive.c
new file mode 100644
index 000000000000..8fbcd69fae22
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/uprobe_multi_session_recursive.c
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <stdbool.h>
+#include "bpf_kfuncs.h"
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+int pid = 0;
+
+int idx_entry = 0;
+int idx_return = 0;
+
+__u64 test_uprobe_cookie_entry[6];
+__u64 test_uprobe_cookie_return[3];
+
+static int check_cookie(void)
+{
+ __u64 *cookie = bpf_session_cookie();
+
+ if (bpf_session_is_return()) {
+ if (idx_return >= ARRAY_SIZE(test_uprobe_cookie_return))
+ return 1;
+ test_uprobe_cookie_return[idx_return++] = *cookie;
+ return 0;
+ }
+
+ if (idx_entry >= ARRAY_SIZE(test_uprobe_cookie_entry))
+ return 1;
+ *cookie = test_uprobe_cookie_entry[idx_entry];
+ return idx_entry++ % 2;
+}
+
+
+SEC("uprobe.session//proc/self/exe:uprobe_session_recursive")
+int uprobe_recursive(struct pt_regs *ctx)
+{
+ if (bpf_get_current_pid_tgid() >> 32 != pid)
+ return 1;
+
+ return check_cookie();
+}
--
2.46.1
* [PATCHv5 bpf-next 10/13] selftests/bpf: Add uprobe session verifier test for return value
2024-09-29 20:57 [PATCHv5 bpf-next 00/13] uprobe, bpf: Add session support Jiri Olsa
` (8 preceding siblings ...)
2024-09-29 20:57 ` [PATCHv5 bpf-next 09/13] selftests/bpf: Add uprobe session recursive test Jiri Olsa
@ 2024-09-29 20:57 ` Jiri Olsa
2024-09-29 20:57 ` [PATCHv5 bpf-next 11/13] selftests/bpf: Add kprobe " Jiri Olsa
` (2 subsequent siblings)
12 siblings, 0 replies; 29+ messages in thread
From: Jiri Olsa @ 2024-09-29 20:57 UTC (permalink / raw)
To: Oleg Nesterov, Peter Zijlstra, Alexei Starovoitov,
Daniel Borkmann, Andrii Nakryiko
Cc: bpf, Martin KaFai Lau, Song Liu, Yonghong Song, John Fastabend,
KP Singh, Stanislav Fomichev, Hao Luo, Steven Rostedt,
Masami Hiramatsu, linux-kernel, linux-trace-kernel
Making sure a uprobe.session program can only return values in [0,1].
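
The return value of the entry invocation decides whether the return probe
runs for that consumer (0 keeps it, 1 skips it, as the single consumer
test later in the series exercises), so anything outside [0,1] must be
rejected. An illustrative sketch of a program that normalizes a filtering
decision into that range:

  // SPDX-License-Identifier: GPL-2.0
  /* Illustrative sketch only: a session program has to return 0 or 1. */
  #include <linux/bpf.h>
  #include <bpf/bpf_helpers.h>
  #include <bpf/bpf_tracing.h>
  #include "bpf_kfuncs.h"

  char _license[] SEC("license") = "GPL";

  int pid = 0;

  SEC("uprobe.session//proc/self/exe:uprobe_multi_func_1")
  int session_filter(struct pt_regs *ctx)
  {
          if (bpf_session_is_return())
                  return 0;

          /* 1 skips the return probe for this consumer, 0 keeps it;
           * any other value fails verification as the test below shows.
           */
          return (bpf_get_current_pid_tgid() >> 32 != pid) ? 1 : 0;
  }
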
Signed-off-by: Jiri Olsa <jolsa@kernel.org>
---
.../bpf/prog_tests/uprobe_multi_test.c | 2 ++
.../bpf/progs/uprobe_multi_verifier.c | 31 +++++++++++++++++++
2 files changed, 33 insertions(+)
create mode 100644 tools/testing/selftests/bpf/progs/uprobe_multi_verifier.c
diff --git a/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c b/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c
index 284cd7fce576..e693eeb1a5a5 100644
--- a/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c
+++ b/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c
@@ -11,6 +11,7 @@
#include "uprobe_multi_session.skel.h"
#include "uprobe_multi_session_cookie.skel.h"
#include "uprobe_multi_session_recursive.skel.h"
+#include "uprobe_multi_verifier.skel.h"
#include "bpf/libbpf_internal.h"
#include "testing_helpers.h"
#include "../sdt.h"
@@ -1246,4 +1247,5 @@ void test_uprobe_multi_test(void)
test_session_cookie_skel_api();
if (test__start_subtest("session_cookie_recursive"))
test_session_recursive_skel_api();
+ RUN_TESTS(uprobe_multi_verifier);
}
diff --git a/tools/testing/selftests/bpf/progs/uprobe_multi_verifier.c b/tools/testing/selftests/bpf/progs/uprobe_multi_verifier.c
new file mode 100644
index 000000000000..fe49f2cb5360
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/uprobe_multi_verifier.c
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/usdt.bpf.h>
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+
+SEC("uprobe.session")
+__success
+int uprobe_session_return_0(struct pt_regs *ctx)
+{
+ return 0;
+}
+
+SEC("uprobe.session")
+__success
+int uprobe_session_return_1(struct pt_regs *ctx)
+{
+ return 1;
+}
+
+SEC("uprobe.session")
+__failure
+__msg("At program exit the register R0 has smin=2 smax=2 should have been in [0, 1]")
+int uprobe_session_return_2(struct pt_regs *ctx)
+{
+ return 2;
+}
--
2.46.1
* [PATCHv5 bpf-next 11/13] selftests/bpf: Add kprobe session verifier test for return value
2024-09-29 20:57 [PATCHv5 bpf-next 00/13] uprobe, bpf: Add session support Jiri Olsa
` (9 preceding siblings ...)
2024-09-29 20:57 ` [PATCHv5 bpf-next 10/13] selftests/bpf: Add uprobe session verifier test for return value Jiri Olsa
@ 2024-09-29 20:57 ` Jiri Olsa
2024-09-29 20:57 ` [PATCHv5 bpf-next 12/13] selftests/bpf: Add uprobe session single consumer test Jiri Olsa
2024-09-29 20:57 ` [PATCHv5 bpf-next 13/13] selftests/bpf: Add uprobe sessions to " Jiri Olsa
12 siblings, 0 replies; 29+ messages in thread
From: Jiri Olsa @ 2024-09-29 20:57 UTC (permalink / raw)
To: Oleg Nesterov, Peter Zijlstra, Alexei Starovoitov,
Daniel Borkmann, Andrii Nakryiko
Cc: bpf, Martin KaFai Lau, Song Liu, Yonghong Song, John Fastabend,
KP Singh, Stanislav Fomichev, Hao Luo, Steven Rostedt,
Masami Hiramatsu, linux-kernel, linux-trace-kernel
Making sure a kprobe.session program can only return values in [0,1].
Signed-off-by: Jiri Olsa <jolsa@kernel.org>
---
.../bpf/prog_tests/kprobe_multi_test.c | 2 ++
.../bpf/progs/kprobe_multi_verifier.c | 31 +++++++++++++++++++
2 files changed, 33 insertions(+)
create mode 100644 tools/testing/selftests/bpf/progs/kprobe_multi_verifier.c
diff --git a/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c b/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c
index 960c9323d1e0..66ab1cae923e 100644
--- a/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c
+++ b/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c
@@ -6,6 +6,7 @@
#include "kprobe_multi_override.skel.h"
#include "kprobe_multi_session.skel.h"
#include "kprobe_multi_session_cookie.skel.h"
+#include "kprobe_multi_verifier.skel.h"
#include "bpf/libbpf_internal.h"
#include "bpf/hashmap.h"
@@ -764,4 +765,5 @@ void test_kprobe_multi_test(void)
test_session_skel_api();
if (test__start_subtest("session_cookie"))
test_session_cookie_skel_api();
+ RUN_TESTS(kprobe_multi_verifier);
}
diff --git a/tools/testing/selftests/bpf/progs/kprobe_multi_verifier.c b/tools/testing/selftests/bpf/progs/kprobe_multi_verifier.c
new file mode 100644
index 000000000000..288577e81deb
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/kprobe_multi_verifier.c
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/usdt.bpf.h>
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+
+SEC("kprobe.session")
+__success
+int kprobe_session_return_0(struct pt_regs *ctx)
+{
+ return 0;
+}
+
+SEC("kprobe.session")
+__success
+int kprobe_session_return_1(struct pt_regs *ctx)
+{
+ return 1;
+}
+
+SEC("kprobe.session")
+__failure
+__msg("At program exit the register R0 has smin=2 smax=2 should have been in [0, 1]")
+int kprobe_session_return_2(struct pt_regs *ctx)
+{
+ return 2;
+}
--
2.46.1
* [PATCHv5 bpf-next 12/13] selftests/bpf: Add uprobe session single consumer test
2024-09-29 20:57 [PATCHv5 bpf-next 00/13] uprobe, bpf: Add session support Jiri Olsa
` (10 preceding siblings ...)
2024-09-29 20:57 ` [PATCHv5 bpf-next 11/13] selftests/bpf: Add kprobe " Jiri Olsa
@ 2024-09-29 20:57 ` Jiri Olsa
2024-09-29 20:57 ` [PATCHv5 bpf-next 13/13] selftests/bpf: Add uprobe sessions to " Jiri Olsa
12 siblings, 0 replies; 29+ messages in thread
From: Jiri Olsa @ 2024-09-29 20:57 UTC (permalink / raw)
To: Oleg Nesterov, Peter Zijlstra, Alexei Starovoitov,
Daniel Borkmann, Andrii Nakryiko
Cc: bpf, Martin KaFai Lau, Song Liu, Yonghong Song, John Fastabend,
KP Singh, Stanislav Fomichev, Hao Luo, Steven Rostedt,
Masami Hiramatsu, linux-kernel, linux-trace-kernel
Testing that the session ret_handler bypass works on a single uprobe
with multiple consumers, each returning a different value from the
entry program to control whether its return probe runs.
Signed-off-by: Jiri Olsa <jolsa@kernel.org>
---
.../bpf/prog_tests/uprobe_multi_test.c | 33 ++++++++++++++
.../bpf/progs/uprobe_multi_session_single.c | 44 +++++++++++++++++++
2 files changed, 77 insertions(+)
create mode 100644 tools/testing/selftests/bpf/progs/uprobe_multi_session_single.c
diff --git a/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c b/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c
index e693eeb1a5a5..7e0228f8fcfc 100644
--- a/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c
+++ b/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c
@@ -9,6 +9,7 @@
#include "uprobe_multi_consumers.skel.h"
#include "uprobe_multi_pid_filter.skel.h"
#include "uprobe_multi_session.skel.h"
+#include "uprobe_multi_session_single.skel.h"
#include "uprobe_multi_session_cookie.skel.h"
#include "uprobe_multi_session_recursive.skel.h"
#include "uprobe_multi_verifier.skel.h"
@@ -1069,6 +1070,36 @@ static void test_session_skel_api(void)
uprobe_multi_session__destroy(skel);
}
+static void test_session_single_skel_api(void)
+{
+ struct uprobe_multi_session_single *skel = NULL;
+ LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);
+ int err;
+
+ skel = uprobe_multi_session_single__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "uprobe_multi_session_single__open_and_load"))
+ goto cleanup;
+
+ skel->bss->pid = getpid();
+
+ err = uprobe_multi_session_single__attach(skel);
+ if (!ASSERT_OK(err, "uprobe_multi_session_single__attach"))
+ goto cleanup;
+
+ uprobe_multi_func_1();
+
+ /*
+ * We expect consumer 0 and 2 to trigger just entry handler (value 1)
+ * and consumer 1 to hit both (value 2).
+ */
+ ASSERT_EQ(skel->bss->uprobe_session_result[0], 1, "uprobe_session_result_0");
+ ASSERT_EQ(skel->bss->uprobe_session_result[1], 2, "uprobe_session_result_1");
+ ASSERT_EQ(skel->bss->uprobe_session_result[2], 1, "uprobe_session_result_2");
+
+cleanup:
+ uprobe_multi_session_single__destroy(skel);
+}
+
static void test_session_cookie_skel_api(void)
{
struct uprobe_multi_session_cookie *skel = NULL;
@@ -1243,6 +1274,8 @@ void test_uprobe_multi_test(void)
test_pid_filter_process(true);
if (test__start_subtest("session"))
test_session_skel_api();
+ if (test__start_subtest("session_single"))
+ test_session_single_skel_api();
if (test__start_subtest("session_cookie"))
test_session_cookie_skel_api();
if (test__start_subtest("session_cookie_recursive"))
diff --git a/tools/testing/selftests/bpf/progs/uprobe_multi_session_single.c b/tools/testing/selftests/bpf/progs/uprobe_multi_session_single.c
new file mode 100644
index 000000000000..1fa53d3785f6
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/uprobe_multi_session_single.c
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <stdbool.h>
+#include "bpf_kfuncs.h"
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+__u64 uprobe_session_result[3] = {};
+int pid = 0;
+
+static int uprobe_multi_check(void *ctx, bool is_return, int idx)
+{
+ if (bpf_get_current_pid_tgid() >> 32 != pid)
+ return 1;
+
+ uprobe_session_result[idx]++;
+
+ /* only consumer 1 executes return probe */
+ if (idx == 0 || idx == 2)
+ return 1;
+
+ return 0;
+}
+
+SEC("uprobe.session//proc/self/exe:uprobe_multi_func_1")
+int uprobe_0(struct pt_regs *ctx)
+{
+ return uprobe_multi_check(ctx, bpf_session_is_return(), 0);
+}
+
+SEC("uprobe.session//proc/self/exe:uprobe_multi_func_1")
+int uprobe_1(struct pt_regs *ctx)
+{
+ return uprobe_multi_check(ctx, bpf_session_is_return(), 1);
+}
+
+SEC("uprobe.session//proc/self/exe:uprobe_multi_func_1")
+int uprobe_2(struct pt_regs *ctx)
+{
+ return uprobe_multi_check(ctx, bpf_session_is_return(), 2);
+}
--
2.46.1
* [PATCHv5 bpf-next 13/13] selftests/bpf: Add uprobe sessions to consumer test
2024-09-29 20:57 [PATCHv5 bpf-next 00/13] uprobe, bpf: Add session support Jiri Olsa
` (11 preceding siblings ...)
2024-09-29 20:57 ` [PATCHv5 bpf-next 12/13] selftests/bpf: Add uprobe session single consumer test Jiri Olsa
@ 2024-09-29 20:57 ` Jiri Olsa
12 siblings, 0 replies; 29+ messages in thread
From: Jiri Olsa @ 2024-09-29 20:57 UTC (permalink / raw)
To: Oleg Nesterov, Peter Zijlstra, Alexei Starovoitov,
Daniel Borkmann, Andrii Nakryiko
Cc: bpf, Martin KaFai Lau, Song Liu, Yonghong Song, John Fastabend,
KP Singh, Stanislav Fomichev, Hao Luo, Steven Rostedt,
Masami Hiramatsu, linux-kernel, linux-trace-kernel
Adding uprobe session consumers to the consumer test,
so that session probes are part of the test mix.
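
For reference, outside of the skeleton auto-attach used by the other
session tests, a session consumer is attached with the same
bpf_program__attach_uprobe_multi() call as the entry/return consumers,
just with the session option set. A condensed sketch of what the
uprobe_attach() change below does for the new programs (error handling
trimmed, assumes the session opt added earlier in the series):

  LIBBPF_OPTS(bpf_uprobe_multi_opts, opts,
          .session = true,        /* entry and return in one program */
  );
  struct bpf_link *link;

  link = bpf_program__attach_uprobe_multi(skel->progs.uprobe_4,
                                          0 /* pid, as passed below */,
                                          "/proc/self/exe",
                                          "uprobe_consumer_test", &opts);
  if (!link)
          return -errno;  /* libbpf sets errno on failure */
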
Signed-off-by: Jiri Olsa <jolsa@kernel.org>
---
.../bpf/prog_tests/uprobe_multi_test.c | 63 ++++++++++++++-----
.../bpf/progs/uprobe_multi_consumers.c | 16 ++++-
2 files changed, 64 insertions(+), 15 deletions(-)
diff --git a/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c b/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c
index 7e0228f8fcfc..b43107e87e78 100644
--- a/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c
+++ b/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c
@@ -765,6 +765,10 @@ get_program(struct uprobe_multi_consumers *skel, int prog)
return skel->progs.uprobe_2;
case 3:
return skel->progs.uprobe_3;
+ case 4:
+ return skel->progs.uprobe_4;
+ case 5:
+ return skel->progs.uprobe_5;
default:
ASSERT_FAIL("get_program");
return NULL;
@@ -783,6 +787,10 @@ get_link(struct uprobe_multi_consumers *skel, int link)
return &skel->links.uprobe_2;
case 3:
return &skel->links.uprobe_3;
+ case 4:
+ return &skel->links.uprobe_4;
+ case 5:
+ return &skel->links.uprobe_5;
default:
ASSERT_FAIL("get_link");
return NULL;
@@ -801,8 +809,10 @@ static int uprobe_attach(struct uprobe_multi_consumers *skel, int idx)
/*
* bit/prog: 0,1 uprobe entry
* bit/prog: 2,3 uprobe return
+ * bit/prog: 4,5 uprobe session
*/
opts.retprobe = idx == 2 || idx == 3;
+ opts.session = idx == 4 || idx == 5;
*link = bpf_program__attach_uprobe_multi(prog, 0, "/proc/self/exe",
"uprobe_consumer_test",
@@ -832,13 +842,13 @@ uprobe_consumer_test(struct uprobe_multi_consumers *skel,
int idx;
/* detach uprobe for each unset programs in 'before' state ... */
- for (idx = 0; idx < 4; idx++) {
+ for (idx = 0; idx < 6; idx++) {
if (test_bit(idx, before) && !test_bit(idx, after))
uprobe_detach(skel, idx);
}
/* ... and attach all new programs in 'after' state */
- for (idx = 0; idx < 4; idx++) {
+ for (idx = 0; idx < 6; idx++) {
if (!test_bit(idx, before) && test_bit(idx, after)) {
if (!ASSERT_OK(uprobe_attach(skel, idx), "uprobe_attach_after"))
return -1;
@@ -855,7 +865,7 @@ static int consumer_test(struct uprobe_multi_consumers *skel,
printf("consumer_test before %lu after %lu\n", before, after);
/* 'before' is each, we attach uprobe for every set idx */
- for (idx = 0; idx < 4; idx++) {
+ for (idx = 0; idx < 6; idx++) {
if (test_bit(idx, before)) {
if (!ASSERT_OK(uprobe_attach(skel, idx), "uprobe_attach_before"))
goto cleanup;
@@ -866,7 +876,7 @@ static int consumer_test(struct uprobe_multi_consumers *skel,
if (!ASSERT_EQ(err, 0, "uprobe_consumer_test"))
goto cleanup;
- for (idx = 0; idx < 4; idx++) {
+ for (idx = 0; idx < 6; idx++) {
const char *fmt = "BUG";
__u64 val = 0;
@@ -878,18 +888,38 @@ static int consumer_test(struct uprobe_multi_consumers *skel,
if (test_bit(idx, before))
val++;
fmt = "prog 0/1: uprobe";
- } else {
+ } else if (idx < 4) {
/*
* to trigger uretprobe consumer, the uretprobe needs to be installed,
* which means one of the 'return' uprobes was alive when probe was hit:
*
- * idxs: 2/3 uprobe return in 'installed' mask
+ * idxs: 2/3/4 uprobe return in 'installed' mask
*/
- unsigned long had_uretprobes = before & 0b1100; /* is uretprobe installed */
+ unsigned long had_uretprobes = before & 0b011100; /* is uretprobe installed */
if (had_uretprobes && test_bit(idx, after))
val++;
fmt = "idx 2/3: uretprobe";
+ } else if (idx == 4) {
+ /*
+ * session with return
+ * +1 if defined in 'before'
+ * +1 if defined in 'after'
+ */
+ if (test_bit(idx, before)) {
+ val++;
+ if (test_bit(idx, after))
+ val++;
+ }
+ fmt = "idx 4 : session with return";
+ } else if (idx == 5) {
+ /*
+ * session without return
+ * +1 if defined in 'before'
+ */
+ if (test_bit(idx, before))
+ val++;
+ fmt = "idx 5 : session with NO return";
}
if (!ASSERT_EQ(skel->bss->uprobe_result[idx], val, fmt))
@@ -900,7 +930,7 @@ static int consumer_test(struct uprobe_multi_consumers *skel,
ret = 0;
cleanup:
- for (idx = 0; idx < 4; idx++)
+ for (idx = 0; idx < 6; idx++)
uprobe_detach(skel, idx);
return ret;
}
@@ -920,40 +950,45 @@ static void test_consumers(void)
*
* - 2 uprobe entry consumer
* - 2 uprobe exit consumers
+ * - 2 uprobe session consumers
*
- * The test uses 4 uprobes attached on single function, but that
- * translates into single uprobe with 4 consumers in kernel.
+ * The test uses 6 uprobes attached on single function, but that
+ * translates into single uprobe with 6 consumers in kernel.
*
* The before/after values present the state of attached consumers
* before and after the probed function:
*
* bit/prog 0,1 : uprobe entry
* bit/prog 2,3 : uprobe return
+ * bit/prog 4 : uprobe session with return
+ * bit/prog 5 : uprobe session without return
*
* For example for:
*
- * before = 0b0101
- * after = 0b0110
+ * before = 0b010101
+ * after = 0b000110
*
* it means that before we call 'uprobe_consumer_test' we attach
* uprobes defined in 'before' value:
*
* - bit/prog 0: uprobe entry
* - bit/prog 2: uprobe return
+ * - bit/prog 4: uprobe session with return
*
* uprobe_consumer_test is called and inside it we attach and detach
* uprobes based on 'after' value:
*
* - bit/prog 0: stays untouched
* - bit/prog 2: uprobe return is detached
+ * - bit/prog 4: uprobe session is detached
*
* uprobe_consumer_test returns and we check counters values increased
* by bpf programs on each uprobe to match the expected count based on
* before/after bits.
*/
- for (before = 0; before < 16; before++) {
- for (after = 0; after < 16; after++)
+ for (before = 0; before < 64; before++) {
+ for (after = 0; after < 64; after++)
if (consumer_test(skel, before, after))
goto out;
}
diff --git a/tools/testing/selftests/bpf/progs/uprobe_multi_consumers.c b/tools/testing/selftests/bpf/progs/uprobe_multi_consumers.c
index 7e0fdcbbd242..448b753a4c66 100644
--- a/tools/testing/selftests/bpf/progs/uprobe_multi_consumers.c
+++ b/tools/testing/selftests/bpf/progs/uprobe_multi_consumers.c
@@ -8,7 +8,7 @@
char _license[] SEC("license") = "GPL";
-__u64 uprobe_result[4];
+__u64 uprobe_result[6];
SEC("uprobe.multi")
int uprobe_0(struct pt_regs *ctx)
@@ -37,3 +37,17 @@ int uprobe_3(struct pt_regs *ctx)
uprobe_result[3]++;
return 0;
}
+
+SEC("uprobe.session")
+int uprobe_4(struct pt_regs *ctx)
+{
+ uprobe_result[4]++;
+ return 0;
+}
+
+SEC("uprobe.session")
+int uprobe_5(struct pt_regs *ctx)
+{
+ uprobe_result[5]++;
+ return 1;
+}
--
2.46.1