* [PATCH v6 1/5] tracing/fprobe: Reject registration of a registered fprobe before init
2026-04-14 9:14 [PATCH v6 0/5] tracing/fprobe: Fix fprobe_ip_table related bugs Masami Hiramatsu (Google)
@ 2026-04-14 9:14 ` Masami Hiramatsu (Google)
2026-04-14 9:14 ` [PATCH v6 2/5] tracing/fprobe: Remove fprobe from hash in failure path Masami Hiramatsu (Google)
` (3 subsequent siblings)
4 siblings, 0 replies; 8+ messages in thread
From: Masami Hiramatsu (Google) @ 2026-04-14 9:14 UTC (permalink / raw)
To: Steven Rostedt, Masami Hiramatsu
Cc: Menglong Dong, Mathieu Desnoyers, jiang.biao, linux-kernel,
linux-trace-kernel
From: Masami Hiramatsu (Google) <mhiramat@kernel.org>
Reject registration of a registered fprobe which is on the fprobe
hash table before initializing fprobe.
add_fprobe_hash() checks for this re-registered fprobe, but since
fprobe_init() clears the hlist_array field, it is too late to check it
there. It has to check for re-registration before touching the fprobe.
Fixes: 4346ba160409 ("fprobe: Rewrite fprobe on function-graph tracer")
Cc: stable@vger.kernel.org
Signed-off-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
---
Changes in v6:
- Newly added.
---
kernel/trace/fprobe.c | 21 ++++++++++-----------
1 file changed, 10 insertions(+), 11 deletions(-)
diff --git a/kernel/trace/fprobe.c b/kernel/trace/fprobe.c
index dcadf1d23b8a..fc7018b28fdd 100644
--- a/kernel/trace/fprobe.c
+++ b/kernel/trace/fprobe.c
@@ -4,6 +4,7 @@
*/
#define pr_fmt(fmt) "fprobe: " fmt
+#include <linux/cleanup.h>
#include <linux/err.h>
#include <linux/fprobe.h>
#include <linux/kallsyms.h>
@@ -107,7 +108,7 @@ static bool delete_fprobe_node(struct fprobe_hlist_node *node)
}
/* Check existence of the fprobe */
-static bool is_fprobe_still_exist(struct fprobe *fp)
+static bool fprobe_registered(struct fprobe *fp)
{
struct hlist_head *head;
struct fprobe_hlist *fph;
@@ -120,7 +121,7 @@ static bool is_fprobe_still_exist(struct fprobe *fp)
}
return false;
}
-NOKPROBE_SYMBOL(is_fprobe_still_exist);
+NOKPROBE_SYMBOL(fprobe_registered);
static int add_fprobe_hash(struct fprobe *fp)
{
@@ -132,9 +133,6 @@ static int add_fprobe_hash(struct fprobe *fp)
if (WARN_ON_ONCE(!fph))
return -EINVAL;
- if (is_fprobe_still_exist(fp))
- return -EEXIST;
-
head = &fprobe_table[hash_ptr(fp, FPROBE_HASH_BITS)];
hlist_add_head_rcu(&fp->hlist_array->hlist, head);
return 0;
@@ -149,7 +147,7 @@ static int del_fprobe_hash(struct fprobe *fp)
if (WARN_ON_ONCE(!fph))
return -EINVAL;
- if (!is_fprobe_still_exist(fp))
+ if (!fprobe_registered(fp))
return -ENOENT;
fph->fp = NULL;
@@ -482,7 +480,7 @@ static void fprobe_return(struct ftrace_graph_ret *trace,
if (!fp)
break;
curr += FPROBE_HEADER_SIZE_IN_LONG;
- if (is_fprobe_still_exist(fp) && !fprobe_disabled(fp)) {
+ if (fprobe_registered(fp) && !fprobe_disabled(fp)) {
if (WARN_ON_ONCE(curr + size > size_words))
break;
fp->exit_handler(fp, trace->func, ret_ip, fregs,
@@ -841,12 +839,14 @@ int register_fprobe_ips(struct fprobe *fp, unsigned long *addrs, int num)
struct fprobe_hlist *hlist_array;
int ret, i;
+ guard(mutex)(&fprobe_mutex);
+ if (fprobe_registered(fp))
+ return -EEXIST;
+
ret = fprobe_init(fp, addrs, num);
if (ret)
return ret;
- mutex_lock(&fprobe_mutex);
-
hlist_array = fp->hlist_array;
if (fprobe_is_ftrace(fp))
ret = fprobe_ftrace_add_ips(addrs, num);
@@ -866,7 +866,6 @@ int register_fprobe_ips(struct fprobe *fp, unsigned long *addrs, int num)
delete_fprobe_node(&hlist_array->array[i]);
}
}
- mutex_unlock(&fprobe_mutex);
if (ret)
fprobe_fail_cleanup(fp);
@@ -928,7 +927,7 @@ int unregister_fprobe(struct fprobe *fp)
int ret = 0, i, count;
mutex_lock(&fprobe_mutex);
- if (!fp || !is_fprobe_still_exist(fp)) {
+ if (!fp || !fprobe_registered(fp)) {
ret = -EINVAL;
goto out;
}
^ permalink raw reply related [flat|nested] 8+ messages in thread* [PATCH v6 2/5] tracing/fprobe: Remove fprobe from hash in failure path
2026-04-14 9:14 [PATCH v6 0/5] tracing/fprobe: Fix fprobe_ip_table related bugs Masami Hiramatsu (Google)
2026-04-14 9:14 ` [PATCH v6 1/5] tracing/fprobe: Reject registration of a registered fprobe before init Masami Hiramatsu (Google)
@ 2026-04-14 9:14 ` Masami Hiramatsu (Google)
2026-04-15 9:47 ` Menglong Dong
2026-04-14 9:15 ` [PATCH v6 3/5] tracing/fprobe: Avoid kcalloc() in rcu_read_lock section Masami Hiramatsu (Google)
` (2 subsequent siblings)
4 siblings, 1 reply; 8+ messages in thread
From: Masami Hiramatsu (Google) @ 2026-04-14 9:14 UTC (permalink / raw)
To: Steven Rostedt, Masami Hiramatsu
Cc: Menglong Dong, Mathieu Desnoyers, jiang.biao, linux-kernel,
linux-trace-kernel
From: Masami Hiramatsu (Google) <mhiramat@kernel.org>
When register_fprobe_ips() fails, it tries to remove a list of
fprobe_hash_node from fprobe_ip_table, but it fails to remove the
fprobe itself from fprobe_table. Moreover, when removing
the fprobe_hash_node which is added to rhltable once, it must
use kfree_rcu() after removing from rhltable.
To fix these issues, this reuses unregister_fprobe() internal
code to rollback the half-way registered fprobe.
Fixes: 4346ba160409 ("fprobe: Rewrite fprobe on function-graph tracer")
Cc: stable@vger.kernel.org
Signed-off-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
---
Changes in v6:
- Wait for an RCU grace period before returning error in
unregister_fprobe_nolock().
Changes in v5:
- When rolling back an fprobe that failed to register, the
fprobe_hash_node are forcibly removed and warn if failure.
Changes in v4:
- Remove short-cut case because we always need to update ftrace_ops.
- Use guard(mutex) in register_fprobe_ips() to unlock it correctly.
- Remove redundant !ret check in register_fprobe_ips().
- Do not set hlist_array->size in failure case, instead,
hlist_array->array[i].fp is set only when insertion is succeeded.
Changes in v3:
- Newly added.
---
kernel/trace/fprobe.c | 109 ++++++++++++++++++++++++++++---------------------
1 file changed, 63 insertions(+), 46 deletions(-)
diff --git a/kernel/trace/fprobe.c b/kernel/trace/fprobe.c
index fc7018b28fdd..6a23bb787295 100644
--- a/kernel/trace/fprobe.c
+++ b/kernel/trace/fprobe.c
@@ -79,20 +79,27 @@ static const struct rhashtable_params fprobe_rht_params = {
};
/* Node insertion and deletion requires the fprobe_mutex */
-static int insert_fprobe_node(struct fprobe_hlist_node *node)
+static int insert_fprobe_node(struct fprobe_hlist_node *node, struct fprobe *fp)
{
+ int ret;
+
lockdep_assert_held(&fprobe_mutex);
- return rhltable_insert(&fprobe_ip_table, &node->hlist, fprobe_rht_params);
+ ret = rhltable_insert(&fprobe_ip_table, &node->hlist, fprobe_rht_params);
+ /* Set the fprobe pointer if insertion was successful. */
+ if (!ret)
+ WRITE_ONCE(node->fp, fp);
+ return ret;
}
/* Return true if there are synonims */
static bool delete_fprobe_node(struct fprobe_hlist_node *node)
{
- lockdep_assert_held(&fprobe_mutex);
bool ret;
- /* Avoid double deleting */
+ lockdep_assert_held(&fprobe_mutex);
+
+ /* Avoid double deleting and non-inserted nodes */
if (READ_ONCE(node->fp) != NULL) {
WRITE_ONCE(node->fp, NULL);
rhltable_remove(&fprobe_ip_table, &node->hlist,
@@ -757,7 +764,6 @@ static int fprobe_init(struct fprobe *fp, unsigned long *addrs, int num)
fp->hlist_array = hlist_array;
hlist_array->fp = fp;
for (i = 0; i < num; i++) {
- hlist_array->array[i].fp = fp;
addr = ftrace_location(addrs[i]);
if (!addr) {
fprobe_fail_cleanup(fp);
@@ -821,6 +827,8 @@ int register_fprobe(struct fprobe *fp, const char *filter, const char *notfilter
}
EXPORT_SYMBOL_GPL(register_fprobe);
+static int unregister_fprobe_nolock(struct fprobe *fp, bool force);
+
/**
* register_fprobe_ips() - Register fprobe to ftrace by address.
* @fp: A fprobe data structure to be registered.
@@ -847,29 +855,26 @@ int register_fprobe_ips(struct fprobe *fp, unsigned long *addrs, int num)
if (ret)
return ret;
- hlist_array = fp->hlist_array;
if (fprobe_is_ftrace(fp))
ret = fprobe_ftrace_add_ips(addrs, num);
else
ret = fprobe_graph_add_ips(addrs, num);
+ if (ret) {
+ fprobe_fail_cleanup(fp);
+ return ret;
+ }
- if (!ret) {
- add_fprobe_hash(fp);
- for (i = 0; i < hlist_array->size; i++) {
- ret = insert_fprobe_node(&hlist_array->array[i]);
- if (ret)
- break;
- }
- /* fallback on insert error */
+ hlist_array = fp->hlist_array;
+ add_fprobe_hash(fp);
+ for (i = 0; i < hlist_array->size; i++) {
+ ret = insert_fprobe_node(&hlist_array->array[i], fp);
if (ret) {
- for (i--; i >= 0; i--)
- delete_fprobe_node(&hlist_array->array[i]);
+ if (unregister_fprobe_nolock(fp, true))
+ pr_warn("Failed to cleanup fprobe after insertion failure.\n");
+ break;
}
}
- if (ret)
- fprobe_fail_cleanup(fp);
-
return ret;
}
EXPORT_SYMBOL_GPL(register_fprobe_ips);
@@ -912,37 +917,29 @@ bool fprobe_is_registered(struct fprobe *fp)
return true;
}
-/**
- * unregister_fprobe() - Unregister fprobe.
- * @fp: A fprobe data structure to be unregistered.
- *
- * Unregister fprobe (and remove ftrace hooks from the function entries).
- *
- * Return 0 if @fp is unregistered successfully, -errno if not.
- */
-int unregister_fprobe(struct fprobe *fp)
+static int unregister_fprobe_nolock(struct fprobe *fp, bool force)
{
- struct fprobe_hlist *hlist_array;
+ struct fprobe_hlist *hlist_array = fp->hlist_array;
unsigned long *addrs = NULL;
- int ret = 0, i, count;
+ int i, count;
- mutex_lock(&fprobe_mutex);
- if (!fp || !fprobe_registered(fp)) {
- ret = -EINVAL;
- goto out;
- }
-
- hlist_array = fp->hlist_array;
addrs = kcalloc(hlist_array->size, sizeof(unsigned long), GFP_KERNEL);
- if (!addrs) {
- ret = -ENOMEM; /* TODO: Fallback to one-by-one loop */
- goto out;
- }
+ if (!addrs && !force)
+ return -ENOMEM;
+ /*
+ * If @force is set, this function will remove fprobe_hash_node
+ * from the hash table even if memory allocation fails. However,
+ * ftrace_ops will not be updated. Anyway, when the last fprobe
+ * is unregistered, ftrace_ops is also unregistered.
+ */
/* Remove non-synonim ips from table and hash */
count = 0;
for (i = 0; i < hlist_array->size; i++) {
- if (!delete_fprobe_node(&hlist_array->array[i]))
+ if (delete_fprobe_node(&hlist_array->array[i]))
+ continue;
+
+ if (addrs)
addrs[count++] = hlist_array->array[i].addr;
}
del_fprobe_hash(fp);
@@ -951,15 +948,35 @@ int unregister_fprobe(struct fprobe *fp)
fprobe_ftrace_remove_ips(addrs, count);
else
fprobe_graph_remove_ips(addrs, count);
+ /*
+ * If count == 0, instead of calling ftrace_set_filter_ips(),
+ * we must wait for RCU grace period to finish del_fprobe_hash().
+ */
+ if (!count)
+ synchronize_rcu();
kfree_rcu(hlist_array, rcu);
fp->hlist_array = NULL;
+ kfree(addrs);
-out:
- mutex_unlock(&fprobe_mutex);
+ return !addrs ? -ENOMEM : 0;
+}
- kfree(addrs);
- return ret;
+/**
+ * unregister_fprobe() - Unregister fprobe.
+ * @fp: A fprobe data structure to be unregistered.
+ *
+ * Unregister fprobe (and remove ftrace hooks from the function entries).
+ *
+ * Return 0 if @fp is unregistered successfully, -errno if not.
+ */
+int unregister_fprobe(struct fprobe *fp)
+{
+ guard(mutex)(&fprobe_mutex);
+ if (!fp || !fprobe_registered(fp))
+ return -EINVAL;
+
+ return unregister_fprobe_nolock(fp, false);
}
EXPORT_SYMBOL_GPL(unregister_fprobe);
^ permalink raw reply related [flat|nested] 8+ messages in thread* Re: [PATCH v6 2/5] tracing/fprobe: Remove fprobe from hash in failure path
2026-04-14 9:14 ` [PATCH v6 2/5] tracing/fprobe: Remove fprobe from hash in failure path Masami Hiramatsu (Google)
@ 2026-04-15 9:47 ` Menglong Dong
2026-04-15 10:06 ` Masami Hiramatsu
0 siblings, 1 reply; 8+ messages in thread
From: Menglong Dong @ 2026-04-15 9:47 UTC (permalink / raw)
To: Steven Rostedt, Masami Hiramatsu, Masami Hiramatsu (Google)
Cc: Menglong Dong, Mathieu Desnoyers, jiang.biao, linux-kernel,
linux-trace-kernel
On 2026/4/14 17:14 Masami Hiramatsu (Google) <mhiramat@kernel.org> write:
> From: Masami Hiramatsu (Google) <mhiramat@kernel.org>
>
> When register_fprobe_ips() fails, it tries to remove a list of
> fprobe_hash_node from fprobe_ip_table, but it missed to remove
> fprobe itself from fprobe_table. Moreover, when removing
> the fprobe_hash_node which is added to rhltable once, it must
> use kfree_rcu() after removing from rhltable.
>
> To fix these issues, this reuses unregister_fprobe() internal
> code to rollback the half-way registered fprobe.
>
> Fixes: 4346ba160409 ("fprobe: Rewrite fprobe on function-graph tracer")
> Cc: stable@vger.kernel.org
> Signed-off-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
> ---
[...]
>
> +static int unregister_fprobe_nolock(struct fprobe *fp, bool force);
> +
> /**
> * register_fprobe_ips() - Register fprobe to ftrace by address.
> * @fp: A fprobe data structure to be registered.
> @@ -847,29 +855,26 @@ int register_fprobe_ips(struct fprobe *fp, unsigned long *addrs, int num)
> if (ret)
> return ret;
Hi, Masami. The logic of unregister_fprobe_nolock() looks a little
messy. How about we make the logic here like this:
for (i = 0; i < hlist_array->size; i++) {
// The node->fp is NULL, so it's safe to add the node before
// fprobe_ftrace_add_ips(), right?
ret = insert_fprobe_node(&hlist_array->array[i], fp);
if (ret)
goto fallback_err;
}
if (fprobe_is_ftrace(fp))
ret = fprobe_ftrace_add_ips(addrs, num);
else
ret = fprobe_graph_add_ips(addrs, num);
if (ret)
goto fallback_err;
add_fprobe_hash(fp);
for (i = 0; i < hlist_array->size; i++)
WRITE_ONCE(hlist_array->array[i].fp, fp);
return 0;
fallback_err:
for (i--; i >= 0; i--)
delete_fprobe_node(&hlist_array->array[i]);
fprobe_fail_cleanup(fp);
return ret;
Then, we don't need to change unregister_fprobe_nolock and
insert_fprobe_node.
Thanks!
Menglong Dong
>
> - hlist_array = fp->hlist_array;
> if (fprobe_is_ftrace(fp))
> ret = fprobe_ftrace_add_ips(addrs, num);
> else
> ret = fprobe_graph_add_ips(addrs, num);
> + if (ret) {
> + fprobe_fail_cleanup(fp);
> + return ret;
> + }
>
> - if (!ret) {
> - add_fprobe_hash(fp);
> - for (i = 0; i < hlist_array->size; i++) {
> - ret = insert_fprobe_node(&hlist_array->array[i]);
> - if (ret)
> - break;
> - }
> - /* fallback on insert error */
> + hlist_array = fp->hlist_array;
> + add_fprobe_hash(fp);
> + for (i = 0; i < hlist_array->size; i++) {
> + ret = insert_fprobe_node(&hlist_array->array[i], fp);
> if (ret) {
> - for (i--; i >= 0; i--)
> - delete_fprobe_node(&hlist_array->array[i]);
> + if (unregister_fprobe_nolock(fp, true))
> + pr_warn("Failed to cleanup fprobe after insertion failure.\n");
> + break;
> }
> }
>
> - if (ret)
> - fprobe_fail_cleanup(fp);
> -
> return ret;
> }
> EXPORT_SYMBOL_GPL(register_fprobe_ips);
> @@ -912,37 +917,29 @@ bool fprobe_is_registered(struct fprobe *fp)
> return true;
> }
>
> -/**
> - * unregister_fprobe() - Unregister fprobe.
> - * @fp: A fprobe data structure to be unregistered.
> - *
> - * Unregister fprobe (and remove ftrace hooks from the function entries).
> - *
> - * Return 0 if @fp is unregistered successfully, -errno if not.
> - */
> -int unregister_fprobe(struct fprobe *fp)
> +static int unregister_fprobe_nolock(struct fprobe *fp, bool force)
> {
> - struct fprobe_hlist *hlist_array;
> + struct fprobe_hlist *hlist_array = fp->hlist_array;
> unsigned long *addrs = NULL;
> - int ret = 0, i, count;
> + int i, count;
>
> - mutex_lock(&fprobe_mutex);
> - if (!fp || !fprobe_registered(fp)) {
> - ret = -EINVAL;
> - goto out;
> - }
> -
> - hlist_array = fp->hlist_array;
> addrs = kcalloc(hlist_array->size, sizeof(unsigned long), GFP_KERNEL);
> - if (!addrs) {
> - ret = -ENOMEM; /* TODO: Fallback to one-by-one loop */
> - goto out;
> - }
> + if (!addrs && !force)
> + return -ENOMEM;
> + /*
> + * If @force is set, this function will remove fprobe_hash_node
> + * from the hash table even if memory allocation fails. However,
> + * ftrace_ops will not be updated. Anyway, when the last fprobe
> + * is unregistered, ftrace_ops is also unregistered.
> + */
>
> /* Remove non-synonim ips from table and hash */
> count = 0;
> for (i = 0; i < hlist_array->size; i++) {
> - if (!delete_fprobe_node(&hlist_array->array[i]))
> + if (delete_fprobe_node(&hlist_array->array[i]))
> + continue;
> +
> + if (addrs)
> addrs[count++] = hlist_array->array[i].addr;
> }
> del_fprobe_hash(fp);
> @@ -951,15 +948,35 @@ int unregister_fprobe(struct fprobe *fp)
> fprobe_ftrace_remove_ips(addrs, count);
> else
> fprobe_graph_remove_ips(addrs, count);
> + /*
> + * If count == 0, instead of calling ftrace_set_filter_ips(),
> + * we must wait for RCU grace period to finish del_fprobe_hash().
> + */
> + if (!count)
> + synchronize_rcu();
>
> kfree_rcu(hlist_array, rcu);
> fp->hlist_array = NULL;
> + kfree(addrs);
>
> -out:
> - mutex_unlock(&fprobe_mutex);
> + return !addrs ? -ENOMEM : 0;
> +}
>
> - kfree(addrs);
> - return ret;
> +/**
> + * unregister_fprobe() - Unregister fprobe.
> + * @fp: A fprobe data structure to be unregistered.
> + *
> + * Unregister fprobe (and remove ftrace hooks from the function entries).
> + *
> + * Return 0 if @fp is unregistered successfully, -errno if not.
> + */
> +int unregister_fprobe(struct fprobe *fp)
> +{
> + guard(mutex)(&fprobe_mutex);
> + if (!fp || !fprobe_registered(fp))
> + return -EINVAL;
> +
> + return unregister_fprobe_nolock(fp, false);
> }
> EXPORT_SYMBOL_GPL(unregister_fprobe);
>
>
>
>
^ permalink raw reply [flat|nested] 8+ messages in thread* Re: [PATCH v6 2/5] tracing/fprobe: Remove fprobe from hash in failure path
2026-04-15 9:47 ` Menglong Dong
@ 2026-04-15 10:06 ` Masami Hiramatsu
0 siblings, 0 replies; 8+ messages in thread
From: Masami Hiramatsu @ 2026-04-15 10:06 UTC (permalink / raw)
To: Menglong Dong
Cc: Steven Rostedt, Menglong Dong, Mathieu Desnoyers, jiang.biao,
linux-kernel, linux-trace-kernel
On Wed, 15 Apr 2026 17:47:11 +0800
Menglong Dong <menglong.dong@linux.dev> wrote:
> On 2026/4/14 17:14 Masami Hiramatsu (Google) <mhiramat@kernel.org> write:
> > From: Masami Hiramatsu (Google) <mhiramat@kernel.org>
> >
> > When register_fprobe_ips() fails, it tries to remove a list of
> > fprobe_hash_node from fprobe_ip_table, but it missed to remove
> > fprobe itself from fprobe_table. Moreover, when removing
> > the fprobe_hash_node which is added to rhltable once, it must
> > use kfree_rcu() after removing from rhltable.
> >
> > To fix these issues, this reuses unregister_fprobe() internal
> > code to rollback the half-way registered fprobe.
> >
> > Fixes: 4346ba160409 ("fprobe: Rewrite fprobe on function-graph tracer")
> > Cc: stable@vger.kernel.org
> > Signed-off-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
> > ---
> [...]
> >
> > +static int unregister_fprobe_nolock(struct fprobe *fp, bool force);
> > +
> > /**
> > * register_fprobe_ips() - Register fprobe to ftrace by address.
> > * @fp: A fprobe data structure to be registered.
> > @@ -847,29 +855,26 @@ int register_fprobe_ips(struct fprobe *fp, unsigned long *addrs, int num)
> > if (ret)
> > return ret;
>
> Hi, Masami. The logic of unregister_fprobe_nolock() looks a little
> messy. How about we make the logic here like this:
>
> for (i = 0; i < hlist_array->size; i++) {
> // The node->fp is NULL, so it's safe to add the node before
> // fprobe_ftrace_add_ips(), right?
> ret = insert_fprobe_node(&hlist_array->array[i], fp);
> if (ret)
> goto fallback_err;
> }
>
> if (fprobe_is_ftrace(fp))
> ret = fprobe_ftrace_add_ips(addrs, num);
> else
> ret = fprobe_graph_add_ips(addrs, num);
> if (ret)
> goto fallback_err;
>
> add_fprobe_hash(fp);
> for (i = 0; i < hlist_array->size; i++)
> WRITE_ONCE(hlist_array->array[i].fp, fp);
>
> return 0;
>
> fallback_err:
> for (i--; i >= 0; i--)
> delete_fprobe_node(&hlist_array->array[i]);
> fprobe_fail_cleanup(fp);
> return ret;
>
> Then, we don't need to change unregister_fprobe_nolock and
> insert_fprobe_node.
Thanks for the idea, but I don't like repeating it.
It is better to do the same thing (unregister) in the same code.
The above seems a bit optimized just for fixing this problem.
(Maybe revisit it later for optimization)
Thank you,
--
Masami Hiramatsu (Google) <mhiramat@kernel.org>
^ permalink raw reply [flat|nested] 8+ messages in thread
* [PATCH v6 3/5] tracing/fprobe: Avoid kcalloc() in rcu_read_lock section
2026-04-14 9:14 [PATCH v6 0/5] tracing/fprobe: Fix fprobe_ip_table related bugs Masami Hiramatsu (Google)
2026-04-14 9:14 ` [PATCH v6 1/5] tracing/fprobe: Reject registration of a registered fprobe before init Masami Hiramatsu (Google)
2026-04-14 9:14 ` [PATCH v6 2/5] tracing/fprobe: Remove fprobe from hash in failure path Masami Hiramatsu (Google)
@ 2026-04-14 9:15 ` Masami Hiramatsu (Google)
2026-04-14 9:15 ` [PATCH v6 4/5] tracing/fprobe: Check the same type fprobe on table as the unregistered one Masami Hiramatsu (Google)
2026-04-14 9:15 ` [PATCH v6 5/5] tracing/fprobe: Fix to unregister ftrace_ops if it is empty on module unloading Masami Hiramatsu (Google)
4 siblings, 0 replies; 8+ messages in thread
From: Masami Hiramatsu (Google) @ 2026-04-14 9:15 UTC (permalink / raw)
To: Steven Rostedt, Masami Hiramatsu
Cc: Menglong Dong, Mathieu Desnoyers, jiang.biao, linux-kernel,
linux-trace-kernel
From: Masami Hiramatsu (Google) <mhiramat@kernel.org>
fprobe_remove_node_in_module() is called under the RCU read lock, but
it invokes kcalloc() if there are more than 8 fprobes installed
on the module. Sashiko warns about this because kcalloc() can sleep [1].
[1] https://sashiko.dev/#/patchset/177552432201.853249.5125045538812833325.stgit%40mhiramat.tok.corp.google.com
To fix this issue, expand the batch size to 128 and do not expand
the fprobe_addr_list, but just cancel walking on fprobe_ip_table,
update fgraph/ftrace_ops and retry the loop again.
Fixes: 0de4c70d04a4 ("tracing: fprobe: use rhltable for fprobe_ip_table")
Cc: stable@vger.kernel.org
Signed-off-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
---
Changes in v6:
- Retry outside rhltable_walk_enter/exit() again.
Changes in v5:
- Skip updating ftrace_ops when fails to allocate memory in module
unloading.
Changes in v4:
- fix a build error typo in case of CONFIG_DYNAMIC_FTRACE=n.
Changes in v3:
- Retry inside rhltable_walk_enter/exit().
- Rename fprobe_set_ips() to fprobe_remove_ips().
- Rename 'retry' label to 'again'.
---
kernel/trace/fprobe.c | 92 ++++++++++++++++++++++++-------------------------
1 file changed, 45 insertions(+), 47 deletions(-)
diff --git a/kernel/trace/fprobe.c b/kernel/trace/fprobe.c
index 6a23bb787295..2059d8d83b4c 100644
--- a/kernel/trace/fprobe.c
+++ b/kernel/trace/fprobe.c
@@ -343,11 +343,10 @@ static bool fprobe_is_ftrace(struct fprobe *fp)
}
#ifdef CONFIG_MODULES
-static void fprobe_set_ips(unsigned long *ips, unsigned int cnt, int remove,
- int reset)
+static void fprobe_remove_ips(unsigned long *ips, unsigned int cnt)
{
- ftrace_set_filter_ips(&fprobe_graph_ops.ops, ips, cnt, remove, reset);
- ftrace_set_filter_ips(&fprobe_ftrace_ops, ips, cnt, remove, reset);
+ ftrace_set_filter_ips(&fprobe_graph_ops.ops, ips, cnt, 1, 0);
+ ftrace_set_filter_ips(&fprobe_ftrace_ops, ips, cnt, 1, 0);
}
#endif
#else
@@ -366,10 +365,9 @@ static bool fprobe_is_ftrace(struct fprobe *fp)
}
#ifdef CONFIG_MODULES
-static void fprobe_set_ips(unsigned long *ips, unsigned int cnt, int remove,
- int reset)
+static void fprobe_remove_ips(unsigned long *ips, unsigned int cnt)
{
- ftrace_set_filter_ips(&fprobe_graph_ops.ops, ips, cnt, remove, reset);
+ ftrace_set_filter_ips(&fprobe_graph_ops.ops, ips, cnt, 1, 0);
}
#endif
#endif /* !CONFIG_DYNAMIC_FTRACE_WITH_ARGS && !CONFIG_DYNAMIC_FTRACE_WITH_REGS */
@@ -543,7 +541,7 @@ static void fprobe_graph_remove_ips(unsigned long *addrs, int num)
#ifdef CONFIG_MODULES
-#define FPROBE_IPS_BATCH_INIT 8
+#define FPROBE_IPS_BATCH_INIT 128
/* instruction pointer address list */
struct fprobe_addr_list {
int index;
@@ -551,45 +549,24 @@ struct fprobe_addr_list {
unsigned long *addrs;
};
-static int fprobe_addr_list_add(struct fprobe_addr_list *alist, unsigned long addr)
+static int fprobe_remove_node_in_module(struct module *mod, struct fprobe_hlist_node *node,
+ struct fprobe_addr_list *alist)
{
- unsigned long *addrs;
-
- /* Previously we failed to expand the list. */
- if (alist->index == alist->size)
- return -ENOSPC;
-
- alist->addrs[alist->index++] = addr;
- if (alist->index < alist->size)
+ if (!within_module(node->addr, mod))
return 0;
- /* Expand the address list */
- addrs = kcalloc(alist->size * 2, sizeof(*addrs), GFP_KERNEL);
- if (!addrs)
- return -ENOMEM;
-
- memcpy(addrs, alist->addrs, alist->size * sizeof(*addrs));
- alist->size *= 2;
- kfree(alist->addrs);
- alist->addrs = addrs;
+ if (delete_fprobe_node(node))
+ return 0;
+ /* If no address list is available, we can't track this address. */
+ if (!alist->addrs)
+ return 0;
+ alist->addrs[alist->index++] = node->addr;
+ if (alist->index == alist->size)
+ return -ENOSPC;
return 0;
}
-static void fprobe_remove_node_in_module(struct module *mod, struct fprobe_hlist_node *node,
- struct fprobe_addr_list *alist)
-{
- if (!within_module(node->addr, mod))
- return;
- if (delete_fprobe_node(node))
- return;
- /*
- * If failed to update alist, just continue to update hlist.
- * Therefore, at list user handler will not hit anymore.
- */
- fprobe_addr_list_add(alist, node->addr);
-}
-
/* Handle module unloading to manage fprobe_ip_table. */
static int fprobe_module_callback(struct notifier_block *nb,
unsigned long val, void *data)
@@ -598,29 +575,50 @@ static int fprobe_module_callback(struct notifier_block *nb,
struct fprobe_hlist_node *node;
struct rhashtable_iter iter;
struct module *mod = data;
+ bool retry;
if (val != MODULE_STATE_GOING)
return NOTIFY_DONE;
alist.addrs = kcalloc(alist.size, sizeof(*alist.addrs), GFP_KERNEL);
- /* If failed to alloc memory, we can not remove ips from hash. */
- if (!alist.addrs)
- return NOTIFY_DONE;
+ /*
+ * If failed to alloc memory, ftrace_ops will not be able to remove ips from
+ * hash, but we can still remove nodes from fprobe_ip_table, so we can avoid
+ * the potential wrong callback. So just print a warning here and try to
+ * continue without address list.
+ */
+ WARN_ONCE(!alist.addrs,
+ "Failed to allocate memory for fprobe_addr_list, ftrace_ops will not be updated");
mutex_lock(&fprobe_mutex);
+again:
+ retry = false;
+ alist.index = 0;
rhltable_walk_enter(&fprobe_ip_table, &iter);
do {
rhashtable_walk_start(&iter);
while ((node = rhashtable_walk_next(&iter)) && !IS_ERR(node))
- fprobe_remove_node_in_module(mod, node, &alist);
+ if (fprobe_remove_node_in_module(mod, node, &alist) < 0) {
+ retry = true;
+ break;
+ }
rhashtable_walk_stop(&iter);
- } while (node == ERR_PTR(-EAGAIN));
+ } while (node == ERR_PTR(-EAGAIN) && !retry);
rhashtable_walk_exit(&iter);
+ /* Remove any ips from hash table(s) */
+ if (alist.index > 0) {
+ fprobe_remove_ips(alist.addrs, alist.index);
+ /*
+ * If we break rhashtable walk loop except for -EAGAIN, we need
+ * to restart looping from start for safety. Anyway, this is
+ * not a hotpath.
+ */
+ if (retry)
+ goto again;
+ }
- if (alist.index > 0)
- fprobe_set_ips(alist.addrs, alist.index, 1, 0);
mutex_unlock(&fprobe_mutex);
kfree(alist.addrs);
^ permalink raw reply related [flat|nested] 8+ messages in thread* [PATCH v6 4/5] tracing/fprobe: Check the same type fprobe on table as the unregistered one
2026-04-14 9:14 [PATCH v6 0/5] tracing/fprobe: Fix fprobe_ip_table related bugs Masami Hiramatsu (Google)
` (2 preceding siblings ...)
2026-04-14 9:15 ` [PATCH v6 3/5] tracing/fprobe: Avoid kcalloc() in rcu_read_lock section Masami Hiramatsu (Google)
@ 2026-04-14 9:15 ` Masami Hiramatsu (Google)
2026-04-14 9:15 ` [PATCH v6 5/5] tracing/fprobe: Fix to unregister ftrace_ops if it is empty on module unloading Masami Hiramatsu (Google)
4 siblings, 0 replies; 8+ messages in thread
From: Masami Hiramatsu (Google) @ 2026-04-14 9:15 UTC (permalink / raw)
To: Steven Rostedt, Masami Hiramatsu
Cc: Menglong Dong, Mathieu Desnoyers, jiang.biao, linux-kernel,
linux-trace-kernel
From: Masami Hiramatsu (Google) <mhiramat@kernel.org>
Commit 2c67dc457bc6 ("tracing: fprobe: optimization for entry only case")
introduced a different ftrace_ops for entry-only fprobes.
However, when unregistering an fprobe, the kernel only checks if another
fprobe exists at the same address, without checking which type of fprobe
it is.
If different fprobes are registered at the same address, the same address
will be registered in both fgraph_ops and ftrace_ops, but only one of
them will be deleted when unregistering. (the one removed first will not
be deleted from the ops).
This results in junk entries remaining in either fgraph_ops or ftrace_ops.
For example:
=======
cd /sys/kernel/tracing
# 'Add entry and exit events on the same place'
echo 'f:event1 vfs_read' >> dynamic_events
echo 'f:event2 vfs_read%return' >> dynamic_events
# 'Enable both of them'
echo 1 > events/fprobes/enable
cat enabled_functions
vfs_read (2) ->arch_ftrace_ops_list_func+0x0/0x210
# 'Disable and remove exit event'
echo 0 > events/fprobes/event2/enable
echo -:event2 >> dynamic_events
# 'Disable and remove all events'
echo 0 > events/fprobes/enable
echo > dynamic_events
# 'Add another event'
echo 'f:event3 vfs_open%return' > dynamic_events
cat dynamic_events
f:fprobes/event3 vfs_open%return
echo 1 > events/fprobes/enable
cat enabled_functions
vfs_open (1) tramp: 0xffffffffa0001000 (ftrace_graph_func+0x0/0x60) ->ftrace_graph_func+0x0/0x60 subops: {ent:fprobe_fgraph_entry+0x0/0x620 ret:fprobe_return+0x0/0x150}
vfs_read (1) tramp: 0xffffffffa0001000 (ftrace_graph_func+0x0/0x60) ->ftrace_graph_func+0x0/0x60 subops: {ent:fprobe_fgraph_entry+0x0/0x620 ret:fprobe_return+0x0/0x150}
=======
As you can see, an entry for the vfs_read remains.
To fix this issue, when unregistering, the kernel should also check
whether any fprobe of the same type still exists at the same address,
and if not, delete its entry from either fgraph_ops or ftrace_ops.
Fixes: 2c67dc457bc6 ("tracing: fprobe: optimization for entry only case")
Cc: stable@vger.kernel.org
Signed-off-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
---
kernel/trace/fprobe.c | 85 +++++++++++++++++++++++++++++++++++++------------
1 file changed, 65 insertions(+), 20 deletions(-)
diff --git a/kernel/trace/fprobe.c b/kernel/trace/fprobe.c
index 2059d8d83b4c..1767c2b0884c 100644
--- a/kernel/trace/fprobe.c
+++ b/kernel/trace/fprobe.c
@@ -92,11 +92,8 @@ static int insert_fprobe_node(struct fprobe_hlist_node *node, struct fprobe *fp)
return ret;
}
-/* Return true if there are synonims */
-static bool delete_fprobe_node(struct fprobe_hlist_node *node)
+static void delete_fprobe_node(struct fprobe_hlist_node *node)
{
- bool ret;
-
lockdep_assert_held(&fprobe_mutex);
/* Avoid double deleting and non-inserted nodes */
@@ -105,13 +102,6 @@ static bool delete_fprobe_node(struct fprobe_hlist_node *node)
rhltable_remove(&fprobe_ip_table, &node->hlist,
fprobe_rht_params);
}
-
- rcu_read_lock();
- ret = !!rhltable_lookup(&fprobe_ip_table, &node->addr,
- fprobe_rht_params);
- rcu_read_unlock();
-
- return ret;
}
/* Check existence of the fprobe */
@@ -342,6 +332,32 @@ static bool fprobe_is_ftrace(struct fprobe *fp)
return !fp->exit_handler;
}
+static bool fprobe_exists_on_hash(unsigned long ip, bool ftrace)
+{
+ struct rhlist_head *head, *pos;
+ struct fprobe_hlist_node *node;
+ struct fprobe *fp;
+
+ guard(rcu)();
+ head = rhltable_lookup(&fprobe_ip_table, &ip,
+ fprobe_rht_params);
+ if (!head)
+ return false;
+ /* We have to check the same type on the list. */
+ rhl_for_each_entry_rcu(node, pos, head, hlist) {
+ if (node->addr != ip)
+ break;
+ fp = READ_ONCE(node->fp);
+ if (likely(fp)) {
+ if ((!ftrace && fp->exit_handler) ||
+ (ftrace && !fp->exit_handler))
+ return true;
+ }
+ }
+
+ return false;
+}
+
#ifdef CONFIG_MODULES
static void fprobe_remove_ips(unsigned long *ips, unsigned int cnt)
{
@@ -364,6 +380,29 @@ static bool fprobe_is_ftrace(struct fprobe *fp)
return false;
}
+static bool fprobe_exists_on_hash(unsigned long ip, bool ftrace __maybe_unused)
+{
+ struct rhlist_head *head, *pos;
+ struct fprobe_hlist_node *node;
+ struct fprobe *fp;
+
+ guard(rcu)();
+ head = rhltable_lookup(&fprobe_ip_table, &ip,
+ fprobe_rht_params);
+ if (!head)
+ return false;
+ /* We only need to check fp is there. */
+ rhl_for_each_entry_rcu(node, pos, head, hlist) {
+ if (node->addr != ip)
+ break;
+ fp = READ_ONCE(node->fp);
+ if (likely(fp))
+ return true;
+ }
+
+ return false;
+}
+
#ifdef CONFIG_MODULES
static void fprobe_remove_ips(unsigned long *ips, unsigned int cnt)
{
@@ -552,18 +591,25 @@ struct fprobe_addr_list {
static int fprobe_remove_node_in_module(struct module *mod, struct fprobe_hlist_node *node,
struct fprobe_addr_list *alist)
{
+ lockdep_assert_in_rcu_read_lock();
+
if (!within_module(node->addr, mod))
return 0;
- if (delete_fprobe_node(node))
- return 0;
+ delete_fprobe_node(node);
/* If no address list is available, we can't track this address. */
if (!alist->addrs)
return 0;
+ /*
+ * The type does not matter here, because all fprobes on the same
+ * address must be removed eventually.
+ */
+ if (!rhltable_lookup(&fprobe_ip_table, &node->addr, fprobe_rht_params)) {
+ alist->addrs[alist->index++] = node->addr;
+ if (alist->index == alist->size)
+ return -ENOSPC;
+ }
- alist->addrs[alist->index++] = node->addr;
- if (alist->index == alist->size)
- return -ENOSPC;
return 0;
}
@@ -934,10 +980,9 @@ static int unregister_fprobe_nolock(struct fprobe *fp, bool force)
/* Remove non-synonim ips from table and hash */
count = 0;
for (i = 0; i < hlist_array->size; i++) {
- if (delete_fprobe_node(&hlist_array->array[i]))
- continue;
-
- if (addrs)
+ delete_fprobe_node(&hlist_array->array[i]);
+ if (addrs && !fprobe_exists_on_hash(hlist_array->array[i].addr,
+ fprobe_is_ftrace(fp)))
addrs[count++] = hlist_array->array[i].addr;
}
del_fprobe_hash(fp);
^ permalink raw reply related [flat|nested] 8+ messages in thread* [PATCH v6 5/5] tracing/fprobe: Fix to unregister ftrace_ops if it is empty on module unloading
2026-04-14 9:14 [PATCH v6 0/5] tracing/fprobe: Fix fprobe_ip_table related bugs Masami Hiramatsu (Google)
` (3 preceding siblings ...)
2026-04-14 9:15 ` [PATCH v6 4/5] tracing/fprobe: Check the same type fprobe on table as the unregistered one Masami Hiramatsu (Google)
@ 2026-04-14 9:15 ` Masami Hiramatsu (Google)
4 siblings, 0 replies; 8+ messages in thread
From: Masami Hiramatsu (Google) @ 2026-04-14 9:15 UTC (permalink / raw)
To: Steven Rostedt, Masami Hiramatsu
Cc: Menglong Dong, Mathieu Desnoyers, jiang.biao, linux-kernel,
linux-trace-kernel
From: Masami Hiramatsu (Google) <mhiramat@kernel.org>
Fix fprobe to unregister the ftrace_ops when no fprobe of the
corresponding type remains on the fprobe_ip_table, since its filter
hash is expected to be empty when unloading modules.
Since ftrace treats an empty hash as "trace everything", if fprobes
were set only on the unloaded module, all functions end up being
traced unexpectedly after the module is unloaded.
e.g.
# modprobe xt_LOG.ko
# echo 'f:test log_tg*' > dynamic_events
# echo 1 > events/fprobes/test/enable
# cat enabled_functions
log_tg [xt_LOG] (1) tramp: 0xffffffffa0004000 (fprobe_ftrace_entry+0x0/0x490) ->fprobe_ftrace_entry+0x0/0x490
log_tg_check [xt_LOG] (1) tramp: 0xffffffffa0004000 (fprobe_ftrace_entry+0x0/0x490) ->fprobe_ftrace_entry+0x0/0x490
log_tg_destroy [xt_LOG] (1) tramp: 0xffffffffa0004000 (fprobe_ftrace_entry+0x0/0x490) ->fprobe_ftrace_entry+0x0/0x490
# rmmod xt_LOG
# wc -l enabled_functions
34085 enabled_functions
Signed-off-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
---
Changes in v6:
- Newly added.
---
kernel/trace/fprobe.c | 191 +++++++++++++++++++++++++++++++++++++------------
1 file changed, 143 insertions(+), 48 deletions(-)
diff --git a/kernel/trace/fprobe.c b/kernel/trace/fprobe.c
index 1767c2b0884c..5cbe7deb855a 100644
--- a/kernel/trace/fprobe.c
+++ b/kernel/trace/fprobe.c
@@ -79,7 +79,7 @@ static const struct rhashtable_params fprobe_rht_params = {
};
/* Node insertion and deletion requires the fprobe_mutex */
-static int insert_fprobe_node(struct fprobe_hlist_node *node, struct fprobe *fp)
+static int __insert_fprobe_node(struct fprobe_hlist_node *node, struct fprobe *fp)
{
int ret;
@@ -92,7 +92,7 @@ static int insert_fprobe_node(struct fprobe_hlist_node *node, struct fprobe *fp)
return ret;
}
-static void delete_fprobe_node(struct fprobe_hlist_node *node)
+static void __delete_fprobe_node(struct fprobe_hlist_node *node)
{
lockdep_assert_held(&fprobe_mutex);
@@ -250,7 +250,63 @@ static inline int __fprobe_kprobe_handler(unsigned long ip, unsigned long parent
return ret;
}
+static int fprobe_fgraph_entry(struct ftrace_graph_ent *trace, struct fgraph_ops *gops,
+ struct ftrace_regs *fregs);
+static void fprobe_return(struct ftrace_graph_ret *trace,
+ struct fgraph_ops *gops,
+ struct ftrace_regs *fregs);
+
+static struct fgraph_ops fprobe_graph_ops = {
+ .entryfunc = fprobe_fgraph_entry,
+ .retfunc = fprobe_return,
+};
+static int fprobe_graph_active;
+/* Number of fgraph fprobes */
+static int nr_fgraph_fprobes;
+
+/* Add @addrs to the ftrace filter and register fgraph if needed. */
+static int fprobe_graph_add_ips(unsigned long *addrs, int num)
+{
+ int ret;
+
+ lockdep_assert_held(&fprobe_mutex);
+
+ ret = ftrace_set_filter_ips(&fprobe_graph_ops.ops, addrs, num, 0, 0);
+ if (ret)
+ return ret;
+
+ if (!fprobe_graph_active) {
+ ret = register_ftrace_graph(&fprobe_graph_ops);
+ if (WARN_ON_ONCE(ret)) {
+ ftrace_free_filter(&fprobe_graph_ops.ops);
+ return ret;
+ }
+ }
+ fprobe_graph_active++;
+ return 0;
+}
+
+/* Remove @addrs from the ftrace filter and unregister fgraph if possible. */
+static void fprobe_graph_remove_ips(unsigned long *addrs, int num)
+{
+ lockdep_assert_held(&fprobe_mutex);
+
+ if (!fprobe_graph_active)
+ return;
+ fprobe_graph_active--;
+ if (!fprobe_graph_active) {
+ unregister_ftrace_graph(&fprobe_graph_ops);
+ ftrace_free_filter(&fprobe_graph_ops.ops);
+ }
+
+ if (num)
+ ftrace_set_filter_ips(&fprobe_graph_ops.ops, addrs, num, 1, 0);
+}
+
#if defined(CONFIG_DYNAMIC_FTRACE_WITH_ARGS) || defined(CONFIG_DYNAMIC_FTRACE_WITH_REGS)
+/* Number of ftrace fprobes */
+static int nr_ftrace_fprobes;
+
/* ftrace_ops callback, this processes fprobes which have only entry_handler. */
static void fprobe_ftrace_entry(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *ops, struct ftrace_regs *fregs)
@@ -320,9 +376,14 @@ static void fprobe_ftrace_remove_ips(unsigned long *addrs, int num)
{
lockdep_assert_held(&fprobe_mutex);
- fprobe_ftrace_active--;
if (!fprobe_ftrace_active)
+ return;
+
+ fprobe_ftrace_active--;
+ if (!fprobe_ftrace_active) {
unregister_ftrace_function(&fprobe_ftrace_ops);
+ ftrace_free_filter(&fprobe_ftrace_ops);
+ }
if (num)
ftrace_set_filter_ips(&fprobe_ftrace_ops, addrs, num, 1, 0);
}
@@ -332,6 +393,40 @@ static bool fprobe_is_ftrace(struct fprobe *fp)
return !fp->exit_handler;
}
+/* Node insertion and deletion requires the fprobe_mutex */
+static int insert_fprobe_node(struct fprobe_hlist_node *node, struct fprobe *fp)
+{
+ int ret;
+
+ lockdep_assert_held(&fprobe_mutex);
+
+ ret = __insert_fprobe_node(node, fp);
+ if (!ret) {
+ if (fprobe_is_ftrace(fp))
+ nr_ftrace_fprobes++;
+ else
+ nr_fgraph_fprobes++;
+ }
+
+ return ret;
+}
+
+static void delete_fprobe_node(struct fprobe_hlist_node *node)
+{
+ struct fprobe *fp;
+
+ lockdep_assert_held(&fprobe_mutex);
+
+ fp = READ_ONCE(node->fp);
+ if (fp) {
+ if (fprobe_is_ftrace(fp))
+ nr_ftrace_fprobes--;
+ else
+ nr_fgraph_fprobes--;
+ }
+ __delete_fprobe_node(node);
+}
+
static bool fprobe_exists_on_hash(unsigned long ip, bool ftrace)
{
struct rhlist_head *head, *pos;
@@ -361,8 +456,19 @@ static bool fprobe_exists_on_hash(unsigned long ip, bool ftrace)
#ifdef CONFIG_MODULES
static void fprobe_remove_ips(unsigned long *ips, unsigned int cnt)
{
- ftrace_set_filter_ips(&fprobe_graph_ops.ops, ips, cnt, 1, 0);
- ftrace_set_filter_ips(&fprobe_ftrace_ops, ips, cnt, 1, 0);
+ if (!nr_fgraph_fprobes && fprobe_graph_active) {
+ unregister_ftrace_graph(&fprobe_graph_ops);
+ ftrace_free_filter(&fprobe_graph_ops.ops);
+ fprobe_graph_active = 0;
+ } else
+ ftrace_set_filter_ips(&fprobe_graph_ops.ops, ips, cnt, 1, 0);
+
+ if (!nr_ftrace_fprobes && fprobe_ftrace_active) {
+ unregister_ftrace_function(&fprobe_ftrace_ops);
+ ftrace_free_filter(&fprobe_ftrace_ops);
+ fprobe_ftrace_active = 0;
+ } else
+ ftrace_set_filter_ips(&fprobe_ftrace_ops, ips, cnt, 1, 0);
}
#endif
#else
@@ -380,6 +486,32 @@ static bool fprobe_is_ftrace(struct fprobe *fp)
return false;
}
+/* Node insertion and deletion requires the fprobe_mutex */
+static int insert_fprobe_node(struct fprobe_hlist_node *node, struct fprobe *fp)
+{
+ int ret;
+
+ lockdep_assert_held(&fprobe_mutex);
+
+ ret = __insert_fprobe_node(node, fp);
+ if (!ret)
+ nr_fgraph_fprobes++;
+
+ return ret;
+}
+
+static void delete_fprobe_node(struct fprobe_hlist_node *node)
+{
+ struct fprobe *fp;
+
+ lockdep_assert_held(&fprobe_mutex);
+
+ fp = READ_ONCE(node->fp);
+ if (fp)
+ nr_fgraph_fprobes--;
+ __delete_fprobe_node(node);
+}
+
static bool fprobe_exists_on_hash(unsigned long ip, bool ftrace __maybe_unused)
{
struct rhlist_head *head, *pos;
@@ -406,7 +538,12 @@ static bool fprobe_exists_on_hash(unsigned long ip, bool ftrace __maybe_unused)
#ifdef CONFIG_MODULES
static void fprobe_remove_ips(unsigned long *ips, unsigned int cnt)
{
- ftrace_set_filter_ips(&fprobe_graph_ops.ops, ips, cnt, 1, 0);
+ if (!nr_fgraph_fprobes && fprobe_graph_active) {
+ unregister_ftrace_graph(&fprobe_graph_ops);
+ ftrace_free_filter(&fprobe_graph_ops.ops);
+ fprobe_graph_active = 0;
+ } else
+ ftrace_set_filter_ips(&fprobe_graph_ops.ops, ips, cnt, 1, 0);
}
#endif
#endif /* !CONFIG_DYNAMIC_FTRACE_WITH_ARGS && !CONFIG_DYNAMIC_FTRACE_WITH_REGS */
@@ -536,48 +673,6 @@ static void fprobe_return(struct ftrace_graph_ret *trace,
}
NOKPROBE_SYMBOL(fprobe_return);
-static struct fgraph_ops fprobe_graph_ops = {
- .entryfunc = fprobe_fgraph_entry,
- .retfunc = fprobe_return,
-};
-static int fprobe_graph_active;
-
-/* Add @addrs to the ftrace filter and register fgraph if needed. */
-static int fprobe_graph_add_ips(unsigned long *addrs, int num)
-{
- int ret;
-
- lockdep_assert_held(&fprobe_mutex);
-
- ret = ftrace_set_filter_ips(&fprobe_graph_ops.ops, addrs, num, 0, 0);
- if (ret)
- return ret;
-
- if (!fprobe_graph_active) {
- ret = register_ftrace_graph(&fprobe_graph_ops);
- if (WARN_ON_ONCE(ret)) {
- ftrace_free_filter(&fprobe_graph_ops.ops);
- return ret;
- }
- }
- fprobe_graph_active++;
- return 0;
-}
-
-/* Remove @addrs from the ftrace filter and unregister fgraph if possible. */
-static void fprobe_graph_remove_ips(unsigned long *addrs, int num)
-{
- lockdep_assert_held(&fprobe_mutex);
-
- fprobe_graph_active--;
- /* Q: should we unregister it ? */
- if (!fprobe_graph_active)
- unregister_ftrace_graph(&fprobe_graph_ops);
-
- if (num)
- ftrace_set_filter_ips(&fprobe_graph_ops.ops, addrs, num, 1, 0);
-}
-
#ifdef CONFIG_MODULES
#define FPROBE_IPS_BATCH_INIT 128
^ permalink raw reply related [flat|nested] 8+ messages in thread