From: Steven Rostedt <rostedt@goodmis.org>
To: linux-kernel@vger.kernel.org, linux-trace-kernel@vger.kernel.org
Cc: Masami Hiramatsu <mhiramat@kernel.org>,
Mark Rutland <mark.rutland@arm.com>,
Mathieu Desnoyers <mathieu.desnoyers@efficios.com>,
Andrew Morton <akpm@linux-foundation.org>,
Alexei Starovoitov <alexei.starovoitov@gmail.com>,
Florent Revest <revest@chromium.org>,
Martin KaFai Lau <martin.lau@linux.dev>,
bpf <bpf@vger.kernel.org>, Sven Schnelle <svens@linux.ibm.com>,
Alexei Starovoitov <ast@kernel.org>, Jiri Olsa <jolsa@kernel.org>,
Arnaldo Carvalho de Melo <acme@kernel.org>,
Daniel Borkmann <daniel@iogearbox.net>,
Alan Maguire <alan.maguire@oracle.com>,
Peter Zijlstra <peterz@infradead.org>,
Thomas Gleixner <tglx@linutronix.de>, Guo Ren <guoren@kernel.org>
Subject: [PATCH v2 11/27] ftrace: Allow subops filtering to be modified
Date: Sat, 01 Jun 2024 23:37:55 -0400 [thread overview]
Message-ID: <20240602033832.870736657@goodmis.org> (raw)
In-Reply-To: 20240602033744.563858532@goodmis.org
From: "Steven Rostedt (Google)" <rostedt@goodmis.org>
A subops uses a "manager" ops to enable and disable its filters.
The manager ops can handle more than one subops, and its filter is what
controls what functions get set. Add a ftrace_hash_move_and_update_subops()
function that will update the manager ops when the subops filters change.
Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
---
include/linux/ftrace.h | 3 ++
kernel/trace/ftrace.c | 96 ++++++++++++++++++++++++++++++++++++++----
2 files changed, 90 insertions(+), 9 deletions(-)
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 978a1d3b270a..63238a9a9270 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -227,6 +227,7 @@ ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);
* ftrace_enabled.
* DIRECT - Used by the direct ftrace_ops helper for direct functions
* (internal ftrace only, should not be used by others)
+ * SUBOP - Is controlled by another op in field managed.
*/
enum {
FTRACE_OPS_FL_ENABLED = BIT(0),
@@ -247,6 +248,7 @@ enum {
FTRACE_OPS_FL_TRACE_ARRAY = BIT(15),
FTRACE_OPS_FL_PERMANENT = BIT(16),
FTRACE_OPS_FL_DIRECT = BIT(17),
+ FTRACE_OPS_FL_SUBOP = BIT(18),
};
#ifndef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
@@ -336,6 +338,7 @@ struct ftrace_ops {
struct list_head list;
struct list_head subop_list;
ftrace_ops_func_t ops_func;
+ struct ftrace_ops *managed;
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
unsigned long direct_call;
#endif
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 38fb2a634b04..e447b04c0c9c 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -3424,7 +3424,8 @@ int ftrace_startup_subops(struct ftrace_ops *ops, struct ftrace_ops *subops, int
} else {
free_ftrace_hash(save_filter_hash);
free_ftrace_hash(save_notrace_hash);
- subops->flags |= FTRACE_OPS_FL_ENABLED;
+ subops->flags |= FTRACE_OPS_FL_ENABLED | FTRACE_OPS_FL_SUBOP;
+ subops->managed = ops;
}
return ret;
}
@@ -3478,11 +3479,12 @@ int ftrace_startup_subops(struct ftrace_ops *ops, struct ftrace_ops *subops, int
ret = ftrace_update_ops(ops, filter_hash, notrace_hash);
free_ftrace_hash(filter_hash);
free_ftrace_hash(notrace_hash);
- if (ret < 0)
+ if (ret < 0) {
list_del(&subops->list);
- else
- subops->flags |= FTRACE_OPS_FL_ENABLED;
-
+ } else {
+ subops->flags |= FTRACE_OPS_FL_ENABLED | FTRACE_OPS_FL_SUBOP;
+ subops->managed = ops;
+ }
return ret;
}
@@ -3527,6 +3529,8 @@ int ftrace_shutdown_subops(struct ftrace_ops *ops, struct ftrace_ops *subops, in
free_ftrace_hash(ops->func_hash->notrace_hash);
ops->func_hash->filter_hash = EMPTY_HASH;
ops->func_hash->notrace_hash = EMPTY_HASH;
+ subops->flags &= ~(FTRACE_OPS_FL_ENABLED | FTRACE_OPS_FL_SUBOP);
+ subops->managed = NULL;
return 0;
}
@@ -3542,16 +3546,65 @@ int ftrace_shutdown_subops(struct ftrace_ops *ops, struct ftrace_ops *subops, in
}
ret = ftrace_update_ops(ops, filter_hash, notrace_hash);
- if (ret < 0)
+ if (ret < 0) {
list_add(&subops->list, &ops->subop_list);
- else
- subops->flags &= ~FTRACE_OPS_FL_ENABLED;
-
+ } else {
+ subops->flags &= ~(FTRACE_OPS_FL_ENABLED | FTRACE_OPS_FL_SUBOP);
+ subops->managed = NULL;
+ }
free_ftrace_hash(filter_hash);
free_ftrace_hash(notrace_hash);
return ret;
}
+static int ftrace_hash_move_and_update_subops(struct ftrace_ops *subops,
+ struct ftrace_hash **orig_subhash,
+ struct ftrace_hash *hash,
+ int enable)
+{
+ struct ftrace_ops *ops = subops->managed;
+ struct ftrace_hash **orig_hash;
+ struct ftrace_hash *save_hash;
+ struct ftrace_hash *new_hash;
+ int ret;
+
+ /* Manager ops can not be subops (yet) */
+ if (WARN_ON_ONCE(!ops || ops->flags & FTRACE_OPS_FL_SUBOP))
+ return -EINVAL;
+
+ /* Move the new hash over to the subops hash */
+ save_hash = *orig_subhash;
+ *orig_subhash = __ftrace_hash_move(hash);
+ if (!*orig_subhash) {
+ *orig_subhash = save_hash;
+ return -ENOMEM;
+ }
+
+ /* Create a new_hash to hold the ops new functions */
+ if (enable) {
+ orig_hash = &ops->func_hash->filter_hash;
+ new_hash = append_hashes(ops);
+ } else {
+ orig_hash = &ops->func_hash->notrace_hash;
+ new_hash = intersect_hashes(ops);
+ }
+
+ /* Move the hash over to the new hash */
+ ret = ftrace_hash_move_and_update_ops(ops, orig_hash, new_hash, enable);
+
+ free_ftrace_hash(new_hash);
+
+ if (ret) {
+ /* Put back the original hash */
+ free_ftrace_hash_rcu(*orig_subhash);
+ *orig_subhash = save_hash;
+ } else {
+ free_ftrace_hash_rcu(save_hash);
+ }
+ return ret;
+}
+
+
static u64 ftrace_update_time;
unsigned long ftrace_update_tot_cnt;
unsigned long ftrace_number_of_pages;
@@ -4770,8 +4823,33 @@ static int ftrace_hash_move_and_update_ops(struct ftrace_ops *ops,
{
struct ftrace_ops_hash old_hash_ops;
struct ftrace_hash *old_hash;
+ struct ftrace_ops *op;
int ret;
+ if (ops->flags & FTRACE_OPS_FL_SUBOP)
+ return ftrace_hash_move_and_update_subops(ops, orig_hash, hash, enable);
+
+ /*
+ * If this ops is not enabled, it could be sharing its filters
+ * with a subop. If that's the case, update the subop instead of
+ * this ops. Shared filters are only allowed to have one ops set
+ * at a time, and if we update the ops that is not enabled,
+ * it will not affect subops that share it.
+ */
+ if (!(ops->flags & FTRACE_OPS_FL_ENABLED)) {
+ /* Check if any other manager subops maps to this hash */
+ do_for_each_ftrace_op(op, ftrace_ops_list) {
+ struct ftrace_ops *subops;
+
+ list_for_each_entry(subops, &op->subop_list, list) {
+ if ((subops->flags & FTRACE_OPS_FL_ENABLED) &&
+ subops->func_hash == ops->func_hash) {
+ return ftrace_hash_move_and_update_subops(subops, orig_hash, hash, enable);
+ }
+ }
+ } while_for_each_ftrace_op(op);
+ }
+
old_hash = *orig_hash;
old_hash_ops.filter_hash = ops->func_hash->filter_hash;
old_hash_ops.notrace_hash = ops->func_hash->notrace_hash;
--
2.43.0
next prev parent reply other threads:[~2024-06-02 3:37 UTC|newest]
Thread overview: 41+ messages / expand[flat|nested] mbox.gz Atom feed top
2024-06-02 3:37 [PATCH v2 00/27] function_graph: Allow multiple users for function graph tracing Steven Rostedt
2024-06-02 3:37 ` [PATCH v2 01/27] function_graph: Convert ret_stack to a series of longs Steven Rostedt
2024-06-02 3:37 ` [PATCH v2 02/27] fgraph: Use BUILD_BUG_ON() to make sure we have structures divisible by long Steven Rostedt
2024-06-02 3:37 ` [PATCH v2 03/27] function_graph: Add an array structure that will allow multiple callbacks Steven Rostedt
2024-06-02 3:37 ` [PATCH v2 04/27] function_graph: Allow multiple users to attach to function graph Steven Rostedt
2024-06-02 3:37 ` [PATCH v2 05/27] function_graph: Handle tail calls for stack unwinding Steven Rostedt
2024-06-02 3:37 ` [PATCH v2 06/27] function_graph: Remove logic around ftrace_graph_entry and return Steven Rostedt
2024-06-02 3:37 ` [PATCH v2 07/27] ftrace/function_graph: Pass fgraph_ops to function graph callbacks Steven Rostedt
2024-06-02 3:37 ` [PATCH v2 08/27] ftrace: Allow function_graph tracer to be enabled in instances Steven Rostedt
2024-06-02 3:37 ` [PATCH v2 09/27] ftrace: Allow ftrace startup flags to exist without dynamic ftrace Steven Rostedt
2024-06-02 3:37 ` [PATCH v2 10/27] ftrace: Add subops logic to allow one ops to manage many Steven Rostedt
2024-06-03 1:33 ` Masami Hiramatsu
2024-06-03 2:06 ` Steven Rostedt
2024-06-03 2:46 ` Masami Hiramatsu
2024-06-03 14:54 ` Steven Rostedt
2024-06-03 17:05 ` Steven Rostedt
2024-06-02 3:37 ` Steven Rostedt [this message]
2024-06-03 2:37 ` [PATCH v2 11/27] ftrace: Allow subops filtering to be modified Masami Hiramatsu
2024-06-03 14:52 ` Steven Rostedt
2024-06-03 23:12 ` Masami Hiramatsu
2024-06-02 3:37 ` [PATCH v2 12/27] function_graph: Have the instances use their own ftrace_ops for filtering Steven Rostedt
2024-06-02 3:37 ` [PATCH v2 13/27] function_graph: Add pid tracing back to function graph tracer Steven Rostedt
2024-06-02 3:37 ` [PATCH v2 14/27] function_graph: Use a simple LRU for fgraph_array index number Steven Rostedt
2024-06-02 3:37 ` [PATCH v2 15/27] function_graph: Add "task variables" per task for fgraph_ops Steven Rostedt
2024-06-02 3:38 ` [PATCH v2 16/27] function_graph: Move set_graph_function tests to shadow stack global var Steven Rostedt
2024-06-02 3:38 ` [PATCH v2 17/27] function_graph: Move graph depth stored data " Steven Rostedt
2024-06-02 3:38 ` [PATCH v2 18/27] function_graph: Move graph notrace bit " Steven Rostedt
2024-06-02 3:38 ` [PATCH v2 19/27] function_graph: Implement fgraph_reserve_data() and fgraph_retrieve_data() Steven Rostedt
2024-06-02 3:38 ` [PATCH v2 20/27] function_graph: Add selftest for passing local variables Steven Rostedt
2024-06-02 3:38 ` [PATCH v2 21/27] ftrace: Add multiple fgraph storage selftest Steven Rostedt
2024-06-02 3:38 ` [PATCH v2 22/27] function_graph: Use for_each_set_bit() in __ftrace_return_to_handler() Steven Rostedt
2024-06-02 3:38 ` [PATCH v2 23/27] function_graph: Use bitmask to loop on fgraph entry Steven Rostedt
2024-06-02 3:38 ` [PATCH v2 24/27] function_graph: Use static_call and branch to optimize entry function Steven Rostedt
2024-06-03 3:11 ` Masami Hiramatsu
2024-06-03 15:00 ` Steven Rostedt
2024-06-03 15:07 ` Steven Rostedt
2024-06-03 23:08 ` Masami Hiramatsu
2024-06-02 3:38 ` [PATCH v2 25/27] function_graph: Use static_call and branch to optimize return function Steven Rostedt
2024-06-02 3:38 ` [PATCH v2 26/27] selftests/ftrace: Add function_graph tracer to func-filter-pid test Steven Rostedt
2024-06-02 3:38 ` [PATCH v2 27/27] selftests/ftrace: Add fgraph-multi.tc test Steven Rostedt
2024-06-02 3:44 ` [PATCH v2 00/27] function_graph: Allow multiple users for function graph tracing Steven Rostedt
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20240602033832.870736657@goodmis.org \
--to=rostedt@goodmis.org \
--cc=acme@kernel.org \
--cc=akpm@linux-foundation.org \
--cc=alan.maguire@oracle.com \
--cc=alexei.starovoitov@gmail.com \
--cc=ast@kernel.org \
--cc=bpf@vger.kernel.org \
--cc=daniel@iogearbox.net \
--cc=guoren@kernel.org \
--cc=jolsa@kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-trace-kernel@vger.kernel.org \
--cc=mark.rutland@arm.com \
--cc=martin.lau@linux.dev \
--cc=mathieu.desnoyers@efficios.com \
--cc=mhiramat@kernel.org \
--cc=peterz@infradead.org \
--cc=revest@chromium.org \
--cc=svens@linux.ibm.com \
--cc=tglx@linutronix.de \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).