From: Steven Rostedt <rostedt@goodmis.org>
To: "Masami Hiramatsu (Google)" <mhiramat@kernel.org>
Cc: linux-kernel@vger.kernel.org, linux-trace-kernel@vger.kernel.org,
Mark Rutland <mark.rutland@arm.com>,
Mathieu Desnoyers <mathieu.desnoyers@efficios.com>,
Andrew Morton <akpm@linux-foundation.org>,
Alexei Starovoitov <alexei.starovoitov@gmail.com>,
Florent Revest <revest@chromium.org>,
Martin KaFai Lau <martin.lau@linux.dev>,
bpf <bpf@vger.kernel.org>, Sven Schnelle <svens@linux.ibm.com>,
Alexei Starovoitov <ast@kernel.org>, Jiri Olsa <jolsa@kernel.org>,
Arnaldo Carvalho de Melo <acme@kernel.org>,
Daniel Borkmann <daniel@iogearbox.net>,
Alan Maguire <alan.maguire@oracle.com>,
Peter Zijlstra <peterz@infradead.org>,
Thomas Gleixner <tglx@linutronix.de>, Guo Ren <guoren@kernel.org>
Subject: Re: [PATCH 10/20] function_graph: Have the instances use their own ftrace_ops for filtering
Date: Fri, 31 May 2024 18:49:10 -0400
Message-ID: <20240531184910.799635e8@rorschach.local.home>
In-Reply-To: <20240531235023.a0b2b207362eba2f8b5c16f7@kernel.org>
On Fri, 31 May 2024 23:50:23 +0900
Masami Hiramatsu (Google) <mhiramat@kernel.org> wrote:
> So is it similar to fprobe/kprobe: use a shared single ftrace_ops,
> but have each fgraph keep its own hash table?
Sort of.
I created helper functions in ftrace that let you have a "manager
ftrace_ops" that is the one actually registered with ftrace (its
callback does the demultiplexing), and then per-user "subops" that get
attached to it. A rough sketch of such a demultiplexing callback
follows the two functions below. Here's a glimpse of the code:
/**
 * ftrace_startup_subops - enable tracing for subops of an ops
 * @ops: Manager ops (used to pick all the functions of its subops)
 * @subops: A new ops to add to @ops
 * @command: Extra commands to use to enable tracing
 *
 * The @ops is a manager @ops that has the filter that includes all the functions
 * that its list of subops are tracing. Adding a new @subops will add the
 * functions of @subops to @ops.
 */
int ftrace_startup_subops(struct ftrace_ops *ops, struct ftrace_ops *subops, int command)
{
        struct ftrace_hash *filter_hash;
        struct ftrace_hash *notrace_hash;
        struct ftrace_hash *save_filter_hash;
        struct ftrace_hash *save_notrace_hash;
        int size_bits;
        int ret;

        if (unlikely(ftrace_disabled))
                return -ENODEV;

        ftrace_ops_init(ops);
        ftrace_ops_init(subops);

        /* Make everything canonical (Just in case!) */
        if (!ops->func_hash->filter_hash)
                ops->func_hash->filter_hash = EMPTY_HASH;
        if (!ops->func_hash->notrace_hash)
                ops->func_hash->notrace_hash = EMPTY_HASH;
        if (!subops->func_hash->filter_hash)
                subops->func_hash->filter_hash = EMPTY_HASH;
        if (!subops->func_hash->notrace_hash)
                subops->func_hash->notrace_hash = EMPTY_HASH;

        /* For the first subops to ops just enable it normally */
        if (list_empty(&ops->subop_list)) {
                /* Just use the subops hashes */
                filter_hash = copy_hash(subops->func_hash->filter_hash);
                notrace_hash = copy_hash(subops->func_hash->notrace_hash);
                if (!filter_hash || !notrace_hash) {
                        free_ftrace_hash(filter_hash);
                        free_ftrace_hash(notrace_hash);
                        return -ENOMEM;
                }

                save_filter_hash = ops->func_hash->filter_hash;
                save_notrace_hash = ops->func_hash->notrace_hash;

                ops->func_hash->filter_hash = filter_hash;
                ops->func_hash->notrace_hash = notrace_hash;
                list_add(&subops->list, &ops->subop_list);
                ret = ftrace_startup(ops, command);
                if (ret < 0) {
                        list_del(&subops->list);
                        ops->func_hash->filter_hash = save_filter_hash;
                        ops->func_hash->notrace_hash = save_notrace_hash;
                        free_ftrace_hash(filter_hash);
                        free_ftrace_hash(notrace_hash);
                } else {
                        free_ftrace_hash(save_filter_hash);
                        free_ftrace_hash(save_notrace_hash);
                        subops->flags |= FTRACE_OPS_FL_ENABLED;
                }
                return ret;
        }
        /*
         * Here there's already something attached. Here are the rules:
         * o If either filter_hash is empty then the final stays empty
         * o Otherwise, the final is a superset of both hashes
         * o If either notrace_hash is empty then the final stays empty
         * o Otherwise, the final is an intersection between the hashes
         */
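        /*
         * An illustration of the rules above (not from the patch itself):
         * if subops A filters {foo, bar} and subops B filters {bar, baz},
         * the manager filter must be the union {foo, bar, baz} so that
         * every function either subops wants gets traced.  If A's notrace
         * hash is {x, y} and B's is {y, z}, only the intersection {y} can
         * be excluded globally, since x still has to be traced for B and
         * z for A; the per-subops hashes deal with the rest.
         */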
        if (ops->func_hash->filter_hash == EMPTY_HASH ||
            subops->func_hash->filter_hash == EMPTY_HASH) {
                filter_hash = EMPTY_HASH;
        } else {
                size_bits = max(ops->func_hash->filter_hash->size_bits,
                                subops->func_hash->filter_hash->size_bits);
                filter_hash = alloc_and_copy_ftrace_hash(size_bits, ops->func_hash->filter_hash);
                if (!filter_hash)
                        return -ENOMEM;
                ret = append_hash(&filter_hash, subops->func_hash->filter_hash);
                if (ret < 0) {
                        free_ftrace_hash(filter_hash);
                        return ret;
                }
        }

        if (ops->func_hash->notrace_hash == EMPTY_HASH ||
            subops->func_hash->notrace_hash == EMPTY_HASH) {
                notrace_hash = EMPTY_HASH;
        } else {
                size_bits = max(ops->func_hash->filter_hash->size_bits,
                                subops->func_hash->filter_hash->size_bits);
                notrace_hash = alloc_ftrace_hash(size_bits);
                if (!notrace_hash) {
                        free_ftrace_hash(filter_hash);
                        return -ENOMEM;
                }

                ret = intersect_hash(&notrace_hash, ops->func_hash->notrace_hash,
                                     subops->func_hash->notrace_hash);
                if (ret < 0) {
                        free_ftrace_hash(filter_hash);
                        free_ftrace_hash(notrace_hash);
                        return ret;
                }
        }

        list_add(&subops->list, &ops->subop_list);

        ret = ftrace_update_ops(ops, filter_hash, notrace_hash);
        free_ftrace_hash(filter_hash);
        free_ftrace_hash(notrace_hash);
        if (ret < 0)
                list_del(&subops->list);
        return ret;
}
/**
 * ftrace_shutdown_subops - Remove a subops from a manager ops
 * @ops: A manager ops to remove @subops from
 * @subops: The subops to remove from @ops
 * @command: Any extra command flags to add to modifying the text
 *
 * Removes the functions being traced by the @subops from @ops. Note, it
 * will not affect functions that are being traced by other subops that
 * still exist in @ops.
 *
 * If the last subops is removed from @ops, then @ops is shutdown normally.
 */
int ftrace_shutdown_subops(struct ftrace_ops *ops, struct ftrace_ops *subops, int command)
{
        struct ftrace_hash *filter_hash;
        struct ftrace_hash *notrace_hash;
        int ret;

        if (unlikely(ftrace_disabled))
                return -ENODEV;

        list_del(&subops->list);

        if (list_empty(&ops->subop_list)) {
                /* Last one, just disable the current ops */
                ret = ftrace_shutdown(ops, command);
                if (ret < 0) {
                        list_add(&subops->list, &ops->subop_list);
                        return ret;
                }

                subops->flags &= ~FTRACE_OPS_FL_ENABLED;

                free_ftrace_hash(ops->func_hash->filter_hash);
                free_ftrace_hash(ops->func_hash->notrace_hash);
                ops->func_hash->filter_hash = EMPTY_HASH;
                ops->func_hash->notrace_hash = EMPTY_HASH;

                return 0;
        }

        /* Rebuild the hashes without subops */
        filter_hash = append_hashes(ops);
        notrace_hash = intersect_hashes(ops);
        if (!filter_hash || !notrace_hash) {
                free_ftrace_hash(filter_hash);
                free_ftrace_hash(notrace_hash);
                list_add(&subops->list, &ops->subop_list);
                return -ENOMEM;
        }

        ret = ftrace_update_ops(ops, filter_hash, notrace_hash);
        if (ret < 0)
                list_add(&subops->list, &ops->subop_list);

        free_ftrace_hash(filter_hash);
        free_ftrace_hash(notrace_hash);
        return ret;
}
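
For context, here's a rough sketch of what the demultiplexing callback
on the manager ops could look like. This is not code from the patch;
the ftrace_managed_func name and the details are just an illustration
of the idea, ignoring recursion protection and locking:

static void ftrace_managed_func(unsigned long ip, unsigned long parent_ip,
                                struct ftrace_ops *op, struct ftrace_regs *fregs)
{
        struct ftrace_ops *subops;

        /* @op traced this ip; hand it to every subops whose hashes want it */
        list_for_each_entry(subops, &op->subop_list, list) {
                if (ftrace_ops_test(subops, ip, NULL))
                        subops->func(ip, parent_ip, subops, fregs);
        }
}

That way only the manager ops (with the combined hashes built above) is
ever registered with the architecture code, while each subops is still
filtered against its own hashes before its callback runs. The fgraph
side would then attach one subops per instance with
ftrace_startup_subops() and detach it with ftrace_shutdown_subops().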
>
> > This removes the need to touch the architecture code. It can also be
> > used by fprobes to handle the attachments to functions for several
> > different sets of callbacks.
> >
> > I'll send out patches soon.
>
> OK, I'll wait for that.
I'm just cleaning it up. I'll post it tomorrow (your today).
-- Steve
Thread overview: 37+ messages
2024-05-25 2:36 [PATCH 00/20] function_graph: Allow multiple users for function graph tracing Steven Rostedt
2024-05-25 2:36 ` [PATCH 01/20] function_graph: Convert ret_stack to a series of longs Steven Rostedt
2024-05-25 2:36 ` [PATCH 02/20] fgraph: Use BUILD_BUG_ON() to make sure we have structures divisible by long Steven Rostedt
2024-05-25 2:36 ` [PATCH 03/20] function_graph: Add an array structure that will allow multiple callbacks Steven Rostedt
2024-05-25 2:36 ` [PATCH 04/20] function_graph: Allow multiple users to attach to function graph Steven Rostedt
2024-05-27 0:34 ` Masami Hiramatsu
2024-05-27 1:17 ` Steven Rostedt
2024-05-25 2:36 ` [PATCH 05/20] function_graph: Handle tail calls for stack unwinding Steven Rostedt
2024-05-25 2:36 ` [PATCH 06/20] function_graph: Remove logic around ftrace_graph_entry and return Steven Rostedt
2024-05-25 2:36 ` [PATCH 07/20] ftrace/function_graph: Pass fgraph_ops to function graph callbacks Steven Rostedt
2024-05-25 2:37 ` [PATCH 08/20] ftrace: Allow function_graph tracer to be enabled in instances Steven Rostedt
2024-05-25 2:37 ` [PATCH 09/20] ftrace: Allow ftrace startup flags to exist without dynamic ftrace Steven Rostedt
2024-05-25 2:37 ` [PATCH 10/20] function_graph: Have the instances use their own ftrace_ops for filtering Steven Rostedt
2024-05-31 2:30 ` Steven Rostedt
2024-05-31 3:12 ` Masami Hiramatsu
2024-05-31 6:03 ` Steven Rostedt
2024-05-31 14:50 ` Masami Hiramatsu
2024-05-31 22:49 ` Steven Rostedt [this message]
2024-06-01 19:19 ` Steven Rostedt
2024-06-02 2:40 ` Masami Hiramatsu
2024-05-25 2:37 ` [PATCH 11/20] function_graph: Use a simple LRU for fgraph_array index number Steven Rostedt
2024-05-25 2:37 ` [PATCH 12/20] function_graph: Add "task variables" per task for fgraph_ops Steven Rostedt
2024-05-25 2:37 ` [PATCH 13/20] function_graph: Move set_graph_function tests to shadow stack global var Steven Rostedt
2024-05-25 2:37 ` [PATCH 14/20] function_graph: Move graph depth stored data " Steven Rostedt
2024-05-25 2:37 ` [PATCH 15/20] function_graph: Move graph notrace bit " Steven Rostedt
2024-05-25 2:37 ` [PATCH 16/20] function_graph: Implement fgraph_reserve_data() and fgraph_retrieve_data() Steven Rostedt
2024-05-25 2:37 ` [PATCH 17/20] function_graph: Add selftest for passing local variables Steven Rostedt
2024-05-25 2:37 ` [PATCH 18/20] ftrace: Add multiple fgraph storage selftest Steven Rostedt
2024-05-25 2:37 ` [PATCH 19/20] function_graph: Use for_each_set_bit() in __ftrace_return_to_handler() Steven Rostedt
2024-05-26 23:58 ` Masami Hiramatsu
2024-05-27 0:04 ` Masami Hiramatsu
2024-05-27 0:32 ` Steven Rostedt
2024-05-25 2:37 ` [PATCH 20/20] function_graph: Use bitmask to loop on fgraph entry Steven Rostedt
2024-05-27 0:09 ` Masami Hiramatsu
2024-05-27 0:33 ` Steven Rostedt
2024-05-27 0:37 ` [PATCH 00/20] function_graph: Allow multiple users for function graph tracing Masami Hiramatsu
2024-05-27 1:18 ` Steven Rostedt