linux-trace-kernel.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
* [PATCH 0/4] tracing: Make more function graph tracer options per-instance
@ 2025-11-14 19:22 Steven Rostedt
  2025-11-14 19:22 ` [PATCH 1/4] tracing: Have function graph tracer option funcgraph-irqs be per instance Steven Rostedt
                   ` (3 more replies)
  0 siblings, 4 replies; 5+ messages in thread
From: Steven Rostedt @ 2025-11-14 19:22 UTC (permalink / raw)
  To: linux-kernel, linux-trace-kernel
  Cc: Masami Hiramatsu, Mark Rutland, Mathieu Desnoyers, Andrew Morton


Convert the function graph tracer options sleep-time and funcgraph-irqs
into per instance options (as they currently are not consistent in
affecting all instances).

Also make graph-time a top level option only, as it only affects the
function profiler and not the function graph tracer itself.

Clean up the function graph tracer set_flags() to use a switch instead
of a bunch of if statements.

Steven Rostedt (4):
      tracing: Have function graph tracer option funcgraph-irqs be per instance
      tracing: Move graph-time out of function graph options
      tracing: Have function graph tracer option sleep-time be per instance
      tracing: Convert function graph set_flags() to use a switch() statement

----
 kernel/trace/fgraph.c                |  10 +--
 kernel/trace/ftrace.c                |   4 +-
 kernel/trace/trace.c                 |  14 +++--
 kernel/trace/trace.h                 |  18 ++++--
 kernel/trace/trace_functions_graph.c | 115 ++++++++++++++++++++++++++---------
 5 files changed, 115 insertions(+), 46 deletions(-)

^ permalink raw reply	[flat|nested] 5+ messages in thread

* [PATCH 1/4] tracing: Have function graph tracer option funcgraph-irqs be per instance
  2025-11-14 19:22 [PATCH 0/4] tracing: Make more function graph tracer options per-instance Steven Rostedt
@ 2025-11-14 19:22 ` Steven Rostedt
  2025-11-14 19:22 ` [PATCH 2/4] tracing: Move graph-time out of function graph options Steven Rostedt
                   ` (2 subsequent siblings)
  3 siblings, 0 replies; 5+ messages in thread
From: Steven Rostedt @ 2025-11-14 19:22 UTC (permalink / raw)
  To: linux-kernel, linux-trace-kernel
  Cc: Masami Hiramatsu, Mark Rutland, Mathieu Desnoyers, Andrew Morton

From: Steven Rostedt <rostedt@goodmis.org>

Currently the option to trace interrupts in the function graph tracer is
global when the interface is per-instance. Changing the value in one
instance will affect the results of another instance that is also running
the function graph tracer. This can lead to confusing results.

Fixes: c132be2c4fcc1 ("function_graph: Have the instances use their own ftrace_ops for filtering")
Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
---
 kernel/trace/trace_functions_graph.c | 41 +++++++++++++++++++++-------
 1 file changed, 31 insertions(+), 10 deletions(-)

diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 4e86adf6dd4d..3f55b49cf64e 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -16,7 +16,7 @@
 #include "trace.h"
 #include "trace_output.h"
 
-/* When set, irq functions will be ignored */
+/* When set, irq functions might be ignored */
 static int ftrace_graph_skip_irqs;
 
 struct fgraph_cpu_data {
@@ -190,11 +190,14 @@ int __trace_graph_retaddr_entry(struct trace_array *tr,
 }
 #endif
 
-static inline int ftrace_graph_ignore_irqs(void)
+static inline int ftrace_graph_ignore_irqs(struct trace_array *tr)
 {
 	if (!ftrace_graph_skip_irqs || trace_recursion_test(TRACE_IRQ_BIT))
 		return 0;
 
+	if (tracer_flags_is_set(tr, TRACE_GRAPH_PRINT_IRQS))
+		return 0;
+
 	return in_hardirq();
 }
 
@@ -238,7 +241,7 @@ static int graph_entry(struct ftrace_graph_ent *trace,
 	if (ftrace_graph_ignore_func(gops, trace))
 		return 0;
 
-	if (ftrace_graph_ignore_irqs())
+	if (ftrace_graph_ignore_irqs(tr))
 		return 0;
 
 	if (fgraph_sleep_time) {
@@ -451,6 +454,9 @@ static int graph_trace_init(struct trace_array *tr)
 	else
 		tr->gops->retfunc = trace_graph_return;
 
+	if (!tracer_flags_is_set(tr, TRACE_GRAPH_PRINT_IRQS))
+		ftrace_graph_skip_irqs++;
+
 	/* Make gops functions visible before we start tracing */
 	smp_mb();
 
@@ -468,10 +474,6 @@ static int ftrace_graph_trace_args(struct trace_array *tr, int set)
 {
 	trace_func_graph_ent_t entry;
 
-	/* Do nothing if the current tracer is not this tracer */
-	if (tr->current_trace != &graph_trace)
-		return 0;
-
 	if (set)
 		entry = trace_graph_entry_args;
 	else
@@ -492,6 +494,11 @@ static int ftrace_graph_trace_args(struct trace_array *tr, int set)
 
 static void graph_trace_reset(struct trace_array *tr)
 {
+	if (!tracer_flags_is_set(tr, TRACE_GRAPH_PRINT_IRQS))
+		ftrace_graph_skip_irqs--;
+	if (WARN_ON_ONCE(ftrace_graph_skip_irqs < 0))
+		ftrace_graph_skip_irqs = 0;
+
 	tracing_stop_cmdline_record();
 	unregister_ftrace_graph(tr->gops);
 }
@@ -1617,15 +1624,29 @@ void graph_trace_close(struct trace_iterator *iter)
 static int
 func_graph_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
 {
-	if (bit == TRACE_GRAPH_PRINT_IRQS)
-		ftrace_graph_skip_irqs = !set;
-
 	if (bit == TRACE_GRAPH_SLEEP_TIME)
 		ftrace_graph_sleep_time_control(set);
 
 	if (bit == TRACE_GRAPH_GRAPH_TIME)
 		ftrace_graph_graph_time_control(set);
 
+	/* Do nothing if the current tracer is not this tracer */
+	if (tr->current_trace != &graph_trace)
+		return 0;
+
+	/* Do nothing if already set. */
+	if (!!set == !!(tr->current_trace_flags->val & bit))
+		return 0;
+
+	if (bit == TRACE_GRAPH_PRINT_IRQS) {
+		if (set)
+			ftrace_graph_skip_irqs--;
+		else
+			ftrace_graph_skip_irqs++;
+		if (WARN_ON_ONCE(ftrace_graph_skip_irqs < 0))
+			ftrace_graph_skip_irqs = 0;
+	}
+
 	if (bit == TRACE_GRAPH_ARGS)
 		return ftrace_graph_trace_args(tr, set);
 
-- 
2.51.0



^ permalink raw reply related	[flat|nested] 5+ messages in thread

* [PATCH 2/4] tracing: Move graph-time out of function graph options
  2025-11-14 19:22 [PATCH 0/4] tracing: Make more function graph tracer options per-instance Steven Rostedt
  2025-11-14 19:22 ` [PATCH 1/4] tracing: Have function graph tracer option funcgraph-irqs be per instance Steven Rostedt
@ 2025-11-14 19:22 ` Steven Rostedt
  2025-11-14 19:22 ` [PATCH 3/4] tracing: Have function graph tracer option sleep-time be per instance Steven Rostedt
  2025-11-14 19:22 ` [PATCH 4/4] tracing: Convert function graph set_flags() to use a switch() statement Steven Rostedt
  3 siblings, 0 replies; 5+ messages in thread
From: Steven Rostedt @ 2025-11-14 19:22 UTC (permalink / raw)
  To: linux-kernel, linux-trace-kernel
  Cc: Masami Hiramatsu, Mark Rutland, Mathieu Desnoyers, Andrew Morton

From: Steven Rostedt <rostedt@goodmis.org>

The option "graph-time" affects the function profiler when it is using the
function graph infrastructure. It has nothing to do with the function
graph tracer itself. The option only affects the global function profiler
and does nothing to the function graph tracer.

Move it out of the function graph tracer options and make it a global
option that is only available at the top level instance.

Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
---
 kernel/trace/trace.c                 | 14 ++++++++++----
 kernel/trace/trace.h                 | 13 ++++++++++++-
 kernel/trace/trace_functions_graph.c | 10 +---------
 3 files changed, 23 insertions(+), 14 deletions(-)

diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 9268489d2ce8..8ae95800592d 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -509,10 +509,10 @@ EXPORT_SYMBOL_GPL(unregister_ftrace_export);
 
 /* trace_flags holds trace_options default values */
 #define TRACE_DEFAULT_FLAGS						\
-	(FUNCTION_DEFAULT_FLAGS |					\
-	 TRACE_ITER(PRINT_PARENT) | TRACE_ITER(PRINTK) |			\
+	(FUNCTION_DEFAULT_FLAGS | FPROFILE_DEFAULT_FLAGS |		\
+	 TRACE_ITER(PRINT_PARENT) | TRACE_ITER(PRINTK) |		\
 	 TRACE_ITER(ANNOTATE) | TRACE_ITER(CONTEXT_INFO) |		\
-	 TRACE_ITER(RECORD_CMD) | TRACE_ITER(OVERWRITE) |			\
+	 TRACE_ITER(RECORD_CMD) | TRACE_ITER(OVERWRITE) |		\
 	 TRACE_ITER(IRQ_INFO) | TRACE_ITER(MARKERS) |			\
 	 TRACE_ITER(HASH_PTR) | TRACE_ITER(TRACE_PRINTK) |		\
 	 TRACE_ITER(COPY_MARKER))
@@ -520,7 +520,7 @@ EXPORT_SYMBOL_GPL(unregister_ftrace_export);
 /* trace_options that are only supported by global_trace */
 #define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER(PRINTK) |			\
 	       TRACE_ITER(PRINTK_MSGONLY) | TRACE_ITER(RECORD_CMD) |	\
-	       TRACE_ITER(PROF_TEXT_OFFSET))
+	       TRACE_ITER(PROF_TEXT_OFFSET) | FPROFILE_DEFAULT_FLAGS)
 
 /* trace_flags that are default zero for instances */
 #define ZEROED_TRACE_FLAGS \
@@ -5331,6 +5331,12 @@ int set_tracer_flag(struct trace_array *tr, u64 mask, int enabled)
 		trace_printk_start_stop_comm(enabled);
 		trace_printk_control(enabled);
 		break;
+
+#if defined(CONFIG_FUNCTION_PROFILER) && defined(CONFIG_FUNCTION_GRAPH_TRACER)
+	case TRACE_GRAPH_GRAPH_TIME:
+		ftrace_graph_graph_time_control(enabled);
+		break;
+#endif
 	}
 
 	return 0;
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 299862aad66c..41b416a22450 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -1368,8 +1368,18 @@ extern int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
 #ifdef CONFIG_FUNCTION_PROFILER
 # define PROFILER_FLAGS					\
 		C(PROF_TEXT_OFFSET,	"prof-text-offset"),
+# ifdef CONFIG_FUNCTION_GRAPH_TRACER
+#  define FPROFILE_FLAGS				\
+		C(GRAPH_TIME,		"graph-time"),
+#  define FPROFILE_DEFAULT_FLAGS	TRACE_ITER(GRAPH_TIME)
+# else
+#  define FPROFILE_FLAGS
+#  define FPROFILE_DEFAULT_FLAGS	0UL
+# endif
 #else
 # define PROFILER_FLAGS
+# define FPROFILE_FLAGS
+# define FPROFILE_DEFAULT_FLAGS			0UL
 # define TRACE_ITER_PROF_TEXT_OFFSET_BIT	-1
 #endif
 
@@ -1412,7 +1422,8 @@ extern int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
 		FGRAPH_FLAGS					\
 		STACK_FLAGS					\
 		BRANCH_FLAGS					\
-		PROFILER_FLAGS
+		PROFILER_FLAGS					\
+		FPROFILE_FLAGS
 
 /*
  * By defining C, we can make TRACE_FLAGS a list of bit names
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 3f55b49cf64e..53adbe4bfedb 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -85,11 +85,6 @@ static struct tracer_opt trace_opts[] = {
 	/* Include sleep time (scheduled out) between entry and return */
 	{ TRACER_OPT(sleep-time, TRACE_GRAPH_SLEEP_TIME) },
 
-#ifdef CONFIG_FUNCTION_PROFILER
-	/* Include time within nested functions */
-	{ TRACER_OPT(graph-time, TRACE_GRAPH_GRAPH_TIME) },
-#endif
-
 	{ } /* Empty entry */
 };
 
@@ -97,7 +92,7 @@ static struct tracer_flags tracer_flags = {
 	/* Don't display overruns, proc, or tail by default */
 	.val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
 	       TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS |
-	       TRACE_GRAPH_SLEEP_TIME | TRACE_GRAPH_GRAPH_TIME,
+	       TRACE_GRAPH_SLEEP_TIME,
 	.opts = trace_opts
 };
 
@@ -1627,9 +1622,6 @@ func_graph_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
 	if (bit == TRACE_GRAPH_SLEEP_TIME)
 		ftrace_graph_sleep_time_control(set);
 
-	if (bit == TRACE_GRAPH_GRAPH_TIME)
-		ftrace_graph_graph_time_control(set);
-
 	/* Do nothing if the current tracer is not this tracer */
 	if (tr->current_trace != &graph_trace)
 		return 0;
-- 
2.51.0



^ permalink raw reply related	[flat|nested] 5+ messages in thread

* [PATCH 3/4] tracing: Have function graph tracer option sleep-time be per instance
  2025-11-14 19:22 [PATCH 0/4] tracing: Make more function graph tracer options per-instance Steven Rostedt
  2025-11-14 19:22 ` [PATCH 1/4] tracing: Have function graph tracer option funcgraph-irqs be per instance Steven Rostedt
  2025-11-14 19:22 ` [PATCH 2/4] tracing: Move graph-time out of function graph options Steven Rostedt
@ 2025-11-14 19:22 ` Steven Rostedt
  2025-11-14 19:22 ` [PATCH 4/4] tracing: Convert function graph set_flags() to use a switch() statement Steven Rostedt
  3 siblings, 0 replies; 5+ messages in thread
From: Steven Rostedt @ 2025-11-14 19:22 UTC (permalink / raw)
  To: linux-kernel, linux-trace-kernel
  Cc: Masami Hiramatsu, Mark Rutland, Mathieu Desnoyers, Andrew Morton

From: Steven Rostedt <rostedt@goodmis.org>

Currently the option to have the function graph tracer ignore time spent
when a task is sleeping is global when the interface is per-instance.
Changing the value in one instance will affect the results of another
instance that is also running the function graph tracer. This can lead to
confusing results.

Fixes: c132be2c4fcc1 ("function_graph: Have the instances use their own ftrace_ops for filtering")
Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
---
 kernel/trace/fgraph.c                | 10 +----
 kernel/trace/ftrace.c                |  4 +-
 kernel/trace/trace.h                 |  5 +--
 kernel/trace/trace_functions_graph.c | 64 +++++++++++++++++++++++-----
 4 files changed, 60 insertions(+), 23 deletions(-)

diff --git a/kernel/trace/fgraph.c b/kernel/trace/fgraph.c
index 484ad7a18463..7fb9b169d6d4 100644
--- a/kernel/trace/fgraph.c
+++ b/kernel/trace/fgraph.c
@@ -498,9 +498,6 @@ void *fgraph_retrieve_parent_data(int idx, int *size_bytes, int depth)
 	return get_data_type_data(current, offset);
 }
 
-/* Both enabled by default (can be cleared by function_graph tracer flags */
-bool fgraph_sleep_time = true;
-
 #ifdef CONFIG_DYNAMIC_FTRACE
 /*
  * archs can override this function if they must do something
@@ -1023,11 +1020,6 @@ void fgraph_init_ops(struct ftrace_ops *dst_ops,
 #endif
 }
 
-void ftrace_graph_sleep_time_control(bool enable)
-{
-	fgraph_sleep_time = enable;
-}
-
 /*
  * Simply points to ftrace_stub, but with the proper protocol.
  * Defined by the linker script in linux/vmlinux.lds.h
@@ -1098,7 +1090,7 @@ ftrace_graph_probe_sched_switch(void *ignore, bool preempt,
 	 * Does the user want to count the time a function was asleep.
 	 * If so, do not update the time stamps.
 	 */
-	if (fgraph_sleep_time)
+	if (!fgraph_no_sleep_time)
 		return;
 
 	timestamp = trace_clock_local();
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index ab601cd9638b..7c3bbebeec7a 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -862,6 +862,8 @@ static int profile_graph_entry(struct ftrace_graph_ent *trace,
 	return 1;
 }
 
+bool fprofile_no_sleep_time;
+
 static void profile_graph_return(struct ftrace_graph_ret *trace,
 				 struct fgraph_ops *gops,
 				 struct ftrace_regs *fregs)
@@ -887,7 +889,7 @@ static void profile_graph_return(struct ftrace_graph_ret *trace,
 
 	calltime = rettime - profile_data->calltime;
 
-	if (!fgraph_sleep_time) {
+	if (fprofile_no_sleep_time) {
 		if (current->ftrace_sleeptime)
 			calltime -= current->ftrace_sleeptime - profile_data->sleeptime;
 	}
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 41b416a22450..58be6d741d72 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -943,8 +943,6 @@ static __always_inline bool ftrace_hash_empty(struct ftrace_hash *hash)
 #define TRACE_GRAPH_PRINT_FILL_SHIFT	28
 #define TRACE_GRAPH_PRINT_FILL_MASK	(0x3 << TRACE_GRAPH_PRINT_FILL_SHIFT)
 
-extern void ftrace_graph_sleep_time_control(bool enable);
-
 #ifdef CONFIG_FUNCTION_PROFILER
 extern void ftrace_graph_graph_time_control(bool enable);
 #else
@@ -1115,7 +1113,8 @@ static inline void ftrace_graph_addr_finish(struct fgraph_ops *gops, struct ftra
 #endif /* CONFIG_DYNAMIC_FTRACE */
 
 extern unsigned int fgraph_max_depth;
-extern bool fgraph_sleep_time;
+extern unsigned int fgraph_no_sleep_time;
+extern bool fprofile_no_sleep_time;
 
 static inline bool
 ftrace_graph_ignore_func(struct fgraph_ops *gops, struct ftrace_graph_ent *trace)
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 53adbe4bfedb..12315eb65925 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -19,6 +19,9 @@
 /* When set, irq functions might be ignored */
 static int ftrace_graph_skip_irqs;
 
+/* Do not record function time when task is sleeping */
+unsigned int fgraph_no_sleep_time;
+
 struct fgraph_cpu_data {
 	pid_t		last_pid;
 	int		depth;
@@ -239,13 +242,14 @@ static int graph_entry(struct ftrace_graph_ent *trace,
 	if (ftrace_graph_ignore_irqs(tr))
 		return 0;
 
-	if (fgraph_sleep_time) {
-		/* Only need to record the calltime */
-		ftimes = fgraph_reserve_data(gops->idx, sizeof(ftimes->calltime));
-	} else {
+	if (fgraph_no_sleep_time &&
+	    !tracer_flags_is_set(tr, TRACE_GRAPH_SLEEP_TIME)) {
 		ftimes = fgraph_reserve_data(gops->idx, sizeof(*ftimes));
 		if (ftimes)
 			ftimes->sleeptime = current->ftrace_sleeptime;
+	} else {
+		/* Only need to record the calltime */
+		ftimes = fgraph_reserve_data(gops->idx, sizeof(ftimes->calltime));
 	}
 	if (!ftimes)
 		return 0;
@@ -331,11 +335,15 @@ void __trace_graph_return(struct trace_array *tr,
 	trace_buffer_unlock_commit_nostack(buffer, event);
 }
 
-static void handle_nosleeptime(struct ftrace_graph_ret *trace,
+static void handle_nosleeptime(struct trace_array *tr,
+			       struct ftrace_graph_ret *trace,
 			       struct fgraph_times *ftimes,
 			       int size)
 {
-	if (fgraph_sleep_time || size < sizeof(*ftimes))
+	if (size < sizeof(*ftimes))
+		return;
+
+	if (!fgraph_no_sleep_time || tracer_flags_is_set(tr, TRACE_GRAPH_SLEEP_TIME))
 		return;
 
 	ftimes->calltime += current->ftrace_sleeptime - ftimes->sleeptime;
@@ -364,7 +372,7 @@ void trace_graph_return(struct ftrace_graph_ret *trace,
 	if (!ftimes)
 		return;
 
-	handle_nosleeptime(trace, ftimes, size);
+	handle_nosleeptime(tr, trace, ftimes, size);
 
 	calltime = ftimes->calltime;
 
@@ -377,6 +385,7 @@ static void trace_graph_thresh_return(struct ftrace_graph_ret *trace,
 				      struct ftrace_regs *fregs)
 {
 	struct fgraph_times *ftimes;
+	struct trace_array *tr;
 	int size;
 
 	ftrace_graph_addr_finish(gops, trace);
@@ -390,7 +399,8 @@ static void trace_graph_thresh_return(struct ftrace_graph_ret *trace,
 	if (!ftimes)
 		return;
 
-	handle_nosleeptime(trace, ftimes, size);
+	tr = gops->private;
+	handle_nosleeptime(tr, trace, ftimes, size);
 
 	if (tracing_thresh &&
 	    (trace_clock_local() - ftimes->calltime < tracing_thresh))
@@ -452,6 +462,9 @@ static int graph_trace_init(struct trace_array *tr)
 	if (!tracer_flags_is_set(tr, TRACE_GRAPH_PRINT_IRQS))
 		ftrace_graph_skip_irqs++;
 
+	if (!tracer_flags_is_set(tr, TRACE_GRAPH_SLEEP_TIME))
+		fgraph_no_sleep_time++;
+
 	/* Make gops functions visible before we start tracing */
 	smp_mb();
 
@@ -494,6 +507,11 @@ static void graph_trace_reset(struct trace_array *tr)
 	if (WARN_ON_ONCE(ftrace_graph_skip_irqs < 0))
 		ftrace_graph_skip_irqs = 0;
 
+	if (!tracer_flags_is_set(tr, TRACE_GRAPH_SLEEP_TIME))
+		fgraph_no_sleep_time--;
+	if (WARN_ON_ONCE(fgraph_no_sleep_time < 0))
+		fgraph_no_sleep_time = 0;
+
 	tracing_stop_cmdline_record();
 	unregister_ftrace_graph(tr->gops);
 }
@@ -1619,8 +1637,24 @@ void graph_trace_close(struct trace_iterator *iter)
 static int
 func_graph_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
 {
-	if (bit == TRACE_GRAPH_SLEEP_TIME)
-		ftrace_graph_sleep_time_control(set);
+/*
+ * The function profiler gets updated even if function graph
+ * isn't the current tracer. Handle it separately.
+ */
+#ifdef CONFIG_FUNCTION_PROFILER
+	if (bit == TRACE_GRAPH_SLEEP_TIME && (tr->flags & TRACE_ARRAY_FL_GLOBAL) &&
+	    !!set == fprofile_no_sleep_time) {
+		if (set) {
+			fgraph_no_sleep_time--;
+			if (WARN_ON_ONCE(fgraph_no_sleep_time < 0))
+				fgraph_no_sleep_time = 0;
+			fprofile_no_sleep_time = false;
+		} else {
+			fgraph_no_sleep_time++;
+			fprofile_no_sleep_time = true;
+		}
+	}
+#endif
 
 	/* Do nothing if the current tracer is not this tracer */
 	if (tr->current_trace != &graph_trace)
@@ -1630,6 +1664,16 @@ func_graph_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
 	if (!!set == !!(tr->current_trace_flags->val & bit))
 		return 0;
 
+	if (bit == TRACE_GRAPH_SLEEP_TIME) {
+		if (set) {
+			fgraph_no_sleep_time--;
+			if (WARN_ON_ONCE(fgraph_no_sleep_time < 0))
+				fgraph_no_sleep_time = 0;
+		} else {
+			fgraph_no_sleep_time++;
+		}
+	}
+
 	if (bit == TRACE_GRAPH_PRINT_IRQS) {
 		if (set)
 			ftrace_graph_skip_irqs--;
-- 
2.51.0



^ permalink raw reply related	[flat|nested] 5+ messages in thread

* [PATCH 4/4] tracing: Convert function graph set_flags() to use a switch() statement
  2025-11-14 19:22 [PATCH 0/4] tracing: Make more function graph tracer options per-instance Steven Rostedt
                   ` (2 preceding siblings ...)
  2025-11-14 19:22 ` [PATCH 3/4] tracing: Have function graph tracer option sleep-time be per instance Steven Rostedt
@ 2025-11-14 19:22 ` Steven Rostedt
  3 siblings, 0 replies; 5+ messages in thread
From: Steven Rostedt @ 2025-11-14 19:22 UTC (permalink / raw)
  To: linux-kernel, linux-trace-kernel
  Cc: Masami Hiramatsu, Mark Rutland, Mathieu Desnoyers, Andrew Morton

From: Steven Rostedt <rostedt@goodmis.org>

Currently the set_flags() of the function graph tracer has a bunch of:

  if (bit == FLAG1) {
	[..]
  }

  if (bit == FLAG2) {
	[..]
  }

To clean it up a bit, convert it over to a switch statement.

Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
---
 kernel/trace/trace_functions_graph.c | 12 +++++++-----
 1 file changed, 7 insertions(+), 5 deletions(-)

diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 12315eb65925..44d5dc5031e2 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -1664,7 +1664,8 @@ func_graph_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
 	if (!!set == !!(tr->current_trace_flags->val & bit))
 		return 0;
 
-	if (bit == TRACE_GRAPH_SLEEP_TIME) {
+	switch (bit) {
+	case TRACE_GRAPH_SLEEP_TIME:
 		if (set) {
 			fgraph_no_sleep_time--;
 			if (WARN_ON_ONCE(fgraph_no_sleep_time < 0))
@@ -1672,19 +1673,20 @@ func_graph_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
 		} else {
 			fgraph_no_sleep_time++;
 		}
-	}
+		break;
 
-	if (bit == TRACE_GRAPH_PRINT_IRQS) {
+	case TRACE_GRAPH_PRINT_IRQS:
 		if (set)
 			ftrace_graph_skip_irqs--;
 		else
 			ftrace_graph_skip_irqs++;
 		if (WARN_ON_ONCE(ftrace_graph_skip_irqs < 0))
 			ftrace_graph_skip_irqs = 0;
-	}
+		break;
 
-	if (bit == TRACE_GRAPH_ARGS)
+	case TRACE_GRAPH_ARGS:
 		return ftrace_graph_trace_args(tr, set);
+	}
 
 	return 0;
 }
-- 
2.51.0



^ permalink raw reply related	[flat|nested] 5+ messages in thread

end of thread, other threads:[~2025-11-14 19:23 UTC | newest]

Thread overview: 5+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2025-11-14 19:22 [PATCH 0/4] tracing: Make more function graph tracer options per-instance Steven Rostedt
2025-11-14 19:22 ` [PATCH 1/4] tracing: Have function graph tracer option funcgraph-irqs be per instance Steven Rostedt
2025-11-14 19:22 ` [PATCH 2/4] tracing: Move graph-time out of function graph options Steven Rostedt
2025-11-14 19:22 ` [PATCH 3/4] tracing: Have function graph tracer option sleep-time be per instance Steven Rostedt
2025-11-14 19:22 ` [PATCH 4/4] tracing: Convert function graph set_flags() to use a switch() statement Steven Rostedt

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).