* [PATCH 1/3] fgraph: Use fgraph data to store subtime for profiler
2024-09-14 21:48 [PATCH 0/3] fgraph: Do not save calltime in shadow stack Steven Rostedt
@ 2024-09-14 21:48 ` Steven Rostedt
2024-09-14 21:48 ` [PATCH 2/3] ftrace: Use a running sleeptime instead of saving on shadow stack Steven Rostedt
` (2 subsequent siblings)
3 siblings, 0 replies; 5+ messages in thread
From: Steven Rostedt @ 2024-09-14 21:48 UTC (permalink / raw)
To: linux-kernel, linux-trace-kernel
Cc: Masami Hiramatsu, Mark Rutland, Mathieu Desnoyers, Andrew Morton,
Jiri Olsa
From: Steven Rostedt <rostedt@goodmis.org>
Instead of keeping the "subtime" for the function profiler in the fgraph
infrastructure's ftrace_ret_stack structure, have the profiler use the
fgraph data reserve and retrieve functions.
This keeps the limited shadow stack from wasting 8 bytes on a field that
is seldom used.
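To illustrate the pattern being converted to, here is a minimal sketch of
an fgraph_ops user keeping per-call data in fgraph storage; my_entry() and
my_return() are hypothetical names, and the real conversion is in the diff
below:

/* Illustrative sketch only -- my_entry()/my_return() are hypothetical */
static int my_entry(struct ftrace_graph_ent *trace, struct fgraph_ops *gops)
{
	unsigned long long *subtime;

	/* Reserve 8 bytes tied to this function's shadow stack frame */
	subtime = fgraph_reserve_data(gops->idx, sizeof(*subtime));
	if (!subtime)
		return 0;	/* no room; do not trace this call */
	*subtime = 0;
	return 1;
}

static void my_return(struct ftrace_graph_ret *trace, struct fgraph_ops *gops)
{
	unsigned long long *subtime;
	int size;

	/* Retrieve what the matching entry callback reserved */
	subtime = fgraph_retrieve_data(gops->idx, &size);
	if (subtime) {
		/* ... use *subtime ... */
	}
}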
Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
---
include/linux/ftrace.h | 4 +--
kernel/trace/fgraph.c | 64 ++++++++++++++++++++++++++++++++----------
kernel/trace/ftrace.c | 23 +++++++--------
3 files changed, 62 insertions(+), 29 deletions(-)
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index fd5e84d0ec47..6bbd78052f7a 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -1055,6 +1055,7 @@ struct fgraph_ops {
void *fgraph_reserve_data(int idx, int size_bytes);
void *fgraph_retrieve_data(int idx, int *size_bytes);
+void *fgraph_retrieve_parent_data(int idx, int *size_bytes, int depth);
/*
* Stack of return addresses for functions
@@ -1065,9 +1066,6 @@ struct ftrace_ret_stack {
unsigned long ret;
unsigned long func;
unsigned long long calltime;
-#ifdef CONFIG_FUNCTION_PROFILER
- unsigned long long subtime;
-#endif
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
unsigned long fp;
#endif
diff --git a/kernel/trace/fgraph.c b/kernel/trace/fgraph.c
index d7d4fb403f6f..095ceb752b28 100644
--- a/kernel/trace/fgraph.c
+++ b/kernel/trace/fgraph.c
@@ -390,21 +390,7 @@ void *fgraph_reserve_data(int idx, int size_bytes)
*/
void *fgraph_retrieve_data(int idx, int *size_bytes)
{
- int offset = current->curr_ret_stack - 1;
- unsigned long val;
-
- val = get_fgraph_entry(current, offset);
- while (__get_type(val) == FGRAPH_TYPE_DATA) {
- if (__get_data_index(val) == idx)
- goto found;
- offset -= __get_data_size(val) + 1;
- val = get_fgraph_entry(current, offset);
- }
- return NULL;
-found:
- if (size_bytes)
- *size_bytes = __get_data_size(val) * sizeof(long);
- return get_data_type_data(current, offset);
+ return fgraph_retrieve_parent_data(idx, size_bytes, 0);
}
/**
@@ -460,6 +446,54 @@ get_ret_stack(struct task_struct *t, int offset, int *frame_offset)
return RET_STACK(t, offset);
}
+/**
+ * fgraph_retrieve_parent_data - get data from a parent function
+ * @idx: The index into the fgraph_array (fgraph_ops::idx)
+ * @size_bytes: A pointer to retrieved data size
+ * @depth: The depth to find the parent (0 is the current function)
+ *
+ * This is similar to fgraph_retrieve_data() but can be used to retrieve
+ * data from a parent caller function.
+ *
+ * Return: a pointer to the specified parent data or NULL if not found
+ */
+void *fgraph_retrieve_parent_data(int idx, int *size_bytes, int depth)
+{
+ struct ftrace_ret_stack *ret_stack = NULL;
+ int offset = current->curr_ret_stack;
+ unsigned long val;
+
+ if (offset <= 0)
+ return NULL;
+
+ for (;;) {
+ int next_offset;
+
+ ret_stack = get_ret_stack(current, offset, &next_offset);
+ if (!ret_stack || --depth < 0)
+ break;
+ offset = next_offset;
+ }
+
+ if (!ret_stack)
+ return NULL;
+
+ offset--;
+
+ val = get_fgraph_entry(current, offset);
+ while (__get_type(val) == FGRAPH_TYPE_DATA) {
+ if (__get_data_index(val) == idx)
+ goto found;
+ offset -= __get_data_size(val) + 1;
+ val = get_fgraph_entry(current, offset);
+ }
+ return NULL;
+found:
+ if (size_bytes)
+ *size_bytes = __get_data_size(val) * sizeof(long);
+ return get_data_type_data(current, offset);
+}
+
/* Both enabled by default (can be cleared by function_graph tracer flags */
static bool fgraph_sleep_time = true;
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 4c28dd177ca6..196647059800 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -823,7 +823,7 @@ void ftrace_graph_graph_time_control(bool enable)
static int profile_graph_entry(struct ftrace_graph_ent *trace,
struct fgraph_ops *gops)
{
- struct ftrace_ret_stack *ret_stack;
+ unsigned long long *subtime;
function_profile_call(trace->func, 0, NULL, NULL);
@@ -831,9 +831,9 @@ static int profile_graph_entry(struct ftrace_graph_ent *trace,
if (!current->ret_stack)
return 0;
- ret_stack = ftrace_graph_get_ret_stack(current, 0);
- if (ret_stack)
- ret_stack->subtime = 0;
+ subtime = fgraph_reserve_data(gops->idx, sizeof(*subtime));
+ if (subtime)
+ *subtime = 0;
return 1;
}
@@ -841,11 +841,12 @@ static int profile_graph_entry(struct ftrace_graph_ent *trace,
static void profile_graph_return(struct ftrace_graph_ret *trace,
struct fgraph_ops *gops)
{
- struct ftrace_ret_stack *ret_stack;
struct ftrace_profile_stat *stat;
unsigned long long calltime;
+ unsigned long long *subtime;
struct ftrace_profile *rec;
unsigned long flags;
+ int size;
local_irq_save(flags);
stat = this_cpu_ptr(&ftrace_profile_stats);
@@ -861,13 +862,13 @@ static void profile_graph_return(struct ftrace_graph_ret *trace,
if (!fgraph_graph_time) {
/* Append this call time to the parent time to subtract */
- ret_stack = ftrace_graph_get_ret_stack(current, 1);
- if (ret_stack)
- ret_stack->subtime += calltime;
+ subtime = fgraph_retrieve_parent_data(gops->idx, &size, 1);
+ if (subtime)
+ *subtime += calltime;
- ret_stack = ftrace_graph_get_ret_stack(current, 0);
- if (ret_stack && ret_stack->subtime < calltime)
- calltime -= ret_stack->subtime;
+ subtime = fgraph_retrieve_data(gops->idx, &size);
+ if (subtime && *subtime && *subtime < calltime)
+ calltime -= *subtime;
else
calltime = 0;
}
--
2.45.2
* [PATCH 2/3] ftrace: Use a running sleeptime instead of saving on shadow stack
2024-09-14 21:48 [PATCH 0/3] fgraph: Do not save calltime in shadow stack Steven Rostedt
2024-09-14 21:48 ` [PATCH 1/3] fgraph: Use fgraph data to store subtime for profiler Steven Rostedt
@ 2024-09-14 21:48 ` Steven Rostedt
2024-09-14 21:48 ` [PATCH 3/3] ftrace: Have calltime be saved in the fgraph storage Steven Rostedt
2024-09-15 5:22 ` [PATCH 0/3] fgraph: Do not save calltime in shadow stack Masami Hiramatsu
3 siblings, 0 replies; 5+ messages in thread
From: Steven Rostedt @ 2024-09-14 21:48 UTC (permalink / raw)
To: linux-kernel, linux-trace-kernel
Cc: Masami Hiramatsu, Mark Rutland, Mathieu Desnoyers, Andrew Morton,
Jiri Olsa
From: Steven Rostedt <rostedt@goodmis.org>
The fgraph "sleep-time" option tells the function graph tracer and the
profiler whether to include the time a function "sleeps" (is scheduled off
the CPU) in its duration for the function. By default it is true, which
means the duration of a function is calculated by the timestamp of when the
function was entered to the timestamp of when it exits.
If the "sleep-time" option is disabled, it needs to remove the time that the
task was not running on the CPU during the function. Currently it is done in
a sched_switch tracepoint probe where it moves the "calltime" (time of entry
of the function) forward by the sleep time calculated. It updates all the
calltime in the shadow stack.
This is time consuming for those users of the function graph tracer that do
not care about the sleep time. Instead, add an "ftrace_sleeptime" field to
the task_struct that has the sleep time added to it each time the task
wakes up. Then have the function entry save the current "ftrace_sleeptime"
and, on function exit, move the calltime forward by the difference between
the current "ftrace_sleeptime" and the saved value.
This removes one reason "calltime" needs to be on the shadow stack.
It also simplifies the code that removes the sleep time from functions.
TODO: Only enable the sched_switch tracepoint when this is needed.
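A sketch of the resulting bookkeeping (the "saved" snapshot below is a
hypothetical per-frame value; the real hooks are in the diff):

/* sched_switch probe: one addition, no walk of the shadow stack */
next->ftrace_sleeptime += timestamp - next->ftrace_timestamp;

/* function entry: snapshot the task's running sleep total */
saved = current->ftrace_sleeptime;

/* function exit: time accrued since entry was spent asleep, so
 * push the calltime forward by that amount
 */
calltime += current->ftrace_sleeptime - saved;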
Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
---
include/linux/sched.h | 1 +
kernel/trace/fgraph.c | 16 ++----------
kernel/trace/ftrace.c | 39 ++++++++++++++++++++--------
kernel/trace/trace.h | 1 +
kernel/trace/trace_functions_graph.c | 28 ++++++++++++++++++++
5 files changed, 60 insertions(+), 25 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index f8d150343d42..46ddda3697f7 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1417,6 +1417,7 @@ struct task_struct {
/* Timestamp for last schedule: */
unsigned long long ftrace_timestamp;
+ unsigned long long ftrace_sleeptime;
/*
* Number of functions that haven't been traced
diff --git a/kernel/trace/fgraph.c b/kernel/trace/fgraph.c
index 095ceb752b28..b2e95bf82211 100644
--- a/kernel/trace/fgraph.c
+++ b/kernel/trace/fgraph.c
@@ -495,7 +495,7 @@ void *fgraph_retrieve_parent_data(int idx, int *size_bytes, int depth)
}
/* Both enabled by default (can be cleared by function_graph tracer flags */
-static bool fgraph_sleep_time = true;
+bool fgraph_sleep_time = true;
#ifdef CONFIG_DYNAMIC_FTRACE
/*
@@ -1046,9 +1046,7 @@ ftrace_graph_probe_sched_switch(void *ignore, bool preempt,
struct task_struct *next,
unsigned int prev_state)
{
- struct ftrace_ret_stack *ret_stack;
unsigned long long timestamp;
- int offset;
/*
* Does the user want to count the time a function was asleep.
@@ -1065,17 +1063,7 @@ ftrace_graph_probe_sched_switch(void *ignore, bool preempt,
if (!next->ftrace_timestamp)
return;
- /*
- * Update all the counters in next to make up for the
- * time next was sleeping.
- */
- timestamp -= next->ftrace_timestamp;
-
- for (offset = next->curr_ret_stack; offset > 0; ) {
- ret_stack = get_ret_stack(next, offset, &offset);
- if (ret_stack)
- ret_stack->calltime += timestamp;
- }
+ next->ftrace_sleeptime += timestamp - next->ftrace_timestamp;
}
static DEFINE_PER_CPU(unsigned long *, idle_ret_stack);
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 196647059800..f3d1702a7d8d 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -820,10 +820,15 @@ void ftrace_graph_graph_time_control(bool enable)
fgraph_graph_time = enable;
}
+struct profile_fgraph_data {
+ unsigned long long subtime;
+ unsigned long long sleeptime;
+};
+
static int profile_graph_entry(struct ftrace_graph_ent *trace,
struct fgraph_ops *gops)
{
- unsigned long long *subtime;
+ struct profile_fgraph_data *profile_data;
function_profile_call(trace->func, 0, NULL, NULL);
@@ -831,9 +836,12 @@ static int profile_graph_entry(struct ftrace_graph_ent *trace,
if (!current->ret_stack)
return 0;
- subtime = fgraph_reserve_data(gops->idx, sizeof(*subtime));
- if (subtime)
- *subtime = 0;
+ profile_data = fgraph_reserve_data(gops->idx, sizeof(*profile_data));
+ if (!profile_data)
+ return 0;
+
+ profile_data->subtime = 0;
+ profile_data->sleeptime = current->ftrace_sleeptime;
return 1;
}
@@ -841,9 +849,10 @@ static int profile_graph_entry(struct ftrace_graph_ent *trace,
static void profile_graph_return(struct ftrace_graph_ret *trace,
struct fgraph_ops *gops)
{
+ struct profile_fgraph_data *profile_data;
+ struct profile_fgraph_data *parent_data;
struct ftrace_profile_stat *stat;
unsigned long long calltime;
- unsigned long long *subtime;
struct ftrace_profile *rec;
unsigned long flags;
int size;
@@ -859,16 +868,24 @@ static void profile_graph_return(struct ftrace_graph_ret *trace,
calltime = trace->rettime - trace->calltime;
+ if (!fgraph_sleep_time) {
+ profile_data = fgraph_retrieve_data(gops->idx, &size);
+ if (profile_data && current->ftrace_sleeptime)
+ calltime -= current->ftrace_sleeptime - profile_data->sleeptime;
+ }
+
if (!fgraph_graph_time) {
/* Append this call time to the parent time to subtract */
- subtime = fgraph_retrieve_parent_data(gops->idx, &size, 1);
- if (subtime)
- *subtime += calltime;
+ parent_data = fgraph_retrieve_parent_data(gops->idx, &size, 1);
+ if (parent_data)
+ parent_data->subtime += calltime;
+
+ if (!profile_data)
+ profile_data = fgraph_retrieve_data(gops->idx, &size);
- subtime = fgraph_retrieve_data(gops->idx, &size);
- if (subtime && *subtime && *subtime < calltime)
- calltime -= *subtime;
+ if (profile_data && profile_data->subtime && profile_data->subtime < calltime)
+ calltime -= profile_data->subtime;
else
calltime = 0;
}
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index bd3e3069300e..8a3cfe67a76c 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -1039,6 +1039,7 @@ static inline void ftrace_graph_addr_finish(struct fgraph_ops *gops, struct ftra
#endif /* CONFIG_DYNAMIC_FTRACE */
extern unsigned int fgraph_max_depth;
+extern bool fgraph_sleep_time;
static inline bool
ftrace_graph_ignore_func(struct fgraph_ops *gops, struct ftrace_graph_ent *trace)
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 13d0387ac6a6..14b82fb7082c 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -133,6 +133,7 @@ int trace_graph_entry(struct ftrace_graph_ent *trace,
unsigned long *task_var = fgraph_get_task_var(gops);
struct trace_array *tr = gops->private;
struct trace_array_cpu *data;
+ unsigned long *sleeptime;
unsigned long flags;
unsigned int trace_ctx;
long disabled;
@@ -167,6 +168,13 @@ int trace_graph_entry(struct ftrace_graph_ent *trace,
if (ftrace_graph_ignore_irqs())
return 0;
+ /* save the current sleep time if we are to ignore it */
+ if (!fgraph_sleep_time) {
+ sleeptime = fgraph_reserve_data(gops->idx, sizeof(*sleeptime));
+ if (sleeptime)
+ *sleeptime = current->ftrace_sleeptime;
+ }
+
/*
* Stop here if tracing_threshold is set. We only write function return
* events to the ring buffer.
@@ -238,6 +246,22 @@ void __trace_graph_return(struct trace_array *tr,
trace_buffer_unlock_commit_nostack(buffer, event);
}
+static void handle_nosleeptime(struct ftrace_graph_ret *trace,
+ struct fgraph_ops *gops)
+{
+ unsigned long long *sleeptime;
+ int size;
+
+ if (fgraph_sleep_time)
+ return;
+
+ sleeptime = fgraph_retrieve_data(gops->idx, &size);
+ if (!sleeptime)
+ return;
+
+ trace->calltime += current->ftrace_sleeptime - *sleeptime;
+}
+
void trace_graph_return(struct ftrace_graph_ret *trace,
struct fgraph_ops *gops)
{
@@ -256,6 +280,8 @@ void trace_graph_return(struct ftrace_graph_ret *trace,
return;
}
+ handle_nosleeptime(trace, gops);
+
local_irq_save(flags);
cpu = raw_smp_processor_id();
data = per_cpu_ptr(tr->array_buffer.data, cpu);
@@ -278,6 +304,8 @@ static void trace_graph_thresh_return(struct ftrace_graph_ret *trace,
return;
}
+ handle_nosleeptime(trace, gops);
+
if (tracing_thresh &&
(trace->rettime - trace->calltime < tracing_thresh))
return;
--
2.45.2
* [PATCH 3/3] ftrace: Have calltime be saved in the fgraph storage
2024-09-14 21:48 [PATCH 0/3] fgraph: Do not save calltime in shadow stack Steven Rostedt
2024-09-14 21:48 ` [PATCH 1/3] fgraph: Use fgraph data to store subtime for profiler Steven Rostedt
2024-09-14 21:48 ` [PATCH 2/3] ftrace: Use a running sleeptime instead of saving on shadow stack Steven Rostedt
@ 2024-09-14 21:48 ` Steven Rostedt
2024-09-15 5:22 ` [PATCH 0/3] fgraph: Do not save calltime in shadow stack Masami Hiramatsu
3 siblings, 0 replies; 5+ messages in thread
From: Steven Rostedt @ 2024-09-14 21:48 UTC (permalink / raw)
To: linux-kernel, linux-trace-kernel
Cc: Masami Hiramatsu, Mark Rutland, Mathieu Desnoyers, Andrew Morton,
Jiri Olsa
From: Steven Rostedt <rostedt@goodmis.org>
The calltime field in the shadow stack frame is only used by the function
graph tracer and profiler. But now that there are other users of the
function graph infrastructure, it adds overhead and wastes space on the
shadow stack. Move the calltime to the fgraph data storage, where the
function graph tracer and profiler entry functions save it in their own
graph storage and retrieve it in their exit functions.
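As a rough sketch of the resulting shape (struct and callback names are
hypothetical; the real changes are in the diffs below), the timestamping is
now entirely owned by the fgraph user:

/* Illustrative sketch only -- my_times/my_entry/my_return are hypothetical */
struct my_times {
	unsigned long long calltime;
};

static int my_entry(struct ftrace_graph_ent *trace, struct fgraph_ops *gops)
{
	struct my_times *ftimes;

	ftimes = fgraph_reserve_data(gops->idx, sizeof(*ftimes));
	if (!ftimes)
		return 0;
	/* The user, not the fgraph core, takes the entry timestamp */
	ftimes->calltime = trace_clock_local();
	return 1;
}

static void my_return(struct ftrace_graph_ret *trace, struct fgraph_ops *gops)
{
	struct my_times *ftimes;
	int size;

	ftimes = fgraph_retrieve_data(gops->idx, &size);
	if (ftimes) {
		/* Duration computed from user-owned timestamps */
		unsigned long long duration = trace_clock_local() - ftimes->calltime;
		/* ... record duration ... */
	}
}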
Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
---
include/linux/ftrace.h | 1 -
kernel/trace/fgraph.c | 5 ---
kernel/trace/ftrace.c | 19 ++++-----
kernel/trace/trace_functions_graph.c | 60 +++++++++++++++++++---------
4 files changed, 51 insertions(+), 34 deletions(-)
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 6bbd78052f7a..eafe43a5fa0d 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -1065,7 +1065,6 @@ void *fgraph_retrieve_parent_data(int idx, int *size_bytes, int depth);
struct ftrace_ret_stack {
unsigned long ret;
unsigned long func;
- unsigned long long calltime;
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
unsigned long fp;
#endif
diff --git a/kernel/trace/fgraph.c b/kernel/trace/fgraph.c
index b2e95bf82211..58a28ec35dab 100644
--- a/kernel/trace/fgraph.c
+++ b/kernel/trace/fgraph.c
@@ -558,7 +558,6 @@ ftrace_push_return_trace(unsigned long ret, unsigned long func,
int fgraph_idx)
{
struct ftrace_ret_stack *ret_stack;
- unsigned long long calltime;
unsigned long val;
int offset;
@@ -588,8 +587,6 @@ ftrace_push_return_trace(unsigned long ret, unsigned long func,
return -EBUSY;
}
- calltime = trace_clock_local();
-
offset = READ_ONCE(current->curr_ret_stack);
ret_stack = RET_STACK(current, offset);
offset += FGRAPH_FRAME_OFFSET;
@@ -623,7 +620,6 @@ ftrace_push_return_trace(unsigned long ret, unsigned long func,
ret_stack->ret = ret;
ret_stack->func = func;
- ret_stack->calltime = calltime;
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
ret_stack->fp = frame_pointer;
#endif
@@ -757,7 +753,6 @@ ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
*offset += FGRAPH_FRAME_OFFSET;
*ret = ret_stack->ret;
trace->func = ret_stack->func;
- trace->calltime = ret_stack->calltime;
trace->overrun = atomic_read(&current->trace_overrun);
trace->depth = current->curr_ret_depth;
/*
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index f3d1702a7d8d..5dccb02b6525 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -821,6 +821,7 @@ void ftrace_graph_graph_time_control(bool enable)
}
struct profile_fgraph_data {
+ unsigned long long calltime;
unsigned long long subtime;
unsigned long long sleeptime;
};
@@ -842,6 +843,7 @@ static int profile_graph_entry(struct ftrace_graph_ent *trace,
profile_data->subtime = 0;
profile_data->sleeptime = current->ftrace_sleeptime;
+ profile_data->calltime = trace_clock_local();
return 1;
}
@@ -850,9 +852,9 @@ static void profile_graph_return(struct ftrace_graph_ret *trace,
struct fgraph_ops *gops)
{
struct profile_fgraph_data *profile_data;
- struct profile_fgraph_data *parent_data;
struct ftrace_profile_stat *stat;
unsigned long long calltime;
+ unsigned long long rettime = trace_clock_local();
struct ftrace_profile *rec;
unsigned long flags;
int size;
@@ -862,29 +864,28 @@ static void profile_graph_return(struct ftrace_graph_ret *trace,
if (!stat->hash || !ftrace_profile_enabled)
goto out;
+ profile_data = fgraph_retrieve_data(gops->idx, &size);
+
/* If the calltime was zero'd ignore it */
- if (!trace->calltime)
+ if (!profile_data || !profile_data->calltime)
goto out;
- calltime = trace->rettime - trace->calltime;
+ calltime = rettime - profile_data->calltime;
if (!fgraph_sleep_time) {
- profile_data = fgraph_retrieve_data(gops->idx, &size);
- if (profile_data && current->ftrace_sleeptime)
+ if (current->ftrace_sleeptime)
calltime -= current->ftrace_sleeptime - profile_data->sleeptime;
}
if (!fgraph_graph_time) {
+ struct profile_fgraph_data *parent_data;
/* Append this call time to the parent time to subtract */
parent_data = fgraph_retrieve_parent_data(gops->idx, &size, 1);
if (parent_data)
parent_data->subtime += calltime;
- if (!profile_data)
- profile_data = fgraph_retrieve_data(gops->idx, &size);
-
- if (profile_data && profile_data->subtime && profile_data->subtime < calltime)
+ if (profile_data->subtime && profile_data->subtime < calltime)
calltime -= profile_data->subtime;
else
calltime = 0;
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 14b82fb7082c..ff0871a9425a 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -127,13 +127,18 @@ static inline int ftrace_graph_ignore_irqs(void)
return in_hardirq();
}
+struct fgraph_times {
+ unsigned long long calltime;
+ unsigned long long sleeptime; /* may be optional! */
+};
+
int trace_graph_entry(struct ftrace_graph_ent *trace,
struct fgraph_ops *gops)
{
unsigned long *task_var = fgraph_get_task_var(gops);
struct trace_array *tr = gops->private;
struct trace_array_cpu *data;
- unsigned long *sleeptime;
+ struct fgraph_times *ftimes;
unsigned long flags;
unsigned int trace_ctx;
long disabled;
@@ -168,12 +173,18 @@ int trace_graph_entry(struct ftrace_graph_ent *trace,
if (ftrace_graph_ignore_irqs())
return 0;
- /* save the current sleep time if we are to ignore it */
- if (!fgraph_sleep_time) {
- sleeptime = fgraph_reserve_data(gops->idx, sizeof(*sleeptime));
- if (sleeptime)
- *sleeptime = current->ftrace_sleeptime;
+ if (fgraph_sleep_time) {
+ /* Only need to record the calltime */
+ ftimes = fgraph_reserve_data(gops->idx, sizeof(ftimes->calltime));
+ } else {
+ ftimes = fgraph_reserve_data(gops->idx, sizeof(*ftimes));
+ if (ftimes)
+ ftimes->sleeptime = current->ftrace_sleeptime;
}
+ if (!ftimes)
+ return 0;
+
+ ftimes->calltime = trace_clock_local();
/*
* Stop here if tracing_threshold is set. We only write function return
@@ -247,19 +258,13 @@ void __trace_graph_return(struct trace_array *tr,
}
static void handle_nosleeptime(struct ftrace_graph_ret *trace,
- struct fgraph_ops *gops)
+ struct fgraph_times *ftimes,
+ int size)
{
- unsigned long long *sleeptime;
- int size;
-
- if (fgraph_sleep_time)
- return;
-
- sleeptime = fgraph_retrieve_data(gops->idx, &size);
- if (!sleeptime)
+ if (fgraph_sleep_time || size < sizeof(*ftimes))
return;
- trace->calltime += current->ftrace_sleeptime - *sleeptime;
+ ftimes->calltime += current->ftrace_sleeptime - ftimes->sleeptime;
}
void trace_graph_return(struct ftrace_graph_ret *trace,
@@ -268,9 +273,11 @@ void trace_graph_return(struct ftrace_graph_ret *trace,
unsigned long *task_var = fgraph_get_task_var(gops);
struct trace_array *tr = gops->private;
struct trace_array_cpu *data;
+ struct fgraph_times *ftimes;
unsigned long flags;
unsigned int trace_ctx;
long disabled;
+ int size;
int cpu;
ftrace_graph_addr_finish(gops, trace);
@@ -280,7 +287,13 @@ void trace_graph_return(struct ftrace_graph_ret *trace,
return;
}
- handle_nosleeptime(trace, gops);
+ ftimes = fgraph_retrieve_data(gops->idx, &size);
+ if (!ftimes)
+ return;
+
+ handle_nosleeptime(trace, ftimes, size);
+
+ trace->calltime = ftimes->calltime;
local_irq_save(flags);
cpu = raw_smp_processor_id();
@@ -297,6 +310,9 @@ void trace_graph_return(struct ftrace_graph_ret *trace,
static void trace_graph_thresh_return(struct ftrace_graph_ret *trace,
struct fgraph_ops *gops)
{
+ struct fgraph_times *ftimes;
+ int size;
+
ftrace_graph_addr_finish(gops, trace);
if (trace_recursion_test(TRACE_GRAPH_NOTRACE_BIT)) {
@@ -304,10 +320,16 @@ static void trace_graph_thresh_return(struct ftrace_graph_ret *trace,
return;
}
- handle_nosleeptime(trace, gops);
+ ftimes = fgraph_retrieve_data(gops->idx, &size);
+ if (!ftimes)
+ return;
+
+ handle_nosleeptime(trace, ftimes, size);
+
+ trace->calltime = ftimes->calltime;
if (tracing_thresh &&
- (trace->rettime - trace->calltime < tracing_thresh))
+ (trace->rettime - ftimes->calltime < tracing_thresh))
return;
else
trace_graph_return(trace, gops);
--
2.45.2
* Re: [PATCH 0/3] fgraph: Do not save calltime in shadow stack
2024-09-14 21:48 [PATCH 0/3] fgraph: Do not save calltime in shadow stack Steven Rostedt
` (2 preceding siblings ...)
2024-09-14 21:48 ` [PATCH 3/3] ftrace: Have calltime be saved in the fgraph storage Steven Rostedt
@ 2024-09-15 5:22 ` Masami Hiramatsu
3 siblings, 0 replies; 5+ messages in thread
From: Masami Hiramatsu @ 2024-09-15 5:22 UTC (permalink / raw)
To: Steven Rostedt
Cc: linux-kernel, linux-trace-kernel, Masami Hiramatsu, Mark Rutland,
Mathieu Desnoyers, Andrew Morton, Jiri Olsa
On Sat, 14 Sep 2024 17:48:05 -0400
Steven Rostedt <rostedt@goodmis.org> wrote:
>
> Now that there's more users of the function graph infrastructure, the
> calltime field on the shadow stack wastes space and also getting the
> timestamp is a waste of time for those that do not use it.
>
> Instead, have the only two users of it (function graph tracer and the
> profiler) store it in its own shadow stack variable.
This series looks good to me. Instead of adding an ad-hoc flag, this
handles the calltime more cleanly.
For this series,
Acked-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
Thank you,
>
> Steven Rostedt (3):
> fgraph: Use fgraph data to store subtime for profiler
> ftrace: Use a running sleeptime instead of saving in shadow stack
> ftrace: Have calltime be saved in the fgraph storage
>
> ----
> include/linux/ftrace.h | 5 +--
> include/linux/sched.h | 1 +
> kernel/trace/fgraph.c | 85 +++++++++++++++++++++---------------
> kernel/trace/ftrace.c | 45 +++++++++++++------
> kernel/trace/trace.h | 1 +
> kernel/trace/trace_functions_graph.c | 52 +++++++++++++++++++++-
> 6 files changed, 137 insertions(+), 52 deletions(-)
--
Masami Hiramatsu (Google) <mhiramat@kernel.org>